From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>

Although kvm_gmem_punch_hole() currently keeps all pages in the mapping when
punching a hole, the common expectation is that the pages are truncated.
Truncate the pages when punching a hole.  As page contents can be encrypted,
avoid zeroing a partial folio by rejecting a partial (not page-aligned) punch
hole with -EINVAL.

Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 virt/kvm/guest_mem.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index a819367434e9..01fb4ca861d0 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -130,22 +130,32 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct list_head *gmem_list = &inode->i_mapping->private_list;
+	struct address_space *mapping = inode->i_mapping;
 	pgoff_t start = offset >> PAGE_SHIFT;
 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
 	struct kvm_gmem *gmem;
 
+	/*
+	 * Punching a hole may result in zeroing a partial area.  As pages
+	 * can be encrypted, prohibit zeroing a partial area.
+	 */
+	if (offset & ~PAGE_MASK || len & ~PAGE_MASK)
+		return -EINVAL;
+
 	/*
 	 * Bindings must stable across invalidation to ensure the start+end
 	 * are balanced.
 	 */
-	filemap_invalidate_lock(inode->i_mapping);
+	filemap_invalidate_lock(mapping);
 
 	list_for_each_entry(gmem, gmem_list, entry) {
 		kvm_gmem_invalidate_begin(gmem, start, end);
 		kvm_gmem_invalidate_end(gmem, start, end);
 	}
 
-	filemap_invalidate_unlock(inode->i_mapping);
+	truncate_inode_pages_range(mapping, offset, offset + len - 1);
+
+	filemap_invalidate_unlock(mapping);
 
 	return 0;
 }
-- 
2.25.1
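
For illustration (not part of the patch): a minimal userspace sketch of how
the new alignment requirement is expected to surface through fallocate().
punch_gmem_hole() and gmem_fd are hypothetical names; the fd is assumed to
come from the guest_mem creation ioctl introduced elsewhere in this series.

/*
 * Illustrative only.  After this change, a hole punch on a guest_mem fd
 * with an unaligned offset or length is expected to fail with EINVAL
 * instead of zeroing part of a folio.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int punch_gmem_hole(int gmem_fd, off_t offset, off_t len)
{
	long page_size = sysconf(_SC_PAGESIZE);

	/*
	 * Mirror the kernel-side check: both offset and len must be page
	 * aligned, otherwise kvm_gmem_punch_hole() returns -EINVAL.
	 */
	if ((offset | len) & (page_size - 1)) {
		fprintf(stderr, "offset/len must be page aligned\n");
		return -EINVAL;
	}

	/* FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE. */
	if (fallocate(gmem_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      offset, len) < 0) {
		perror("fallocate(PUNCH_HOLE)");
		return -errno;
	}
	return 0;
}

A caller would pass page-aligned ranges; with this patch applied the kernel
truncates the covered pages rather than leaving them in the mapping.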