Currently the preparedness tracking relies on holding a folio's lock to
keep allocations/preparations and the corresponding updates to the
prepared bitmap atomic. However, on the invalidation side, the bitmap
entry for the GFN/index corresponding to a folio might need to be
cleared after truncation. In these cases the folios are no longer part
of the filemap, so nothing guards against a newly-allocated folio
getting prepared for the same GFN/index and then subsequently having
its bitmap entry cleared by the concurrently executing invalidation
code.

Avoid this by holding the filemap invalidation lock, so that
allocations/preparations and the corresponding updates to the prepared
bitmap are atomic even with respect to invalidations. Use a shared lock
in the kvm_gmem_get_pfn() case so vCPUs can still fault in pages in
parallel.

Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 virt/kvm/guest_memfd.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 6907ae9fe149..9a5172de6a03 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -154,6 +154,8 @@ static void kvm_gmem_mark_prepared(struct file *file, pgoff_t index, int order)
 	unsigned long npages = (1ul << order);
 	unsigned long *p;
 
+	rwsem_assert_held(&file->f_mapping->invalidate_lock);
+
 	/* The index isn't necessarily aligned to the requested order. */
 	index &= ~(npages - 1);
 	p = i_gmem->prepared + BIT_WORD(index);
@@ -174,6 +176,8 @@ static void kvm_gmem_mark_range_unprepared(struct inode *inode, pgoff_t index, p
 	struct kvm_gmem_inode *i_gmem = (struct kvm_gmem_inode *)inode->i_private;
 	unsigned long *p = i_gmem->prepared + BIT_WORD(index);
 
+	rwsem_assert_held(&inode->i_mapping->invalidate_lock);
+
 	index &= BITS_PER_LONG - 1;
 	if (index) {
 		int first_word_count = min(npages, BITS_PER_LONG - index);
@@ -200,6 +204,8 @@ static bool kvm_gmem_is_prepared(struct file *file, pgoff_t index, int order)
 	unsigned long *p;
 	bool ret;
 
+	rwsem_assert_held(&file->f_mapping->invalidate_lock);
+
 	/* The index isn't necessarily aligned to the requested order. */
 	index &= ~(npages - 1);
 	p = i_gmem->prepared + BIT_WORD(index);
@@ -232,6 +238,8 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct file *file,
 	pgoff_t index, aligned_index;
 	int r;
 
+	rwsem_assert_held(&file->f_mapping->invalidate_lock);
+
 	index = gfn - slot->base_gfn + slot->gmem.pgoff;
 	nr_pages = (1ull << max_order);
 	WARN_ON(nr_pages > folio_nr_pages(folio));
@@ -819,12 +827,16 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	pgoff_t index = kvm_gmem_get_index(slot, gfn);
 	struct file *file = kvm_gmem_get_file(slot);
 	int max_order_local;
+	struct address_space *mapping;
 	struct folio *folio;
 	int r = 0;
 
 	if (!file)
 		return -EFAULT;
 
+	mapping = file->f_inode->i_mapping;
+	filemap_invalidate_lock_shared(mapping);
+
 	/*
 	 * The caller might pass a NULL 'max_order', but internally this
 	 * function needs to be aware of any order limitations set by
@@ -838,6 +850,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 	folio = __kvm_gmem_get_pfn(file, slot, index, pfn, &max_order_local);
 	if (IS_ERR(folio)) {
 		r = PTR_ERR(folio);
+		filemap_invalidate_unlock_shared(mapping);
 		goto out;
 	}
 
@@ -845,6 +858,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		r = kvm_gmem_prepare_folio(kvm, file, slot, gfn, folio,
 					   max_order_local);
 	folio_unlock(folio);
+	filemap_invalidate_unlock_shared(mapping);
 
 	if (!r)
 		*page = folio_file_page(folio, index);
-- 
2.25.1
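
For reference, the exclusive side of this locking scheme, which the
rwsem_assert_held() calls above pair with, would look roughly like the
following. This is an illustrative sketch only, not part of this diff:
example_punch_hole() is a hypothetical stand-in for guest_memfd's
actual hole-punching path, and the exact placement of the
kvm_gmem_mark_range_unprepared() call is an assumption.

/*
 * Illustrative sketch (not from this patch): the truncation path takes
 * invalidate_lock exclusively, so the shared holders in
 * kvm_gmem_get_pfn() cannot allocate and prepare a new folio for these
 * indices while the truncation and bitmap clearing below are in flight.
 */
static long example_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t start = offset >> PAGE_SHIFT;
	pgoff_t npages = len >> PAGE_SHIFT;

	filemap_invalidate_lock(inode->i_mapping);

	/* Remove the folios, then clear their preparedness tracking. */
	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
	kvm_gmem_mark_range_unprepared(inode, start, npages);

	filemap_invalidate_unlock(inode->i_mapping);

	return 0;
}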