Fuad Tabba <tabba@xxxxxxxxxx> writes: > <snip> > > +static vm_fault_t kvm_gmem_fault(struct vm_fault *vmf) > +{ > + struct inode *inode = file_inode(vmf->vma->vm_file); > + struct folio *folio; > + vm_fault_t ret = VM_FAULT_LOCKED; > + > + filemap_invalidate_lock_shared(inode->i_mapping); > + > + folio = kvm_gmem_get_folio(inode, vmf->pgoff); > + if (IS_ERR(folio)) { > + switch (PTR_ERR(folio)) { > + case -EAGAIN: > + ret = VM_FAULT_RETRY; > + break; > + case -ENOMEM: > + ret = VM_FAULT_OOM; > + break; > + default: > + ret = VM_FAULT_SIGBUS; > + break; > + } > + goto out_filemap; > + } > + > + if (folio_test_hwpoison(folio)) { > + ret = VM_FAULT_HWPOISON; > + goto out_folio; > + } > + > + /* Must be called with folio lock held, i.e., after kvm_gmem_get_folio() */ > + if (!kvm_gmem_offset_is_shared(vmf->vma->vm_file, vmf->pgoff)) { > + ret = VM_FAULT_SIGBUS; > + goto out_folio; > + } > + > + /* > + * Only private folios are marked as "guestmem" so far, and we never > + * expect private folios at this point. > + */ I think this is not quite accurate. Based on my understanding, and on kvm_gmem_handle_folio_put() in this other patch [1], only pages *in transition* from shared to private state are marked "guestmem", although it is true that no private folios or folios marked guestmem are expected here. > + if (WARN_ON_ONCE(folio_test_guestmem(folio))) { > + ret = VM_FAULT_SIGBUS; > + goto out_folio; > + } > + > + /* No support for huge pages. 
*/ > + if (WARN_ON_ONCE(folio_test_large(folio))) { > + ret = VM_FAULT_SIGBUS; > + goto out_folio; > + } > + > + if (!folio_test_uptodate(folio)) { > + clear_highpage(folio_page(folio, 0)); > + kvm_gmem_mark_prepared(folio); > + } > + > + vmf->page = folio_file_page(folio, vmf->pgoff); > + > +out_folio: > + if (ret != VM_FAULT_LOCKED) { > + folio_unlock(folio); > + folio_put(folio); > + } > + > +out_filemap: > + filemap_invalidate_unlock_shared(inode->i_mapping); > + > + return ret; > +} > > <snip> [1] https://lore.kernel.org/all/20250117163001.2326672-7-tabba@xxxxxxxxxx/