Here's a more comprehensive read_folio patch. It's not at all efficient, but then if we wanted an efficient vboxsf, we'd implement vboxsf_readahead() and actually do an async call with deferred setting of the uptodate flag. I can consult with anyone who wants to do all this work. I haven't even compiled this, just trying to show the direction this should take. diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index 2307f8037efc..f1af9a7bd3d8 100644 --- a/fs/vboxsf/file.c +++ b/fs/vboxsf/file.c @@ -227,26 +227,31 @@ const struct inode_operations vboxsf_reg_iops = { static int vboxsf_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; struct vboxsf_handle *sf_handle = file->private_data; - loff_t off = page_offset(page); - u32 nread = PAGE_SIZE; - u8 *buf; + loff_t pos = folio_pos(folio); + size_t offset = 0; int err; - buf = kmap(page); + do { + u8 *buf = kmap_local_folio(folio, offset); + u32 nread = PAGE_SIZE; - err = vboxsf_read(sf_handle->root, sf_handle->handle, off, &nread, buf); - if (err == 0) { - memset(&buf[nread], 0, PAGE_SIZE - nread); - flush_dcache_page(page); - SetPageUptodate(page); - } else { - SetPageError(page); - } + err = vboxsf_read(sf_handle->root, sf_handle->handle, pos, + &nread, buf); + if (nread < PAGE_SIZE) + memset(&buf[nread], 0, PAGE_SIZE - nread); + kunmap_local(buf); + if (err) + break; + offset += PAGE_SIZE; + pos += PAGE_SIZE; + } while (offset < folio_size(folio)); - kunmap(page); - unlock_page(page); + if (!err) { + flush_dcache_folio(folio); + folio_mark_uptodate(folio); + } + folio_unlock(folio); return err; }