[PATCH v4 4/6] io_uring: rsrc: avoid use of vmas parameter in pin_user_pages()

We are shortly to remove the vmas parameter from pin_user_pages(), and
will instead perform the required VMA checks ourselves. In most cases
there will be a single VMA, so this should cause no undue impact on an
already slow path.

Doing this eliminates the sole remaining use of the vmas parameter by a
caller of pin_user_pages().

Suggested-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Signed-off-by: Lorenzo Stoakes <lstoakes@xxxxxxxxx>
---
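A note for reviewers less familiar with the maple tree VMA iterator: the
pattern the new check_vmas_locked() helper relies on is roughly the
sketch below. This is illustrative only and not part of the patch; the
mm APIs named are real, but the surrounding context and error handling
are elided:

	/*
	 * Caller must hold mmap_read_lock(current->mm). The iterator
	 * walks the VMAs covering [addr, addr + len).
	 */
	struct vm_area_struct *vma;
	unsigned long end = addr + len;
	VMA_ITERATOR(vmi, current->mm, addr);

	/* vma_next() returns the first VMA at or above addr... */
	vma = vma_next(&vmi);
	if (!vma)
		return -EINVAL;

	/*
	 * ...and for_each_vma_range() continues from the iterator's
	 * current position, so this loop visits each *subsequent* VMA
	 * up to (but not including) end.
	 */
	for_each_vma_range(vmi, vma, end) {
		/* per-VMA checks go here */
	}
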
 io_uring/rsrc.c | 55 ++++++++++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 24 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 7a43aed8e395..3a927df9d913 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1138,12 +1138,37 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 	return ret;
 }
 
+static int check_vmas_locked(unsigned long addr, unsigned long len)
+{
+	struct file *file;
+	VMA_ITERATOR(vmi, current->mm, addr);
+	struct vm_area_struct *vma = vma_next(&vmi);
+	unsigned long end = addr + len;
+
+	if (WARN_ON_ONCE(!vma))
+		return -EINVAL;
+
+	file = vma->vm_file;
+	if (file && !is_file_hugepages(file))
+		return -EOPNOTSUPP;
+
+	/* don't support file backed memory */
+	for_each_vma_range(vmi, vma, end) {
+		if (vma->vm_file != file)
+			return -EINVAL;
+
+		if (file && !vma_is_shmem(vma))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
 {
 	unsigned long start, end, nr_pages;
-	struct vm_area_struct **vmas = NULL;
 	struct page **pages = NULL;
-	int i, pret, ret = -ENOMEM;
+	int pret, ret = -ENOMEM;
 
 	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	start = ubuf >> PAGE_SHIFT;
@@ -1153,31 +1178,14 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
 	if (!pages)
 		goto done;
 
-	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
-			      GFP_KERNEL);
-	if (!vmas)
-		goto done;
-
 	ret = 0;
 	mmap_read_lock(current->mm);
+
 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
-			      pages, vmas);
-	if (pret == nr_pages) {
-		struct file *file = vmas[0]->vm_file;
+			      pages, NULL);
 
-		/* don't support file backed memory */
-		for (i = 0; i < nr_pages; i++) {
-			if (vmas[i]->vm_file != file) {
-				ret = -EINVAL;
-				break;
-			}
-			if (!file)
-				continue;
-			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
-				ret = -EOPNOTSUPP;
-				break;
-			}
-		}
+	if (pret == nr_pages) {
+		ret = check_vmas_locked(ubuf, len);
 		*npages = nr_pages;
 	} else {
 		ret = pret < 0 ? pret : -EFAULT;
@@ -1194,7 +1202,6 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
 	}
 	ret = 0;
 done:
-	kvfree(vmas);
 	if (ret < 0) {
 		kvfree(pages);
 		pages = ERR_PTR(ret);
-- 
2.40.0




