[PATCH RFC 08/19] iommufd: Add a test for dirty tracking ioctls

Add a new test ioctl to simulate dirty IOVAs in the mock domain, and
implement the mock iommu domain ops that support dirty tracking.

The selftest exercises the main workflow, sketched below:

1) Setting/clearing dirty tracking on the iommu domain
2) Reading and clearing dirty IOPTEs
3) Unmapping and reading back the dirty state
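
A rough sketch of the userspace side of that flow, with error handling
omitted (set_and_read_dirty() is only an illustrative helper; iommufd,
hwpt_id, ioas_id, iova, length, page_size and bitmap are placeholders,
and the uAPI structs are the ones added earlier in this series, used the
same way as in the selftest below):

	/* Assumes <sys/ioctl.h> and the series' <linux/iommufd.h> */
	static int set_and_read_dirty(int iommufd, __u32 hwpt_id, __u32 ioas_id,
				      __u64 iova, __u64 length, __u64 page_size,
				      __u64 *bitmap)
	{
		/* 1) Enable dirty tracking on the HW pagetable */
		struct iommu_hwpt_set_dirty set_dirty = {
			.size = sizeof(set_dirty),
			.flags = IOMMU_DIRTY_TRACKING_ENABLED,
			.hwpt_id = hwpt_id,
		};
		/* 2) Read-and-clear the dirty IOPTEs into @bitmap */
		struct iommu_hwpt_get_dirty_iova get_dirty = {
			.size = sizeof(get_dirty),
			.hwpt_id = hwpt_id,
			.bitmap = { .iova = iova, .length = length,
				    .page_size = page_size, .data = bitmap },
		};
		/* 3) Unmap and read back the dirty state of the unmapped IOVAs */
		struct iommu_ioas_unmap_dirty unmap_dirty = {
			.size = sizeof(unmap_dirty),
			.ioas_id = ioas_id,
			.bitmap = { .iova = iova, .length = length,
				    .page_size = page_size, .data = bitmap },
		};

		if (ioctl(iommufd, IOMMU_HWPT_SET_DIRTY, &set_dirty))
			return -1;
		if (ioctl(iommufd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty))
			return -1;
		return ioctl(iommufd, IOMMU_IOAS_UNMAP_DIRTY, &unmap_dirty);
	}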

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
 drivers/iommu/iommufd/iommufd_test.h    |   9 ++
 drivers/iommu/iommufd/selftest.c        | 137 +++++++++++++++++++++++-
 tools/testing/selftests/iommu/Makefile  |   1 +
 tools/testing/selftests/iommu/iommufd.c | 135 +++++++++++++++++++++++
 4 files changed, 279 insertions(+), 3 deletions(-)
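
For the record: with a 4K base page size the geometry exercised by the
new test works out to MOCK_PAGE_SIZE = 2K and BUFFER_SIZE = 16 * 4K = 64K,
i.e. 32 mock pages, so BITMAP_SIZE = 64K / 2K / 8 = 4 bytes and the test
walks nbits = 32 bits, marking the 16 even ones dirty.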

diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index d22ef484af1a..90dafa513078 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -14,6 +14,7 @@ enum {
 	IOMMU_TEST_OP_MD_CHECK_REFS,
 	IOMMU_TEST_OP_ACCESS_PAGES,
 	IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
+	IOMMU_TEST_OP_DIRTY,
 };
 
 enum {
@@ -57,6 +58,14 @@ struct iommu_test_cmd {
 		struct {
 			__u32 limit;
 		} memory_limit;
+		struct {
+			__u32 flags;
+			__aligned_u64 iova;
+			__aligned_u64 length;
+			__aligned_u64 page_size;
+			__aligned_u64 uptr;
+			__aligned_u64 out_nr_dirty;
+		} dirty;
 	};
 	__u32 last;
 };
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a665719b493e..b02309722436 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -13,6 +13,7 @@
 size_t iommufd_test_memory_limit = 65536;
 
 enum {
+	MOCK_DIRTY_TRACK = 1,
 	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
 
 	/*
@@ -25,9 +26,11 @@ enum {
 	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
 	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
 	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
+	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
 };
 
 struct mock_iommu_domain {
+	unsigned long flags;
 	struct iommu_domain domain;
 	struct xarray pfns;
 };
@@ -133,7 +136,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 
 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
-			WARN_ON(!ent);
+
 			/*
 			 * iommufd generates unmaps that must be a strict
 			 * superset of the map's performend So every starting
@@ -143,12 +146,12 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 			 * passed to map_pages
 			 */
 			if (first) {
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_START_IOVA));
 				first = false;
 			}
 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_LAST_IOVA));
 
 			iova += MOCK_IO_PAGE_SIZE;
@@ -171,6 +174,75 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
 }
 
+static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
+					  bool enable)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long flags = mock->flags;
+
+	/* No change? */
+	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
+		return -EINVAL;
+
+	flags = (enable ?
+		 flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
+
+	mock->flags = flags;
+	return 0;
+}
+
+static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
+					    unsigned long iova, size_t size,
+					    struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
+	void *ent, *old;
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK))
+		return -EINVAL;
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+
+		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+		if (ent &&
+		    (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
+			unsigned long val;
+
+			/* Clear dirty */
+			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			iommu_dirty_bitmap_record(dirty, cur, MOCK_IO_PAGE_SIZE);
+		}
+	}
+
+	return 0;
+}
+
+static size_t mock_domain_unmap_read_dirty(struct iommu_domain *domain,
+					   unsigned long iova, size_t page_size,
+					   struct iommu_iotlb_gather *gather,
+					   struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	void *ent;
+
+	WARN_ON(page_size != MOCK_IO_PAGE_SIZE);
+
+	ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+	if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA) &&
+	    (mock->flags & MOCK_DIRTY_TRACK))
+		iommu_dirty_bitmap_record(dirty, iova, page_size);
+
+	return ent ? page_size : 0;
+}
+
 static const struct iommu_ops mock_ops = {
 	.owner = THIS_MODULE,
 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
@@ -181,6 +253,9 @@ static const struct iommu_ops mock_ops = {
 			.map_pages = mock_domain_map_pages,
 			.unmap_pages = mock_domain_unmap_pages,
 			.iova_to_phys = mock_domain_iova_to_phys,
+			.set_dirty_tracking = mock_domain_set_dirty_tracking,
+			.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
+			.unmap_read_dirty = mock_domain_unmap_read_dirty,
 		},
 };
 
@@ -442,6 +517,56 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
 	return rc;
 }
 
+static int iommufd_test_dirty(struct iommufd_ucmd *ucmd,
+			      unsigned int mockpt_id, unsigned long iova,
+			      size_t length, unsigned long page_size,
+			      void __user *uptr, u32 flags)
+{
+	unsigned long i, max = length / page_size;
+	struct iommu_test_cmd *cmd = ucmd->cmd;
+	struct iommufd_hw_pagetable *hwpt;
+	struct mock_iommu_domain *mock;
+	int rc, count = 0;
+
+	if (iova % page_size || length % page_size ||
+	    (uintptr_t)uptr % page_size)
+		return -EINVAL;
+
+	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
+	if (IS_ERR(hwpt))
+		return PTR_ERR(hwpt);
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
+		rc = -EINVAL;
+		goto out_put;
+	}
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * page_size;
+		void *ent, *old;
+
+		if (!test_bit(i, (unsigned long *) uptr))
+			continue;
+
+		ent = xa_load(&mock->pfns, cur / page_size);
+		if (ent) {
+			unsigned long val;
+
+			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / page_size,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			count++;
+		}
+	}
+
+	cmd->dirty.out_nr_dirty = count;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+out_put:
+	iommufd_put_object(&hwpt->obj);
+	return rc;
+}
+
 void iommufd_selftest_destroy(struct iommufd_object *obj)
 {
 	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);
@@ -486,6 +611,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
 			cmd->access_pages.length,
 			u64_to_user_ptr(cmd->access_pages.uptr),
 			cmd->access_pages.flags);
+	case IOMMU_TEST_OP_DIRTY:
+		return iommufd_test_dirty(
+			ucmd, cmd->id, cmd->dirty.iova,
+			cmd->dirty.length, cmd->dirty.page_size,
+			u64_to_user_ptr(cmd->dirty.uptr),
+			cmd->dirty.flags);
 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
 		iommufd_test_memory_limit = cmd->memory_limit.limit;
 		return 0;
diff --git a/tools/testing/selftests/iommu/Makefile b/tools/testing/selftests/iommu/Makefile
index 7bc38b3beaeb..48d4dcf11506 100644
--- a/tools/testing/selftests/iommu/Makefile
+++ b/tools/testing/selftests/iommu/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 CFLAGS += -Wall -O2 -Wno-unused-function
+CFLAGS += -I../../../../tools/include/
 CFLAGS += -I../../../../include/uapi/
 CFLAGS += -I../../../../include/
 
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 5c47d706ed94..3a494f7958f4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -13,13 +13,18 @@
 #define __EXPORTED_HEADERS__
 #include <linux/iommufd.h>
 #include <linux/vfio.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
+#define BITS_PER_BYTE 8
 
 static void *buffer;
+static void *bitmap;
 
 static unsigned long PAGE_SIZE;
 static unsigned long HUGEPAGE_SIZE;
 static unsigned long BUFFER_SIZE;
+static unsigned long BITMAP_SIZE;
 
 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
 
@@ -52,6 +57,10 @@ static __attribute__((constructor)) void setup_sizes(void)
 	BUFFER_SIZE = PAGE_SIZE * 16;
 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
 	assert(rc || buffer || (uintptr_t)buffer % HUGEPAGE_SIZE == 0);
+
+	BITMAP_SIZE = BUFFER_SIZE / MOCK_PAGE_SIZE / BITS_PER_BYTE;
+	rc = posix_memalign(&bitmap, PAGE_SIZE, BUFFER_SIZE);
+	assert(rc || bitmap || (uintptr_t)bitmap % PAGE_SIZE == 0);
 }
 
 /*
@@ -546,6 +555,132 @@ TEST_F(iommufd_ioas, iova_ranges)
 	EXPECT_EQ(0, cmd->out_valid_iovas[1].last);
 }
 
+TEST_F(iommufd_ioas, dirty)
+{
+	struct iommu_ioas_map map_cmd = {
+		.size = sizeof(map_cmd),
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.ioas_id = self->ioas_id,
+		.user_va = (uintptr_t)buffer,
+		.length = BUFFER_SIZE,
+		.iova = MOCK_APERTURE_START,
+	};
+	struct iommu_test_cmd mock_cmd = {
+		.size = sizeof(mock_cmd),
+		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
+		.id = self->ioas_id,
+	};
+	struct iommu_hwpt_set_dirty set_dirty_cmd = {
+		.size = sizeof(set_dirty_cmd),
+		.flags = IOMMU_DIRTY_TRACKING_ENABLED,
+		.hwpt_id = self->ioas_id,
+	};
+	struct iommu_test_cmd dirty_cmd = {
+		.size = sizeof(dirty_cmd),
+		.op = IOMMU_TEST_OP_DIRTY,
+		.id = self->ioas_id,
+		.dirty = { .iova = MOCK_APERTURE_START,
+			   .length = BUFFER_SIZE,
+			   .page_size = MOCK_PAGE_SIZE,
+			   .uptr = (uintptr_t)bitmap },
+	};
+	struct iommu_hwpt_get_dirty_iova get_dirty_cmd = {
+		.size = sizeof(get_dirty_cmd),
+		.hwpt_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		}
+	};
+	struct iommu_ioas_unmap_dirty unmap_dirty_cmd = {
+		.size = sizeof(unmap_dirty_cmd),
+		.ioas_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		},
+	};
+	struct iommu_destroy destroy_cmd = { .size = sizeof(destroy_cmd) };
+	unsigned long i, count, nbits = BITMAP_SIZE * BITS_PER_BYTE;
+
+	/* Toggle dirty with a domain and a single map */
+	ASSERT_EQ(0, ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MOCK_DOMAIN),
+			   &mock_cmd));
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_MAP, &map_cmd));
+
+	set_dirty_cmd.hwpt_id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+
+	dirty_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	get_dirty_cmd.hwpt_id = mock_cmd.id;
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* Should be all zeroes */
+	for (i = 0; i < nbits; i++)
+		ASSERT_EQ(0, test_bit(i, (unsigned long *) bitmap));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_IOAS_UNMAP_DIRTY, &unmap_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i % 2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	set_dirty_cmd.flags = IOMMU_DIRTY_TRACKING_DISABLED;
+	ASSERT_EQ(0,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	destroy_cmd.id = mock_cmd.mock_domain.device_id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+	destroy_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+}
+
 TEST_F(iommufd_ioas, access)
 {
 	struct iommu_ioas_map map_cmd = {
-- 
2.17.2



