iommu_cap::IOMMU_CAP_DIRTY: a new capability reported via iommu_capable()
when probing whether a device's IOMMU supports dirty tracking.
.set_dirty_tracking(): an iommu driver is expected to change its
translation structures and enable dirty tracking for the devices in the
iommu_domain. Drivers for which dirty tracking is always enabled should
just return 0.
.read_and_clear_dirty(): an iommu driver is expected to walk the IOVA range
passed in and use iommu_dirty_bitmap_record() to record dirty info per
IOVA. When it finds that a given IOVA is dirty, it should also clear the
dirty state from the PTE, *unless* the IOMMU_DIRTY_NO_CLEAR flag is passed
in; flushing is steered from the caller of the domain op via iotlb_gather.
The iommu core APIs reuse the data structure already used for VFIO device
dirty tracking (struct iova_bitmap), abstracted by the
iommu_dirty_bitmap_record() helper. A hypothetical driver-side sketch and a
caller-side sketch follow below.
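
A hedged driver-side sketch of how the two ops could be wired up; every
foo_*() name below is hypothetical and not part of this patch:

static int foo_set_dirty_tracking(struct iommu_domain *domain, bool enabled)
{
        /*
         * Flip the hardware dirty-bit mechanism for all devices attached
         * to this domain; an always-enabled implementation just returns 0.
         */
        return foo_enable_hw_dirty(domain, enabled);
}

static int foo_read_and_clear_dirty(struct iommu_domain *domain,
                                    unsigned long iova, size_t size,
                                    unsigned long flags,
                                    struct iommu_dirty_bitmap *dirty)
{
        unsigned long end = iova + size;

        while (iova < end) {
                size_t pgsize = foo_pte_size(domain, iova);

                /*
                 * Test the PTE dirty bit, clearing it unless the caller
                 * asked us not to via IOMMU_DIRTY_NO_CLEAR.
                 */
                if (foo_pte_test_dirty(domain, iova,
                                       !(flags & IOMMU_DIRTY_NO_CLEAR)))
                        iommu_dirty_bitmap_record(dirty, iova, pgsize);

                iova += pgsize;
        }

        return 0;
}

static const struct iommu_domain_ops foo_domain_ops = {
        /* ... */
        .set_dirty_tracking     = foo_set_dirty_tracking,
        .read_and_clear_dirty   = foo_read_and_clear_dirty,
};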
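
And a caller-side sketch combining the capability probe, the
IOMMU_DOMAIN_F_ENFORCE_DIRTY flag and the bitmap helpers. This patch adds no
core wrappers for the two new ops, so they are invoked directly through
domain->ops here; the function itself and its error handling are
illustrative only:

static int example_read_dirty(struct device *dev, struct iommu_domain *domain,
                              struct iova_bitmap *bitmap,
                              unsigned long iova, size_t size)
{
        struct iommu_iotlb_gather gather;
        struct iommu_dirty_bitmap dirty;
        int ret;

        /* Bail out if the device's IOMMU cannot track dirty pages. */
        if (!device_iommu_capable(dev, IOMMU_CAP_DIRTY))
                return -EOPNOTSUPP;

        /* From now on only dirty-tracking capable devices may attach. */
        ret = iommu_domain_set_flags(domain, dev->bus,
                                     IOMMU_DOMAIN_F_ENFORCE_DIRTY);
        if (ret)
                return ret;

        ret = domain->ops->set_dirty_tracking(domain, true);
        if (ret)
                return ret;

        /*
         * Marshal dirty PTE bits into @bitmap, gathering any IOTLB flushes
         * the driver queued while clearing the dirty bits.
         */
        iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
        ret = domain->ops->read_and_clear_dirty(domain, iova, size, 0, &dirty);
        iommu_iotlb_sync(domain, &gather);

        return ret;
}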
Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
drivers/iommu/iommu.c | 11 +++++++
include/linux/io-pgtable.h | 4 +++
include/linux/iommu.h | 67 ++++++++++++++++++++++++++++++++++++++
3 files changed, 82 insertions(+)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2088caae5074..95acc543e8fb 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2013,6 +2013,17 @@ struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
+int iommu_domain_set_flags(struct iommu_domain *domain,
+ const struct bus_type *bus, unsigned long val)
+{
+ if (!(val & bus->iommu_ops->supported_flags))
+ return -EINVAL;
+
+ domain->flags |= val;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_domain_set_flags);
+
void iommu_domain_free(struct iommu_domain *domain)
{
if (domain->type == IOMMU_DOMAIN_SVA)
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 1b7a44b35616..25142a0e2fc2 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -166,6 +166,10 @@ struct io_pgtable_ops {
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 39d25645a5ab..992ea87f2f8e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/iova_bitmap.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -65,6 +66,11 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+/* Domain feature flags that do not define domain types */
+#define IOMMU_DOMAIN_F_ENFORCE_DIRTY (1U << 6) /* Enforce attachment of
+						   dirty tracking supported
+						   devices */
+
/*
* This are the possible domain-types
*
@@ -93,6 +99,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
+ unsigned flags;
const struct iommu_domain_ops *ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
@@ -128,6 +135,7 @@ enum iommu_cap {
* this device.
*/
IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+ IOMMU_CAP_DIRTY, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
@@ -220,6 +228,17 @@ struct iommu_iotlb_gather {
bool queued;
};
+/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ *
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
@@ -248,6 +267,7 @@ struct iommu_iotlb_gather {
* pasid, so that any DMA transactions with this pasid
* will be blocked by the hardware.
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @supported_flags: Supported non-domain-type feature flags
* @owner: Driver module providing these ops
*/
struct iommu_ops {
@@ -281,6 +301,7 @@ struct iommu_ops {
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
+ unsigned long supported_flags;
struct module *owner;
};
@@ -316,6 +337,11 @@ struct iommu_ops {
* @enable_nesting: Enable nesting
* @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
* @free: Release the domain after use.
+ * @set_dirty_tracking: Enable or disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ *                        into a bitmap, with one bit representing a page.
+ *                        Reads the dirty PTE bits and clears them from the
+ *                        IO page tables.
*/
struct iommu_domain_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
@@ -348,6 +374,12 @@ struct iommu_domain_ops {
unsigned long quirks);
void (*free)(struct iommu_domain *domain);
+
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
@@ -461,6 +493,9 @@ extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
+extern int iommu_domain_set_flags(struct iommu_domain *domain,
+ const struct bus_type *bus,
+ unsigned long flags);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
@@ -627,6 +662,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
return gather && gather->queued;
}
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
+
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void
+iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
+}
+
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
@@ -657,6 +714,9 @@ struct iommu_fwspec {
/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
/**
* struct iommu_sva - handle to a device-mm bond
*/
@@ -755,6 +815,13 @@ static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus
return NULL;
}
+static inline int iommu_domain_set_flags(struct iommu_domain *domain,
+ const struct bus_type *bus,
+ unsigned long flags)
+{
+ return -ENODEV;
+}
+
static inline void iommu_domain_free(struct iommu_domain *domain)
{
}