From: Leon Romanovsky <leonro@xxxxxxxxxx>

Introduce a new DMA link/unlink API to provide a way for HMM users to
link pages to an already preallocated IOVA range.

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
 include/linux/dma-mapping.h |  15 ++++++
 kernel/dma/mapping.c        | 102 ++++++++++++++++++++++++++++++++++++
 2 files changed, 117 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index bb541f8944e5..8c2a468c5420 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -123,6 +123,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 int dma_alloc_iova_unaligned(struct dma_iova_state *state, phys_addr_t phys,
 			     size_t size);
 void dma_free_iova(struct dma_iova_state *state);
+dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
+			     dma_addr_t dma_offset);
+void dma_hmm_unlink_page(struct dma_iova_state *state, unsigned long *pfn,
+			 dma_addr_t dma_offset);
 
 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		size_t offset, size_t size, enum dma_data_direction dir,
@@ -189,6 +193,17 @@ static inline int dma_alloc_iova_unaligned(struct dma_iova_state *state,
 static inline void dma_free_iova(struct dma_iova_state *state)
 {
 }
+static inline dma_addr_t dma_hmm_link_page(struct dma_iova_state *state,
+					   unsigned long *pfn,
+					   dma_addr_t dma_offset)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_hmm_unlink_page(struct dma_iova_state *state,
+				       unsigned long *pfn,
+				       dma_addr_t dma_offset)
+{
+}
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 39fac8c21643..5354ddc3ac03 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/cc_platform.h>
+#include <linux/hmm.h>
 #include "debug.h"
 #include "direct.h"
 
@@ -1084,3 +1085,104 @@ void dma_unlink_range_attrs(struct dma_iova_state *state, unsigned long attrs)
 			state->dir, attrs);
 }
 EXPORT_SYMBOL_GPL(dma_unlink_range_attrs);
+
+/**
+ * dma_hmm_link_page - Link a physical HMM page to a DMA address
+ * @state: IOVA state
+ * @pfn: HMM PFN
+ * @dma_offset: DMA offset from which this page needs to be linked
+ *
+ * dma_alloc_iova() allocates an IOVA range whose size the caller specifies
+ * in iova->size. Call this function after IOVA allocation to link the page
+ * backing @pfn and get back its DMA address. Note that the very first call
+ * to this function must have @dma_offset set to 0 in the IOVA space
+ * allocated from dma_alloc_iova(). For subsequent calls to this function
+ * on the same IOVA range, the caller needs to advance @dma_offset by the
+ * size of the previous page that was linked by this function.
+ */
+dma_addr_t dma_hmm_link_page(struct dma_iova_state *state, unsigned long *pfn,
+			     dma_addr_t dma_offset)
+{
+	struct device *dev = state->dev;
+	struct page *page = hmm_pfn_to_page(*pfn);
+	phys_addr_t phys = page_to_phys(page);
+	bool coherent = dev_is_dma_coherent(dev);
+	dma_addr_t addr;
+	int ret;
+
+	if (*pfn & HMM_PFN_DMA_MAPPED)
+		/*
+		 * We are in this flow when there is a need to resync flags,
+		 * for example when the page was already linked in a prefetch
+		 * call with the READ flag and now we need to add the WRITE
+		 * flag.
+		 *
+		 * This page was already programmed to HW and we don't
+		 * want/need to unlink and link it again just to resync flags.
+		 *
+		 * The DMA address calculation below is based on the fact that
+		 * HMM doesn't work with swiotlb.
+		 */
+		return (state->addr) ? state->addr + dma_offset :
+				       phys_to_dma(dev, phys);
+
+	state->range_size = dma_offset;
+
+	/*
+	 * The check below is based on the assumption that HMM range users
+	 * don't work with swiotlb and hence can be either in direct mode
+	 * or in IOMMU mode.
+	 */
+	if (!use_dma_iommu(dev)) {
+		if (!coherent)
+			arch_sync_dma_for_device(phys, PAGE_SIZE, state->dir);
+
+		addr = phys_to_dma(dev, phys);
+		goto done;
+	}
+
+	ret = dma_start_range(state);
+	if (ret)
+		return DMA_MAPPING_ERROR;
+
+	addr = dma_link_range(state, phys, PAGE_SIZE);
+	dma_end_range(state);
+	if (dma_mapping_error(state->dev, addr))
+		return addr;
+
+done:
+	kmsan_handle_dma(page, 0, PAGE_SIZE, state->dir);
+	*pfn |= HMM_PFN_DMA_MAPPED;
+	return addr;
+}
+EXPORT_SYMBOL_GPL(dma_hmm_link_page);
+
+/**
+ * dma_hmm_unlink_page - Unlink a physical HMM page from its DMA address
+ * @state: IOVA state
+ * @pfn: HMM PFN
+ * @dma_offset: DMA offset from which this page needs to be unlinked
+ *              from the IOVA space
+ */
+void dma_hmm_unlink_page(struct dma_iova_state *state, unsigned long *pfn,
+			 dma_addr_t dma_offset)
+{
+	struct device *dev = state->dev;
+	struct page *page;
+	phys_addr_t phys;
+
+	*pfn &= ~HMM_PFN_DMA_MAPPED;
+
+	if (!use_dma_iommu(dev)) {
+		page = hmm_pfn_to_page(*pfn);
+		phys = page_to_phys(page);
+
+		dma_direct_sync_single_for_cpu(dev, phys_to_dma(dev, phys),
+					       PAGE_SIZE, state->dir);
+		return;
+	}
+
+	iommu_dma_unlink_range(dev, state->addr + dma_offset, PAGE_SIZE,
+			       state->dir, 0);
+}
+EXPORT_SYMBOL_GPL(dma_hmm_unlink_page);
-- 
2.46.0
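
For context, a minimal, hypothetical usage sketch of the pair (not part of
the patch): it links every page of an hmm_range into a preallocated IOVA
range, advancing @dma_offset by PAGE_SIZE per page as the kernel-doc above
describes, and unwinds on failure. The function name example_map_hmm_range
and the -EIO return value are illustrative only; it assumes @state was set
up by dma_alloc_iova_unaligned() earlier in this series and that
range->hmm_pfns was populated by hmm_range_fault().

/*
 * Hypothetical example, not part of the patch: link all pages of an
 * hmm_range into a preallocated IOVA range, unwinding on failure.
 */
static int example_map_hmm_range(struct dma_iova_state *state,
				 struct hmm_range *range)
{
	unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
	dma_addr_t addr;

	for (i = 0; i < npages; i++) {
		/* Page i lands i * PAGE_SIZE into the IOVA range */
		addr = dma_hmm_link_page(state, &range->hmm_pfns[i],
					 i * PAGE_SIZE);
		if (dma_mapping_error(state->dev, addr))
			goto unwind;
		/* ... program addr into the device page table here ... */
	}
	return 0;

unwind:
	/* Unlink only the pages that were successfully linked (0..i-1) */
	while (i--)
		dma_hmm_unlink_page(state, &range->hmm_pfns[i],
				    i * PAGE_SIZE);
	return -EIO;
}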