From: Leon Romanovsky <leonro@xxxxxxxxxx>

Introduce new DMA link/unlink API to provide a way for HMM users to
link pages to already preallocated IOVA.

Signed-off-by: Leon Romanovsky <leonro@xxxxxxxxxx>
---
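Notes (illustrative, not part of the applied patch): below is a minimal
sketch of how an HMM user such as an RDMA or GPU driver might drive this
API once hmm_range_fault() has filled an array of HMM PFNs. The
my_map_range()/my_unmap_range() helpers, the error codes, and the
DMA_BIDIRECTIONAL direction are assumptions for the example; the
dma_iova_attrs fields follow their use in the diff below.

static int my_map_range(struct device *dev, unsigned long *pfns,
			size_t npages, struct dma_iova_attrs *iova,
			dma_addr_t *dma_addrs)
{
	size_t i;
	int ret;

	iova->dev = dev;
	iova->size = npages << PAGE_SHIFT;
	iova->dir = DMA_BIDIRECTIONAL;

	/* Reserve IOVA space once, up front, for the whole range */
	ret = dma_alloc_iova(iova);
	if (ret)
		return ret;

	/* Link page by page; the offset advances by PAGE_SIZE per call */
	for (i = 0; i < npages; i++) {
		dma_addrs[i] = dma_hmm_link_page(&pfns[i], iova,
						 i << PAGE_SHIFT);
		if (dma_mapping_error(dev, dma_addrs[i])) {
			ret = -EIO;
			goto err_unlink;
		}
	}
	return 0;

err_unlink:
	while (i--)
		dma_hmm_unlink_page(&pfns[i], iova, i << PAGE_SHIFT);
	dma_free_iova(iova);
	return ret;
}

static void my_unmap_range(unsigned long *pfns, size_t npages,
			   struct dma_iova_attrs *iova)
{
	size_t i;

	/* Unlink every linked page, then release the IOVA space */
	for (i = 0; i < npages; i++)
		dma_hmm_unlink_page(&pfns[i], iova, i << PAGE_SHIFT);
	dma_free_iova(iova);
}

Calling dma_hmm_link_page() on a PFN that already has HMM_PFN_DMA_MAPPED
set only recomputes and returns the existing DMA address, so a
prefetch-then-fault flow can relink a page to resync flags without
unlinking it first.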
 include/linux/dma-mapping.h |  15 +++++
 kernel/dma/mapping.c        | 108 ++++++++++++++++++++++++++++++++++++
 2 files changed, 123 insertions(+)

diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c530095ff232..2578b6615a2f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -135,6 +135,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 int dma_alloc_iova(struct dma_iova_attrs *iova);
 void dma_free_iova(struct dma_iova_attrs *iova);
+dma_addr_t dma_hmm_link_page(unsigned long *pfn, struct dma_iova_attrs *iova,
+			     dma_addr_t dma_offset);
+void dma_hmm_unlink_page(unsigned long *pfn, struct dma_iova_attrs *iova,
+			 dma_addr_t dma_offset);
 
 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		size_t offset, size_t size, enum dma_data_direction dir,
@@ -199,6 +203,17 @@ static inline int dma_alloc_iova(struct dma_iova_attrs *iova)
 static inline void dma_free_iova(struct dma_iova_attrs *iova)
 {
 }
+static inline dma_addr_t dma_hmm_link_page(unsigned long *pfn,
+					   struct dma_iova_attrs *iova,
+					   dma_addr_t dma_offset)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_hmm_unlink_page(unsigned long *pfn,
+				       struct dma_iova_attrs *iova,
+				       dma_addr_t dma_offset)
+{
+}
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 089b4a977bab..69c431bd89e6 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/cc_platform.h>
+#include <linux/hmm.h>
 #include "debug.h"
 #include "direct.h"
 
@@ -1076,3 +1077,110 @@ void dma_unlink_range(struct dma_iova_state *state)
 	ops->unlink_range(state, state->iova->addr, state->range_size);
 }
 EXPORT_SYMBOL_GPL(dma_unlink_range);
+
+/**
+ * dma_hmm_link_page - Link a physical HMM page to a DMA address
+ * @pfn: HMM PFN
+ * @iova: Preallocated IOVA space
+ * @dma_offset: DMA offset at which this page needs to be linked
+ *
+ * dma_alloc_iova() allocates IOVA space whose size the caller specifies
+ * in iova->size. Call this function after the IOVA allocation to link the
+ * page behind @pfn and obtain its DMA address. Note that the very first
+ * call to this function on a given @iova must pass @dma_offset set to 0.
+ * For subsequent calls on the same @iova, the caller must advance
+ * @dma_offset by the size of the page linked in the previous call.
+ */
+dma_addr_t dma_hmm_link_page(unsigned long *pfn, struct dma_iova_attrs *iova,
+			     dma_addr_t dma_offset)
+{
+	struct device *dev = iova->dev;
+	struct page *page = hmm_pfn_to_page(*pfn);
+	phys_addr_t phys = page_to_phys(page);
+	bool coherent = dev_is_dma_coherent(dev);
+	struct dma_memory_type type = {};
+	struct dma_iova_state state = {};
+	dma_addr_t addr;
+	int ret;
+
+	if (*pfn & HMM_PFN_DMA_MAPPED)
+		/*
+		 * We are in this flow when there is a need to resync flags,
+		 * for example when the page was already linked in a prefetch
+		 * call with the READ flag and now we need to add the WRITE
+		 * flag.
+		 *
+		 * This page was already programmed to the HW and we don't
+		 * want/need to unlink and link it again just to resync flags.
+		 *
+		 * The DMA address calculation below relies on the fact that
+		 * HMM doesn't work with swiotlb.
+		 */
+		return (iova->addr) ? iova->addr + dma_offset :
+				      phys_to_dma(dev, phys);
+
+	dma_get_memory_type(page, &type);
+
+	state.iova = iova;
+	state.type = &type;
+	state.range_size = dma_offset;
+
+	if (!dma_can_use_iova(&state, PAGE_SIZE)) {
+		if (!coherent && !(iova->attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			arch_sync_dma_for_device(phys, PAGE_SIZE, iova->dir);
+
+		addr = phys_to_dma(dev, phys);
+		goto done;
+	}
+
+	ret = dma_start_range(&state);
+	if (ret)
+		return DMA_MAPPING_ERROR;
+
+	ret = dma_link_range(&state, page_to_phys(page), PAGE_SIZE);
+	dma_end_range(&state);
+	if (ret)
+		return DMA_MAPPING_ERROR;
+
+	addr = iova->addr + dma_offset;
+done:
+	kmsan_handle_dma(page, 0, PAGE_SIZE, iova->dir);
+	*pfn |= HMM_PFN_DMA_MAPPED;
+	return addr;
+}
+EXPORT_SYMBOL_GPL(dma_hmm_link_page);
+
+/**
+ * dma_hmm_unlink_page - Unlink a physical HMM page from its DMA address
+ * @pfn: HMM PFN
+ * @iova: Preallocated IOVA space
+ * @dma_offset: DMA offset at which this page was linked in the IOVA space
+ */
+void dma_hmm_unlink_page(unsigned long *pfn, struct dma_iova_attrs *iova,
+			 dma_addr_t dma_offset)
+{
+	struct device *dev = iova->dev;
+	struct page *page = hmm_pfn_to_page(*pfn);
+	struct dma_memory_type type = {};
+	struct dma_iova_state state = {};
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	dma_get_memory_type(page, &type);
+
+	state.iova = iova;
+	state.type = &type;
+
+	*pfn &= ~HMM_PFN_DMA_MAPPED;
+
+	if (!dma_can_use_iova(&state, PAGE_SIZE)) {
+		if (!(iova->attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			dma_direct_sync_single_for_cpu(dev, dma_offset,
+						       PAGE_SIZE, iova->dir);
+		return;
+	}
+
+	ops->unlink_range(&state, state.iova->addr + dma_offset, PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(dma_hmm_unlink_page);
--
2.45.2