From: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>

Add support for mapping device pages to Xe SVM by attaching drm_pagemap
to a memory region, which is then linked to a GPU SVM devmem allocation.
This enables GPU SVM to derive the device page address.

v3:
- Better commit message (Thomas)
- New drm_pagemap.h location

Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
Reviewed-by: Matthew Brost <matthew.brost@xxxxxxxxx>
---
 drivers/gpu/drm/xe/xe_device_types.h |  7 +++++++
 drivers/gpu/drm/xe/xe_svm.c          | 30 ++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index b949a960cebb..b76d08df13ef 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,8 +10,10 @@
 
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
+#include <drm/drm_pagemap.h>
 #include <drm/ttm/ttm_device.h>
 
+
 #include "xe_devcoredump_types.h"
 #include "xe_heci_gsc.h"
 #include "xe_lmtt_types.h"
@@ -104,6 +106,11 @@ struct xe_mem_region {
 	void __iomem *mapping;
 	/** @pagemap: Used to remap device memory as ZONE_DEVICE */
 	struct dev_pagemap pagemap;
+	/**
+	 * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+	 * pages of this tile.
+	 */
+	struct drm_pagemap dpagemap;
 	/**
 	 * @hpa_base: base host physical address
 	 *
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 2860e54bb035..faeacf0ccdaa 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -449,6 +449,32 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
 }
 
 #if IS_ENABLED(CONFIG_XE_DEVMEM_MIRROR)
+static struct drm_pagemap_dma_addr
+xe_drm_pagemap_map_dma(struct drm_pagemap *dpagemap,
+		       struct device *dev,
+		       struct page *page,
+		       unsigned int order,
+		       enum dma_data_direction dir)
+{
+	struct device *pgmap_dev = dpagemap->dev;
+	enum drm_interconnect_protocol prot;
+	dma_addr_t addr;
+
+	if (pgmap_dev == dev) {
+		addr = xe_mem_region_page_to_dpa(page_to_mr(page), page);
+		prot = XE_INTERCONNECT_VRAM;
+	} else {
+		addr = DMA_MAPPING_ERROR;
+		prot = 0;
+	}
+
+	return drm_pagemap_dma_addr_encode(addr, prot, order, dir);
+}
+
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+	.map_dma = xe_drm_pagemap_map_dma,
+};
+
 /**
  * xe_devm_add: Remap and provide memmap backing for device memory
  * @tile: tile that the memory region belongs to
@@ -481,6 +507,10 @@ int xe_devm_add(struct xe_tile *tile, struct xe_mem_region *mr)
 	mr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
 	mr->pagemap.owner = xe_svm_devm_owner(xe);
 	addr = devm_memremap_pages(dev, &mr->pagemap);
+
+	mr->dpagemap.dev = dev;
+	mr->dpagemap.ops = &xe_drm_pagemap_ops;
+
 	if (IS_ERR(addr)) {
 		devm_release_mem_region(dev, res->start, resource_size(res));
 		ret = PTR_ERR(addr);
-- 
2.34.1
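
For illustration, a consumer holding a device page could resolve it to a
DMA address through the drm_pagemap attached in xe_devm_add() roughly as
follows. This is a minimal sketch, not part of the patch:
probe_devmem_addr() is a hypothetical helper, and reading the result via
an .addr field assumes the encoding laid down by
drm_pagemap_dma_addr_encode() above.

#include <linux/dma-mapping.h>

#include <drm/drm_pagemap.h>

#include "xe_device_types.h"

/* Sketch only: look up the DMA address of a single device page. */
static dma_addr_t probe_devmem_addr(struct xe_mem_region *mr,
				    struct device *dev, struct page *page)
{
	struct drm_pagemap *dpagemap = &mr->dpagemap;
	struct drm_pagemap_dma_addr daddr;

	/*
	 * order 0 == one page. For mr's own device, the map_dma op above
	 * encodes the device physical address with XE_INTERCONNECT_VRAM;
	 * for any other device it yields DMA_MAPPING_ERROR, since no
	 * device-to-device interconnect is wired up yet.
	 */
	daddr = dpagemap->ops->map_dma(dpagemap, dev, page, 0,
				       DMA_BIDIRECTIONAL);

	return daddr.addr; /* assumed field, see note above */
}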