Some DMA allocations are not going to be performed through dedicated
sub-allocators but through the default path. We need to create a default
region to track those as well.

Signed-off-by: Maxime Ripard <mripard@xxxxxxxxxx>
---
 kernel/dma/mapping.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index cda127027e48a757f2d9fb04a49249d2b0238871..7bc3957512fd84e0bf3a89c210338be72457b5c9 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -5,10 +5,11 @@
  * Copyright (c) 2006 SUSE Linux Products GmbH
  * Copyright (c) 2006 Tejun Heo <teheo@xxxxxxx>
  */
 #include <linux/memblock.h> /* for max_pfn */
 #include <linux/acpi.h>
+#include <linux/cgroup_dmem.h>
 #include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/iommu-dma.h>
 #include <linux/kmsan.h>
@@ -25,10 +26,14 @@
 	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
 	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
 #endif
 
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+static struct dmem_cgroup_region *default_dmem_cgroup_region;
+#endif
+
 /*
  * Managed DMA API
  */
 struct dma_devres {
 	size_t size;
@@ -587,10 +592,28 @@ u64 dma_get_required_mask(struct device *dev)
 	 */
 	return DMA_BIT_MASK(32);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+static int __init dma_init_dmem_cgroup(void)
+{
+	struct dmem_cgroup_region *region;
+
+	if (default_dmem_cgroup_region)
+		return -EBUSY;
+
+	region = dmem_cgroup_register_region(U64_MAX, "dma/global");
+	if (IS_ERR(region))
+		return PTR_ERR(region);
+
+	default_dmem_cgroup_region = region;
+	return 0;
+}
+core_initcall(dma_init_dmem_cgroup);
+#endif
+
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
-- 
2.48.1