There can be several coherent memory regions in the system, and all of
them might end up being used to allocate a DMA buffer. Let's register a
dmem region for each of them to make sure we can track those
allocations.

Signed-off-by: Maxime Ripard <mripard@xxxxxxxxxx>
---
 kernel/dma/coherent.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 3b2bdca9f1d4b0274bf4874892b94730cd05c5df..2a2d515e43acbdef19c14d8840ed90e48e7ebb43 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -5,10 +5,11 @@
  */
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/cgroup_dmem.h>
 #include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
 
 struct dma_coherent_mem {
 	void *virt_base;
@@ -16,10 +17,12 @@ struct dma_coherent_mem {
 	unsigned long pfn_base;
 	int size;
 	unsigned long *bitmap;
 	spinlock_t spinlock;
 	bool use_dev_dma_pfn_offset;
+
+	struct dmem_cgroup_region *dmem_cgroup_region;
 };
 
 static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
 {
 	if (dev && dev->dma_mem)
@@ -335,16 +338,25 @@ static phys_addr_t dma_reserved_default_memory_size __initdata;
 #endif
 
 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 {
 	if (!rmem->priv) {
+		struct dmem_cgroup_region *region;
 		struct dma_coherent_mem *mem;
 
 		mem = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size, true);
 		if (IS_ERR(mem))
			return PTR_ERR(mem);
+
+		region = dmem_cgroup_register_region(rmem->size,
+						     "dma/coherent/%s",
+						     rmem->name);
+		if (IS_ERR(region))
+			return PTR_ERR(region);
+
+		mem->dmem_cgroup_region = region;
 		rmem->priv = mem;
 	}
 	dma_assign_coherent_memory(dev, rmem->priv);
 	return 0;
 }

--
2.48.1
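
As an aside, here is a rough sketch of how a follow-up change might charge
allocations against the region registered above once pages are carved out
of the coherent bitmap. This is illustrative only and not part of the patch:
the dma_coherent_charge()/dma_coherent_uncharge() helpers are hypothetical,
the sketch assumes it lives inside kernel/dma/coherent.c so it can see
struct dma_coherent_mem, and it assumes the dmem_cgroup_try_charge()/
dmem_cgroup_uncharge() prototypes match <linux/cgroup_dmem.h>.

#include <linux/cgroup_dmem.h>
#include <linux/err.h>

/*
 * Hypothetical helpers, not part of this patch: charge a coherent
 * allocation of @size bytes against the region registered in
 * rmem_dma_device_init(), and drop the charge again on free. The
 * dmem cgroup helper prototypes are assumed, not taken from this
 * series.
 */
static int dma_coherent_charge(struct dma_coherent_mem *mem, size_t size,
			       struct dmem_cgroup_pool_state **pool)
{
	*pool = NULL;

	/* Regions without a dmem registration are simply not tracked. */
	if (!mem->dmem_cgroup_region)
		return 0;

	return dmem_cgroup_try_charge(mem->dmem_cgroup_region, size,
				      pool, NULL);
}

static void dma_coherent_uncharge(struct dmem_cgroup_pool_state *pool,
				  size_t size)
{
	if (pool)
		dmem_cgroup_uncharge(pool, size);
}

Even if the exact prototypes differ, the intent stays the same: one
registered region per reserved-mem node, with the per-cgroup pool state
returned by the charge kept alongside the allocation and handed back at
free time.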