The following commit has been merged into the perf/core branch of tip:

Commit-ID:     5a09928d339f3cf0973991ddc3a2798825c84c99
Gitweb:        https://git.kernel.org/tip/5a09928d339f3cf0973991ddc3a2798825c84c99
Author:        Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
AuthorDate:    Fri, 03 Jul 2020 05:49:24 -07:00
Committer:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Wed, 08 Jul 2020 11:38:55 +02:00

perf/x86: Remove task_ctx_size

A new kmem_cache method has replaced the kzalloc() to allocate the PMU
specific data. The task_ctx_size is not required anymore.

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/1593780569-62993-19-git-send-email-kan.liang@xxxxxxxxxxxxxxx
---
 arch/x86/events/core.c      | 1 -
 arch/x86/events/intel/lbr.c | 1 -
 include/linux/perf_event.h  | 4 ----
 kernel/events/core.c        | 4 +---
 4 files changed, 1 insertion(+), 9 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index d740c86..6b1228a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2371,7 +2371,6 @@ static struct pmu pmu = {

 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
-	.task_ctx_size		= sizeof(struct x86_perf_task_context),
 	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,

diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index e784c1d..3ad5289 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1672,7 +1672,6 @@ void __init intel_pmu_arch_lbr_init(void)

 	size = sizeof(struct x86_perf_task_context_arch_lbr) +
 	       lbr_nr * sizeof(struct lbr_entry);
-	x86_get_pmu()->task_ctx_size = size;
 	x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);

 	x86_pmu.lbr_from = MSR_ARCH_LBR_FROM_0;

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 09915ae..3b22db0 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -419,10 +419,6 @@ struct pmu {
 	 */
 	void (*sched_task)		(struct perf_event_context *ctx,
 					bool sched_in);
-	/*
-	 * PMU specific data size
-	 */
-	size_t				task_ctx_size;

 	/*
 	 * Kmem cache of PMU specific data

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 30d9b31..7c436d7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,15 +1243,13 @@ static void *alloc_task_ctx_data(struct pmu *pmu)
 	if (pmu->task_ctx_cache)
 		return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL);

-	return kzalloc(pmu->task_ctx_size, GFP_KERNEL);
+	return NULL;
 }

 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data)
 {
 	if (pmu->task_ctx_cache && task_ctx_data)
 		kmem_cache_free(pmu->task_ctx_cache, task_ctx_data);
-	else
-		kfree(task_ctx_data);
 }

 static void free_ctx(struct rcu_head *head)
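
For context, here is a minimal userspace sketch of the allocation-path behaviour after this patch. This is not kernel code: the toy_cache/toy_pmu types and the calloc()/free() calls are illustrative stand-ins for kmem_cache, kmem_cache_zalloc() and kmem_cache_free(). The only point it demonstrates is the one the diff makes: alloc_task_ctx_data() now allocates solely through a PMU-registered cache and returns NULL when none is registered, with no size-based kzalloc() fallback.

/*
 * Illustrative userspace analogue of the post-patch behaviour (not kernel
 * code): PMU-specific task context data comes only from a per-PMU cache;
 * with no cache registered, allocation simply fails.
 */
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct kmem_cache: records only the object size. */
struct toy_cache {
	size_t object_size;
};

/* Stand-in for the relevant part of struct pmu after this patch:
 * only task_ctx_cache remains; task_ctx_size is gone. */
struct toy_pmu {
	struct toy_cache *task_ctx_cache;
};

/* Mirrors the shape of alloc_task_ctx_data(): cache-backed zeroed
 * allocation, or NULL when the PMU registered no cache. */
static void *alloc_task_ctx_data(struct toy_pmu *pmu)
{
	if (pmu->task_ctx_cache)
		return calloc(1, pmu->task_ctx_cache->object_size);

	return NULL;	/* no size-based fallback allocation anymore */
}

/* Mirrors free_task_ctx_data(): frees only cache-backed objects. */
static void free_task_ctx_data(struct toy_pmu *pmu, void *task_ctx_data)
{
	if (pmu->task_ctx_cache && task_ctx_data)
		free(task_ctx_data);
}

int main(void)
{
	struct toy_cache lbr_cache = { .object_size = 256 };
	struct toy_pmu with_cache = { .task_ctx_cache = &lbr_cache };
	struct toy_pmu without_cache = { .task_ctx_cache = NULL };

	void *ctx = alloc_task_ctx_data(&with_cache);
	printf("with cache:    %s\n", ctx ? "allocated" : "NULL");
	free_task_ctx_data(&with_cache, ctx);

	ctx = alloc_task_ctx_data(&without_cache);
	printf("without cache: %s\n", ctx ? "allocated" : "NULL");
	free_task_ctx_data(&without_cache, ctx);

	return 0;
}

The practical consequence, as the lbr.c hunk shows, is that a PMU which needs per-task context data must register a task_ctx_cache (arch LBR does this via create_lbr_kmem_cache()) rather than advertise a task_ctx_size for a generic allocation.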