The following commit has been merged into the perf/core branch of tip:

Commit-ID:     6c8b0b835f003647e593c08331a4dd2150d5eb0e
Gitweb:        https://git.kernel.org/tip/6c8b0b835f003647e593c08331a4dd2150d5eb0e
Author:        Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate:    Mon, 04 Nov 2024 14:39:15 +01:00
Committer:     Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 04 Mar 2025 09:42:29 +01:00

perf/core: Simplify perf_pmu_register()

Using the previously introduced perf_pmu_free() and a new IDR helper,
simplify the perf_pmu_register() error paths.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Acked-by: Ravi Bangoria <ravi.bangoria@xxxxxxx>
Link: https://lore.kernel.org/r/20241104135518.198937277@xxxxxxxxxxxxx
---
 include/linux/idr.h  | 17 ++++++++++-
 kernel/events/core.c | 71 +++++++++++++++++--------------------------
 2 files changed, 46 insertions(+), 42 deletions(-)

diff --git a/include/linux/idr.h b/include/linux/idr.h
index da5f5fa..cd729be 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
 #include <linux/radix-tree.h>
 #include <linux/gfp.h>
 #include <linux/percpu.h>
+#include <linux/cleanup.h>
 
 struct idr {
 	struct radix_tree_root	idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
 void *idr_replace(struct idr *, void *, unsigned long id);
 void idr_destroy(struct idr *);
 
+struct __class_idr {
+	struct idr *idr;
+	int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+	     if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+	     ((struct __class_idr){
+		.idr = idr,
+		.id = idr_alloc(idr, ptr, start, end, gfp),
+	     }),
+	     struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
 /**
  * idr_init_base() - Initialise an IDR.
  * @idr: IDR handle.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ee5cdd6..215dad5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11914,52 +11914,49 @@ static void perf_pmu_free(struct pmu *pmu)
 	free_percpu(pmu->cpu_pmu_context);
 }
 
-int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
+
+int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
 {
-	int cpu, ret, max = PERF_TYPE_MAX;
+	int cpu, max = PERF_TYPE_MAX;
 
-	pmu->type = -1;
+	struct pmu *pmu __free(pmu_unregister) = _pmu;
+	guard(mutex)(&pmus_lock);
 
-	mutex_lock(&pmus_lock);
-	ret = -ENOMEM;
 	pmu->pmu_disable_count = alloc_percpu(int);
 	if (!pmu->pmu_disable_count)
-		goto unlock;
+		return -ENOMEM;
 
-	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
+		return -EINVAL;
 
-	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE, "Can not register a pmu with an invalid scope.\n")) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (WARN_ONCE(pmu->scope >= PERF_PMU_MAX_SCOPE,
+		      "Can not register a pmu with an invalid scope.\n"))
+		return -EINVAL;
 
 	pmu->name = name;
 
 	if (type >= 0)
 		max = type;
 
-	ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL);
-	if (ret < 0)
-		goto free;
+	CLASS(idr_alloc, pmu_type)(&pmu_idr, NULL, max, 0, GFP_KERNEL);
+	if (pmu_type.id < 0)
+		return pmu_type.id;
 
-	WARN_ON(type >= 0 && ret != type);
+	WARN_ON(type >= 0 && pmu_type.id != type);
 
-	pmu->type = ret;
+	pmu->type = pmu_type.id;
 	atomic_set(&pmu->exclusive_cnt, 0);
 
 	if (pmu_bus_running && !pmu->dev) {
-		ret = pmu_dev_alloc(pmu);
+		int ret = pmu_dev_alloc(pmu);
 		if (ret)
-			goto free;
+			return ret;
 	}
 
-	ret = -ENOMEM;
 	pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
 	if (!pmu->cpu_pmu_context)
-		goto free;
+		return -ENOMEM;
 
 	for_each_possible_cpu(cpu) {
 		struct perf_cpu_pmu_context *cpc;
@@ -12000,32 +11997,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 	/*
 	 * Now that the PMU is complete, make it visible to perf_try_init_event().
 	 */
-	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) {
-		ret = -EINVAL;
-		goto free;
-	}
+	if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu))
+		return -EINVAL;
 	list_add_rcu(&pmu->entry, &pmus);
 
-	ret = 0;
-unlock:
-	mutex_unlock(&pmus_lock);
-
-	return ret;
-
-free:
-	if (pmu->type >= 0)
-		idr_remove(&pmu_idr, pmu->type);
-	perf_pmu_free(pmu);
-	goto unlock;
+	take_idr_id(pmu_type);
+	_pmu = no_free_ptr(pmu); // let it rip
+	return 0;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-	mutex_lock(&pmus_lock);
-	list_del_rcu(&pmu->entry);
-	idr_remove(&pmu_idr, pmu->type);
-	mutex_unlock(&pmus_lock);
+	scoped_guard (mutex, &pmus_lock) {
+		list_del_rcu(&pmu->entry);
+		idr_remove(&pmu_idr, pmu->type);
+	}
 
 	/*
 	 * We dereference the pmu list under both SRCU and regular RCU, so
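For readers unfamiliar with the cleanup helpers the patch leans on: __free(pmu_unregister), guard(mutex) and CLASS(idr_alloc, ...) all register an undo action that runs automatically when the annotated variable leaves scope, which is why every failure in the new perf_pmu_register() collapses to a plain return instead of a goto into a shared free label, and why the success path must explicitly take ownership back (take_idr_id() / no_free_ptr()). Below is a minimal, stand-alone user-space sketch of that mechanism, built directly on the compiler's __attribute__((cleanup)) that <linux/cleanup.h> wraps. The obj_* names are invented for illustration only; they are not part of the patch or of any kernel API.

	/* Scope-based cleanup sketch: compile with gcc or clang. */
	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int id;
	};

	static void obj_free(struct obj *o)
	{
		printf("freeing obj %d\n", o->id);
		free(o);
	}

	/*
	 * Cleanup handler: runs automatically when the annotated variable
	 * goes out of scope, unless the pointer was NULLed (ownership moved).
	 */
	static void obj_cleanup(struct obj **p)
	{
		if (*p)
			obj_free(*p);
	}

	#define __free_obj __attribute__((cleanup(obj_cleanup)))

	/*
	 * Every early return lets the cleanup handler undo the allocation,
	 * so the error paths need no goto ladder.
	 */
	static struct obj *obj_register(int id, int fail_step)
	{
		struct obj *o __free_obj = malloc(sizeof(*o));

		if (!o)
			return NULL;
		o->id = id;

		if (fail_step == 1)		/* simulated failure: o freed on return */
			return NULL;

		if (fail_step == 2)		/* another failure point, same one-liner */
			return NULL;

		printf("registered obj %d\n", o->id);

		/*
		 * Success: steal the pointer from the cleanup handler, the
		 * user-space analogue of no_free_ptr(pmu) in the patch above.
		 */
		struct obj *ret = o;
		o = NULL;			/* "let it rip" */
		return ret;
	}

	int main(void)
	{
		struct obj *a = obj_register(1, 0);	/* succeeds, caller owns it */
		struct obj *b = obj_register(2, 1);	/* fails, freed automatically */
		struct obj *c = obj_register(3, 2);	/* fails, freed automatically */

		if (a)
			obj_free(a);
		(void)b;
		(void)c;
		return 0;
	}

The kernel's DEFINE_FREE()/DEFINE_CLASS() macros generate this kind of handler for you; CLASS(idr_alloc, pmu_type) in the patch additionally bundles the allocated id with the idr it came from, so the automatic undo (idr_remove()) has everything it needs until take_idr_id() disarms it on success.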