The QEMU virt machine supports a very minimal set of legacy perf events.
Add them to the vendor table so that users can use them when counter
delegation is enabled.

Signed-off-by: Atish Patra <atishp@xxxxxxxxxxxx>
---
 arch/riscv/include/asm/vendorid_list.h |  4 ++++
 drivers/perf/riscv_pmu_dev.c           | 36 ++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index 2f2bb0c84f9a..ef22b03552bc 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -9,4 +9,8 @@
 #define SIFIVE_VENDOR_ID	0x489
 #define THEAD_VENDOR_ID		0x5b7
 
+#define QEMU_VIRT_VENDOR_ID	0x000
+#define QEMU_VIRT_IMPL_ID	0x000
+#define QEMU_VIRT_ARCH_ID	0x000
+
 #endif
diff --git a/drivers/perf/riscv_pmu_dev.c b/drivers/perf/riscv_pmu_dev.c
index 8c5598253af0..d28d60abaaf2 100644
--- a/drivers/perf/riscv_pmu_dev.c
+++ b/drivers/perf/riscv_pmu_dev.c
@@ -26,6 +26,7 @@
 #include <asm/sbi.h>
 #include <asm/cpufeature.h>
 #include <asm/vendor_extensions.h>
+#include <asm/vendorid_list.h>
 #include <asm/vendor_extensions/andes.h>
 #include <asm/hwcap.h>
 #include <asm/csr_ind.h>
@@ -384,7 +385,42 @@ struct riscv_vendor_pmu_events {
 	.hw_event_map = _hw_event_map, .cache_event_map = _cache_event_map, \
 	.attrs_events = _attrs },
 
+/* QEMU virt PMU events */
+static const struct riscv_pmu_event qemu_virt_hw_event_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]	= {0x01, 0xFFFFFFF8},
+	[PERF_COUNT_HW_INSTRUCTIONS]	= {0x02, 0xFFFFFFF8}
+};
+
+static const struct riscv_pmu_event qemu_virt_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= {0x10019, 0xFFFFFFF8},
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= {0x1001B, 0xFFFFFFF8},
+
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= {0x10021, 0xFFFFFFF8},
+};
+
+RVPMU_EVENT_CMASK_ATTR(cycles, cycles, 0x01, 0xFFFFFFF8);
+RVPMU_EVENT_CMASK_ATTR(instructions, instructions, 0x02, 0xFFFFFFF8);
+RVPMU_EVENT_CMASK_ATTR(dTLB-load-misses, dTLB_load_miss, 0x10019, 0xFFFFFFF8);
+RVPMU_EVENT_CMASK_ATTR(dTLB-store-misses, dTLB_store_miss, 0x1001B, 0xFFFFFFF8);
+RVPMU_EVENT_CMASK_ATTR(iTLB-load-misses, iTLB_load_miss, 0x10021, 0xFFFFFFF8);
+
+static struct attribute *qemu_virt_event_group[] = {
+	RVPMU_EVENT_ATTR_PTR(cycles),
+	RVPMU_EVENT_ATTR_PTR(instructions),
+	RVPMU_EVENT_ATTR_PTR(dTLB_load_miss),
+	RVPMU_EVENT_ATTR_PTR(dTLB_store_miss),
+	RVPMU_EVENT_ATTR_PTR(iTLB_load_miss),
+	NULL,
+};
+
 static struct riscv_vendor_pmu_events pmu_vendor_events_table[] = {
+	RISCV_VENDOR_PMU_EVENTS(QEMU_VIRT_VENDOR_ID, QEMU_VIRT_ARCH_ID, QEMU_VIRT_IMPL_ID,
+				qemu_virt_hw_event_map, qemu_virt_cache_event_map,
+				qemu_virt_event_group)
 };
 
 const struct riscv_pmu_event *current_pmu_hw_event_map;
-- 
2.34.1
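
For reviewers following along without the rest of the series: below is a
minimal standalone sketch of the kind of vendor-table match the driver is
expected to perform at probe time. Only the ID values and {event, counter
mask} encodings come from the patch above; the struct layout, table and
function names here are illustrative stand-ins, not the driver's actual API.

/*
 * Standalone sketch, compilable as plain C. All names below are
 * hypothetical; only the constant values are taken from the patch.
 */
#include <stdint.h>
#include <stddef.h>

struct pmu_event {
	uint32_t event;		/* raw event encoding */
	uint32_t cmask;		/* hw counters allowed to count this event */
};

struct vendor_pmu_events {
	uint32_t vendorid, archid, implid;
	const struct pmu_event *hw_map;
};

/* QEMU virt advertises all-zero IDs, per the new defines in the patch. */
static const struct pmu_event qemu_virt_hw_map[] = {
	{ 0x01, 0xFFFFFFF8 },	/* cycles */
	{ 0x02, 0xFFFFFFF8 },	/* instructions */
};

static const struct vendor_pmu_events vendor_table[] = {
	{ 0x000, 0x000, 0x000, qemu_virt_hw_map },
};

/* Hypothetical probe-time match on mvendorid/marchid/mimpid. */
static const struct vendor_pmu_events *
vendor_events_lookup(uint32_t vendorid, uint32_t archid, uint32_t implid)
{
	for (size_t i = 0; i < sizeof(vendor_table) / sizeof(vendor_table[0]); i++) {
		if (vendor_table[i].vendorid == vendorid &&
		    vendor_table[i].archid == archid &&
		    vendor_table[i].implid == implid)
			return &vendor_table[i];
	}
	return NULL;	/* no mapping: legacy events stay unsupported */
}

One reading of the 0xFFFFFFF8 mask, with the low three bits clear, is that
these events are restricted to the programmable counters (index 3 and up),
keeping the fixed cycle/time/instret counters out; that is my inference
from the mask value, not something the patch states.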