On 9/10/20 3:50 AM, peterz@xxxxxxxxxxxxx wrote:
> On Thu, Sep 10, 2020 at 10:32:23AM +0200, peterz@xxxxxxxxxxxxx wrote:
>>> @@ -363,7 +363,14 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
>>>  static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
>>>  					 struct hw_perf_event *hwc, u64 config)
>>>  {
>>> -	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
>>> +	u64 _config = (hwc->config | config) & ~perf_ibs->enable_mask;
>>> +
>>> +	/* On Fam17h, the periodic fetch counter is set when IbsFetchEn is changed from 0 to 1 */
>>> +	if (perf_ibs == &perf_ibs_fetch && boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
>>> +		wrmsrl(hwc->config_base, _config);
>
>> A better option would be to use hwc->flags, you're loading from that
>> line already, so it's guaranteed hot and then you only have a single
>> branch. Or stick it in perf_ibs near enable_mask, same difference.
>
> I fixed it for you.
>
> @@ -370,7 +371,13 @@ perf_ibs_event_update(struct perf_ibs *p
>  static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
>  					 struct hw_perf_event *hwc, u64 config)
>  {
> -	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
> +	u64 _config = (hwc->config | config) & ~perf_ibs->enable_mask;
> +
> +	if (perf_ibs->fetch_count_reset_broken)

Nice, we don't even need the perf_ibs == &perf_ibs_fetch check here
because fetch_count_reset_broken is guaranteed to be 0 in perf_ibs_op.

Thanks!

Kim
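
For reference, here is a minimal sketch of how the complete enable path could
look once the check is keyed off a per-PMU flag. The fetch_count_reset_broken
field in struct perf_ibs and the exact write sequence are assumptions filled in
from the truncated hunk above, not necessarily the final committed form:

	static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
						 struct hw_perf_event *hwc, u64 config)
	{
		u64 tmp = hwc->config | config;

		/*
		 * Assumed workaround: write the MSR once with the enable bit
		 * (IbsFetchEn) clear so the subsequent 0 -> 1 transition re-arms
		 * the periodic fetch counter on affected parts.
		 */
		if (perf_ibs->fetch_count_reset_broken)
			wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);

		wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
	}

Since fetch_count_reset_broken would only ever be set for perf_ibs_fetch, the
single flag test covers both the fetch and op PMUs without an extra compare.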