Hi Tom,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on vkoul-dmaengine/next]
[also build test WARNING on linux/master linus/master v5.12-rc5 next-20210401]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Tom-Zanussi/dmaengine-idxd-IDXD-pmu-support/20210403-005240
base:   https://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git next
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/ef9587b8e4ebe37a46d89b14ed68fb321e33242f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Tom-Zanussi/dmaengine-idxd-IDXD-pmu-support/20210403-005240
        git checkout ef9587b8e4ebe37a46d89b14ed68fb321e33242f
        # save the attached .config to the linux build tree
        make W=1 ARCH=x86_64

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

   drivers/dma/idxd/perfmon.c: In function 'perfmon_pmu_event_init':
>> drivers/dma/idxd/perfmon.c:192:17: warning: variable 'dev' set but not used [-Wunused-but-set-variable]
     192 |  struct device *dev;
         |                 ^~~
   drivers/dma/idxd/perfmon.c: In function 'perfmon_pmu_read_counter':
   drivers/dma/idxd/perfmon.c:228:17: warning: variable 'dev' set but not used [-Wunused-but-set-variable]
     228 |  struct device *dev;
         |                 ^~~
   drivers/dma/idxd/perfmon.c: In function 'perfmon_pmu_event_start':
   drivers/dma/idxd/perfmon.c:325:17: warning: variable 'dev' set but not used [-Wunused-but-set-variable]
     325 |  struct device *dev;
         |                 ^~~
>> drivers/dma/idxd/perfmon.c:323:19: warning: variable 'idxd_pmu' set but not used [-Wunused-but-set-variable]
     323 |  struct idxd_pmu *idxd_pmu;
         |                   ^~~~~~~~
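All four warnings point at locals that are assigned but never read. Assuming 'dev' and 'idxd_pmu' aren't being kept around for a later patch in the series, one minimal way to silence them is to drop the variables together with their assignments. An untested sketch against the excerpt below (hunk offsets omitted):

--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ static int perfmon_pmu_event_init(struct perf_event *event)
 	struct idxd_device *idxd;
-	struct device *dev;
 	int ret = 0;
 
 	idxd = event_to_idxd(event);
-	dev = &idxd->pdev->dev;
 	event->hw.idx = -1;
@@ static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
 	struct idxd_device *idxd;
 	int cntr = hwc->idx;
-	struct device *dev;
 	u64 cntrdata;
 
 	idxd = event_to_idxd(event);
-	dev = &idxd->pdev->dev;
 
 	cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
@@ static void perfmon_pmu_event_start(struct perf_event *event, int mode)
 	union event_cfg event_cfg;
-	struct idxd_pmu *idxd_pmu;
 	struct idxd_device *idxd;
-	struct device *dev;
 	int cntr;
 
-	idxd_pmu = event_to_pmu(event);
 	idxd = event_to_idxd(event);
-	dev = &idxd->pdev->dev;
 
 	event->hw.idx = hwc->idx;

(If event_to_pmu() has no side effects, dropping the idxd_pmu assignment should be safe as well; otherwise keep the call and mark the result unused.)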
vim +/dev +192 drivers/dma/idxd/perfmon.c

   188
   189  static int perfmon_pmu_event_init(struct perf_event *event)
   190  {
   191          struct idxd_device *idxd;
 > 192          struct device *dev;
   193          int ret = 0;
   194
   195          idxd = event_to_idxd(event);
   196          dev = &idxd->pdev->dev;
   197          event->hw.idx = -1;
   198
   199          if (event->attr.type != event->pmu->type)
   200                  return -ENOENT;
   201
   202          /* sampling not supported */
   203          if (event->attr.sample_period)
   204                  return -EINVAL;
   205
   206          if (event->cpu < 0)
   207                  return -EINVAL;
   208
   209          if (event->pmu != &idxd->idxd_pmu->pmu)
   210                  return -EINVAL;
   211
   212          event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
   213          event->cpu = idxd->idxd_pmu->cpu;
   214          event->hw.config = event->attr.config;
   215
   216          if (event->group_leader != event)
   217                  /* non-group events have themselves as leader */
   218                  ret = perfmon_validate_group(idxd->idxd_pmu, event);
   219
   220          return ret;
   221  }
   222
   223  static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
   224  {
   225          struct hw_perf_event *hwc = &event->hw;
   226          struct idxd_device *idxd;
   227          int cntr = hwc->idx;
   228          struct device *dev;
   229          u64 cntrdata;
   230
   231          idxd = event_to_idxd(event);
   232          dev = &idxd->pdev->dev;
   233
   234          cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
   235
   236          return cntrdata;
   237  }
   238
   239  static void perfmon_pmu_event_update(struct perf_event *event)
   240  {
   241          struct idxd_device *idxd = event_to_idxd(event);
   242          u64 prev_raw_count, new_raw_count, delta, p, n;
   243          int shift = 64 - idxd->idxd_pmu->counter_width;
   244          struct hw_perf_event *hwc = &event->hw;
   245
   246          do {
   247                  prev_raw_count = local64_read(&hwc->prev_count);
   248                  new_raw_count = perfmon_pmu_read_counter(event);
   249          } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
   250                                   new_raw_count) != prev_raw_count);
   251
   252          n = (new_raw_count << shift);
   253          p = (prev_raw_count << shift);
   254
   255          delta = ((n - p) >> shift);
   256
   257          local64_add(delta, &event->count);
   258  }
   259
   260  void perfmon_counter_overflow(struct idxd_device *idxd)
   261  {
   262          int i, n_counters, max_loop = OVERFLOW_SIZE;
   263          struct perf_event *event;
   264          unsigned long ovfstatus;
   265
   266          n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);
   267
   268          ovfstatus = ioread32(OVFSTATUS_REG(idxd));
   269
   270          /*
   271           * While updating overflowed counters, other counters behind
   272           * them could overflow and be missed in a given pass.
   273           * Normally this could happen at most n_counters times, but in
   274           * theory a tiny counter width could result in continual
   275           * overflows and endless looping.  max_loop provides a
   276           * failsafe in that highly unlikely case.
   277           */
   278          while (ovfstatus && max_loop--) {
   279                  /* Figure out which counter(s) overflowed */
   280                  for_each_set_bit(i, &ovfstatus, n_counters) {
   281                          /* Update event->count for overflowed counter */
   282                          event = idxd->idxd_pmu->event_list[i];
   283                          perfmon_pmu_event_update(event);
   284                          clear_bit(i, &ovfstatus);
   285                          iowrite32(ovfstatus, OVFSTATUS_REG(idxd));
   286                  }
   287
   288                  ovfstatus = ioread32(OVFSTATUS_REG(idxd));
   289          }
   290
   291          /*
   292           * Should never happen.  If so, it means a counter(s) looped
   293           * around twice while this handler was running.
   294           */
   295          WARN_ON_ONCE(ovfstatus);
   296  }
   297
   298  static inline void perfmon_reset_config(struct idxd_device *idxd)
   299  {
   300          iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
   301          iowrite32(0, OVFSTATUS_REG(idxd));
   302          iowrite32(0, PERFFRZ_REG(idxd));
   303  }
   304
   305  static inline void perfmon_reset_counters(struct idxd_device *idxd)
   306  {
   307          iowrite32(CNTR_RESET, PERFRST_REG(idxd));
   308  }
   309
   310  static inline void perfmon_reset(struct idxd_device *idxd)
   311  {
   312          perfmon_reset_config(idxd);
   313          perfmon_reset_counters(idxd);
   314  }
   315
   316  static void perfmon_pmu_event_start(struct perf_event *event, int mode)
   317  {
   318          u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
   319          u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
   320          struct hw_perf_event *hwc = &event->hw;
   321          union filter_cfg flt_cfg;
   322          union event_cfg event_cfg;
 > 323          struct idxd_pmu *idxd_pmu;
   324          struct idxd_device *idxd;
   325          struct device *dev;
   326          int cntr;
   327
   328          idxd_pmu = event_to_pmu(event);
   329          idxd = event_to_idxd(event);
   330          dev = &idxd->pdev->dev;
   331
   332          event->hw.idx = hwc->idx;
   333          cntr = hwc->idx;
   334
   335          /* Obtain event category and event value from user space */
   336          event_cfg.val = event->attr.config;
   337          flt_cfg.val = event->attr.config1;
   338          event_cat = event_cfg.event_cat;
   339          event_enc = event_cfg.event_enc;
   340
   341          /* Obtain filter configuration from user space */
   342          flt_wq = flt_cfg.wq;
   343          flt_tc = flt_cfg.tc;
   344          flt_pg_sz = flt_cfg.pg_sz;
   345          flt_xfer_sz = flt_cfg.xfer_sz;
   346          flt_eng = flt_cfg.eng;
   347
   348          if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
   349                  iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
   350          if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
   351                  iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
   352          if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
   353                  iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
   354          if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
   355                  iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
   356          if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
   357                  iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));
   358
   359          /* Read the start value */
   360          cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
   361          local64_set(&event->hw.prev_count, cntrdata);
   362
   363          /* Set counter to event/category */
   364          cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
   365          cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
   366          /* Set interrupt on overflow and counter enable bits */
   367          cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);
   368
   369          iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
   370  }
   371
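As an aside for reviewers, the shift arithmetic in perfmon_pmu_event_update() above is the usual idiom for computing a wraparound-safe delta from a counter narrower than 64 bits: shifting both samples up so the counter's top bit lands at bit 63 lets the 64-bit subtraction wrap naturally, and shifting back down yields (new - prev) mod 2^counter_width. A self-contained sketch with hypothetical values, not driver code:

#include <stdint.h>
#include <stdio.h>

/*
 * Wraparound-safe delta for a 'width'-bit counter sampled into u64s.
 * Mirrors the n/p/delta computation in perfmon_pmu_event_update().
 */
static uint64_t counter_delta(uint64_t prev_raw, uint64_t new_raw, int width)
{
	int shift = 64 - width;

	/* (new_raw - prev_raw) mod 2^width, correct across a wrap */
	return ((new_raw << shift) - (prev_raw << shift)) >> shift;
}

int main(void)
{
	/* hypothetical 32-bit counter that wrapped from 0xfffffffe to 5 */
	printf("%llu\n",
	       (unsigned long long)counter_delta(0xfffffffeULL, 5, 32));
	/* prints 7: two ticks to wrap to zero, five more to reach 5 */
	return 0;
}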
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
Attachment: .config.gz (application/gzip)