tree:   https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git master
head:   10569a280f259f696c0b32fc1d45866d2fd33f53
commit: 085fee1a72a9fba101a4a68a2c02fa8bd2b6f913 [4108/4964] bpf: Use recursion prevention helpers in hashtab code
:::::: branch date: 16 hours ago
:::::: commit date: 3 days ago

If you fix the issue, kindly add the following tags
Reported-by: kbuild test robot <lkp@xxxxxxxxx>
Reported-by: Dan Carpenter <dan.carpenter@xxxxxxxxxx>

smatch warnings:
kernel/bpf/hashtab.c:1341 __htab_map_lookup_and_delete_batch() warn: potential spectre issue 'htab->buckets' [r] (local cap)

# https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git/commit/?id=085fee1a72a9fba101a4a68a2c02fa8bd2b6f913
git remote add linux-next https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
git remote update linux-next
git checkout 085fee1a72a9fba101a4a68a2c02fa8bd2b6f913

vim +1341 kernel/bpf/hashtab.c

057996380a42bb Yonghong Song 2020-01-15  1265  static int
057996380a42bb Yonghong Song 2020-01-15  1266  __htab_map_lookup_and_delete_batch(struct bpf_map *map,
057996380a42bb Yonghong Song 2020-01-15  1267  				   const union bpf_attr *attr,
057996380a42bb Yonghong Song 2020-01-15  1268  				   union bpf_attr __user *uattr,
057996380a42bb Yonghong Song 2020-01-15  1269  				   bool do_delete, bool is_lru_map,
057996380a42bb Yonghong Song 2020-01-15  1270  				   bool is_percpu)
057996380a42bb Yonghong Song 2020-01-15  1271  {
057996380a42bb Yonghong Song 2020-01-15  1272  	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
057996380a42bb Yonghong Song 2020-01-15  1273  	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
057996380a42bb Yonghong Song 2020-01-15  1274  	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
057996380a42bb Yonghong Song 2020-01-15  1275  	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
057996380a42bb Yonghong Song 2020-01-15  1276  	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
057996380a42bb Yonghong Song 2020-01-15  1277  	void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
057996380a42bb Yonghong Song 2020-01-15  1278  	u32 batch, max_count, size, bucket_size;
b9aff38de2cb16 Yonghong Song 2020-02-19  1279  	struct htab_elem *node_to_free = NULL;
057996380a42bb Yonghong Song 2020-01-15  1280  	u64 elem_map_flags, map_flags;
057996380a42bb Yonghong Song 2020-01-15  1281  	struct hlist_nulls_head *head;
057996380a42bb Yonghong Song 2020-01-15  1282  	struct hlist_nulls_node *n;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1283  	unsigned long flags = 0;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1284  	bool locked = false;
057996380a42bb Yonghong Song 2020-01-15  1285  	struct htab_elem *l;
057996380a42bb Yonghong Song 2020-01-15  1286  	struct bucket *b;
057996380a42bb Yonghong Song 2020-01-15  1287  	int ret = 0;
057996380a42bb Yonghong Song 2020-01-15  1288  
057996380a42bb Yonghong Song 2020-01-15  1289  	elem_map_flags = attr->batch.elem_flags;
057996380a42bb Yonghong Song 2020-01-15  1290  	if ((elem_map_flags & ~BPF_F_LOCK) ||
057996380a42bb Yonghong Song 2020-01-15  1291  	    ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
057996380a42bb Yonghong Song 2020-01-15  1292  		return -EINVAL;
057996380a42bb Yonghong Song 2020-01-15  1293  
057996380a42bb Yonghong Song 2020-01-15  1294  	map_flags = attr->batch.flags;
057996380a42bb Yonghong Song 2020-01-15  1295  	if (map_flags)
057996380a42bb Yonghong Song 2020-01-15  1296  		return -EINVAL;
057996380a42bb Yonghong Song 2020-01-15  1297  
057996380a42bb Yonghong Song 2020-01-15  1298  	max_count = attr->batch.count;
057996380a42bb Yonghong Song 2020-01-15  1299  	if (!max_count)
057996380a42bb Yonghong Song 2020-01-15  1300  		return 0;
057996380a42bb Yonghong Song 2020-01-15  1301  
057996380a42bb Yonghong Song 2020-01-15  1302  	if (put_user(0, &uattr->batch.count))
057996380a42bb Yonghong Song 2020-01-15  1303  		return -EFAULT;
057996380a42bb Yonghong Song 2020-01-15  1304  
057996380a42bb Yonghong Song 2020-01-15  1305  	batch = 0;
057996380a42bb Yonghong Song 2020-01-15  1306  	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
057996380a42bb Yonghong Song 2020-01-15  1307  		return -EFAULT;
057996380a42bb Yonghong Song 2020-01-15  1308  
057996380a42bb Yonghong Song 2020-01-15  1309  	if (batch >= htab->n_buckets)

Should this use array_index_nospec() somehow?

057996380a42bb Yonghong Song 2020-01-15  1310  		return -ENOENT;
057996380a42bb Yonghong Song 2020-01-15  1311  
057996380a42bb Yonghong Song 2020-01-15  1312  	key_size = htab->map.key_size;
057996380a42bb Yonghong Song 2020-01-15  1313  	roundup_key_size = round_up(htab->map.key_size, 8);
057996380a42bb Yonghong Song 2020-01-15  1314  	value_size = htab->map.value_size;
057996380a42bb Yonghong Song 2020-01-15  1315  	size = round_up(value_size, 8);
057996380a42bb Yonghong Song 2020-01-15  1316  	if (is_percpu)
057996380a42bb Yonghong Song 2020-01-15  1317  		value_size = size * num_possible_cpus();
057996380a42bb Yonghong Song 2020-01-15  1318  	total = 0;
057996380a42bb Yonghong Song 2020-01-15  1319  	/* while experimenting with hash tables with sizes ranging from 10 to
057996380a42bb Yonghong Song 2020-01-15  1320  	 * 1000, it was observed that a bucket can have upto 5 entries.
057996380a42bb Yonghong Song 2020-01-15  1321  	 */
057996380a42bb Yonghong Song 2020-01-15  1322  	bucket_size = 5;
057996380a42bb Yonghong Song 2020-01-15  1323  
057996380a42bb Yonghong Song 2020-01-15  1324  alloc:
057996380a42bb Yonghong Song 2020-01-15  1325  	/* We cannot do copy_from_user or copy_to_user inside
057996380a42bb Yonghong Song 2020-01-15  1326  	 * the rcu_read_lock. Allocate enough space here.
057996380a42bb Yonghong Song 2020-01-15  1327  	 */
057996380a42bb Yonghong Song 2020-01-15  1328  	keys = kvmalloc(key_size * bucket_size, GFP_USER | __GFP_NOWARN);
057996380a42bb Yonghong Song 2020-01-15  1329  	values = kvmalloc(value_size * bucket_size, GFP_USER | __GFP_NOWARN);
057996380a42bb Yonghong Song 2020-01-15  1330  	if (!keys || !values) {
057996380a42bb Yonghong Song 2020-01-15  1331  		ret = -ENOMEM;
057996380a42bb Yonghong Song 2020-01-15  1332  		goto after_loop;
057996380a42bb Yonghong Song 2020-01-15  1333  	}
057996380a42bb Yonghong Song 2020-01-15  1334  
057996380a42bb Yonghong Song 2020-01-15  1335  again:
085fee1a72a9fb Thomas Gleixner 2020-02-24  1336  	bpf_disable_instrumentation();
057996380a42bb Yonghong Song 2020-01-15  1337  	rcu_read_lock();
057996380a42bb Yonghong Song 2020-01-15  1338  again_nocopy:
057996380a42bb Yonghong Song 2020-01-15  1339  	dst_key = keys;
057996380a42bb Yonghong Song 2020-01-15  1340  	dst_val = values;
057996380a42bb Yonghong Song 2020-01-15 @1341  	b = &htab->buckets[batch];
057996380a42bb Yonghong Song 2020-01-15  1342  	head = &b->head;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1343  	/* do not grab the lock unless need it (bucket_cnt > 0). */
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1344  	if (locked)
057996380a42bb Yonghong Song 2020-01-15  1345  		raw_spin_lock_irqsave(&b->lock, flags);
057996380a42bb Yonghong Song 2020-01-15  1346  
057996380a42bb Yonghong Song 2020-01-15  1347  	bucket_cnt = 0;
057996380a42bb Yonghong Song 2020-01-15  1348  	hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
057996380a42bb Yonghong Song 2020-01-15  1349  		bucket_cnt++;
057996380a42bb Yonghong Song 2020-01-15  1350  
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1351  	if (bucket_cnt && !locked) {
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1352  		locked = true;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1353  		goto again_nocopy;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1354  	}
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1355  
057996380a42bb Yonghong Song 2020-01-15  1356  	if (bucket_cnt > (max_count - total)) {
057996380a42bb Yonghong Song 2020-01-15  1357  		if (total == 0)
057996380a42bb Yonghong Song 2020-01-15  1358  			ret = -ENOSPC;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1359  		/* Note that since bucket_cnt > 0 here, it is implicit
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1360  		 * that the locked was grabbed, so release it.
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1361  		 */
057996380a42bb Yonghong Song 2020-01-15  1362  		raw_spin_unlock_irqrestore(&b->lock, flags);
057996380a42bb Yonghong Song 2020-01-15  1363  		rcu_read_unlock();
085fee1a72a9fb Thomas Gleixner 2020-02-24  1364  		bpf_enable_instrumentation();
057996380a42bb Yonghong Song 2020-01-15  1365  		goto after_loop;
057996380a42bb Yonghong Song 2020-01-15  1366  	}
057996380a42bb Yonghong Song 2020-01-15  1367  
057996380a42bb Yonghong Song 2020-01-15  1368  	if (bucket_cnt > bucket_size) {
057996380a42bb Yonghong Song 2020-01-15  1369  		bucket_size = bucket_cnt;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1370  		/* Note that since bucket_cnt > 0 here, it is implicit
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1371  		 * that the locked was grabbed, so release it.
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1372  		 */
057996380a42bb Yonghong Song 2020-01-15  1373  		raw_spin_unlock_irqrestore(&b->lock, flags);
057996380a42bb Yonghong Song 2020-01-15  1374  		rcu_read_unlock();
085fee1a72a9fb Thomas Gleixner 2020-02-24  1375  		bpf_enable_instrumentation();
057996380a42bb Yonghong Song 2020-01-15  1376  		kvfree(keys);
057996380a42bb Yonghong Song 2020-01-15  1377  		kvfree(values);
057996380a42bb Yonghong Song 2020-01-15  1378  		goto alloc;
057996380a42bb Yonghong Song 2020-01-15  1379  	}
057996380a42bb Yonghong Song 2020-01-15  1380  
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1381  	/* Next block is only safe to run if you have grabbed the lock */
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1382  	if (!locked)
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1383  		goto next_batch;
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1384  
057996380a42bb Yonghong Song 2020-01-15  1385  	hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
057996380a42bb Yonghong Song 2020-01-15  1386  		memcpy(dst_key, l->key, key_size);
057996380a42bb Yonghong Song 2020-01-15  1387  
057996380a42bb Yonghong Song 2020-01-15  1388  		if (is_percpu) {
057996380a42bb Yonghong Song 2020-01-15  1389  			int off = 0, cpu;
057996380a42bb Yonghong Song 2020-01-15  1390  			void __percpu *pptr;
057996380a42bb Yonghong Song 2020-01-15  1391  
057996380a42bb Yonghong Song 2020-01-15  1392  			pptr = htab_elem_get_ptr(l, map->key_size);
057996380a42bb Yonghong Song 2020-01-15  1393  			for_each_possible_cpu(cpu) {
057996380a42bb Yonghong Song 2020-01-15  1394  				bpf_long_memcpy(dst_val + off,
057996380a42bb Yonghong Song 2020-01-15  1395  						per_cpu_ptr(pptr, cpu), size);
057996380a42bb Yonghong Song 2020-01-15  1396  				off += size;
057996380a42bb Yonghong Song 2020-01-15  1397  			}
057996380a42bb Yonghong Song 2020-01-15  1398  		} else {
057996380a42bb Yonghong Song 2020-01-15  1399  			value = l->key + roundup_key_size;
057996380a42bb Yonghong Song 2020-01-15  1400  			if (elem_map_flags & BPF_F_LOCK)
057996380a42bb Yonghong Song 2020-01-15  1401  				copy_map_value_locked(map, dst_val, value,
057996380a42bb Yonghong Song 2020-01-15  1402  						      true);
057996380a42bb Yonghong Song 2020-01-15  1403  			else
057996380a42bb Yonghong Song 2020-01-15  1404  				copy_map_value(map, dst_val, value);
057996380a42bb Yonghong Song 2020-01-15  1405  			check_and_init_map_lock(map, dst_val);
057996380a42bb Yonghong Song 2020-01-15  1406  		}
057996380a42bb Yonghong Song 2020-01-15  1407  		if (do_delete) {
057996380a42bb Yonghong Song 2020-01-15  1408  			hlist_nulls_del_rcu(&l->hash_node);
b9aff38de2cb16 Yonghong Song 2020-02-19  1409  
b9aff38de2cb16 Yonghong Song 2020-02-19  1410  			/* bpf_lru_push_free() will acquire lru_lock, which
b9aff38de2cb16 Yonghong Song 2020-02-19  1411  			 * may cause deadlock. See comments in function
b9aff38de2cb16 Yonghong Song 2020-02-19  1412  			 * prealloc_lru_pop(). Let us do bpf_lru_push_free()
b9aff38de2cb16 Yonghong Song 2020-02-19  1413  			 * after releasing the bucket lock.
b9aff38de2cb16 Yonghong Song 2020-02-19  1414  			 */
b9aff38de2cb16 Yonghong Song 2020-02-19  1415  			if (is_lru_map) {
b9aff38de2cb16 Yonghong Song 2020-02-19  1416  				l->batch_flink = node_to_free;
b9aff38de2cb16 Yonghong Song 2020-02-19  1417  				node_to_free = l;
b9aff38de2cb16 Yonghong Song 2020-02-19  1418  			} else {
057996380a42bb Yonghong Song 2020-01-15  1419  				free_htab_elem(htab, l);
057996380a42bb Yonghong Song 2020-01-15  1420  			}
b9aff38de2cb16 Yonghong Song 2020-02-19  1421  		}
057996380a42bb Yonghong Song 2020-01-15  1422  		dst_key += key_size;
057996380a42bb Yonghong Song 2020-01-15  1423  		dst_val += value_size;
057996380a42bb Yonghong Song 2020-01-15  1424  	}
057996380a42bb Yonghong Song 2020-01-15  1425  
057996380a42bb Yonghong Song 2020-01-15  1426  	raw_spin_unlock_irqrestore(&b->lock, flags);
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1427  	locked = false;
b9aff38de2cb16 Yonghong Song 2020-02-19  1428  
b9aff38de2cb16 Yonghong Song 2020-02-19  1429  	while (node_to_free) {
b9aff38de2cb16 Yonghong Song 2020-02-19  1430  		l = node_to_free;
b9aff38de2cb16 Yonghong Song 2020-02-19  1431  		node_to_free = node_to_free->batch_flink;
b9aff38de2cb16 Yonghong Song 2020-02-19  1432  		bpf_lru_push_free(&htab->lru, &l->lru_node);
b9aff38de2cb16 Yonghong Song 2020-02-19  1433  	}
b9aff38de2cb16 Yonghong Song 2020-02-19  1434  
492e0d0d6f2eb4 Brian Vazquez 2020-02-18  1435  next_batch:
057996380a42bb Yonghong Song 2020-01-15  1436  	/* If we are not copying data, we can go to next bucket and avoid
057996380a42bb Yonghong Song 2020-01-15  1437  	 * unlocking the rcu.
057996380a42bb Yonghong Song 2020-01-15  1438  	 */
057996380a42bb Yonghong Song 2020-01-15  1439  	if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
057996380a42bb Yonghong Song 2020-01-15  1440  		batch++;
057996380a42bb Yonghong Song 2020-01-15  1441  		goto again_nocopy;
057996380a42bb Yonghong Song 2020-01-15  1442  	}
057996380a42bb Yonghong Song 2020-01-15  1443  
057996380a42bb Yonghong Song 2020-01-15  1444  	rcu_read_unlock();
085fee1a72a9fb Thomas Gleixner 2020-02-24  1445  	bpf_enable_instrumentation();
057996380a42bb Yonghong Song 2020-01-15  1446  	if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
057996380a42bb Yonghong Song 2020-01-15  1447  			   key_size * bucket_cnt) ||
057996380a42bb Yonghong Song 2020-01-15  1448  	    copy_to_user(uvalues + total * value_size, values,
057996380a42bb Yonghong Song 2020-01-15  1449  			 value_size * bucket_cnt))) {
057996380a42bb Yonghong Song 2020-01-15  1450  		ret = -EFAULT;
057996380a42bb Yonghong Song 2020-01-15  1451  		goto after_loop;
057996380a42bb Yonghong Song 2020-01-15  1452  	}
057996380a42bb Yonghong Song 2020-01-15  1453  
057996380a42bb Yonghong Song 2020-01-15  1454  	total += bucket_cnt;
057996380a42bb Yonghong Song 2020-01-15  1455  	batch++;
057996380a42bb Yonghong Song 2020-01-15  1456  	if (batch >= htab->n_buckets) {
057996380a42bb Yonghong Song 2020-01-15  1457  		ret = -ENOENT;
057996380a42bb Yonghong Song 2020-01-15  1458  		goto after_loop;
057996380a42bb Yonghong Song 2020-01-15  1459  	}
057996380a42bb Yonghong Song 2020-01-15  1460  	goto again;
057996380a42bb Yonghong Song 2020-01-15  1461  
057996380a42bb Yonghong Song 2020-01-15  1462  after_loop:
057996380a42bb Yonghong Song 2020-01-15  1463  	if (ret == -EFAULT)
057996380a42bb Yonghong Song 2020-01-15  1464  		goto out;
057996380a42bb Yonghong Song 2020-01-15  1465  
057996380a42bb Yonghong Song 2020-01-15  1466  	/* copy # of entries and next batch */
057996380a42bb Yonghong Song 2020-01-15  1467  	ubatch = u64_to_user_ptr(attr->batch.out_batch);
057996380a42bb Yonghong Song 2020-01-15  1468  	if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
057996380a42bb Yonghong Song 2020-01-15  1469  	    put_user(total, &uattr->batch.count))
057996380a42bb Yonghong Song 2020-01-15  1470  		ret = -EFAULT;
057996380a42bb Yonghong Song 2020-01-15  1471  
057996380a42bb Yonghong Song 2020-01-15  1472  out:
057996380a42bb Yonghong Song 2020-01-15  1473  	kvfree(keys);
057996380a42bb Yonghong Song 2020-01-15  1474  	kvfree(values);
057996380a42bb Yonghong Song 2020-01-15  1475  	return ret;
057996380a42bb Yonghong Song 2020-01-15  1476  }

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx
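
For context on the warning: smatch flags line 1341 because 'batch' is copied
from user space and is only bounds-checked with a branch at line 1309, so the
htab->buckets[batch] access could still be reached speculatively with an
out-of-range index. A minimal sketch of the kind of change the question above
is asking about (an untested suggestion, not a maintainer-reviewed fix) would
be to clamp the index with array_index_nospec() from <linux/nospec.h> right
after the existing check:

	batch = 0;
	if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
		return -EFAULT;

	if (batch >= htab->n_buckets)
		return -ENOENT;
	/* Sanitize the user-supplied index so it cannot be used to
	 * speculatively read past htab->buckets[] below.
	 */
	batch = array_index_nospec(batch, htab->n_buckets);

Note that 'batch' is also incremented later in the function (with its own
htab->n_buckets checks before it is reused), so whether a single clamp at the
entry point is sufficient would still need review from the BPF maintainers.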