@@ -16,6 +16,7 @@
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
 #include <linux/bpf_mem_alloc.h>
+#include <asm/rqspinlock.h>
 
 #define HTAB_CREATE_FLAG_MASK						\
 	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
@@ -78,7 +79,7 @@
  */
 struct bucket {
 	struct hlist_nulls_head head;
-	raw_spinlock_t raw_lock;
+	rqspinlock_t raw_lock;
 };
 
 #define HASHTAB_MAP_LOCK_COUNT 8
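As context for the hunks below: rqspinlock_t is the resilient queued spinlock declared in <asm/rqspinlock.h>, whose acquisition can fail (for example on a detected deadlock or a timeout) instead of spinning forever. A minimal sketch of the converted bucket, assembled from this hunk rather than quoted verbatim from the tree:

	struct bucket {
		struct hlist_nulls_head head;	/* nulls-terminated hash chain */
		rqspinlock_t raw_lock;		/* acquisition may now fail */
	};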
@@ -104,8 +105,6 @@ struct bpf_htab {
 	u32 n_buckets;	/* number of hash buckets */
 	u32 elem_size;	/* size of each element in bytes */
 	u32 hashrnd;
-	struct lock_class_key lockdep_key;
-	int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -140,45 +139,26 @@ static void htab_init_buckets(struct bpf_htab *htab)
 
 	for (i = 0; i < htab->n_buckets; i++) {
 		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
-		raw_spin_lock_init(&htab->buckets[i].raw_lock);
-		lockdep_set_class(&htab->buckets[i].raw_lock,
-				  &htab->lockdep_key);
+		raw_res_spin_lock_init(&htab->buckets[i].raw_lock);
 		cond_resched();
 	}
 }
 
-static inline int htab_lock_bucket(const struct bpf_htab *htab,
-				   struct bucket *b, u32 hash,
-				   unsigned long *pflags)
+static inline int htab_lock_bucket(struct bucket *b, unsigned long *pflags)
 {
 	unsigned long flags;
+	int ret;
 
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-
-	preempt_disable();
-	local_irq_save(flags);
-	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
-		__this_cpu_dec(*(htab->map_locked[hash]));
-		local_irq_restore(flags);
-		preempt_enable();
-		return -EBUSY;
-	}
-
-	raw_spin_lock(&b->raw_lock);
+	ret = raw_res_spin_lock_irqsave(&b->raw_lock, flags);
+	if (ret)
+		return ret;
 	*pflags = flags;
-
 	return 0;
 }
 
-static inline void htab_unlock_bucket(const struct bpf_htab *htab,
-				      struct bucket *b, u32 hash,
-				      unsigned long flags)
+static inline void htab_unlock_bucket(struct bucket *b, unsigned long flags)
 {
-	hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
-	raw_spin_unlock(&b->raw_lock);
-	__this_cpu_dec(*(htab->map_locked[hash]));
-	local_irq_restore(flags);
-	preempt_enable();
+	raw_res_spin_unlock_irqrestore(&b->raw_lock, flags);
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
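The behavioral point to keep in mind for all the call-site hunks that follow: raw_res_spin_lock_irqsave() can fail (rqspinlock reports detected deadlocks and timeouts as errors) where raw_spin_lock() could not, so htab_lock_bucket() keeps returning an int and every caller still has to check it. A minimal sketch of the resulting caller pattern, assuming a bucket pointer b as in the call sites below:

	unsigned long flags;
	int ret;

	/* May fail instead of spinning forever; propagate the error. */
	ret = htab_lock_bucket(b, &flags);
	if (ret)
		return ret;

	/* ... modify the bucket's hlist_nulls chain ... */

	htab_unlock_bucket(b, flags);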
@@ -483,14 +463,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	struct bpf_htab *htab;
-	int err, i;
+	int err;
 
 	htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	lockdep_register_key(&htab->lockdep_key);
-
 	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
@@ -536,15 +514,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab->buckets)
 		goto free_elem_count;
 
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
-							   sizeof(int),
-							   sizeof(int),
-							   GFP_USER);
-		if (!htab->map_locked[i])
-			goto free_map_locked;
-	}
-
 	if (htab->map.map_flags & BPF_F_ZERO_SEED)
 		htab->hashrnd = 0;
 	else
@@ -607,15 +576,12 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_map_locked:
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
 	bpf_map_area_free(htab->buckets);
 	bpf_mem_alloc_destroy(&htab->pcpu_ma);
 	bpf_mem_alloc_destroy(&htab->ma);
 free_elem_count:
 	bpf_map_free_elem_count(&htab->map);
 free_htab:
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 	return ERR_PTR(err);
 }
@@ -820,7 +786,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 	b = __select_bucket(htab, tgt_l->hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return false;
 
@@ -831,7 +797,7 @@ static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
 			break;
 		}
 
-	htab_unlock_bucket(htab, b, tgt_l->hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l == tgt_l)
 		check_and_free_fields(htab, l);
@@ -1150,7 +1116,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 */
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1201,7 +1167,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			check_and_free_fields(htab, l_old);
 		}
 	}
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l_old) {
 		if (old_map_ptr)
 			map->ops->map_fd_put_ptr(map, old_map_ptr, true);
@@ -1210,7 +1176,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	}
 	return 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1257,7 +1223,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	copy_map_value(&htab->map,
 		       l_new->key + round_up(map->key_size, 8), value);
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1278,7 +1244,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	ret = 0;
 
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 err_lock_bucket:
 	if (ret)
@@ -1315,7 +1281,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1340,7 +1306,7 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	return ret;
 }
 
@@ -1381,7 +1347,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		return -ENOMEM;
 	}
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		goto err_lock_bucket;
 
@@ -1405,7 +1371,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 	}
 	ret = 0;
 err:
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 err_lock_bucket:
 	if (l_new) {
 		bpf_map_dec_elem_count(&htab->map);
@@ -1447,7 +1413,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1457,7 +1423,7 @@ static long htab_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 
 	if (l)
 		free_htab_elem(htab, l);
@@ -1483,7 +1449,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &flags);
+	ret = htab_lock_bucket(b, &flags);
 	if (ret)
 		return ret;
 
@@ -1494,7 +1460,7 @@ static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 	else
 		ret = -ENOENT;
 
-	htab_unlock_bucket(htab, b, hash, flags);
+	htab_unlock_bucket(b, flags);
 	if (l)
 		htab_lru_push_free(htab, l);
 	return ret;
@@ -1561,7 +1527,6 @@ static void htab_map_free_timers_and_wq(struct bpf_map *map)
 static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	int i;
 
 	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
 	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
@@ -1586,9 +1551,6 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_mem_alloc_destroy(&htab->ma);
 	if (htab->use_percpu_counter)
 		percpu_counter_destroy(&htab->pcount);
-	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
-		free_percpu(htab->map_locked[i]);
-	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
 }
 
@@ -1631,7 +1593,7 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 	b = __select_bucket(htab, hash);
 	head = &b->head;
 
-	ret = htab_lock_bucket(htab, b, hash, &bflags);
+	ret = htab_lock_bucket(b, &bflags);
 	if (ret)
 		return ret;
 
@@ -1668,7 +1630,7 @@
 		hlist_nulls_del_rcu(&l->hash_node);
 
 out_unlock:
-	htab_unlock_bucket(htab, b, hash, bflags);
+	htab_unlock_bucket(b, bflags);
 
 	if (l) {
 		if (is_lru_map)
@@ -1790,7 +1752,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 	head = &b->head;
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked) {
-		ret = htab_lock_bucket(htab, b, batch, &flags);
+		ret = htab_lock_bucket(b, &flags);
 		if (ret) {
 			rcu_read_unlock();
 			bpf_enable_instrumentation();
@@ -1813,7 +1775,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		goto after_loop;
@@ -1824,7 +1786,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		/* Note that since bucket_cnt > 0 here, it is implicit
 		 * that the locked was grabbed, so release it.
 		 */
-		htab_unlock_bucket(htab, b, batch, flags);
+		htab_unlock_bucket(b, flags);
 		rcu_read_unlock();
 		bpf_enable_instrumentation();
 		kvfree(keys);
@@ -1887,7 +1849,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		dst_val += value_size;
 	}
 
-	htab_unlock_bucket(htab, b, batch, flags);
+	htab_unlock_bucket(b, flags);
 	locked = false;
 
 	while (node_to_free) {
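A closing note on the batch paths: they previously passed batch (the bucket index) as the hash argument so htab_lock_bucket() could pick a slot in the old per-cpu map_locked array; with rqspinlock that slot selection is gone and only the bucket pointer is needed. A hedged sketch of the batch-path failure handling, assuming the surrounding __htab_map_lookup_and_delete_batch() context and its after_loop label as shown in the hunks above:

	if (locked) {
		ret = htab_lock_bucket(b, &flags);
		if (ret) {
			/* Lock acquisition can now fail; unwind RCU and
			 * instrumentation state before bailing out.
			 */
			rcu_read_unlock();
			bpf_enable_instrumentation();
			goto after_loop;
		}
	}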