 #define NIB ((1ULL << SLICE) - 1)
 #define SLNODES (1 << SLICE)

+// ref_count value of a valid leaf - it has to be >= 2
+#define LEAF_VALID (2ULL)
+
 typedef uintptr_t word;
 typedef uint8_t sh_t;

@@ -312,12 +315,13 @@ static void add_to_deleted_leaf_list(struct critnib *__restrict c,
     assert(k);
     struct critnib_leaf *deleted_leaf;

-    do {
-        utils_atomic_load_acquire_ptr((void **)&c->deleted_leaf,
-                                      (void **)&deleted_leaf);
-        utils_atomic_store_release_ptr(&k->value, deleted_leaf);
-    } while (!utils_compare_exchange_u64(
-        (uint64_t *)&c->deleted_leaf, (uint64_t *)&k->value, (uint64_t *)&k));
+    utils_atomic_load_acquire_ptr((void **)&c->deleted_leaf,
+                                  (void **)&deleted_leaf);
+    utils_atomic_store_release_ptr(&k->value, deleted_leaf);
+    while (!utils_compare_exchange_u64((uint64_t *)&c->deleted_leaf,
+                                       (uint64_t *)&k->value, (uint64_t *)&k)) {
+        ;
+    }
 }

 /*
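
The rewritten add_to_deleted_leaf_list() is the classic lock-free LIFO push: link the leaf to the observed head, then compare-exchange the head to the leaf. Because &k->value itself is passed as the "expected" argument, a failed compare-exchange (assuming utils_compare_exchange_u64 follows the usual convention of writing the current value back into *expected) refreshes the link in place, which is why the retry loop body is empty. Below is a minimal sketch of the same push using standard C11 atomics rather than the utils_atomic_* wrappers; struct node, push_deleted and the memory orders are illustrative assumptions, not part of the patch.

#include <stdatomic.h>
#include <stddef.h>

struct node {
    void *value; /* doubles as the "next" link while the node sits on the list */
};

/* Push k onto the lock-free LIFO headed by *head.  For clarity this sketch
 * keeps the observed head in a separate local instead of reusing k->value as
 * the expected slot: a failed CAS updates "observed", and the loop re-links k
 * before retrying. */
static void push_deleted(_Atomic(struct node *) *head, struct node *k) {
    struct node *observed = atomic_load_explicit(head, memory_order_acquire);
    do {
        k->value = observed;
    } while (!atomic_compare_exchange_weak_explicit(
        head, &observed, k, memory_order_acq_rel, memory_order_acquire));
}
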
@@ -331,18 +335,26 @@ static void free_leaf(struct critnib *__restrict c,
         return;
     }

+    // k should be added to the c->deleted_leaf list here
+    // or in critnib_release() when the reference count drops to 0.
+    utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1);
+
     if (c->cb_free_leaf) {
         uint64_t ref_count;
         utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
         if (ref_count > 0) {
-            // k will be added to c->deleted_leaf in critnib_release()
+            // k will be added to the c->deleted_leaf list in critnib_release()
             // when the reference count drops to 0.
-            utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1);
             return;
         }
     }

-    add_to_deleted_leaf_list(c, k);
+    uint8_t expected = 1;
+    uint8_t desired = 0;
+    if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected,
+                                  &desired)) {
+        add_to_deleted_leaf_list(c, k);
+    }
 }

 /*
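
free_leaf() now raises pending_deleted_leaf up front and later claims it back with a byte-wide compare-exchange, so whichever of the two racing paths (free_leaf() here or critnib_release() further down) wins the 1 -> 0 exchange pushes the leaf onto the c->deleted_leaf list exactly once. A small sketch of that claim step with C11 atomics; claim_pending_deletion is a hypothetical helper name, and utils_compare_exchange_u8 is assumed to behave like a strong compare-exchange.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Atomically flip a pending flag from 1 to 0.  Of two racing callers, exactly
 * one sees the exchange succeed, so the leaf is recycled only once. */
static bool claim_pending_deletion(_Atomic uint8_t *pending) {
    uint8_t expected = 1;
    return atomic_compare_exchange_strong_explicit(
        pending, &expected, 0, memory_order_acq_rel, memory_order_acquire);
}

The caller recycles the leaf only when the claim succeeds, mirroring the if (utils_compare_exchange_u8(...)) { add_to_deleted_leaf_list(c, k); } pattern in the hunk above.
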
@@ -351,8 +363,8 @@ static void free_leaf(struct critnib *__restrict c,
 static struct critnib_leaf *alloc_leaf(struct critnib *__restrict c) {
     struct critnib_leaf *k;

+    utils_atomic_load_acquire_ptr((void **)&c->deleted_leaf, (void **)&k);
     do {
-        utils_atomic_load_acquire_ptr((void **)&c->deleted_leaf, (void **)&k);
         if (!k) {
             return umf_ba_global_aligned_alloc(sizeof(struct critnib_leaf), 8);
         }
@@ -392,8 +404,8 @@ int critnib_insert(struct critnib *c, word key, void *value, int update) {
     utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0);

     if (c->cb_free_leaf) {
-        // mark the leaf as valid (ref_count == 1)
-        utils_atomic_store_release_u64(&k->ref_count, 1ULL);
+        // mark the leaf as valid (ref_count == 2)
+        utils_atomic_store_release_u64(&k->ref_count, LEAF_VALID);
     } else {
         // the reference counter is not used in this case
         utils_atomic_store_release_u64(&k->ref_count, 0ULL);
@@ -602,36 +614,52 @@ int critnib_release(struct critnib *c, void *ref) {
     struct critnib_leaf *k = (struct critnib_leaf *)ref;

     uint64_t ref_count;
-    utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
-
-    if (ref_count == 0) {
-        return -1;
-    }
-
+    uint64_t ref_desired;
     /* decrement the reference count */
-    if (utils_atomic_decrement_u64(&k->ref_count) == 0) {
-        void *to_be_freed = NULL;
-        utils_atomic_load_acquire_ptr(&k->to_be_freed, &to_be_freed);
-        if (to_be_freed) {
-            utils_atomic_store_release_ptr(&k->to_be_freed, NULL);
-            c->cb_free_leaf(c->leaf_allocator, to_be_freed);
-        }
-        uint8_t pending_deleted_leaf;
-        utils_atomic_load_acquire_u8(&k->pending_deleted_leaf,
-                                     &pending_deleted_leaf);
-        if (pending_deleted_leaf) {
-            utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0);
-            add_to_deleted_leaf_list(c, k);
+    utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
+    do {
+        if (ref_count < LEAF_VALID) {
+#ifndef NDEBUG
+            LOG_FATAL("critnib_release() was called too many times (ref_count "
+                      "= %llu)\n",
+                      (unsigned long long)ref_count);
+            assert(ref_count >= LEAF_VALID);
+#endif
+            return -1;
         }
+        ref_desired = ref_count - 1;
+    } while (
+        !utils_compare_exchange_u64(&k->ref_count, &ref_count, &ref_desired));
+
+    if (ref_desired >= LEAF_VALID) {
+        // ref_count was decremented and the leaf is still valid
+        return 0;
     }

+    /* ref_count == (LEAF_VALID - 1) - the leaf will be freed */
+    void *to_be_freed = NULL;
+    utils_atomic_load_acquire_ptr(&k->to_be_freed, &to_be_freed);
+    utils_atomic_store_release_ptr(&k->to_be_freed, NULL);
 #ifndef NDEBUG
-    // check if the reference count is overflowed
-    utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
-    assert((ref_count & (1ULL << 63)) == 0);
-    assert(ref_count != (uint64_t)(0 - 1ULL));
+    if (to_be_freed == NULL) {
+        LOG_FATAL("leaf will not be freed (to_be_freed == NULL, value = %p)\n",
+                  k->value);
+        assert(to_be_freed != NULL);
+    }
 #endif

+    // mark the leaf as not used (ref_count == 0)
+    utils_atomic_store_release_u64(&k->ref_count, 0ULL);
+
+    c->cb_free_leaf(c->leaf_allocator, to_be_freed);
+
+    uint8_t expected = 1;
+    uint8_t desired = 0;
+    if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected,
+                                  &desired)) {
+        add_to_deleted_leaf_list(c, k);
+    }
+
     return 0;
 }

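The unconditional utils_atomic_decrement_u64() is replaced by a compare-exchange loop that refuses to drop the counter below LEAF_VALID, so calling critnib_release() too many times is reported (and asserted on in debug builds) instead of silently underflowing the counter. A hedged sketch of the same bounded decrement with C11 atomics; leaf_release is a hypothetical helper and the return-value encoding is only illustrative.

#include <stdatomic.h>
#include <stdint.h>

#define LEAF_VALID 2ULL /* mirrors the definition added at the top of the file */

/* Decrement a leaf reference count without ever going below LEAF_VALID - 1.
 * Returns -1 on over-release, 1 when this call dropped the last reference
 * (the count fell to LEAF_VALID - 1), and 0 when the leaf stays valid. */
static int leaf_release(_Atomic uint64_t *ref_count) {
    uint64_t observed = atomic_load_explicit(ref_count, memory_order_acquire);
    uint64_t desired;
    do {
        if (observed < LEAF_VALID) {
            return -1; /* released more times than acquired */
        }
        desired = observed - 1;
    } while (!atomic_compare_exchange_weak_explicit(
        ref_count, &observed, desired, memory_order_acq_rel,
        memory_order_acquire));
    return (desired < LEAF_VALID) ? 1 : 0;
}
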
@@ -659,9 +687,9 @@ static inline int increment_ref_count(struct critnib_leaf *k) {
     uint64_t expected;
     uint64_t desired;

+    utils_atomic_load_acquire_u64(&k->ref_count, &expected);
     do {
-        utils_atomic_load_acquire_u64(&k->ref_count, &expected);
-        if (expected == 0) {
+        if (expected < LEAF_VALID) {
             return -1;
         }
         desired = expected + 1;
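
increment_ref_count() (and alloc_leaf() above) hoist the initial load out of the retry loop; that is safe because a failed compare-exchange already refreshes "expected" with the current value. The check against LEAF_VALID turns the increment into a try-acquire that fails once a leaf is no longer valid. A minimal C11 sketch under the same assumptions; leaf_try_acquire is a hypothetical name.

#include <stdatomic.h>
#include <stdint.h>

#define LEAF_VALID 2ULL /* mirrors the definition added at the top of the file */

/* Try to take one more reference on a leaf.  Fails (returns -1) once the
 * count has dropped below LEAF_VALID, i.e. the leaf is being freed. */
static int leaf_try_acquire(_Atomic uint64_t *ref_count) {
    uint64_t expected = atomic_load_explicit(ref_count, memory_order_acquire);
    uint64_t desired;
    do {
        if (expected < LEAF_VALID) {
            return -1;
        }
        desired = expected + 1;
    } while (!atomic_compare_exchange_weak_explicit(
        ref_count, &expected, desired, memory_order_acq_rel,
        memory_order_acquire));
    return 0;
}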