
Commit daa394f

Merge tag 'core-debugobjects-2024-09-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull debugobjects updates from Thomas Gleixner:

 - Use the threshold to check for the pool refill condition and not the
   run-time recorded all-time-low fill value, which is lower than the
   threshold and therefore causes refills to be delayed.

 - KCSAN annotation updates and simplification of the fill_pool() code.

* tag 'core-debugobjects-2024-09-16' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Remove redundant checks in fill_pool()
  debugobjects: Fix conditions in fill_pool()
  debugobjects: Fix the compilation attributes of some global variables
2 parents: 9ea925c + 63a4a9b

File tree

1 file changed: +13 -14 lines


lib/debugobjects.c

Lines changed: 13 additions & 14 deletions
@@ -70,10 +70,10 @@ static HLIST_HEAD(obj_to_free);
  * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
  * can be off.
  */
-static int obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_free = ODEBUG_POOL_SIZE;
 static int obj_pool_used;
-static int obj_pool_max_used;
+static int __data_racy obj_pool_max_used;
 static bool obj_freeing;
 /* The number of objs on the global free list */
 static int obj_nr_tofree;
@@ -84,9 +84,9 @@ static int __data_racy debug_objects_fixups __read_mostly;
 static int __data_racy debug_objects_warnings __read_mostly;
 static int __data_racy debug_objects_enabled __read_mostly
 			= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int __data_racy debug_objects_pool_size __read_mostly
+static int debug_objects_pool_size __ro_after_init
 			= ODEBUG_POOL_SIZE;
-static int __data_racy debug_objects_pool_min_level __read_mostly
+static int debug_objects_pool_min_level __ro_after_init
 			= ODEBUG_POOL_MIN_LEVEL;

 static const struct debug_obj_descr *descr_test __read_mostly;
@@ -95,8 +95,8 @@ static struct kmem_cache *obj_cache __ro_after_init;
 /*
  * Track numbers of kmem_cache_alloc()/free() calls done.
  */
-static int debug_objects_allocated;
-static int debug_objects_freed;
+static int __data_racy debug_objects_allocated;
+static int __data_racy debug_objects_freed;

 static void free_obj_work(struct work_struct *work);
 static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
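
The attribute changes in the three hunks above use two annotations from the kernel headers: __data_racy (linux/compiler_types.h), which marks a variable whose lockless reads and writes are intentional so KCSAN does not report them as data races, and __ro_after_init (linux/cache.h), which places a variable in a section that becomes read-only once boot-time initialization has finished. The sketch below is not from this commit; the names (stat_lock, stat_hits, stat_limit, record_hit) are made up to illustrate the typical usage pattern.

#include <linux/cache.h>		/* __ro_after_init */
#include <linux/compiler_types.h>	/* __data_racy (no-op unless KCSAN is enabled) */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(stat_lock);

/*
 * Updated under stat_lock, but also read locklessly for diagnostics
 * (e.g. from a stats/show path). __data_racy tells KCSAN that those
 * lockless accesses are intentional.
 */
static int __data_racy stat_hits;

/*
 * Written only during early init (e.g. from a boot parameter handler)
 * and never afterwards, so it can live in the read-only-after-init
 * section and needs no racy-access annotation at all.
 */
static int stat_limit __ro_after_init = 256;

static void record_hit(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&stat_lock, flags);
	if (stat_hits < stat_limit)
		stat_hits++;
	raw_spin_unlock_irqrestore(&stat_lock, flags);
}

That matches the hunks above: the two pool tunables are set up during boot and then only read, while the statistics counters keep being read locklessly at run time.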
@@ -135,20 +135,19 @@ static void fill_pool(void)
 		return;

 	/*
-	 * Reuse objs from the global free list; they will be reinitialized
-	 * when allocating.
+	 * Reuse objs from the global obj_to_free list; they will be
+	 * reinitialized when allocating.
 	 *
-	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
-	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
-	 * sections.
+	 * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
+	 * the WRITE_ONCE() in pool_lock critical sections.
 	 */
-	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+	if (READ_ONCE(obj_nr_tofree)) {
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		/*
 		 * Recheck with the lock held as the worker thread might have
 		 * won the race and freed the global free list already.
 		 */
-		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
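
The last hunk changes two things. First, the refill condition now compares the pool level against debug_objects_pool_min_level, the configured threshold, instead of obj_pool_min_free, the run-time statistic recording the all-time-low fill level, which sits below the threshold and therefore delayed refills (the first bullet of the merge message). Second, the outer lockless loop becomes a plain if: it only needs to give a cheap hint that obj_to_free is non-empty, because the authoritative check runs under pool_lock in the inner while loop. The sketch below illustrates that pattern with made-up names (reuse_lock, pending, level, threshold, pool_refill); it is not the kernel code itself.

#include <linux/compiler.h>	/* READ_ONCE */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(reuse_lock);
static int pending;	/* reusable objects on a free list, written under reuse_lock */
static int level;	/* current pool fill level, written under reuse_lock */
static int threshold;	/* refill target, fixed after init */

static void pool_refill(void)
{
	unsigned long flags;

	/* Cheap lockless hint: bail out if there is nothing to reuse. */
	if (!READ_ONCE(pending))
		return;

	raw_spin_lock_irqsave(&reuse_lock, flags);
	/*
	 * Authoritative condition, evaluated under the lock: stop as soon
	 * as the free list is drained or the pool is full enough.
	 */
	while (pending && level < threshold) {
		pending--;
		level++;
		/* ... move one object from the free list into the pool ... */
	}
	raw_spin_unlock_irqrestore(&reuse_lock, flags);
}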
