Commit 6c8ad3a

atomic64: Use arch_spin_locks instead of raw_spin_locks
raw_spin_locks can be traced by lockdep or by tracing itself, and atomic64 operations are used inside the tracing infrastructure. When an architecture does not have true atomic64 operations it falls back to the generic version that disables interrupts and takes spin_locks. The tracing ring buffer uses atomic64 operations for its timekeeping, so on architectures that use the generic fallback, the locking inside the atomic operations can trigger infinite recursion.

As the atomic64 fallback is an architecture-specific implementation detail, it should not be using raw_spin_locks() but arch_spin_locks, since that is exactly what arch_spin_locks are for: architecture-specific implementations of generic infrastructure such as the atomic64 operations.

Note, by switching from raw_spin_locks to arch_spin_locks, the locks taken to emulate the atomic64 operations will not have lockdep, mmio, or any other kind of checks done on them. They will not even disable preemption, although the code does disable interrupts, which prevents the tasks holding the locks from being preempted. As the locks are held only for very short periods of time, and only to emulate atomic64, leaving them uninstrumented should not be an issue.

Cc: stable@vger.kernel.org
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andreas Larsson <andreas@gaisler.com>
Link: https://lore.kernel.org/20250122144311.64392baf@gandalf.local.home
Fixes: c84897c ("ring-buffer: Remove 32bit timestamp logic")
Closes: https://lore.kernel.org/all/86fb4f86-a0e4-45a2-a2df-3154acc4f086@gaisler.com/
Reported-by: Ludwig Rydberg <ludwig.rydberg@gaisler.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
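The essence of the change is that each emulated atomic64 critical section stops going through the instrumented raw_spinlock API and instead disables interrupts by hand and takes a bare arch_spinlock_t. A minimal before/after sketch of the pattern, with identifiers taken from the diff below and the surrounding function body abbreviated:

        /* Before: instrumented lock, visible to lockdep and tracing. */
        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        raw_spin_unlock_irqrestore(lock, flags);

        /* After: bare arch lock; interrupts are disabled by hand so the
         * holder cannot be preempted, but there is no lockdep, mmio, or
         * preemption accounting, so nothing here re-enters the tracer. */
        local_irq_save(flags);
        arch_spin_lock(lock);
        val = v->counter;
        arch_spin_unlock(lock);
        local_irq_restore(flags);

Because arch_spin_lock() is the lowest-level locking primitive, taking it cannot call back into lockdep or the tracing machinery, which is what breaks the recursion when the ring buffer's timestamp code lands in these helpers.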
1 parent cd2375a · commit 6c8ad3a

1 file changed: +48 additions, -30 deletions

lib/atomic64.c

Lines changed: 48 additions & 30 deletions
@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-        raw_spinlock_t lock;
+        arch_spinlock_t lock;
         char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
         [0 ... (NR_LOCKS - 1)] = {
-                .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+                .lock = __ARCH_SPIN_LOCK_UNLOCKED,
         },
 };

-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline arch_spinlock_t *lock_addr(const atomic64_t *v)
 {
         unsigned long addr = (unsigned long) v;

@@ -45,49 +45,57 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 s64 generic_atomic64_read(const atomic64_t *v)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);
         s64 val;

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         val = v->counter;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);
         return val;
 }
 EXPORT_SYMBOL(generic_atomic64_read);

 void generic_atomic64_set(atomic64_t *v, s64 i)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         v->counter = i;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(generic_atomic64_set);

 #define ATOMIC64_OP(op, c_op) \
 void generic_atomic64_##op(s64 a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        arch_spinlock_t *lock = lock_addr(v); \
         \
-        raw_spin_lock_irqsave(lock, flags); \
+        local_irq_save(flags); \
+        arch_spin_lock(lock); \
         v->counter c_op a; \
-        raw_spin_unlock_irqrestore(lock, flags); \
+        arch_spin_unlock(lock); \
+        local_irq_restore(flags); \
 } \
 EXPORT_SYMBOL(generic_atomic64_##op);

 #define ATOMIC64_OP_RETURN(op, c_op) \
 s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        arch_spinlock_t *lock = lock_addr(v); \
         s64 val; \
         \
-        raw_spin_lock_irqsave(lock, flags); \
+        local_irq_save(flags); \
+        arch_spin_lock(lock); \
         val = (v->counter c_op a); \
-        raw_spin_unlock_irqrestore(lock, flags); \
+        arch_spin_unlock(lock); \
+        local_irq_restore(flags); \
         return val; \
 } \
 EXPORT_SYMBOL(generic_atomic64_##op##_return);
@@ -96,13 +104,15 @@ EXPORT_SYMBOL(generic_atomic64_##op##_return);
 s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
 { \
         unsigned long flags; \
-        raw_spinlock_t *lock = lock_addr(v); \
+        arch_spinlock_t *lock = lock_addr(v); \
         s64 val; \
         \
-        raw_spin_lock_irqsave(lock, flags); \
+        local_irq_save(flags); \
+        arch_spin_lock(lock); \
         val = v->counter; \
         v->counter c_op a; \
-        raw_spin_unlock_irqrestore(lock, flags); \
+        arch_spin_unlock(lock); \
+        local_irq_restore(flags); \
         return val; \
 } \
 EXPORT_SYMBOL(generic_atomic64_fetch_##op);
@@ -131,58 +141,66 @@ ATOMIC64_OPS(xor, ^=)
 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);
         s64 val;

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         val = v->counter - 1;
         if (val >= 0)
                 v->counter = val;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);
         return val;
 }
 EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

 s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);
         s64 val;

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         val = v->counter;
         if (val == o)
                 v->counter = n;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);
         return val;
 }
 EXPORT_SYMBOL(generic_atomic64_cmpxchg);

 s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);
         s64 val;

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         val = v->counter;
         v->counter = new;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);
         return val;
 }
 EXPORT_SYMBOL(generic_atomic64_xchg);

 s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
         unsigned long flags;
-        raw_spinlock_t *lock = lock_addr(v);
+        arch_spinlock_t *lock = lock_addr(v);
         s64 val;

-        raw_spin_lock_irqsave(lock, flags);
+        local_irq_save(flags);
+        arch_spin_lock(lock);
         val = v->counter;
         if (val != u)
                 v->counter += a;
-        raw_spin_unlock_irqrestore(lock, flags);
+        arch_spin_unlock(lock);
+        local_irq_restore(flags);

         return val;
 }
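
For reference, a consolidated sketch of one helper as it reads after this patch, reconstructed from the hunks above (the body of lock_addr(), which maps the atomic64_t address to one of the NR_LOCKS cacheline-padded locks, is unchanged and not shown):

        s64 generic_atomic64_read(const atomic64_t *v)
        {
                unsigned long flags;
                arch_spinlock_t *lock = lock_addr(v);   /* pick one of the NR_LOCKS padded locks */
                s64 val;

                local_irq_save(flags);          /* interrupts off: the lock holder cannot be preempted */
                arch_spin_lock(lock);           /* bare arch lock: no lockdep or tracing hooks */
                val = v->counter;
                arch_spin_unlock(lock);
                local_irq_restore(flags);
                return val;
        }

All the other helpers follow the same shape: disable interrupts, take the per-address arch lock, operate on v->counter, then release in reverse order.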
