|
@@ -18,6 +18,7 @@

18 | 18 | #include <linux/lockdep.h>
19 | 19 | #include <linux/mutex.h>
20 | 20 | #include <linux/preempt.h>
   | 21 | +#include <linux/seqlock_types.h>
21 | 22 | #include <linux/spinlock.h>
22 | 23 |
23 | 24 | #include <asm/processor.h>
|
@@ -37,37 +38,6 @@

37 | 38 |  */
38 | 39 | #define KCSAN_SEQLOCK_REGION_MAX 1000
39 | 40 |
40 |    | -/*
41 |    | - * Sequence counters (seqcount_t)
42 |    | - *
43 |    | - * This is the raw counting mechanism, without any writer protection.
44 |    | - *
45 |    | - * Write side critical sections must be serialized and non-preemptible.
46 |    | - *
47 |    | - * If readers can be invoked from hardirq or softirq contexts,
48 |    | - * interrupts or bottom halves must also be respectively disabled before
49 |    | - * entering the write section.
50 |    | - *
51 |    | - * This mechanism can't be used if the protected data contains pointers,
52 |    | - * as the writer can invalidate a pointer that a reader is following.
53 |    | - *
54 |    | - * If the write serialization mechanism is one of the common kernel
55 |    | - * locking primitives, use a sequence counter with associated lock
56 |    | - * (seqcount_LOCKNAME_t) instead.
57 |    | - *
58 |    | - * If it's desired to automatically handle the sequence counter writer
59 |    | - * serialization and non-preemptibility requirements, use a sequential
60 |    | - * lock (seqlock_t) instead.
61 |    | - *
62 |    | - * See Documentation/locking/seqlock.rst
63 |    | - */
64 |    | -typedef struct seqcount {
65 |    | -	unsigned sequence;
66 |    | -#ifdef CONFIG_DEBUG_LOCK_ALLOC
67 |    | -	struct lockdep_map dep_map;
68 |    | -#endif
69 |    | -} seqcount_t;
70 |    | -
71 | 41 | static inline void __seqcount_init(seqcount_t *s, const char *name,
72 | 42 | 				    struct lock_class_key *key)
73 | 43 | {
@@ -131,28 +101,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
|
131 | 101 |  * See Documentation/locking/seqlock.rst
132 | 102 |  */
133 | 103 |
134 |     | -/*
135 |     | - * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
136 |     | - * disable preemption. It can lead to higher latencies, and the write side
137 |     | - * sections will not be able to acquire locks which become sleeping locks
138 |     | - * (e.g. spinlock_t).
139 |     | - *
140 |     | - * To remain preemptible while avoiding a possible livelock caused by the
141 |     | - * reader preempting the writer, use a different technique: let the reader
142 |     | - * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
143 |     | - * case, acquire then release the associated LOCKNAME writer serialization
144 |     | - * lock. This will allow any possibly-preempted writer to make progress
145 |     | - * until the end of its writer serialization lock critical section.
146 |     | - *
147 |     | - * This lock-unlock technique must be implemented for all of PREEMPT_RT
148 |     | - * sleeping locks. See Documentation/locking/locktypes.rst
149 |     | - */
150 |     | -#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
151 |     | -#define __SEQ_LOCK(expr)	expr
152 |     | -#else
153 |     | -#define __SEQ_LOCK(expr)
154 |     | -#endif
155 |     | -
156 | 104 | /*
157 | 105 |  * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
158 | 106 |  * @seqcount: The real sequence counter
@@ -194,11 +142,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
|
194 | 142 |  * @lockbase: prefix for associated lock/unlock
195 | 143 |  */
196 | 144 | #define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
197 |     | -typedef struct seqcount_##lockname {				\
198 |     | -	seqcount_t		seqcount;			\
199 |     | -	__SEQ_LOCK(locktype	*lock);				\
200 |     | -} seqcount_##lockname##_t;					\
201 |     | -								\
202 | 145 | static __always_inline seqcount_t *				\
203 | 146 | __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s)		\
204 | 147 | {								\
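
For reference, the struct definition deleted from this macro expanded, for
the spinlock instantiation below, to roughly the following; per the removed
__SEQ_LOCK above, the lock pointer only exists under CONFIG_LOCKDEP or
CONFIG_PREEMPT_RT:

typedef struct seqcount_spinlock {
	seqcount_t	seqcount;
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
	spinlock_t	*lock;	/* associated writer serialization lock */
#endif
} seqcount_spinlock_t;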
|
@@ -284,6 +227,7 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
|
284 | 227 | SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
285 | 228 | SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
286 | 229 | SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
    | 230 | +#undef SEQCOUNT_LOCKNAME
287 | 231 |
288 | 232 | /*
289 | 233 |  * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -794,25 +738,6 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
|
794 | 738 | 	smp_wmb(); /* increment "sequence" before following stores */
795 | 739 | }
796 | 740 |
797 |     | -/*
798 |     | - * Sequential locks (seqlock_t)
799 |     | - *
800 |     | - * Sequence counters with an embedded spinlock for writer serialization
801 |     | - * and non-preemptibility.
802 |     | - *
803 |     | - * For more info, see:
804 |     | - *    - Comments on top of seqcount_t
805 |     | - *    - Documentation/locking/seqlock.rst
806 |     | - */
807 |     | -typedef struct {
808 |     | -	/*
809 |     | -	 * Make sure that readers don't starve writers on PREEMPT_RT: use
810 |     | -	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
811 |     | -	 */
812 |     | -	seqcount_spinlock_t seqcount;
813 |     | -	spinlock_t lock;
814 |     | -} seqlock_t;
815 |     | -
816 | 741 | #define __SEQLOCK_UNLOCKED(lockname)				\
817 | 742 | 	{							\
818 | 743 | 		.seqcount = SEQCNT_SPINLOCK_ZERO(lockname, &(lockname).lock), \
|