@@ -14,11 +14,9 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
 	for_each_possible_cpu(cpu) {
 		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);
 
-		raw_spin_lock_init(&head->lock);
+		raw_res_spin_lock_init(&head->lock);
 		head->first = NULL;
 	}
-	raw_spin_lock_init(&s->extralist.lock);
-	s->extralist.first = NULL;
 	return 0;
 }
 
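The init hunk swaps raw_spin_lock_init() for raw_res_spin_lock_init() and drops the extralist head entirely, so the lock field in the per-CPU head must change type as well. A minimal sketch of the matching header-side change, assuming the rqspinlock API from include/asm-generic/rqspinlock.h (the real layout lives in kernel/bpf/percpu_freelist.h, which is not part of this diff):

/* Hedged sketch, not the actual header: shows only the type swap that
 * raw_res_spin_lock_init() requires (rqspinlock_t instead of
 * raw_spinlock_t) and the removal of the extralist member.
 */
#include <asm-generic/rqspinlock.h>

struct pcpu_freelist_head {
	struct pcpu_freelist_node *first;
	rqspinlock_t lock;
};

struct pcpu_freelist {
	struct pcpu_freelist_head __percpu *freelist;
	/* no extralist: the resilient lock's failure path replaces the
	 * NMI fallback list */
};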
@@ -34,58 +32,39 @@ static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
 	WRITE_ONCE(head->first, node);
 }
 
-static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+static inline bool ___pcpu_freelist_push(struct pcpu_freelist_head *head,
 					 struct pcpu_freelist_node *node)
 {
-	raw_spin_lock(&head->lock);
-	pcpu_freelist_push_node(head, node);
-	raw_spin_unlock(&head->lock);
-}
-
-static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
-						struct pcpu_freelist_node *node)
-{
-	if (!raw_spin_trylock(&s->extralist.lock))
+	if (raw_res_spin_lock(&head->lock))
 		return false;
-
-	pcpu_freelist_push_node(&s->extralist, node);
-	raw_spin_unlock(&s->extralist.lock);
+	pcpu_freelist_push_node(head, node);
+	raw_res_spin_unlock(&head->lock);
 	return true;
 }
 
-static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
-					     struct pcpu_freelist_node *node)
+void __pcpu_freelist_push(struct pcpu_freelist *s,
+			  struct pcpu_freelist_node *node)
 {
-	int cpu, orig_cpu;
+	struct pcpu_freelist_head *head;
+	int cpu;
 
-	orig_cpu = raw_smp_processor_id();
-	while (1) {
-		for_each_cpu_wrap(cpu, cpu_possible_mask, orig_cpu) {
-			struct pcpu_freelist_head *head;
+	if (___pcpu_freelist_push(this_cpu_ptr(s->freelist), node))
+		return;
 
+	while (true) {
+		for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
+			if (cpu == raw_smp_processor_id())
+				continue;
 			head = per_cpu_ptr(s->freelist, cpu);
-			if (raw_spin_trylock(&head->lock)) {
-				pcpu_freelist_push_node(head, node);
-				raw_spin_unlock(&head->lock);
-				return;
-			}
-		}
-
-		/* cannot lock any per cpu lock, try extralist */
-		if (pcpu_freelist_try_push_extra(s, node))
+			if (raw_res_spin_lock(&head->lock))
+				continue;
+			pcpu_freelist_push_node(head, node);
+			raw_res_spin_unlock(&head->lock);
 			return;
+		}
 	}
 }
 
-void __pcpu_freelist_push(struct pcpu_freelist *s,
-			  struct pcpu_freelist_node *node)
-{
-	if (in_nmi())
-		___pcpu_freelist_push_nmi(s, node);
-	else
-		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
-}
-
 void pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
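Unlike raw_spin_lock(), raw_res_spin_lock() can fail: it returns 0 on acquisition and a nonzero error when the resilient lock detects a problem (e.g. deadlock or timeout), which is why ___pcpu_freelist_push() now returns bool and the outer loop in __pcpu_freelist_push() moves on to another CPU's list instead of blocking. A small self-contained illustration of that calling convention; the function name is hypothetical and only sketches the pattern the hunk above uses:

/* Hypothetical caller sketching the error-checked lock pattern: on
 * lock failure, report false rather than spinning, so the caller can
 * retry against a different per-CPU list.
 */
static bool push_one_example(struct pcpu_freelist_head *head,
			     struct pcpu_freelist_node *node)
{
	if (raw_res_spin_lock(&head->lock))
		return false;	/* nonzero = not acquired, no unlock needed */
	pcpu_freelist_push_node(head, node);
	raw_res_spin_unlock(&head->lock);
	return true;
}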
@@ -120,71 +99,29 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 
 static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
 {
+	struct pcpu_freelist_node *node = NULL;
 	struct pcpu_freelist_head *head;
-	struct pcpu_freelist_node *node;
 	int cpu;
 
 	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
 		head = per_cpu_ptr(s->freelist, cpu);
 		if (!READ_ONCE(head->first))
 			continue;
-		raw_spin_lock(&head->lock);
+		if (raw_res_spin_lock(&head->lock))
+			continue;
 		node = head->first;
 		if (node) {
 			WRITE_ONCE(head->first, node->next);
-			raw_spin_unlock(&head->lock);
+			raw_res_spin_unlock(&head->lock);
 			return node;
 		}
-		raw_spin_unlock(&head->lock);
+		raw_res_spin_unlock(&head->lock);
 	}
-
-	/* per cpu lists are all empty, try extralist */
-	if (!READ_ONCE(s->extralist.first))
-		return NULL;
-	raw_spin_lock(&s->extralist.lock);
-	node = s->extralist.first;
-	if (node)
-		WRITE_ONCE(s->extralist.first, node->next);
-	raw_spin_unlock(&s->extralist.lock);
-	return node;
-}
-
-static struct pcpu_freelist_node *
-___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
-{
-	struct pcpu_freelist_head *head;
-	struct pcpu_freelist_node *node;
-	int cpu;
-
-	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
-		head = per_cpu_ptr(s->freelist, cpu);
-		if (!READ_ONCE(head->first))
-			continue;
-		if (raw_spin_trylock(&head->lock)) {
-			node = head->first;
-			if (node) {
-				WRITE_ONCE(head->first, node->next);
-				raw_spin_unlock(&head->lock);
-				return node;
-			}
-			raw_spin_unlock(&head->lock);
-		}
-	}
-
-	/* cannot pop from per cpu lists, try extralist */
-	if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
-		return NULL;
-	node = s->extralist.first;
-	if (node)
-		WRITE_ONCE(s->extralist.first, node->next);
-	raw_spin_unlock(&s->extralist.lock);
 	return node;
 }
 
 struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
-	if (in_nmi())
-		return ___pcpu_freelist_pop_nmi(s);
 	return ___pcpu_freelist_pop(s);
 }
 
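With the extralist and the dedicated NMI variants gone, ___pcpu_freelist_pop() is the single pop path, and because each raw_res_spin_lock() may fail, it can return NULL transiently even while elements remain on other CPUs' lists. A hypothetical consumer sketch (function name and error policy are assumptions, not part of this patch):

/* Hypothetical call site: treat a NULL pop as "no element obtained"
 * rather than proof that the freelist is empty, since every per-CPU
 * lock acquisition may have errored out.
 */
static int get_elem_example(struct pcpu_freelist *s, void **out)
{
	struct pcpu_freelist_node *node = __pcpu_freelist_pop(s);

	if (!node)
		return -ENOMEM;	/* or retry, depending on the caller's policy */
	*out = node;
	return 0;
}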