|
10 | 10 | #include <sbi/riscv_asm.h>
|
11 | 11 | #include <sbi/riscv_atomic.h>
|
12 | 12 | #include <sbi/riscv_barrier.h>
|
13 |
| -#include <sbi/riscv_locks.h> |
14 | 13 | #include <sbi/sbi_console.h>
|
15 | 14 | #include <sbi/sbi_cppc.h>
|
16 | 15 | #include <sbi/sbi_domain.h>
|
@@ -191,82 +190,19 @@ static void sbi_boot_print_hart(struct sbi_scratch *scratch, u32 hartid)
|
191 | 190 | sbi_hart_delegation_dump(scratch, "Boot HART ", " ");
|
192 | 191 | }
|
193 | 192 |
|
194 |
| -static spinlock_t coldboot_lock = SPIN_LOCK_INITIALIZER; |
195 |
| -static struct sbi_hartmask coldboot_wait_hmask = { 0 }; |
196 |
| - |
/*
 * Set to 1 (via __smp_store_release) by the cold-boot HART once coldboot
 * initialization is complete; warm HARTs spin on it with acquire loads.
 */
static unsigned long coldboot_done;
|
198 | 194 |
|
199 | 195 | static void wait_for_coldboot(struct sbi_scratch *scratch, u32 hartid)
|
200 | 196 | {
|
201 |
| - unsigned long saved_mie, cmip; |
202 |
| - |
203 |
| - if (__smp_load_acquire(&coldboot_done)) |
204 |
| - return; |
205 |
| - |
206 |
| - /* Save MIE CSR */ |
207 |
| - saved_mie = csr_read(CSR_MIE); |
208 |
| - |
209 |
| - /* Set MSIE and MEIE bits to receive IPI */ |
210 |
| - csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP); |
211 |
| - |
212 |
| - /* Acquire coldboot lock */ |
213 |
| - spin_lock(&coldboot_lock); |
214 |
| - |
215 |
| - /* Mark current HART as waiting */ |
216 |
| - sbi_hartmask_set_hartid(hartid, &coldboot_wait_hmask); |
217 |
| - |
218 |
| - /* Release coldboot lock */ |
219 |
| - spin_unlock(&coldboot_lock); |
220 |
| - |
221 |
| - /* Wait for coldboot to finish using WFI */ |
222 |
| - while (!__smp_load_acquire(&coldboot_done)) { |
223 |
| - do { |
224 |
| - wfi(); |
225 |
| - cmip = csr_read(CSR_MIP); |
226 |
| - } while (!(cmip & (MIP_MSIP | MIP_MEIP))); |
227 |
| - } |
228 |
| - |
229 |
| - /* Acquire coldboot lock */ |
230 |
| - spin_lock(&coldboot_lock); |
231 |
| - |
232 |
| - /* Unmark current HART as waiting */ |
233 |
| - sbi_hartmask_clear_hartid(hartid, &coldboot_wait_hmask); |
234 |
| - |
235 |
| - /* Release coldboot lock */ |
236 |
| - spin_unlock(&coldboot_lock); |
237 |
| - |
238 |
| - /* Restore MIE CSR */ |
239 |
| - csr_write(CSR_MIE, saved_mie); |
240 |
| - |
241 |
| - /* |
242 |
| - * The wait for coldboot is common for both warm startup and |
243 |
| - * warm resume path so clearing IPI here would result in losing |
244 |
| - * an IPI in warm resume path. |
245 |
| - * |
246 |
| - * Also, the sbi_platform_ipi_init() called from sbi_ipi_init() |
247 |
| - * will automatically clear IPI for current HART. |
248 |
| - */ |
| 197 | + /* Wait for coldboot to finish */ |
| 198 | + while (!__smp_load_acquire(&coldboot_done)) |
| 199 | + cpu_relax(); |
249 | 200 | }
|
250 | 201 |
|
251 | 202 | static void wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid)
|
252 | 203 | {
|
253 |
| - u32 i, hartindex = sbi_hartid_to_hartindex(hartid); |
254 |
| - |
255 | 204 | /* Mark coldboot done */
|
256 | 205 | __smp_store_release(&coldboot_done, 1);
|
257 |
| - |
258 |
| - /* Acquire coldboot lock */ |
259 |
| - spin_lock(&coldboot_lock); |
260 |
| - |
261 |
| - /* Send an IPI to all HARTs waiting for coldboot */ |
262 |
| - sbi_hartmask_for_each_hartindex(i, &coldboot_wait_hmask) { |
263 |
| - if (i == hartindex) |
264 |
| - continue; |
265 |
| - sbi_ipi_raw_send(i); |
266 |
| - } |
267 |
| - |
268 |
| - /* Release coldboot lock */ |
269 |
| - spin_unlock(&coldboot_lock); |
270 | 206 | }
|
271 | 207 |
|
/*
 * NOTE(review): presumably an offset into the per-HART scratch space used to
 * locate an init entry counter — the code that assigns/uses it is outside this
 * chunk; confirm against the rest of the file.
 */
static unsigned long entry_count_offset;
|
|
0 commit comments