Skip to content

Commit 28b8235

Browse files
committed
x86/apic: Wrap IPI calls into helper functions
Move them to one place so the static call conversion gets simpler.

No functional change.

[ dhansen: merge against recent x86/apic changes ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Tested-by: Juergen Gross <jgross@suse.com> # Xen PV (dom0 and unpriv. guest)
1 parent 54271fb commit 28b8235

File tree

13 files changed

+51
-20
lines changed

13 files changed

+51
-20
lines changed

arch/x86/hyperv/hv_spinlock.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ static bool __initdata hv_pvspin = true;
2020

2121
static void hv_qlock_kick(int cpu)
2222
{
23-
apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
23+
__apic_send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
2424
}
2525

2626
static void hv_qlock_wait(u8 *byte, u8 val)

arch/x86/include/asm/apic.h

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -401,6 +401,36 @@ static __always_inline void apic_icr_write(u32 low, u32 high)
401401
apic->icr_write(low, high);
402402
}
403403

404+
static __always_inline void __apic_send_IPI(int cpu, int vector)
405+
{
406+
apic->send_IPI(cpu, vector);
407+
}
408+
409+
static __always_inline void __apic_send_IPI_mask(const struct cpumask *mask, int vector)
410+
{
411+
apic->send_IPI_mask(mask, vector);
412+
}
413+
414+
static __always_inline void __apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
415+
{
416+
apic->send_IPI_mask_allbutself(mask, vector);
417+
}
418+
419+
static __always_inline void __apic_send_IPI_allbutself(int vector)
420+
{
421+
apic->send_IPI_allbutself(vector);
422+
}
423+
424+
static __always_inline void __apic_send_IPI_all(int vector)
425+
{
426+
apic->send_IPI_all(vector);
427+
}
428+
429+
static __always_inline void __apic_send_IPI_self(int vector)
430+
{
431+
apic->send_IPI_self(vector);
432+
}
433+
404434
static __always_inline void apic_wait_icr_idle(void)
405435
{
406436
if (apic->wait_icr_idle)

arch/x86/kernel/apic/apic.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -502,7 +502,7 @@ static int lapic_timer_set_oneshot(struct clock_event_device *evt)
502502
static void lapic_timer_broadcast(const struct cpumask *mask)
503503
{
504504
#ifdef CONFIG_SMP
505-
apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
505+
__apic_send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
506506
#endif
507507
}
508508

arch/x86/kernel/apic/hw_nmi.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,8 @@
2121
#include <linux/init.h>
2222
#include <linux/delay.h>
2323

24+
#include "local.h"
25+
2426
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
2527
u64 hw_nmi_get_sample_period(int watchdog_thresh)
2628
{
@@ -31,7 +33,7 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
3133
#ifdef arch_trigger_cpumask_backtrace
3234
static void nmi_raise_cpu_backtrace(cpumask_t *mask)
3335
{
34-
apic->send_IPI_mask(mask, NMI_VECTOR);
36+
__apic_send_IPI_mask(mask, NMI_VECTOR);
3537
}
3638

3739
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)

arch/x86/kernel/apic/ipi.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,9 @@ void apic_send_IPI_allbutself(unsigned int vector)
5454
return;
5555

5656
if (static_branch_likely(&apic_use_ipi_shorthand))
57-
apic->send_IPI_allbutself(vector);
57+
__apic_send_IPI_allbutself(vector);
5858
else
59-
apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
59+
__apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
6060
}
6161

6262
/*
@@ -70,12 +70,12 @@ void native_smp_send_reschedule(int cpu)
7070
WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
7171
return;
7272
}
73-
apic->send_IPI(cpu, RESCHEDULE_VECTOR);
73+
__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
7474
}
7575

7676
void native_send_call_func_single_ipi(int cpu)
7777
{
78-
apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
78+
__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
7979
}
8080

8181
void native_send_call_func_ipi(const struct cpumask *mask)
@@ -87,14 +87,14 @@ void native_send_call_func_ipi(const struct cpumask *mask)
8787
goto sendmask;
8888

8989
if (cpumask_test_cpu(cpu, mask))
90-
apic->send_IPI_all(CALL_FUNCTION_VECTOR);
90+
__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
9191
else if (num_online_cpus() > 1)
92-
apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
92+
__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
9393
return;
9494
}
9595

9696
sendmask:
97-
apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
97+
__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
9898
}
9999

100100
#endif /* CONFIG_SMP */
@@ -221,7 +221,7 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
221221
*/
222222
void default_send_IPI_single(int cpu, int vector)
223223
{
224-
apic->send_IPI_mask(cpumask_of(cpu), vector);
224+
__apic_send_IPI_mask(cpumask_of(cpu), vector);
225225
}
226226

227227
void default_send_IPI_allbutself(int vector)

arch/x86/kernel/apic/vector.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -898,7 +898,7 @@ static int apic_retrigger_irq(struct irq_data *irqd)
898898
unsigned long flags;
899899

900900
raw_spin_lock_irqsave(&vector_lock, flags);
901-
apic->send_IPI(apicd->cpu, apicd->vector);
901+
__apic_send_IPI(apicd->cpu, apicd->vector);
902902
raw_spin_unlock_irqrestore(&vector_lock, flags);
903903

904904
return 1;

arch/x86/kernel/cpu/mce/inject.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -270,8 +270,7 @@ static void __maybe_unused raise_mce(struct mce *m)
270270
mce_irq_ipi, NULL, 0);
271271
preempt_enable();
272272
} else if (m->inject_flags & MCJ_NMI_BROADCAST)
273-
apic->send_IPI_mask(mce_inject_cpumask,
274-
NMI_VECTOR);
273+
__apic_send_IPI_mask(mce_inject_cpumask, NMI_VECTOR);
275274
}
276275
start = jiffies;
277276
while (!cpumask_empty(mce_inject_cpumask)) {

arch/x86/kernel/irq_work.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ void arch_irq_work_raise(void)
2828
if (!arch_irq_work_has_interrupt())
2929
return;
3030

31-
apic->send_IPI_self(IRQ_WORK_VECTOR);
31+
__apic_send_IPI_self(IRQ_WORK_VECTOR);
3232
apic_wait_icr_idle();
3333
}
3434
#endif

arch/x86/kernel/nmi_selftest.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
7575
/* sync above data before sending NMI */
7676
wmb();
7777

78-
apic->send_IPI_mask(mask, NMI_VECTOR);
78+
__apic_send_IPI_mask(mask, NMI_VECTOR);
7979

8080
/* Don't wait longer than a second */
8181
timeout = USEC_PER_SEC;

arch/x86/kernel/smp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -237,7 +237,7 @@ static void native_stop_other_cpus(int wait)
237237
pr_emerg("Shutting down cpus with NMI\n");
238238

239239
for_each_cpu(cpu, &cpus_stop_mask)
240-
apic->send_IPI(cpu, NMI_VECTOR);
240+
__apic_send_IPI(cpu, NMI_VECTOR);
241241
}
242242
/*
243243
* Don't wait longer than 10 ms if the caller didn't

0 commit comments

Comments (0)