
Commit f4854bf
wei-w-wang authored, bonzini committed

KVM: x86: Replace static_call_cond() with static_call()

The use of static_call_cond() is essentially the same as static_call() on
x86 (e.g. static_call() now handles a NULL pointer as a NOP), so replace it
with static_call() to simplify the code.

Link: https://lore.kernel.org/all/3916caa1dcd114301a49beafa5030eca396745c1.1679456900.git.jpoimboe@kernel.org/
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Link: https://lore.kernel.org/r/20240507133103.15052-2-wei.w.wang@intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

1 parent bc9cd5a
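Editor's note: to illustrate the NULL-as-NOP behavior this commit relies on,
here is a minimal kernel-style sketch (the op name demo_vcpu_blocking and the
surrounding functions are hypothetical, not KVM's actual plumbing). A static
call defined with DEFINE_STATIC_CALL_NULL() starts out with a NULL target; on
x86 the patched call site is a NOP until static_call_update() installs a real
handler, so the caller needs no NULL check.

    #include <linux/static_call.h>

    /* Hypothetical optional hook: void return, may never be implemented. */
    static void vendor_vcpu_blocking(int cpu)
    {
            /* vendor-specific work would go here */
    }

    /* The slot starts NULL; on x86 the call site degenerates to a NOP. */
    DEFINE_STATIC_CALL_NULL(demo_vcpu_blocking, vendor_vcpu_blocking);

    void arch_vcpu_blocking(int cpu)
    {
            /*
             * Previously spelled static_call_cond(demo_vcpu_blocking)(cpu);
             * since static_call() now treats a NULL target of a void function
             * as a NOP, the plain form is equivalent.
             */
            static_call(demo_vcpu_blocking)(cpu);
    }

    void install_vendor_ops(void)
    {
            /* Fill the slot only when the vendor module provides a hook. */
            static_call_update(demo_vcpu_blocking, vendor_vcpu_blocking);
    }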

File tree: 7 files changed (+33 −35 lines)

arch/x86/include/asm/kvm-x86-ops.h
Lines changed: 1 addition & 2 deletions

@@ -9,8 +9,7 @@ BUILD_BUG_ON(1)
  * "static_call_update()" calls.
  *
  * KVM_X86_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites. KVM_X86_OP_OPTIONAL_RET0() can be used likewise
+ * a NULL definition. KVM_X86_OP_OPTIONAL_RET0() can be used likewise
  * to make a definition optional, but in this case the default will
  * be __static_call_return0.
  */
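Editor's note: for readers unfamiliar with this header's x-macro pattern, the
condensed sketch below shows how the declare and update sides consume it. This
is paraphrased from memory of kvm_host.h and x86.c of this era, not copied
verbatim: mandatory ops trigger a WARN if left NULL, optional ops may stay
NULL (the patched call becomes a NOP), and RET0 ops fall back to
__static_call_return0.

    /* Declaration side (kvm_host.h, paraphrased): one static call per op. */
    #define KVM_X86_OP(func) \
            DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
    #define KVM_X86_OP_OPTIONAL KVM_X86_OP
    #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
    #include <asm/kvm-x86-ops.h>

    /* Update side (x86.c, paraphrased): run when the vendor module loads. */
    #define __KVM_X86_OP(func) \
            static_call_update(kvm_x86_##func, kvm_x86_ops.func);
    #define KVM_X86_OP(func) \
            WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
    #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
    #define KVM_X86_OP_OPTIONAL_RET0(func) \
            static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
                                               (void *)__static_call_return0);
    #include <asm/kvm-x86-ops.h>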

arch/x86/include/asm/kvm-x86-pmu-ops.h
Lines changed: 1 addition & 2 deletions

@@ -9,8 +9,7 @@ BUILD_BUG_ON(1)
  * "static_call_update()" calls.
  *
  * KVM_X86_PMU_OP_OPTIONAL() can be used for those functions that can have
- * a NULL definition, for example if "static_call_cond()" will be used
- * at the call sites.
+ * a NULL definition.
  */
 KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
 KVM_X86_PMU_OP(msr_idx_to_pmc)

arch/x86/include/asm/kvm_host.h
Lines changed: 2 additions & 2 deletions

@@ -2309,12 +2309,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
+	static_call(kvm_x86_vcpu_blocking)(vcpu);
 }
 
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
+	static_call(kvm_x86_vcpu_unblocking)(vcpu);
 }
 
 static inline int kvm_cpu_get_apicid(int mps_cpu)

arch/x86/kvm/irq.c
Lines changed: 1 addition & 1 deletion

@@ -157,7 +157,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	__kvm_migrate_apic_timer(vcpu);
 	__kvm_migrate_pit_timer(vcpu);
-	static_call_cond(kvm_x86_migrate_timers)(vcpu);
+	static_call(kvm_x86_migrate_timers)(vcpu);
 }
 
 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)

arch/x86/kvm/lapic.c
Lines changed: 12 additions & 12 deletions

@@ -738,7 +738,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 	if (unlikely(apic->apicv_active)) {
 		/* need to update RVI */
 		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
-		static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
+		static_call(kvm_x86_hwapic_irr_update)(apic->vcpu,
				apic_find_highest_irr(apic));
 	} else {
 		apic->irr_pending = false;
@@ -765,7 +765,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call_cond(kvm_x86_hwapic_isr_update)(vec);
+		static_call(kvm_x86_hwapic_isr_update)(vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -810,7 +810,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(apic->apicv_active))
-		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -2577,7 +2577,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 
 	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
 		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
-		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
+		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
 	}
 
 	apic->base_address = apic->vcpu->arch.apic_base &
@@ -2687,7 +2687,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	u64 msr_val;
 	int i;
 
-	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
 
 	if (!init_event) {
 		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
@@ -2742,9 +2742,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vcpu->arch.pv_eoi.msr_val = 0;
 	apic_update_ppr(apic);
 	if (apic->apicv_active) {
-		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
-		static_call_cond(kvm_x86_hwapic_isr_update)(-1);
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
+		static_call(kvm_x86_hwapic_isr_update)(-1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -3019,7 +3019,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	int r;
 
-	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
+	static_call(kvm_x86_apicv_pre_state_restore)(vcpu);
 
 	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
 	/* set SPIV separately to get count of SW disabled APICs right */
@@ -3046,9 +3046,9 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
 	kvm_apic_update_apicv(vcpu);
 	if (apic->apicv_active) {
-		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
-		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
-		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
+		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
+		static_call(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+		static_call(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))

arch/x86/kvm/pmu.c
Lines changed: 3 additions & 3 deletions

@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
+		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call_cond(kvm_x86_pmu_reset)(vcpu);
+	static_call(kvm_x86_pmu_reset)(vcpu);
 }
 
 
@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 		pmc_stop_counter(pmc);
 	}
 
-	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
+	static_call(kvm_x86_pmu_cleanup)(vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }

arch/x86/kvm/x86.c
Lines changed: 13 additions & 13 deletions

@@ -5122,7 +5122,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
 {
-	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+	static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
 }
@@ -9336,7 +9336,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
 			r = kvm_vcpu_do_singlestep(vcpu);
-		static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
+		static_call(kvm_x86_update_emulated_instruction)(vcpu);
 		__kvm_set_rflags(vcpu, ctxt->eflags);
 	}
 
@@ -10759,7 +10759,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 
 	bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
 
-	static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+	static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
@@ -10784,25 +10784,25 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 		bitmap_or((ulong *)eoi_exit_bitmap,
			  vcpu->arch.ioapic_handled_vectors,
			  to_hv_synic(vcpu)->vec_bitmap, 256);
-		static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
+		static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 		return;
 	}
 #endif
-	static_call_cond(kvm_x86_load_eoi_exitmap)(
+	static_call(kvm_x86_load_eoi_exitmap)(
		vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
 }
 
 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
-	static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+	static_call(kvm_x86_guest_memory_reclaimed)(kvm);
 }
 
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	if (!lapic_in_kernel(vcpu))
 		return;
 
-	static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
+	static_call(kvm_x86_set_apic_access_page_addr)(vcpu);
 }
 
 /*
@@ -11050,7 +11050,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * i.e. they can post interrupts even if APICv is temporarily disabled.
 	 */
 	if (kvm_lapic_enabled(vcpu))
-		static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+		static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
 	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -11099,7 +11099,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			break;
 
 		if (kvm_lapic_enabled(vcpu))
-			static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
 
 		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
 			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
@@ -11873,7 +11873,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
 	*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
 	kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
-	static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
+	static_call(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -12822,7 +12822,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 		mutex_unlock(&kvm->slots_lock);
 	}
 	kvm_unload_vcpu_mmus(kvm);
-	static_call_cond(kvm_x86_vm_destroy)(kvm);
+	static_call(kvm_x86_vm_destroy)(kvm);
 	kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
 	kvm_pic_destroy(kvm);
 	kvm_ioapic_destroy(kvm);
@@ -13513,7 +13513,7 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 void kvm_arch_start_assignment(struct kvm *kvm)
 {
 	if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
-		static_call_cond(kvm_x86_pi_start_assignment)(kvm);
+		static_call(kvm_x86_pi_start_assignment)(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
 
@@ -13650,7 +13650,7 @@ int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_ord
 #ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
 void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 {
-	static_call_cond(kvm_x86_gmem_invalidate)(start, end);
+	static_call(kvm_x86_gmem_invalidate)(start, end);
 }
 #endif
 
