
Commit 5bdecd8

RISC-V: KVM: Use NACL HFENCEs for KVM request based HFENCEs
When running under some other hypervisor, use SBI NACL based HFENCEs
for TLB shoot-down via KVM requests. This makes HFENCEs faster whenever
SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-14-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent 3e7d154 commit 5bdecd8
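
For context, every hunk below applies the same dispatch pattern: read the guest's VMID once, then either queue the fence into the SBI NACL shared memory (so the outer hypervisor can process it at the next synchronization instead of trapping on each HFENCE instruction) or fall back to issuing the HFENCE locally. The following is a minimal, self-contained sketch of that pattern under stated assumptions; the names nacl_available(), shmem_queue_hfence_gvma_vmid_all() and local_hfence_gvma_vmid_all() are illustrative stand-ins, not the kernel's kvm_riscv_nacl_available(), nacl_hfence_gvma_vmid_all() and kvm_riscv_local_hfence_gvma_vmid_all() helpers.

/*
 * Sketch of the NACL-vs-local HFENCE dispatch used throughout this patch.
 * All names below are hypothetical stubs standing in for the real
 * arch/riscv/kvm helpers.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the shared memory negotiated with the outer hypervisor. */
struct nacl_shmem_stub { int queued; };
static struct nacl_shmem_stub shmem;

/* Assume SBI nested acceleration was probed successfully at boot. */
static bool nacl_available(void)
{
	return true;
}

/* Queue the fence in shared memory for the outer hypervisor to replay. */
static void shmem_queue_hfence_gvma_vmid_all(struct nacl_shmem_stub *s,
					     unsigned long vmid)
{
	s->queued++;
	printf("queued HFENCE.GVMA for VMID %lu in NACL shmem\n", vmid);
}

/* Fallback: issue the HFENCE locally, which traps when running nested. */
static void local_hfence_gvma_vmid_all(unsigned long vmid)
{
	printf("local HFENCE.GVMA for VMID %lu\n", vmid);
}

/* Mirrors the shape of kvm_riscv_hfence_gvma_vmid_all_process() after this patch. */
static void hfence_gvma_vmid_all_process(unsigned long vmid)
{
	if (nacl_available())
		shmem_queue_hfence_gvma_vmid_all(&shmem, vmid);
	else
		local_hfence_gvma_vmid_all(vmid);
}

int main(void)
{
	hfence_gvma_vmid_all_process(7);
	return 0;
}

The same two-way branch is repeated in the diff for the GVMA, VVMA, ASID and GPA/GVA ranged variants; only the helper called on each side changes.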


arch/riscv/kvm/tlb.c

Lines changed: 40 additions & 17 deletions
@@ -14,6 +14,7 @@
 #include <asm/csr.h>
 #include <asm/cpufeature.h>
 #include <asm/insn-def.h>
+#include <asm/kvm_nacl.h>
 
 #define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
 
@@ -186,18 +187,24 @@ void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 
 void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
 }
 
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_vvma_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_vvma_all(vmid);
 }
 
 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
@@ -251,6 +258,7 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 {
+	unsigned long vmid;
 	struct kvm_riscv_hfence d = { 0 };
 	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 
@@ -259,26 +267,41 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 		case KVM_RISCV_HFENCE_UNKNOWN:
 			break;
 		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
-			kvm_riscv_local_hfence_gvma_vmid_gpa(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_gva(
-						READ_ONCE(v->vmid), d.asid,
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_all(
-						READ_ONCE(v->vmid), d.asid);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+			else
+				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
-			kvm_riscv_local_hfence_vvma_gva(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma(nacl_shmem(), vmid,
+						 d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+								d.size, d.order);
 			break;
 		default:
 			break;
