#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>
#include <asm/kvm_nacl.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
@@ -186,18 +187,24 @@ void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
186
187
187
188
void kvm_riscv_hfence_gvma_vmid_all_process (struct kvm_vcpu * vcpu )
188
189
{
189
- struct kvm_vmid * vmid ;
190
+ struct kvm_vmid * v = & vcpu -> kvm -> arch .vmid ;
191
+ unsigned long vmid = READ_ONCE (v -> vmid );
190
192
191
- vmid = & vcpu -> kvm -> arch .vmid ;
192
- kvm_riscv_local_hfence_gvma_vmid_all (READ_ONCE (vmid -> vmid ));
193
+ if (kvm_riscv_nacl_available ())
194
+ nacl_hfence_gvma_vmid_all (nacl_shmem (), vmid );
195
+ else
196
+ kvm_riscv_local_hfence_gvma_vmid_all (vmid );
193
197
}
194
198
195
199
void kvm_riscv_hfence_vvma_all_process (struct kvm_vcpu * vcpu )
196
200
{
197
- struct kvm_vmid * vmid ;
201
+ struct kvm_vmid * v = & vcpu -> kvm -> arch .vmid ;
202
+ unsigned long vmid = READ_ONCE (v -> vmid );
198
203
199
- vmid = & vcpu -> kvm -> arch .vmid ;
200
- kvm_riscv_local_hfence_vvma_all (READ_ONCE (vmid -> vmid ));
204
+ if (kvm_riscv_nacl_available ())
205
+ nacl_hfence_vvma_all (nacl_shmem (), vmid );
206
+ else
207
+ kvm_riscv_local_hfence_vvma_all (vmid );
201
208
}
202
209
203
210
static bool vcpu_hfence_dequeue (struct kvm_vcpu * vcpu ,
@@ -251,6 +258,7 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
251
258
252
259
void kvm_riscv_hfence_process (struct kvm_vcpu * vcpu )
253
260
{
261
+ unsigned long vmid ;
254
262
struct kvm_riscv_hfence d = { 0 };
255
263
struct kvm_vmid * v = & vcpu -> kvm -> arch .vmid ;
256
264
@@ -259,26 +267,41 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
259
267
case KVM_RISCV_HFENCE_UNKNOWN :
260
268
break ;
261
269
case KVM_RISCV_HFENCE_GVMA_VMID_GPA :
262
- kvm_riscv_local_hfence_gvma_vmid_gpa (
263
- READ_ONCE (v -> vmid ),
264
- d .addr , d .size , d .order );
270
+ vmid = READ_ONCE (v -> vmid );
271
+ if (kvm_riscv_nacl_available ())
272
+ nacl_hfence_gvma_vmid (nacl_shmem (), vmid ,
273
+ d .addr , d .size , d .order );
274
+ else
275
+ kvm_riscv_local_hfence_gvma_vmid_gpa (vmid , d .addr ,
276
+ d .size , d .order );
265
277
break ;
266
278
case KVM_RISCV_HFENCE_VVMA_ASID_GVA :
267
279
kvm_riscv_vcpu_pmu_incr_fw (vcpu , SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD );
268
- kvm_riscv_local_hfence_vvma_asid_gva (
269
- READ_ONCE (v -> vmid ), d .asid ,
270
- d .addr , d .size , d .order );
280
+ vmid = READ_ONCE (v -> vmid );
281
+ if (kvm_riscv_nacl_available ())
282
+ nacl_hfence_vvma_asid (nacl_shmem (), vmid , d .asid ,
283
+ d .addr , d .size , d .order );
284
+ else
285
+ kvm_riscv_local_hfence_vvma_asid_gva (vmid , d .asid , d .addr ,
286
+ d .size , d .order );
271
287
break ;
272
288
case KVM_RISCV_HFENCE_VVMA_ASID_ALL :
273
289
kvm_riscv_vcpu_pmu_incr_fw (vcpu , SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD );
274
- kvm_riscv_local_hfence_vvma_asid_all (
275
- READ_ONCE (v -> vmid ), d .asid );
290
+ vmid = READ_ONCE (v -> vmid );
291
+ if (kvm_riscv_nacl_available ())
292
+ nacl_hfence_vvma_asid_all (nacl_shmem (), vmid , d .asid );
293
+ else
294
+ kvm_riscv_local_hfence_vvma_asid_all (vmid , d .asid );
276
295
break ;
277
296
case KVM_RISCV_HFENCE_VVMA_GVA :
278
297
kvm_riscv_vcpu_pmu_incr_fw (vcpu , SBI_PMU_FW_HFENCE_VVMA_RCVD );
279
- kvm_riscv_local_hfence_vvma_gva (
280
- READ_ONCE (v -> vmid ),
281
- d .addr , d .size , d .order );
298
+ vmid = READ_ONCE (v -> vmid );
299
+ if (kvm_riscv_nacl_available ())
300
+ nacl_hfence_vvma (nacl_shmem (), vmid ,
301
+ d .addr , d .size , d .order );
302
+ else
303
+ kvm_riscv_local_hfence_vvma_gva (vmid , d .addr ,
304
+ d .size , d .order );
282
305
break ;
283
306
default :
284
307
break ;
0 commit comments