@@ -156,7 +156,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
 
 int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-	int ret;
+	int idx, ret;
 	unsigned long *val;
 	u32 addr, rd, rj, opcode;
 
@@ -167,7 +167,6 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
 	rj = inst.reg2_format.rj;
 	opcode = inst.reg2_format.opcode;
 	addr = vcpu->arch.gprs[rj];
-	ret = EMULATE_DO_IOCSR;
 	run->iocsr_io.phys_addr = addr;
 	run->iocsr_io.is_write = 0;
 	val = &vcpu->arch.gprs[rd];
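With ret now assigned explicitly on both the handled and the user-space paths in the hunk below, the unconditional EMULATE_DO_IOCSR default at the top of kvm_emu_iocsr() becomes dead code and is dropped here.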
@@ -207,20 +206,28 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 
 	if (run->iocsr_io.is_write) {
-		if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		if (ret == 0)
 			ret = EMULATE_DONE;
-		else
+		else {
+			ret = EMULATE_DO_IOCSR;
 			/* Save data and let user space to write it */
 			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
-
+		}
 		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
 	} else {
-		if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val))
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
+		if (ret == 0)
 			ret = EMULATE_DONE;
-		else
+		else {
+			ret = EMULATE_DO_IOCSR;
 			/* Save register id for iocsr read completion */
 			vcpu->arch.io_gpr = rd;
-
+		}
 		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
 	}
 
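Both branches above follow the same shape. Below is a minimal sketch of that pattern under one assumption: the helper name kvm_iocsr_bus_access() is hypothetical and not part of this patch. The point it illustrates is real, though: kvm->buses is protected by kvm->srcu, so kvm_io_bus_write()/kvm_io_bus_read() must run inside an SRCU read-side critical section, and the index returned by srcu_read_lock() must be handed back to srcu_read_unlock().

/* Hypothetical helper illustrating the pattern; not part of the patch. */
static int kvm_iocsr_bus_access(struct kvm_vcpu *vcpu, u32 addr,
				struct kvm_run *run, unsigned long *val)
{
	int idx, ret;

	/* Enter the SRCU read-side section that guards kvm->buses */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (run->iocsr_io.is_write)
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr,
				       run->iocsr_io.len, val);
	else
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr,
				      run->iocsr_io.len, val);
	/* Drop it before acting on the result; the bus pointer is no longer used */
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	/* 0: an in-kernel device handled the access; else defer to user space */
	return ret ? EMULATE_DO_IOCSR : EMULATE_DONE;
}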
@@ -359,7 +366,7 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
 
 int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
 {
-	int ret;
+	int idx, ret;
 	unsigned int op8, opcode, rd;
 	struct kvm_run *run = vcpu->run;
 
@@ -464,8 +471,10 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
 	 * it need not return to user space to handle the mmio
 	 * exception.
 	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
 			run->mmio.len, &vcpu->arch.gprs[rd]);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (!ret) {
 		update_pc(&vcpu->arch);
 		vcpu->mmio_needed = 0;
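The MMIO read takes the same tight SRCU scope: only the kvm_io_bus_read() call sits between lock and unlock. A zero return means an in-kernel device on KVM_MMIO_BUS completed the read, so emulation can advance the PC and clear mmio_needed without exiting to user space. Without the surrounding srcu_read_lock(), the bus lookup, which dereferences kvm->buses via srcu_dereference() internally, would be expected to trigger an RCU-lockdep warning on debug kernels.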
@@ -531,7 +540,7 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
 {
-	int ret;
+	int idx, ret;
 	unsigned int rd, op8, opcode;
 	unsigned long curr_pc, rd_val = 0;
 	struct kvm_run *run = vcpu->run;
@@ -631,7 +640,9 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
 	 * it need not return to user space to handle the mmio
 	 * exception.
 	 */
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (!ret)
 		return EMULATE_DONE;
 
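Scoping the read-side sections this narrowly also keeps SRCU grace periods short: writers that swap kvm->buses (device registration and unregistration) wait for readers via synchronize_srcu(), so the lock is held only across the bus access itself rather than the whole emulation path.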