Skip to content

Commit 9f0e6b9

Browse files
radimkrcmar authored and avpatel committed
KVM: RISC-V: remove unnecessary SBI reset state
The SBI reset state has only two variables -- pc and a1. The rest is known, so keep only the necessary information.

The reset structures make sense if we want userspace to control the reset state (which we do), but I'd still remove them now and reintroduce with the userspace interface later -- we could probably have just a single reset state per VM, instead of a reset state for each VCPU.

Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Radim Krčmář <rkrcmar@ventanamicro.com>
Link: https://lore.kernel.org/r/20250403112522.1566629-6-rkrcmar@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
1 parent a1c6684 commit 9f0e6b9

File tree

5 files changed

+46
-45
lines changed

5 files changed

+46
-45
lines changed

arch/riscv/include/asm/kvm_aia.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
6363
/* CPU AIA CSR context of Guest VCPU */
6464
struct kvm_vcpu_aia_csr guest_csr;
6565

66-
/* CPU AIA CSR context upon Guest VCPU reset */
67-
struct kvm_vcpu_aia_csr guest_reset_csr;
68-
6966
/* Guest physical address of IMSIC for this VCPU */
7067
gpa_t imsic_addr;
7168

arch/riscv/include/asm/kvm_host.h

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -193,6 +193,12 @@ struct kvm_vcpu_smstateen_csr {
193193
unsigned long sstateen0;
194194
};
195195

196+
struct kvm_vcpu_reset_state {
197+
spinlock_t lock;
198+
unsigned long pc;
199+
unsigned long a1;
200+
};
201+
196202
struct kvm_vcpu_arch {
197203
/* VCPU ran at least once */
198204
bool ran_atleast_once;
@@ -227,12 +233,8 @@ struct kvm_vcpu_arch {
227233
/* CPU Smstateen CSR context of Guest VCPU */
228234
struct kvm_vcpu_smstateen_csr smstateen_csr;
229235

230-
/* CPU context upon Guest VCPU reset */
231-
struct kvm_cpu_context guest_reset_context;
232-
spinlock_t reset_cntx_lock;
233-
234-
/* CPU CSR context upon Guest VCPU reset */
235-
struct kvm_vcpu_csr guest_reset_csr;
236+
/* CPU reset state of Guest VCPU */
237+
struct kvm_vcpu_reset_state reset_state;
236238

237239
/*
238240
* VCPU interrupts

arch/riscv/kvm/aia_device.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -526,12 +526,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
526526
void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
527527
{
528528
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
529-
struct kvm_vcpu_aia_csr *reset_csr =
530-
&vcpu->arch.aia_context.guest_reset_csr;
531529

532530
if (!kvm_riscv_aia_available())
533531
return;
534-
memcpy(csr, reset_csr, sizeof(*csr));
532+
memset(csr, 0, sizeof(*csr));
535533

536534
/* Proceed only if AIA was initialized successfully */
537535
if (!kvm_riscv_aia_initialized(vcpu->kvm))

arch/riscv/kvm/vcpu.c

Lines changed: 33 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -51,13 +51,41 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
5151
sizeof(kvm_vcpu_stats_desc),
5252
};
5353

54-
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
54+
static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu)
5555
{
5656
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
57-
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
5857
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
59-
struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
58+
struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
6059
void *vector_datap = cntx->vector.datap;
60+
61+
memset(cntx, 0, sizeof(*cntx));
62+
memset(csr, 0, sizeof(*csr));
63+
memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
64+
65+
/* Restore datap as it's not a part of the guest context. */
66+
cntx->vector.datap = vector_datap;
67+
68+
/* Load SBI reset values */
69+
cntx->a0 = vcpu->vcpu_id;
70+
71+
spin_lock(&reset_state->lock);
72+
cntx->sepc = reset_state->pc;
73+
cntx->a1 = reset_state->a1;
74+
spin_unlock(&reset_state->lock);
75+
76+
/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
77+
cntx->sstatus = SR_SPP | SR_SPIE;
78+
79+
cntx->hstatus |= HSTATUS_VTW;
80+
cntx->hstatus |= HSTATUS_SPVP;
81+
cntx->hstatus |= HSTATUS_SPV;
82+
83+
/* By default, make CY, TM, and IR counters accessible in VU mode */
84+
csr->scounteren = 0x7;
85+
}
86+
87+
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
88+
{
6189
bool loaded;
6290

6391
/**
@@ -72,18 +100,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
72100

73101
vcpu->arch.last_exit_cpu = -1;
74102

75-
memcpy(csr, reset_csr, sizeof(*csr));
76-
77-
spin_lock(&vcpu->arch.reset_cntx_lock);
78-
memcpy(cntx, reset_cntx, sizeof(*cntx));
79-
spin_unlock(&vcpu->arch.reset_cntx_lock);
80-
81-
memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
103+
kvm_riscv_vcpu_context_reset(vcpu);
82104

83105
kvm_riscv_vcpu_fp_reset(vcpu);
84106

85-
/* Restore datap as it's not a part of the guest context. */
86-
cntx->vector.datap = vector_datap;
87107
kvm_riscv_vcpu_vector_reset(vcpu);
88108

89109
kvm_riscv_vcpu_timer_reset(vcpu);
@@ -115,8 +135,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
115135
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
116136
{
117137
int rc;
118-
struct kvm_cpu_context *cntx;
119-
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
120138

121139
spin_lock_init(&vcpu->arch.mp_state_lock);
122140

@@ -136,24 +154,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
136154
/* Setup VCPU hfence queue */
137155
spin_lock_init(&vcpu->arch.hfence_lock);
138156

139-
/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
140-
spin_lock_init(&vcpu->arch.reset_cntx_lock);
141-
142-
spin_lock(&vcpu->arch.reset_cntx_lock);
143-
cntx = &vcpu->arch.guest_reset_context;
144-
cntx->sstatus = SR_SPP | SR_SPIE;
145-
cntx->hstatus = 0;
146-
cntx->hstatus |= HSTATUS_VTW;
147-
cntx->hstatus |= HSTATUS_SPVP;
148-
cntx->hstatus |= HSTATUS_SPV;
149-
spin_unlock(&vcpu->arch.reset_cntx_lock);
157+
spin_lock_init(&vcpu->arch.reset_state.lock);
150158

151159
if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
152160
return -ENOMEM;
153161

154-
/* By default, make CY, TM, and IR counters accessible in VU mode */
155-
reset_csr->scounteren = 0x7;
156-
157162
/* Setup VCPU timer */
158163
kvm_riscv_vcpu_timer_init(vcpu);
159164

arch/riscv/kvm/vcpu_sbi.c

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -159,11 +159,10 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
159159
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
160160
unsigned long pc, unsigned long a1)
161161
{
162-
spin_lock(&vcpu->arch.reset_cntx_lock);
163-
vcpu->arch.guest_reset_context.sepc = pc;
164-
vcpu->arch.guest_reset_context.a0 = vcpu->vcpu_id;
165-
vcpu->arch.guest_reset_context.a1 = a1;
166-
spin_unlock(&vcpu->arch.reset_cntx_lock);
162+
spin_lock(&vcpu->arch.reset_state.lock);
163+
vcpu->arch.reset_state.pc = pc;
164+
vcpu->arch.reset_state.a1 = a1;
165+
spin_unlock(&vcpu->arch.reset_state.lock);
167166

168167
kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
169168
}

0 commit comments

Comments (0)