
Commit c4bb3a2

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more kvm updates from Paolo Bonzini:

 - ARM fixes

 - RISC-V Svade and Svadu (accessed and dirty bit) extension support for
   host and guest

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: riscv: selftests: Add Svade and Svadu Extension to get-reg-list test
  RISC-V: KVM: Add Svade and Svadu Extensions Support for Guest/VM
  dt-bindings: riscv: Add Svade and Svadu Entries
  RISC-V: Add Svade and Svadu Extensions Support
  KVM: arm64: Use MDCR_EL2.HPME to evaluate overflow of hyp counters
  KVM: arm64: Ignore PMCNTENSET_EL0 while checking for overflow status
  KVM: arm64: Mark set_sysreg_masks() as inline to avoid build failure
  KVM: arm64: vgic-its: Add stronger type-checking to the ITS entry sizes
  KVM: arm64: vgic: Kill VGIC_MAX_PRIVATE definition
  KVM: arm64: vgic: Make vgic_get_irq() more robust
  KVM: arm64: vgic-v3: Sanitise guest writes to GICR_INVLPIR
2 parents 0ff86d8 + 4d911c7 commit c4bb3a2

File tree

24 files changed, +257 −119 lines changed

Documentation/devicetree/bindings/riscv/extensions.yaml

Lines changed: 28 additions & 0 deletions
@@ -171,6 +171,34 @@ properties:
             ratified at commit 3f9ed34 ("Add ability to manually trigger
             workflow. (#2)") of riscv-time-compare.
 
+        - const: svade
+          description: |
+            The standard Svade supervisor-level extension for SW-managed PTE A/D
+            bit updates as ratified in the 20240213 version of the privileged
+            ISA specification.
+
+            Both Svade and Svadu extensions control the hardware behavior when
+            the PTE A/D bits need to be set. The default behavior for the four
+            possible combinations of these extensions in the device tree are:
+            1) Neither Svade nor Svadu present in DT => It is technically
+               unknown whether the platform uses Svade or Svadu. Supervisor
+               software should be prepared to handle either hardware updating
+               of the PTE A/D bits or page faults when they need updated.
+            2) Only Svade present in DT => Supervisor must assume Svade to be
+               always enabled.
+            3) Only Svadu present in DT => Supervisor must assume Svadu to be
+               always enabled.
+            4) Both Svade and Svadu present in DT => Supervisor must assume
+               Svadu turned-off at boot time. To use Svadu, supervisor must
+               explicitly enable it using the SBI FWFT extension.
+
+        - const: svadu
+          description: |
+            The standard Svadu supervisor-level extension for hardware updating
+            of PTE A/D bits as ratified in the 20240528 version of the
+            privileged ISA specification. Please refer to Svade dt-binding
+            description for more details.
+
         - const: svinval
           description:
             The standard Svinval supervisor-level extension for fine-grained

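The four DT combinations above reduce to a small decision table. As a rough illustration only (not part of this commit; the enum and function names are hypothetical), supervisor software that has parsed riscv,isa-extensions could classify the platform like this:

#include <stdbool.h>

/* Hypothetical policy derived from the Svade/Svadu dt-binding rules above. */
enum pte_ad_policy {
	AD_UNKNOWN,	/* 1) neither: cope with HW updates or page faults */
	AD_SW_FAULTS,	/* 2) svade only: A/D updates raise page faults */
	AD_HW_UPDATES,	/* 3) svadu only: hardware sets the A/D bits */
	AD_HW_VIA_FWFT,	/* 4) both: Svadu off at boot, enable via SBI FWFT */
};

static enum pte_ad_policy classify_pte_ad(bool has_svade, bool has_svadu)
{
	if (has_svade && has_svadu)
		return AD_HW_VIA_FWFT;
	if (has_svadu)
		return AD_HW_UPDATES;
	if (has_svade)
		return AD_SW_FAULTS;
	return AD_UNKNOWN;
}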
arch/arm64/kvm/nested.c

Lines changed: 1 addition & 1 deletion
@@ -951,7 +951,7 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
 	return v;
 }
 
-static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
+static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
 {
 	int i = sr - __SANITISED_REG_START__;
 

arch/arm64/kvm/pmu-emul.c

Lines changed: 45 additions & 17 deletions
@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
 
-bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
 {
-	unsigned int hpmn;
+	unsigned int hpmn, n;
 
-	if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
-		return false;
+	if (!vcpu_has_nv(vcpu))
+		return 0;
+
+	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
+	n = vcpu->kvm->arch.pmcr_n;
+
+	/*
+	 * Programming HPMN to a value greater than PMCR_EL0.N is
+	 * CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
+	 * UNKNOWN number of counters (in our case, zero) are reserved for EL2.
+	 */
+	if (hpmn >= n)
+		return 0;
 
 	/*
 	 * Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
 	 * implementation choice that all counters are included in the second
 	 * range reserved for EL2/EL3.
 	 */
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return idx >= hpmn;
+	return GENMASK(n - 1, hpmn);
+}
+
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+	return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
 }
 
 u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
-	u64 hpmn;
 
 	if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
 		return mask;
 
-	hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
-	return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
+	return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
 }
 
 u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
@@ -375,15 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 	}
 }
 
-static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+/*
+ * Returns the PMU overflow state, which is true if there exists an event
+ * counter where the values of the global enable control, PMOVSSET_EL0[n], and
+ * PMINTENSET_EL1[n] are all 1.
+ */
+static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
 {
-	u64 reg = 0;
+	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 
-	if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
-		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
-		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
-		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
-	}
+	reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+
+	/*
+	 * PMCR_EL0.E is the global enable control for event counters available
+	 * to EL0 and EL1.
+	 */
+	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
+		reg &= kvm_pmu_hyp_counter_mask(vcpu);
+
+	/*
+	 * Otherwise, MDCR_EL2.HPME is the global enable control for event
+	 * counters reserved for EL2.
+	 */
+	if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
+		reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
 
 	return reg;
 }
@@ -396,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	if (!kvm_vcpu_has_pmu(vcpu))
 		return;
 
-	overflow = !!kvm_pmu_overflow_status(vcpu);
+	overflow = kvm_pmu_overflow_status(vcpu);
 	if (pmu->irq_level == overflow)
 		return;

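Worked example: with PMCR_EL0.N = 6 and MDCR_EL2.HPMN = 4, kvm_pmu_hyp_counter_mask() returns GENMASK(5, 4) = 0x30, i.e. counters 4 and 5 are reserved for EL2. The new gating in kvm_pmu_overflow_status() can be sketched standalone in plain C (register reads replaced by parameters; these helper names are illustrative, not KVM API):

#include <stdbool.h>
#include <stdint.h>

/* Counters [hpmn, n) belong to EL2; empty if HPMN is out of range. Assumes n < 64. */
static uint64_t hyp_counter_mask(unsigned int hpmn, unsigned int n)
{
	if (hpmn >= n)
		return 0;
	return ((1ULL << n) - 1) & ~((1ULL << hpmn) - 1);	/* GENMASK(n - 1, hpmn) */
}

/* Mirrors the rework: PMCR_EL0.E gates the EL0/EL1 counters, MDCR_EL2.HPME the EL2 range. */
static bool overflow_status(uint64_t pmovsset, uint64_t pmintenset,
			    bool pmcr_e, bool hpme,
			    unsigned int hpmn, unsigned int n)
{
	uint64_t reg = pmovsset & pmintenset;
	uint64_t hyp = hyp_counter_mask(hpmn, n);

	if (!pmcr_e)
		reg &= hyp;	/* only the EL2 counters may still assert */
	if (!hpme)
		reg &= ~hyp;	/* the EL2 counters are globally disabled */

	return reg;
}

This sketch also captures the second arm64 commit in the list: PMCNTENSET_EL0 no longer factors into the overflow status at all.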
arch/arm64/kvm/vgic/vgic-debug.c

Lines changed: 4 additions & 1 deletion
@@ -287,7 +287,10 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 	 * Expect this to succeed, as iter_mark_lpis() takes a reference on
 	 * every LPI to be visited.
 	 */
-	irq = vgic_get_irq(kvm, vcpu, iter->intid);
+	if (iter->intid < VGIC_NR_PRIVATE_IRQS)
+		irq = vgic_get_vcpu_irq(vcpu, iter->intid);
+	else
+		irq = vgic_get_irq(kvm, iter->intid);
 	if (WARN_ON_ONCE(!irq))
 		return -EINVAL;
 

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
@@ -322,7 +322,7 @@ int vgic_init(struct kvm *kvm)
 		goto out;
 
 	for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
-		struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
+		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
 
 		switch (dist->vgic_model) {
 		case KVM_DEV_TYPE_ARM_VGIC_V3:

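Both vgic hunks shown here follow the same pattern from the vgic_get_irq() rework: private interrupts (SGIs/PPIs, intid below VGIC_NR_PRIVATE_IRQS) are now looked up per-vCPU via vgic_get_vcpu_irq(), while vgic_get_irq() drops its vcpu argument and serves the kvm-wide SPIs/LPIs. A caller that may see either kind dispatches roughly like this (kernel-style sketch; this wrapper itself is not something the series adds):

/* Hypothetical dispatch mirroring the vgic-debug.c hunk above. */
static struct vgic_irq *lookup_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
				   u32 intid)
{
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return vgic_get_vcpu_irq(vcpu, intid);	/* SGI/PPI: per-vCPU */

	return vgic_get_irq(kvm, intid);		/* SPI/LPI: kvm-wide */
}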