
Commit e67391c

Merge tag 'kvm-riscv-fixes-6.8-1' of https://github.com/kvm-riscv/linux into HEAD
KVM/riscv fixes for 6.8, take #1

- Fix steal-time related sparse warnings

2 parents: 2f8ebe4 + f072b27 · commit: e67391c

2 files changed: +15 lines, -11 lines


arch/riscv/kernel/paravirt.c

Lines changed: 3 additions & 3 deletions
@@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
+static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
 
 static bool __init has_pv_steal_clock(void)
 {
@@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
 static u64 pv_time_steal_clock(int cpu)
 {
 	struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
-	u32 sequence;
-	u64 steal;
+	__le32 sequence;
+	__le64 steal;
 
 	/*
 	 * Check the sequence field before and after reading the steal
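For readers unfamiliar with the warning being fixed: sparse flags mixing of plain integer types (u32/u64) with the restricted endian types (__le32/__le64), because the steal-time shared-memory fields are stored little-endian (hence the le32_to_cpu()/cpu_to_le64() calls in the code here) and mixing the two can hide a missing byte-order conversion. Below is a rough userspace analogue of that conversion pattern, not the kernel implementation; the helper name load_le32() is invented for illustration and stands in for what le32_to_cpu() does on a __le32 value.

/* Userspace sketch of decoding a little-endian field explicitly,
 * independent of the host's native byte order. Illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a 32-bit little-endian value from a byte buffer. */
static uint32_t load_le32(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Bytes as they would appear in a little-endian shared structure. */
	uint8_t sequence_bytes[4] = { 0x2a, 0x00, 0x00, 0x00 };

	uint32_t sequence = load_le32(sequence_bytes);
	printf("sequence = %" PRIu32 "\n", sequence); /* prints 42 on any host */
	return 0;
}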

arch/riscv/kvm/vcpu_sbi_sta.c

Lines changed: 12 additions & 8 deletions
@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
 	gpa_t shmem = vcpu->arch.sta.shmem;
 	u64 last_steal = vcpu->arch.sta.last_steal;
-	u32 *sequence_ptr, sequence;
-	u64 *steal_ptr, steal;
+	__le32 __user *sequence_ptr;
+	__le64 __user *steal_ptr;
+	__le32 sequence_le;
+	__le64 steal_le;
+	u32 sequence;
+	u64 steal;
 	unsigned long hva;
 	gfn_t gfn;
 
@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 		return;
 	}
 
-	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+	sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
 			       offsetof(struct sbi_sta_struct, sequence));
-	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+	steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
 			    offsetof(struct sbi_sta_struct, steal));
 
-	if (WARN_ON(get_user(sequence, sequence_ptr)))
+	if (WARN_ON(get_user(sequence_le, sequence_ptr)))
 		return;
 
-	sequence = le32_to_cpu(sequence);
+	sequence = le32_to_cpu(sequence_le);
 	sequence += 1;
 
 	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
 		return;
 
-	if (!WARN_ON(get_user(steal, steal_ptr))) {
-		steal = le64_to_cpu(steal);
+	if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+		steal = le64_to_cpu(steal_le);
 		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
 		steal += vcpu->arch.sta.last_steal - last_steal;
 		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
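The comment preserved in the paravirt.c hunk above ("Check the sequence field before and after reading the steal") describes a seqcount-style handshake: the writer bumps the sequence field around each update of the steal value, and the reader retries if the sequence was odd or changed between its two reads. The sketch below is a simplified userspace illustration of that handshake only, with made-up names (steal_record, record_add_steal, record_read_steal) and without the memory barriers, endianness conversions, and user-memory accessors the real guest/hypervisor code needs.

/* Simplified single-process sketch of the sequence-count handshake. */
#include <stdint.h>
#include <stdio.h>

struct steal_record {
	uint32_t sequence;
	uint64_t steal;
};

/* Writer side: mark the record busy (odd), update, mark it stable (even). */
static void record_add_steal(struct steal_record *r, uint64_t delta)
{
	r->sequence += 1;          /* now odd: update in progress */
	r->steal += delta;
	r->sequence += 1;          /* even again: safe to read */
}

/* Reader side: retry while the record is mid-update or changed underneath us. */
static uint64_t record_read_steal(const struct steal_record *r)
{
	uint32_t before, after;
	uint64_t steal;

	do {
		before = r->sequence;
		steal = r->steal;
		after = r->sequence;
	} while ((before & 1) || before != after);

	return steal;
}

int main(void)
{
	struct steal_record r = { 0, 0 };

	record_add_steal(&r, 1000);
	record_add_steal(&r, 250);
	printf("steal = %llu\n", (unsigned long long)record_read_steal(&r));
	return 0;
}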
