@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
         gpa_t shmem = vcpu->arch.sta.shmem;
         u64 last_steal = vcpu->arch.sta.last_steal;
-        u32 *sequence_ptr, sequence;
-        u64 *steal_ptr, steal;
+        __le32 __user *sequence_ptr;
+        __le64 __user *steal_ptr;
+        __le32 sequence_le;
+        __le64 steal_le;
+        u32 sequence;
+        u64 steal;
         unsigned long hva;
         gfn_t gfn;

@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
                 return;
         }

-        sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+        sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
                                offsetof(struct sbi_sta_struct, sequence));
-        steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+        steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
                             offsetof(struct sbi_sta_struct, steal));

-        if (WARN_ON(get_user(sequence, sequence_ptr)))
+        if (WARN_ON(get_user(sequence_le, sequence_ptr)))
                 return;

-        sequence = le32_to_cpu(sequence);
+        sequence = le32_to_cpu(sequence_le);
         sequence += 1;

         if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
                 return;

-        if (!WARN_ON(get_user(steal, steal_ptr))) {
-                steal = le64_to_cpu(steal);
+        if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+                steal = le64_to_cpu(steal_le);
                 vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
                 steal += vcpu->arch.sta.last_steal - last_steal;
                 WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
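The change above is purely a typing fix: the little-endian fields read from and written to the guest's SBI STA shared-memory page now go through dedicated __le32/__le64 scratch variables, and the pointers into that page carry the __user qualifier, which lets sparse check both the byte-order conversions and the user-memory accesses.

For illustration, a minimal userspace sketch of the same read-modify-write pattern applied to the sequence counter. It assumes an ordinary local buffer in place of the guest shared memory, with memcpy() standing in for get_user()/put_user() and Linux libc's le32toh()/htole32() standing in for le32_to_cpu()/cpu_to_le32(); it is an illustration only, not the kernel code:

/*
 * Hypothetical userspace analogue of the sequence update above: the
 * field is always stored little-endian, so it is read into a
 * little-endian scratch variable, converted to CPU byte order,
 * incremented, and written back in little-endian form.
 */
#include <endian.h>   /* le32toh()/htole32() on Linux (glibc/musl) */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void bump_le32_sequence(unsigned char *field)
{
        uint32_t sequence_le;
        uint32_t sequence;

        memcpy(&sequence_le, field, sizeof(sequence_le)); /* stands in for get_user() */
        sequence = le32toh(sequence_le);                  /* le32_to_cpu() */
        sequence += 1;
        sequence_le = htole32(sequence);                  /* cpu_to_le32() */
        memcpy(field, &sequence_le, sizeof(sequence_le)); /* stands in for put_user() */
}

int main(void)
{
        unsigned char field[4] = { 0x2a, 0x00, 0x00, 0x00 }; /* 42, little-endian */
        uint32_t check;

        bump_le32_sequence(field);
        memcpy(&check, field, sizeof(check));
        printf("%u\n", (unsigned int)le32toh(check));        /* prints 43 */
        return 0;
}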