@@ -3121,15 +3121,17 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 	return data.clock;
 }
 
-static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
+static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
+				    struct kvm_vcpu *vcpu,
 				    struct gfn_to_pfn_cache *gpc,
-				    unsigned int offset,
-				    bool force_tsc_unstable)
+				    unsigned int offset)
 {
-	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info hv_clock;
 	unsigned long flags;
 
+	memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
+
 	read_lock_irqsave(&gpc->lock, flags);
 	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
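The hunk above makes kvm_setup_guest_pvclock() operate on a caller-provided reference pvclock: it snapshots *ref_hv_clock into a local hv_clock and no longer reads vcpu->arch.hv_clock, so per-consumer tweaks (kvmclock's GUEST_STOPPED flag, Xen's TSC-unstable quirk) cannot leak from one guest clock page to another. For context, the guest-visible structure being copied is the 32-byte pvclock ABI; this is the existing definition from arch/x86/include/asm/pvclock-abi.h, reproduced here for reference and not part of the diff:

/* Guest/host shared pvclock ABI (arch/x86/include/asm/pvclock-abi.h). */
struct pvclock_vcpu_time_info {
	u32 version;		/* seqcount: odd while an update is in flight */
	u32 pad0;
	u64 tsc_timestamp;	/* host TSC value at the time of update */
	u64 system_time;	/* nanoseconds at tsc_timestamp */
	u32 tsc_to_system_mul;	/* 32.32 fixed-point TSC-to-ns multiplier */
	s8  tsc_shift;		/* binary pre-shift applied to the TSC delta */
	u8  flags;		/* PVCLOCK_TSC_STABLE_BIT, PVCLOCK_GUEST_STOPPED, ... */
	u8  pad[2];
} __attribute__((__packed__));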
@@ -3149,52 +3151,34 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	 * it is consistent.
 	 */
 
-	guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
+	guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
 	smp_wmb();
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
-
-	if (vcpu->pvclock_set_guest_stopped_request) {
-		vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
-		vcpu->pvclock_set_guest_stopped_request = false;
-	}
-
-	memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
+	hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
 
-	if (force_tsc_unstable)
-		guest_hv_clock->flags &= ~PVCLOCK_TSC_STABLE_BIT;
+	memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
 
 	smp_wmb();
 
-	guest_hv_clock->version = ++vcpu->hv_clock.version;
+	guest_hv_clock->version = ++hv_clock.version;
 
 	kvm_gpc_mark_dirty_in_slot(gpc);
 	read_unlock_irqrestore(&gpc->lock, flags);
 
-	trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
+	trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
 }
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
+	struct pvclock_vcpu_time_info hv_clock = {};
 	unsigned long flags, tgt_tsc_khz;
 	unsigned seq;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
 	s64 kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	u8 pvclock_flags;
 	bool use_master_clock;
-#ifdef CONFIG_KVM_XEN
-	/*
-	 * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
-	 * explicitly told to use TSC as its clocksource Xen will not set this bit.
-	 * This default behaviour led to bugs in some guest kernels which cause
-	 * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
-	 */
-	bool xen_pvclock_tsc_unstable =
-		ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
-#endif
 
 	kernel_ns = 0;
 	host_tsc = 0;
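The version handling retained above is the host half of the pvclock seqcount protocol: the host publishes an odd version before touching the shared page and an even one after, with smp_wmb() ordering the stores. A guest-side reader, sketched below under the assumption of a flat volatile mapping of the time-info page (a hypothetical helper, not code from this commit), retries until it observes the same even version around its reads:

/* Hypothetical guest-side reader illustrating the version protocol. */
static u64 pvclock_read_system_time(volatile struct pvclock_vcpu_time_info *ti)
{
	u32 version;
	u64 system_time;

	do {
		version = ti->version;	/* odd means an update is in flight */
		rmb();			/* pairs with the host's first smp_wmb() */
		system_time = ti->system_time;
		rmb();			/* pairs with the host's second smp_wmb() */
	} while ((version & 1) || version != ti->version);

	return system_time;
}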
@@ -3255,35 +3239,58 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
 		kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
-				   &vcpu->hv_clock.tsc_shift,
-				   &vcpu->hv_clock.tsc_to_system_mul);
+				   &vcpu->pvclock_tsc_shift,
+				   &vcpu->pvclock_tsc_mul);
 		vcpu->hw_tsc_khz = tgt_tsc_khz;
 		kvm_xen_update_tsc_info(v);
 	}
 
-	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
-	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+	hv_clock.tsc_shift = vcpu->pvclock_tsc_shift;
+	hv_clock.tsc_to_system_mul = vcpu->pvclock_tsc_mul;
+	hv_clock.tsc_timestamp = tsc_timestamp;
+	hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
 	vcpu->last_guest_tsc = tsc_timestamp;
 
 	/* If the host uses TSC clocksource, then it is stable */
-	pvclock_flags = 0;
+	hv_clock.flags = 0;
 	if (use_master_clock)
-		pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
+		hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
+
+	if (vcpu->pv_time.active) {
+		/*
+		 * GUEST_STOPPED is only supported by kvmclock, and KVM's
+		 * historic behavior is to only process the request if kvmclock
+		 * is active/enabled.
+		 */
+		if (vcpu->pvclock_set_guest_stopped_request) {
+			hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
+			vcpu->pvclock_set_guest_stopped_request = false;
+		}
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
 
-	vcpu->hv_clock.flags = pvclock_flags;
+		hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
+	}
+
+	kvm_hv_setup_tsc_page(v->kvm, &hv_clock);
 
-	if (vcpu->pv_time.active)
-		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0, false);
 #ifdef CONFIG_KVM_XEN
+	/*
+	 * For Xen guests we may need to override PVCLOCK_TSC_STABLE_BIT as unless
+	 * explicitly told to use TSC as its clocksource Xen will not set this bit.
+	 * This default behaviour led to bugs in some guest kernels which cause
+	 * problems if they observe PVCLOCK_TSC_STABLE_BIT in the pvclock flags.
+	 *
+	 * Note! Clear TSC_STABLE only for Xen clocks, i.e. the order matters!
+	 */
+	if (ka->xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
+		hv_clock.flags &= ~PVCLOCK_TSC_STABLE_BIT;
+
 	if (vcpu->xen.vcpu_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
-					offsetof(struct compat_vcpu_info, time),
-					xen_pvclock_tsc_unstable);
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_info_cache,
+					offsetof(struct compat_vcpu_info, time));
 	if (vcpu->xen.vcpu_time_info_cache.active)
-		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0,
-					xen_pvclock_tsc_unstable);
+		kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->xen.vcpu_time_info_cache, 0);
 #endif
-	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 	return 0;
 }
 
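kvm_get_time_scale() in the hunk above computes the (tsc_to_system_mul, tsc_shift) pair the guest uses to turn a raw TSC delta into nanoseconds: pre-shift the delta, then apply a 32.32 fixed-point multiply. A standalone sketch of that conversion, assuming 128-bit arithmetic is available (the kernel expresses the same operation with mul_u64_u32_shr()); the helper name is illustrative, not from this commit:

/* Sketch of the guest-side delta-to-nanoseconds conversion. */
static inline u64 pvclock_delta_to_ns(u64 tsc_delta, u32 tsc_to_system_mul,
				      s8 tsc_shift)
{
	/* Pre-scale the delta by the binary shift... */
	if (tsc_shift < 0)
		tsc_delta >>= -tsc_shift;
	else
		tsc_delta <<= tsc_shift;

	/* ...then apply the 32.32 fixed-point multiplier. */
	return (u64)(((unsigned __int128)tsc_delta * tsc_to_system_mul) >> 32);
}

Guest time at any instant is then system_time + pvclock_delta_to_ns(rdtsc() - tsc_timestamp, tsc_to_system_mul, tsc_shift), which is why kvm_guest_time_update() must publish all four fields as one consistent snapshot.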
@@ -6910,23 +6917,15 @@ static int kvm_arch_suspend_notifier(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i;
-	int ret = 0;
-
-	mutex_lock(&kvm->lock);
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (!vcpu->arch.pv_time.active)
-			continue;
 
-		ret = kvm_set_guest_paused(vcpu);
-		if (ret) {
-			kvm_err("Failed to pause guest VCPU%d: %d\n",
-				vcpu->vcpu_id, ret);
-			break;
-		}
-	}
-	mutex_unlock(&kvm->lock);
+	/*
+	 * Ignore the return, marking the guest paused only "fails" if the vCPU
+	 * isn't using kvmclock; continuing on is correct and desirable.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		(void)kvm_set_guest_paused(vcpu);
 
-	return ret ? NOTIFY_BAD : NOTIFY_DONE;
+	return NOTIFY_DONE;
 }
 
 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
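The suspend-notifier hunk above drops the kvm->lock critical section and the error path entirely. That is safe because kvm_set_guest_paused() only returns an error when the vCPU has no active kvmclock page; for context, its existing implementation in x86.c looks roughly like the following (reproduced from memory, not part of the diff):

static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
	/* "Fails" only when the vCPU isn't using kvmclock at all. */
	if (!vcpu->arch.pv_time.active)
		return -EINVAL;
	vcpu->arch.pvclock_set_guest_stopped_request = true;
	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
	return 0;
}

Skipping inactive vCPUs up front (the old continue) and ignoring -EINVAL here are therefore equivalent, so the notifier can unconditionally return NOTIFY_DONE.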