@@ -48,7 +48,14 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 	if (!gpc->active)
 		return false;
 
-	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+	/*
+	 * If the page was cached from a memslot, make sure the memslots have
+	 * not been re-configured.
+	 */
+	if (!kvm_is_error_gpa(gpc->gpa) && gpc->generation != slots->generation)
+		return false;
+
+	if (kvm_is_error_hva(gpc->uhva))
 		return false;
 
 	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
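With this split, an HVA-based cache (invalid gpc->gpa) skips the memslot-generation check entirely, while a GPA-based cache still fails validation after a memslot change. For context, kvm_gpc_check() is only meaningful under the cache's read lock; below is a hedged sketch of the usual check/refresh retry loop a consumer would use (the helper name read_cached_word and the payload type are made up for illustration, not part of this patch):

/*
 * Illustrative consumer pattern: validate the cache under its read lock
 * and fall back to a refresh when the check fails (e.g. the memslots
 * changed for a GPA-based cache, or the mapping was invalidated).
 */
static int read_cached_word(struct gfn_to_pfn_cache *gpc, u64 *val)
{
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(gpc, sizeof(*val))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-establish the mapping outside the read lock. */
		if (kvm_gpc_refresh(gpc, sizeof(*val)))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	*val = *(u64 *)gpc->khva;
	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}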
@@ -209,22 +216,27 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 		return -EFAULT;
 	}
 
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
 			     unsigned long len)
 {
-	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
-	unsigned long page_offset = offset_in_page(gpa);
+	unsigned long page_offset;
 	bool unmap_old = false;
 	unsigned long old_uhva;
 	kvm_pfn_t old_pfn;
 	bool hva_change = false;
 	void *old_khva;
 	int ret;
 
+	/* Either gpa or uhva must be valid, but not both */
+	if (WARN_ON_ONCE(kvm_is_error_gpa(gpa) == kvm_is_error_hva(uhva)))
+		return -EINVAL;
+
 	/*
-	 * If must fit within a single page. The 'len' argument is
-	 * only to enforce that.
+	 * The cached access must fit within a single page. The 'len' argument
+	 * exists only to enforce that.
 	 */
+	page_offset = kvm_is_error_gpa(gpa) ? offset_in_page(uhva) :
+					      offset_in_page(gpa);
 	if (page_offset + len > PAGE_SIZE)
 		return -EINVAL;
 
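The new WARN_ON_ONCE() enforces that exactly one of the two addresses is usable: the refresh is rejected when both are valid or both are invalid. A spelled-out version of the same predicate, purely illustrative (the helper name gpc_args_valid is made up):

/*
 * Illustrative expansion of the validity check above, not part of the
 * patch.  Exactly one of the two addresses must be usable.
 */
static bool gpc_args_valid(gpa_t gpa, unsigned long uhva)
{
	bool gpa_valid  = !kvm_is_error_gpa(gpa);	/* i.e. not INVALID_GPA */
	bool uhva_valid = !kvm_is_error_hva(uhva);	/* i.e. not KVM_HVA_ERR_BAD */

	/* The "==" test in the patch is the negation of this exclusive-or. */
	return gpa_valid ^ uhva_valid;
}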
@@ -246,29 +258,39 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
 	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
-	/* Refresh the userspace HVA if necessary */
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva)) {
-		gfn_t gfn = gpa_to_gfn(gpa);
-
-		gpc->gpa = gpa;
-		gpc->generation = slots->generation;
-		gpc->memslot = __gfn_to_memslot(slots, gfn);
-		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+	if (kvm_is_error_gpa(gpa)) {
+		gpc->gpa = INVALID_GPA;
+		gpc->memslot = NULL;
+		gpc->uhva = PAGE_ALIGN_DOWN(uhva);
 
-		if (kvm_is_error_hva(gpc->uhva)) {
-			ret = -EFAULT;
-			goto out;
-		}
-
-		/*
-		 * Even if the GPA and/or the memslot generation changed, the
-		 * HVA may still be the same.
-		 */
 		if (gpc->uhva != old_uhva)
 			hva_change = true;
 	} else {
-		gpc->uhva = old_uhva;
+		struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
+
+		if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+		    kvm_is_error_hva(gpc->uhva)) {
+			gfn_t gfn = gpa_to_gfn(gpa);
+
+			gpc->gpa = gpa;
+			gpc->generation = slots->generation;
+			gpc->memslot = __gfn_to_memslot(slots, gfn);
+			gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+			if (kvm_is_error_hva(gpc->uhva)) {
+				ret = -EFAULT;
+				goto out;
+			}
+
+			/*
+			 * Even if the GPA and/or the memslot generation changed, the
+			 * HVA may still be the same.
+			 */
+			if (gpc->uhva != old_uhva)
+				hva_change = true;
+		} else {
+			gpc->uhva = old_uhva;
+		}
 	}
 
 	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
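One subtlety carried over from the old code: both branches compare page-aligned HVAs, so only a change of the backing page forces a remap; moving within the same page merely updates the offset applied to gpc->khva. A minimal illustration of that comparison (the helper name needs_remap is hypothetical):

/* Illustrative only, not part of the patch. */
static bool needs_remap(unsigned long old_uhva, unsigned long new_uhva)
{
	/*
	 * Two userspace addresses inside the same page compare equal after
	 * PAGE_ALIGN_DOWN(), so the existing kernel mapping can be reused
	 * and only the in-page offset of gpc->khva changes.
	 */
	return PAGE_ALIGN_DOWN(new_uhva) != PAGE_ALIGN_DOWN(old_uhva);
}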
@@ -319,7 +341,15 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+	/*
+	 * If the GPA is valid then ignore the HVA, as a cache can be GPA-based
+	 * or HVA-based, not both. For GPA-based caches, the HVA will be
+	 * recomputed during refresh if necessary.
+	 */
+	unsigned long uhva = kvm_is_error_gpa(gpc->gpa) ? gpc->uhva :
+							  KVM_HVA_ERR_BAD;
+
+	return __kvm_gpc_refresh(gpc, gpc->gpa, uhva, len);
 }
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
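Either way, exactly one valid address reaches __kvm_gpc_refresh(), which is what the WARN_ON_ONCE() added earlier enforces. Spelled out for both kinds of cache (illustrative only):

/*
 * What kvm_gpc_refresh() passes down, given the selection above:
 *
 *   GPA-based cache (gpc->gpa valid):
 *       __kvm_gpc_refresh(gpc, gpc->gpa, KVM_HVA_ERR_BAD, len);
 *   HVA-based cache (gpc->gpa == INVALID_GPA):
 *       __kvm_gpc_refresh(gpc, INVALID_GPA, gpc->uhva, len);
 */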
@@ -329,10 +359,12 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 
 	gpc->kvm = kvm;
 	gpc->pfn = KVM_PFN_ERR_FAULT;
+	gpc->gpa = INVALID_GPA;
 	gpc->uhva = KVM_HVA_ERR_BAD;
 }
 
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long uhva,
+			      unsigned long len)
 {
 	struct kvm *kvm = gpc->kvm;
 
@@ -353,7 +385,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return __kvm_gpc_refresh(gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, gpa, uhva, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, gpa, KVM_HVA_ERR_BAD, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long uhva, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, INVALID_GPA, uhva, len);
 }
 
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
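The new kvm_gpc_activate_hva() lets a caller pin a cache to a fixed userspace address instead of a guest physical address. A hedged sketch of the intended lifecycle, assuming a caller that already knows the HVA; the function name demo_hva_cache is made up, and in real code the cache would normally be embedded in a long-lived structure rather than stack-allocated:

/* Hedged usage sketch, not taken from this patch. */
static int demo_hva_cache(struct kvm *kvm, unsigned long uhva)
{
	struct gfn_to_pfn_cache gpc;
	int ret;

	kvm_gpc_init(&gpc, kvm);	/* gpc.gpa now defaults to INVALID_GPA */

	/* Activate an HVA-based cache covering a u64 at the given address. */
	ret = kvm_gpc_activate_hva(&gpc, uhva, sizeof(u64));
	if (ret)
		return ret;

	/* ... access gpc.khva under read_lock(&gpc.lock) + kvm_gpc_check() ... */

	kvm_gpc_deactivate(&gpc);
	return 0;
}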