Commit 406c109

Paul Durrant authored and sean-jc committed
KVM: pfncache: include page offset in uhva and use it consistently
Currently the pfncache page offset is sometimes determined using the gpa
and sometimes the khva, whilst the uhva is always page-aligned. After a
subsequent patch is applied the gpa will not always be valid, so adjust
the code to include the page offset in the uhva and use it consistently
as the source of truth. Also, where a page-aligned address is required,
use PAGE_ALIGN_DOWN() for clarity.

No functional change intended.

Signed-off-by: Paul Durrant <pdurrant@amazon.com>
Reviewed-by: David Woodhouse <dwmw@amazon.co.uk>
Link: https://lore.kernel.org/r/20240215152916.1158-8-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
1 parent 53e63e9 commit 406c109
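To make the new invariant concrete, here is a minimal userspace sketch (not kernel code) of the offset handling the patch standardizes on: the page offset is folded into the uhva once, and every other address is then derived from the uhva alone. PAGE_ALIGN_DOWN() and offset_in_page() below are simplified stand-ins for the kernel macros of the same name, and the addresses and 4 KiB PAGE_SIZE are illustrative assumptions.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN_DOWN(addr)	((addr) & ~(PAGE_SIZE - 1))
#define offset_in_page(addr)	((unsigned long)(addr) & (PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical cached addresses, for illustration only. */
	unsigned long gpa = 0x1234abcUL;		/* guest physical address */
	unsigned long uhva_page = 0x7f00aaaa0000UL;	/* page-aligned userspace HVA */
	unsigned long khva_page = 0x7f00bbbb0000UL;	/* page-aligned kernel mapping */

	/* Fold the page offset into the uhva once, at refresh time... */
	unsigned long uhva = uhva_page + offset_in_page(gpa);

	/* ...then derive the kernel mapping address from the uhva alone. */
	unsigned long khva = khva_page + offset_in_page(uhva);

	/* The length check in kvm_gpc_check() likewise becomes gpa-independent. */
	unsigned long len = 8;
	int fits = offset_in_page(uhva) + len <= PAGE_SIZE;

	printf("uhva=%#lx khva=%#lx page=%#lx fits=%d\n",
	       uhva, khva, PAGE_ALIGN_DOWN(khva), fits);
	return 0;
}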

1 file changed, +21 -8 lines

virt/kvm/pfncache.c

@@ -48,10 +48,10 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
 	if (!gpc->active)
 		return false;
 
-	if (offset_in_page(gpc->gpa) + len > PAGE_SIZE)
+	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
 		return false;
 
-	if (gpc->generation != slots->generation || kvm_is_error_hva(gpc->uhva))
+	if (offset_in_page(gpc->uhva) + len > PAGE_SIZE)
 		return false;
 
 	if (!gpc->valid)
@@ -119,7 +119,7 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 {
 	/* Note, the new page offset may be different than the old! */
-	void *old_khva = gpc->khva - offset_in_page(gpc->khva);
+	void *old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
 	kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
 	void *new_khva = NULL;
 	unsigned long mmu_seq;
@@ -192,7 +192,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 
 	gpc->valid = true;
 	gpc->pfn = new_pfn;
-	gpc->khva = new_khva + offset_in_page(gpc->gpa);
+	gpc->khva = new_khva + offset_in_page(gpc->uhva);
 
 	/*
 	 * Put the reference to the _new_ pfn. The pfn is now tracked by the
@@ -217,6 +217,7 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	bool unmap_old = false;
 	unsigned long old_uhva;
 	kvm_pfn_t old_pfn;
+	bool hva_change = false;
 	void *old_khva;
 	int ret;
 
@@ -242,10 +243,10 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	}
 
 	old_pfn = gpc->pfn;
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_uhva = gpc->uhva;
+	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
+	old_uhva = PAGE_ALIGN_DOWN(gpc->uhva);
 
-	/* If the userspace HVA is invalid, refresh that first */
+	/* Refresh the userspace HVA if necessary */
 	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
 	    kvm_is_error_hva(gpc->uhva)) {
 		gfn_t gfn = gpa_to_gfn(gpa);
@@ -259,13 +260,25 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 			ret = -EFAULT;
 			goto out;
 		}
+
+		/*
+		 * Even if the GPA and/or the memslot generation changed, the
+		 * HVA may still be the same.
+		 */
+		if (gpc->uhva != old_uhva)
+			hva_change = true;
+	} else {
+		gpc->uhva = old_uhva;
 	}
 
+	/* Note: the offset must be correct before calling hva_to_pfn_retry() */
+	gpc->uhva += page_offset;
+
 	/*
 	 * If the userspace HVA changed or the PFN was already invalid,
 	 * drop the lock and do the HVA to PFN lookup again.
 	 */
-	if (!gpc->valid || old_uhva != gpc->uhva) {
+	if (!gpc->valid || hva_change) {
 		ret = hva_to_pfn_retry(gpc);
 	} else {
 		/*
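Pulling the hunks together, the following self-contained sketch paraphrases the post-patch refresh flow, in particular how the hva_change flag avoids a needless remap when the GPA changes but the backing HVA does not. The cache struct, lookup_hva(), and all addresses here are hypothetical stand-ins, not the kernel's types or helpers; locking, memslot generations, and error cleanup are elided.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN_DOWN(x)	((x) & ~(PAGE_SIZE - 1))
#define ERROR_HVA		(~0UL)

struct cache {				/* stand-in for struct gfn_to_pfn_cache */
	unsigned long gpa, uhva;
	bool valid;
};

/* Hypothetical stand-in for the gfn -> memslot -> hva lookup. */
static unsigned long lookup_hva(unsigned long gpa)
{
	return 0x7f00aaaa0000UL + (PAGE_ALIGN_DOWN(gpa) & 0xfffffUL);
}

static int refresh(struct cache *c, unsigned long gpa)
{
	unsigned long page_offset = gpa & (PAGE_SIZE - 1);
	unsigned long old_uhva = PAGE_ALIGN_DOWN(c->uhva);
	bool hva_change = false;

	if (c->gpa != gpa || c->uhva == ERROR_HVA) {
		c->gpa = gpa;
		c->uhva = lookup_hva(gpa);	/* page-aligned result */
		if (c->uhva == ERROR_HVA)
			return -1;
		/* The GPA changed, but the HVA may still be the same. */
		if (c->uhva != old_uhva)
			hva_change = true;
	} else {
		c->uhva = old_uhva;
	}

	/* Fold the offset in before deciding whether to remap. */
	c->uhva += page_offset;

	if (!c->valid || hva_change) {
		printf("remap for uhva %#lx\n", c->uhva);
		c->valid = true;
	} else {
		printf("reuse mapping, uhva now %#lx\n", c->uhva);
	}
	return 0;
}

int main(void)
{
	struct cache c = { .uhva = ERROR_HVA };

	refresh(&c, 0x1234abcUL);	/* first refresh: must remap */
	refresh(&c, 0x1234f00UL);	/* same page, new offset: mapping reused */
	return 0;
}

On the first call the cache is invalid, so the mapping is created; on the second, only the offset within the already-mapped page changes, which is exactly the case the hva_change flag lets the refresh path skip.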
