Commit 37d1b5d

Author and committer: Claudio Imbrenda
KVM: s390: stop using page->index for non-shadow gmaps
The host_to_guest radix tree will now map userspace addresses to guest
addresses, instead of userspace addresses to segment tables. When segment
tables and page tables are needed, they are found using an additional
gmap_table_walk(). This gets rid of all usage of page->index for
non-shadow gmaps.

Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Link: https://lore.kernel.org/r/20250123144627.312456-11-imbrenda@linux.ibm.com
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-ID: <20250123144627.312456-11-imbrenda@linux.ibm.com>
Parent: c9f721e
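The core of the change is the value encoding in the host_to_guest radix tree: the guest address itself is stored as the tree value, tagged with bit 0 so that a stored guest address of 0 can be told apart from a NULL "not found" lookup result. Below is a minimal standalone userspace sketch of that tagging scheme, not the kernel code itself; the HPAGE_MASK value is an assumption matching the 1 MB segment alignment on s390.

/* Illustrative sketch of the gaddr tagging introduced by this commit. */
#include <stdio.h>

#define HPAGE_MASK		(~0xfffffUL)	/* assumed 1 MB segment alignment */
#define VALID_GADDR_FLAG	1UL
#define IS_GADDR_VALID(gaddr)	((gaddr) & VALID_GADDR_FLAG)
#define MAKE_VALID_GADDR(gaddr)	(((gaddr) & HPAGE_MASK) | VALID_GADDR_FLAG)

int main(void)
{
	/* Guest address 0, as it would be stored as a radix tree value. */
	unsigned long stored = MAKE_VALID_GADDR(0x0UL);
	/* What a failed radix tree lookup returns (NULL, i.e. 0). */
	unsigned long not_found = 0UL;

	printf("stored gaddr 0    -> valid: %d\n", (int)IS_GADDR_VALID(stored));
	printf("failed lookup (0) -> valid: %d\n", (int)IS_GADDR_VALID(not_found));
	return 0;
}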


arch/s390/mm/gmap.c

Lines changed: 54 additions & 51 deletions
@@ -24,8 +24,20 @@
 #include <asm/page.h>
 #include <asm/tlb.h>
 
+/*
+ * The address is saved in a radix tree directly; NULL would be ambiguous,
+ * since 0 is a valid address, and NULL is returned when nothing was found.
+ * The lower bits are ignored by all users of the macro, so it can be used
+ * to distinguish a valid address 0 from a NULL.
+ */
+#define VALID_GADDR_FLAG 1
+#define IS_GADDR_VALID(gaddr) ((gaddr) & VALID_GADDR_FLAG)
+#define MAKE_VALID_GADDR(gaddr) (((gaddr) & HPAGE_MASK) | VALID_GADDR_FLAG)
+
 #define GMAP_SHADOW_FAKE_TABLE 1ULL
 
+static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
+
 static struct page *gmap_alloc_crst(void)
 {
 	struct page *page;
@@ -82,7 +94,6 @@ struct gmap *gmap_alloc(unsigned long limit)
 	page = gmap_alloc_crst();
 	if (!page)
 		goto out_free;
-	page->index = 0;
 	list_add(&page->lru, &gmap->crst_list);
 	table = page_to_virt(page);
 	crst_table_init(table, etype);
@@ -303,7 +314,6 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 		list_add(&page->lru, &gmap->crst_list);
 		*table = __pa(new) | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
-		page->index = gaddr;
 		page = NULL;
 	}
 	spin_unlock(&gmap->guest_table_lock);
@@ -312,21 +322,23 @@
 	return 0;
 }
 
-/**
- * __gmap_segment_gaddr - find virtual address from segment pointer
- * @entry: pointer to a segment table entry in the guest address space
- *
- * Returns the virtual address in the guest address space for the segment
- */
-static unsigned long __gmap_segment_gaddr(unsigned long *entry)
+static unsigned long host_to_guest_lookup(struct gmap *gmap, unsigned long vmaddr)
 {
-	struct page *page;
-	unsigned long offset;
+	return (unsigned long)radix_tree_lookup(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
 
-	offset = (unsigned long) entry / sizeof(unsigned long);
-	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-	page = pmd_pgtable_page((pmd_t *) entry);
-	return page->index + offset;
+static unsigned long host_to_guest_delete(struct gmap *gmap, unsigned long vmaddr)
+{
+	return (unsigned long)radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
+
+static pmd_t *host_to_guest_pmd_delete(struct gmap *gmap, unsigned long vmaddr,
+				       unsigned long *gaddr)
+{
+	*gaddr = host_to_guest_delete(gmap, vmaddr);
+	if (IS_GADDR_VALID(*gaddr))
+		return (pmd_t *)gmap_table_walk(gmap, *gaddr, 1);
+	return NULL;
 }
 
 /**
@@ -338,16 +350,19 @@ static unsigned long __gmap_segment_gaddr(unsigned long *entry)
  */
 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
 {
-	unsigned long *entry;
+	unsigned long gaddr;
 	int flush = 0;
+	pmd_t *pmdp;
 
 	BUG_ON(gmap_is_shadow(gmap));
 	spin_lock(&gmap->guest_table_lock);
-	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
-	if (entry) {
-		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
-		*entry = _SEGMENT_ENTRY_EMPTY;
+
+	pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+	if (pmdp) {
+		flush = (pmd_val(*pmdp) != _SEGMENT_ENTRY_EMPTY);
+		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 	}
+
 	spin_unlock(&gmap->guest_table_lock);
 	return flush;
 }
@@ -564,7 +579,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	spin_lock(&gmap->guest_table_lock);
 	if (*table == _SEGMENT_ENTRY_EMPTY) {
 		rc = radix_tree_insert(&gmap->host_to_guest,
-				       vmaddr >> PMD_SHIFT, table);
+				       vmaddr >> PMD_SHIFT,
+				       (void *)MAKE_VALID_GADDR(gaddr));
 		if (!rc) {
 			if (pmd_leaf(*pmd)) {
 				*table = (pmd_val(*pmd) &
@@ -1995,20 +2011,16 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 		 pte_t *pte, unsigned long bits)
 {
 	unsigned long offset, gaddr = 0;
-	unsigned long *table;
 	struct gmap *gmap, *sg, *next;
 
 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
 	offset = offset * (PAGE_SIZE / sizeof(pte_t));
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		table = radix_tree_lookup(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (table)
-			gaddr = __gmap_segment_gaddr(table) + offset;
+		gaddr = host_to_guest_lookup(gmap, vmaddr) + offset;
 		spin_unlock(&gmap->guest_table_lock);
-		if (!table)
+		if (!IS_GADDR_VALID(gaddr))
 			continue;
 
 		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
@@ -2068,10 +2080,8 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
-						  vmaddr >> PMD_SHIFT);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
 		if (pmdp) {
-			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
 			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
 						   _SEGMENT_ENTRY_GMAP_UC |
@@ -2115,28 +2125,25 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
  */
 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *entry, gaddr;
+	unsigned long gaddr;
 	struct gmap *gmap;
 	pmd_t *pmdp;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		entry = radix_tree_delete(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (entry) {
-			pmdp = (pmd_t *)entry;
-			gaddr = __gmap_segment_gaddr(entry);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+		if (pmdp) {
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
-			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
-					   _SEGMENT_ENTRY_GMAP_UC |
-					   _SEGMENT_ENTRY));
+			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+						   _SEGMENT_ENTRY_GMAP_UC |
+						   _SEGMENT_ENTRY));
 			if (MACHINE_HAS_TLB_GUEST)
 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
 					    gmap->asce, IDTE_LOCAL);
 			else if (MACHINE_HAS_IDTE)
 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
-			*entry = _SEGMENT_ENTRY_EMPTY;
+			*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		}
 		spin_unlock(&gmap->guest_table_lock);
 	}
@@ -2151,30 +2158,27 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
  */
 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *entry, gaddr;
+	unsigned long gaddr;
 	struct gmap *gmap;
 	pmd_t *pmdp;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		entry = radix_tree_delete(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (entry) {
-			pmdp = (pmd_t *)entry;
-			gaddr = __gmap_segment_gaddr(entry);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+		if (pmdp) {
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
-			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
-					   _SEGMENT_ENTRY_GMAP_UC |
-					   _SEGMENT_ENTRY));
+			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC |
						   _SEGMENT_ENTRY));
 			if (MACHINE_HAS_TLB_GUEST)
 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
 					    gmap->asce, IDTE_GLOBAL);
 			else if (MACHINE_HAS_IDTE)
 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
 			else
 				__pmdp_csp(pmdp);
-			*entry = _SEGMENT_ENTRY_EMPTY;
+			*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		}
 		spin_unlock(&gmap->guest_table_lock);
 	}
@@ -2690,7 +2694,6 @@ int s390_replace_asce(struct gmap *gmap)
 	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
-	page->index = 0;
 	table = page_to_virt(page);
 	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
 