@@ -24,8 +24,20 @@
 #include <asm/page.h>
 #include <asm/tlb.h>
 
+/*
+ * The address is saved in a radix tree directly; NULL would be ambiguous,
+ * since 0 is a valid address, and NULL is returned when nothing was found.
+ * The lower bits are ignored by all users of the macro, so it can be used
+ * to distinguish a valid address 0 from a NULL.
+ */
+#define VALID_GADDR_FLAG 1
+#define IS_GADDR_VALID(gaddr) ((gaddr) & VALID_GADDR_FLAG)
+#define MAKE_VALID_GADDR(gaddr) (((gaddr) & HPAGE_MASK) | VALID_GADDR_FLAG)
+
 #define GMAP_SHADOW_FAKE_TABLE 1ULL
 
+static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
+
 static struct page *gmap_alloc_crst(void)
 {
 	struct page *page;
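
As a side note (not part of the diff): a minimal stand-alone sketch of the tagging scheme introduced above. HPAGE_MASK is stubbed here with an assumed 1 MiB segment alignment so the snippet compiles outside the kernel; the other macros mirror the hunk above.

#include <assert.h>
#include <stdio.h>

#define HPAGE_MASK		(~((1UL << 20) - 1))	/* stand-in for the kernel definition */
#define VALID_GADDR_FLAG	1
#define IS_GADDR_VALID(gaddr)	((gaddr) & VALID_GADDR_FLAG)
#define MAKE_VALID_GADDR(gaddr)	(((gaddr) & HPAGE_MASK) | VALID_GADDR_FLAG)

int main(void)
{
	unsigned long stored = MAKE_VALID_GADDR(0UL);	/* guest address 0, tagged */
	unsigned long missing = 0UL;			/* what a failed radix tree lookup yields */

	assert(IS_GADDR_VALID(stored));		/* guest address 0 is still distinguishable ... */
	assert(!IS_GADDR_VALID(missing));	/* ... from "nothing found" */
	printf("stored=%#lx, address=%#lx\n", stored, stored & HPAGE_MASK);
	return 0;
}
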
@@ -82,7 +94,6 @@ struct gmap *gmap_alloc(unsigned long limit)
 	page = gmap_alloc_crst();
 	if (!page)
 		goto out_free;
-	page->index = 0;
 	list_add(&page->lru, &gmap->crst_list);
 	table = page_to_virt(page);
 	crst_table_init(table, etype);
@@ -303,7 +314,6 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 		list_add(&page->lru, &gmap->crst_list);
 		*table = __pa(new) | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
-		page->index = gaddr;
 		page = NULL;
 	}
 	spin_unlock(&gmap->guest_table_lock);
@@ -312,21 +322,23 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 	return 0;
 }
 
-/**
- * __gmap_segment_gaddr - find virtual address from segment pointer
- * @entry: pointer to a segment table entry in the guest address space
- *
- * Returns the virtual address in the guest address space for the segment
- */
-static unsigned long __gmap_segment_gaddr(unsigned long *entry)
+static unsigned long host_to_guest_lookup(struct gmap *gmap, unsigned long vmaddr)
 {
-	struct page *page;
-	unsigned long offset;
+	return (unsigned long)radix_tree_lookup(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
 
-	offset = (unsigned long) entry / sizeof(unsigned long);
-	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
-	page = pmd_pgtable_page((pmd_t *) entry);
-	return page->index + offset;
+static unsigned long host_to_guest_delete(struct gmap *gmap, unsigned long vmaddr)
+{
+	return (unsigned long)radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
+}
+
+static pmd_t *host_to_guest_pmd_delete(struct gmap *gmap, unsigned long vmaddr,
+				       unsigned long *gaddr)
+{
+	*gaddr = host_to_guest_delete(gmap, vmaddr);
+	if (IS_GADDR_VALID(*gaddr))
+		return (pmd_t *)gmap_table_walk(gmap, *gaddr, 1);
+	return NULL;
 }
 
 /**
@@ -338,16 +350,19 @@ static unsigned long __gmap_segment_gaddr(unsigned long *entry)
  */
 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
 {
-	unsigned long *entry;
+	unsigned long gaddr;
 	int flush = 0;
+	pmd_t *pmdp;
 
 	BUG_ON(gmap_is_shadow(gmap));
 	spin_lock(&gmap->guest_table_lock);
-	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
-	if (entry) {
-		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
-		*entry = _SEGMENT_ENTRY_EMPTY;
+
+	pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+	if (pmdp) {
+		flush = (pmd_val(*pmdp) != _SEGMENT_ENTRY_EMPTY);
+		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 	}
+
 	spin_unlock(&gmap->guest_table_lock);
 	return flush;
 }
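
As a side note (not part of the diff): a toy user-space model of the host_to_guest rework used by the hunks above. A fixed-size array stands in for the radix tree and find_pmd() for gmap_table_walk(..., 1); all names, sizes and values below are invented for illustration only.

#include <stdio.h>

#define VALID_GADDR_FLAG	1UL
#define IS_GADDR_VALID(g)	((g) & VALID_GADDR_FLAG)
#define PMD_SHIFT		20	/* assumed 1 MiB segments */
#define SLOTS			16

static unsigned long host_to_guest[SLOTS];	/* vmaddr >> PMD_SHIFT -> tagged gaddr */
static unsigned long segment_table[SLOTS];	/* gaddr >> PMD_SHIFT -> "pmd" value */

static unsigned long *find_pmd(unsigned long gaddr)	/* stand-in for gmap_table_walk(..., 1) */
{
	return &segment_table[(gaddr >> PMD_SHIFT) % SLOTS];
}

/* Same shape as host_to_guest_pmd_delete(): drop the mapping, return the pmd only if valid. */
static unsigned long *delete_pmd(unsigned long vmaddr, unsigned long *gaddr)
{
	unsigned long idx = (vmaddr >> PMD_SHIFT) % SLOTS;

	*gaddr = host_to_guest[idx];
	host_to_guest[idx] = 0;
	return IS_GADDR_VALID(*gaddr) ? find_pmd(*gaddr) : NULL;
}

int main(void)
{
	unsigned long gaddr, *pmdp;

	host_to_guest[3] = 0UL | VALID_GADDR_FLAG;	/* guest address 0, mapped at host slot 3 */
	segment_table[0] = 0xdead;			/* pretend segment entry for guest address 0 */

	pmdp = delete_pmd(3UL << PMD_SHIFT, &gaddr);
	printf("pmd=%#lx gaddr=%#lx\n", pmdp ? *pmdp : 0UL, gaddr & ~VALID_GADDR_FLAG);
	return 0;
}
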
@@ -564,7 +579,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	spin_lock(&gmap->guest_table_lock);
 	if (*table == _SEGMENT_ENTRY_EMPTY) {
 		rc = radix_tree_insert(&gmap->host_to_guest,
-				       vmaddr >> PMD_SHIFT, table);
+				       vmaddr >> PMD_SHIFT,
+				       (void *)MAKE_VALID_GADDR(gaddr));
 		if (!rc) {
 			if (pmd_leaf(*pmd)) {
 				*table = (pmd_val(*pmd) &
@@ -1995,20 +2011,16 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 		 pte_t *pte, unsigned long bits)
 {
 	unsigned long offset, gaddr = 0;
-	unsigned long *table;
 	struct gmap *gmap, *sg, *next;
 
 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
 	offset = offset * (PAGE_SIZE / sizeof(pte_t));
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		table = radix_tree_lookup(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (table)
-			gaddr = __gmap_segment_gaddr(table) + offset;
+		gaddr = host_to_guest_lookup(gmap, vmaddr) + offset;
 		spin_unlock(&gmap->guest_table_lock);
-		if (!table)
+		if (!IS_GADDR_VALID(gaddr))
 			continue;
 
 		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
@@ -2068,10 +2080,8 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
-						  vmaddr >> PMD_SHIFT);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
 		if (pmdp) {
-			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
 			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
 						   _SEGMENT_ENTRY_GMAP_UC |
@@ -2115,28 +2125,25 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
  */
 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *entry, gaddr;
+	unsigned long gaddr;
 	struct gmap *gmap;
 	pmd_t *pmdp;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		entry = radix_tree_delete(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (entry) {
-			pmdp = (pmd_t *)entry;
-			gaddr = __gmap_segment_gaddr(entry);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+		if (pmdp) {
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
-			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
-					   _SEGMENT_ENTRY_GMAP_UC |
-					   _SEGMENT_ENTRY));
+			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+						   _SEGMENT_ENTRY_GMAP_UC |
+						   _SEGMENT_ENTRY));
 			if (MACHINE_HAS_TLB_GUEST)
 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
 					    gmap->asce, IDTE_LOCAL);
 			else if (MACHINE_HAS_IDTE)
 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
-			*entry = _SEGMENT_ENTRY_EMPTY;
+			*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		}
 		spin_unlock(&gmap->guest_table_lock);
 	}
@@ -2151,30 +2158,27 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
  */
 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
 {
-	unsigned long *entry, gaddr;
+	unsigned long gaddr;
 	struct gmap *gmap;
 	pmd_t *pmdp;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
-		entry = radix_tree_delete(&gmap->host_to_guest,
-					  vmaddr >> PMD_SHIFT);
-		if (entry) {
-			pmdp = (pmd_t *)entry;
-			gaddr = __gmap_segment_gaddr(entry);
+		pmdp = host_to_guest_pmd_delete(gmap, vmaddr, &gaddr);
+		if (pmdp) {
 			pmdp_notify_gmap(gmap, pmdp, gaddr);
-			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
-					   _SEGMENT_ENTRY_GMAP_UC |
-					   _SEGMENT_ENTRY));
+			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
+						   _SEGMENT_ENTRY_GMAP_UC |
+						   _SEGMENT_ENTRY));
 			if (MACHINE_HAS_TLB_GUEST)
 				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
 					    gmap->asce, IDTE_GLOBAL);
 			else if (MACHINE_HAS_IDTE)
 				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
 			else
 				__pmdp_csp(pmdp);
-			*entry = _SEGMENT_ENTRY_EMPTY;
+			*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
 		}
 		spin_unlock(&gmap->guest_table_lock);
 	}
@@ -2690,7 +2694,6 @@ int s390_replace_asce(struct gmap *gmap)
 	page = gmap_alloc_crst();
 	if (!page)
 		return -ENOMEM;
-	page->index = 0;
 	table = page_to_virt(page);
 	memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
 