Skip to content

Commit 43656f7

Browse files
author
Claudio Imbrenda
committed
KVM: s390: move gmap_shadow_pgt_lookup() into kvm
Move gmap_shadow_pgt_lookup() from mm/gmap.c into kvm/gaccess.c. Reviewed-by: Steffen Eiden <seiden@linux.ibm.com> Reviewed-by: Janosch Frank <frankja@linux.ibm.com> Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com> Link: https://lore.kernel.org/r/20250123144627.312456-13-imbrenda@linux.ibm.com Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com> Message-ID: <20250123144627.312456-13-imbrenda@linux.ibm.com>
1 parent ef0c8ef commit 43656f7

File tree

4 files changed

+46
-47
lines changed

4 files changed

+46
-47
lines changed

arch/s390/include/asm/gmap.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,8 +125,6 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
125125
int fake);
126126
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
127127
int fake);
128-
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
129-
unsigned long *pgt, int *dat_protection, int *fake);
130128
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
131129

132130
void gmap_register_pte_notifier(struct gmap_notifier *);
@@ -142,6 +140,7 @@ void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
142140
int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
143141
unsigned long end, bool interruptible);
144142
int kvm_s390_wiggle_split_folio(struct mm_struct *mm, struct folio *folio, bool split);
143+
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
145144

146145
/**
147146
* s390_uv_destroy_range - Destroy a range of pages in the given mm.

arch/s390/kvm/gaccess.c

Lines changed: 41 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
#include <asm/gmap.h>
1717
#include <asm/dat-bits.h>
1818
#include "kvm-s390.h"
19+
#include "gmap.h"
1920
#include "gaccess.h"
2021

2122
/*
@@ -1392,6 +1393,42 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
13921393
return 0;
13931394
}
13941395

1396+
/**
1397+
* shadow_pgt_lookup() - find a shadow page table
1398+
* @sg: pointer to the shadow guest address space structure
1399+
* @saddr: the address in the shadow guest address space
1400+
* @pgt: parent gmap address of the page table to get shadowed
1401+
* @dat_protection: if the pgtable is marked as protected by dat
1402+
* @fake: pgt references contiguous guest memory block, not a pgtable
1403+
*
1404+
* Returns 0 if the shadow page table was found and -EAGAIN if the page
1405+
* table was not found.
1406+
*
1407+
* Called with sg->mm->mmap_lock in read.
1408+
*/
1409+
static int shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
1410+
int *dat_protection, int *fake)
1411+
{
1412+
unsigned long *table;
1413+
struct page *page;
1414+
int rc;
1415+
1416+
spin_lock(&sg->guest_table_lock);
1417+
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1418+
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1419+
/* Shadow page tables are full pages (pte+pgste) */
1420+
page = pfn_to_page(*table >> PAGE_SHIFT);
1421+
*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
1422+
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1423+
*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
1424+
rc = 0;
1425+
} else {
1426+
rc = -EAGAIN;
1427+
}
1428+
spin_unlock(&sg->guest_table_lock);
1429+
return rc;
1430+
}
1431+
13951432
/**
13961433
* kvm_s390_shadow_fault - handle fault on a shadow page table
13971434
* @vcpu: virtual cpu
@@ -1415,6 +1452,9 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
14151452
int dat_protection, fake;
14161453
int rc;
14171454

1455+
if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
1456+
return -EFAULT;
1457+
14181458
mmap_read_lock(sg->mm);
14191459
/*
14201460
* We don't want any guest-2 tables to change - so the parent
@@ -1423,7 +1463,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
14231463
*/
14241464
ipte_lock(vcpu->kvm);
14251465

1426-
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
1466+
rc = shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
14271467
if (rc)
14281468
rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection,
14291469
&fake);

arch/s390/kvm/gmap.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
#ifndef ARCH_KVM_S390_GMAP_H
1111
#define ARCH_KVM_S390_GMAP_H
1212

13+
#define GMAP_SHADOW_FAKE_TABLE 1ULL
14+
1315
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
1416
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
1517
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);

arch/s390/mm/gmap.c

Lines changed: 2 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,6 @@
3636

3737
#define GMAP_SHADOW_FAKE_TABLE 1ULL
3838

39-
static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
40-
4139
static struct page *gmap_alloc_crst(void)
4240
{
4341
struct page *page;
@@ -738,8 +736,7 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
738736
*
739737
* Note: Can also be called for shadow gmaps.
740738
*/
741-
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
742-
unsigned long gaddr, int level)
739+
unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level)
743740
{
744741
const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
745742
unsigned long *table = gmap->table;
@@ -790,6 +787,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
790787
}
791788
return table;
792789
}
790+
EXPORT_SYMBOL(gmap_table_walk);
793791

794792
/**
795793
* gmap_pte_op_walk - walk the gmap page table, get the page table lock
@@ -1744,46 +1742,6 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
17441742
}
17451743
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
17461744

1747-
/**
1748-
* gmap_shadow_pgt_lookup - find a shadow page table
1749-
* @sg: pointer to the shadow guest address space structure
1750-
* @saddr: the address in the shadow guest address space
1751-
* @pgt: parent gmap address of the page table to get shadowed
1752-
* @dat_protection: if the pgtable is marked as protected by dat
1753-
* @fake: pgt references contiguous guest memory block, not a pgtable
1754-
*
1755-
* Returns 0 if the shadow page table was found and -EAGAIN if the page
1756-
* table was not found.
1757-
*
1758-
* Called with sg->mm->mmap_lock in read.
1759-
*/
1760-
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1761-
unsigned long *pgt, int *dat_protection,
1762-
int *fake)
1763-
{
1764-
unsigned long *table;
1765-
struct page *page;
1766-
int rc;
1767-
1768-
BUG_ON(!gmap_is_shadow(sg));
1769-
spin_lock(&sg->guest_table_lock);
1770-
table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1771-
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1772-
/* Shadow page tables are full pages (pte+pgste) */
1773-
page = pfn_to_page(*table >> PAGE_SHIFT);
1774-
*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
1775-
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1776-
*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
1777-
rc = 0;
1778-
} else {
1779-
rc = -EAGAIN;
1780-
}
1781-
spin_unlock(&sg->guest_table_lock);
1782-
return rc;
1783-
1784-
}
1785-
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
1786-
17871745
/**
17881746
* gmap_shadow_pgt - instantiate a shadow page table
17891747
* @sg: pointer to the shadow guest address space structure

0 commit comments

Comments
 (0)