Skip to content

Commit 4514eda

Browse files
davidhildenbrand authored and Claudio Imbrenda committed
KVM: s390: vsie: stop using "struct page" for vsie page
Now that we no longer use page->index and the page refcount explicitly, let's avoid messing with "struct page" completely.

Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Tested-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Message-ID: <20250107154344.1003072-5-david@redhat.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
1 parent 905f5ce commit 4514eda

File tree

2 files changed

+15
-20
lines changed

2 files changed

+15
-20
lines changed

arch/s390/include/asm/kvm_host.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -931,12 +931,14 @@ struct sie_page2 {
931931
u8 reserved928[0x1000 - 0x928]; /* 0x0928 */
932932
};
933933

934+
struct vsie_page;
935+
934936
struct kvm_s390_vsie {
935937
struct mutex mutex;
936938
struct radix_tree_root addr_to_page;
937939
int page_count;
938940
int next;
939-
struct page *pages[KVM_MAX_VCPUS];
941+
struct vsie_page *pages[KVM_MAX_VCPUS];
940942
};
941943

942944
struct kvm_s390_gisa_iam {

arch/s390/kvm/vsie.c

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -599,7 +599,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
599599
struct kvm *kvm = gmap->private;
600600
struct vsie_page *cur;
601601
unsigned long prefix;
602-
struct page *page;
603602
int i;
604603

605604
if (!gmap_is_shadow(gmap))
@@ -609,10 +608,9 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
609608
* therefore we can safely reference them all the time.
610609
*/
611610
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
612-
page = READ_ONCE(kvm->arch.vsie.pages[i]);
613-
if (!page)
611+
cur = READ_ONCE(kvm->arch.vsie.pages[i]);
612+
if (!cur)
614613
continue;
615-
cur = page_to_virt(page);
616614
if (READ_ONCE(cur->gmap) != gmap)
617615
continue;
618616
prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
@@ -1384,14 +1382,12 @@ static void put_vsie_page(struct vsie_page *vsie_page)
13841382
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
13851383
{
13861384
struct vsie_page *vsie_page;
1387-
struct page *page;
13881385
int nr_vcpus;
13891386

13901387
rcu_read_lock();
1391-
page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
1388+
vsie_page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
13921389
rcu_read_unlock();
1393-
if (page) {
1394-
vsie_page = page_to_virt(page);
1390+
if (vsie_page) {
13951391
if (try_get_vsie_page(vsie_page)) {
13961392
if (vsie_page->scb_gpa == addr)
13971393
return vsie_page;
@@ -1411,20 +1407,18 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
14111407

14121408
mutex_lock(&kvm->arch.vsie.mutex);
14131409
if (kvm->arch.vsie.page_count < nr_vcpus) {
1414-
page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1415-
if (!page) {
1410+
vsie_page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO | GFP_DMA);
1411+
if (!vsie_page) {
14161412
mutex_unlock(&kvm->arch.vsie.mutex);
14171413
return ERR_PTR(-ENOMEM);
14181414
}
1419-
vsie_page = page_to_virt(page);
14201415
__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
1421-
kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
1416+
kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = vsie_page;
14221417
kvm->arch.vsie.page_count++;
14231418
} else {
14241419
/* reuse an existing entry that belongs to nobody */
14251420
while (true) {
1426-
page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
1427-
vsie_page = page_to_virt(page);
1421+
vsie_page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
14281422
if (try_get_vsie_page(vsie_page))
14291423
break;
14301424
kvm->arch.vsie.next++;
@@ -1438,7 +1432,8 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
14381432
vsie_page->scb_gpa = ULONG_MAX;
14391433

14401434
/* Double use of the same address or allocation failure. */
1441-
if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
1435+
if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9,
1436+
vsie_page)) {
14421437
put_vsie_page(vsie_page);
14431438
mutex_unlock(&kvm->arch.vsie.mutex);
14441439
return NULL;
@@ -1519,20 +1514,18 @@ void kvm_s390_vsie_init(struct kvm *kvm)
15191514
void kvm_s390_vsie_destroy(struct kvm *kvm)
15201515
{
15211516
struct vsie_page *vsie_page;
1522-
struct page *page;
15231517
int i;
15241518

15251519
mutex_lock(&kvm->arch.vsie.mutex);
15261520
for (i = 0; i < kvm->arch.vsie.page_count; i++) {
1527-
page = kvm->arch.vsie.pages[i];
1521+
vsie_page = kvm->arch.vsie.pages[i];
15281522
kvm->arch.vsie.pages[i] = NULL;
1529-
vsie_page = page_to_virt(page);
15301523
release_gmap_shadow(vsie_page);
15311524
/* free the radix tree entry */
15321525
if (vsie_page->scb_gpa != ULONG_MAX)
15331526
radix_tree_delete(&kvm->arch.vsie.addr_to_page,
15341527
vsie_page->scb_gpa >> 9);
1535-
__free_page(page);
1528+
free_page((unsigned long)vsie_page);
15361529
}
15371530
kvm->arch.vsie.page_count = 0;
15381531
mutex_unlock(&kvm->arch.vsie.mutex);

0 commit comments

Comments (0)