
Commit 905f5ce

davidhildenbrand authored and Claudio Imbrenda committed
KVM: s390: vsie: stop messing with page refcount
Let's stop messing with the page refcount, and use a flag that is set / cleared atomically to remember whether a vsie page is currently in use.

Note that we could use a page flag, or a lower bit of the scb_gpa. Let's keep it simple for now, we have sufficient space.

While at it, stop passing "struct kvm *" to put_vsie_page(), it's unused.

Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Tested-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Message-ID: <20250107154344.1003072-4-david@redhat.com>
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
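As an illustration of the pattern this commit adopts, below is a minimal userspace sketch of a try/put cycle on an atomically managed "in use" bit. It uses C11 atomics in place of the kernel's test_bit()/test_and_set_bit()/clear_bit() bitops, and the names (demo_page, try_get_page, put_page_flag, PAGE_IN_USE) are hypothetical stand-ins, not kernel identifiers.

/*
 * Minimal sketch of the "in use" flag pattern (userspace, C11 atomics).
 * Mirrors the shape of try_get_vsie_page()/put_vsie_page() in the diff
 * below, but is not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_IN_USE (1UL << 0)

struct demo_page {
	atomic_ulong flags;	/* only bit 0 is used here */
};

/* Claim the page; returns true only for the caller that flips the bit. */
static bool try_get_page(struct demo_page *p)
{
	/* Cheap plain read first, mirroring the test_bit() fast path. */
	if (atomic_load(&p->flags) & PAGE_IN_USE)
		return false;
	/* The atomic read-modify-write arbitrates races between claimants. */
	return !(atomic_fetch_or(&p->flags, PAGE_IN_USE) & PAGE_IN_USE);
}

/* Release the page so another user can claim it. */
static void put_page_flag(struct demo_page *p)
{
	atomic_fetch_and(&p->flags, ~PAGE_IN_USE);
}

int main(void)
{
	struct demo_page p = { .flags = 0 };

	printf("first get:  %d\n", try_get_page(&p));	/* 1: claimed */
	printf("second get: %d\n", try_get_page(&p));	/* 0: already in use */
	put_page_flag(&p);
	printf("after put:  %d\n", try_get_page(&p));	/* 1: claimable again */
	return 0;
}

The cheap load before the read-modify-write mirrors try_get_vsie_page(): it avoids a contended atomic operation when the page is obviously busy, while the fetch-or still decides the race when two callers pass the pre-check at the same time.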
1 parent: c5f64c9 · commit: 905f5ce


1 file changed: +30 -16 lines


arch/s390/kvm/vsie.c

Lines changed: 30 additions & 16 deletions
@@ -23,6 +23,10 @@
 #include "kvm-s390.h"
 #include "gaccess.h"
 
+enum vsie_page_flags {
+	VSIE_PAGE_IN_USE = 0,
+};
+
 struct vsie_page {
 	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
 	/*
@@ -52,7 +56,12 @@ struct vsie_page {
 	 * radix tree.
 	 */
 	gpa_t scb_gpa;				/* 0x0258 */
-	__u8 reserved[0x0700 - 0x0260];		/* 0x0260 */
+	/*
+	 * Flags: must be set/cleared atomically after the vsie page can be
+	 * looked up by other CPUs.
+	 */
+	unsigned long flags;			/* 0x0260 */
+	__u8 reserved[0x0700 - 0x0268];		/* 0x0268 */
 	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
 	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
 };
@@ -1351,6 +1360,20 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	return rc;
 }
 
+/* Try getting a given vsie page, returning "true" on success. */
+static inline bool try_get_vsie_page(struct vsie_page *vsie_page)
+{
+	if (test_bit(VSIE_PAGE_IN_USE, &vsie_page->flags))
+		return false;
+	return !test_and_set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
+/* Put a vsie page acquired through get_vsie_page / try_get_vsie_page. */
+static void put_vsie_page(struct vsie_page *vsie_page)
+{
+	clear_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
+}
+
 /*
  * Get or create a vsie page for a scb address.
  *
@@ -1369,15 +1392,15 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 	rcu_read_unlock();
 	if (page) {
 		vsie_page = page_to_virt(page);
-		if (page_ref_inc_return(page) == 2) {
+		if (try_get_vsie_page(vsie_page)) {
 			if (vsie_page->scb_gpa == addr)
 				return vsie_page;
 			/*
 			 * We raced with someone reusing + putting this vsie
 			 * page before we grabbed it.
 			 */
+			put_vsie_page(vsie_page);
 		}
-		page_ref_dec(page);
 	}
 
 	/*
@@ -1394,17 +1417,16 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 			return ERR_PTR(-ENOMEM);
 		}
 		vsie_page = page_to_virt(page);
-		page_ref_inc(page);
+		__set_bit(VSIE_PAGE_IN_USE, &vsie_page->flags);
 		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
 		kvm->arch.vsie.page_count++;
 	} else {
 		/* reuse an existing entry that belongs to nobody */
 		while (true) {
 			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
 			vsie_page = page_to_virt(page);
-			if (page_ref_inc_return(page) == 2)
+			if (try_get_vsie_page(vsie_page))
 				break;
-			page_ref_dec(page);
 			kvm->arch.vsie.next++;
 			kvm->arch.vsie.next %= nr_vcpus;
 		}
@@ -1417,7 +1439,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
 	/* Double use of the same address or allocation failure. */
 	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
-		page_ref_dec(page);
+		put_vsie_page(vsie_page);
 		mutex_unlock(&kvm->arch.vsie.mutex);
 		return NULL;
 	}
@@ -1431,14 +1453,6 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 	return vsie_page;
 }
 
-/* put a vsie page acquired via get_vsie_page */
-static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
-{
-	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);
-
-	page_ref_dec(page);
-}
-
 int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
 {
 	struct vsie_page *vsie_page;
@@ -1489,7 +1503,7 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
 out_unpin_scb:
 	unpin_scb(vcpu, vsie_page, scb_addr);
 out_put:
-	put_vsie_page(vcpu->kvm, vsie_page);
+	put_vsie_page(vsie_page);
 
 	return rc < 0 ? rc : 0;
 }
