Skip to content

Commit 3762e90

Browse files
author
Claudio Imbrenda
committed
KVM: s390: use __kvm_faultin_pfn()
Refactor the existing page fault handling code to use __kvm_faultin_pfn(). This is possible now that memslots are always present. Acked-by: Janosch Frank <frankja@linux.ibm.com> Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com> Link: https://lore.kernel.org/r/20250123144627.312456-7-imbrenda@linux.ibm.com Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com> Message-ID: <20250123144627.312456-7-imbrenda@linux.ibm.com>
1 parent 5cbe243 commit 3762e90

File tree

3 files changed

+106
-27
lines changed

3 files changed

+106
-27
lines changed

arch/s390/kvm/kvm-s390.c

Lines changed: 99 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4786,11 +4786,104 @@ static void kvm_s390_assert_primary_as(struct kvm_vcpu *vcpu)
47864786
current->thread.gmap_int_code, current->thread.gmap_teid.val);
47874787
}
47884788

4789+
/*
 * __kvm_s390_handle_dat_fault() - handle a dat fault for the gmap of a vcpu
 * @vcpu: the vCPU whose gmap is to be fixed up
 * @gfn: the guest frame number used for memslots (including fake memslots)
 * @gaddr: the gmap address, does not have to match @gfn for ucontrol gmaps
 * @flags: FOLL_* flags
 *
 * Return: 0 on success, < 0 in case of error.
 * Context: The mm lock must not be held before calling. May sleep.
 */
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags)
{
	struct kvm_memory_slot *slot;
	unsigned int fault_flags;
	bool writable, unlocked;
	unsigned long vmaddr;
	struct page *page;
	kvm_pfn_t pfn;
	int rc;

	/* No (valid) memslot backing @gfn: treat as a guest addressing error */
	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
		return vcpu_post_run_addressing_exception(vcpu);

	fault_flags = flags & FOLL_WRITE ? FAULT_FLAG_WRITE : 0;
	/*
	 * With pfault enabled, prefer not to wait for I/O here; a
	 * KVM_PFN_ERR_NEEDS_IO result is handled below by setting up an
	 * async pfault instead.
	 */
	if (vcpu->arch.gmap->pfault_enabled)
		flags |= FOLL_NOWAIT;
	vmaddr = __gfn_to_hva_memslot(slot, gfn);

try_again:
	pfn = __kvm_faultin_pfn(slot, gfn, flags, &writable, &page);

	/* Access outside memory, inject addressing exception */
	if (is_noslot_pfn(pfn))
		return vcpu_post_run_addressing_exception(vcpu);
	/* Signal pending: try again */
	if (pfn == KVM_PFN_ERR_SIGPENDING)
		return -EAGAIN;

	/* Needs I/O, try to setup async pfault (only possible with FOLL_NOWAIT) */
	if (pfn == KVM_PFN_ERR_NEEDS_IO) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		/* Could not setup async pfault, try again synchronously */
		flags &= ~FOLL_NOWAIT;
		goto try_again;
	}
	/* Any other error */
	if (is_error_pfn(pfn))
		return -EFAULT;

	/* Success */
	mmap_read_lock(vcpu->arch.gmap->mm);
	/* Mark the userspace PTEs as young and/or dirty, to avoid page fault loops */
	rc = fixup_user_fault(vcpu->arch.gmap->mm, vmaddr, fault_flags, &unlocked);
	if (!rc)
		rc = __gmap_link(vcpu->arch.gmap, gaddr, vmaddr);
	/* Release under mmu_lock; page was not written through this path */
	scoped_guard(spinlock, &vcpu->kvm->mmu_lock) {
		kvm_release_faultin_page(vcpu->kvm, page, false, writable);
	}
	mmap_read_unlock(vcpu->arch.gmap->mm);
	return rc;
}
4854+
4855+
static int vcpu_dat_fault_handler(struct kvm_vcpu *vcpu, unsigned long gaddr, unsigned int flags)
4856+
{
4857+
unsigned long gaddr_tmp;
4858+
gfn_t gfn;
4859+
4860+
gfn = gpa_to_gfn(gaddr);
4861+
if (kvm_is_ucontrol(vcpu->kvm)) {
4862+
/*
4863+
* This translates the per-vCPU guest address into a
4864+
* fake guest address, which can then be used with the
4865+
* fake memslots that are identity mapping userspace.
4866+
* This allows ucontrol VMs to use the normal fault
4867+
* resolution path, like normal VMs.
4868+
*/
4869+
mmap_read_lock(vcpu->arch.gmap->mm);
4870+
gaddr_tmp = __gmap_translate(vcpu->arch.gmap, gaddr);
4871+
mmap_read_unlock(vcpu->arch.gmap->mm);
4872+
if (gaddr_tmp == -EFAULT) {
4873+
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4874+
vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
4875+
vcpu->run->s390_ucontrol.pgm_code = PGM_SEGMENT_TRANSLATION;
4876+
return -EREMOTE;
4877+
}
4878+
gfn = gpa_to_gfn(gaddr_tmp);
4879+
}
4880+
return __kvm_s390_handle_dat_fault(vcpu, gfn, gaddr, flags);
4881+
}
4882+
47894883
static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
47904884
{
47914885
unsigned int flags = 0;
47924886
unsigned long gaddr;
4793-
int rc = 0;
47944887

47954888
gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
47964889
if (kvm_s390_cur_gmap_fault_is_write())
@@ -4842,37 +4935,14 @@ static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
48424935
case PGM_REGION_SECOND_TRANS:
48434936
case PGM_REGION_THIRD_TRANS:
48444937
kvm_s390_assert_primary_as(vcpu);
4845-
if (vcpu->arch.gmap->pfault_enabled) {
4846-
rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
4847-
if (rc == -EFAULT)
4848-
return vcpu_post_run_addressing_exception(vcpu);
4849-
if (rc == -EAGAIN) {
4850-
trace_kvm_s390_major_guest_pfault(vcpu);
4851-
if (kvm_arch_setup_async_pf(vcpu))
4852-
return 0;
4853-
vcpu->stat.pfault_sync++;
4854-
} else {
4855-
return rc;
4856-
}
4857-
}
4858-
rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
4859-
if (rc == -EFAULT) {
4860-
if (kvm_is_ucontrol(vcpu->kvm)) {
4861-
vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4862-
vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
4863-
vcpu->run->s390_ucontrol.pgm_code = 0x10;
4864-
return -EREMOTE;
4865-
}
4866-
return vcpu_post_run_addressing_exception(vcpu);
4867-
}
4868-
break;
4938+
return vcpu_dat_fault_handler(vcpu, gaddr, flags);
48694939
default:
48704940
KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
48714941
current->thread.gmap_int_code, current->thread.gmap_teid.val);
48724942
send_sig(SIGSEGV, current, 0);
48734943
break;
48744944
}
4875-
return rc;
4945+
return 0;
48764946
}
48774947

48784948
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@ -5751,7 +5821,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
57515821
}
57525822
#endif
57535823
case KVM_S390_VCPU_FAULT: {
5754-
r = gmap_fault(vcpu->arch.gmap, arg, 0);
5824+
idx = srcu_read_lock(&vcpu->kvm->srcu);
5825+
r = vcpu_dat_fault_handler(vcpu, arg, 0);
5826+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
57555827
break;
57565828
}
57575829
case KVM_ENABLE_CAP:

arch/s390/kvm/kvm-s390.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -410,6 +410,12 @@ void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
410410
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
411411
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
412412
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
413+
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);

/*
 * kvm_s390_handle_dat_fault() - handle a dat fault where the gfn matches
 * the gmap address
 * @vcpu: the vCPU whose gmap is to be fixed up
 * @gaddr: the guest address the fault occurred on
 * @flags: FOLL_* flags
 *
 * Convenience wrapper around __kvm_s390_handle_dat_fault() for the common
 * (non-ucontrol) case where the memslot gfn is derived directly from the
 * guest address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
{
	return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
}
413419

414420
/* implemented in diag.c */
415421
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

arch/s390/mm/gmap.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -605,6 +605,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
605605
radix_tree_preload_end();
606606
return rc;
607607
}
608+
EXPORT_SYMBOL(__gmap_link);
608609

609610
/**
610611
* fixup_user_fault_nowait - manually resolve a user page fault without waiting

0 commit comments

Comments
 (0)