
Commit 0fd0684

vfio/type1: Use mapping page mask for pfnmaps
vfio-pci supports huge_fault for PCI MMIO BARs and will insert pud and pmd mappings for well aligned mappings. follow_pfnmap_start() walks the page table and therefore knows the page mask of the level where the address is found and returns this through follow_pfnmap_args.addr_mask. Subsequent pfns from this address until the end of the mapping page are necessarily consecutive. Use this information to retrieve a range of pfnmap pfns in a single pass.

With optimal mappings and alignment on systems with 1GB pud and 4KB page size, this reduces iterations for DMA mapping PCI BARs by a factor of 256K. In real world testing, the overhead of iterating pfns for a VM DMA mapping a 32GB PCI BAR is reduced from ~1s to sub-millisecond overhead.

Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mitchell Augustin <mitchell.augustin@canonical.com>
Tested-by: Mitchell Augustin <mitchell.augustin@canonical.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20250218222209.1382449-7-alex.williamson@redhat.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
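For illustration only, below is a minimal user-space sketch (not kernel code) of the end-pfn arithmetic this commit adds to vaddr_get_pfns(). All values are hypothetical and chosen to match the 1GB pud / 4KB page example above: the pfn, the PUD_MASK definition, and the 32GB request size are assumptions, not taken from the kernel.

    /*
     * Stand-alone sketch of the epfn arithmetic.  Assumes a 4KB base page,
     * a 1GB pud-level mapping, and a pfn at the start of that mapping.
     */
    #include <stdio.h>

    #define PAGE_SHIFT  12UL                        /* 4KB pages */
    #define PUD_MASK    (~((1UL << 30) - 1))        /* 1GB-aligned address mask */

    int main(void)
    {
        unsigned long pfn = 0x100000;               /* hypothetical pfn, 1GB aligned */
        unsigned long addr_mask = PUD_MASK;         /* as returned via args.addr_mask */
        unsigned long npages = 1UL << 23;           /* 32GB request in 4KB pages */
        unsigned long epfn, batch;

        /* First pfn past the mapping level that contains 'pfn'. */
        epfn = (pfn | (~addr_mask >> PAGE_SHIFT)) + 1;

        /* Consecutive pfns that can now be claimed in a single pass. */
        batch = npages < epfn - pfn ? npages : epfn - pfn;

        printf("pfns per iteration: %lu\n", batch); /* prints 262144 (256K) */
        return 0;
    }

With a 4KB pte-level mapping at the same address, addr_mask would cover only that single page, the computed batch would be 1, and behavior would match the previous one-pfn-per-iteration loop.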
1 parent 62fb8ad commit 0fd0684

1 file changed, 16 insertions(+), 7 deletions(-)
drivers/vfio/vfio_iommu_type1.c

@@ -520,7 +520,7 @@ static void vfio_batch_fini(struct vfio_batch *batch)
 
 static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 			    unsigned long vaddr, unsigned long *pfn,
-			    bool write_fault)
+			    unsigned long *addr_mask, bool write_fault)
 {
 	struct follow_pfnmap_args args = { .vma = vma, .address = vaddr };
 	int ret;
@@ -544,10 +544,12 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
 		return ret;
 	}
 
-	if (write_fault && !args.writable)
+	if (write_fault && !args.writable) {
 		ret = -EFAULT;
-	else
+	} else {
 		*pfn = args.pfn;
+		*addr_mask = args.addr_mask;
+	}
 
 	follow_pfnmap_end(&args);
 	return ret;
@@ -590,15 +592,22 @@ static long vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
 	vma = vma_lookup(mm, vaddr);
 
 	if (vma && vma->vm_flags & VM_PFNMAP) {
-		ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE);
+		unsigned long addr_mask;
+
+		ret = follow_fault_pfn(vma, mm, vaddr, pfn, &addr_mask,
+				       prot & IOMMU_WRITE);
 		if (ret == -EAGAIN)
 			goto retry;
 
 		if (!ret) {
-			if (is_invalid_reserved_pfn(*pfn))
-				ret = 1;
-			else
+			if (is_invalid_reserved_pfn(*pfn)) {
+				unsigned long epfn;
+
+				epfn = (*pfn | (~addr_mask >> PAGE_SHIFT)) + 1;
+				ret = min_t(long, npages, epfn - *pfn);
+			} else {
 				ret = -EFAULT;
+			}
 		}
 	}
 done:
