Commit 5e714bf

Merge tag 'mm-stable-2022-10-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:

 - fix a race which causes page refcounting errors in ZONE_DEVICE pages
   (Alistair Popple)

 - fix userfaultfd test harness instability (Peter Xu)

 - various other patches in MM, mainly fixes

* tag 'mm-stable-2022-10-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (29 commits)
  highmem: fix kmap_to_page() for kmap_local_page() addresses
  mm/page_alloc: fix incorrect PGFREE and PGALLOC for high-order page
  mm/selftest: uffd: explain the write missing fault check
  mm/hugetlb: use hugetlb_pte_stable in migration race check
  mm/hugetlb: fix race condition of uffd missing/minor handling
  zram: always expose rw_page
  LoongArch: update local TLB if PTE entry exists
  mm: use update_mmu_tlb() on the second thread
  kasan: fix array-bounds warnings in tests
  hmm-tests: add test for migrate_device_range()
  nouveau/dmem: evict device private memory during release
  nouveau/dmem: refactor nouveau_dmem_fault_copy_one()
  mm/migrate_device.c: add migrate_device_range()
  mm/migrate_device.c: refactor migrate_vma and migrate_deivce_coherent_page()
  mm/memremap.c: take a pgmap reference on page allocation
  mm: free device private pages have zero refcount
  mm/memory.c: fix race when faulting a device private page
  mm/damon: use damon_sz_region() in appropriate place
  mm/damon: move sz_damon_region to damon_sz_region
  lib/test_meminit: add checks for the allocation functions
  ...
2 parents f2e4413 + ef6e06b commit 5e714bf
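The thread running through most of the driver changes below is the fix for the CPU-fault race on device private pages ("mm/memory.c: fix race when faulting a device private page"): drivers now tell the migration core which page the fault is for through a new fault_page field in struct migrate_vma, and they zero-initialize the struct so the field stays NULL on paths that are not fault-driven. A minimal sketch of that pattern follows, assuming the usual migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() flow; example_migrate_to_ram() and the elided copy step are illustrative placeholders, not code from this merge.

/*
 * Hedged sketch of a driver's migrate_to_ram() fault handler after this
 * series; only the fault_page wiring is the point, the copy step is elided.
 */
#include <linux/migrate.h>
#include <linux/mm.h>

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma mig = { 0 };	/* new fields default to NULL */

	mig.vma        = vmf->vma;
	mig.start      = vmf->address;
	mig.end        = vmf->address + PAGE_SIZE;
	mig.src        = &src_pfn;
	mig.dst        = &dst_pfn;
	mig.flags      = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	/* pgmap_owner would normally be set to the driver's pgmap owner */
	mig.fault_page = vmf->page;	/* the page this CPU fault is for */

	if (migrate_vma_setup(&mig))
		return VM_FAULT_SIGBUS;

	/* ... allocate a system-RAM page, copy the data, fill dst_pfn ... */

	migrate_vma_pages(&mig);
	migrate_vma_finalize(&mig);
	return 0;
}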

32 files changed, +719 -253 lines changed


arch/loongarch/include/asm/pgtable.h

Lines changed: 3 additions & 0 deletions
@@ -412,6 +412,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	__update_tlb(vma, address, ptep);
 }

+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
 					 unsigned long address, pmd_t *pmdp)
 {

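The hunk above lets LoongArch reuse its update_mmu_cache() as update_mmu_tlb(), which "mm: use update_mmu_tlb() on the second thread" (in the shortlog above) relies on to refresh a stale local TLB entry when a second thread faults on a PTE that already exists. For context, the generic fallback that __HAVE_ARCH_UPDATE_MMU_TLB overrides is, as far as I recall (an assumption about include/linux/pgtable.h, not part of this diff), a no-op:

#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
/* Assumed generic stub: do nothing unless the architecture overrides it. */
static inline void update_mmu_tlb(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep)
{
}
#define __HAVE_ARCH_UPDATE_MMU_TLB
#endif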
arch/powerpc/kvm/book3s_hv_uvmem.c

Lines changed: 12 additions & 9 deletions
@@ -508,10 +508,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 		unsigned long start,
 		unsigned long end, unsigned long page_shift,
-		struct kvm *kvm, unsigned long gpa)
+		struct kvm *kvm, unsigned long gpa, struct page *fault_page)
 {
 	unsigned long src_pfn, dst_pfn = 0;
-	struct migrate_vma mig;
+	struct migrate_vma mig = { 0 };
 	struct page *dpage, *spage;
 	struct kvmppc_uvmem_page_pvt *pvt;
 	unsigned long pfn;
@@ -525,6 +525,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 	mig.dst = &dst_pfn;
 	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
 	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+	mig.fault_page = fault_page;

 	/* The requested page is already paged-out, nothing to do */
 	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
@@ -580,12 +581,14 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
 static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end,
 		unsigned long page_shift,
-		struct kvm *kvm, unsigned long gpa)
+		struct kvm *kvm, unsigned long gpa,
+		struct page *fault_page)
 {
 	int ret;

 	mutex_lock(&kvm->arch.uvmem_lock);
-	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
+	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
+				    fault_page);
 	mutex_unlock(&kvm->arch.uvmem_lock);

 	return ret;
@@ -634,7 +637,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
 			pvt->remove_gfn = true;

 			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
-						  PAGE_SHIFT, kvm, pvt->gpa))
+						  PAGE_SHIFT, kvm, pvt->gpa, NULL))
 				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
 				       pvt->gpa, addr);
 		} else {
@@ -715,7 +718,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)

 	dpage = pfn_to_page(uvmem_pfn);
 	dpage->zone_device_data = pvt;
-	lock_page(dpage);
+	zone_device_page_init(dpage);
 	return dpage;
 out_clear:
 	spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -736,7 +739,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
 		bool pagein)
 {
 	unsigned long src_pfn, dst_pfn = 0;
-	struct migrate_vma mig;
+	struct migrate_vma mig = { 0 };
 	struct page *spage;
 	unsigned long pfn;
 	struct page *dpage;
@@ -994,7 +997,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)

 	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
 				vmf->address + PAGE_SIZE, PAGE_SHIFT,
-				pvt->kvm, pvt->gpa))
+				pvt->kvm, pvt->gpa, vmf->page))
 		return VM_FAULT_SIGBUS;
 	else
 		return 0;
@@ -1065,7 +1068,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
 	if (!vma || vma->vm_start > start || vma->vm_end < end)
 		goto out;

-	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
+	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
 		ret = H_SUCCESS;
 out:
 	mmap_read_unlock(kvm->mm);

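Two details of this hunk repeat in the GPU drivers later in the merge. First, struct migrate_vma is zero-initialized so the new fault_page member stays NULL on callers that are not servicing a CPU fault. Second, a freshly allocated device private page is now prepared with zone_device_page_init() instead of a bare lock_page(), matching "mm: free device private pages have zero refcount": the helper takes the initial reference (and, as I understand it, a pgmap reference and the page lock) before the page is handed to migrate_vma. A hedged sketch of the allocation side; pick_free_device_pfn() is a hypothetical stand-in for the driver's own pfn allocator.

#include <linux/memremap.h>
#include <linux/mm.h>

static struct page *example_alloc_device_page(void *owner_data)
{
	/* hypothetical helper standing in for the driver's pfn allocator */
	unsigned long pfn = pick_free_device_pfn();
	struct page *dpage = pfn_to_page(pfn);

	dpage->zone_device_data = owner_data;
	/*
	 * Set up the zero-refcount ZONE_DEVICE page for use: initial
	 * refcount, pgmap reference and page lock (replaces lock_page()).
	 */
	zone_device_page_init(dpage);
	return dpage;
}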
drivers/block/zram/zram_drv.c

Lines changed: 3 additions & 23 deletions
@@ -52,9 +52,6 @@ static unsigned int num_devices = 1;
 static size_t huge_class_size;

 static const struct block_device_operations zram_devops;
-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops;
-#endif

 static void zram_free_page(struct zram *zram, size_t index);
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
@@ -546,17 +543,6 @@ static ssize_t backing_dev_store(struct device *dev,
 	zram->backing_dev = backing_dev;
 	zram->bitmap = bitmap;
 	zram->nr_pages = nr_pages;
-	/*
-	 * With writeback feature, zram does asynchronous IO so it's no longer
-	 * synchronous device so let's remove synchronous io flag. Othewise,
-	 * upper layer(e.g., swap) could wait IO completion rather than
-	 * (submit and return), which will cause system sluggish.
-	 * Furthermore, when the IO function returns(e.g., swap_readpage),
-	 * upper layer expects IO was done so it could deallocate the page
-	 * freely but in fact, IO is going on so finally could cause
-	 * use-after-free when the IO is really done.
-	 */
-	zram->disk->fops = &zram_wb_devops;
 	up_write(&zram->init_lock);

 	pr_info("setup backing device %s\n", file_name);
@@ -1270,6 +1256,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		struct bio_vec bvec;

 		zram_slot_unlock(zram, index);
+		/* A null bio means rw_page was used, we must fallback to bio */
+		if (!bio)
+			return -EOPNOTSUPP;

 		bvec.bv_page = page;
 		bvec.bv_len = PAGE_SIZE;
@@ -1856,15 +1845,6 @@ static const struct block_device_operations zram_devops = {
 	.owner = THIS_MODULE
 };

-#ifdef CONFIG_ZRAM_WRITEBACK
-static const struct block_device_operations zram_wb_devops = {
-	.open = zram_open,
-	.submit_bio = zram_submit_bio,
-	.swap_slot_free_notify = zram_slot_free_notify,
-	.owner = THIS_MODULE
-};
-#endif
-
 static DEVICE_ATTR_WO(compact);
 static DEVICE_ATTR_RW(disksize);
 static DEVICE_ATTR_RO(initstate);

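With the writeback-specific zram_wb_devops gone, zram keeps its rw_page method exposed even when a backing device is configured; when a read has to go through the backing device, __zram_bvec_read() now returns -EOPNOTSUPP for a NULL bio and the block layer is expected to retry the request as a normal bio. Below is a hedged sketch of the caller-side fallback this relies on, modeled on how bdev_read_page() users such as the swap read path behave; read_via_bio() is a hypothetical placeholder for the bio submission path.

#include <linux/blkdev.h>

int example_read_one_page(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	/* Try the synchronous ->rw_page path first. */
	int ret = bdev_read_page(bdev, sector, page);

	/* Driver could not service it synchronously; fall back to a bio. */
	if (ret == -EOPNOTSUPP)
		ret = read_via_bio(bdev, sector, page);	/* hypothetical */

	return ret;
}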
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

Lines changed: 11 additions & 8 deletions
@@ -223,7 +223,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
 	page = pfn_to_page(pfn);
 	svm_range_bo_ref(prange->svm_bo);
 	page->zone_device_data = prange->svm_bo;
-	lock_page(page);
+	zone_device_page_init(page);
 }

 static void
@@ -410,7 +410,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
-	struct migrate_vma migrate;
+	struct migrate_vma migrate = { 0 };
 	unsigned long cpages = 0;
 	dma_addr_t *scratch;
 	void *buf;
@@ -666,15 +666,15 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 static long
 svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
-		       uint32_t trigger)
+		       uint32_t trigger, struct page *fault_page)
 {
 	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
 	unsigned long upages = npages;
 	unsigned long cpages = 0;
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
-	struct migrate_vma migrate;
+	struct migrate_vma migrate = { 0 };
 	dma_addr_t *scratch;
 	void *buf;
 	int r = -ENOMEM;
@@ -697,6 +697,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,

 	migrate.src = buf;
 	migrate.dst = migrate.src + npages;
+	migrate.fault_page = fault_page;
 	scratch = (dma_addr_t *)(migrate.dst + npages);

 	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
@@ -764,7 +765,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
  * 0 - OK, otherwise error code
  */
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
-			    uint32_t trigger)
+			    uint32_t trigger, struct page *fault_page)
 {
 	struct amdgpu_device *adev;
 	struct vm_area_struct *vma;
@@ -805,7 +806,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
 		}

 		next = min(vma->vm_end, end);
-		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger);
+		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
+					   fault_page);
 		if (r < 0) {
 			pr_debug("failed %ld to migrate prange %p\n", r, prange);
 			break;
@@ -849,7 +851,7 @@ svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
 	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

 	do {
-		r = svm_migrate_vram_to_ram(prange, mm, trigger);
+		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
 		if (r)
 			return r;
 	} while (prange->actual_loc && --retries);
@@ -950,7 +952,8 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 	}

 	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
-				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU);
+				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+				    vmf->page);
 	if (r)
 		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
 			 r, prange->svms, prange, prange->start, prange->last);

drivers/gpu/drm/amd/amdkfd/kfd_migrate.h

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ enum MIGRATION_COPY_DIR {
 int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 			struct mm_struct *mm, uint32_t trigger);
 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
-			    uint32_t trigger);
+			    uint32_t trigger, struct page *fault_page);
 unsigned long
 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);

drivers/gpu/drm/amd/amdkfd/kfd_svm.c

Lines changed: 7 additions & 4 deletions
@@ -2913,13 +2913,15 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 			 */
 			if (prange->actual_loc)
 				r = svm_migrate_vram_to_ram(prange, mm,
-					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+					NULL);
 			else
 				r = 0;
 		}
 	} else {
 		r = svm_migrate_vram_to_ram(prange, mm,
-					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
+					KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+					NULL);
 	}
 	if (r) {
 		pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
@@ -3278,7 +3280,8 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
 		return 0;

 	if (!best_loc) {
-		r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
+		r = svm_migrate_vram_to_ram(prange, mm,
+					KFD_MIGRATE_TRIGGER_PREFETCH, NULL);
 		*migrated = !r;
 		return r;
 	}
@@ -3339,7 +3342,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
 	mutex_lock(&prange->migrate_mutex);
 	do {
 		r = svm_migrate_vram_to_ram(prange, mm,
-					KFD_MIGRATE_TRIGGER_TTM_EVICTION);
+					KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL);
 	} while (!r && prange->actual_loc && --retries);

 	if (!r && prange->actual_loc)
