@@ -3475,22 +3475,19 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
  */
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
-			unsigned long addr, unsigned int nr_pages)
+			unsigned long addr, unsigned int nr_pages,
+			unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
-	struct vm_area_struct *vma = vmf->vma;
-	struct file *file = vma->vm_file;
 	struct page *page = folio_page(folio, start);
-	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
 
 	do {
 		if (PageHWPoison(page + count))
 			goto skip;
 
-		if (mmap_miss > 0)
-			mmap_miss--;
+		(*mmap_miss)++;
 
 		/*
 		 * NOTE: If there're PTE markers, we'll leave them to be
@@ -3525,7 +3522,35 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	}
 
 	vmf->pte = old_ptep;
-	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
+}
+
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+		struct folio *folio, unsigned long addr,
+		unsigned int *mmap_miss)
+{
+	vm_fault_t ret = 0;
+	struct page *page = &folio->page;
+
+	if (PageHWPoison(page))
+		return ret;
+
+	(*mmap_miss)++;
+
+	/*
+	 * NOTE: If there're PTE markers, we'll leave them to be
+	 * handled in the specific fault path, and it'll prohibit
+	 * the fault-around logic.
+	 */
+	if (!pte_none(ptep_get(vmf->pte)))
+		return ret;
+
+	if (vmf->address == addr)
+		ret = VM_FAULT_NOPAGE;
+
+	set_pte_range(vmf, folio, page, 1, addr);
+	folio_ref_inc(folio);
 
 	return ret;
 }
@@ -3541,7 +3566,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
 	vm_fault_t ret = 0;
-	int nr_pages = 0;
+	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3569,25 +3594,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		end = folio->index + folio_nr_pages(folio) - 1;
 		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-		/*
-		 * NOTE: If there're PTE markers, we'll leave them to be
-		 * handled in the specific fault path, and it'll prohibit the
-		 * fault-around logic.
-		 */
-		if (!pte_none(ptep_get(vmf->pte)))
-			goto unlock;
-
-		ret |= filemap_map_folio_range(vmf, folio,
-				xas.xa_index - folio->index, addr, nr_pages);
+		if (!folio_test_large(folio))
+			ret |= filemap_map_order0_folio(vmf,
+					folio, addr, &mmap_miss);
+		else
+			ret |= filemap_map_folio_range(vmf, folio,
+					xas.xa_index - folio->index, addr,
+					nr_pages, &mmap_miss);
 
-unlock:
 		folio_unlock(folio);
 		folio_put(folio);
-		folio = next_uptodate_folio(&xas, mapping, end_pgoff);
-	} while (folio);
+	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
+
+	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+	if (mmap_miss >= mmap_miss_saved)
+		WRITE_ONCE(file->f_ra.mmap_miss, 0);
+	else
+		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
 	return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
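
For reference, a minimal standalone sketch of the deferred mmap_miss accounting the diff introduces: the map helpers bump a local counter roughly once per page they map, and the cached per-file miss count is then reduced a single time, saturating at zero. The struct and function names below are illustrative only, not kernel code, and plain accesses stand in for READ_ONCE()/WRITE_ONCE().

/*
 * Illustrative sketch of the saturating mmap_miss reconciliation
 * done at the end of filemap_map_pages(); names are hypothetical.
 */
#include <stdio.h>

struct ra_state {
	unsigned int mmap_miss;	/* stands in for file->f_ra.mmap_miss */
};

static void reconcile_mmap_miss(struct ra_state *ra, unsigned int mapped)
{
	unsigned int saved = ra->mmap_miss;	/* READ_ONCE() in the kernel */

	if (mapped >= saved)
		ra->mmap_miss = 0;		/* saturate instead of underflowing */
	else
		ra->mmap_miss = saved - mapped;	/* WRITE_ONCE() in the kernel */
}

int main(void)
{
	struct ra_state ra = { .mmap_miss = 3 };

	reconcile_mmap_miss(&ra, 8);	/* mapped more pages than recorded misses */
	printf("mmap_miss = %u\n", ra.mmap_miss);	/* prints 0 */
	return 0;
}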