@@ -607,130 +607,6 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 }
 EXPORT_SYMBOL(__gmap_link);
 
-/**
- * fixup_user_fault_nowait - manually resolve a user page fault without waiting
- * @mm: mm_struct of target mm
- * @address: user address
- * @fault_flags: flags to pass down to handle_mm_fault()
- * @unlocked: did we unlock the mmap_lock while retrying
- *
- * This function behaves similarly to fixup_user_fault(), but it guarantees
- * that the fault will be resolved without waiting. The function might drop
- * and re-acquire the mm lock, in which case @unlocked will be set to true.
- *
- * The guarantee is that the fault is handled without waiting, but the
- * function itself might sleep, due to the lock.
- *
- * Context: Needs to be called with mm->mmap_lock held in read mode, and will
- * return with the lock held in read mode; @unlocked will indicate whether
- * the lock has been dropped and re-acquired. This is the same behaviour as
- * fixup_user_fault().
- *
- * Return: 0 on success, -EAGAIN if the fault cannot be resolved without
- * waiting, -EFAULT if the fault cannot be resolved, -ENOMEM if out of
- * memory.
- */
-static int fixup_user_fault_nowait(struct mm_struct *mm, unsigned long address,
-				   unsigned int fault_flags, bool *unlocked)
-{
-	struct vm_area_struct *vma;
-	unsigned int test_flags;
-	vm_fault_t fault;
-	int rc;
-
-	fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
-	test_flags = fault_flags & FAULT_FLAG_WRITE ? VM_WRITE : VM_READ;
-
-	vma = find_vma(mm, address);
-	if (unlikely(!vma || address < vma->vm_start))
-		return -EFAULT;
-	if (unlikely(!(vma->vm_flags & test_flags)))
-		return -EFAULT;
-
-	fault = handle_mm_fault(vma, address, fault_flags, NULL);
-	/* the mm lock has been dropped, take it again */
-	if (fault & VM_FAULT_COMPLETED) {
-		*unlocked = true;
-		mmap_read_lock(mm);
-		return 0;
-	}
-	/* the mm lock has not been dropped */
-	if (fault & VM_FAULT_ERROR) {
-		rc = vm_fault_to_errno(fault, 0);
-		BUG_ON(!rc);
-		return rc;
-	}
-	/* the mm lock has not been dropped because of FAULT_FLAG_RETRY_NOWAIT */
-	if (fault & VM_FAULT_RETRY)
-		return -EAGAIN;
-	/* nothing needed to be done and the mm lock has not been dropped */
-	return 0;
-}
-
-/**
- * __gmap_fault - resolve a fault on a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @fault_flags: flags to pass down to handle_mm_fault()
- *
- * Context: Needs to be called with mm->mmap_lock held in read mode. Might
- * drop and re-acquire the lock. Will always return with the lock held.
- */
-static int __gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
-{
-	unsigned long vmaddr;
-	bool unlocked;
-	int rc = 0;
-
-retry:
-	unlocked = false;
-
-	vmaddr = __gmap_translate(gmap, gaddr);
-	if (IS_ERR_VALUE(vmaddr))
-		return vmaddr;
-
-	if (fault_flags & FAULT_FLAG_RETRY_NOWAIT)
-		rc = fixup_user_fault_nowait(gmap->mm, vmaddr, fault_flags, &unlocked);
-	else
-		rc = fixup_user_fault(gmap->mm, vmaddr, fault_flags, &unlocked);
-	if (rc)
-		return rc;
-	/*
-	 * In the case that fixup_user_fault unlocked the mmap_lock during
-	 * fault-in, redo __gmap_translate() to avoid racing with a
-	 * map/unmap_segment.
-	 * In particular, __gmap_translate(), fixup_user_fault{,_nowait}(),
-	 * and __gmap_link() must all be called atomically in one go; if the
-	 * lock had been dropped in between, a retry is needed.
-	 */
-	if (unlocked)
-		goto retry;
-
-	return __gmap_link(gmap, gaddr, vmaddr);
-}
-
-/**
- * gmap_fault - resolve a fault on a guest address
- * @gmap: pointer to guest mapping meta data structure
- * @gaddr: guest address
- * @fault_flags: flags to pass down to handle_mm_fault()
- *
- * Returns 0 on success, -ENOMEM for out of memory conditions, -EFAULT if the
- * vm address is already mapped to a different guest segment, and -EAGAIN if
- * FAULT_FLAG_RETRY_NOWAIT was specified and the fault could not be processed
- * immediately.
- */
-int gmap_fault(struct gmap *gmap, unsigned long gaddr, unsigned int fault_flags)
-{
-	int rc;
-
-	mmap_read_lock(gmap->mm);
-	rc = __gmap_fault(gmap, gaddr, fault_flags);
-	mmap_read_unlock(gmap->mm);
-	return rc;
-}
-EXPORT_SYMBOL_GPL(gmap_fault);
-
 /*
  * this function is assumed to be called with mmap_lock held
  */
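
A minimal caller sketch, not part of this diff, showing how the return contract documented in the removed gmap_fault() kernel-doc would typically have been consumed; the function name handle_guest_write_fault() and the can_sleep parameter are made up for illustration:

/*
 * Hypothetical caller sketch: resolve a guest write fault via the
 * (now removed) gmap_fault() interface, optionally without waiting.
 */
static int handle_guest_write_fault(struct gmap *gmap, unsigned long gaddr,
				    bool can_sleep)
{
	unsigned int flags = FAULT_FLAG_WRITE;
	int rc;

	/* If we must not sleep on I/O or page-out, ask for the nowait path. */
	if (!can_sleep)
		flags |= FAULT_FLAG_RETRY_NOWAIT;

	rc = gmap_fault(gmap, gaddr, flags);
	if (rc == -EAGAIN)
		return rc;	/* could not be resolved immediately; retry later */
	return rc;		/* 0 on success, -EFAULT or -ENOMEM on failure */
}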