@@ -551,8 +551,8 @@ static void kvm_null_fn(void)
 			     node;							\
 			     node = interval_tree_iter_next(node, start, last))	\
 
-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
-							    const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+							  const struct kvm_mmu_notifier_range *range)
 {
 	struct kvm_mmu_notifier_return r = {
 		.ret = false,
@@ -633,7 +633,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 	return r;
 }
 
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
 						gfn_handler_t handler,
@@ -649,15 +649,15 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 		.may_block	= false,
 	};
 
-	return __kvm_handle_hva_range(kvm, &range).ret;
+	return kvm_handle_hva_range(kvm, &range).ret;
 }
 
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
-							  unsigned long start,
-							  unsigned long end,
-							  gfn_handler_t handler)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+						       unsigned long start,
+						       unsigned long end,
+						       gfn_handler_t handler)
 {
-	return kvm_handle_hva_range(mn, start, end, handler, false);
+	return kvm_age_hva_range(mn, start, end, handler, false);
 }
 
 void kvm_mmu_invalidate_begin(struct kvm *kvm)
@@ -752,7 +752,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * that guest memory has been reclaimed. This needs to be done *after*
 	 * dropping mmu_lock, as x86's reclaim path is slooooow.
 	 */
-	if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+	if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
 		kvm_arch_guest_memory_reclaimed(kvm);
 
 	return 0;
@@ -798,7 +798,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	};
 	bool wake;
 
-	__kvm_handle_hva_range(kvm, &hva_range);
+	kvm_handle_hva_range(kvm, &hva_range);
 
 	/* Pairs with the increment in range_start(). */
 	spin_lock(&kvm->mn_invalidate_lock);
@@ -822,8 +822,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
 	trace_kvm_age_hva(start, end);
 
-	return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
-				    !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+	return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+				 !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -846,7 +846,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+	return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
 }
 
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -855,8 +855,8 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 {
 	trace_kvm_test_age_hva(address);
 
-	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
-					     kvm_test_age_gfn);
+	return kvm_age_hva_range_no_flush(mn, address, address + 1,
+					  kvm_test_age_gfn);
 }
 
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
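For readers following the rename outside the tree, here is a rough standalone sketch of the resulting layering. It uses simplified stand-in types and signatures rather than the real KVM definitions (which live in virt/kvm/kvm_main.c and include/linux/kvm_host.h): the generic walker keeps the kvm_handle_hva_range() name, while the aging-only wrappers called from the clear_flush_young/clear_young/test_young notifiers become kvm_age_hva_range() and kvm_age_hva_range_no_flush().

/*
 * Standalone sketch of the call layering after this rename; all types and
 * signatures here are simplified stand-ins, not the real KVM ones.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm { int dummy; };				/* stand-in */
typedef bool (*gfn_handler_t)(struct kvm *kvm);		/* simplified */

struct kvm_mmu_notifier_range {				/* stand-in */
	unsigned long start, end;
	gfn_handler_t handler;
	bool flush_on_ret;
	bool may_block;
};

struct kvm_mmu_notifier_return {			/* stand-in */
	bool ret;
	bool found_memslot;
};

/* Generic walker: after the rename this keeps the kvm_handle_hva_range name. */
static struct kvm_mmu_notifier_return
kvm_handle_hva_range(struct kvm *kvm, const struct kvm_mmu_notifier_range *range)
{
	struct kvm_mmu_notifier_return r = { .ret = false, .found_memslot = false };

	/* In KVM proper this walks the memslots overlapping [start, end). */
	r.ret = range->handler(kvm);
	return r;
}

/* Aging wrapper: packs the arguments and defers to the generic walker. */
static int kvm_age_hva_range(struct kvm *kvm, unsigned long start,
			     unsigned long end, gfn_handler_t handler,
			     bool flush_on_ret)
{
	const struct kvm_mmu_notifier_range range = {
		.start		= start,
		.end		= end,
		.handler	= handler,
		.flush_on_ret	= flush_on_ret,
		.may_block	= false,
	};

	return kvm_handle_hva_range(kvm, &range).ret;
}

static int kvm_age_hva_range_no_flush(struct kvm *kvm, unsigned long start,
				      unsigned long end, gfn_handler_t handler)
{
	return kvm_age_hva_range(kvm, start, end, handler, false);
}

static bool fake_age_gfn(struct kvm *kvm)		/* stand-in handler */
{
	(void)kvm;
	return true;
}

int main(void)
{
	struct kvm vm = { 0 };

	printf("young: %d\n",
	       kvm_age_hva_range_no_flush(&vm, 0x1000, 0x2000, fake_age_gfn));
	return 0;
}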