@@ -467,7 +467,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
 		return -EAGAIN;
 
 	if (pte) {
-		WARN_ON(addr_is_memory(addr) && hyp_phys_to_page(addr)->host_state != PKVM_NOPAGE);
+		WARN_ON(addr_is_memory(addr) && get_host_state(addr) != PKVM_NOPAGE);
 		return -EPERM;
 	}
 
@@ -496,7 +496,7 @@ static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_
 	phys_addr_t end = addr + size;
 
 	for (; addr < end; addr += PAGE_SIZE)
-		hyp_phys_to_page(addr)->host_state = state;
+		set_host_state(addr, state);
 }
 
 int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
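The get_host_state()/set_host_state() accessors this commit switches to are not visible in the diff itself. Judging from the expressions they replace, they are presumably thin wrappers around the hyp vmemmap; a minimal sketch, assuming they sit alongside struct hyp_page:

static inline enum pkvm_page_state get_host_state(phys_addr_t phys)
{
	/* Read the host-side ownership state straight from the vmemmap. */
	return hyp_phys_to_page(phys)->host_state;
}

static inline void set_host_state(phys_addr_t phys, enum pkvm_page_state state)
{
	/* Update the host-side state without touching any page table. */
	hyp_phys_to_page(phys)->host_state = state;
}

Either way, the effect of the two hunks above is to funnel every host_state access through a single helper instead of open-coded hyp_phys_to_page() dereferences.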
@@ -627,7 +627,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 
 	hyp_assert_lock_held(&host_mmu.lock);
 	for (; addr < end; addr += PAGE_SIZE) {
-		if (hyp_phys_to_page(addr)->host_state != state)
+		if (get_host_state(addr) != state)
 			return -EPERM;
 	}
 
@@ -637,7 +637,7 @@ static int __host_check_page_state_range(u64 addr, u64 size,
 static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
 {
-	if (hyp_phys_to_page(addr)->host_state == PKVM_NOPAGE) {
+	if (get_host_state(addr) == PKVM_NOPAGE) {
 		int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);
 
 		if (ret)
@@ -649,24 +649,24 @@ static int __host_set_page_state_range(u64 addr, u64 size,
 	return 0;
 }
 
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
+static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
 {
-	if (!kvm_pte_valid(pte))
-		return PKVM_NOPAGE;
+	phys_addr_t end = phys + size;
 
-	return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+	for (; phys < end; phys += PAGE_SIZE)
+		set_hyp_state(phys, state);
 }
 
-static int __hyp_check_page_state_range(u64 addr, u64 size,
-					enum pkvm_page_state state)
+static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
 {
-	struct check_walk_data d = {
-		.desired	= state,
-		.get_page_state	= hyp_get_page_state,
-	};
+	phys_addr_t end = phys + size;
+
+	for (; phys < end; phys += PAGE_SIZE) {
+		if (get_hyp_state(phys) != state)
+			return -EPERM;
+	}
 
-	hyp_assert_lock_held(&pkvm_pgd_lock);
-	return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+	return 0;
 }
 
 static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
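This hunk is the core of the change: the hypervisor used to recover its own page state from software bits in the hyp stage-1 PTEs, which required a page-table walk under pkvm_pgd_lock (the removed hyp_get_page_state()/check_page_state_range() path). It now reads and writes that state by physical address. The get_hyp_state()/set_hyp_state() helpers are not shown in the diff; assuming struct hyp_page grew a hyp-side state field mirroring host_state, they would look roughly like:

static inline enum pkvm_page_state get_hyp_state(phys_addr_t phys)
{
	/* Field name is an assumption; the diff only shows the call sites. */
	return hyp_phys_to_page(phys)->hyp_state;
}

static inline void set_hyp_state(phys_addr_t phys, enum pkvm_page_state state)
{
	hyp_phys_to_page(phys)->hyp_state = state;
}

A consequence worth noting: __hyp_check_page_state_range() no longer asserts pkvm_pgd_lock, because it no longer walks the hyp page table at all.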
@@ -693,8 +693,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
 int __pkvm_host_share_hyp(u64 pfn)
 {
 	u64 phys = hyp_pfn_to_phys(pfn);
-	void *virt = __hyp_va(phys);
-	enum kvm_pgtable_prot prot;
 	u64 size = PAGE_SIZE;
 	int ret;
 
@@ -704,14 +702,11 @@ int __pkvm_host_share_hyp(u64 pfn)
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	if (ret)
 		goto unlock;
-	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
-		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
-		if (ret)
-			goto unlock;
-	}
+	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+	if (ret)
+		goto unlock;
 
-	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
-	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));
 
 unlock:
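For context, pkvm_mkstate() encodes the ownership state into the software bits of a mapping's prot; its definition is roughly:

static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	/* Stash the pkvm_page_state in the pte's software-reserved bits. */
	return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}

So before this commit, sharing a page with the hypervisor had to create an EL2 mapping just to have a PTE to carry that state. With the state held in the vmemmap, __pkvm_host_share_hyp() merely flips the two state annotations, and the EL2 mapping is deferred to hyp_pin_shared_mem() (see the later hunks). The hyp-side PKVM_NOPAGE check also stops hiding behind CONFIG_NVHE_EL2_DEBUG: it is now a cheap vmemmap read rather than a page-table walk, so it can run unconditionally.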
@@ -734,15 +729,15 @@ int __pkvm_host_unshare_hyp(u64 pfn)
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
 	if (ret)
 		goto unlock;
-	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_SHARED_BORROWED);
+	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
 	if (ret)
 		goto unlock;
 	if (hyp_page_count((void *)virt)) {
 		ret = -EBUSY;
 		goto unlock;
 	}
 
-	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
+	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
 	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));
 
 unlock:
@@ -757,7 +752,6 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 	u64 phys = hyp_pfn_to_phys(pfn);
 	u64 size = PAGE_SIZE * nr_pages;
 	void *virt = __hyp_va(phys);
-	enum kvm_pgtable_prot prot;
 	int ret;
 
 	host_lock_component();
@@ -766,14 +760,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
 	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
 	if (ret)
 		goto unlock;
-	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
-		ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
-		if (ret)
-			goto unlock;
-	}
+	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
+	if (ret)
+		goto unlock;
 
-	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
-	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
+	__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
+	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
 	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));
 
 unlock:
@@ -793,15 +785,14 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
+	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
+	if (ret)
+		goto unlock;
+	ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
 	if (ret)
 		goto unlock;
-	if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
-		ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
-		if (ret)
-			goto unlock;
-	}
 
+	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
 	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
 	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));
 
@@ -816,24 +807,30 @@ int hyp_pin_shared_mem(void *from, void *to)
 {
 	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
 	u64 end = PAGE_ALIGN((u64)to);
+	u64 phys = __hyp_pa(start);
 	u64 size = end - start;
+	struct hyp_page *p;
 	int ret;
 
 	host_lock_component();
 	hyp_lock_component();
 
-	ret = __host_check_page_state_range(__hyp_pa(start), size,
-					    PKVM_PAGE_SHARED_OWNED);
+	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
 	if (ret)
 		goto unlock;
 
-	ret = __hyp_check_page_state_range(start, size,
-					   PKVM_PAGE_SHARED_BORROWED);
+	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
 	if (ret)
 		goto unlock;
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_inc(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		hyp_page_ref_inc(p);
+		if (p->refcount == 1)
+			WARN_ON(pkvm_create_mappings_locked((void *)cur,
							    (void *)cur + PAGE_SIZE,
							    PAGE_HYP));
+	}
 
 unlock:
 	hyp_unlock_component();
@@ -846,12 +843,17 @@ void hyp_unpin_shared_mem(void *from, void *to)
 {
 	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
 	u64 end = PAGE_ALIGN((u64)to);
+	struct hyp_page *p;
 
 	host_lock_component();
 	hyp_lock_component();
 
-	for (cur = start; cur < end; cur += PAGE_SIZE)
-		hyp_page_ref_dec(hyp_virt_to_page(cur));
+	for (cur = start; cur < end; cur += PAGE_SIZE) {
+		p = hyp_virt_to_page(cur);
+		if (p->refcount == 1)
+			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
+		hyp_page_ref_dec(p);
+	}
 
 	hyp_unlock_component();
 	host_unlock_component();
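With the share path no longer mapping anything at EL2, the pin/unpin pair doubles as the map/unmap point: the first pin of a page (refcount 0 -> 1) creates its EL2 mapping, and the last unpin tears it down. A sketch of a hypothetical caller, where struct some_obj stands in for whatever host-shared object the hypervisor needs to dereference:

static int use_shared_obj(struct some_obj *obj)
{
	/* First pin of the page maps it at EL2 and pins it against unshare. */
	int ret = hyp_pin_shared_mem(obj, obj + 1);

	if (ret)
		return ret;

	/* obj may be dereferenced safely here. */

	/* Last unpin drops the EL2 mapping again. */
	hyp_unpin_shared_mem(obj, obj + 1);
	return 0;
}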
@@ -911,7 +913,7 @@ int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
 		goto unlock;
 
 	page = hyp_phys_to_page(phys);
-	switch (page->host_state) {
+	switch (get_host_state(phys)) {
 	case PKVM_PAGE_OWNED:
 		WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
 		break;
@@ -964,9 +966,9 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
 	if (WARN_ON(ret))
 		return ret;
 
-	page = hyp_phys_to_page(phys);
-	if (page->host_state != PKVM_PAGE_SHARED_OWNED)
+	if (get_host_state(phys) != PKVM_PAGE_SHARED_OWNED)
 		return -EPERM;
+	page = hyp_phys_to_page(phys);
 	if (WARN_ON(!page->host_share_guest_count))
 		return -EINVAL;
 