@@ -36,6 +36,7 @@
 
 #include "amd_iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 /*
  * definitions for the ACPI scanning code
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-                                                      get_order(pci_seg->dev_table_size));
+        pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+                                               get_order(pci_seg->dev_table_size));
         if (!pci_seg->dev_table)
                 return -ENOMEM;
 
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->dev_table,
-                   get_order(pci_seg->dev_table_size));
+        iommu_free_pages(pci_seg->dev_table,
+                         get_order(pci_seg->dev_table_size));
         pci_seg->dev_table = NULL;
 }
 
 /* Allocate per PCI segment IOMMU rlookup table. */
 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->rlookup_table = (void *)__get_free_pages(
-                                                GFP_KERNEL | __GFP_ZERO,
-                                                get_order(pci_seg->rlookup_table_size));
+        pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+                                                   get_order(pci_seg->rlookup_table_size));
         if (pci_seg->rlookup_table == NULL)
                 return -ENOMEM;
 
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->rlookup_table,
-                   get_order(pci_seg->rlookup_table_size));
+        iommu_free_pages(pci_seg->rlookup_table,
+                         get_order(pci_seg->rlookup_table_size));
         pci_seg->rlookup_table = NULL;
 }
 
 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        pci_seg->irq_lookup_table = (void *)__get_free_pages(
-                                                GFP_KERNEL | __GFP_ZERO,
-                                                get_order(pci_seg->rlookup_table_size));
+        pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+                                                      get_order(pci_seg->rlookup_table_size));
         kmemleak_alloc(pci_seg->irq_lookup_table,
                        pci_seg->rlookup_table_size, 1, GFP_KERNEL);
         if (pci_seg->irq_lookup_table == NULL)
@@ -699,17 +698,17 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
 
 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
         kmemleak_free(pci_seg->irq_lookup_table);
-        free_pages((unsigned long)pci_seg->irq_lookup_table,
-                   get_order(pci_seg->rlookup_table_size));
+        iommu_free_pages(pci_seg->irq_lookup_table,
+                         get_order(pci_seg->rlookup_table_size));
         pci_seg->irq_lookup_table = NULL;
 }
 
 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
         int i;
 
-        pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
-                                                        get_order(pci_seg->alias_table_size));
+        pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+                                                 get_order(pci_seg->alias_table_size));
         if (!pci_seg->alias_table)
                 return -ENOMEM;
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 
 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
-        free_pages((unsigned long)pci_seg->alias_table,
-                   get_order(pci_seg->alias_table_size));
+        iommu_free_pages(pci_seg->alias_table,
+                         get_order(pci_seg->alias_table_size));
         pci_seg->alias_table = NULL;
 }
 
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-        iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                  get_order(CMD_BUFFER_SIZE));
+        iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+                                           get_order(CMD_BUFFER_SIZE));
 
         return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -834,19 +833,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-        free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+        iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
 void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
                                   size_t size)
 {
         int order = get_order(size);
-        void *buf = (void *)__get_free_pages(gfp, order);
+        void *buf = iommu_alloc_pages(gfp, order);
 
         if (buf &&
             check_feature(FEATURE_SNP) &&
             set_memory_4k((unsigned long)buf, (1 << order))) {
-                free_pages((unsigned long)buf, order);
+                iommu_free_pages(buf, order);
                 buf = NULL;
         }
 
@@ -856,7 +855,7 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-        iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+        iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
                                               EVT_BUFFER_SIZE);
 
         return iommu->evt_buf ? 0 : -ENOMEM;
@@ -890,14 +889,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
 {
-        free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
+        iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-        free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
-        free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+        iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+        iommu_free_pages(iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -942,13 +941,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
         if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
                 return 0;
 
-        iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                               get_order(GA_LOG_SIZE));
+        iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
         if (!iommu->ga_log)
                 goto err_out;
 
-        iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-                                                    get_order(8));
+        iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
         if (!iommu->ga_log_tail)
                 goto err_out;
 
@@ -961,15 +958,15 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-        iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+        iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
 
         return iommu->cmd_sem ? 0 : -ENOMEM;
 }
 
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
         if (iommu->cmd_sem)
-                free_page((unsigned long)iommu->cmd_sem);
+                iommu_free_page((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1034,7 +1031,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
         u32 lo, hi, devid, old_devtb_size;
         phys_addr_t old_devtb_phys;
         u16 dom_id, dte_v, irq_v;
-        gfp_t gfp_flag;
         u64 tmp;
 
         /* Each IOMMU use separate device table with the same size */
@@ -1068,9 +1064,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
         if (!old_devtb)
                 return false;
 
-        gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-        pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-                                                get_order(pci_seg->dev_table_size));
+        pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+                                                     get_order(pci_seg->dev_table_size));
         if (pci_seg->old_dev_tbl_cpy == NULL) {
                 pr_err("Failed to allocate memory for copying old device table!\n");
                 memunmap(old_devtb);
@@ -2769,8 +2764,8 @@ static void early_enable_iommus(void)
 
         for_each_pci_segment(pci_seg) {
                 if (pci_seg->old_dev_tbl_cpy != NULL) {
-                        free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-                                   get_order(pci_seg->dev_table_size));
+                        iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+                                         get_order(pci_seg->dev_table_size));
                         pci_seg->old_dev_tbl_cpy = NULL;
                 }
         }
@@ -2783,8 +2778,8 @@ static void early_enable_iommus(void)
                 pr_info("Copied DEV table from previous kernel.\n");
 
                 for_each_pci_segment(pci_seg) {
-                        free_pages((unsigned long)pci_seg->dev_table,
-                                   get_order(pci_seg->dev_table_size));
+                        iommu_free_pages(pci_seg->dev_table,
+                                         get_order(pci_seg->dev_table_size));
                         pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
                 }
         }
@@ -2989,8 +2984,8 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-        free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-                   get_order(MAX_DOMAIN_ID / 8));
+        iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+                         get_order(MAX_DOMAIN_ID / 8));
         amd_iommu_pd_alloc_bitmap = NULL;
 
         free_unity_maps();
@@ -3062,9 +3057,8 @@ static int __init early_amd_iommu_init(void)
         /* Device table - directly used by all IOMMUs */
         ret = -ENOMEM;
 
-        amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-                                                GFP_KERNEL | __GFP_ZERO,
-                                                get_order(MAX_DOMAIN_ID/8));
+        amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+                                                      get_order(MAX_DOMAIN_ID / 8));
         if (amd_iommu_pd_alloc_bitmap == NULL)
                 goto out;
 
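
What makes the conversion mechanical is the contract the new helpers provide: allocations come back zeroed (so every call site drops __GFP_ZERO) and are handed around as virtual addresses (so the (void *) and (unsigned long) casts go away). As a rough sketch of that contract -- illustrative only, not the actual drivers/iommu/iommu-pages.h, which also carries NUMA-node variants and per-IOMMU memory accounting -- the helpers can be pictured as thin wrappers over the page allocator:

/* Illustrative sketch of the assumed iommu-pages.h contract, not the
 * upstream header: allocations return a zeroed virtual address, and
 * frees take that virtual address directly and tolerate NULL.
 */
#include <linux/gfp.h>
#include <linux/mm.h>

static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
        /* Zeroing lives here, which is why the diff drops __GFP_ZERO
         * at every call site (e.g. alloc_dev_table()). */
        struct page *page = alloc_pages(gfp | __GFP_ZERO, order);

        if (unlikely(!page))
                return NULL;
        return page_address(page);
}

static inline void *iommu_alloc_page(gfp_t gfp)
{
        return iommu_alloc_pages(gfp, 0);
}

static inline void iommu_free_pages(void *virt, int order)
{
        if (!virt)
                return;
        free_pages((unsigned long)virt, order);
}

static inline void iommu_free_page(void *virt)
{
        iommu_free_pages(virt, 0);
}

Read against that contract, two details of the diff stand out: alloc_alias_table() previously allocated with plain GFP_KERNEL and now gets zeroed pages (harmless, since the table is fully initialized immediately afterwards), and if the free side tolerates NULL as sketched, the if (iommu->cmd_sem) guard kept in free_cwwb_sem() is belt-and-braces rather than required.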