@@ -74,87 +74,61 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
  *
  ****************************************************************************/
 
-static void free_page_list(struct page *freelist)
+static void free_pt_page(u64 *pt, struct list_head *freelist)
 {
-	while (freelist != NULL) {
-		unsigned long p = (unsigned long)page_address(freelist);
+	struct page *p = virt_to_page(pt);
 
-		freelist = freelist->freelist;
-		free_page(p);
-	}
+	list_add_tail(&p->lru, freelist);
 }
 
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
 {
-	struct page *p = virt_to_page((void *)pt);
+	u64 *p;
+	int i;
 
-	p->freelist = freelist;
+	for (i = 0; i < 512; ++i) {
+		/* PTE present? */
+		if (!IOMMU_PTE_PRESENT(pt[i]))
+			continue;
 
-	return p;
-}
+		/* Large PTE? */
+		if (PM_PTE_LEVEL(pt[i]) == 0 ||
+		    PM_PTE_LEVEL(pt[i]) == 7)
+			continue;
 
-#define DEFINE_FREE_PT_FN(LVL, FN)						\
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist)	\
-{										\
-	unsigned long p;							\
-	u64 *pt;								\
-	int i;									\
-										\
-	pt = (u64 *)__pt;							\
-										\
-	for (i = 0; i < 512; ++i) {						\
-		/* PTE present? */						\
-		if (!IOMMU_PTE_PRESENT(pt[i]))					\
-			continue;						\
-										\
-		/* Large PTE? */						\
-		if (PM_PTE_LEVEL(pt[i]) == 0 ||					\
-		    PM_PTE_LEVEL(pt[i]) == 7)					\
-			continue;						\
-										\
-		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);			\
-		freelist = FN(p, freelist);					\
-	}									\
-										\
-	return free_pt_page((unsigned long)pt, freelist);			\
-}
+		/*
+		 * Free the next level. No need to look at l1 tables here since
+		 * they can only contain leaf PTEs; just free them directly.
+		 */
+		p = IOMMU_PTE_PAGE(pt[i]);
+		if (lvl > 2)
+			free_pt_lvl(p, freelist, lvl - 1);
+		else
+			free_pt_page(p, freelist);
+	}
 
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
+	free_pt_page(pt, freelist);
+}
 
-static struct page *free_sub_pt(unsigned long root, int mode,
-				struct page *freelist)
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
 {
 	switch (mode) {
 	case PAGE_MODE_NONE:
 	case PAGE_MODE_7_LEVEL:
 		break;
 	case PAGE_MODE_1_LEVEL:
-		freelist = free_pt_page(root, freelist);
+		free_pt_page(root, freelist);
 		break;
 	case PAGE_MODE_2_LEVEL:
-		freelist = free_pt_l2(root, freelist);
-		break;
 	case PAGE_MODE_3_LEVEL:
-		freelist = free_pt_l3(root, freelist);
-		break;
 	case PAGE_MODE_4_LEVEL:
-		freelist = free_pt_l4(root, freelist);
-		break;
 	case PAGE_MODE_5_LEVEL:
-		freelist = free_pt_l5(root, freelist);
-		break;
 	case PAGE_MODE_6_LEVEL:
-		freelist = free_pt_l6(root, freelist);
+		free_pt_lvl(root, freelist, mode);
 		break;
 	default:
 		BUG();
 	}
-
-	return freelist;
 }
 
 void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
@@ -362,9 +336,9 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
 	return pte;
 }
 
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
 {
-	unsigned long pt;
+	u64 *pt;
 	int mode;
 
 	while (cmpxchg64(pte, pteval, 0) != pteval) {
@@ -373,12 +347,12 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
 	}
 
 	if (!IOMMU_PTE_PRESENT(pteval))
-		return freelist;
+		return;
 
-	pt   = (unsigned long)IOMMU_PTE_PAGE(pteval);
+	pt   = IOMMU_PTE_PAGE(pteval);
 	mode = IOMMU_PTE_MODE(pteval);
 
-	return free_sub_pt(pt, mode, freelist);
+	free_sub_pt(pt, mode, freelist);
 }
 
 /*
@@ -392,7 +366,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
 	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
-	struct page *freelist = NULL;
+	LIST_HEAD(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
 	int ret, i, count;
@@ -412,9 +386,9 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 		goto out;
 
 	for (i = 0; i < count; ++i)
-		freelist = free_clear_pte(&pte[i], pte[i], freelist);
+		free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (freelist != NULL)
+	if (!list_empty(&freelist))
 		updated = true;
 
 	if (count > 1) {
@@ -449,7 +423,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
 	}
 
 	/* Everything flushed out, free pages now */
-	free_page_list(freelist);
+	put_pages_list(&freelist);
 
 	return ret;
 }
@@ -511,8 +485,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 {
 	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
 	struct protection_domain *dom;
-	struct page *freelist = NULL;
-	unsigned long root;
+	LIST_HEAD(freelist);
 
 	if (pgtable->mode == PAGE_MODE_NONE)
 		return;
@@ -529,10 +502,9 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
 	       pgtable->mode > PAGE_MODE_6_LEVEL);
 
-	root = (unsigned long)pgtable->root;
-	freelist = free_sub_pt(root, pgtable->mode, freelist);
+	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
 
-	free_page_list(freelist);
+	put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
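
For readers unfamiliar with the idiom, the diff replaces the driver's hand-rolled chain through page->freelist with chaining pages through their existing page->lru node on a caller-provided struct list_head, so deferred page-table pages can be released in a single put_pages_list() call after the IOTLB flush. The following is a minimal standalone sketch of that pattern, not part of the patch; the helper names are hypothetical and it assumes order-0 pages obtained with get_zeroed_page().

/*
 * Hypothetical sketch of the freelist idiom used in the patch above:
 * pages queued for deferred freeing are chained through page->lru on a
 * caller-provided list_head, then released together with put_pages_list().
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Queue one table page; nothing is freed until the caller is ready. */
static void defer_free_table(void *table, struct list_head *freelist)
{
	struct page *p = virt_to_page(table);

	list_add_tail(&p->lru, freelist);
}

static void freelist_example(void)
{
	LIST_HEAD(freelist);
	void *table = (void *)get_zeroed_page(GFP_KERNEL);

	if (!table)
		return;

	defer_free_table(table, &freelist);

	/* ... flush the IOTLB here before the pages are reused ... */

	put_pages_list(&freelist);	/* releases every queued page */
}

Compared with the old page->freelist chain, this keeps the bookkeeping in the page's existing lru list node and lets the core MM batch the frees.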