@@ -1257,18 +1257,17 @@ static int iommu_completion_wait(struct amd_iommu *iommu)

static void domain_flush_complete(struct protection_domain *domain)
{
-        int i;
+        struct pdom_iommu_info *pdom_iommu_info;
+        unsigned long i;

-        for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-                if (domain && !domain->dev_iommu[i])
-                        continue;
+        lockdep_assert_held(&domain->lock);

-                /*
-                 * Devices of this domain are behind this IOMMU
-                 * We need to wait for completion of all commands.
-                 */
-                iommu_completion_wait(amd_iommus[i]);
-        }
+        /*
+         * Devices of this domain are behind this IOMMU
+         * We need to wait for completion of all commands.
+         */
+        xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
+                iommu_completion_wait(pdom_iommu_info->iommu);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
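Note: the converted loop above relies on per-domain bookkeeping defined outside this hunk, where the fixed-size dev_iommu[] counters give way to an xarray keyed by iommu->index. The sketch below shows the assumed shape of that entry and of the new protection_domain field; the field names and comments are illustrative, not text from this diff.

struct pdom_iommu_info {
        struct amd_iommu *iommu;        /* IOMMU serving the domain's devices */
        u32 refcnt;                     /* attached devices behind this IOMMU */
};

struct protection_domain {
        /* ... existing fields (lock, dev_list, iop, ...) ... */
        struct xarray iommu_array;      /* struct pdom_iommu_info, indexed by iommu->index */
};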
@@ -1450,21 +1449,22 @@ static int domain_flush_pages_v2(struct protection_domain *pdom,
static int domain_flush_pages_v1(struct protection_domain *pdom,
                                 u64 address, size_t size)
{
+        struct pdom_iommu_info *pdom_iommu_info;
        struct iommu_cmd cmd;
-        int ret = 0, i;
+        int ret = 0;
+        unsigned long i;
+
+        lockdep_assert_held(&pdom->lock);

        build_inv_iommu_pages(&cmd, address, size,
                              pdom->id, IOMMU_NO_PASID, false);

-        for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-                if (!pdom->dev_iommu[i])
-                        continue;
-
+        xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
                /*
                 * Devices of this domain are behind this IOMMU
                 * We need a TLB flush
                 */
-                ret |= iommu_queue_command(amd_iommus[i], &cmd);
+                ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
        }

        return ret;
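The index variable becomes unsigned long because that is the type xa_for_each() hands back, and the old !pdom->dev_iommu[i] test is dropped because the iteration only visits populated slots. A minimal sketch of that contract, reusing the names from the hunk above:

unsigned long index;                    /* xa_for_each() requires unsigned long */
struct pdom_iommu_info *info;

/* Only indices that actually hold an entry are visited; empty slots are skipped. */
xa_for_each(&pdom->iommu_array, index, info)
        ret |= iommu_queue_command(info->iommu, &cmd);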
@@ -1503,6 +1503,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
                                  u64 address, size_t size)
{
+        lockdep_assert_held(&domain->lock);
+
        if (likely(!amd_iommu_np_cache)) {
                __domain_flush_pages(domain, address, size);

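The new lockdep_assert_held() documents that callers must already hold domain->lock, which protection_domain_init() below initializes as a spinlock. A hedged sketch of the expected calling pattern; the locking context shown here is illustrative, not taken from this diff:

unsigned long flags;

/* Take the domain lock before flushing; the assert above now catches paths
 * that forget to.
 */
spin_lock_irqsave(&domain->lock, flags);
amd_iommu_domain_flush_pages(domain, address, size);
spin_unlock_irqrestore(&domain->lock, flags);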
@@ -2014,6 +2016,50 @@ static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
        free_gcr3_table(gcr3_info);
}

+static int pdom_attach_iommu(struct amd_iommu *iommu,
+                             struct protection_domain *pdom)
+{
+        struct pdom_iommu_info *pdom_iommu_info, *curr;
+
+        pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+        if (pdom_iommu_info) {
+                pdom_iommu_info->refcnt++;
+                return 0;
+        }
+
+        pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
+        if (!pdom_iommu_info)
+                return -ENOMEM;
+
+        pdom_iommu_info->iommu = iommu;
+        pdom_iommu_info->refcnt = 1;
+
+        curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
+                          NULL, pdom_iommu_info, GFP_ATOMIC);
+        if (curr) {
+                kfree(pdom_iommu_info);
+                return -ENOSPC;
+        }
+
+        return 0;
+}
+
+static void pdom_detach_iommu(struct amd_iommu *iommu,
+                              struct protection_domain *pdom)
+{
+        struct pdom_iommu_info *pdom_iommu_info;
+
+        pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+        if (!pdom_iommu_info)
+                return;
+
+        pdom_iommu_info->refcnt--;
+        if (pdom_iommu_info->refcnt == 0) {
+                xa_erase(&pdom->iommu_array, iommu->index);
+                kfree(pdom_iommu_info);
+        }
+}
+
static int do_attach(struct iommu_dev_data *dev_data,
                     struct protection_domain *domain)
{
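pdom_attach_iommu() follows the allocate-then-publish idiom: take a reference if xa_load() finds an existing entry, otherwise allocate a candidate with GFP_ATOMIC (consistent with being called from atomic context) and install it with xa_cmpxchg() only if the slot is still empty, freeing the local copy when something else got there first. xa_cmpxchg() returns the entry previously in the slot, or an xa_err()-encoded pointer if the xarray itself could not allocate; the sketch below separates those two cases purely as an illustration of the API and is not part of this diff:

curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
                  NULL, pdom_iommu_info, GFP_ATOMIC);
if (xa_is_err(curr)) {                  /* xarray-internal allocation failure */
        kfree(pdom_iommu_info);
        return xa_err(curr);
}
if (curr) {                             /* lost the race: slot already populated */
        kfree(pdom_iommu_info);
        return -ENOSPC;
}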
@@ -2030,13 +2076,17 @@ static int do_attach(struct iommu_dev_data *dev_data,
        cfg->amd.nid = dev_to_node(dev_data->dev);

        /* Do reference counting */
-        domain->dev_iommu[iommu->index] += 1;
+        ret = pdom_attach_iommu(iommu, domain);
+        if (ret)
+                return ret;

        /* Setup GCR3 table */
        if (pdom_is_sva_capable(domain)) {
                ret = init_gcr3_table(dev_data, domain);
-                if (ret)
+                if (ret) {
+                        pdom_detach_iommu(iommu, domain);
                        return ret;
+                }
        }

        return ret;
@@ -2062,7 +2112,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
        list_del(&dev_data->list);

        /* decrease reference counters - needs to happen after the flushes */
-        domain->dev_iommu[iommu->index] -= 1;
+        pdom_detach_iommu(iommu, domain);
}

/*
@@ -2258,6 +2308,7 @@ static void protection_domain_init(struct protection_domain *domain, int nid)
        spin_lock_init(&domain->lock);
        INIT_LIST_HEAD(&domain->dev_list);
        INIT_LIST_HEAD(&domain->dev_data_list);
+        xa_init(&domain->iommu_array);
        domain->iop.pgtbl.cfg.amd.nid = nid;
}
