@@ -2020,16 +2020,23 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
 			      struct protection_domain *pdom)
 {
 	struct pdom_iommu_info *pdom_iommu_info, *curr;
+	struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&pdom->lock, flags);

 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
 	if (pdom_iommu_info) {
 		pdom_iommu_info->refcnt++;
-		return 0;
+		goto out_unlock;
 	}

 	pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
-	if (!pdom_iommu_info)
-		return -ENOMEM;
+	if (!pdom_iommu_info) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}

 	pdom_iommu_info->iommu = iommu;
 	pdom_iommu_info->refcnt = 1;
@@ -2038,43 +2045,52 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
 			  NULL, pdom_iommu_info, GFP_ATOMIC);
 	if (curr) {
 		kfree(pdom_iommu_info);
-		return -ENOSPC;
+		ret = -ENOSPC;
+		goto out_unlock;
 	}

-	return 0;
+	/* Update NUMA Node ID */
+	if (cfg->amd.nid == NUMA_NO_NODE)
+		cfg->amd.nid = dev_to_node(&iommu->dev->dev);
+
+out_unlock:
+	spin_unlock_irqrestore(&pdom->lock, flags);
+	return ret;
 }

 static void pdom_detach_iommu(struct amd_iommu *iommu,
 			      struct protection_domain *pdom)
 {
 	struct pdom_iommu_info *pdom_iommu_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdom->lock, flags);

 	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
-	if (!pdom_iommu_info)
+	if (!pdom_iommu_info) {
+		spin_unlock_irqrestore(&pdom->lock, flags);
 		return;
+	}

 	pdom_iommu_info->refcnt--;
 	if (pdom_iommu_info->refcnt == 0) {
 		xa_erase(&pdom->iommu_array, iommu->index);
 		kfree(pdom_iommu_info);
 	}
+
+	spin_unlock_irqrestore(&pdom->lock, flags);
 }

 static int do_attach(struct iommu_dev_data *dev_data,
 		     struct protection_domain *domain)
 {
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-	struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
 	int ret = 0;

 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);

-	/* Update NUMA Node ID */
-	if (cfg->amd.nid == NUMA_NO_NODE)
-		cfg->amd.nid = dev_to_node(dev_data->dev);
-
 	/* Do reference counting */
 	ret = pdom_attach_iommu(iommu, domain);
 	if (ret)
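
With the two hunks above, pdom_attach_iommu()/pdom_detach_iommu() become a self-contained get-or-create/put pair: pdom->lock now serializes the xa_load()/xa_cmpxchg() sequence together with the refcount and the one-time NUMA node hint, so none of it can race with a concurrent attach. Below is a minimal standalone sketch of the same idiom; struct pdom_info and pdom_info_get() are hypothetical names for illustration, not the driver's own:

/* Hypothetical reduced form of the locked get-or-create pattern above. */
#include <linux/xarray.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct pdom_info {
	int refcnt;
};

static int pdom_info_get(struct xarray *xa, spinlock_t *lock,
			 unsigned long index)
{
	struct pdom_info *info, *curr;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(lock, flags);

	info = xa_load(xa, index);
	if (info) {
		info->refcnt++;		/* fast path: entry already tracked */
		goto out_unlock;
	}

	/* GFP_ATOMIC because we allocate while holding a spinlock. */
	info = kzalloc(sizeof(*info), GFP_ATOMIC);
	if (!info) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	info->refcnt = 1;

	/* Install only if the slot is still empty; free on failure. */
	curr = xa_cmpxchg(xa, index, NULL, info, GFP_ATOMIC);
	if (curr) {
		kfree(info);
		ret = -ENOSPC;
	}

out_unlock:
	spin_unlock_irqrestore(lock, flags);
	return ret;
}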
@@ -2096,12 +2112,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
 	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	unsigned long flags;

 	/* Clear DTE and flush the entry */
 	dev_update_dte(dev_data, false);

 	/* Flush IOTLB and wait for the flushes to finish */
+	spin_lock_irqsave(&domain->lock, flags);
 	amd_iommu_domain_flush_all(domain);
+	spin_unlock_irqrestore(&domain->lock, flags);

 	/* Clear GCR3 table */
 	if (pdom_is_sva_capable(domain))
@@ -2123,11 +2142,8 @@ static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
-	unsigned long flags;
 	int ret = 0;

-	spin_lock_irqsave(&domain->lock, flags);
-
 	dev_data = dev_iommu_priv_get(dev);

 	spin_lock(&dev_data->lock);
@@ -2142,8 +2158,6 @@ static int attach_device(struct device *dev,
 out:
 	spin_unlock(&dev_data->lock);

-	spin_unlock_irqrestore(&domain->lock, flags);
-
 	return ret;
 }

@@ -2153,13 +2167,9 @@ static int attach_device(struct device *dev,
 static void detach_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-	unsigned long flags;
 	bool ppr = dev_data->ppr;

-	spin_lock_irqsave(&domain->lock, flags);
-
 	spin_lock(&dev_data->lock);

 	/*
@@ -2183,8 +2193,6 @@ static void detach_device(struct device *dev)
 out:
 	spin_unlock(&dev_data->lock);

-	spin_unlock_irqrestore(&domain->lock, flags);
-
 	/* Remove IOPF handler */
 	if (ppr)
 		amd_iommu_iopf_remove_device(iommu, dev_data);
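
The remaining hunks drop the domain->lock that attach_device()/detach_device() used to hold across the whole operation. After this change, the domain lock is taken only where domain-wide state is actually touched: inside pdom_attach_iommu()/pdom_detach_iommu() and around the IOTLB flush in do_detach(), while dev_data->lock continues to guard per-device state. A rough illustration of the resulting lock scopes; example_detach() is a hypothetical caller, not driver code:

/* Hypothetical caller illustrating the narrowed lock scopes. */
static void example_detach(struct device *dev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	struct protection_domain *domain = dev_data->domain;
	unsigned long flags;

	/* Per-device state: device lock only, no domain lock held. */
	spin_lock(&dev_data->lock);
	/* ... unlink dev_data from the domain ... */
	spin_unlock(&dev_data->lock);

	/* Domain-wide flush: domain lock held just for this one call. */
	spin_lock_irqsave(&domain->lock, flags);
	amd_iommu_domain_flush_all(domain);
	spin_unlock_irqrestore(&domain->lock, flags);
}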