@@ -43,7 +43,6 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
 	}
 	iommu_mm->pasid = pasid;
 	INIT_LIST_HEAD(&iommu_mm->sva_domains);
-	INIT_LIST_HEAD(&iommu_mm->sva_handles);
 	/*
 	 * Make sure the write to mm->iommu_mm is not reordered in front of
 	 * initialization to iommu_mm fields. If it does, readers may see a
@@ -71,11 +70,16 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
  */
 struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
 {
+	struct iommu_group *group = dev->iommu_group;
+	struct iommu_attach_handle *attach_handle;
 	struct iommu_mm_data *iommu_mm;
 	struct iommu_domain *domain;
 	struct iommu_sva *handle;
 	int ret;
 
+	if (!group)
+		return ERR_PTR(-ENODEV);
+
 	mutex_lock(&iommu_sva_lock);
 
 	/* Allocate mm->pasid if necessary. */
@@ -85,12 +89,22 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 		goto out_unlock;
 	}
 
-	list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
-		if (handle->dev == dev) {
-			refcount_inc(&handle->users);
-			mutex_unlock(&iommu_sva_lock);
-			return handle;
+	/* A bond already exists, just take a reference. */
+	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
+	if (!IS_ERR(attach_handle)) {
+		handle = container_of(attach_handle, struct iommu_sva, handle);
+		if (attach_handle->domain->mm != mm) {
+			ret = -EBUSY;
+			goto out_unlock;
 		}
+		refcount_inc(&handle->users);
+		mutex_unlock(&iommu_sva_lock);
+		return handle;
+	}
+
+	if (PTR_ERR(attach_handle) != -ENOENT) {
+		ret = PTR_ERR(attach_handle);
+		goto out_unlock;
 	}
 
 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
@@ -101,7 +115,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 
 	/* Search for an existing domain. */
 	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
-		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
+						&handle->handle);
 		if (!ret) {
 			domain->users++;
 			goto out;
@@ -115,18 +130,17 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 		goto out_free_handle;
 	}
 
-	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
+					&handle->handle);
 	if (ret)
 		goto out_free_domain;
 	domain->users = 1;
 	list_add(&domain->next, &mm->iommu_mm->sva_domains);
 
 out:
 	refcount_set(&handle->users, 1);
-	list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
 	mutex_unlock(&iommu_sva_lock);
 	handle->dev = dev;
-	handle->domain = domain;
 	return handle;
 
 out_free_domain:
@@ -149,7 +163,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
  */
 void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
-	struct iommu_domain *domain = handle->domain;
+	struct iommu_domain *domain = handle->handle.domain;
 	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
 	struct device *dev = handle->dev;
 
@@ -158,7 +172,6 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
 		mutex_unlock(&iommu_sva_lock);
 		return;
 	}
-	list_del(&handle->handle_item);
 
 	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
 	if (--domain->users == 0) {
@@ -172,7 +185,7 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
 
 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 {
-	struct iommu_domain *domain = handle->domain;
+	struct iommu_domain *domain = handle->handle.domain;
 
 	return mm_get_enqcmd_pasid(domain->mm);
 }
@@ -261,7 +274,8 @@ static void iommu_sva_handle_iopf(struct work_struct *work)
 		if (status != IOMMU_PAGE_RESP_SUCCESS)
 			break;
 
-		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+		status = iommu_sva_handle_mm(&iopf->fault,
+					     group->attach_handle->domain->mm);
 	}
 
 	iopf_group_response(group, status);
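
The lookup path added above depends on struct iommu_sva embedding a struct iommu_attach_handle, so the bond can be recovered with container_of() from the handle returned by iommu_attach_handle_get(). The snippet below is a minimal, standalone sketch of that embedding pattern only; the struct layouts and the userspace container_of() macro are simplified stand-ins for illustration, not the kernel's real definitions.

/*
 * Sketch of the embedded-handle pattern used by the patch: the attach
 * handle is a member of struct iommu_sva, and the enclosing bond is
 * recovered from the handle pointer with container_of().
 */
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_attach_handle {
	void *domain;				/* kernel: struct iommu_domain * */
};

struct iommu_sva {
	struct iommu_attach_handle handle;	/* embedded handle, as in the patch */
	void *dev;				/* kernel: struct device * */
	int users;				/* kernel: refcount_t */
};

int main(void)
{
	struct iommu_sva sva = { .users = 1 };

	/* Stands in for the handle iommu_attach_handle_get() would return. */
	struct iommu_attach_handle *attach_handle = &sva.handle;

	/* Same recovery step as in the patched iommu_sva_bind_device(). */
	struct iommu_sva *bond = container_of(attach_handle, struct iommu_sva, handle);

	printf("recovered bond matches: %s\n", bond == &sva ? "yes" : "no");
	return 0;
}

Because the handle is the first member the pointer arithmetic happens to be a no-op here, but container_of() keeps the recovery correct even if the field is later moved within the structure.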