@@ -1289,52 +1289,13 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 }
 
-static int iommu_init_domains(struct intel_iommu *iommu)
-{
-	u32 ndomains;
-
-	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("%s: Number of Domains supported <%d>\n",
-		 iommu->name, ndomains);
-
-	spin_lock_init(&iommu->lock);
-
-	iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
-	if (!iommu->domain_ids)
-		return -ENOMEM;
-
-	/*
-	 * If Caching mode is set, then invalid translations are tagged
-	 * with domain-id 0, hence we need to pre-allocate it. We also
-	 * use domain-id 0 as a marker for non-allocated domain-id, so
-	 * make sure it is not used for a real domain.
-	 */
-	set_bit(0, iommu->domain_ids);
-
-	/*
-	 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid
-	 * entry for first-level or pass-through translation modes should
-	 * be programmed with a domain id different from those used for
-	 * second-level or nested translation. We reserve a domain id for
-	 * this purpose. This domain id is also used for identity domain
-	 * in legacy mode.
-	 */
-	set_bit(FLPT_DEFAULT_DID, iommu->domain_ids);
-
-	return 0;
-}
-
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu->domain_ids)
-		return;
-
 	/*
 	 * All iommu domains must have been detached from the devices,
 	 * hence there should be no domain IDs in use.
 	 */
-	if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))
-		    > NUM_RESERVED_DID))
+	if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
 		return;
 
 	if (iommu->gcmd & DMA_GCMD_TE)
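Note: with iommu_init_domains() gone, the two reserved domain IDs (0, and FLPT_DEFAULT_DID for first-level/pass-through translation per VT-d spec rev3.0, section 6.2.3.1) are no longer pre-set in a bitmap; they stay out of circulation because regular allocation simply starts above them (see the IDA_START_DID lower bound in domain_attach_iommu() further down). The IDA's init site is outside this diff, so the following is only a sketch of what the setup implies, with a hypothetical function name:

/* Sketch only; the actual init site is not part of this hunk. */
#include <linux/idr.h>

static void example_domain_ida_init(struct intel_iommu *iommu)
{
	/* An IDA starts empty and needs no preallocated backing store. */
	ida_init(&iommu->domain_ida);
	/*
	 * Nothing is pre-set for the reserved IDs: allocators pass
	 * IDA_START_DID as the minimum, so ID 0 and FLPT_DEFAULT_DID
	 * are never handed out as regular domain IDs.
	 */
}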
@@ -1343,11 +1304,6 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
-	if (iommu->domain_ids) {
-		bitmap_free(iommu->domain_ids);
-		iommu->domain_ids = NULL;
-	}
-
 	if (iommu->copied_tables) {
 		bitmap_free(iommu->copied_tables);
 		iommu->copied_tables = NULL;
@@ -1380,7 +1336,6 @@ static bool first_level_by_default(struct intel_iommu *iommu)
 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 {
 	struct iommu_domain_info *info, *curr;
-	unsigned long ndomains;
 	int num, ret = -ENOSPC;
 
 	if (domain->domain.type == IOMMU_DOMAIN_SVA)
@@ -1399,14 +1354,13 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 		return 0;
 	}
 
-	ndomains = cap_ndoms(iommu->cap);
-	num = find_first_zero_bit(iommu->domain_ids, ndomains);
-	if (num >= ndomains) {
+	num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
+			      cap_ndoms(iommu->cap) - 1, GFP_ATOMIC);
+	if (num < 0) {
 		pr_err("%s: No free domain ids\n", iommu->name);
 		goto err_unlock;
 	}
 
-	set_bit(num, iommu->domain_ids);
 	info->refcnt	= 1;
 	info->did	= num;
 	info->iommu	= iommu;
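Note: ida_alloc_range() returns the smallest free ID within [min, max], or a negative errno (typically -ENOSPC) when the range is exhausted, which is why the old `num >= ndomains` test becomes `num < 0`. GFP_ATOMIC is needed here because iommu->lock, a spinlock, is held across the call. A self-contained sketch of the allocate/free pattern (the example_* names are illustrative, not from the driver):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_use_domain_id(unsigned int max_id)
{
	/* Smallest free ID in [2, max_id]; negative errno if none left. */
	int id = ida_alloc_range(&example_ida, 2, max_id, GFP_KERNEL);

	if (id < 0)
		return id;

	/* ... program the hardware with 'id' ... */

	ida_free(&example_ida, id);	/* return the ID to the pool */
	return 0;
}

The matching ida_free() calls replace clear_bit() in both the error path below and domain_detach_iommu().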
@@ -1421,7 +1375,7 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	return 0;
 
 err_clear:
-	clear_bit(info->did, iommu->domain_ids);
+	ida_free(&iommu->domain_ida, info->did);
 err_unlock:
 	spin_unlock(&iommu->lock);
 	kfree(info);
@@ -1438,7 +1392,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
 	spin_lock(&iommu->lock);
 	info = xa_load(&domain->iommu_array, iommu->seq_id);
 	if (--info->refcnt == 0) {
-		clear_bit(info->did, iommu->domain_ids);
+		ida_free(&iommu->domain_ida, info->did);
 		xa_erase(&domain->iommu_array, iommu->seq_id);
 		domain->nid = NUMA_NO_NODE;
 		kfree(info);
@@ -2041,7 +1995,7 @@ static int copy_context_table(struct intel_iommu *iommu,
 
 		did = context_domain_id(&ce);
 		if (did >= 0 && did < cap_ndoms(iommu->cap))
-			set_bit(did, iommu->domain_ids);
+			ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
 
 		set_context_copied(iommu, bus, devfn);
 		new_ce[idx] = ce;
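Note: with min == max, ida_alloc_range() either claims exactly that ID or fails if it is already allocated, making it the IDA equivalent of the old set_bit(): domain IDs found in context entries copied from the previous (kdump) kernel are marked as in use, and a duplicate claim from a second context entry simply fails harmlessly, which is presumably why the return value is ignored. A small illustration, reusing the hypothetical example_ida from the sketch above:

	int ret;

	ret = ida_alloc_range(&example_ida, 5, 5, GFP_KERNEL);	/* ret == 5 */
	ret = ida_alloc_range(&example_ida, 5, 5, GFP_KERNEL);	/* ret == -ENOSPC */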
@@ -2168,11 +2122,6 @@ static int __init init_dmars(void)
 		}
 
 		intel_iommu_init_qi(iommu);
-
-		ret = iommu_init_domains(iommu);
-		if (ret)
-			goto free_iommu;
-
 		init_translation_status(iommu);
 
 		if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
@@ -2650,9 +2599,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
 
-	ret = iommu_init_domains(iommu);
-	if (ret == 0)
-		ret = iommu_alloc_root_entry(iommu);
+	ret = iommu_alloc_root_entry(iommu);
 	if (ret)
 		goto out;
 
@@ -2971,9 +2918,14 @@ static ssize_t domains_used_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
 	struct intel_iommu *iommu = dev_to_intel_iommu(dev);
-	return sysfs_emit(buf, "%d\n",
-			  bitmap_weight(iommu->domain_ids,
-					cap_ndoms(iommu->cap)));
+	unsigned int count = 0;
+	int id;
+
+	for (id = 0; id < cap_ndoms(iommu->cap); id++)
+		if (ida_exists(&iommu->domain_ida, id))
+			count++;
+
+	return sysfs_emit(buf, "%d\n", count);
 }
 
 static DEVICE_ATTR_RO(domains_used);
 
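Note: the IDA has no bitmap_weight() counterpart, so the sysfs handler counts allocated IDs by probing each candidate with ida_exists(). The reported value also shifts slightly: the old bitmap_weight() included the always-set reserved bits, while the IDA holds only IDs that were actually allocated. A generic version of the counting loop, for illustration (example_ida_weight is a hypothetical name):

#include <linux/idr.h>

static unsigned int example_ida_weight(struct ida *ida, unsigned int max)
{
	unsigned int id, count = 0;

	/* ida_exists() is true only for IDs currently allocated. */
	for (id = 0; id < max; id++)
		if (ida_exists(ida, id))
			count++;

	return count;
}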