@@ -18,6 +18,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
+#include <linux/idr.h>
 #include <linux/iommu-helper.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
@@ -52,8 +53,6 @@
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)
 
-static DEFINE_SPINLOCK(pd_bitmap_lock);
-
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
@@ -70,6 +69,12 @@ struct iommu_cmd {
 	u32 data[4];
 };
 
+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. This IDA
+ * tracks which of them are already in use.
+ */
+DEFINE_IDA(pdom_ids);
+
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
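
The DEFINE_IDA() added above replaces the old global allocation bitmap and its pd_bitmap_lock: the IDA serializes internally, so the dedicated spinlock removed in the earlier hunk is no longer needed, and it always hands out the smallest free ID in a caller-supplied range. A minimal sketch of those semantics as a throwaway test module — demo_ids and the demo_* names are hypothetical, not part of this patch:

	#include <linux/idr.h>
	#include <linux/module.h>

	static DEFINE_IDA(demo_ids);	/* stand-in for pdom_ids */

	static int __init demo_init(void)
	{
		/* IDs are handed out smallest-first within [min, max];
		 * failure is a negative errno. No external lock needed. */
		int a = ida_alloc_range(&demo_ids, 1, 15, GFP_KERNEL);	/* -> 1 */
		int b = ida_alloc_range(&demo_ids, 1, 15, GFP_KERNEL);	/* -> 2 */

		ida_free(&demo_ids, a);					/* release 1 */
		a = ida_alloc_range(&demo_ids, 1, 15, GFP_KERNEL);	/* -> 1 again */
		pr_info("demo ids: %d %d\n", a, b);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		ida_destroy(&demo_ids);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
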
@@ -1643,31 +1648,14 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
  *
  ****************************************************************************/
 
-static u16 domain_id_alloc(void)
+static int pdom_id_alloc(void)
 {
-	unsigned long flags;
-	int id;
-
-	spin_lock_irqsave(&pd_bitmap_lock, flags);
-	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
-	BUG_ON(id == 0);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__set_bit(id, amd_iommu_pd_alloc_bitmap);
-	else
-		id = 0;
-	spin_unlock_irqrestore(&pd_bitmap_lock, flags);
-
-	return id;
+	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
 }
 
-static void domain_id_free(int id)
+static void pdom_id_free(int id)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&pd_bitmap_lock, flags);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
-	spin_unlock_irqrestore(&pd_bitmap_lock, flags);
+	ida_free(&pdom_ids, id);
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
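
Note the changed contract here: domain_id_alloc() returned a u16 and used 0 to signal failure, while pdom_id_alloc() returns an int that is either a valid ID or a negative errno from ida_alloc_range(). The range deliberately starts at 1, so domain ID 0 stays reserved and a return value of exactly 0 cannot happen; GFP_ATOMIC is kept, presumably because call sites may run in atomic context. A hedged sketch of the check every caller below now performs — example_get_domid is a hypothetical helper, not part of the patch:

	/* Mirrors the call-site pattern used by this patch. */
	static int example_get_domid(u16 *out)
	{
		int domid = pdom_id_alloc();

		/* "<= 0" catches the negative errnos; 0 itself cannot
		 * occur because the allocation range starts at 1. */
		if (domid <= 0)
			return -ENOSPC;

		*out = domid;	/* known to fit: 1..MAX_DOMAIN_ID-1 */
		return 0;
	}
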
@@ -1712,7 +1700,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
 	gcr3_info->glx = 0;
 
 	/* Free per device domain ID */
-	domain_id_free(gcr3_info->domid);
+	pdom_id_free(gcr3_info->domid);
 
 	iommu_free_page(gcr3_info->gcr3_tbl);
 	gcr3_info->gcr3_tbl = NULL;
@@ -1739,6 +1727,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
 {
 	int levels = get_gcr3_levels(pasids);
 	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
+	int domid;
 
 	if (levels > amd_iommu_max_glx_val)
 		return -EINVAL;
@@ -1747,11 +1736,14 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
 		return -EBUSY;
 
 	/* Allocate per device domain ID */
-	gcr3_info->domid = domain_id_alloc();
+	domid = pdom_id_alloc();
+	if (domid <= 0)
+		return -ENOSPC;
+	gcr3_info->domid = domid;
 
 	gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
 	if (gcr3_info->gcr3_tbl == NULL) {
-		domain_id_free(gcr3_info->domid);
+		pdom_id_free(domid);
 		return -ENOMEM;
 	}
 
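
setup_gcr3_table() acquires the domain ID first and rolls it back if the page allocation fails, releasing resources in reverse order of acquisition. For longer acquire chains the kernel usually expresses the same unwind with gotos; an equivalent sketch under that style, with hypothetical names:

	/* Goto-unwind variant of the two-step setup above (illustrative). */
	static int example_setup(struct gcr3_tbl_info *info, int nid)
	{
		int ret = -ENOSPC;
		int domid = pdom_id_alloc();		/* step 1: reserve an ID */

		if (domid <= 0)
			goto out;

		ret = -ENOMEM;
		info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC); /* step 2 */
		if (!info->gcr3_tbl)
			goto out_free_id;

		info->domid = domid;			/* commit only on success */
		return 0;

	out_free_id:
		pdom_id_free(domid);			/* undo step 1 */
	out:
		return ret;
	}
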
@@ -2262,7 +2254,7 @@ void protection_domain_free(struct protection_domain *domain)
 	WARN_ON(!list_empty(&domain->dev_list));
 	if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
 		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
-	domain_id_free(domain->id);
+	pdom_id_free(domain->id);
 	kfree(domain);
 }
 
@@ -2277,16 +2269,18 @@ static void protection_domain_init(struct protection_domain *domain, int nid)
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
 	struct protection_domain *domain;
+	int domid;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
-	domain->id = domain_id_alloc();
-	if (!domain->id) {
+	domid = pdom_id_alloc();
+	if (domid <= 0) {
 		kfree(domain);
 		return NULL;
 	}
+	domain->id = domid;
 
 	protection_domain_init(domain, nid);
 
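
The temporary domid in protection_domain_alloc() is not just style. Assuming struct protection_domain keeps its id in a 16-bit field (it is sized for MAX_DOMAIN_ID), assigning the allocator's int result straight into it would truncate a negative errno into a large bogus ID that the old `if (!domain->id)` test could never catch. A small illustrative helper:

	/* Hypothetical helper; shows the hazard the local int avoids.
	 * Assumes domain->id is u16, so e.g. -ENOMEM (-12) would truncate
	 * to the bogus domain ID 0xfff4 if stored unchecked. */
	static bool example_assign_id(struct protection_domain *domain)
	{
		int domid = pdom_id_alloc();	/* full-width, sign preserved */

		if (domid <= 0)
			return false;		/* reject errno before narrowing */
		domain->id = domid;		/* 1..MAX_DOMAIN_ID-1 fits in u16 */
		return true;
	}
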
@@ -2361,7 +2355,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 
 	ret = pdom_setup_pgtable(domain, type, pgtable);
 	if (ret) {
-		domain_id_free(domain->id);
+		pdom_id_free(domain->id);
 		kfree(domain);
 		return ERR_PTR(ret);
 	}
@@ -2493,7 +2487,7 @@ void amd_iommu_init_identity_domain(void)
 	domain->ops = &identity_domain_ops;
 	domain->owner = &amd_iommu_ops;
 
-	identity_domain.id = domain_id_alloc();
+	identity_domain.id = pdom_id_alloc();
 
 	protection_domain_init(&identity_domain, NUMA_NO_NODE);
 }