Commit d6b47de

hegdevasant authored and joergroedel committed
iommu/amd: Reduce domain lock scope in attach device path
Currently the attach device path takes the protection domain lock followed by the dev_data lock. Most of the operations in this path are specific to the device data, except pdom_attach_iommu(), which updates the protection domain structure. Hence reduce the scope of the protection domain lock.

Note that this changes the locking order: the device lock is now taken before the domain lock (group->mutex -> dev_data->lock -> pdom->lock). Since dev_data->lock is used only in the device attachment path, changing the order is fine and will not create any issue.

Finally, move the NUMA node assignment into pdom_attach_iommu().

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20241030063556.6104-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 07bbd66 commit d6b47de
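
To make the new nesting concrete, here is a minimal, illustrative-only sketch in userspace C (plain pthreads, not kernel code). struct pdom, struct dev_data, pdom_attach() and attach_dev() below are hypothetical stand-ins for the kernel objects; the sketch shows only the order in which the two locks nest after this change:

/*
 * Illustrative-only sketch (userspace pthreads, not kernel code).
 * It mirrors the lock nesting this commit establishes:
 * dev_data->lock is taken first, pdom->lock only inside the
 * refcounting helper. All names here are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

struct pdom {
        pthread_mutex_t lock;   /* stands in for pdom->lock */
        int iommu_refcnt;       /* stands in for the per-IOMMU refcount */
};

struct dev_data {
        pthread_mutex_t lock;   /* stands in for dev_data->lock */
        struct pdom *domain;
};

/* Like pdom_attach_iommu(): the domain lock covers only this update. */
static int pdom_attach(struct pdom *pdom)
{
        pthread_mutex_lock(&pdom->lock);
        pdom->iommu_refcnt++;
        pthread_mutex_unlock(&pdom->lock);
        return 0;
}

/* Like attach_device(): device lock first, domain lock nested inside. */
static int attach_dev(struct dev_data *dd, struct pdom *pdom)
{
        int ret;

        pthread_mutex_lock(&dd->lock);
        dd->domain = pdom;
        ret = pdom_attach(pdom);        /* pdom->lock nests under dd->lock */
        pthread_mutex_unlock(&dd->lock);
        return ret;
}

int main(void)
{
        struct pdom pdom = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct dev_data dd = { .lock = PTHREAD_MUTEX_INITIALIZER };

        attach_dev(&dd, &pdom);
        printf("iommu refcnt = %d\n", pdom.iommu_refcnt);
        return 0;
}

The value of a fixed order is deadlock avoidance: as long as every path that needs both locks takes dev_data->lock before pdom->lock, two CPUs can never hold the locks in opposite orders.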

File tree

1 file changed: +30 additions, -22 deletions


drivers/iommu/amd/iommu.c

Lines changed: 30 additions & 22 deletions
@@ -2020,16 +2020,23 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
                              struct protection_domain *pdom)
 {
        struct pdom_iommu_info *pdom_iommu_info, *curr;
+       struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&pdom->lock, flags);
 
        pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
        if (pdom_iommu_info) {
                pdom_iommu_info->refcnt++;
-               return 0;
+               goto out_unlock;
        }
 
        pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
-       if (!pdom_iommu_info)
-               return -ENOMEM;
+       if (!pdom_iommu_info) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
 
        pdom_iommu_info->iommu = iommu;
        pdom_iommu_info->refcnt = 1;
@@ -2038,43 +2045,52 @@ static int pdom_attach_iommu(struct amd_iommu *iommu,
                          NULL, pdom_iommu_info, GFP_ATOMIC);
        if (curr) {
                kfree(pdom_iommu_info);
-               return -ENOSPC;
+               ret = -ENOSPC;
+               goto out_unlock;
        }
 
-       return 0;
+       /* Update NUMA Node ID */
+       if (cfg->amd.nid == NUMA_NO_NODE)
+               cfg->amd.nid = dev_to_node(&iommu->dev->dev);
+
+out_unlock:
+       spin_unlock_irqrestore(&pdom->lock, flags);
+       return ret;
 }
 
 static void pdom_detach_iommu(struct amd_iommu *iommu,
                              struct protection_domain *pdom)
 {
        struct pdom_iommu_info *pdom_iommu_info;
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdom->lock, flags);
 
        pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
-       if (!pdom_iommu_info)
+       if (!pdom_iommu_info) {
+               spin_unlock_irqrestore(&pdom->lock, flags);
                return;
+       }
 
        pdom_iommu_info->refcnt--;
        if (pdom_iommu_info->refcnt == 0) {
                xa_erase(&pdom->iommu_array, iommu->index);
                kfree(pdom_iommu_info);
        }
+
+       spin_unlock_irqrestore(&pdom->lock, flags);
 }
 
 static int do_attach(struct iommu_dev_data *dev_data,
                     struct protection_domain *domain)
 {
        struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-       struct io_pgtable_cfg *cfg = &domain->iop.pgtbl.cfg;
        int ret = 0;
 
        /* Update data structures */
        dev_data->domain = domain;
        list_add(&dev_data->list, &domain->dev_list);
 
-       /* Update NUMA Node ID */
-       if (cfg->amd.nid == NUMA_NO_NODE)
-               cfg->amd.nid = dev_to_node(dev_data->dev);
-
        /* Do reference counting */
        ret = pdom_attach_iommu(iommu, domain);
        if (ret)
@@ -2096,12 +2112,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
 {
        struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+       unsigned long flags;
 
        /* Clear DTE and flush the entry */
        dev_update_dte(dev_data, false);
 
        /* Flush IOTLB and wait for the flushes to finish */
+       spin_lock_irqsave(&domain->lock, flags);
        amd_iommu_domain_flush_all(domain);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        /* Clear GCR3 table */
        if (pdom_is_sva_capable(domain))
@@ -2123,11 +2142,8 @@ static int attach_device(struct device *dev,
                         struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       unsigned long flags;
        int ret = 0;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        dev_data = dev_iommu_priv_get(dev);
 
        spin_lock(&dev_data->lock);
@@ -2142,8 +2158,6 @@ static int attach_device(struct device *dev,
 out:
        spin_unlock(&dev_data->lock);
 
-       spin_unlock_irqrestore(&domain->lock, flags);
-
        return ret;
 }
 
@@ -2153,13 +2167,9 @@ static int attach_device(struct device *dev,
 static void detach_device(struct device *dev)
 {
        struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-       struct protection_domain *domain = dev_data->domain;
        struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-       unsigned long flags;
        bool ppr = dev_data->ppr;
 
-       spin_lock_irqsave(&domain->lock, flags);
-
        spin_lock(&dev_data->lock);
 
        /*
@@ -2183,8 +2193,6 @@ static void detach_device(struct device *dev)
 out:
        spin_unlock(&dev_data->lock);
 
-       spin_unlock_irqrestore(&domain->lock, flags);
-
        /* Remove IOPF handler */
        if (ppr)
                amd_iommu_iopf_remove_device(iommu, dev_data);
