
Commit 4f609db

LuBaolu authored and joergroedel committed
iommu/vt-d: Use cache helpers in arch_invalidate_secondary_tlbs
The arch_invalidate_secondary_tlbs callback is called in the SVA mm notification path. It invalidates all or a range of caches after the CPU page table is modified. Use the cache tag helpers in this path.

The mm_types defines vm_end as the first byte after the end address, which is different from the iommu gather API, hence convert the end parameter from the mm_types scheme to the iommu gather scheme before calling the cache_tag helper.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240416080656.60968-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent 8ebc223 commit 4f609db
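For context, the end-address conversion the message describes amounts to turning the mmu notifier's exclusive end (the mm_types vm_end convention, first byte past the range) into the inclusive last address that the cache tag helper takes. Below is a minimal illustrative sketch of that adjustment, mirroring the calls made in the svm.c hunk further down; the wrapper name is hypothetical and not part of the patch.

```c
/*
 * Illustrative sketch only, not part of the patch: flush secondary TLBs
 * for an mmu-notifier range [start, end), where "end" is the first byte
 * after the range (mm_types vm_end convention).
 */
static void example_flush_secondary_tlbs(struct dmar_domain *domain,
					 unsigned long start,
					 unsigned long end)
{
	/* A full-address-space notification is encoded as 0 .. -1UL. */
	if (start == 0 && end == -1UL) {
		cache_tag_flush_all(domain);
		return;
	}

	/*
	 * The cache tag helper expects the last address of the range,
	 * so drop back one byte from the exclusive end.
	 */
	cache_tag_flush_range(domain, start, end - 1, 0);
}
```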

File tree: 2 files changed, 11 insertions(+), 71 deletions(-)

drivers/iommu/intel/iommu.h

Lines changed: 1 addition & 0 deletions
@@ -1166,6 +1166,7 @@ struct intel_svm {
 	struct mm_struct *mm;
 	u32 pasid;
 	struct list_head devs;
+	struct dmar_domain *domain;
 };
 #else
 static inline void intel_svm_check(struct intel_iommu *iommu) {}

drivers/iommu/intel/svm.c

Lines changed: 10 additions & 71 deletions
@@ -168,88 +168,25 @@ void intel_svm_check(struct intel_iommu *iommu)
 	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
 }
 
-static void __flush_svm_range_dev(struct intel_svm *svm,
-				  struct intel_svm_dev *sdev,
-				  unsigned long address,
-				  unsigned long pages, int ih)
-{
-	struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
-
-	if (WARN_ON(!pages))
-		return;
-
-	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
-	if (info->ats_enabled) {
-		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-					 svm->pasid, sdev->qdep, address,
-					 order_base_2(pages));
-		quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
-					  svm->pasid, sdev->qdep);
-	}
-}
-
-static void intel_flush_svm_range_dev(struct intel_svm *svm,
-				      struct intel_svm_dev *sdev,
-				      unsigned long address,
-				      unsigned long pages, int ih)
-{
-	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
-	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
-	unsigned long start = ALIGN_DOWN(address, align);
-	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
-
-	while (start < end) {
-		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
-		start += align;
-	}
-}
-
-static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				  unsigned long pages, int ih)
-{
-	struct intel_svm_dev *sdev;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
-	rcu_read_unlock();
-}
-
-static void intel_flush_svm_all(struct intel_svm *svm)
-{
-	struct device_domain_info *info;
-	struct intel_svm_dev *sdev;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sdev, &svm->devs, list) {
-		info = dev_iommu_priv_get(sdev->dev);
-
-		qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
-		if (info->ats_enabled) {
-			qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
-						 svm->pasid, sdev->qdep,
-						 0, 64 - VTD_PAGE_SHIFT);
-			quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
-						  svm->pasid, sdev->qdep);
-		}
-	}
-	rcu_read_unlock();
-}
-
 /* Pages have been freed at this point */
 static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long start, unsigned long end)
 {
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+	struct dmar_domain *domain = svm->domain;
 
 	if (start == 0 && end == -1UL) {
-		intel_flush_svm_all(svm);
+		cache_tag_flush_all(domain);
 		return;
 	}
 
-	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+	/*
+	 * The mm_types defines vm_end as the first byte after the end address,
+	 * different from IOMMU subsystem using the last address of an address
+	 * range.
+	 */
+	cache_tag_flush_range(domain, start, end - 1, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -336,6 +273,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 	INIT_LIST_HEAD_RCU(&svm->devs);
 
 	svm->notifier.ops = &intel_mmuops;
+	svm->domain = to_dmar_domain(domain);
 	ret = mmu_notifier_register(&svm->notifier, mm);
 	if (ret) {
 		kfree(svm);
@@ -747,6 +685,7 @@ struct iommu_domain *intel_svm_domain_alloc(void)
 	if (!domain)
 		return NULL;
 	domain->domain.ops = &intel_svm_domain_ops;
+	domain->use_first_level = true;
 	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->cache_lock);
 