Skip to content

Commit 333b890

Browse files
Thomas Hellström authored and Rodrigo Vivi (rodrigovivi) committed
drm/xe/userptr: Unmap userptrs in the mmu notifier
If userptr pages are freed after a call to the xe mmu notifier, the device will not be blocked out from theoretically accessing these pages unless they are also unmapped from the iommu, and this violates some aspects of the iommu-imposed security.

Ensure that userptrs are unmapped in the mmu notifier to mitigate this.

A naive attempt would try to free the sg table, but the sg table itself may be accessed by a concurrent bind operation, so settle for only unmapping.

v3:
- Update lockdep asserts.
- Fix a typo (Matthew Auld)

Fixes: 81e058a ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com
(cherry picked from commit ba767b9)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
1 parent 0a98219 commit 333b890

File tree

4 files changed

+52
-9
lines changed

4 files changed

+52
-9
lines changed

drivers/gpu/drm/xe/xe_hmm.c

Lines changed: 42 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,45 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
150150
DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
151151
}
152152

153+
static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
154+
{
155+
struct xe_userptr *userptr = &uvma->userptr;
156+
struct xe_vm *vm = xe_vma_vm(&uvma->vma);
157+
158+
lockdep_assert_held_write(&vm->lock);
159+
lockdep_assert_held(&vm->userptr.notifier_lock);
160+
161+
mutex_lock(&userptr->unmap_mutex);
162+
xe_assert(vm->xe, !userptr->mapped);
163+
userptr->mapped = true;
164+
mutex_unlock(&userptr->unmap_mutex);
165+
}
166+
167+
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
168+
{
169+
struct xe_userptr *userptr = &uvma->userptr;
170+
struct xe_vma *vma = &uvma->vma;
171+
bool write = !xe_vma_read_only(vma);
172+
struct xe_vm *vm = xe_vma_vm(vma);
173+
struct xe_device *xe = vm->xe;
174+
175+
if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
176+
!lockdep_is_held_type(&vm->lock, 0) &&
177+
!(vma->gpuva.flags & XE_VMA_DESTROYED)) {
178+
/* Don't unmap in exec critical section. */
179+
xe_vm_assert_held(vm);
180+
/* Don't unmap while mapping the sg. */
181+
lockdep_assert_held(&vm->lock);
182+
}
183+
184+
mutex_lock(&userptr->unmap_mutex);
185+
if (userptr->sg && userptr->mapped)
186+
dma_unmap_sgtable(xe->drm.dev, userptr->sg,
187+
write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
188+
userptr->mapped = false;
189+
mutex_unlock(&userptr->unmap_mutex);
190+
}
191+
153192
/**
154193
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
155194
* @uvma: the userptr vma which hold the scatter gather table
@@ -161,16 +200,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
161200
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
162201
{
163202
struct xe_userptr *userptr = &uvma->userptr;
164-
struct xe_vma *vma = &uvma->vma;
165-
bool write = !xe_vma_read_only(vma);
166-
struct xe_vm *vm = xe_vma_vm(vma);
167-
struct xe_device *xe = vm->xe;
168-
struct device *dev = xe->drm.dev;
169-
170-
xe_assert(xe, userptr->sg);
171-
dma_unmap_sgtable(dev, userptr->sg,
172-
write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
173203

204+
xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
205+
xe_hmm_userptr_unmap(uvma);
174206
sg_free_table(userptr->sg);
175207
userptr->sg = NULL;
176208
}
@@ -297,6 +329,7 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
297329

298330
xe_mark_range_accessed(&hmm_range, write);
299331
userptr->sg = &userptr->sgt;
332+
xe_hmm_userptr_set_mapped(uvma);
300333
userptr->notifier_seq = hmm_range.notifier_seq;
301334
up_read(&vm->userptr.notifier_lock);
302335
kvfree(pfns);

drivers/gpu/drm/xe/xe_hmm.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,4 +13,6 @@ struct xe_userptr_vma;
1313
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
1414

1515
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
16+
17+
void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
1618
#endif

drivers/gpu/drm/xe/xe_vm.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -620,6 +620,8 @@ static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uv
620620
err = xe_vm_invalidate_vma(vma);
621621
XE_WARN_ON(err);
622622
}
623+
624+
xe_hmm_userptr_unmap(uvma);
623625
}
624626

625627
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
@@ -1039,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
10391041
INIT_LIST_HEAD(&userptr->invalidate_link);
10401042
INIT_LIST_HEAD(&userptr->repin_link);
10411043
vma->gpuva.gem.offset = bo_offset_or_userptr;
1044+
mutex_init(&userptr->unmap_mutex);
10421045

10431046
err = mmu_interval_notifier_insert(&userptr->notifier,
10441047
current->mm,
@@ -1080,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
10801083
* them anymore
10811084
*/
10821085
mmu_interval_notifier_remove(&userptr->notifier);
1086+
mutex_destroy(&userptr->unmap_mutex);
10831087
xe_vm_put(vm);
10841088
} else if (xe_vma_is_null(vma)) {
10851089
xe_vm_put(vm);

drivers/gpu/drm/xe/xe_vm_types.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,16 @@ struct xe_userptr {
5959
struct sg_table *sg;
6060
/** @notifier_seq: notifier sequence number */
6161
unsigned long notifier_seq;
62+
/** @unmap_mutex: Mutex protecting dma-unmapping */
63+
struct mutex unmap_mutex;
6264
/**
6365
* @initial_bind: user pointer has been bound at least once.
6466
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
6567
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
6668
*/
6769
bool initial_bind;
70+
/** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
71+
bool mapped;
6872
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
6973
u32 divisor;
7074
#endif

0 commit comments

Comments
 (0)