
Commit 0a98219

Authored by Thomas Hellström, committed by Rodrigo Vivi (rodrigovivi)
drm/xe/hmm: Don't dereference struct page pointers without notifier lock
The pfns that we obtain from hmm_range_fault() point to pages that we don't have a reference on, and the guarantee that they are still in the cpu page-tables is that the notifier lock must be held and the notifier seqno is still valid. So while building the sg table and marking the pages accessed / dirty we need to hold this lock with a validated seqno.

However, the lock is reclaim tainted, which makes sg_alloc_table_from_pages_segment() unusable, since it internally allocates memory. Instead build the sg-table manually. For the non-iommu case this might lead to fewer coalesces, but if that's a problem it can be fixed up later in the resource cursor code. For the iommu case, the whole sg-table may still be coalesced to a single contiguous device va region.

This avoids marking pages that we don't own dirty and accessed, and it also avoids dereferencing struct pages that we don't own.

v2:
- Use assert to check whether hmm pfns are valid (Matthew Auld)
- Take into account that large pages may cross range boundaries (Matthew Auld)

v3:
- Don't unnecessarily check for a non-freed sg-table. (Matthew Auld)
- Add a missing up_read() in an error path. (Matthew Auld)

Fixes: 81e058a ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-3-thomas.hellstrom@linux.intel.com
(cherry picked from commit ea3e66d)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
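For context, the locking rule this change relies on is the generic HMM snapshot-and-retry pattern (see Documentation/mm/hmm.rst): the pages behind the pfns that hmm_range_fault() returns may only be dereferenced while the driver's notifier lock is held and mmu_interval_read_retry() confirms the notifier sequence number is still current. The sketch below is a simplified, driver-agnostic illustration of that pattern, not xe code; driver_lock, snapshot_and_map() and program_device_ptes() are placeholder names.

/*
 * Generic HMM fault-and-retry sketch, loosely following
 * Documentation/mm/hmm.rst.  driver_lock and program_device_ptes()
 * stand in for the driver's notifier lock and page-table update code;
 * they are not xe symbols.
 */
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/rwsem.h>

static int program_device_ptes(struct hmm_range *range)
{
	/* Placeholder: a real driver would update device page tables here. */
	return 0;
}

static int snapshot_and_map(struct mmu_interval_notifier *notifier,
			    struct mm_struct *mm, unsigned long *pfns,
			    unsigned long start, unsigned long end,
			    struct rw_semaphore *driver_lock)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= start,
		.end		= end,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(notifier);

	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;	/* the range was invalidated mid-fault */
		return ret;
	}

	ret = down_read_interruptible(driver_lock);
	if (ret)
		return ret;

	if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
		/* An invalidation raced with us; the pfns are stale. */
		up_read(driver_lock);
		goto again;
	}

	/*
	 * Only here, with the lock held and the seqno validated, is it
	 * safe to dereference the pages behind range.hmm_pfns[].
	 */
	ret = program_device_ptes(&range);
	up_read(driver_lock);

	return ret;
}

The xe_alloc_sg()/xe_build_sg() split in the diff below follows this shape, with the extra twist that the sg_table allocation has to happen outside the lock because the lock is reclaim tainted.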
1 parent: e3e2e7f

1 file changed, +86 -26 lines

drivers/gpu/drm/xe/xe_hmm.c

Lines changed: 86 additions & 26 deletions
@@ -42,6 +42,42 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
 	}
 }
 
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+		       struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+	unsigned long i, npages, hmm_pfn;
+	unsigned long num_chunks = 0;
+	int ret;
+
+	/* HMM docs says this is needed. */
+	ret = down_read_interruptible(notifier_sem);
+	if (ret)
+		return ret;
+
+	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+		up_read(notifier_sem);
+		return -EAGAIN;
+	}
+
+	npages = xe_npages_in_range(range->start, range->end);
+	for (i = 0; i < npages;) {
+		unsigned long len;
+
+		hmm_pfn = range->hmm_pfns[i];
+		xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+		len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+		/* If order > 0 the page may extend beyond range->start */
+		len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+		i += len;
+		num_chunks++;
+	}
+	up_read(notifier_sem);
+
+	return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
 /**
  * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
  * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
@@ -50,6 +86,7 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
  * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
  * has the pfn numbers of pages that back up this hmm address range.
  * @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
  * @write: whether we write to this range. This decides dma map direction
  * for system pages. If write we map it bi-diretional; otherwise
  * DMA_TO_DEVICE
@@ -76,38 +113,41 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
  * Returns 0 if successful; -ENOMEM if fails to allocate memory
  */
 static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
-		       struct sg_table *st, bool write)
+		       struct sg_table *st,
+		       struct rw_semaphore *notifier_sem,
+		       bool write)
 {
+	unsigned long npages = xe_npages_in_range(range->start, range->end);
 	struct device *dev = xe->drm.dev;
-	struct page **pages;
-	u64 i, npages;
-	int ret;
+	struct scatterlist *sgl;
+	struct page *page;
+	unsigned long i, j;
 
-	npages = xe_npages_in_range(range->start, range->end);
-	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
+	lockdep_assert_held(notifier_sem);
 
-	for (i = 0; i < npages; i++) {
-		pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
-		xe_assert(xe, !is_device_private_page(pages[i]));
-	}
+	i = 0;
+	for_each_sg(st->sgl, sgl, st->nents, j) {
+		unsigned long hmm_pfn, size;
 
-	ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
-						xe_sg_segment_size(dev), GFP_KERNEL);
-	if (ret)
-		goto free_pages;
+		hmm_pfn = range->hmm_pfns[i];
+		page = hmm_pfn_to_page(hmm_pfn);
+		xe_assert(xe, !is_device_private_page(page));
+
+		size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+		size -= page_to_pfn(page) & (size - 1);
+		i += size;
 
-	ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
-			      DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
-	if (ret) {
-		sg_free_table(st);
-		st = NULL;
+		if (unlikely(j == st->nents - 1)) {
+			if (i > npages)
+				size -= (i - npages);
+			sg_mark_end(sgl);
+		}
+		sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
 	}
+	xe_assert(xe, i == npages);
 
-free_pages:
-	kvfree(pages);
-	return ret;
+	return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
 }
 
 /**
@@ -237,16 +277,36 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 	if (ret)
 		goto free_pfns;
 
-	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+	ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
 	if (ret)
 		goto free_pfns;
 
+	ret = down_read_interruptible(&vm->userptr.notifier_lock);
+	if (ret)
+		goto free_st;
+
+	if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+
+	ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+			  &vm->userptr.notifier_lock, write);
+	if (ret)
+		goto out_unlock;
+
 	xe_mark_range_accessed(&hmm_range, write);
 	userptr->sg = &userptr->sgt;
 	userptr->notifier_seq = hmm_range.notifier_seq;
+	up_read(&vm->userptr.notifier_lock);
+	kvfree(pfns);
+	return 0;
 
+out_unlock:
+	up_read(&vm->userptr.notifier_lock);
+free_st:
+	sg_free_table(&userptr->sgt);
 free_pfns:
 	kvfree(pfns);
 	return ret;
 }
-
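As a footnote on the v2 change ("large pages may cross range boundaries"): the clipping len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1) in xe_alloc_sg() (and the equivalent size -= page_to_pfn(page) & (size - 1) in xe_build_sg()) subtracts the offset of the first pfn within its huge page, so only the part of the page that actually overlaps the range is counted. The standalone user-space sketch below uses made-up numbers — a 2 MiB huge page whose first 100 base pages fall before range->start — purely to illustrate the arithmetic; none of these values come from the patch.

/*
 * Standalone user-space sketch of the chunk clipping used in
 * xe_alloc_sg().  The pfn and order values are invented for
 * illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long map_order = 9;            /* 2 MiB huge page on 4 KiB base pages */
	unsigned long len = 1UL << map_order;   /* 512 base pages covered by the mapping */

	/*
	 * Example: the userptr range starts 100 base pages into the huge
	 * page, so the first pfn in the range has low bits equal to 100.
	 */
	unsigned long pfn = 0x40000 + 100;

	/* Same idea as: len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1); */
	len -= pfn & (len - 1);

	printf("base pages contributed by this chunk: %lu\n", len);  /* prints 412 */
	return 0;
}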
