@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
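The clamping above is easiest to see with concrete numbers. Below is a minimal, self-contained userspace sketch of the same arithmetic, assuming 4 KiB pages; all input values are made up for illustration, and the kernel's max_t/min_t macros are replaced by plain comparisons:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4 KiB pages */

int main(void)
{
	/* Hypothetical inputs: a 4-page user mapping of a 16-page partial
	 * GTT view. obj_offset is the page where the user mmapped into the
	 * object; partial_offset stands for vma->gtt_view.partial.offset.
	 */
	unsigned long vm_start = 0x7f0000000000UL >> PAGE_SHIFT;
	unsigned long vm_end = vm_start + 4;
	unsigned long vma_size = 16;
	unsigned long obj_offset = 10;
	unsigned long partial_offset = 8;
	long start, end;

	/* Same steps as set_address_limits(), in the ">> PAGE_SHIFT" domain */
	start = vm_start;
	start -= obj_offset;
	start += partial_offset;
	end = start + vma_size;

	if (start < (long)vm_start)	/* max_t(long, start, vm_start) */
		start = vm_start;
	if (end > (long)vm_end)		/* min_t(long, end, vm_end) */
		end = vm_end;

	printf("remap [%#lx, %#lx)\n",
	       (unsigned long)start << PAGE_SHIFT,
	       (unsigned long)end << PAGE_SHIFT);
	return 0;
}

Shifting everything down to page units first keeps the signed intermediates small, which is what the ">> PAGE_SHIFT" / "<< PAGE_SHIFT" domain comments in the patch are about.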
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	unsigned long pfn;
 	int srcu;
 	int ret;
 
-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
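vm_pgoff here carries the DRM "fake" mmap offset: userspace maps the object at drm_vma_node_start(&mmo->vma_node) plus an optional extra page offset into the object, so subtracting the node start recovers that extra offset, and page_offset becomes an offset into the object rather than into the VMA. A sketch of the relationship, with illustrative names (fake_offset stands for drm_vma_node_start(&mmo->vma_node)):

/* Illustrative only: how the fault handler recovers the caller's extra
 * page offset. Schematically, userspace did:
 *   mmap(NULL, len, prot, flags, fd, (fake_offset + extra) << PAGE_SHIFT);
 * which leaves fake_offset + extra in vma->vm_pgoff.
 */
static unsigned long recover_obj_offset(unsigned long vm_pgoff,
					unsigned long fake_offset)
{
	return vm_pgoff - fake_offset; /* = extra pages into the object */
}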
@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
 	if (ret)
 		goto err_fence;
 
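Because the remap now begins at the clamped start rather than at area->vm_start, the physical side must advance by the same number of pages, plus the difference between the page the user mapped at (obj_offset) and the first page of the partial GTT view. A standalone restatement of that arithmetic, assuming 4 KiB pages; gmadr_start and ggtt_offset stand for ggtt->gmadr.start and i915_ggtt_offset(vma), both in bytes:

#define PAGE_SHIFT 12 /* assume 4 KiB pages for the sketch */

/* Illustrative restatement of the pfn computation above. start and
 * vm_start are byte addresses; obj_offset and partial_offset are
 * already page counts.
 */
static unsigned long first_pfn(unsigned long gmadr_start,
			       unsigned long ggtt_offset,
			       unsigned long start, unsigned long vm_start,
			       unsigned long obj_offset,
			       unsigned long partial_offset)
{
	unsigned long pfn = (gmadr_start + ggtt_offset) >> PAGE_SHIFT;

	pfn += (start - vm_start) >> PAGE_SHIFT; /* pages clamped off the front */
	pfn += obj_offset - partial_offset;	 /* user offset vs. view start */
	return pfn;
}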
@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
 		mmo = mmap_offset_attach(obj, mmap_type, NULL);
 		if (IS_ERR(mmo))
 			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
 	}
 
 	/*
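i915_gem_fb_mmap() builds the mapping itself (via mmap_offset_attach() above) rather than arriving through the usual DRM mmap-offset path, so vm_pgoff does not yet contain the fake offset that vm_fault_gtt() now subtracts; biasing it by drm_vma_node_start() keeps that subtraction consistent. The invariant, spelled out as a sketch:

/* Invariant assumed by the fault handler after this change:
 *   area->vm_pgoff == drm_vma_node_start(&mmo->vma_node) + extra_pages
 * so that
 *   obj_offset == area->vm_pgoff - drm_vma_node_start(&mmo->vma_node)
 * On this path there are no extra pages, hence the bare "+=" above.
 */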