 #include "xe_vm.h"
 #include "xe_vm_types.h"
 
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+	/* Not reliable without notifier lock */
+	return range->base.flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+	/* Not reliable without notifier lock */
+	return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
 static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
 {
 	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
@@ -37,6 +49,23 @@ static unsigned long xe_svm_range_size(struct xe_svm_range *range)
 	return drm_gpusvm_range_size(&range->base);
 }
 
+#define range_debug(r__, operaton__)	\
+	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,	\
+	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+	       "start=0x%014lx, end=0x%014lx, size=%lu",	\
+	       (operaton__), range_to_vm(&(r__)->base)->usm.asid,	\
+	       (r__)->base.gpusvm,	\
+	       xe_svm_range_in_vram((r__)) ? 1 : 0,	\
+	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0,	\
+	       (r__)->base.notifier_seq,	\
+	       xe_svm_range_start((r__)), xe_svm_range_end((r__)),	\
+	       xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+	range_debug(range, operation);
+}
+
 static void *xe_svm_devm_owner(struct xe_device *xe)
 {
 	return xe;
@@ -74,6 +103,8 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
 {
 	struct xe_device *xe = vm->xe;
 
+	range_debug(range, "GARBAGE COLLECTOR ADD");
+
 	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
 
 	spin_lock(&vm->svm.garbage_collector.lock);
@@ -99,10 +130,14 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
 
 	xe_svm_assert_in_notifier(vm);
 
+	range_debug(range, "NOTIFIER");
+
 	/* Skip if already unmapped or if no binding exist */
 	if (range->base.flags.unmapped || !range->tile_present)
 		return 0;
 
+	range_debug(range, "NOTIFIER - EXECUTE");
+
 	/* Adjust invalidation to range boundaries */
 	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
 	*adj_end = max(xe_svm_range_end(range), mmu_range->end);
@@ -153,6 +188,11 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
 
 	xe_svm_assert_in_notifier(vm);
 
+	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+	       mmu_range->start, mmu_range->end, mmu_range->event);
+
 	/* Adjust invalidation to notifier boundaries */
 	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
 	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
@@ -237,6 +277,8 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
 {
 	struct dma_fence *fence;
 
+	range_debug(range, "GARBAGE COLLECTOR");
+
 	xe_vm_lock(vm, false);
 	fence = xe_vm_range_unbind(vm, range);
 	xe_vm_unlock(vm);
@@ -396,16 +438,23 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
 			int incr = (match && last) ? 1 : 0;
 
 			if (vram_addr != XE_VRAM_ADDR_INVALID) {
-				if (sram)
+				if (sram) {
+					vm_dbg(&tile->xe->drm,
+					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+					       vram_addr, (u64)dma_addr[pos], i - pos + incr);
 					__fence = xe_migrate_from_vram(tile->migrate,
 								       i - pos + incr,
 								       vram_addr,
 								       dma_addr + pos);
-				else
+				} else {
+					vm_dbg(&tile->xe->drm,
+					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+					       (u64)dma_addr[pos], vram_addr, i - pos + incr);
 					__fence = xe_migrate_to_vram(tile->migrate,
 								     i - pos + incr,
 								     dma_addr + pos,
 								     vram_addr);
+				}
 				if (IS_ERR(__fence)) {
 					err = PTR_ERR(__fence);
 					goto err_out;
@@ -425,14 +474,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
 
 		/* Extra mismatched device page, copy it */
 		if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
-			if (sram)
+			if (sram) {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+				       vram_addr, (u64)dma_addr[pos], 1);
 				__fence = xe_migrate_from_vram(tile->migrate, 1,
 							       vram_addr,
 							       dma_addr + pos);
-			else
+			} else {
+				vm_dbg(&tile->xe->drm,
+				       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+				       (u64)dma_addr[pos], vram_addr, 1);
 				__fence = xe_migrate_to_vram(tile->migrate, 1,
 							     dma_addr + pos,
 							     vram_addr);
+			}
 			if (IS_ERR(__fence)) {
 				err = PTR_ERR(__fence);
 				goto err_out;
@@ -609,6 +665,8 @@ static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
 	ktime_t end = 0;
 	int err;
 
+	range_debug(range, "ALLOCATE VRAM");
+
 	if (!mmget_not_zero(mm))
 		return -EFAULT;
 	mmap_read_lock(mm);
@@ -699,6 +757,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	if (xe_svm_range_is_valid(range, tile))
 		return 0;
 
+	range_debug(range, "PAGE FAULT");
+
 	/* XXX: Add migration policy, for now migrate range once */
 	if (!range->skip_migrate && range->base.flags.migrate_devmem &&
 	    xe_svm_range_size(range) >= SZ_64K) {
@@ -714,18 +774,26 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 		}
 	}
 
+	range_debug(range, "GET PAGES");
 	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
 	/* Corner where CPU mappings have changed */
 	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
-		if (err == -EOPNOTSUPP)
+		if (err == -EOPNOTSUPP) {
+			range_debug(range, "PAGE FAULT - EVICT PAGES");
 			drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+		}
 		drm_dbg(&vm->xe->drm,
 			"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
 			vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+		range_debug(range, "PAGE FAULT - RETRY PAGES");
 		goto retry;
 	}
-	if (err)
+	if (err) {
+		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
 		goto err_out;
+	}
+
+	range_debug(range, "PAGE FAULT - BIND");
 
 retry_bind:
 	drm_exec_init(&exec, 0, 0);
@@ -741,8 +809,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
 	if (IS_ERR(fence)) {
 		drm_exec_fini(&exec);
 		err = PTR_ERR(fence);
-		if (err == -EAGAIN)
+		if (err == -EAGAIN) {
+			range_debug(range, "PAGE FAULT - RETRY BIND");
 			goto retry;
+		}
 		if (xe_vm_validate_should_retry(&exec, err, &end))
 			goto retry_bind;
 		goto err_out;