@@ -1106,26 +1106,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-		     uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+		     struct list_head *insert_list, struct list_head *remap_list)
 {
 	struct svm_range *tail;
 	int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-	if (!r)
+	if (!r) {
 		list_add(&tail->list, insert_list);
+		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+			list_add(&tail->update_list, remap_list);
+	}
 	return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-		     uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+		     struct list_head *insert_list, struct list_head *remap_list)
 {
 	struct svm_range *head;
 	int r = svm_range_split(prange, new_start, prange->last, &head);
 
-	if (!r)
+	if (!r) {
 		list_add(&head->list, insert_list);
+		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+			list_add(&head->update_list, remap_list);
+	}
 	return r;
 }
 
@@ -2052,7 +2058,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 	      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
 	      struct list_head *update_list, struct list_head *insert_list,
-	      struct list_head *remove_list)
+	      struct list_head *remove_list, struct list_head *remap_list)
 {
 	unsigned long last = start + size - 1UL;
 	struct svm_range_list *svms = &p->svms;
@@ -2068,6 +2074,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 	INIT_LIST_HEAD(insert_list);
 	INIT_LIST_HEAD(remove_list);
 	INIT_LIST_HEAD(&new_list);
+	INIT_LIST_HEAD(remap_list);
 
 	node = interval_tree_iter_first(&svms->objects, start, last);
 	while (node) {
@@ -2104,14 +2111,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
 			if (node->start < start) {
 				pr_debug("change old range start\n");
 				r = svm_range_split_head(prange, start,
-							 insert_list);
+							 insert_list, remap_list);
 				if (r)
 					goto out;
 			}
 			if (node->last > last) {
 				pr_debug("change old range last\n");
 				r = svm_range_split_tail(prange, last,
-							 insert_list);
+							 insert_list, remap_list);
 				if (r)
 					goto out;
 			}
@@ -3501,6 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 	struct list_head update_list;
 	struct list_head insert_list;
 	struct list_head remove_list;
+	struct list_head remap_list;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
 	struct svm_range *next;
@@ -3532,7 +3540,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
 	/* Add new range and split existing ranges as needed */
 	r = svm_range_add(p, start, size, nattr, attrs, &update_list,
-			  &insert_list, &remove_list);
+			  &insert_list, &remove_list, &remap_list);
 	if (r) {
 		mutex_unlock(&svms->lock);
 		mmap_write_unlock(mm);
@@ -3597,6 +3605,19 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 			ret = r;
 	}
 
+	list_for_each_entry(prange, &remap_list, update_list) {
+		pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
+			 prange, prange->start, prange->last);
+		mutex_lock(&prange->migrate_mutex);
+		r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+					       true, true, prange->mapped_to_gpu);
+		if (r)
+			pr_debug("failed %d on remap svm range\n", r);
+		mutex_unlock(&prange->migrate_mutex);
+		if (r)
+			ret = r;
+	}
+
 	dynamic_svm_range_dump(svms);
 
 	mutex_unlock(&svms->lock);
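
Note on the alignment checks in the first hunk: a split only lands on remap_list when its new boundary is not aligned to the range's migration granule (1UL << prange->granularity pages), i.e. when the split cuts through a granule and the surviving piece must be revalidated and remapped afterwards. The short standalone C sketch below reproduces just that check; the IS_ALIGNED definition is copied from include/linux/align.h, and the granularity value of 9 (512-page granules) is assumed here purely for illustration.

/*
 * Standalone sketch of the boundary test used by
 * svm_range_split_head()/svm_range_split_tail() above.
 * IS_ALIGNED is reproduced as defined in include/linux/align.h;
 * split_needs_remap() and the granularity of 9 are illustrative
 * assumptions, not part of the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)

static bool split_needs_remap(uint64_t boundary_pfn, unsigned int granularity)
{
	/* A boundary that falls mid-granule leaves a partial granule
	 * behind, so that range would be queued for remapping. */
	return !IS_ALIGNED(boundary_pfn, 1UL << granularity);
}

int main(void)
{
	/* 0x200 is 512-page aligned: no remap needed. */
	printf("%d\n", split_needs_remap(0x200, 9));
	/* 0x2a0 falls inside a granule: remap needed. */
	printf("%d\n", split_needs_remap(0x2a0, 9));
	return 0;
}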