@@ -955,17 +955,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	struct drm_gem_object *gobj;
 	struct amdgpu_vm_bo_base *base;
 	struct amdgpu_bo *robj;
+	struct drm_exec exec;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	int r;
 
+	if (args->padding)
+		return -EINVAL;
+
 	gobj = drm_gem_object_lookup(filp, args->handle);
 	if (!gobj)
 		return -ENOENT;
 
 	robj = gem_to_amdgpu_bo(gobj);
 
-	r = amdgpu_bo_reserve(robj, false);
-	if (unlikely(r))
-		goto out;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_lock_obj(&exec, gobj);
+		drm_exec_retry_on_contention(&exec);
+		if (r)
+			goto out_exec;
+
+		if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+			r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+			drm_exec_retry_on_contention(&exec);
+			if (r)
+				goto out_exec;
+		}
+	}
 
 	switch (args->op) {
 	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -976,7 +993,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		if (copy_to_user(out, &info, sizeof(info)))
 			r = -EFAULT;
 		break;
@@ -985,20 +1002,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		if (drm_gem_is_imported(&robj->tbo.base) &&
 		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
 			r = -EINVAL;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
-			amdgpu_bo_unreserve(robj);
-			break;
+			goto out_exec;
 		}
 		for (base = robj->vm_bo; base; base = base->next)
 			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
 						  amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 				r = -EINVAL;
-				amdgpu_bo_unreserve(robj);
-				goto out;
+				goto out_exec;
 			}
 
 
@@ -1011,15 +1025,63 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 
 		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 			amdgpu_vm_bo_invalidate(robj, true);
+		drm_exec_fini(&exec);
+		break;
+	case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+		struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+		struct drm_amdgpu_gem_vm_entry *vm_entries;
+		struct amdgpu_bo_va_mapping *mapping;
+		int num_mappings = 0;
+		/*
+		 * num_entries is set as an input to the size of the user-allocated array of
+		 * drm_amdgpu_gem_vm_entry stored at args->value.
+		 * num_entries is sent back as output as the number of mappings the bo has.
+		 * If that number is larger than the size of the array, the ioctl must
+		 * be retried.
+		 */
+		vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+		if (!vm_entries)
+			return -ENOMEM;
+
+		amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
+
+		amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+			if (num_mappings < args->num_entries) {
+				vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+				vm_entries[num_mappings].offset = mapping->offset;
+				vm_entries[num_mappings].flags = mapping->flags;
+			}
+			num_mappings += 1;
+		}
 
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
+
+		if (num_mappings > 0 && num_mappings <= args->num_entries)
+			r = copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries));
+
+		args->num_entries = num_mappings;
+
+		kvfree(vm_entries);
 		break;
+	}
 	default:
-		amdgpu_bo_unreserve(robj);
+		drm_exec_fini(&exec);
 		r = -EINVAL;
 	}
 
-out:
+	drm_gem_object_put(gobj);
+	return r;
+out_exec:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }
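
The comment in the GET_MAPPING_INFO case above describes a query/retry protocol: userspace passes the capacity of its drm_amdgpu_gem_vm_entry array in num_entries, and the kernel writes back the total number of mappings the BO has; if that total exceeds the capacity, the call must be repeated with a larger array. Below is a minimal userspace sketch of that loop, assuming struct drm_amdgpu_gem_op gains the num_entries and padding fields this patch relies on; the helper name, initial capacity, and header path are illustrative, not part of the patch.

/*
 * Hypothetical userspace helper for AMDGPU_GEM_OP_GET_MAPPING_INFO.
 * Fields beyond handle/op/value on struct drm_amdgpu_gem_op are assumed
 * from this patch's uapi changes; the include path may differ per system.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int query_bo_mappings(int fd, uint32_t handle,
			     struct drm_amdgpu_gem_vm_entry **out_entries,
			     uint32_t *out_count)
{
	struct drm_amdgpu_gem_vm_entry *entries = NULL;
	uint32_t capacity = 4;	/* arbitrary first guess */

	for (;;) {
		struct drm_amdgpu_gem_op op;

		free(entries);
		entries = calloc(capacity, sizeof(*entries));
		if (!entries)
			return -ENOMEM;

		memset(&op, 0, sizeof(op));
		op.handle = handle;
		op.op = AMDGPU_GEM_OP_GET_MAPPING_INFO;
		op.value = (uintptr_t)entries;	/* user array the kernel fills */
		op.num_entries = capacity;	/* in: array size, out: mapping count */

		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &op)) {
			free(entries);
			return -errno;
		}

		if (op.num_entries <= capacity) {
			/* Everything fit; report how many entries are valid. */
			*out_entries = entries;
			*out_count = op.num_entries;
			return 0;
		}

		/* More mappings exist than we had room for: grow and retry. */
		capacity = op.num_entries;
	}
}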