@@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
 
 static int
 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
-			  struct pvr_vm_context *vm_ctx, u64 device_addr,
-			  u64 size)
+			  struct pvr_vm_context *vm_ctx,
+			  struct pvr_gem_object *pvr_obj,
+			  u64 device_addr, u64 size)
 {
 	int err;
 
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 		goto err_bind_op_fini;
 	}
 
+	bind_op->pvr_obj = pvr_obj;
 	bind_op->vm_ctx = vm_ctx;
 	bind_op->device_addr = device_addr;
 	bind_op->size = size;
@@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
 	return ERR_PTR(err);
 }
 
-/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
-	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
-			     vm_ctx->gpuvm_mgr.mm_range));
-}
-
 /**
  * pvr_vm_context_release() - Teardown a VM context.
  * @ref_count: Pointer to reference counter of the VM context.
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
 	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
 	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
 
-	/* Unmap operations don't have an object to lock. */
-	if (!pvr_obj)
-		return 0;
-
-	/* Acquire lock on the GEM being mapped. */
+	/* Acquire lock on the GEM object being mapped/unmapped. */
 	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
 }
 
@@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 }
 
 /**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
  * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
  * @device_addr: Virtual device address at the start of the target mapping.
  * @size: Size of the target mapping.
  *
@@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
  *  * Any error encountered while performing internal operations required to
  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
  *    pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
  */
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+			struct pvr_gem_object *pvr_obj,
+			u64 device_addr, u64 size)
 {
 	struct pvr_vm_bind_op bind_op = {0};
 	struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 		},
 	};
 
-	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
-					    size);
+	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+					    device_addr, size);
 	if (err)
 		return err;
 
+	pvr_gem_object_get(pvr_obj);
+
 	err = drm_gpuvm_exec_lock(&vm_exec);
 	if (err)
 		goto err_cleanup;
@@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 	return err;
 }
 
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+		 u64 device_addr, u64 size)
+{
+	int err;
+
+	mutex_lock(&vm_ctx->lock);
+	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+	mutex_unlock(&vm_ctx->lock);
+
+	return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by drm_gpuva_find,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+	struct pvr_gem_object *pvr_obj;
+	struct drm_gpuva *va;
+	int err;
+
+	mutex_lock(&vm_ctx->lock);
+
+	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+	if (va) {
+		pvr_obj = gem_to_pvr_gem(va->gem.obj);
+		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+					      va->va.addr, va->va.range);
+	} else {
+		err = -ENOENT;
+	}
+
+	mutex_unlock(&vm_ctx->lock);
+
+	return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+	mutex_lock(&vm_ctx->lock);
+
+	for (;;) {
+		struct pvr_gem_object *pvr_obj;
+		struct drm_gpuva *va;
+
+		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+					  vm_ctx->gpuvm_mgr.mm_start,
+					  vm_ctx->gpuvm_mgr.mm_range);
+		if (!va)
+			break;
+
+		pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+						va->va.addr, va->va.range));
+	}
+
+	mutex_unlock(&vm_ctx->lock);
+}
+
 /* Static data areas are determined by firmware. */
 static const struct drm_pvr_static_data_area static_data_areas[] = {
 	{
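
For context, a minimal usage sketch (not part of this patch): a caller that already tracks the pvr_gem_object backing a mapping can pass it straight to the new object-aware helper instead of the address-only pvr_vm_unmap(), so no drm_gpuva_find() lookup is required. The struct example_mapping and example_unmap_mapping() names below are hypothetical, introduced only for illustration.

/*
 * Illustrative sketch only, not part of the patch. pvr_vm_unmap_obj()
 * takes vm_ctx->lock itself, and the locked path takes its own reference
 * on the GEM object before locking it, as introduced above.
 */
struct example_mapping {
	struct pvr_vm_context *vm_ctx;
	struct pvr_gem_object *pvr_obj;
	u64 device_addr;
	u64 size;
};

static int example_unmap_mapping(struct example_mapping *m)
{
	/* Unmap by object rather than by device-virtual address lookup. */
	return pvr_vm_unmap_obj(m->vm_ctx, m->pvr_obj,
				m->device_addr, m->size);
}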