@@ -758,6 +758,231 @@ static void idxd_unbind(struct device_driver *drv, const char *buf)
 	put_device(dev);
 }
 
+#define idxd_free_saved_configs(saved_configs, count)	\
+	do {						\
+		int i;					\
+							\
+		for (i = 0; i < (count); i++)		\
+			kfree(saved_configs[i]);	\
+	} while (0)
+
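+/*
+ * Free the entries of whichever saved arrays are non-NULL. The arrays
+ * themselves are released by their owners: the __free() cleanup in
+ * idxd_device_config_save(), or the explicit kfree()s in
+ * idxd_device_config_restore().
+ */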
+static void idxd_free_saved(struct idxd_group **saved_groups,
+			    struct idxd_engine **saved_engines,
+			    struct idxd_wq **saved_wqs,
+			    struct idxd_device *idxd)
+{
+	if (saved_groups)
+		idxd_free_saved_configs(saved_groups, idxd->max_groups);
+	if (saved_engines)
+		idxd_free_saved_configs(saved_engines, idxd->max_engines);
+	if (saved_wqs)
+		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
+}
+
+/*
+ * Save IDXD device configurations, including engines, groups, wqs, etc.
+ * The saved configurations can be restored when needed.
+ */
+static int idxd_device_config_save(struct idxd_device *idxd,
+				   struct idxd_saved_states *idxd_saved)
+{
+	struct device *dev = &idxd->pdev->dev;
+	int i;
+
+	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
+
+	if (idxd->evl) {
+		memcpy(&idxd_saved->saved_evl, idxd->evl,
+		       sizeof(struct idxd_evl));
+	}
+
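+	/*
+	 * The allocations below use scope-based cleanup from
+	 * <linux/cleanup.h>: a pointer declared with __free(kfree) is
+	 * kfree()d automatically when it goes out of scope unless
+	 * ownership is passed on with no_free_ptr(), so the error paths
+	 * only need to free the array entries via idxd_free_saved().
+	 */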
+	struct idxd_group **saved_groups __free(kfree) =
+			kcalloc_node(idxd->max_groups,
+				     sizeof(struct idxd_group *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_groups)
+		return -ENOMEM;
+
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group __free(kfree) =
+			kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
+				     dev_to_node(dev));
+
+		if (!saved_group) {
+			/* Free saved groups */
+			idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
+		saved_groups[i] = no_free_ptr(saved_group);
+	}
+
+	struct idxd_engine **saved_engines __free(kfree) =
+			kcalloc_node(idxd->max_engines,
+				     sizeof(struct idxd_engine *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_engines) {
+		/* Free saved groups */
+		idxd_free_saved(saved_groups, NULL, NULL, idxd);
+
+		return -ENOMEM;
+	}
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine __free(kfree) =
+			kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
+				     dev_to_node(dev));
+		if (!saved_engine) {
+			/* Free saved groups and engines */
+			idxd_free_saved(saved_groups, saved_engines, NULL,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
+		saved_engines[i] = no_free_ptr(saved_engine);
+	}
+
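+	/*
+	 * Snapshot which wqs are currently enabled; only enabled wqs
+	 * are saved below and restored later.
+	 */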
+	unsigned long *saved_wq_enable_map __free(bitmap) =
+			bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
+					   dev_to_node(dev));
+	if (!saved_wq_enable_map) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
+
+	struct idxd_wq **saved_wqs __free(kfree) =
+			kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
+				     GFP_KERNEL, dev_to_node(dev));
+	if (!saved_wqs) {
+		/* Free saved groups and engines */
+		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
+
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq __free(kfree) =
+			kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
+				     dev_to_node(dev));
+		struct idxd_wq *wq;
+
+		if (!saved_wq) {
+			/* Free saved groups, engines, and wqs */
+			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
+					idxd);
+
+			return -ENOMEM;
+		}
+
+		/* A disabled wq has nothing to save; __free() drops saved_wq */
+		if (!test_bit(i, saved_wq_enable_map))
+			continue;
+
+		wq = idxd->wqs[i];
+		mutex_lock(&wq->wq_lock);
+		memcpy(saved_wq, wq, sizeof(*saved_wq));
+		saved_wqs[i] = no_free_ptr(saved_wq);
+		mutex_unlock(&wq->wq_lock);
+	}
+
+	/* All allocations succeeded; transfer ownership to idxd_saved */
+	idxd_saved->saved_groups = no_free_ptr(saved_groups);
+	idxd_saved->saved_engines = no_free_ptr(saved_engines);
+	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
+	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
+
+	return 0;
+}
+
+/*
+ * Restore IDXD device configurations, including engines, groups, wqs,
+ * etc., that were saved before. The saved copies are freed as they are
+ * consumed.
+ */
+static void idxd_device_config_restore(struct idxd_device *idxd,
+				       struct idxd_saved_states *idxd_saved)
+{
+	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
+	int i;
+
+	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
+
+	if (saved_evl)
+		idxd->evl->size = saved_evl->size;
+
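+	/*
+	 * The live group/engine/wq objects are kept; only their
+	 * configurable attributes are copied back from the saved copies.
+	 */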
+	for (i = 0; i < idxd->max_groups; i++) {
+		struct idxd_group *saved_group, *group;
+
+		saved_group = idxd_saved->saved_groups[i];
+		group = idxd->groups[i];
+
+		group->rdbufs_allowed = saved_group->rdbufs_allowed;
+		group->rdbufs_reserved = saved_group->rdbufs_reserved;
+		group->tc_a = saved_group->tc_a;
+		group->tc_b = saved_group->tc_b;
+		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
+
+		kfree(saved_group);
+	}
+	kfree(idxd_saved->saved_groups);
+
+	for (i = 0; i < idxd->max_engines; i++) {
+		struct idxd_engine *saved_engine, *engine;
+
+		saved_engine = idxd_saved->saved_engines[i];
+		engine = idxd->engines[i];
+
+		engine->group = saved_engine->group;
+
+		kfree(saved_engine);
+	}
+	kfree(idxd_saved->saved_engines);
+
+	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
+		    idxd->max_wqs);
+	bitmap_free(idxd_saved->saved_wq_enable_map);
+
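+	/*
+	 * saved_wqs[] only has entries for wqs that were enabled at save
+	 * time, which is exactly the set marked in wq_enable_map above.
+	 */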
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *saved_wq, *wq;
+		size_t len;
+
+		if (!test_bit(i, idxd->wq_enable_map))
+			continue;
+
+		saved_wq = idxd_saved->saved_wqs[i];
+		wq = idxd->wqs[i];
+
+		mutex_lock(&wq->wq_lock);
+
+		wq->group = saved_wq->group;
+		wq->flags = saved_wq->flags;
+		wq->threshold = saved_wq->threshold;
+		wq->size = saved_wq->size;
+		wq->priority = saved_wq->priority;
+		wq->type = saved_wq->type;
+		len = strlen(saved_wq->name) + 1;
+		strscpy(wq->name, saved_wq->name, len);
+		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
+		wq->max_batch_size = saved_wq->max_batch_size;
+		wq->enqcmds_retries = saved_wq->enqcmds_retries;
+		wq->descs = saved_wq->descs;
+		wq->idxd_chan = saved_wq->idxd_chan;
+		len = strlen(saved_wq->driver_name) + 1;
+		strscpy(wq->driver_name, saved_wq->driver_name, len);
+
+		mutex_unlock(&wq->wq_lock);
+
+		kfree(saved_wq);
+	}
+
+	kfree(idxd_saved->saved_wqs);
+}
+
 /*
  * Probe idxd PCI device.
  * If idxd is not given, need to allocate idxd and set up its data.