@@ -592,6 +592,9 @@ def create_launch_model(
A model bundle consists of exactly {predict_fn_or_cls}, {load_predict_fn + model}, or {load_predict_fn + load_model_fn}.
Pre/post-processing code can be included inside load_predict_fn/model or in predict_fn_or_cls call.
+ Note: the exact parameters used depend on the version of the Launch client in use,
+ i.e. on Launch client version 0.x you will use `env_params`, while on 1.x and later
+ you will use `pytorch_image_tag` and `tensorflow_version`.
Parameters:
model_bundle_name: Name of model bundle you want to create. This acts as a unique identifier.
@@ -608,7 +611,8 @@ def create_launch_model(
["tensorflow==2.3.0", "tensorflow-hub==0.11.0"]. If no list has been passed, will default to the currently
imported list of packages.
app_config: Either a Dictionary that represents a YAML file contents or a local path to a YAML file.
- env_params: A dictionary that dictates environment information e.g.
+ env_params: Only for Launch client v0.
+ A dictionary that dictates environment information, e.g.
the use of pytorch or tensorflow, which cuda/cudnn versions to use.
Specifically, the dictionary should contain the following keys:
"framework_type": either "tensorflow" or "pytorch".
@@ -617,6 +621,9 @@ def create_launch_model(
"cudnn_version" Version of cudnn used, e.g. "cudnn8-devel".
"tensorflow_version": Version of tensorflow, e.g. "2.3.0". Only applicable if framework_type is tensorflow
globals_copy: Dictionary of the global symbol table. Normally provided by `globals()` built-in function.
+ pytorch_image_tag: Only for Launch client v1, and only if you want to use the pytorch framework type.
+ The tag of the pytorch docker image you want to use, e.g. "1.11.0-cuda11.3-cudnn8-runtime".
+ tensorflow_version: Only for Launch client v1, and only if you want to use tensorflow. Version of tensorflow, e.g. "2.3.0".
"""
from launch import LaunchClient
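
For reference, here is a minimal illustrative sketch of the two argument shapes documented above. Only keys named in this docstring are used, but the concrete values and the commented call shape are hypothetical:

```python
# Illustrative only: which bundle arguments to pass depends on the installed
# Launch client version. Values below are made up.

# Launch client 0.x: describe the runtime via an env_params dictionary.
bundle_args_v0 = {
    "env_params": {
        "framework_type": "tensorflow",
        "tensorflow_version": "2.3.0",
        "cudnn_version": "cudnn8-devel",
    },
}

# Launch client >= 1.0.0: pass a framework image tag (or tensorflow_version) instead.
bundle_args_v1 = {
    "pytorch_image_tag": "1.11.0-cuda11.3-cudnn8-runtime",
}

# Hypothetical call shape; the real signature is the one defined by create_launch_model:
# client.create_launch_model(name, reference_id, metadata, **bundle_args_v1)
```
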
@@ -637,13 +644,22 @@ def create_launch_model(
"model_bundle_name": name + "-nucleus-autogen",
**bundle_args,
}
-
- bundle = launch_client.create_model_bundle(**kwargs)
+ if hasattr(launch_client, "create_model_bundle_from_callable_v2"):
+ # Launch client is >= 1.0.0
+ bundle = launch_client.create_model_bundle_from_callable_v2(
+ **kwargs
+ )
+ bundle_name = (
+ bundle.name
+ )  # both v0 and v1 have a .name field but are different types
+ else:
+ bundle = launch_client.create_model_bundle(**kwargs)
+ bundle_name = bundle.name
return self.create_model(
name,
reference_id,
metadata,
- bundle.name,
+ bundle_name,
)

def create_launch_model_from_dir(
@@ -705,12 +721,16 @@ def create_launch_model_from_dir(
as the desired inference loading function, then the `load_predict_fn_module_path` argument should be
`my_module1.my_inference_file.f`.
+ Note: the exact keys for `bundle_from_dir_args` depend on the version of the Launch client in use,
+ i.e. on Launch client version 0.x you will use `env_params`, while on 1.x and later
+ you will use `pytorch_image_tag` and `tensorflow_version`.
Keys for `bundle_from_dir_args`:
model_bundle_name: Name of model bundle you want to create. This acts as a unique identifier.
base_paths: The paths on the local filesystem where the bundle code lives.
requirements_path: A path on the local filesystem where a requirements.txt file lives.
- env_params: A dictionary that dictates environment information e.g.
+ env_params: Only for Launch client v0.
+ A dictionary that dictates environment information, e.g.
the use of pytorch or tensorflow, which cuda/cudnn versions to use.
715
735
Specifically, the dictionary should contain the following keys:
716
736
"framework_type": either "tensorflow" or "pytorch".
@@ -723,6 +743,9 @@ def create_launch_model_from_dir(
load_model_fn_module_path: A python module path for a function that returns a model. The output feeds into
the function located at load_predict_fn_module_path.
app_config: Either a Dictionary that represents a YAML file contents or a local path to a YAML file.
+ pytorch_image_tag: Only for Launch client v1, and only if you want to use the pytorch framework type.
+ The tag of the pytorch docker image you want to use, e.g. "1.11.0-cuda11.3-cudnn8-runtime".
+ tensorflow_version: Only for Launch client v1, and only if you want to use tensorflow. Version of tensorflow, e.g. "2.3.0".
"""
from launch import LaunchClient
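
As an aside, the module-path convention for `bundle_from_dir_args` described in the docstring above maps a directory layout onto a dotted Python path. A hypothetical sketch, reusing the docstring's own example names:

```python
# Hypothetical layout, matching the example names from the docstring:
#
#   <base_path>/
#       my_module1/
#           my_inference_file.py   # defines a function f used as the load_predict_fn
#
# The corresponding entry in bundle_from_dir_args would then be:
load_predict_fn_module_path = "my_module1.my_inference_file.f"
```
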
@@ -744,13 +767,21 @@ def create_launch_model_from_dir(
**bundle_from_dir_args,
}

- bundle = launch_client.create_model_bundle_from_dirs(**kwargs)
+ if hasattr(launch_client, "create_model_bundle_from_dirs_v2"):
+ # Launch client is >= 1.0.0, use new fn
+ bundle = launch_client.create_model_bundle_from_dirs_v2(**kwargs)
+ # Different code paths give different types for bundle, although both have a .name field
+ bundle_name = bundle.name
+ else:
+ # Launch client is < 1.0.0
+ bundle = launch_client.create_model_bundle_from_dirs(**kwargs)
+ bundle_name = bundle.name

return self.create_model(
name,
reference_id,
metadata,
- bundle.name,
+ bundle_name,
)

@deprecated(
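
Both hunks above rely on the same feature-detection idiom to support either major version of the Launch client. A stripped-down sketch of that idiom follows; the helper name is hypothetical, and only the method names that appear in the diff are assumed to exist:

```python
# Sketch of the feature-detection pattern used in the diff: probe for the
# v1-only method instead of parsing a version string.
def _bundle_name_from_callable(launch_client, **kwargs) -> str:
    # Hypothetical helper, not part of the Nucleus or Launch clients.
    if hasattr(launch_client, "create_model_bundle_from_callable_v2"):
        # Launch client >= 1.0.0
        bundle = launch_client.create_model_bundle_from_callable_v2(**kwargs)
    else:
        # Launch client < 1.0.0
        bundle = launch_client.create_model_bundle(**kwargs)
    # Both return types expose a .name attribute, which is all that is needed here.
    return bundle.name
```
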