@@ -161,6 +161,10 @@ You could submit a notebook using ADS SDK APIs. Here is an example to submit a n
         .with_executor_shape_config(ocpus=4, memory_in_gbs=64)
         .with_logs_bucket_uri("oci://mybucket@mytenancy/")
         .with_private_endpoint_id("ocid1.dataflowprivateendpoint.oc1.iad.<your private endpoint ocid>")
+        .with_configuration({
+            "spark.driverEnv.myEnvVariable": "value1",
+            "spark.executorEnv.myEnvVariable": "value2",
+        })
     )
     rt = (
         DataFlowNotebookRuntime()
@@ -169,7 +173,6 @@ You could submit a notebook using ADS SDK APIs. Here is an example to submit a n
         ) # This could be local path or http path to notebook ipynb file
         .with_script_bucket("<my-bucket>")
         .with_exclude_tag(["ignore", "remove"]) # Cells to Ignore
-        .with_environment_variable(env1="test", env2="test2") # will be propagated to both driver and executor
     )
     job = Job(infrastructure=df, runtime=rt).create(overwrite=True)
     df_run = job.run(wait=True)
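
The ``spark.driverEnv.*`` and ``spark.executorEnv.*`` prefixes used above tell Data Flow to export the suffix as an environment variable on the driver and the executors respectively, which is how the removed ``with_environment_variable`` behaviour is reproduced. A minimal sketch of reading the values back from inside the submitted notebook, assuming the configuration shown above and that the keys are surfaced through the ordinary process environment:

    import os

    # Driver process: spark.driverEnv.myEnvVariable surfaces as "myEnvVariable".
    print("driver sees:", os.environ.get("myEnvVariable"))  # expected "value1"

    # Executor processes get spark.executorEnv.myEnvVariable instead, so a Spark
    # task would see "value2" (sketch, assuming `sc` is the SparkContext):
    # sc.parallelize([0]).map(lambda _: os.environ.get("myEnvVariable")).collect()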
@@ -213,7 +216,7 @@ The ``DataFlowRuntime`` properties are:
 - ``with_archive_uri`` (`doc <https://docs.oracle.com/en-us/iaas/data-flow/using/dfs_data_flow_library.htm#third-party-libraries>`__)
 - ``with_archive_bucket``
 - ``with_custom_conda``
-- ``with_environment_variable``
+- ``with_configuration``

 For more details, see the `runtime class documentation <../../ads.jobs.html#module-ads.jobs.builders.runtimes.python_runtime>`__.

@@ -272,7 +275,10 @@ accepted. In the next example, the prefix is given for ``script_bucket``.
         .with_script_uri(os.path.join(td, "script.py"))
         .with_script_bucket("oci://mybucket@namespace/prefix")
         .with_custom_conda("oci://<mybucket>@<mynamespace>/<path/to/conda_pack>")
-        .with_environment_variable(env1="test", env2="test2") # will be propagated to both driver and executor
+        .with_configuration({
+            "spark.driverEnv.myEnvVariable": "value1",
+            "spark.executorEnv.myEnvVariable": "value2",
+        })
     )
     df = Job(name=name, infrastructure=dataflow_configs, runtime=runtime_config)
     df.create()
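
Since ``with_configuration`` takes a plain dictionary of Spark settings, the same call can carry other Spark properties alongside the environment variables. A sketch under the assumption that the dictionary is passed straight through as the application's Spark configuration; the script URI and ``spark.sql.shuffle.partitions`` value here are only illustrative, not part of this change:

    runtime_config = (
        DataFlowRuntime()
        .with_script_uri("oci://mybucket@namespace/prefix/script.py")  # illustrative path
        .with_configuration({
            "spark.driverEnv.myEnvVariable": "value1",
            "spark.executorEnv.myEnvVariable": "value2",
            # Assumption: arbitrary Spark properties are accepted here as well.
            "spark.sql.shuffle.partitions": "16",
        })
    )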
@@ -380,6 +386,10 @@ In the next example, ``archive_uri`` is given as an Object Storage location.
         .with_executor_shape("VM.Standard.E4.Flex")
         .with_executor_shape_config(ocpus=4, memory_in_gbs=64)
         .with_spark_version("3.0.2")
+        .with_configuration({
+            "spark.driverEnv.myEnvVariable": "value1",
+            "spark.executorEnv.myEnvVariable": "value2",
+        })
     )
     runtime_config = (
         DataFlowRuntime()
@@ -558,11 +568,11 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
   runtime:
     kind: runtime
     spec:
+      configuration:
+        spark.driverEnv.myEnvVariable: value1
+        spark.executorEnv.myEnvVariable: value2
       scriptBucket: bucket_name
       scriptPathURI: oci://<bucket_name>@<namespace>/<prefix>
-      env:
-        - name: env1
-          value: test1
     type: dataFlow
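
With the ``configuration`` block in place, the YAML above is passed to ``Job.from_yaml()`` as described. A minimal sketch, assuming the YAML is saved locally as ``dataflow_job.yaml`` (the file name is only an example) and that ``from_yaml`` accepts a ``uri`` argument as elsewhere in ADS:

    from ads.jobs import Job

    # Build the Data Flow job from the YAML definition and submit it.
    job = Job.from_yaml(uri="dataflow_job.yaml")
    job.create()
    run = job.run(wait=True)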
**Data Flow Infrastructure YAML Schema**
@@ -631,6 +641,9 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
     privateEndpointId:
       required: false
       type: string
+    configuration:
+      required: false
+      type: dict
     type:
       allowed:
         - dataFlow
@@ -675,11 +688,9 @@ into the ``Job.from_yaml()`` function to build a Data Flow job:
             - service
           required: true
           type: string
-    env:
-      type: list
+    configuration:
       required: false
-      schema:
-        type: dict
+      type: dict
     freeform_tag:
       required: false
       type: dict