@@ -110,6 +110,12 @@ sql_alchemy_pool_pre_ping = True
# SqlAlchemy supports databases with the concept of multiple schemas.
sql_alchemy_schema =

+ # Import path for connect args in SqlAlchemy. Defaults to an empty dict.
+ # This is useful when you want to configure db engine args that SqlAlchemy won't parse
+ # in the connection string.
+ # See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args
+ # sql_alchemy_connect_args =
+
# The amount of parallelism as a setting to the executor. This defines
# the max number of task instances that should run simultaneously
# on this airflow installation
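For the new ``sql_alchemy_connect_args`` option above, a minimal sketch of what the import path could point at (the module and argument names are hypothetical; the option expects an importable dict that is handed to SQLAlchemy's create_engine as connect_args):

    # airflow_local_connect_args.py -- hypothetical module on the PYTHONPATH
    # Driver-level keyword arguments passed through to sqlalchemy.create_engine(..., connect_args=...).
    connect_args = {
        "connect_timeout": 30,  # example argument; valid keys depend on your DB driver
    }

It would then be referenced in the config as sql_alchemy_connect_args = airflow_local_connect_args.connect_args.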
@@ -124,11 +130,16 @@ dags_are_paused_at_creation = True
# The maximum number of active DAG runs per DAG
max_active_runs_per_dag = 16

- # Whether to load the examples that ship with Airflow. It's good to
+ # Whether to load the DAG examples that ship with Airflow. It's good to
# get started, but you probably want to set this to False in a production
# environment
load_examples = True

+ # Whether to load the default connections that ship with Airflow. It's good to
+ # get started, but you probably want to set this to False in a production
+ # environment
+ load_default_connections = False
+
# Where your Airflow plugins are stored
plugins_folder = /usr/local/airflow/plugins
@@ -184,17 +195,51 @@ dag_discovery_safe_mode = True
# The number of retries each task is going to have by default. Can be overridden at dag or task level.
default_task_retries = 0

- # Whether to serialises DAGs and persist them in DB.
+ # Whether to serialise DAGs and persist them in DB.
# If set to True, Webserver reads from DB instead of parsing DAG files
# More details: https://airflow.apache.org/docs/stable/dag-serialization.html
store_serialized_dags = False

# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate.
min_serialized_dag_update_interval = 30

+ # Fetching serialized DAG can not be faster than a minimum interval to reduce database
+ # read rate. This config controls when your DAGs are updated in the Webserver
+ min_serialized_dag_fetch_interval = 10
+
+ # Whether to persist DAG files code in DB.
+ # If set to True, Webserver reads file contents from DB instead of
+ # trying to access files in a DAG folder. Defaults to same as the
+ # ``store_serialized_dags`` setting.
+ # Example: store_dag_code = False
+ # store_dag_code =
+
+ # Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
+ # in the Database.
+ # When Dag Serialization is enabled (``store_serialized_dags=True``), all the template_fields
+ # for each Task Instance are stored in the Database.
+ # Keeping this number small may cause an error when you try to view the ``Rendered`` tab in
+ # the TaskInstance view for older tasks.
+ max_num_rendered_ti_fields_per_task = 30
+
# On each dagrun check against defined SLAs
check_slas = True

+ # Path to custom XCom class that will be used to store and resolve operator results
+ # Example: xcom_backend = path.to.CustomXCom
+ xcom_backend = airflow.models.xcom.BaseXCom
+
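To illustrate the ``xcom_backend`` hook above, a rough sketch of a custom backend (module and class names are hypothetical, and it assumes the serialize_value/deserialize_value static methods on BaseXCom are the intended override points):

    # custom_xcom.py -- would be referenced as xcom_backend = custom_xcom.JsonXCom
    import json

    from airflow.models.xcom import BaseXCom


    class JsonXCom(BaseXCom):
        """Illustrative backend that stores XCom values as JSON instead of pickles."""

        @staticmethod
        def serialize_value(value):
            # Called before the value is written to the metadata database.
            return json.dumps(value).encode("utf-8")

        @staticmethod
        def deserialize_value(result):
            # ``result`` is the XCom row read back from the database.
            return json.loads(result.value.decode("utf-8"))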
+ [secrets]
+ # Full class name of secrets backend to enable (will precede env vars and metastore in search path)
+ # Example: backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend
+ backend =
+
+ # The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
+ # See documentation for the secrets backend you are using. JSON is expected.
+ # Example for AWS Systems Manager ParameterStore:
+ # ``{{"connections_prefix": "/airflow/connections", "profile_name": "default"}}``
+ backend_kwargs =
+
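As a sketch of where backend_kwargs ends up: the JSON dict is expanded into the backend class's __init__. Instantiating the example backend directly (the connection id is hypothetical and the lookup assumes valid AWS credentials) would look roughly like this:

    # Mirrors backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend
    # with the backend_kwargs example above.
    from airflow.contrib.secrets.aws_systems_manager import (
        SystemsManagerParameterStoreBackend,
    )

    backend = SystemsManagerParameterStoreBackend(
        connections_prefix="/airflow/connections",
        profile_name="default",
    )
    # Resolves the parameter /airflow/connections/my_postgres to a connection URI.
    print(backend.get_conn_uri(conn_id="my_postgres"))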
[cli]
# In what way should the cli access the API. The LocalClient will use the
# database directly, while the json_client will use the api running on the
@@ -212,7 +257,9 @@ endpoint_url = http://localhost:8080
fail_fast = False

[api]
- # How to authenticate users of the API
+ # How to authenticate users of the API. See
+ # https://airflow.apache.org/docs/stable/security.html for possible values.
+ # ("airflow.api.auth.backend.default" allows all requests for historic reasons)
auth_backend = airflow.api.auth.backend.default

[lineage]
@@ -245,6 +292,12 @@ default_hive_mapred_queue =
# airflow sends to point links to the right web server
base_url = http://localhost:8080

+ # Default timezone to display all dates in the RBAC UI, can be UTC, system, or
+ # any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
+ # default value of core/default_timezone will be used
+ # Example: default_ui_timezone = America/New_York
+ default_ui_timezone =
+
# The ip specified when starting the web server
web_server_host = 0.0.0.0
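A quick way to confirm that a candidate value for default_ui_timezone is a valid IANA name (pendulum ships with Airflow; this is only an illustrative check, not part of the config):

    import pendulum

    # Raises an error for an unknown zone name, so a typo is caught before editing the config.
    tz = pendulum.timezone("America/New_York")
    print(tz)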
@@ -273,6 +326,10 @@ worker_refresh_batch_size = 1
# Number of seconds to wait before refreshing a batch of workers.
worker_refresh_interval = 30

+ # If set to True, Airflow will track files in the plugins_folder directory. When it detects changes,
+ # it will reload gunicorn.
+ reload_on_plugin_change = False
+
# Secret key used to run your flask app
# It should be as random as possible
secret_key = temporary_key
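Since the secret_key should be as random as possible, one simple way to generate a value with the Python standard library (how it is injected into the config -- environment variable, template, etc. -- is deployment-specific):

    import secrets

    # 32 hex characters of cryptographically secure randomness for the Flask secret key.
    print(secrets.token_hex(16))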
@@ -734,18 +791,30 @@ verify_certs = True
[kubernetes]
# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
worker_container_repository =
+
+ # Path to the YAML pod file. If set, all other kubernetes-related fields are ignored.
+ # (This feature is experimental)
+ pod_template_file =
worker_container_tag =
worker_container_image_pull_policy = IfNotPresent

- # If True (default), worker pods will be deleted upon termination
+ # If True, all worker pods will be deleted upon termination
delete_worker_pods = True

+ # If False (and delete_worker_pods is True),
+ # failed worker pods will not be deleted so users can investigate them.
+ delete_worker_pods_on_failure = False
+
# Number of Kubernetes Worker Pod creation calls per scheduler loop
worker_pods_creation_batch_size = 1

# The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
namespace = default

+ # Allows users to launch pods in multiple namespaces.
+ # Will require creating a cluster-role for the scheduler
+ multi_namespace_mode = False
+
# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file)
# Example: airflow_configmap = airflow-configmap
airflow_configmap =
@@ -782,6 +851,9 @@ dags_in_image = False
# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
dags_volume_subpath =

+ # For either git sync or volume mounted DAGs, the worker will mount the volume in this path
+ dags_volume_mount_point =
+
# For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path)
dags_volume_claim =
@@ -810,6 +882,10 @@ env_from_secret_ref =
# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
git_repo =
git_branch =
+
+ # Use a shallow clone with a history truncated to the specified number of commits.
+ # 0 - do not use shallow clone.
+ git_sync_depth = 1
git_subpath =

# The specific rev or hash the git_sync init container will checkout
@@ -931,10 +1007,18 @@ tolerations =
# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely
# for kubernetes api responses, which will cause the scheduler to hang.
# The timeout is specified as [connect timeout, read timeout]
- kube_client_request_args = {{"_request_timeout" : [60,60] }}
+ kube_client_request_args =
+
+ # Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
+ # ``core_v1_api`` method when using the Kubernetes Executor.
+ # This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
+ # class defined here:
+ # https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
+ # Example: delete_option_kwargs = {{"grace_period_seconds": 10}}
+ delete_option_kwargs =
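A rough sketch of where these two options land on the kubernetes Python client when the Kubernetes Executor cleans up a pod (the pod name and in-cluster config are hypothetical, for illustration only):

    from kubernetes import client, config

    config.load_incluster_config()  # or config.load_kube_config() outside the cluster
    v1 = client.CoreV1Api()

    # Entries from kube_client_request_args (e.g. _request_timeout) are passed as extra
    # kwargs on API calls; entries from delete_option_kwargs (e.g. grace_period_seconds)
    # are passed to delete_namespaced_pod.
    v1.delete_namespaced_pod(
        name="example-worker-pod",
        namespace="default",
        grace_period_seconds=10,
        _request_timeout=(60, 60),
    )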
# Specifies the uid to run the first process of the worker pods containers as
- run_as_user =
+ run_as_user = 50000

# Specifies a gid to associate with all containers in the worker pods
# if using a git_ssh_key_secret_name use an fs_group