diff --git a/config/airflow.cfg b/config/airflow.cfg
index bbaed40f4a001703480ad33e8c26626fe8cc532b..daf7de729d01911e56f3a7370f3307cff9f6323b 100644
--- a/config/airflow.cfg
+++ b/config/airflow.cfg
@@ -3,79 +3,41 @@
 # subfolder in a code repository. This path must be absolute.
 dags_folder = /opt/airflow/dags
 
-# The folder where airflow should store its log files
-# This path must be absolute
-base_log_folder = /opt/airflow/logs
-
-# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
-# Set this to True if you want to enable remote logging.
-remote_logging = False
-
-# Users must supply an Airflow connection id that provides access to the storage
-# location.
-remote_log_conn_id =
-remote_base_log_folder =
-encrypt_s3_logs = False
-
-# Logging level
-logging_level = INFO
-
-# Logging level for Flask-appbuilder UI
-fab_logging_level = WARN
-
-# Logging class
-# Specify the class that will specify the logging configuration
-# This class has to be on the python classpath
-# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
-logging_config_class =
-
-# Flag to enable/disable Colored logs in Console
-# Colour the logs when the controlling terminal is a TTY.
-colored_console_log = True
-
-# Log format for when Colored logs is enabled
-colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
-colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
-
-# Format of Log line
-log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
-simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
-
-# Log filename format
-log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
-log_processor_filename_template = {{ filename }}.log
-dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log
-
-# Name of handler to read task instance logs.
-# Default to use task handler.
-task_log_reader = task
-
 # Hostname by providing a path to a callable, which will resolve the hostname.
-# The format is "package:function".
+# The format is "package.function".
 #
-# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket"
+# For example, default value "socket.getfqdn" means that the result from getfqdn() of the "socket"
 # package will be used as hostname.
 #
 # No argument should be required in the function specified.
-# If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address``
-hostname_callable = socket:getfqdn
+# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address``
+hostname_callable = socket.getfqdn
 
 # Default timezone in case supplied date times are naive
 # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
 default_timezone = utc
 
 # The executor class that airflow should use. Choices include
-# SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor, KubernetesExecutor
+# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``,
+# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the
+# full import path to the class when using a custom executor.
 executor = SequentialExecutor
 
 # The SqlAlchemy connection string to the metadata database.
-# SqlAlchemy supports many different database engine, more information
-# their website
-# sql_alchemy_conn = sqlite:////tmp/airflow.db
+# SqlAlchemy supports many different database engines.
+# More information here:
+# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri
+# sql_alchemy_conn = sqlite:////opt/airflow/airflow.db
 
 # The encoding for the databases
 sql_engine_encoding = utf-8
 
+# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have a different encoding.
+# This is particularly useful with MySQL and the utf8mb4 encoding, because the primary keys of
+# the XCom table become too large; in that case ``sql_engine_collation_for_ids`` should
+# be set to ``utf8mb3_general_ci``.
+# sql_engine_collation_for_ids =
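+# For example (illustrative value for the MySQL case described above):
+# sql_engine_collation_for_ids = utf8mb3_general_ci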
+
 # If SqlAlchemy should pool database connections.
 sql_alchemy_pool_enabled = True
 
@@ -90,8 +52,8 @@ sql_alchemy_pool_size = 5
 # It follows then that the total number of simultaneous connections the pool will allow
 # is pool_size + max_overflow,
 # and the total number of "sleeping" connections the pool will allow is pool_size.
-# max_overflow can be set to -1 to indicate no overflow limit;
-# no limit will be placed on the total number of concurrent connections. Defaults to 10.
+# max_overflow can be set to ``-1`` to indicate no overflow limit;
+# no limit will be placed on the total number of concurrent connections. Defaults to ``10``.
 sql_alchemy_max_overflow = 10
 
 # The SqlAlchemy pool recycle is the number of seconds a connection
@@ -110,41 +72,71 @@ sql_alchemy_pool_pre_ping = True
 # SqlAlchemy supports databases with the concept of multiple schemas.
 sql_alchemy_schema =
 
-# The amount of parallelism as a setting to the executor. This defines
-# the max number of task instances that should run simultaneously
-# on this airflow installation
+# Import path for connect args in SqlAlchemy. Defaults to an empty dict.
+# This is useful when you want to configure db engine args that SqlAlchemy won't parse
+# in the connection string.
+# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args
+# sql_alchemy_connect_args =
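+# Example (hypothetical import path pointing at a dict defined in your own settings module):
+# sql_alchemy_connect_args = my_airflow_settings.SQL_ALCHEMY_CONNECT_ARGS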
+
+# This defines the maximum number of task instances that can run concurrently in Airflow
+# regardless of scheduler count and worker count. Generally, this value is reflective of
+# the number of task instances with the running state in the metadata database.
 parallelism = 32
 
-# The number of task instances allowed to run concurrently by the scheduler
+# The maximum number of task instances allowed to run concurrently in each DAG. To calculate
+# the number of tasks that are running concurrently for a DAG, add up the number of running
+# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``concurrency``,
+# which is defaulted as ``dag_concurrency``.
 dag_concurrency = 16
 
 # Are DAGs paused by default at creation
 dags_are_paused_at_creation = True
 
-# The maximum number of active DAG runs per DAG
+# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs
+# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``,
+# which is defaulted as ``max_active_runs_per_dag``.
 max_active_runs_per_dag = 16
 
-# Whether to load the examples that ship with Airflow. It's good to
-# get started, but you probably want to set this to False in a production
+# Whether to load the DAG examples that ship with Airflow. It's good to
+# get started, but you probably want to set this to ``False`` in a production
 # environment
 load_examples = True
 
-# Where your Airflow plugins are stored
+# Whether to load the default connections that ship with Airflow. It's good to
+# get started, but you probably want to set this to ``False`` in a production
+# environment
+load_default_connections = True
+
+# Path to the folder containing Airflow plugins
 plugins_folder = /opt/airflow/plugins
 
+# Should tasks be executed by forking the parent process ("False",
+# the speedier option) or by spawning a new Python process ("True", slower,
+# but means plugin changes are picked up by tasks straight away)
+execute_tasks_new_python_interpreter = False
+
 # Secret key to save connection passwords in the db
-fernet_key = $FERNET_KEY
+fernet_key = 
 
 # Whether to disable pickling dags
-donot_pickle = False
+donot_pickle = True
 
 # How long before timing out a python file import
-dagbag_import_timeout = 30
+dagbag_import_timeout = 30.0
+
+# Should a traceback be shown in the UI for dagbag import errors,
+# instead of just the exception message
+dagbag_import_error_tracebacks = True
+
+# If tracebacks are shown, how many entries from the traceback should be shown
+dagbag_import_error_traceback_depth = 2
 
 # How long before timing out a DagFileProcessor, which processes a dag file
 dag_file_processor_timeout = 50
 
-# The class to use for running task instances in a subprocess
+# The class to use for running task instances in a subprocess.
+# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class
+# when using a custom task runner.
 task_runner = StandardTaskRunner
 
 # If set, tasks without a ``run_as_user`` argument will be run with this user
@@ -154,17 +146,13 @@ default_impersonation =
 # What security module to use (for example kerberos)
 security =
 
-# If set to False enables some unsecure features like Charts and Ad Hoc Queries.
-# In 2.0 will default to True.
-secure_mode = False
-
 # Turn unit test mode on (overwrites many configuration options with test
 # values at runtime)
 unit_test_mode = False
 
 # Whether to enable pickling for xcom (note that this is insecure and allows for
-# RCE exploits). This will be deprecated in Airflow 2.0 (be forced to False).
-enable_xcom_pickling = True
+# RCE exploits).
+enable_xcom_pickling = False
 
 # When a task is killed forcefully, this is the amount of time in seconds that
 # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
@@ -173,10 +161,7 @@ killed_task_cleanup_time = 60
 # Whether to override params with dag_run.conf. If you pass some key-value pairs
 # through ``airflow dags backfill -c`` or
 # ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
-dag_run_conf_overrides_params = False
-
-# Worker initialisation check to validate Metadata Database connection
-worker_precheck = False
+dag_run_conf_overrides_params = True
 
 # When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``.
 dag_discovery_safe_mode = True
@@ -184,17 +169,180 @@ dag_discovery_safe_mode = True
 # The number of retries each task is going to have by default. Can be overridden at dag or task level.
 default_task_retries = 0
 
-# Whether to serialises DAGs and persist them in DB.
-# If set to True, Webserver reads from DB instead of parsing DAG files
-# More details: https://airflow.apache.org/docs/stable/dag-serialization.html
-store_serialized_dags = False
-
 # Updating serialized DAG can not be faster than a minimum interval to reduce database write rate.
 min_serialized_dag_update_interval = 30
 
+# Fetching a serialized DAG cannot happen faster than this minimum interval, to reduce database
+# read rate. This config controls when your DAGs are updated in the Webserver.
+min_serialized_dag_fetch_interval = 10
+
+# Whether to persist DAG file code in the DB.
+# If set to True, Webserver reads file contents from DB instead of
+# trying to access files in a DAG folder.
+# (Default is ``True``)
+# Example: store_dag_code = True
+# store_dag_code =
+
+# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
+# in the Database.
+# All the template_fields for each Task Instance are stored in the Database.
+# Keeping this number small may cause an error when you try to view the ``Rendered`` tab in
+# the TaskInstance view for older tasks.
+max_num_rendered_ti_fields_per_task = 30
+
 # On each dagrun check against defined SLAs
 check_slas = True
 
+# Path to custom XCom class that will be used to store and resolve operators results
+# Example: xcom_backend = path.to.CustomXCom
+xcom_backend = airflow.models.xcom.BaseXCom
+
+# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``
+# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from a module.
+lazy_load_plugins = True
+
+# By default Airflow providers are lazily-discovered (discovery and imports happen only when required).
+# Set it to False if you want to discover providers whenever 'airflow' is invoked via cli or
+# loaded from a module.
+lazy_discover_providers = True
+
+# Number of times the code should be retried in case of DB Operational Errors.
+# Not all transactions will be retried, as retrying can cause an undesired state.
+# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``.
+max_db_retries = 3
+
+# Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True
+#
+# (Connection passwords are always hidden in logs)
+hide_sensitive_var_conn_fields = True
+
+# A comma-separated list of extra sensitive keywords to look for in variables names or connection's
+# extra JSON.
+sensitive_var_conn_names =
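+# Example (illustrative extra keywords only, not defaults):
+# sensitive_var_conn_names = private_key,api_token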
+
+[logging]
+# The folder where airflow should store its log files
+# This path must be absolute
+base_log_folder = /opt/airflow/logs
+
+# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
+# Set this to True if you want to enable remote logging.
+remote_logging = False
+
+# Users must supply an Airflow connection id that provides access to the storage
+# location.
+remote_log_conn_id =
+
+# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default
+# Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+google_key_path =
+
+# Storage bucket URL for remote logging
+# S3 buckets should start with "s3://"
+# Cloudwatch log groups should start with "cloudwatch://"
+# GCS buckets should start with "gs://"
+# WASB buckets should start with "wasb" just to help Airflow select correct handler
+# Stackdriver logs should start with "stackdriver://"
+remote_base_log_folder =
+
+# Use server-side encryption for logs stored in S3
+encrypt_s3_logs = False
+
+# Logging level.
+#
+# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
+logging_level = INFO
+
+# Logging level for Flask-appbuilder UI.
+#
+# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
+fab_logging_level = WARN
+
+# Logging class
+# Specify the class that will specify the logging configuration
+# This class has to be on the python classpath
+# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
+logging_config_class =
+
+# Flag to enable/disable Colored logs in Console
+# Colour the logs when the controlling terminal is a TTY.
+colored_console_log = True
+
+# Log format for when Colored logs is enabled
+colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
+colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
+
+# Format of Log line
+log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
+simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
+
+# Specify a prefix pattern (like the example below) to use with the stream handler TaskHandlerWithCustomFormatter
+# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number}
+task_log_prefix_template =
+
+# Formatting for how airflow generates file names/paths for each task run.
+log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
+
+# Formatting for how airflow generates file names for log
+log_processor_filename_template = {{ filename }}.log
+
+# full path of dag_processor_manager logfile
+dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log
+
+# Name of handler to read task instance logs.
+# Defaults to use ``task`` handler.
+task_log_reader = task
+
+# A comma-separated list of third-party logger names that will be configured to print messages to
+# consoles.
+# Example: extra_loggers = connexion,sqlalchemy
+extra_loggers =
+
+[metrics]
+
+# StatsD (https://github.com/etsy/statsd) integration settings.
+# Enables sending metrics to StatsD.
+statsd_on = False
+statsd_host = localhost
+statsd_port = 8125
+statsd_prefix = airflow
+
+# If you want to avoid sending all the available metrics to StatsD,
+# you can configure an allow list of prefixes (comma-separated) to send only the metrics that
+# start with the elements of the list (e.g. "scheduler,executor,dagrun")
+statsd_allow_list =
+
+# A function that validates the statsd stat name, applies changes to the stat name if necessary and returns
+# the transformed stat name.
+#
+# The function should have the following signature:
+# def func_name(stat_name: str) -> str:
+stat_name_handler =
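+# Example (hypothetical import path to a function with the signature above):
+# stat_name_handler = my_company.stats.sanitize_stat_name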
+
+# To enable datadog integration to send airflow metrics.
+statsd_datadog_enabled = False
+
+# List of datadog tags attached to all metrics (e.g. key1:value1,key2:value2)
+statsd_datadog_tags =
+
+# If you want to utilise your own custom Statsd client set the relevant
+# module path below.
+# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up
+# statsd_custom_client_path =
+
+[secrets]
+# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
+# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
+backend =
+
+# The backend_kwargs param is loaded into a dictionary and passed to __init__ of the secrets backend class.
+# See documentation for the secrets backend you are using. JSON is expected.
+# Example for AWS Systems Manager ParameterStore:
+# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}``
+backend_kwargs =
+
 [cli]
 # In what way should the cli access the API. The LocalClient will use the
 # database directly, while the json_client will use the api running on the
@@ -207,13 +355,59 @@ api_client = airflow.api.client.local_client
 endpoint_url = http://localhost:8080
 
 [debug]
-# Used only with DebugExecutor. If set to True DAG will fail with first
+# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first
 # failed task. Helpful for debugging purposes.
 fail_fast = False
 
 [api]
-# How to authenticate users of the API
-auth_backend = airflow.api.auth.backend.default
+# Enables the deprecated experimental API. Please note that these APIs do not have access control.
+# The authenticated user has full access.
+#
+# .. warning::
+#
+#   This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is
+#   deprecated since version 2.0. Please consider using
+#   `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__.
+#   For more information on migration, see
+#   `UPDATING.md <https://github.com/apache/airflow/blob/main/UPDATING.md>`_
+enable_experimental_api = False
+
+# How to authenticate users of the API. See
+# https://airflow.apache.org/docs/apache-airflow/stable/security.html for possible values.
+# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
+auth_backend = airflow.api.auth.backend.deny_all
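+# For example, to require HTTP Basic authentication against Airflow users instead of denying
+# all requests, the ``basic_auth`` backend bundled with Airflow 2.x can be used:
+# auth_backend = airflow.api.auth.backend.basic_auth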
+
+# Used to set the maximum page limit for API requests
+maximum_page_limit = 100
+
+# Used to set the default page limit when limit is zero. A default limit
+# of 100 is set in the OpenAPI spec. However, this particular default limit
+# only works when limit is set equal to zero (0) in API requests.
+# If no limit is supplied, the OpenAPI spec default is used.
+fallback_page_limit = 100
+
+# The intended audience for JWT token credentials used for authorization. This value must match on the client and server sides. If empty, audience will not be tested.
+# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com
+google_oauth2_audience =
+
+# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on
+# `the Application Default Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+# Example: google_key_path = /files/service-account-json
+google_key_path =
+
+# Used in response to a preflight request to indicate which HTTP
+# headers can be used when making the actual request. This header is
+# the server side response to the browser's
+# Access-Control-Request-Headers header.
+access_control_allow_headers =
+
+# Specifies the method or methods allowed when accessing the resource.
+access_control_allow_methods =
+
+# Indicates whether the response can be shared with requesting code from the given origin.
+access_control_allow_origin =
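+# Example (illustrative CORS values for a single trusted origin, not defaults):
+# access_control_allow_headers = origin, content-type, accept
+# access_control_allow_methods = POST, GET, OPTIONS
+# access_control_allow_origin = https://my-airflow-ui.example.com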
 
 [lineage]
 # what lineage backend to use
@@ -235,16 +429,33 @@ default_ram = 512
 default_disk = 512
 default_gpus = 0
 
+# Default queue that tasks get assigned to and that workers listen on.
+default_queue = default
+
+# Whether it is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator operator.
+# If set to False, an exception will be thrown; otherwise only a console message will be displayed.
+allow_illegal_arguments = False
+
 [hive]
 # Default mapreduce queue for HiveOperator tasks
 default_hive_mapred_queue =
 
+# Template for mapred_job_name in HiveOperator, supports the following named parameters:
+# hostname, dag_id, task_id, execution_date
+# mapred_job_name_template =
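+# Example (illustrative template using the parameters above):
+# mapred_job_name_template = Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}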
+
 [webserver]
 # The base url of your website as airflow cannot guess what domain or
 # cname you are using. This is used in automated emails that
 # airflow sends to point links to the right web server
 base_url = http://localhost:8080
 
+# Default timezone to display all dates in the UI, can be UTC, system, or
+# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
+# default value of core/default_timezone will be used
+# Example: default_ui_timezone = America/New_York
+default_ui_timezone = UTC
+
 # The ip specified when starting the web server
 web_server_host = 0.0.0.0
 
@@ -271,11 +482,16 @@ web_server_worker_timeout = 120
 worker_refresh_batch_size = 1
 
 # Number of seconds to wait before refreshing a batch of workers.
-worker_refresh_interval = 30
+worker_refresh_interval = 6000
+
+# If set to True, Airflow will track files in the plugins_folder directory. When it detects changes,
+# it will reload gunicorn.
+reload_on_plugin_change = False
 
-# Secret key used to run your flask app
-# It should be as random as possible
-secret_key = temporary_key
+# Secret key used to run your flask app. It should be as random as possible. However, when running
+# more than one instance of the webserver, make sure all of them use the same ``secret_key``, otherwise
+# one of them will error with "CSRF session token is missing".
+secret_key = 8kUFwlRKUhs6i8NBAvUmWg==
 
 # Number of workers to run the Gunicorn web server
 workers = 4
@@ -290,8 +506,13 @@ access_logfile = -
 # Log files for the gunicorn webserver. '-' means log to stderr.
 error_logfile = -
 
+# Access log format for gunicorn webserver.
+# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s"
+# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format
+access_logformat =
+
 # Expose the configuration file in the web server
-expose_config = True
+expose_config = False
 
 # Expose hostname in the web server
 expose_hostname = True
@@ -299,32 +520,13 @@ expose_hostname = True
 # Expose stacktrace in the web server
 expose_stacktrace = True
 
-# Set to true to turn on authentication:
-# https://airflow.apache.org/security.html#web-authentication
-authenticate = False
-
-# Filter the list of dags by owner name (requires authentication to be enabled)
-filter_by_owner = False
-
-# Filtering mode. Choices include user (default) and ldapgroup.
-# Ldap group filtering requires using the ldap backend
-#
-# Note that the ldap server needs the "memberOf" overlay to be set up
-# in order to user the ldapgroup mode.
-owner_mode = user
-
-# Default DAG view. Valid values are:
-# tree, graph, duration, gantt, landing_times
+# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times``
 dag_default_view = tree
 
-# "Default DAG orientation. Valid values are:"
-# LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
+# Default DAG orientation. Valid values are:
+# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top)
 dag_orientation = LR
 
-# Puts the webserver in demonstration mode; blurs the names of Operators for
-# privacy.
-demo_mode = False
-
 # The amount of time (in secs) webserver will wait for initial handshake
 # while fetching logs from other worker machine
 log_fetch_timeout_sec = 5
@@ -345,11 +547,8 @@ hide_paused_dags_by_default = False
 # Consistent page size across all listing views in the UI
 page_size = 100
 
-# Use FAB-based webserver with RBAC feature
-rbac = False
-
 # Define the color of navigation bar
-navbar_color = #007A87
+navbar_color = #fff
 
 # Default dagrun to show in UI
 default_dag_run_display_number = 25
@@ -377,7 +576,7 @@ proxy_fix_x_prefix = 1
 cookie_secure = False
 
 # Set samesite policy on session cookie
-cookie_samesite =
+cookie_samesite = Lax
 
 # Default setting for wrap toggle on DAG code and TI log views.
 default_wrap = False
@@ -392,22 +591,46 @@ x_frame_enabled = True
 # Unique ID of your account in the analytics tool
 # analytics_id =
 
+# 'Recent Tasks' stats will show for old DagRuns if set
+show_recent_stats_for_completed_runs = True
+
 # Update FAB permissions and sync security manager roles
 # on webserver startup
 update_fab_perms = True
 
-# Minutes of non-activity before logged out from UI
-# 0 means never get forcibly logged out
-force_log_out_after = 0
-
-# The UI cookie lifetime in days
-session_lifetime_days = 30
+# The UI cookie lifetime in minutes. User will be logged out from UI after
+# ``session_lifetime_minutes`` of non-activity
+session_lifetime_minutes = 43200
 
-instance_name = "eFlows4HPC Pipelines"
+# Sets a custom page title for the DAGs overview page and site title for all pages
+instance_name = eFlows4HPC
 
 [email]
+
+# Configuration for the email backend and whether to
+# send email alerts on retry or failure.
+# Email backend to use
 email_backend = airflow.utils.email.send_email_smtp
 
+# Email connection to use
+email_conn_id = smtp_default
+
+# Whether email alerts should be sent when a task is retried
+default_email_on_retry = True
+
+# Whether email alerts should be sent when a task failed
+default_email_on_failure = True
+
+# File that will be used as the template for Email subject (which will be rendered using Jinja2).
+# If not set, Airflow uses a base template.
+# Example: subject_template = /path/to/my_subject_template_file
+# subject_template =
+
+# File that will be used as the template for Email content (which will be rendered using Jinja2).
+# If not set, Airflow uses a base template.
+# Example: html_content_template = /path/to/my_html_content_template_file
+# html_content_template =
+
 [smtp]
 
 # If you want airflow to send emails on retries, failure, and you want to use
@@ -422,12 +645,30 @@ smtp_ssl = False
 # smtp_password =
 smtp_port = 25
 smtp_mail_from = airflow@example.com
+smtp_timeout = 30
+smtp_retry_limit = 5
 
 [sentry]
 
-# Sentry (https://docs.sentry.io) integration
+# Sentry (https://docs.sentry.io) integration. Here you can supply
+# additional configuration options based on the Python platform. See:
+# https://docs.sentry.io/error-reporting/configuration/?platform=python.
+# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``,
+# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``.
+# Enable error reporting to Sentry
+sentry_on = false
 sentry_dsn =
 
+[celery_kubernetes_executor]
+
+# This section only applies if you are using the ``CeleryKubernetesExecutor`` in
+# ``[core]`` section above
+# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``.
+# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),
+# the task is executed via ``KubernetesExecutor``,
+# otherwise via ``CeleryExecutor``
+kubernetes_queue = kubernetes
+
 [celery]
 
 # This section only applies if you are using the CeleryExecutor in
@@ -448,7 +689,17 @@ worker_concurrency = 16
 # If autoscale option is available, worker_concurrency will be ignored.
 # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
 # Example: worker_autoscale = 16,12
-worker_autoscale = 16,12
+# worker_autoscale =
+
+# Used to increase the number of tasks that a worker prefetches which can improve performance.
+# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks
+# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily
+# blocked if there are multiple workers and one worker prefetches tasks that sit behind long
+# running tasks while another worker has unutilized processes that are unable to process the already
+# claimed blocked tasks.
+# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits
+# Example: worker_prefetch_multiplier = 1
+# worker_prefetch_multiplier =
 
 # When you start an airflow worker, airflow starts a tiny web server
 # subprocess to serve the workers local log files to the airflow main
@@ -457,11 +708,14 @@ worker_autoscale = 16,12
 # visible from the main web server to connect into the workers.
 worker_log_server_port = 8793
 
+# Umask that will be used when starting workers with the ``airflow celery worker``
+# in daemon mode. This controls the file-creation mode mask which determines the initial
+# value of file permission bits for newly created files.
+worker_umask = 0o077
+
 # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
-# a sqlalchemy database. Refer to the Celery documentation for more
-# information.
-# http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
-broker_url = redis://redis:6379/1
+# a sqlalchemy database. Refer to the Celery documentation for more information.
+broker_url = redis://redis:6379/0
 
 # The Celery result_backend. When a job finishes, it needs to update the
 # metadata of the job. Therefore it will post a message on a message bus,
@@ -469,10 +723,10 @@ broker_url = redis://redis:6379/1
 # This status is used by the scheduler to update the state of the task
 # The use of a database is highly recommended
 # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
-result_backend = db+postgresql://airflow:airflow@postgres/airflow
+result_backend = db+postgresql://postgres:airflow@postgres/airflow
 
 # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
-# it ``airflow flower``. This defines the IP that Celery Flower runs on
+# it ``airflow celery flower``. This defines the IP that Celery Flower runs on
 flower_host = 0.0.0.0
 
 # The root URL for Flower
@@ -487,24 +741,19 @@ flower_port = 5555
 # Example: flower_basic_auth = user1:password1,user2:password2
 flower_basic_auth =
 
-# Default queue that tasks get assigned to and that worker listen on.
-default_queue = default
-
 # How many processes CeleryExecutor uses to sync task state.
 # 0 means to use max(1, number of cores - 1) processes.
 sync_parallelism = 0
 
 # Import path for celery configuration options
 celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
-
-# In case of using SSL
 ssl_active = False
 ssl_key =
 ssl_cert =
 ssl_cacert =
 
 # Celery Pool implementation.
-# Choices include: prefork (default), eventlet, gevent or solo.
+# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``.
 # See:
 # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
 # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
@@ -512,7 +761,23 @@ pool = prefork
 
 # The number of seconds to wait before timing out ``send_task_to_executor`` or
 # ``fetch_celery_task_state`` operations.
-operation_timeout = 2
+operation_timeout = 1.0
+
+# Celery task will report its status as 'started' when the task is executed by a worker.
+# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted
+# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob.
+task_track_started = True
+
+# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear
+# stalled tasks.
+task_adoption_timeout = 600
+
+# The maximum number of retries for publishing task messages to the broker when failing
+# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed.
+task_publish_max_retries = 3
+
+# Worker initialisation check to validate Metadata Database connection
+worker_precheck = False
 
 [celery_broker_transport_options]
 
@@ -547,15 +812,15 @@ tls_key =
 # listen (in seconds).
 job_heartbeat_sec = 5
 
+# How often (in seconds) to check and tidy up 'running' TaskInstances
+# that no longer have a matching DagRun
+clean_tis_without_dagrun_interval = 15.0
+
 # The scheduler constantly tries to trigger new tasks (look at the
 # scheduler section in the docs for more information). This defines
 # how often the scheduler should run (in seconds).
 scheduler_heartbeat_sec = 5
 
-# After how much time should the scheduler terminate in seconds
-# -1 indicates to run continuously (see also num_runs)
-run_duration = -1
-
 # The number of times to try to schedule each DAG file
 # -1 indicates unlimited number
 num_runs = -1
@@ -563,8 +828,10 @@ num_runs = -1
 # The number of seconds to wait between consecutive DAG file processing
 processor_poll_interval = 1
 
-# after how much time (seconds) a new DAGs should be picked up from the filesystem
-min_file_process_interval = 0
+# Number of seconds after which a DAG file is parsed. The DAG file is parsed every
+# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after
+# this interval. Keeping this number low will increase CPU usage.
+min_file_process_interval = 30
 
 # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
 dag_dir_list_interval = 300
@@ -572,10 +839,16 @@ dag_dir_list_interval = 300
 # How often should stats be printed to the logs. Setting to 0 will disable printing stats
 print_stats_interval = 30
 
+# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled)
+pool_metrics_interval = 5.0
+
 # If the last scheduler heartbeat happened more than scheduler_health_check_threshold
 # ago (in seconds), scheduler is considered unhealthy.
 # This is used by the health check in the "/health" endpoint
 scheduler_health_check_threshold = 30
+
+# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs
+orphaned_tasks_check_interval = 300.0
 child_process_log_directory = /opt/airflow/logs/scheduler
 
 # Local task jobs periodically heartbeat to the DB. If the job has
@@ -583,10 +856,10 @@ child_process_log_directory = /opt/airflow/logs/scheduler
 # associated task instance as failed and will re-schedule the task.
 scheduler_zombie_task_threshold = 300
 
-# Turn off scheduler catchup by setting this to False.
+# Turn off scheduler catchup by setting this to ``False``.
 # Default behavior is unchanged and
 # Command Line Backfills still work, but the scheduler
-# will not do scheduler catchup if this is False,
+# will not do scheduler catchup if this is ``False``,
 # however it can be set on a per DAG basis in the
 # DAG definition (catchup)
 catchup_by_default = True
@@ -601,21 +874,37 @@ catchup_by_default = True
 # Set this to 0 for no limit (not advised)
 max_tis_per_query = 512
 
-# Statsd (https://github.com/etsy/statsd) integration settings
-statsd_on = False
-statsd_host = localhost
-statsd_port = 8125
-statsd_prefix = airflow
+# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries.
+# If this is set to False then you should not run more than a single
+# scheduler at once
+use_row_level_locking = True
 
-# If you want to avoid send all the available metrics to StatsD,
-# you can configure an allow list of prefixes to send only the metrics that
-# start with the elements of the list (e.g: scheduler,executor,dagrun)
-statsd_allow_list =
+# Max number of DAGs to create DagRuns for per scheduler loop.
+max_dagruns_to_create_per_loop = 10
+
+# How many DagRuns should a scheduler examine (and lock) when scheduling
+# and queuing tasks.
+max_dagruns_per_loop_to_schedule = 20
 
-# The scheduler can run multiple threads in parallel to schedule dags.
-# This defines how many threads will run.
-max_threads = 2
-authenticate = False
+# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the
+# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other
+# dags in some circumstances
+schedule_after_task_execution = True
+
+# The scheduler can run multiple processes in parallel to parse dags.
+# This defines how many processes will run.
+parsing_processes = 2
+
+# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``.
+# The scheduler will list and sort the dag files to decide the parsing order.
+#
+# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the
+#   recently modified DAGs first.
+# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the
+#   same host. This is useful when running with Scheduler in HA mode where each scheduler can
+#   parse different DAG files.
+# * ``alphabetical``: Sort by filename
+file_parsing_sort_mode = modified_time
 
 # Turn off scheduler use of cron intervals by setting this to False.
 # DAGs submitted manually in the web UI or with trigger_dag will still run.
@@ -625,69 +914,8 @@ use_job_schedule = True
 # Only has effect if schedule_interval is set to None in DAG
 allow_trigger_in_future = False
 
-[ldap]
-# set this to ldaps://<your.ldap.server>:<port>
-uri =
-user_filter = objectClass=*
-user_name_attr = uid
-group_member_attr = memberOf
-superuser_filter =
-data_profiler_filter =
-bind_user = cn=Manager,dc=example,dc=com
-bind_password = insecure
-basedn = dc=example,dc=com
-cacert = /etc/ca/ldap_ca.crt
-search_scope = LEVEL
-
-# This setting allows the use of LDAP servers that either return a
-# broken schema, or do not return a schema.
-ignore_malformed_schema = False
-
-[mesos]
-# Mesos master address which MesosExecutor will connect to.
-master = localhost:5050
-
-# The framework name which Airflow scheduler will register itself as on mesos
-framework_name = Airflow
-
-# Number of cpu cores required for running one task instance using
-# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
-# command on a mesos slave
-task_cpu = 1
-
-# Memory in MB required for running one task instance using
-# 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
-# command on a mesos slave
-task_memory = 256
-
-# Enable framework checkpointing for mesos
-# See http://mesos.apache.org/documentation/latest/slave-recovery/
-checkpoint = False
-
-# Failover timeout in milliseconds.
-# When checkpointing is enabled and this option is set, Mesos waits
-# until the configured timeout for
-# the MesosExecutor framework to re-register after a failover. Mesos
-# shuts down running tasks if the
-# MesosExecutor framework fails to re-register within this timeframe.
-# Example: failover_timeout = 604800
-# failover_timeout =
-
-# Enable framework authentication for mesos
-# See http://mesos.apache.org/documentation/latest/configuration/
-authenticate = False
-
-# Mesos credentials, if authentication is enabled
-# Example: default_principal = admin
-# default_principal =
-# Example: default_secret = admin
-# default_secret =
-
-# Optional Docker Image to run on slave before running the command
-# This image should be accessible from mesos slave i.e mesos slave
-# should be able to pull this docker image before executing the command.
-# Example: docker_image_slave = puckel/docker-airflow
-# docker_image_slave =
+# DAG dependency detector class to use
+dependency_detector = airflow.serialization.serialized_objects.DependencyDetector
 
 [kerberos]
 ccache = /tmp/airflow_krb5_ccache
@@ -701,16 +929,12 @@ keytab = airflow.keytab
 [github_enterprise]
 api_rev = v3
 
-[admin]
-# UI to hide sensitive variable fields when set to True
-hide_sensitive_variable_fields = True
-
 [elasticsearch]
 # Elasticsearch host
 host =
 
 # Format of the log_id, which is used to query for a given tasks logs
-log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}}
+log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
 
 # Used to mark the end of a log stream for a task
 end_of_log_mark = end_of_log
@@ -729,180 +953,48 @@ json_format = False
 # Log fields to also attach to the json output, if enabled
 json_fields = asctime, filename, lineno, levelname, message
 
+# The field where host name is stored (normally either `host` or `host.name`)
+host_field = host
+
+# The field where offset is stored (normally either `offset` or `log.offset`)
+offset_field = offset
+
 [elasticsearch_configs]
 use_ssl = False
 verify_certs = True
 
 [kubernetes]
-# The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
-worker_container_repository =
-worker_container_tag =
-worker_container_image_pull_policy = IfNotPresent
+# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored.
+pod_template_file =
 
-# If True (default), worker pods will be deleted upon termination
-delete_worker_pods = True
+# The repository of the Kubernetes Image for the Worker to Run
+worker_container_repository =
 
-# Number of Kubernetes Worker Pod creation calls per scheduler loop
-worker_pods_creation_batch_size = 1
+# The tag of the Kubernetes Image for the Worker to Run
+worker_container_tag =
 
 # The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
 namespace = default
 
-# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file)
-# Example: airflow_configmap = airflow-configmap
-airflow_configmap =
+# If True, all worker pods will be deleted upon termination
+delete_worker_pods = True
+
+# If False (and delete_worker_pods is True),
+# failed worker pods will not be deleted so users can investigate them.
+# This only prevents removal of worker pods where the worker itself failed,
+# not when the task it ran failed.
+delete_worker_pods_on_failure = False
+
+# Number of Kubernetes Worker Pod creation calls per scheduler loop.
+# Note that the current default of "1" will only launch a single pod
+# per-heartbeat. It is HIGHLY recommended that users increase this
+# number to match the tolerance of their kubernetes cluster for
+# better performance.
+worker_pods_creation_batch_size = 1
 
-# The name of the Kubernetes ConfigMap containing ``airflow_local_settings.py`` file.
-#
-# For example:
-#
-# ``airflow_local_settings_configmap = "airflow-configmap"`` if you have the following ConfigMap.
-#
-# ``airflow-configmap.yaml``:
-#
-# .. code-block:: yaml
-#
-#   ---
-#   apiVersion: v1
-#   kind: ConfigMap
-#   metadata:
-#     name: airflow-configmap
-#   data:
-#     airflow_local_settings.py: |
-#         def pod_mutation_hook(pod):
-#             ...
-#     airflow.cfg: |
-#         ...
-# Example: airflow_local_settings_configmap = airflow-configmap
-airflow_local_settings_configmap =
-
-# For docker image already contains DAGs, this is set to ``True``, and the worker will
-# search for dags in dags_folder,
-# otherwise use git sync or dags volume claim to mount DAGs
-dags_in_image = False
-
-# For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
-dags_volume_subpath =
-
-# For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path)
-dags_volume_claim =
-
-# For volume mounted logs, the worker will look in this subpath for logs
-logs_volume_subpath =
-
-# A shared volume claim for the logs
-logs_volume_claim =
-
-# For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync)
-# Useful in local environment, discouraged in production
-dags_volume_host =
-
-# A hostPath volume for the logs
-# Useful in local environment, discouraged in production
-logs_volume_host =
-
-# A list of configMapsRefs to envFrom. If more than one configMap is
-# specified, provide a comma separated list: configmap_a,configmap_b
-env_from_configmap_ref =
-
-# A list of secretRefs to envFrom. If more than one secret is
-# specified, provide a comma separated list: secret_a,secret_b
-env_from_secret_ref =
-
-# Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
-git_repo =
-git_branch =
-git_subpath =
-
-# The specific rev or hash the git_sync init container will checkout
-# This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods
-git_sync_rev =
-
-# Use git_user and git_password for user authentication or git_ssh_key_secret_name
-# and git_ssh_key_secret_key for SSH authentication
-git_user =
-git_password =
-git_sync_root = /git
-git_sync_dest = repo
-
-# Mount point of the volume if git-sync is being used.
-# i.e./opt/airflow/dags
-git_dags_folder_mount_point =
-
-# To get Git-sync SSH authentication set up follow this format
-#
-# ``airflow-secrets.yaml``:
-#
-# .. code-block:: yaml
-#
-#   ---
-#   apiVersion: v1
-#   kind: Secret
-#   metadata:
-#     name: airflow-secrets
-#   data:
-#     # key needs to be gitSshKey
-#     gitSshKey: <base64_encoded_data>
-# Example: git_ssh_key_secret_name = airflow-secrets
-git_ssh_key_secret_name =
-
-# To get Git-sync SSH authentication set up follow this format
-#
-# ``airflow-configmap.yaml``:
-#
-# .. code-block:: yaml
-#
-#   ---
-#   apiVersion: v1
-#   kind: ConfigMap
-#   metadata:
-#     name: airflow-configmap
-#   data:
-#     known_hosts: |
-#         github.com ssh-rsa <...>
-#     airflow.cfg: |
-#         ...
-# Example: git_ssh_known_hosts_configmap_name = airflow-configmap
-git_ssh_known_hosts_configmap_name =
-
-# To give the git_sync init container credentials via a secret, create a secret
-# with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and
-# add ``git_sync_credentials_secret = <secret_name>`` to your airflow config under the
-# ``kubernetes`` section
-#
-# Secret Example:
-#
-# .. code-block:: yaml
-#
-#   ---
-#   apiVersion: v1
-#   kind: Secret
-#   metadata:
-#     name: git-credentials
-#   data:
-#     GIT_SYNC_USERNAME: <base64_encoded_git_username>
-#     GIT_SYNC_PASSWORD: <base64_encoded_git_password>
-git_sync_credentials_secret =
-
-# For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
-git_sync_container_repository = k8s.gcr.io/git-sync
-git_sync_container_tag = v3.1.1
-git_sync_init_container_name = git-sync-clone
-git_sync_run_as_user = 65533
-
-# The name of the Kubernetes service account to be associated with airflow workers, if any.
-# Service accounts are required for workers that require access to secrets or cluster resources.
-# See the Kubernetes RBAC documentation for more:
-# https://kubernetes.io/docs/admin/authorization/rbac/
-worker_service_account_name =
-
-# Any image pull secrets to be given to worker pods, If more than one secret is
-# required, provide a comma separated list: secret_a,secret_b
-image_pull_secrets =
-
-# GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
-# Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
-gcp_service_account_keys =
+# Allows users to launch pods in multiple namespaces.
+# Will require creating a cluster-role for the scheduler
+multi_namespace_mode = False
 
 # Use the service account kubernetes gives to pods to connect to kubernetes cluster.
 # It's intended for clients that expect to be running inside a pod running on kubernetes.
@@ -912,81 +1004,68 @@ in_cluster = True
 # When running with in_cluster=False change the default cluster_context or config_file
 # options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has.
 # cluster_context =
-# config_file =
-
-# Affinity configuration as a single line formatted JSON object.
-# See the affinity model for top-level key names (e.g. ``nodeAffinity``, etc.):
-# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core
-affinity =
 
-# A list of toleration objects as a single line formatted JSON array
-# See:
-# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core
-tolerations =
+# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False
+# config_file =
 
 # Keyword parameters to pass while calling a kubernetes client core_v1_api methods
 # from Kubernetes Executor provided as a single line formatted JSON dictionary string.
 # List of supported params are similar for all core_v1_apis, hence a single config
-# variable for all apis.
-# See:
-# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py
-# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely
-# for kubernetes api responses, which will cause the scheduler to hang.
-# The timeout is specified as [connect timeout, read timeout]
-kube_client_request_args = {{"_request_timeout" : [60,60] }}
+# variable for all apis. See:
+# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py
+kube_client_request_args =
 
-# Specifies the uid to run the first process of the worker pods containers as
-run_as_user =
+# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
+# ``core_v1_api`` method when using the Kubernetes Executor.
+# This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
+# class defined here:
+# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
+# Example: delete_option_kwargs = {"grace_period_seconds": 10}
+delete_option_kwargs =
 
-# Specifies a gid to associate with all containers in the worker pods
-# if using a git_ssh_key_secret_name use an fs_group
-# that allows for the key to be read, e.g. 65533
-fs_group =
+# Enables the TCP keepalive mechanism. This prevents Kubernetes API requests from hanging indefinitely
+# when an idle connection is timed out by services like cloud load balancers or firewalls.
+enable_tcp_keepalive = True
 
-[kubernetes_node_selectors]
+# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has
+# been idle for `tcp_keep_idle` seconds.
+tcp_keep_idle = 120
 
-# The Key-value pairs to be given to worker pods.
-# The worker pods will be scheduled to the nodes of the specified key-value pairs.
-# Should be supplied in the format: key = value
+# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
+# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.
+tcp_keep_intvl = 30
 
-[kubernetes_annotations]
+# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
+# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt` number of times before
+# a connection is considered to be broken.
+tcp_keep_cnt = 6
 
-# The Key-value annotations pairs to be given to worker pods.
-# Should be supplied in the format: key = value
+# Set this to false to skip verifying SSL certificate of Kubernetes python client.
+verify_ssl = True
 
-[kubernetes_environment_variables]
+# How long in seconds a worker can be in Pending before it is considered a failure
+worker_pods_pending_timeout = 300
 
-# The scheduler sets the following environment variables into your workers. You may define as
-# many environment variables as needed and the kubernetes launcher will set them in the launched workers.
-# Environment variables in this section are defined as follows
-# ``<environment_variable_key> = <environment_variable_value>``
-#
-# For example if you wanted to set an environment variable with value `prod` and key
-# ``ENVIRONMENT`` you would follow the following format:
-# ENVIRONMENT = prod
-#
-# Additionally you may override worker airflow settings with the ``AIRFLOW__<SECTION>__<KEY>``
-# formatting as supported by airflow normally.
+# How often in seconds to check if Pending workers have exceeded their timeouts
+worker_pods_pending_timeout_check_interval = 120
 
-[kubernetes_secrets]
+# How many pending pods to check for timeout violations in each check interval.
+# You may want this higher if you have a very large cluster and/or use ``multi_namespace_mode``.
+worker_pods_pending_timeout_batch_size = 100
 
-# The scheduler mounts the following secrets into your workers as they are launched by the
-# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
-# defined secrets and mount them as secret environment variables in the launched workers.
-# Secrets in this section are defined as follows
-# ``<environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>``
-#
-# For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the
-# kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into
-# your workers you would follow the following format:
-# ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials``
-#
-# Additionally you may override worker airflow settings with the ``AIRFLOW__<SECTION>__<KEY>``
-# formatting as supported by airflow normally.
+[smart_sensor]
+# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to
+# a smart sensor task.
+use_smart_sensor = False
+
+# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated
+# by `hashcode % shard_code_upper_limit`.
+shard_code_upper_limit = 10000
+
+# The number of running smart sensor processes for each service.
+shards = 5
 
-[kubernetes_labels]
+# Comma-separated list of sensor classes supported in smart_sensor.
+sensors_enabled = NamedHivePartitionSensor
 
-# The Key-value pairs to be given to worker pods.
-# The worker pods will be given these static labels, as well as some additional dynamic labels
-# to identify the task.
-# Should be supplied in the format: ``key = value``
+rbac = True
\ No newline at end of file
diff --git a/dockers/config/airflow.cfg b/dockers/config/airflow.cfg
deleted file mode 100644
index 9256d353f01c129e1ebfa705474b2d83c6119ca4..0000000000000000000000000000000000000000
--- a/dockers/config/airflow.cfg
+++ /dev/null
@@ -1,1071 +0,0 @@
-[core]
-# The folder where your airflow pipelines live, most likely a
-# subfolder in a code repository. This path must be absolute.
-dags_folder = /opt/airflow/dags
-
-# Hostname by providing a path to a callable, which will resolve the hostname.
-# The format is "package.function".
-#
-# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket"
-# package will be used as hostname.
-#
-# No argument should be required in the function specified.
-# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address``
-hostname_callable = socket.getfqdn
-
-# Default timezone in case supplied date times are naive
-# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
-default_timezone = utc
-
-# The executor class that airflow should use. Choices include
-# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``,
-# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the
-# full import path to the class when using a custom executor.
-executor = SequentialExecutor
-
-# The SqlAlchemy connection string to the metadata database.
-# SqlAlchemy supports many different database engines.
-# More information here:
-# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri
-# sql_alchemy_conn = sqlite:////opt/airflow/airflow.db
-
-# The encoding for the databases
-sql_engine_encoding = utf-8
-
-# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding.
-# This is particularly useful in case of mysql with utf8mb4 encoding because
-# primary keys for XCom table has too big size and ``sql_engine_collation_for_ids`` should
-# be set to ``utf8mb3_general_ci``.
-# sql_engine_collation_for_ids =
-
-# If SqlAlchemy should pool database connections.
-sql_alchemy_pool_enabled = True
-
-# The SqlAlchemy pool size is the maximum number of database connections
-# in the pool. 0 indicates no limit.
-sql_alchemy_pool_size = 5
-
-# The maximum overflow size of the pool.
-# When the number of checked-out connections reaches the size set in pool_size,
-# additional connections will be returned up to this limit.
-# When those additional connections are returned to the pool, they are disconnected and discarded.
-# It follows then that the total number of simultaneous connections the pool will allow
-# is pool_size + max_overflow,
-# and the total number of "sleeping" connections the pool will allow is pool_size.
-# max_overflow can be set to ``-1`` to indicate no overflow limit;
-# no limit will be placed on the total number of concurrent connections. Defaults to ``10``.
-sql_alchemy_max_overflow = 10
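-#
-# A worked example (illustrative only): with ``sql_alchemy_pool_size = 5`` and
-# ``sql_alchemy_max_overflow = 10``, at most 5 + 10 = 15 simultaneous connections are allowed,
-# of which at most 5 are kept "sleeping" in the pool.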
-
-# The SqlAlchemy pool recycle is the number of seconds a connection
-# can be idle in the pool before it is invalidated. This config does
-# not apply to sqlite. If the number of DB connections is ever exceeded,
-# a lower config value will allow the system to recover faster.
-sql_alchemy_pool_recycle = 1800
-
-# Check connection at the start of each connection pool checkout.
-# Typically, this is a simple statement like "SELECT 1".
-# More information here:
-# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
-sql_alchemy_pool_pre_ping = True
-
-# The schema to use for the metadata database.
-# SqlAlchemy supports databases with the concept of multiple schemas.
-sql_alchemy_schema =
-
-# Import path for connect args in SqlAlchemy. Defaults to an empty dict.
-# This is useful when you want to configure db engine args that SqlAlchemy won't parse
-# in connection string.
-# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args
-# sql_alchemy_connect_args =
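-#
-# Illustrative only (hypothetical module path pointing at a dict of engine connect args):
-# sql_alchemy_connect_args = my_company.airflow_local_settings.SQL_ALCHEMY_CONNECT_ARGS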
-
-# This defines the maximum number of task instances that can run concurrently in Airflow
-# regardless of scheduler count and worker count. Generally, this value is reflective of
-# the number of task instances with the running state in the metadata database.
-parallelism = 32
-
-# The maximum number of task instances allowed to run concurrently in each DAG. To calculate
-# the number of tasks that is running concurrently for a DAG, add up the number of running
-# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``concurrency``,
-# which defaults to ``dag_concurrency``.
-dag_concurrency = 16
-
-# Are DAGs paused by default at creation
-dags_are_paused_at_creation = True
-
-# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs
-# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``,
-# which defaults to ``max_active_runs_per_dag``.
-max_active_runs_per_dag = 16
-
-# Whether to load the DAG examples that ship with Airflow. It's good to
-# get started, but you probably want to set this to ``False`` in a production
-# environment
-load_examples = True
-
-# Whether to load the default connections that ship with Airflow. It's good to
-# get started, but you probably want to set this to ``False`` in a production
-# environment
-load_default_connections = True
-
-# Path to the folder containing Airflow plugins
-plugins_folder = /opt/airflow/plugins
-
-# Should tasks be executed via forking of the parent process ("False",
-# the speedier option) or by spawning a new python process ("True": slower,
-# but means plugin changes are picked up by tasks straight away)
-execute_tasks_new_python_interpreter = False
-
-# Secret key to save connection passwords in the db
-fernet_key = 
-
-# Whether to disable pickling dags
-donot_pickle = True
-
-# How long before timing out a python file import
-dagbag_import_timeout = 30.0
-
-# Should a traceback be shown in the UI for dagbag import errors,
-# instead of just the exception message
-dagbag_import_error_tracebacks = True
-
-# If tracebacks are shown, how many entries from the traceback should be shown
-dagbag_import_error_traceback_depth = 2
-
-# How long before timing out a DagFileProcessor, which processes a dag file
-dag_file_processor_timeout = 50
-
-# The class to use for running task instances in a subprocess.
-# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class
-# when using a custom task runner.
-task_runner = StandardTaskRunner
-
-# If set, tasks without a ``run_as_user`` argument will be run with this user
-# Can be used to de-elevate a sudo user running Airflow when executing tasks
-default_impersonation =
-
-# What security module to use (for example kerberos)
-security =
-
-# Turn unit test mode on (overwrites many configuration options with test
-# values at runtime)
-unit_test_mode = False
-
-# Whether to enable pickling for xcom (note that this is insecure and allows for
-# RCE exploits).
-enable_xcom_pickling = False
-
-# When a task is killed forcefully, this is the amount of time in seconds that
-# it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
-killed_task_cleanup_time = 60
-
-# Whether to override params with dag_run.conf. If you pass some key-value pairs
-# through ``airflow dags backfill -c`` or
-# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
-dag_run_conf_overrides_params = True
-
-# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``.
-dag_discovery_safe_mode = True
-
-# The number of retries each task is going to have by default. Can be overridden at dag or task level.
-default_task_retries = 0
-
-# Serialized DAGs are updated no more often than this minimum interval (in seconds),
-# to reduce the database write rate.
-min_serialized_dag_update_interval = 30
-
-# Serialized DAGs are fetched no more often than this minimum interval (in seconds), to reduce
-# the database read rate. This config controls how quickly DAG updates appear in the Webserver.
-
-# Whether to persist DAG files code in DB.
-# If set to True, Webserver reads file contents from DB instead of
-# trying to access files in a DAG folder.
-# (Default is ``True``)
-# Example: store_dag_code = True
-# store_dag_code =
-
-# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
-# in the Database.
-# All the template_fields for each of Task Instance are stored in the Database.
-# Keeping this number small may cause an error when you try to view ``Rendered`` tab in
-# TaskInstance view for older tasks.
-max_num_rendered_ti_fields_per_task = 30
-
-# On each dagrun check against defined SLAs
-check_slas = True
-
-# Path to custom XCom class that will be used to store and resolve operators results
-# Example: xcom_backend = path.to.CustomXCom
-xcom_backend = airflow.models.xcom.BaseXCom
-
-# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``
-# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module.
-lazy_load_plugins = False
-
-# By default Airflow providers are lazily-discovered (discovery and imports happen only when required).
-# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or
-# loaded from module.
-lazy_discover_providers = True
-
-# Number of times the code should be retried in case of DB Operational Errors.
-# Not all transactions will be retried as it can cause undesired state.
-# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``.
-max_db_retries = 3
-
-# Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True
-#
-# (Connection passwords are always hidden in logs)
-hide_sensitive_var_conn_fields = True
-
-# A comma-separated list of extra sensitive keywords to look for in variables names or connection's
-# extra JSON.
-sensitive_var_conn_names =
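-#
-# Illustrative only (hypothetical keywords):
-# Example: sensitive_var_conn_names = client_secret,service_account_key,api_token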
-
-[logging]
-# The folder where airflow should store its log files
-# This path must be absolute
-base_log_folder = /opt/airflow/logs
-
-# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
-# Set this to True if you want to enable remote logging.
-remote_logging = False
-
-# Users must supply an Airflow connection id that provides access to the storage
-# location.
-remote_log_conn_id =
-
-# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default
-# Credentials
-# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
-# be used.
-google_key_path =
-
-# Storage bucket URL for remote logging
-# S3 buckets should start with "s3://"
-# Cloudwatch log groups should start with "cloudwatch://"
-# GCS buckets should start with "gs://"
-# WASB buckets should start with "wasb" just to help Airflow select the correct handler
-# Stackdriver logs should start with "stackdriver://"
-remote_base_log_folder =
-
-# Use server-side encryption for logs stored in S3
-encrypt_s3_logs = False
-
-# Logging level.
-#
-# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
-logging_level = INFO
-
-# Logging level for Flask-appbuilder UI.
-#
-# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
-fab_logging_level = WARN
-
-# Logging class
-# Specify the class that will specify the logging configuration
-# This class has to be on the python classpath
-# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
-logging_config_class =
-
-# Flag to enable/disable Colored logs in Console
-# Colour the logs when the controlling terminal is a TTY.
-colored_console_log = True
-
-# Log format for when Colored logs is enabled
-colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
-colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
-
-# Format of Log line
-log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
-simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
-
-# Specify a prefix pattern, like the example below, for the stream handler TaskHandlerWithCustomFormatter
-# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number}
-task_log_prefix_template =
-
-# Formatting for how airflow generates file names/paths for each task run.
-log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
-
-# Formatting for how airflow generates file names for log
-log_processor_filename_template = {{ filename }}.log
-
-# full path of dag_processor_manager logfile
-dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log
-
-# Name of handler to read task instance logs.
-# Defaults to use ``task`` handler.
-task_log_reader = task
-
-# A comma-separated list of third-party logger names that will be configured to print messages to
-# consoles.
-# Example: extra_loggers = connexion,sqlalchemy
-extra_loggers =
-
-[metrics]
-
-# StatsD (https://github.com/etsy/statsd) integration settings.
-# Enables sending metrics to StatsD.
-statsd_on = False
-statsd_host = localhost
-statsd_port = 8125
-statsd_prefix = airflow
-
-# If you want to avoid sending all the available metrics to StatsD,
-# you can configure an allow list of prefixes (comma separated) to send only the metrics that
-# start with the elements of the list (e.g: "scheduler,executor,dagrun")
-statsd_allow_list =
-
-# A function that validates the statsd stat name, applies changes to the stat name if necessary,
-# and returns the transformed stat name.
-#
-# The function should have the following signature:
-# def func_name(stat_name: str) -> str:
-stat_name_handler =
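-#
-# Illustrative only (hypothetical module path; the function must accept a str and return a str):
-# Example: stat_name_handler = my_company.airflow_local_settings.sanitize_stat_name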
-
-# To enable datadog integration to send airflow metrics.
-statsd_datadog_enabled = False
-
-# List of datadog tags attached to all metrics (e.g. key1:value1,key2:value2)
-statsd_datadog_tags =
-
-# If you want to utilise your own custom Statsd client set the relevant
-# module path below.
-# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up
-# statsd_custom_client_path =
-
-[secrets]
-# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
-# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
-backend =
-
-# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
-# See documentation for the secrets backend you are using. JSON is expected.
-# Example for AWS Systems Manager ParameterStore:
-# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}``
-backend_kwargs =
-
-[cli]
-# In what way should the cli access the API. The LocalClient will use the
-# database directly, while the json_client will use the api running on the
-# webserver
-api_client = airflow.api.client.local_client
-
-# If you set web_server_url_prefix, do NOT forget to append it here, ex:
-# ``endpoint_url = http://localhost:8080/myroot``
-# So api will look like: ``http://localhost:8080/myroot/api/experimental/...``
-endpoint_url = http://localhost:8080
-
-[debug]
-# Used only with ``DebugExecutor``. If set to ``True`` the DAG will fail on the first
-# failed task. Helpful for debugging purposes.
-fail_fast = False
-
-[api]
-# Enables the deprecated experimental API. Please note that these APIs do not have access control.
-# The authenticated user has full access.
-#
-# .. warning::
-#
-#   This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is
-#   deprecated since version 2.0. Please consider using
-#   `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__.
-#   For more information on migration, see
-#   `UPDATING.md <https://github.com/apache/airflow/blob/main/UPDATING.md>`_
-enable_experimental_api = False
-
-# How to authenticate users of the API. See
-# https://airflow.apache.org/docs/apache-airflow/stable/security.html for possible values.
-# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
-auth_backend = airflow.api.auth.backend.deny_all
-
-# Used to set the maximum page limit for API requests
-maximum_page_limit = 100
-
-# Used to set the default page limit when limit is zero. A default limit
-# of 100 is set in the OpenApi spec. However, this particular default limit
-# only works when the limit is set equal to zero (0) in API requests.
-# If no limit is supplied, the OpenApi spec default is used.
-fallback_page_limit = 100
-
-# The intended audience for JWT token credentials used for authorization.
-# This value must match on the client and server sides. If empty, audience will not be tested.
-# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com
-google_oauth2_audience =
-
-# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on
-# `the Application Default Credentials
-# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
-# be used.
-# Example: google_key_path = /files/service-account-json
-google_key_path =
-
-# Used in response to a preflight request to indicate which HTTP
-# headers can be used when making the actual request. This header is
-# the server side response to the browser's
-# Access-Control-Request-Headers header.
-access_control_allow_headers =
-
-# Specifies the method or methods allowed when accessing the resource.
-access_control_allow_methods =
-
-# Indicates whether the response can be shared with requesting code from the given origin.
-access_control_allow_origin =
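-#
-# Illustrative CORS setup (hypothetical values):
-# Example: access_control_allow_headers = origin,content-type,accept
-# Example: access_control_allow_methods = POST,GET,OPTIONS,DELETE
-# Example: access_control_allow_origin = https://my-airflow-client.example.com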
-
-[lineage]
-# what lineage backend to use
-backend =
-
-[atlas]
-sasl_enabled = False
-host =
-port = 21000
-username =
-password =
-
-[operators]
-# The default owner assigned to each new operator, unless
-# provided explicitly or passed via ``default_args``
-default_owner = airflow
-default_cpus = 1
-default_ram = 512
-default_disk = 512
-default_gpus = 0
-
-# Default queue that tasks get assigned to and that workers listen on.
-default_queue = default
-
-# Whether to allow passing additional/unused arguments (args, kwargs) to the BaseOperator.
-# If set to False, an exception will be thrown; otherwise only a console message will be displayed.
-allow_illegal_arguments = False
-
-[hive]
-# Default mapreduce queue for HiveOperator tasks
-default_hive_mapred_queue =
-
-# Template for mapred_job_name in HiveOperator, supports the following named parameters
-# hostname, dag_id, task_id, execution_date
-# mapred_job_name_template =
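-#
-# Illustrative only, using the named parameters listed above:
-# Example: mapred_job_name_template = Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}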
-
-[webserver]
-# The base url of your website as airflow cannot guess what domain or
-# cname you are using. This is used in automated emails that
-# airflow sends to point links to the right web server
-base_url = http://localhost:8080
-
-# Default timezone to display all dates in the UI, can be UTC, system, or
-# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
-# default value of core/default_timezone will be used
-# Example: default_ui_timezone = America/New_York
-default_ui_timezone = UTC
-
-# The ip specified when starting the web server
-web_server_host = 0.0.0.0
-
-# The port on which to run the web server
-web_server_port = 8080
-
-# Paths to the SSL certificate and key for the web server. When both are
-# provided SSL will be enabled. This does not change the web server port.
-web_server_ssl_cert =
-
-# Paths to the SSL certificate and key for the web server. When both are
-# provided SSL will be enabled. This does not change the web server port.
-web_server_ssl_key =
-
-# Number of seconds the webserver waits before killing gunicorn master that doesn't respond
-web_server_master_timeout = 120
-
-# Number of seconds the gunicorn webserver waits before timing out on a worker
-web_server_worker_timeout = 120
-
-# Number of workers to refresh at a time. When set to 0, worker refresh is
-# disabled. When nonzero, airflow periodically refreshes webserver workers by
-# bringing up new ones and killing old ones.
-worker_refresh_batch_size = 1
-
-# Number of seconds to wait before refreshing a batch of workers.
-worker_refresh_interval = 6000
-
-# If set to True, Airflow will track files in plugins_folder directory. When it detects changes,
-# it will then reload gunicorn.
-reload_on_plugin_change = False
-
-# Secret key used to run your flask app. It should be as random as possible. However, when running
-# more than one instance of the webserver, make sure all of them use the same ``secret_key`` otherwise
-# one of them will error with "CSRF session token is missing".
-secret_key = 8kUFwlRKUhs6i8NBAvUmWg==
-
-# Number of workers to run the Gunicorn web server
-workers = 4
-
-# The worker class gunicorn should use. Choices include
-# sync (default), eventlet, gevent
-worker_class = sync
-
-# Log files for the gunicorn webserver. '-' means log to stderr.
-access_logfile = -
-
-# Log files for the gunicorn webserver. '-' means log to stderr.
-error_logfile = -
-
-# Access log format for gunicorn webserver.
-# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s"
-# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format
-access_logformat =
-
-# Expose the configuration file in the web server
-expose_config = False
-
-# Expose hostname in the web server
-expose_hostname = True
-
-# Expose stacktrace in the web server
-expose_stacktrace = True
-
-# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times``
-dag_default_view = tree
-
-# Default DAG orientation. Valid values are:
-# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top)
-dag_orientation = LR
-
-# The amount of time (in secs) webserver will wait for initial handshake
-# while fetching logs from other worker machine
-log_fetch_timeout_sec = 5
-
-# Time interval (in secs) to wait before next log fetching.
-log_fetch_delay_sec = 2
-
-# Distance away from page bottom to enable auto tailing.
-log_auto_tailing_offset = 30
-
-# Animation speed for auto tailing log display.
-log_animation_speed = 1000
-
-# By default, the webserver shows paused DAGs. Flip this to hide paused
-# DAGs by default
-hide_paused_dags_by_default = False
-
-# Consistent page size across all listing views in the UI
-page_size = 100
-
-# Define the color of navigation bar
-navbar_color = #fff
-
-# Default dagrun to show in UI
-default_dag_run_display_number = 25
-
-# Enable werkzeug ``ProxyFix`` middleware for reverse proxy
-enable_proxy_fix = False
-
-# Number of values to trust for ``X-Forwarded-For``.
-# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/
-proxy_fix_x_for = 1
-
-# Number of values to trust for ``X-Forwarded-Proto``
-proxy_fix_x_proto = 1
-
-# Number of values to trust for ``X-Forwarded-Host``
-proxy_fix_x_host = 1
-
-# Number of values to trust for ``X-Forwarded-Port``
-proxy_fix_x_port = 1
-
-# Number of values to trust for ``X-Forwarded-Prefix``
-proxy_fix_x_prefix = 1
-
-# Set secure flag on session cookie
-cookie_secure = False
-
-# Set samesite policy on session cookie
-cookie_samesite = Lax
-
-# Default setting for wrap toggle on DAG code and TI log views.
-default_wrap = False
-
-# Allow the UI to be rendered in a frame
-x_frame_enabled = True
-
-# Send anonymous user activity to your analytics tool
-# choose from google_analytics, segment, or metarouter
-# analytics_tool =
-
-# Unique ID of your account in the analytics tool
-# analytics_id =
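-#
-# Illustrative only (hypothetical tracking id):
-# Example: analytics_tool = google_analytics
-# Example: analytics_id = UA-000000-1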
-
-# 'Recent Tasks' stats will show for old DagRuns if set
-show_recent_stats_for_completed_runs = True
-
-# Update FAB permissions and sync security manager roles
-# on webserver startup
-update_fab_perms = True
-
-# The UI cookie lifetime in minutes. User will be logged out from UI after
-# ``session_lifetime_minutes`` of non-activity
-session_lifetime_minutes = 43200
-
-# Sets a custom page title for the DAGs overview page and site title for all pages
-instance_name = eFlows4HPC
-
-[email]
-
-# Configuration email backend and whether to
-# send email alerts on retry or failure
-# Email backend to use
-email_backend = airflow.utils.email.send_email_smtp
-
-# Email connection to use
-email_conn_id = smtp_default
-
-# Whether email alerts should be sent when a task is retried
-default_email_on_retry = True
-
-# Whether email alerts should be sent when a task failed
-default_email_on_failure = True
-
-# File that will be used as the template for Email subject (which will be rendered using Jinja2).
-# If not set, Airflow uses a base template.
-# Example: subject_template = /path/to/my_subject_template_file
-# subject_template =
-
-# File that will be used as the template for Email content (which will be rendered using Jinja2).
-# If not set, Airflow uses a base template.
-# Example: html_content_template = /path/to/my_html_content_template_file
-# html_content_template =
-
-[smtp]
-
-# If you want airflow to send emails on retries or failures, and you want to use
-# the airflow.utils.email.send_email_smtp function, you have to configure an
-# smtp server here
-smtp_host = localhost
-smtp_starttls = True
-smtp_ssl = False
-# Example: smtp_user = airflow
-# smtp_user =
-# Example: smtp_password = airflow
-# smtp_password =
-smtp_port = 25
-smtp_mail_from = airflow@example.com
-smtp_timeout = 30
-smtp_retry_limit = 5
-
-[sentry]
-
-# Sentry (https://docs.sentry.io) integration. Here you can supply
-# additional configuration options based on the Python platform. See:
-# https://docs.sentry.io/error-reporting/configuration/?platform=python.
-# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``,
-# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``.
-# Enable error reporting to Sentry
-sentry_on = false
-sentry_dsn =
-
-[celery_kubernetes_executor]
-
-# This section only applies if you are using the ``CeleryKubernetesExecutor`` in
-# ``[core]`` section above
-# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``.
-# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),
-# the task is executed via ``KubernetesExecutor``,
-# otherwise via ``CeleryExecutor``
-kubernetes_queue = kubernetes
-
-[celery]
-
-# This section only applies if you are using the CeleryExecutor in
-# ``[core]`` section above
-# The app name that will be used by celery
-celery_app_name = airflow.executors.celery_executor
-
-# The concurrency that will be used when starting workers with the
-# ``airflow celery worker`` command. This defines the number of task instances that
-# a worker will take, so size up your workers based on the resources on
-# your worker box and the nature of your tasks
-worker_concurrency = 16
-
-# The maximum and minimum concurrency that will be used when starting workers with the
-# ``airflow celery worker`` command (always keep minimum processes, but grow
-# to maximum if necessary). Note the value should be max_concurrency,min_concurrency
-# Pick these numbers based on resources on worker box and the nature of the task.
-# If autoscale option is available, worker_concurrency will be ignored.
-# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
-# Example: worker_autoscale = 16,12
-# worker_autoscale =
-
-# Used to increase the number of tasks that a worker prefetches which can improve performance.
-# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks
-# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily
-# blocked if there are multiple workers and one worker prefetches tasks that sit behind long
-# running tasks while another worker has unutilized processes that are unable to process the already
-# claimed blocked tasks.
-# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits
-# Example: worker_prefetch_multiplier = 1
-# worker_prefetch_multiplier =
-
-# When you start an airflow worker, airflow starts a tiny web server
-# subprocess to serve the workers local log files to the airflow main
-# web server, who then builds pages and sends them to users. This defines
-# the port on which the logs are served. It needs to be unused, and must be
-# visible from the main web server in order to connect to the workers.
-worker_log_server_port = 8793
-
-# Umask that will be used when starting workers with the ``airflow celery worker``
-# in daemon mode. This controls the file-creation mode mask which determines the initial
-# value of file permission bits for newly created files.
-worker_umask = 0o077
-
-# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
-# a sqlalchemy database. Refer to the Celery documentation for more information.
-broker_url = redis://redis:6379/0
-
-# The Celery result_backend. When a job finishes, it needs to update the
-# metadata of the job. Therefore it will post a message on a message bus,
-# or insert it into a database (depending on the backend)
-# This status is used by the scheduler to update the state of the task
-# The use of a database is highly recommended
-# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
-result_backend = db+postgresql://postgres:airflow@postgres/airflow
-
-# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
-# it ``airflow celery flower``. This defines the IP that Celery Flower runs on
-flower_host = 0.0.0.0
-
-# The root URL for Flower
-# Example: flower_url_prefix = /flower
-flower_url_prefix =
-
-# This defines the port that Celery Flower runs on
-flower_port = 5555
-
-# Securing Flower with Basic Authentication
-# Accepts user:password pairs separated by a comma
-# Example: flower_basic_auth = user1:password1,user2:password2
-flower_basic_auth =
-
-# How many processes CeleryExecutor uses to sync task state.
-# 0 means to use max(1, number of cores - 1) processes.
-sync_parallelism = 0
-
-# Import path for celery configuration options
-celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
-ssl_active = False
-ssl_key =
-ssl_cert =
-ssl_cacert =
-
-# Celery Pool implementation.
-# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``.
-# See:
-# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
-# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
-pool = prefork
-
-# The number of seconds to wait before timing out ``send_task_to_executor`` or
-# ``fetch_celery_task_state`` operations.
-operation_timeout = 1.0
-
-# Celery task will report its status as 'started' when the task is executed by a worker.
-# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted
-# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob.
-task_track_started = True
-
-# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear
-# stalled tasks.
-task_adoption_timeout = 600
-
-# The Maximum number of retries for publishing task messages to the broker when failing
-# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed.
-task_publish_max_retries = 3
-
-# Worker initialisation check to validate Metadata Database connection
-worker_precheck = False
-
-[celery_broker_transport_options]
-
-# This section is for specifying options which can be passed to the
-# underlying celery broker transport. See:
-# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options
-# The visibility timeout defines the number of seconds to wait for the worker
-# to acknowledge the task before the message is redelivered to another worker.
-# Make sure to increase the visibility timeout to match the time of the longest
-# ETA you're planning to use.
-# visibility_timeout is only supported for Redis and SQS celery brokers.
-# See:
-# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
-# Example: visibility_timeout = 21600
-# visibility_timeout =
-
-[dask]
-
-# This section only applies if you are using the DaskExecutor in
-# [core] section above
-# The IP address and port of the Dask cluster's scheduler.
-cluster_address = 127.0.0.1:8786
-
-# TLS/ SSL settings to access a secured Dask scheduler.
-tls_ca =
-tls_cert =
-tls_key =
-
-[scheduler]
-# Task instances listen for external kill signal (when you clear tasks
-# from the CLI or the UI), this defines the frequency at which they should
-# listen (in seconds).
-job_heartbeat_sec = 5
-
-# How often (in seconds) to check and tidy up 'running' TaskInstances
-# that no longer have a matching DagRun
-clean_tis_without_dagrun_interval = 15.0
-
-# The scheduler constantly tries to trigger new tasks (look at the
-# scheduler section in the docs for more information). This defines
-# how often the scheduler should run (in seconds).
-scheduler_heartbeat_sec = 5
-
-# The number of times to try to schedule each DAG file
-# -1 indicates unlimited number
-num_runs = -1
-
-# The number of seconds to wait between consecutive DAG file processing
-processor_poll_interval = 1
-
-# Number of seconds after which a DAG file is parsed. The DAG file is parsed every
-# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after
-# this interval. Keeping this number low will increase CPU usage.
-min_file_process_interval = 30
-
-# How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
-dag_dir_list_interval = 300
-
-# How often should stats be printed to the logs. Setting to 0 will disable printing stats
-print_stats_interval = 30
-
-# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled)
-pool_metrics_interval = 5.0
-
-# If the last scheduler heartbeat happened more than scheduler_health_check_threshold
-# ago (in seconds), scheduler is considered unhealthy.
-# This is used by the health check in the "/health" endpoint
-scheduler_health_check_threshold = 30
-
-# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs
-orphaned_tasks_check_interval = 300.0
-child_process_log_directory = /opt/airflow/logs/scheduler
-
-# Local task jobs periodically heartbeat to the DB. If the job has
-# not heartbeat in this many seconds, the scheduler will mark the
-# associated task instance as failed and will re-schedule the task.
-scheduler_zombie_task_threshold = 300
-
-# Turn off scheduler catchup by setting this to ``False``.
-# Default behavior is unchanged and
-# Command Line Backfills still work, but the scheduler
-# will not do scheduler catchup if this is ``False``,
-# however it can be set on a per DAG basis in the
-# DAG definition (catchup)
-catchup_by_default = True
-
-# This changes the batch size of queries in the scheduling main loop.
-# If this is too high, SQL query performance may be impacted by one
-# or more of the following:
-# - reversion to full table scan
-# - complexity of query predicate
-# - excessive locking
-# Additionally, you may hit the maximum allowable query length for your db.
-# Set this to 0 for no limit (not advised)
-max_tis_per_query = 512
-
-# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries.
-# If this is set to False then you should not run more than a single
-# scheduler at once
-use_row_level_locking = True
-
-# Max number of DAGs to create DagRuns for per scheduler loop.
-max_dagruns_to_create_per_loop = 10
-
-# How many DagRuns should a scheduler examine (and lock) when scheduling
-# and queuing tasks.
-max_dagruns_per_loop_to_schedule = 20
-
-# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the
-# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other
-# dags in some circumstances
-schedule_after_task_execution = True
-
-# The scheduler can run multiple processes in parallel to parse dags.
-# This defines how many processes will run.
-parsing_processes = 2
-
-# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``.
-# The scheduler will list and sort the dag files to decide the parsing order.
-#
-# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the
-#   recently modified DAGs first.
-# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the
-#   same host. This is useful when running with Scheduler in HA mode where each scheduler can
-#   parse different DAG files.
-# * ``alphabetical``: Sort by filename
-file_parsing_sort_mode = modified_time
-
-# Turn off scheduler use of cron intervals by setting this to False.
-# DAGs submitted manually in the web UI or with trigger_dag will still run.
-use_job_schedule = True
-
-# Allow externally triggered DagRuns for Execution Dates in the future
-# Only has effect if schedule_interval is set to None in DAG
-allow_trigger_in_future = False
-
-# DAG dependency detector class to use
-dependency_detector = airflow.serialization.serialized_objects.DependencyDetector
-
-[kerberos]
-ccache = /tmp/airflow_krb5_ccache
-
-# gets augmented with fqdn
-principal = airflow
-reinit_frequency = 3600
-kinit_path = kinit
-keytab = airflow.keytab
-
-[github_enterprise]
-api_rev = v3
-
-[elasticsearch]
-# Elasticsearch host
-host =
-
-# Format of the log_id, which is used to query for a given tasks logs
-log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
-
-# Used to mark the end of a log stream for a task
-end_of_log_mark = end_of_log
-
-# Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id
-# Code will construct log_id using the log_id template from the argument above.
-# NOTE: The code will prefix the https:// automatically, don't include that here.
-frontend =
-
-# Write the task logs to the stdout of the worker, rather than the default files
-write_stdout = False
-
-# Instead of the default log formatter, write the log lines as JSON
-json_format = False
-
-# Log fields to also attach to the json output, if enabled
-json_fields = asctime, filename, lineno, levelname, message
-
-# The field where host name is stored (normally either `host` or `host.name`)
-host_field = host
-
-# The field where offset is stored (normally either `offset` or `log.offset`)
-offset_field = offset
-
-[elasticsearch_configs]
-use_ssl = False
-verify_certs = True
-
-[kubernetes]
-# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored.
-pod_template_file =
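-#
-# Illustrative only (hypothetical path to a Kubernetes pod spec in YAML):
-# Example: pod_template_file = /opt/airflow/pod_templates/worker_pod_template.yaml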
-
-# The repository of the Kubernetes Image for the Worker to Run
-worker_container_repository =
-
-# The tag of the Kubernetes Image for the Worker to Run
-worker_container_tag =
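-#
-# Illustrative only (hypothetical image repository and tag):
-# Example: worker_container_repository = apache/airflow
-# Example: worker_container_tag = 2.1.0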
-
-# The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
-namespace = default
-
-# If True, all worker pods will be deleted upon termination
-delete_worker_pods = True
-
-# If False (and delete_worker_pods is True),
-# failed worker pods will not be deleted so users can investigate them.
-# This only prevents removal of worker pods where the worker itself failed,
-# not when the task it ran failed.
-delete_worker_pods_on_failure = False
-
-# Number of Kubernetes Worker Pod creation calls per scheduler loop.
-# Note that the current default of "1" will only launch a single pod
-# per-heartbeat. It is HIGHLY recommended that users increase this
-# number to match the tolerance of their kubernetes cluster for
-# better performance.
-worker_pods_creation_batch_size = 1
-
-# Allows users to launch pods in multiple namespaces.
-# Will require creating a cluster-role for the scheduler
-multi_namespace_mode = False
-
-# Use the service account kubernetes gives to pods to connect to kubernetes cluster.
-# It's intended for clients that expect to be running inside a pod running on kubernetes.
-# It will raise an exception if called from a process not running in a kubernetes environment.
-in_cluster = True
-
-# When running with in_cluster=False change the default cluster_context or config_file
-# options to Kubernetes client. Leave these blank to use the default behaviour like ``kubectl`` has.
-# cluster_context =
-
-# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False
-# config_file =
-
-# Keyword parameters to pass while calling a kubernetes client core_v1_api methods
-# from Kubernetes Executor provided as a single line formatted JSON dictionary string.
-# List of supported params are similar for all core_v1_apis, hence a single config
-# variable for all apis. See:
-# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py
-kube_client_request_args =
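-#
-# Illustrative only (single-line JSON; ``_request_timeout`` is one of the supported client kwargs):
-# Example: kube_client_request_args = {"_request_timeout": [60, 60]}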
-
-# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
-# ``core_v1_api`` method when using the Kubernetes Executor.
-# This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
-# class defined here:
-# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
-# Example: delete_option_kwargs = {"grace_period_seconds": 10}
-delete_option_kwargs =
-
-# Enables the TCP keepalive mechanism. This prevents Kubernetes API requests from hanging indefinitely
-# when an idle connection is timed out by services like cloud load balancers or firewalls.
-enable_tcp_keepalive = True
-
-# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has
-# been idle for `tcp_keep_idle` seconds.
-tcp_keep_idle = 120
-
-# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
-# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.
-tcp_keep_intvl = 30
-
-# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
-# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt` number of times before
-# a connection is considered to be broken.
-tcp_keep_cnt = 6
-
-# Set this to false to skip verifying SSL certificate of Kubernetes python client.
-verify_ssl = True
-
-# How long in seconds a worker can be in Pending before it is considered a failure
-worker_pods_pending_timeout = 300
-
-# How often in seconds to check if Pending workers have exceeded their timeouts
-worker_pods_pending_timeout_check_interval = 120
-
-# How many pending pods to check for timeout violations in each check interval.
-# You may want this higher if you have a very large cluster and/or use ``multi_namespace_mode``.
-worker_pods_pending_timeout_batch_size = 100
-
-[smart_sensor]
-# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to
-# a smart sensor task.
-use_smart_sensor = False
-
-# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated
-# by `hashcode % shard_code_upper_limit`.
-shard_code_upper_limit = 10000
-
-# The number of running smart sensor processes for each service.
-shards = 5
-
-# Comma-separated list of sensor classes supported by smart_sensor.
-sensors_enabled = NamedHivePartitionSensor
-
-rbac = True
\ No newline at end of file
diff --git a/dockers/plugins/eFlows.py b/plugins/eFlows_menu_link.py
similarity index 100%
rename from dockers/plugins/eFlows.py
rename to plugins/eFlows_menu_link.py