diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index be1103f4088ecc0f14ce9495411ffa3e24329bec..14240415147c5d89785c0a485d7f396521a52784 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -26,3 +26,23 @@ test:
    - airflow dags test testdag 2021-08-18
    - nosetests
 
+deploy-test:
+  stage: deploy
+  environment: Testing
+  only:
+    - mptest
+  when: manual
+  variables:
+    OS_AUTH_TYPE: v3applicationcredential
+    OS_AUTH_URL: https://hdf-cloud.fz-juelich.de:5000
+    OS_IDENTITY_API_VERSION: 3
+    OS_REGION_NAME: "HDFCloud"
+    OS_INTERFACE: public
+    FLOATING_IP: 134.94.199.220
+  script:
+    - echo "Starting the full testing deployment of airflows example."
+    - pip install python-openstackclient
+    - INSTANCE_ID=`openstack server create -f value -c id --prefix IMAGE_ --flavor m2 --image 149a65b5-aeb8-499f-aaa6-ec966bd28dd6 --user-data scripts/cloudinit.yml --security-group ssh --security-group airflows --security-group www --security-group https airflow-testing`
+    - while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done # wait until an address is available to attach the floating ip
+    - openstack server add floating ip $INSTANCE_ID $FLOATING_IP
+    - echo "Done"
diff --git a/config/airflow.cfg b/config/airflow.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..daf7de729d01911e56f3a7370f3307cff9f6323b
--- /dev/null
+++ b/config/airflow.cfg
@@ -0,0 +1,1071 @@
+[core]
+# The folder where your airflow pipelines live, most likely a
+# subfolder in a code repository. This path must be absolute.
+dags_folder = /opt/airflow/dags
+
+# Hostname by providing a path to a callable, which will resolve the hostname.
+# The format is "package.function".
+#
+# For example, default value "socket.getfqdn" means that result from getfqdn() of "socket"
+# package will be used as hostname.
+#
+# No argument should be required in the function specified.
+# If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address``
+hostname_callable = socket.getfqdn
+
+# Default timezone in case supplied date times are naive
+# can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
+default_timezone = utc
+
+# The executor class that airflow should use. Choices include
+# ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``,
+# ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the
+# full import path to the class when using a custom executor.
+executor = SequentialExecutor
+
+# The SqlAlchemy connection string to the metadata database.
+# SqlAlchemy supports many different database engines.
+# More information here:
+# http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri
+# sql_alchemy_conn = sqlite:////opt/airflow/airflow.db
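+# Example (matching the Postgres service in dockers/docker-compose.yaml):
+# sql_alchemy_conn = postgresql+psycopg2://airflow:airflow@postgres/airflow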
+
+# The encoding for the databases
+sql_engine_encoding = utf-8
+
+# Collation for ``dag_id``, ``task_id``, ``key`` columns in case they have different encoding.
+# This is particularly useful in the case of mysql with utf8mb4 encoding, because the
+# primary keys of the XCom table would otherwise be too large; ``sql_engine_collation_for_ids`` should
+# then be set to ``utf8mb3_general_ci``.
+# sql_engine_collation_for_ids =
+
+# If SqlAlchemy should pool database connections.
+sql_alchemy_pool_enabled = True
+
+# The SqlAlchemy pool size is the maximum number of database connections
+# in the pool. 0 indicates no limit.
+sql_alchemy_pool_size = 5
+
+# The maximum overflow size of the pool.
+# When the number of checked-out connections reaches the size set in pool_size,
+# additional connections will be returned up to this limit.
+# When those additional connections are returned to the pool, they are disconnected and discarded.
+# It follows then that the total number of simultaneous connections the pool will allow
+# is pool_size + max_overflow,
+# and the total number of "sleeping" connections the pool will allow is pool_size.
+# max_overflow can be set to ``-1`` to indicate no overflow limit;
+# no limit will be placed on the total number of concurrent connections. Defaults to ``10``.
+sql_alchemy_max_overflow = 10
+
+# The SqlAlchemy pool recycle is the number of seconds a connection
+# can be idle in the pool before it is invalidated. This config does
+# not apply to sqlite. If the number of DB connections is ever exceeded,
+# a lower config value will allow the system to recover faster.
+sql_alchemy_pool_recycle = 1800
+
+# Check connection at the start of each connection pool checkout.
+# Typically, this is a simple statement like "SELECT 1".
+# More information here:
+# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
+sql_alchemy_pool_pre_ping = True
+
+# The schema to use for the metadata database.
+# SqlAlchemy supports databases with the concept of multiple schemas.
+sql_alchemy_schema =
+
+# Import path for connect args in SqlAlchemy. Defaults to an empty dict.
+# This is useful when you want to configure db engine args that SqlAlchemy won't parse
+# in connection string.
+# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args
+# sql_alchemy_connect_args =
+
+# This defines the maximum number of task instances that can run concurrently in Airflow
+# regardless of scheduler count and worker count. Generally, this value is reflective of
+# the number of task instances with the running state in the metadata database.
+parallelism = 32
+
+# The maximum number of task instances allowed to run concurrently in each DAG. To calculate
+# the number of tasks that is running concurrently for a DAG, add up the number of running
+# tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``concurrency``,
+# which defaults to ``dag_concurrency``.
+dag_concurrency = 16
+
+# Are DAGs paused by default at creation
+dags_are_paused_at_creation = True
+
+# The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs
+# if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``,
+# which defaults to ``max_active_runs_per_dag``.
+max_active_runs_per_dag = 16
+
+# Whether to load the DAG examples that ship with Airflow. It's good to
+# get started, but you probably want to set this to ``False`` in a production
+# environment
+load_examples = True
+
+# Whether to load the default connections that ship with Airflow. It's good to
+# get started, but you probably want to set this to ``False`` in a production
+# environment
+load_default_connections = True
+
+# Path to the folder containing Airflow plugins
+plugins_folder = /opt/airflow/plugins
+
+# Should tasks be executed by forking the parent process ("False",
+# the speedier option) or by spawning a new python process ("True": slower,
+# but plugin changes are picked up by tasks straight away)
+execute_tasks_new_python_interpreter = False
+
+# Secret key to save connection passwords in the db
+fernet_key = 
+
+# Whether to disable pickling dags
+donot_pickle = True
+
+# How long before timing out a python file import
+dagbag_import_timeout = 30.0
+
+# Should a traceback be shown in the UI for dagbag import errors,
+# instead of just the exception message
+dagbag_import_error_tracebacks = True
+
+# If tracebacks are shown, how many entries from the traceback should be shown
+dagbag_import_error_traceback_depth = 2
+
+# How long before timing out a DagFileProcessor, which processes a dag file
+dag_file_processor_timeout = 50
+
+# The class to use for running task instances in a subprocess.
+# Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class
+# when using a custom task runner.
+task_runner = StandardTaskRunner
+
+# If set, tasks without a ``run_as_user`` argument will be run with this user
+# Can be used to de-elevate a sudo user running Airflow when executing tasks
+default_impersonation =
+
+# What security module to use (for example kerberos)
+security =
+
+# Turn unit test mode on (overwrites many configuration options with test
+# values at runtime)
+unit_test_mode = False
+
+# Whether to enable pickling for xcom (note that this is insecure and allows for
+# RCE exploits).
+enable_xcom_pickling = False
+
+# When a task is killed forcefully, this is the amount of time in seconds that
+# it has to clean up after it is sent a SIGTERM, before it is SIGKILLed
+killed_task_cleanup_time = 60
+
+# Whether to override params with dag_run.conf. If you pass some key-value pairs
+# through ``airflow dags backfill -c`` or
+# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
+dag_run_conf_overrides_params = True
+
+# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``.
+dag_discovery_safe_mode = True
+
+# The number of retries each task is going to have by default. Can be overridden at dag or task level.
+default_task_retries = 0
+
+# Serialized DAGs are not updated more often than this minimum interval, to reduce the database write rate.
+min_serialized_dag_update_interval = 30
+
+# Serialized DAGs are not fetched more often than this minimum interval, to reduce the database
+# read rate. This config controls when your DAGs are updated in the Webserver
+min_serialized_dag_fetch_interval = 10
+
+# Whether to persist DAG files code in DB.
+# If set to True, Webserver reads file contents from DB instead of
+# trying to access files in a DAG folder.
+# (Default is ``True``)
+# Example: store_dag_code = True
+# store_dag_code =
+
+# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store
+# in the Database.
+# All template_fields of each Task Instance are stored in the Database.
+# Keeping this number small may cause an error when you try to view the ``Rendered`` tab in
+# the TaskInstance view for older tasks.
+max_num_rendered_ti_fields_per_task = 30
+
+# On each dagrun check against defined SLAs
+check_slas = True
+
+# Path to custom XCom class that will be used to store and resolve operators results
+# Example: xcom_backend = path.to.CustomXCom
+xcom_backend = airflow.models.xcom.BaseXCom
+
+# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``,
+# if you want to load plugins whenever 'airflow' is invoked via cli or loaded from module.
+lazy_load_plugins = True
+
+# By default Airflow providers are lazily-discovered (discovery and imports happen only when required).
+# Set it to False, if you want to discover providers whenever 'airflow' is invoked via cli or
+# loaded from module.
+lazy_discover_providers = True
+
+# Number of times the code should be retried in case of DB Operational Errors.
+# Not all transactions will be retried, as doing so can cause an undesired state.
+# Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``.
+max_db_retries = 3
+
+# Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True
+#
+# (Connection passwords are always hidden in logs)
+hide_sensitive_var_conn_fields = True
+
+# A comma-separated list of extra sensitive keywords to look for in variable names or a
+# connection's extra JSON.
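+# Example (hypothetical keywords): sensitive_var_conn_names = api_token,service_password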
+sensitive_var_conn_names =
+
+[logging]
+# The folder where airflow should store its log files
+# This path must be absolute
+base_log_folder = /opt/airflow/logs
+
+# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
+# Set this to True if you want to enable remote logging.
+remote_logging = False
+
+# Users must supply an Airflow connection id that provides access to the storage
+# location.
+remote_log_conn_id =
+
+# Path to Google Credential JSON file. If omitted, authorization based on `the Application Default
+# Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+google_key_path =
+
+# Storage bucket URL for remote logging
+# S3 buckets should start with "s3://"
+# Cloudwatch log groups should start with "cloudwatch://"
+# GCS buckets should start with "gs://"
+# WASB buckets should start with "wasb" just to help Airflow select correct handler
+# Stackdriver logs should start with "stackdriver://"
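+# Example (hypothetical bucket): remote_base_log_folder = s3://my-airflow-logs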
+remote_base_log_folder =
+
+# Use server-side encryption for logs stored in S3
+encrypt_s3_logs = False
+
+# Logging level.
+#
+# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
+logging_level = INFO
+
+# Logging level for Flask-appbuilder UI.
+#
+# Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``.
+fab_logging_level = WARN
+
+# Logging class
+# Specify the class that will specify the logging configuration
+# This class has to be on the python classpath
+# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
+logging_config_class =
+
+# Flag to enable/disable Colored logs in Console
+# Colour the logs when the controlling terminal is a TTY.
+colored_console_log = True
+
+# Log format for when Colored logs is enabled
+colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
+colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
+
+# Format of Log line
+log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
+simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
+
+# Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter
+# Example: task_log_prefix_template = {ti.dag_id}-{ti.task_id}-{execution_date}-{try_number}
+task_log_prefix_template =
+
+# Formatting for how airflow generates file names/paths for each task run.
+log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
+
+# Formatting for how airflow generates file names for log
+log_processor_filename_template = {{ filename }}.log
+
+# full path of dag_processor_manager logfile
+dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log
+
+# Name of handler to read task instance logs.
+# Defaults to use ``task`` handler.
+task_log_reader = task
+
+# A comma-separated list of third-party logger names that will be configured to print messages to
+# consoles.
+# Example: extra_loggers = connexion,sqlalchemy
+extra_loggers =
+
+[metrics]
+
+# StatsD (https://github.com/etsy/statsd) integration settings.
+# Enables sending metrics to StatsD.
+statsd_on = False
+statsd_host = localhost
+statsd_port = 8125
+statsd_prefix = airflow
+
+# If you want to avoid sending all the available metrics to StatsD,
+# you can configure an allow list of prefixes (comma separated) to send only the metrics that
+# start with the elements of the list (e.g: "scheduler,executor,dagrun")
+statsd_allow_list =
+
+# A function that validates the statsd stat name, applies changes to the stat name if necessary and returns
+# the transformed stat name.
+#
+# The function should have the following signature:
+# def func_name(stat_name: str) -> str:
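+# Example (hypothetical module path): stat_name_handler = my_plugin.metrics.normalize_stat_name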
+stat_name_handler =
+
+# To enable datadog integration to send airflow metrics.
+statsd_datadog_enabled = False
+
+# List of datadog tags attached to all metrics(e.g: key1:value1,key2:value2)
+statsd_datadog_tags =
+
+# If you want to utilise your own custom Statsd client set the relevant
+# module path below.
+# Note: The module path must exist on your PYTHONPATH for Airflow to pick it up
+# statsd_custom_client_path =
+
+[secrets]
+# Full class name of secrets backend to enable (will precede env vars and metastore in search path)
+# Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend
+backend =
+
+# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class.
+# See documentation for the secrets backend you are using. JSON is expected.
+# Example for AWS Systems Manager ParameterStore:
+# ``{"connections_prefix": "/airflow/connections", "profile_name": "default"}``
+backend_kwargs =
+
+[cli]
+# In what way should the cli access the API. The LocalClient will use the
+# database directly, while the json_client will use the api running on the
+# webserver
+api_client = airflow.api.client.local_client
+
+# If you set web_server_url_prefix, do NOT forget to append it here, ex:
+# ``endpoint_url = http://localhost:8080/myroot``
+# So api will look like: ``http://localhost:8080/myroot/api/experimental/...``
+endpoint_url = http://localhost:8080
+
+[debug]
+# Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first
+# failed task. Helpful for debugging purposes.
+fail_fast = False
+
+[api]
+# Enables the deprecated experimental API. Please note that these APIs do not have access control.
+# The authenticated user has full access.
+#
+# .. warning::
+#
+#   This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is
+#   deprecated since version 2.0. Please consider using
+#   `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__.
+#   For more information on migration, see
+#   `UPDATING.md <https://github.com/apache/airflow/blob/main/UPDATING.md>`_
+enable_experimental_api = False
+
+# How to authenticate users of the API. See
+# https://airflow.apache.org/docs/apache-airflow/stable/security.html for possible values.
+# ("airflow.api.auth.backend.default" allows all requests for historic reasons)
+auth_backend = airflow.api.auth.backend.deny_all
+
+# Used to set the maximum page limit for API requests
+maximum_page_limit = 100
+
+# Used to set the default page limit when the limit is zero. A default limit
+# of 100 is set in the OpenApi spec. However, this particular default limit
+# only works when the limit is set equal to zero (0) in API requests.
+# If no limit is supplied, the OpenApi spec default is used.
+fallback_page_limit = 100
+
+# The intended audience for JWT token credentials used for authorization. This value must
+# match on the client and server sides. If empty, audience will not be tested.
+# Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com
+google_oauth2_audience =
+
+# Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on
+# `the Application Default Credentials
+# <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
+# be used.
+# Example: google_key_path = /files/service-account-json
+google_key_path =
+
+# Used in response to a preflight request to indicate which HTTP
+# headers can be used when making the actual request. This header is
+# the server side response to the browser's
+# Access-Control-Request-Headers header.
+access_control_allow_headers =
+
+# Specifies the method or methods allowed when accessing the resource.
+access_control_allow_methods =
+
+# Indicates whether the response can be shared with requesting code from the given origin.
+access_control_allow_origin =
+
+[lineage]
+# what lineage backend to use
+backend =
+
+[atlas]
+sasl_enabled = False
+host =
+port = 21000
+username =
+password =
+
+[operators]
+# The default owner assigned to each new operator, unless
+# provided explicitly or passed via ``default_args``
+default_owner = airflow
+default_cpus = 1
+default_ram = 512
+default_disk = 512
+default_gpus = 0
+
+# Default queue that tasks get assigned to and that workers listen on.
+default_queue = default
+
+# Whether it is allowed to pass additional/unused arguments (args, kwargs) to the BaseOperator.
+# If set to False, an exception will be thrown; otherwise only a console message will be displayed.
+allow_illegal_arguments = False
+
+[hive]
+# Default mapreduce queue for HiveOperator tasks
+default_hive_mapred_queue =
+
+# Template for mapred_job_name in HiveOperator, supports the following named parameters
+# hostname, dag_id, task_id, execution_date
+# mapred_job_name_template =
+
+[webserver]
+# The base url of your website as airflow cannot guess what domain or
+# cname you are using. This is used in automated emails that
+# airflow sends so that links point to the right web server
+base_url = http://localhost:8080
+
+# Default timezone to display all dates in the UI, can be UTC, system, or
+# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the
+# default value of core/default_timezone will be used
+# Example: default_ui_timezone = America/New_York
+default_ui_timezone = UTC
+
+# The ip specified when starting the web server
+web_server_host = 0.0.0.0
+
+# The port on which to run the web server
+web_server_port = 8080
+
+# Paths to the SSL certificate and key for the web server. When both are
+# provided SSL will be enabled. This does not change the web server port.
+web_server_ssl_cert =
+
+# Paths to the SSL certificate and key for the web server. When both are
+# provided SSL will be enabled. This does not change the web server port.
+web_server_ssl_key =
+
+# Number of seconds the webserver waits before killing gunicorn master that doesn't respond
+web_server_master_timeout = 120
+
+# Number of seconds the gunicorn webserver waits before timing out on a worker
+web_server_worker_timeout = 120
+
+# Number of workers to refresh at a time. When set to 0, worker refresh is
+# disabled. When nonzero, airflow periodically refreshes webserver workers by
+# bringing up new ones and killing old ones.
+worker_refresh_batch_size = 1
+
+# Number of seconds to wait before refreshing a batch of workers.
+worker_refresh_interval = 6000
+
+# If set to True, Airflow will track files in the plugins_folder directory. When it detects changes,
+# it will reload gunicorn.
+reload_on_plugin_change = False
+
+# Secret key used to run your flask app. It should be as random as possible. However, when running
+# more than one instance of the webserver, make sure all of them use the same ``secret_key``, otherwise
+# one of them will error with "CSRF session token is missing".
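+# A new random key can be generated with, for example:
+#   python -c "import os, base64; print(base64.b64encode(os.urandom(16)).decode())"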
+secret_key = 8kUFwlRKUhs6i8NBAvUmWg==
+
+# Number of workers to run the Gunicorn web server
+workers = 4
+
+# The worker class gunicorn should use. Choices include
+# sync (default), eventlet, gevent
+worker_class = sync
+
+# Log files for the gunicorn webserver. '-' means log to stderr.
+access_logfile = -
+
+# Log files for the gunicorn webserver. '-' means log to stderr.
+error_logfile = -
+
+# Access log format for gunicorn webserver.
+# default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s"
+# documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format
+access_logformat =
+
+# Expose the configuration file in the web server
+expose_config = False
+
+# Expose hostname in the web server
+expose_hostname = True
+
+# Expose stacktrace in the web server
+expose_stacktrace = True
+
+# Default DAG view. Valid values are: ``tree``, ``graph``, ``duration``, ``gantt``, ``landing_times``
+dag_default_view = tree
+
+# Default DAG orientation. Valid values are:
+# ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top)
+dag_orientation = LR
+
+# The amount of time (in secs) the webserver will wait for the initial handshake
+# while fetching logs from another worker machine
+log_fetch_timeout_sec = 5
+
+# Time interval (in secs) to wait before next log fetching.
+log_fetch_delay_sec = 2
+
+# Distance away from page bottom to enable auto tailing.
+log_auto_tailing_offset = 30
+
+# Animation speed for auto tailing log display.
+log_animation_speed = 1000
+
+# By default, the webserver shows paused DAGs. Flip this to hide paused
+# DAGs by default
+hide_paused_dags_by_default = False
+
+# Consistent page size across all listing views in the UI
+page_size = 100
+
+# Define the color of navigation bar
+navbar_color = #fff
+
+# Default dagrun to show in UI
+default_dag_run_display_number = 25
+
+# Enable werkzeug ``ProxyFix`` middleware for reverse proxy
+enable_proxy_fix = False
+
+# Number of values to trust for ``X-Forwarded-For``.
+# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/
+proxy_fix_x_for = 1
+
+# Number of values to trust for ``X-Forwarded-Proto``
+proxy_fix_x_proto = 1
+
+# Number of values to trust for ``X-Forwarded-Host``
+proxy_fix_x_host = 1
+
+# Number of values to trust for ``X-Forwarded-Port``
+proxy_fix_x_port = 1
+
+# Number of values to trust for ``X-Forwarded-Prefix``
+proxy_fix_x_prefix = 1
+
+# Set secure flag on session cookie
+cookie_secure = False
+
+# Set samesite policy on session cookie
+cookie_samesite = Lax
+
+# Default setting for wrap toggle on DAG code and TI log views.
+default_wrap = False
+
+# Allow the UI to be rendered in a frame
+x_frame_enabled = True
+
+# Send anonymous user activity to your analytics tool
+# choose from google_analytics, segment, or metarouter
+# analytics_tool =
+
+# Unique ID of your account in the analytics tool
+# analytics_id =
+
+# 'Recent Tasks' stats will show for old DagRuns if set
+show_recent_stats_for_completed_runs = True
+
+# Update FAB permissions and sync security manager roles
+# on webserver startup
+update_fab_perms = True
+
+# The UI cookie lifetime in minutes. User will be logged out from UI after
+# ``session_lifetime_minutes`` of non-activity
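+# (The value below, 43200 minutes, corresponds to 30 days.)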
+session_lifetime_minutes = 43200
+
+# Sets a custom page title for the DAGs overview page and site title for all pages
+instance_name = eFlows4HPC
+
+[email]
+
+# Configuration of the email backend and whether to
+# send email alerts on retry or failure
+# Email backend to use
+email_backend = airflow.utils.email.send_email_smtp
+
+# Email connection to use
+email_conn_id = smtp_default
+
+# Whether email alerts should be sent when a task is retried
+default_email_on_retry = True
+
+# Whether email alerts should be sent when a task failed
+default_email_on_failure = True
+
+# File that will be used as the template for Email subject (which will be rendered using Jinja2).
+# If not set, Airflow uses a base template.
+# Example: subject_template = /path/to/my_subject_template_file
+# subject_template =
+
+# File that will be used as the template for Email content (which will be rendered using Jinja2).
+# If not set, Airflow uses a base template.
+# Example: html_content_template = /path/to/my_html_content_template_file
+# html_content_template =
+
+[smtp]
+
+# If you want airflow to send emails on retries or failures, and you want to use
+# the airflow.utils.email.send_email_smtp function, you have to configure an
+# smtp server here
+smtp_host = localhost
+smtp_starttls = True
+smtp_ssl = False
+# Example: smtp_user = airflow
+# smtp_user =
+# Example: smtp_password = airflow
+# smtp_password =
+smtp_port = 25
+smtp_mail_from = airflow@example.com
+smtp_timeout = 30
+smtp_retry_limit = 5
+
+[sentry]
+
+# Sentry (https://docs.sentry.io) integration. Here you can supply
+# additional configuration options based on the Python platform. See:
+# https://docs.sentry.io/error-reporting/configuration/?platform=python.
+# Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``,
+# ``ignore_errors``, ``before_breadcrumb``, ``before_send``, ``transport``.
+# Enable error reporting to Sentry
+sentry_on = false
+sentry_dsn =
+
+[celery_kubernetes_executor]
+
+# This section only applies if you are using the ``CeleryKubernetesExecutor`` in
+# ``[core]`` section above
+# Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``.
+# When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``),
+# the task is executed via ``KubernetesExecutor``,
+# otherwise via ``CeleryExecutor``
+kubernetes_queue = kubernetes
+
+[celery]
+
+# This section only applies if you are using the CeleryExecutor in
+# ``[core]`` section above
+# The app name that will be used by celery
+celery_app_name = airflow.executors.celery_executor
+
+# The concurrency that will be used when starting workers with the
+# ``airflow celery worker`` command. This defines the number of task instances that
+# a worker will take, so size up your workers based on the resources on
+# your worker box and the nature of your tasks
+worker_concurrency = 16
+
+# The maximum and minimum concurrency that will be used when starting workers with the
+# ``airflow celery worker`` command (always keep minimum processes, but grow
+# to maximum if necessary). Note the value should be max_concurrency,min_concurrency
+# Pick these numbers based on resources on worker box and the nature of the task.
+# If autoscale option is available, worker_concurrency will be ignored.
+# http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
+# Example: worker_autoscale = 16,12
+# worker_autoscale =
+
+# Used to increase the number of tasks that a worker prefetches which can improve performance.
+# The number of processes multiplied by worker_prefetch_multiplier is the number of tasks
+# that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily
+# blocked if there are multiple workers and one worker prefetches tasks that sit behind long
+# running tasks while another worker has unutilized processes that are unable to process the already
+# claimed blocked tasks.
+# https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits
+# Example: worker_prefetch_multiplier = 1
+# worker_prefetch_multiplier =
+
+# When you start an airflow worker, airflow starts a tiny web server
+# subprocess to serve the worker's local log files to the airflow main
+# web server, which then builds pages and sends them to users. This defines
+# the port on which the logs are served. It needs to be unused, and
+# visible from the main web server so that it can connect to the workers.
+worker_log_server_port = 8793
+
+# Umask that will be used when starting workers with the ``airflow celery worker``
+# in daemon mode. This controls the file-creation mode mask which determines the initial
+# value of file permission bits for newly created files.
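+# With 0o077, newly created files get no group or other permission bits (owner-only access).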
+worker_umask = 0o077
+
+# The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
+# a sqlalchemy database. Refer to the Celery documentation for more information.
+broker_url = redis://redis:6379/0
+
+# The Celery result_backend. When a job finishes, it needs to update the
+# metadata of the job. Therefore it will post a message on a message bus,
+# or insert it into a database (depending on the backend).
+# This status is used by the scheduler to update the state of the task
+# The use of a database is highly recommended
+# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
+result_backend = db+postgresql://postgres:airflow@postgres/airflow
+
+# Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
+# it ``airflow celery flower``. This defines the IP that Celery Flower runs on
+flower_host = 0.0.0.0
+
+# The root URL for Flower
+# Example: flower_url_prefix = /flower
+flower_url_prefix =
+
+# This defines the port that Celery Flower runs on
+flower_port = 5555
+
+# Securing Flower with Basic Authentication
+# Accepts user:password pairs separated by a comma
+# Example: flower_basic_auth = user1:password1,user2:password2
+flower_basic_auth =
+
+# How many processes CeleryExecutor uses to sync task state.
+# 0 means to use max(1, number of cores - 1) processes.
+sync_parallelism = 0
+
+# Import path for celery configuration options
+celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
+ssl_active = False
+ssl_key =
+ssl_cert =
+ssl_cacert =
+
+# Celery Pool implementation.
+# Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``.
+# See:
+# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
+# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
+pool = prefork
+
+# The number of seconds to wait before timing out ``send_task_to_executor`` or
+# ``fetch_celery_task_state`` operations.
+operation_timeout = 1.0
+
+# Celery task will report its status as 'started' when the task is executed by a worker.
+# This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted
+# or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob.
+task_track_started = True
+
+# Time in seconds after which Adopted tasks are cleared by CeleryExecutor. This is helpful to clear
+# stalled tasks.
+task_adoption_timeout = 600
+
+# The Maximum number of retries for publishing task messages to the broker when failing
+# due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed.
+task_publish_max_retries = 3
+
+# Worker initialisation check to validate Metadata Database connection
+worker_precheck = False
+
+[celery_broker_transport_options]
+
+# This section is for specifying options which can be passed to the
+# underlying celery broker transport. See:
+# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options
+# The visibility timeout defines the number of seconds to wait for the worker
+# to acknowledge the task before the message is redelivered to another worker.
+# Make sure to increase the visibility timeout to match the time of the longest
+# ETA you're planning to use.
+# visibility_timeout is only supported for Redis and SQS celery brokers.
+# See:
+# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
+# Example: visibility_timeout = 21600
+# visibility_timeout =
+
+[dask]
+
+# This section only applies if you are using the DaskExecutor in
+# [core] section above
+# The IP address and port of the Dask cluster's scheduler.
+cluster_address = 127.0.0.1:8786
+
+# TLS/ SSL settings to access a secured Dask scheduler.
+tls_ca =
+tls_cert =
+tls_key =
+
+[scheduler]
+# Task instances listen for external kill signal (when you clear tasks
+# from the CLI or the UI), this defines the frequency at which they should
+# listen (in seconds).
+job_heartbeat_sec = 5
+
+# How often (in seconds) to check and tidy up 'running' TaskInstances
+# that no longer have a matching DagRun
+clean_tis_without_dagrun_interval = 15.0
+
+# The scheduler constantly tries to trigger new tasks (look at the
+# scheduler section in the docs for more information). This defines
+# how often the scheduler should run (in seconds).
+scheduler_heartbeat_sec = 5
+
+# The number of times to try to schedule each DAG file
+# -1 indicates unlimited number
+num_runs = -1
+
+# The number of seconds to wait between consecutive DAG file processing
+processor_poll_interval = 1
+
+# Number of seconds after which a DAG file is parsed. The DAG file is parsed every
+# ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after
+# this interval. Keeping this number low will increase CPU usage.
+min_file_process_interval = 30
+
+# How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
+dag_dir_list_interval = 300
+
+# How often should stats be printed to the logs. Setting to 0 will disable printing stats
+print_stats_interval = 30
+
+# How often (in seconds) should pool usage stats be sent to statsd (if statsd_on is enabled)
+pool_metrics_interval = 5.0
+
+# If the last scheduler heartbeat happened more than scheduler_health_check_threshold
+# ago (in seconds), the scheduler is considered unhealthy.
+# This is used by the health check in the "/health" endpoint
+scheduler_health_check_threshold = 30
+
+# How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs
+orphaned_tasks_check_interval = 300.0
+child_process_log_directory = /opt/airflow/logs/scheduler
+
+# Local task jobs periodically heartbeat to the DB. If the job has
+# not heartbeat in this many seconds, the scheduler will mark the
+# associated task instance as failed and will re-schedule the task.
+scheduler_zombie_task_threshold = 300
+
+# Turn off scheduler catchup by setting this to ``False``.
+# Default behavior is unchanged and
+# Command Line Backfills still work, but the scheduler
+# will not do scheduler catchup if this is ``False``,
+# however it can be set on a per DAG basis in the
+# DAG definition (catchup)
+catchup_by_default = True
+
+# This changes the batch size of queries in the scheduling main loop.
+# If this is too high, SQL query performance may be impacted by one
+# or more of the following:
+# - reversion to full table scan
+# - complexity of query predicate
+# - excessive locking
+# Additionally, you may hit the maximum allowable query length for your db.
+# Set this to 0 for no limit (not advised)
+max_tis_per_query = 512
+
+# Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries.
+# If this is set to False then you should not run more than a single
+# scheduler at once
+use_row_level_locking = True
+
+# Max number of DAGs to create DagRuns for per scheduler loop.
+max_dagruns_to_create_per_loop = 10
+
+# How many DagRuns should a scheduler examine (and lock) when scheduling
+# and queuing tasks.
+max_dagruns_per_loop_to_schedule = 20
+
+# Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the
+# same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other
+# dags in some circumstances
+schedule_after_task_execution = True
+
+# The scheduler can run multiple processes in parallel to parse dags.
+# This defines how many processes will run.
+parsing_processes = 2
+
+# One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``.
+# The scheduler will list and sort the dag files to decide the parsing order.
+#
+# * ``modified_time``: Sort by modified time of the files. This is useful on large scale to parse the
+#   recently modified DAGs first.
+# * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the
+#   same host. This is useful when running with Scheduler in HA mode where each scheduler can
+#   parse different DAG files.
+# * ``alphabetical``: Sort by filename
+file_parsing_sort_mode = modified_time
+
+# Turn off scheduler use of cron intervals by setting this to False.
+# DAGs submitted manually in the web UI or with trigger_dag will still run.
+use_job_schedule = True
+
+# Allow externally triggered DagRuns for Execution Dates in the future
+# Only has effect if schedule_interval is set to None in DAG
+allow_trigger_in_future = False
+
+# DAG dependency detector class to use
+dependency_detector = airflow.serialization.serialized_objects.DependencyDetector
+
+[kerberos]
+ccache = /tmp/airflow_krb5_ccache
+
+# gets augmented with fqdn
+principal = airflow
+reinit_frequency = 3600
+kinit_path = kinit
+keytab = airflow.keytab
+
+[github_enterprise]
+api_rev = v3
+
+[elasticsearch]
+# Elasticsearch host
+host =
+
+# Format of the log_id, which is used to query for a given task's logs
+log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number}
+
+# Used to mark the end of a log stream for a task
+end_of_log_mark = end_of_log
+
+# Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id
+# Code will construct log_id using the log_id template from the argument above.
+# NOTE: The code will prefix the https:// automatically, don't include that here.
+frontend =
+
+# Write the task logs to the stdout of the worker, rather than the default files
+write_stdout = False
+
+# Instead of the default log formatter, write the log lines as JSON
+json_format = False
+
+# Log fields to also attach to the json output, if enabled
+json_fields = asctime, filename, lineno, levelname, message
+
+# The field where host name is stored (normally either `host` or `host.name`)
+host_field = host
+
+# The field where offset is stored (normally either `offset` or `log.offset`)
+offset_field = offset
+
+[elasticsearch_configs]
+use_ssl = False
+verify_certs = True
+
+[kubernetes]
+# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored.
+pod_template_file =
+
+# The repository of the Kubernetes Image for the Worker to Run
+worker_container_repository =
+
+# The tag of the Kubernetes Image for the Worker to Run
+worker_container_tag =
+
+# The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
+namespace = default
+
+# If True, all worker pods will be deleted upon termination
+delete_worker_pods = True
+
+# If False (and delete_worker_pods is True),
+# failed worker pods will not be deleted so users can investigate them.
+# This only prevents removal of worker pods where the worker itself failed,
+# not when the task it ran failed.
+delete_worker_pods_on_failure = False
+
+# Number of Kubernetes Worker Pod creation calls per scheduler loop.
+# Note that the current default of "1" will only launch a single pod
+# per-heartbeat. It is HIGHLY recommended that users increase this
+# number to match the tolerance of their kubernetes cluster for
+# better performance.
+worker_pods_creation_batch_size = 1
+
+# Allows users to launch pods in multiple namespaces.
+# Will require creating a cluster-role for the scheduler
+multi_namespace_mode = False
+
+# Use the service account kubernetes gives to pods to connect to kubernetes cluster.
+# It's intended for clients that expect to be running inside a pod running on kubernetes.
+# It will raise an exception if called from a process not running in a kubernetes environment.
+in_cluster = True
+
+# When running with in_cluster=False, change the default cluster_context or config_file
+# options of the Kubernetes client. Leave these blank to use the default behaviour, as ``kubectl`` does.
+# cluster_context =
+
+# Path to the kubernetes configfile to be used when ``in_cluster`` is set to False
+# config_file =
+
+# Keyword parameters to pass while calling a kubernetes client core_v1_api methods
+# from Kubernetes Executor, provided as a single-line JSON dictionary string.
+# The list of supported params is similar for all core_v1_apis, hence a single config
+# variable for all APIs. See:
+# https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py
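+# Example: kube_client_request_args = {"_request_timeout" : [60,60] }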
+kube_client_request_args =
+
+# Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client
+# ``core_v1_api`` method when using the Kubernetes Executor.
+# This should be an object and can contain any of the options listed in the ``v1DeleteOptions``
+# class defined here:
+# https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19
+# Example: delete_option_kwargs = {"grace_period_seconds": 10}
+delete_option_kwargs =
+
+# Enables the TCP keepalive mechanism. This prevents Kubernetes API requests from hanging indefinitely
+# when an idle connection is timed out by services like cloud load balancers or firewalls.
+enable_tcp_keepalive = True
+
+# When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has
+# been idle for `tcp_keep_idle` seconds.
+tcp_keep_idle = 120
+
+# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
+# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.
+tcp_keep_intvl = 30
+
+# When the `enable_tcp_keepalive` option is enabled, if Kubernetes API does not respond
+# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt` number of times before
+# a connection is considered to be broken.
+tcp_keep_cnt = 6
+
+# Set this to false to skip verifying SSL certificate of Kubernetes python client.
+verify_ssl = True
+
+# How long in seconds a worker can be in Pending before it is considered a failure
+worker_pods_pending_timeout = 300
+
+# How often in seconds to check if Pending workers have exceeded their timeouts
+worker_pods_pending_timeout_check_interval = 120
+
+# How many pending pods to check for timeout violations in each check interval.
+# You may want this higher if you have a very large cluster and/or use ``multi_namespace_mode``.
+worker_pods_pending_timeout_batch_size = 100
+
+[smart_sensor]
+# When `use_smart_sensor` is True, Airflow redirects multiple qualified sensor tasks to a
+# smart sensor task.
+use_smart_sensor = False
+
+# `shard_code_upper_limit` is the upper limit of `shard_code` value. The `shard_code` is generated
+# by `hashcode % shard_code_upper_limit`.
+shard_code_upper_limit = 10000
+
+# The number of running smart sensor processes for each service.
+shards = 5
+
+# Comma-separated list of sensor classes supported in smart_sensor.
+sensors_enabled = NamedHivePartitionSensor
+
+rbac = True
\ No newline at end of file
diff --git a/dockers/docker-compose.yaml b/dockers/docker-compose.yaml
index 8b89907d5e5dde7e763fbfd54764c01086198d1e..727dfc23558a4f4e96015de180e221ac4c10baee 100644
--- a/dockers/docker-compose.yaml
+++ b/dockers/docker-compose.yaml
@@ -47,6 +47,8 @@ x-airflow-common:
   image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.1.3}
   environment:
     &airflow-common-env
+    AIRFLOW_HOME: /opt/airflow
+    AIRFLOW__CORE__DAGS_FOLDER: /opt/airflow/dags
     AIRFLOW__CORE__EXECUTOR: CeleryExecutor
     AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow@postgres/airflow
     AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow@postgres/airflow
@@ -60,6 +62,7 @@ x-airflow-common:
     - ./dags:/opt/airflow/dags
     - ./logs:/opt/airflow/logs
     - ./plugins:/opt/airflow/plugins
+    - ./config/airflow.cfg:/opt/airflow/airflow.cfg
   user: "${AIRFLOW_UID:-50000}:${AIRFLOW_GID:-50000}"
   depends_on:
     redis:
diff --git a/plugins/eFlows_menu_link.py b/plugins/eFlows_menu_link.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8a24680a605f8e75fd6298a6b4564707d0b64d0
--- /dev/null
+++ b/plugins/eFlows_menu_link.py
@@ -0,0 +1,14 @@
+from airflow.plugins_manager import AirflowPlugin
+
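+# Minimal Airflow plugin that adds an "About eFlows4HPC" entry to the webserver menu.
+# Airflow picks this file up automatically from the configured plugins folder
+# (mounted to /opt/airflow/plugins in dockers/docker-compose.yaml).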
+appbuilder_eFlows = {
+    "name": "About eFlows4HPC",
+    "href": "https://eflows4hpc.eu/",
+}
+
+class AirflowEFlowsPlugin(AirflowPlugin):
+    name = "eFlowsLink"
+    operators = []
+    flask_blueprints = []
+    hooks = []
+    admin_views = []
+    appbuilder_menu_items = [appbuilder_eFlows]
diff --git a/scripts/cloudinit.yml b/scripts/cloudinit.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b210996116684538a8991d98396bb838f3240614
--- /dev/null
+++ b/scripts/cloudinit.yml
@@ -0,0 +1,59 @@
+#cloud-config
+
+# This is a cloud config that installs the most basic packages, and clones and prepares the git repo for the data logistics service
+# This should prepare everything that is possible, so that (after assigning the ip address and generating the static files) only docker-compose needs to be run
+
+# upgrade packages
+package_update: true
+package_upgrade: true
+
+# install relevant packages
+packages:
+  - python3
+  - python3-pip
+  - docker.io
+  - docker-compose
+
+# Add users to the system. Users are added after groups are added.
+
+users:
+  - name: cboettcher
+    gecos: Christian Böttcher
+    groups: sudo, docker
+    shell: /bin/bash
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    lock_passwd: true
+    ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDrgXm/3kbHrgPuHrru2LCLxKBPNnZkwTSbaadkYm6N+EzE7GwVPcXorPReC+2SHT2e8YnczcjHMcazmf7VWmHAQVV3fGrZiQtk+xTjXt3tC+Rm2zuqB4vvJcR5DXXomMMRJwG3sk/PcozvFfKFv6P7bbHxKOR090o4krM3mE2Vo43EnsBaPUS8cWI2EkhcR4gAJHhreFHbIS+nrFaJydfmzfwHNE1WjjtfIBA0U8ld2tk8eelMUjvkWrYXK+qqdaUKL0n/wVMo8D/Kl1lNGKym8LE6ZiojjEX0Aq0ajSHyyEWGscJunv/tJkrrOX2C4jd9pGEP6d0YyAunimsT1glv cboet@Desktop-CB
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCRsldcJ7kiksXTn2hivYfZ+Y9gziBWaMPpfVPNVlPi5XizbMurXAPQ3gUbBTDRp+Plf5LiXAfFNBdPTACb5ymFhIUKj/3sJhxc92uvJktLyjObAZ74ImBzDhVwGzs/cKhWc2otFgyMwrfPuIxdarCiLTjmG+dZ0a+IZbWta241kc3qBPjuqKK/LSZOK/Jx9Dl4rURs780GdcoA7Q2r6I6Bq8m0Cpfl2Otwi5Vr4d6hxWrl8D100ssLctn4FlL4SzVHPyZJVNeFJYQv1boJwldHBST8tJ0r0KC1V5CboB+Rdh1b/Qy1y6l/y9fPX+axFSGIIxSb6egRSwcE89f3kCC1 cboettcher@zam024
+
+
+  - name: maria
+    gecos: Maria Petrova-El Sayed
+    groups: sudo, docker
+    shell: /bin/bash
+    sudo: ALL=(ALL) NOPASSWD:ALL
+    lock_passwd: true
+    ssh_authorized_keys:
+      - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDUNFmYnaZ1raXQm04/mfdoBfn4i6xYknic2nhGDOrkhp5r6kv4F1m7wgtuL/pddRKuEoQpiXjRWciEMljFmxvVc7+9VitsAn5zBsnzY9+Sq9+si5aKe93RK8JGLX/WsfZGnPMdKPkK2GO9LFJN4TyL9hTpFdFQfxtO82NIa3WikG4RI+WQuKeQ4qr8FHNymr+gHTw/+YaM9331xnM5YqkmOC27CvVtiQx96MNMAyMQ8RJcHy1GL8donTBL+knVZdIwGt4SUy9dIF8iwTXGFkLe8V7/DIEB7RW9gvk2sG3YPo2eq56HsQKAB3yre+5QFhmH/uqUnTKVFgZLqlDUC0duFOwALCRmlEgtOeZqOzRBa6a0RveTIfccMb48ac4FpeeJdo4KId1QO1JaEZ8fYKgRVw3xRuOjDMpxCFuxELpSvx/hd1jgrK9lRizH9DXNf5/5Go2O16hj8LPufBbhX2EiChjWJEJkoRWBhQ3UHmstbqRiuNU/MsHq0FPSHMHV6BU= maria@jsc-strela
+
+# TODO: add a proper ssh key if needed; this has been excluded so far so that testing of the use case goes faster
+  # - name: airflows
+  #   gecos: Common user for running the apiserver
+  #   groups: sudo
+  #   sudo: ALL=(ALL) NOPASSWD:ALL
+  #   lock_passwd: true
+  #   ssh_authorized_keys:
+  #     - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQMbfKUO3NoZspgWpzFY+SwY5Tx251oBT/F22pmnqKq3A0U1EcRooYVc11HzDmLrDTkoLSWAYPuv7I8weKqUPMlypXygu7I1kw1JoAZ4veV/TO8kBIb8+fUjD4VnD0EuU9/MD4rc0IazlInUu/5H2oDj4cj3XGoOFHAPRvo1YXF2eEbXgHcos5o52idZfvZPeWmk4wLqWUI+4q1C5o+c9xGxdWkA0Z6cErw5jSfaqIMu9GnsaPE8dDZ89vtNu8kRK97/Ax0qmJ8eLBfv3qm2HnqACRUv1MRLS/s9KsdB18DV6dTn8VuErJsn9rlpx/2oEMVS5lkUSLTJHf7oNVKDtILQ/rQ2tF/f3LakmiViA4ZsWxFspP0T/sXPhjuCgEqGWG8HrJwFj8DByMpoJUsGe1czAiMdoY5Tr7UeIgK7BGaGjoVUFaVrCKlDpDNhYsHopSTTNajVxsb0LkTRIRphGlQTHlD3nDYdHIrgZiLqA1XLtTTXtWNzQ4uE59tAkIzdTK7RSBduHunqx++IEO6Huj49Vvk1vcO33iqFTTZro1vhZ2kEGxAkxNMti+/eT2rvyfkhsXaUH1/7LXvRrR+pFKcXBpaWWeEt8cOiVrMWAPDi9VRh5QPZbJ1tyTq7XzxeaQuJhL22o2BO13ZSRzr1S+UNFcmfk3esruZoxDIiQ+Bw== apiserver@gitlab
+
+runcmd:
+  - 'git clone https://gitlab.jsc.fz-juelich.de/eflows4hpc-wp2/data-logistics-service.git /home/maria/data-logistics-service'
+  - cd /home/maria
+  - mkdir airflow-testing
+  - cd airflow-testing
+  - mkdir -p ./dags ./logs ./plugins
+  - cp ../data-logistics-service/dags/* ./dags
+  - echo "AIRFLOW_UID=0\nAIRFLOW_GID=0" > .env #for root
+  - docker-compose -f ../data-logistics-service/dockers/docker-compose.yaml --project-directory . up airflow-init
+  - docker-compose -f ../data-logistics-service/dockers/docker-compose.yaml --project-directory . up
+  # - /bin/bash ../data-logistics-service/scripts/deployment.sh .
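+  # Optional sanity check once the webserver is up (assumes curl is available on the image):
+  # - curl -s http://localhost:8080/health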
diff --git a/scripts/deployment.sh b/scripts/deployment.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2601fa574c337b8b408f157f3a6ff4d7213a229d
--- /dev/null
+++ b/scripts/deployment.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# From Christian B.
+## USAGE:
+#
+# deployment.sh <git_directory> [API_URL] [SERVER_DOMAIN]
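+#
+# Example invocation (directory as created by scripts/cloudinit.yml):
+#   ./deployment.sh /home/maria/airflow-testing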
+
+OLD_DIR=`pwd`
+
+echo "DEBUG_1 $0 $1 $2 $3"
+
+if [ -z ${1+x} ]; then NEW_DIR=`pwd`; else NEW_DIR=$1; fi
+# if [ -z ${2+x} ]; then API_URL=https://datacatalog.fz-juelich.de/; else API_URL=$2; fi
+# if [ -z ${3+x} ]; then SERVER_DOMAIN=datacatalog.fz-juelich.de; else SERVER_DOMAIN=$3; fi
+
+echo "DEBUG_2 $0 $1 $2 $3"
+
+cd "$NEW_DIR"
+
+# pip install -r requirements.txt
+
+# sed -i "s_datacatalog.fz-juelich.de_${SERVER_DOMAIN}_g" docker-compose.yml
+
+# it is at this point assumed that ip and volume are correctly assigned, and that dns is working properly
+
+docker-compose pull #  pull changed images (e.g. new latest, or specific tag)
+TIME=`date +%Y-%m-%d-%H-%M`
+mv /app/mnt/docker.log "/app/mnt/docker.log.${TIME}"
+docker-compose -f ../data-logistics-service/dockers/docker-compose.yaml --project-directory . up airflow-init
+docker-compose -f ../data-logistics-service/dockers/docker-compose.yaml --project-directory . up -d
+# docker-compose up -d # should only restart changed images, which will also update nginx and reverse-proxy image if needed
+
+# nohup docker-compose logs -f >/app/mnt/docker.log & # or similar to capture docker log TODO (seems to cause gitlab CI to hang)
+
+cd "$OLD_DIR"
\ No newline at end of file