diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2f62df0ab946f61556b053c678a3e496bce01a13..aed7b8f91e5c83d77719b4ceed5c8210b210125b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -7,14 +7,16 @@ variables:
   OS_IDENTITY_API_VERSION: 3
   OS_REGION_NAME: "HDFCloud"
   OS_INTERFACE: public
-  TESTING_IP: 134.94.199.220
-  OLD_TEST_NAME: old-airflow-test
-  TESTING_NAME: airflow-testing
-  TESTING_URL: http://134.94.199.220:7001/home
-  TESTING_DOMAIN: zam10220.zam.kfa-juelich.de
+  PRODUCTION_IP: 134.94.199.220
+  OLD_PROD_NAME: old-airflow-production
+  PRODUCTION_NAME: airflow-production
+  PRODUCTION_URL: https://datalogistics.eflows4hpc.eu
+  PRODUCTION_DOMAIN: datalogistics.eflows4hpc.eu
   AIRFLOW_TESTUSER: "airflow"
   AIRFLOW__SECRETS__BACKEND_KWARGS: $TESTING_AIRFLOW__SECRETS__BACKEND_KWARGS
   AIRFLOW__SECRETS__BACKEND: datacat_integration.secrets.DatacatSecretsBackend
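+  # OpenStack volume holding the persistent Airflow data; it is attached to the new instance and mounted at /persistent_data during a full deployment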
+  VOLUME_ID: 6b58c3a6-691b-496a-8afd-153637c2de48
   DOCKER_TLS_CERTDIR: ""
 
 
@@ -77,30 +78,34 @@ build-custom-image:
     - docker push $IMAGE_LATEST_TAG
 
 
-full-deploy-testing:
+full-deploy-production:
   stage: deploy
-  environment: Testing
+  environment: Production
   only: 
     - web
   <<: *ssh_setup
   script:
     - echo "Starting the full testing deployment of airflows example."
     - pip install python-openstackclient
-    - OLD_ID=`openstack server show $TESTING_NAME -f value -c id` && server_exists=true || echo "No testing server found. It might be a first time deployment"
+    - OLD_ID=`openstack server show $PRODUCTION_NAME -f value -c id` && server_exists=true || echo "No production server found. It might be a first time deployment"
     - if [ "$server_exists" = true ] ; then
-      openstack server set --name $OLD_TEST_NAME $OLD_ID;
+      openstack server set --name $OLD_PROD_NAME $OLD_ID;
       fi
-    - INSTANCE_ID=`openstack server create -f value -c id --prefix IMAGE_ --flavor m4 --image 149a65b5-aeb8-499f-aaa6-ec966bd28dd6 --user-data scripts/cloudinit.yml --security-group ssh --security-group airflows --security-group www --security-group https $TESTING_NAME`
+    - INSTANCE_ID=`openstack server create -f value -c id --prefix IMAGE_ --flavor m4 --image 149a65b5-aeb8-499f-aaa6-ec966bd28dd6 --user-data scripts/cloudinit.yml --security-group ssh --security-group www --security-group https $PRODUCTION_NAME`
     - while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done # wait until an address is available to attach the floating ip
-    - openstack server add floating ip $INSTANCE_ID $TESTING_IP
+    - openstack server add floating ip $INSTANCE_ID $PRODUCTION_IP
     - sleep 10 # ensure that next command reaches the new server, prevents host key problems
-    - until ssh -oStrictHostKeyChecking=accept-new airflow@$TESTING_IP ls /finished_cloudinit >/dev/null 2>&1; do sleep 30; done # wait until cloudinit script is complete
-    - ssh -oStrictHostKeyChecking=accept-new airflow@$TESTING_IP "sudo /home/airflow/data-logistics-service/scripts/deployment.sh /home/airflow /home/airflow/data-logistics-service $TESTING_DOMAIN $AIRFLOW__SECRETS__BACKEND $AIRFLOW__SECRETS__BACKEND_KWARGS"
+    # attach the persistent data volume and mount it (it appears as /dev/vdb1 inside the instance)
+    - openstack server add volume $INSTANCE_ID $VOLUME_ID
+    - sleep 20 # the volume may take a moment to become visible to the guest OS
+    - ssh -oStrictHostKeyChecking=accept-new airflow@$PRODUCTION_IP "sudo mkdir -p /persistent_data && sudo mount /dev/vdb1 /persistent_data"
+    - until ssh -oStrictHostKeyChecking=accept-new airflow@$PRODUCTION_IP ls /finished_cloudinit >/dev/null 2>&1; do sleep 30; done # wait until cloudinit script is complete
+    - ssh -oStrictHostKeyChecking=accept-new airflow@$PRODUCTION_IP "sudo /home/airflow/data-logistics-service/scripts/deployment.sh /home/airflow /home/airflow/data-logistics-service $PRODUCTION_DOMAIN $AIRFLOW__SECRETS__BACKEND $AIRFLOW__SECRETS__BACKEND_KWARGS"
     - echo "Done"
 
 # NOTE Light deployment did not perform well when the template/main.html file was changed (in case of the official airflow image being updated)
 # TODO Add proper tests
-light-deploy-testing:
+light-deploy-production:
   stage: deploy
   # only run when master is updated, unless the pipeline was triggered via the web UI
   only:
@@ -109,12 +114,12 @@ light-deploy-testing:
     - tags
     - web
   <<: *ssh_setup
-  environment: Testing
+  environment: Production
   script:
-    - ssh -oStrictHostKeyChecking=accept-new airflow@$TESTING_IP "cd /home/airflow/data-logistics-service && sudo git stash && sudo git stash clear && sudo git checkout main && sudo git checkout -f $CI_COMMIT_TAG && sudo git pull --all"
-    - ssh -oStrictHostKeyChecking=accept-new airflow@$TESTING_IP "sudo /home/airflow/data-logistics-service/scripts/deployment.sh /home/airflow /home/airflow/data-logistics-service $TESTING_DOMAIN $AIRFLOW__SECRETS__BACKEND $AIRFLOW__SECRETS__BACKEND_KWARGS"
+    - ssh -oStrictHostKeyChecking=accept-new airflow@$PRODUCTION_IP "cd /home/airflow/data-logistics-service && sudo git stash && sudo git stash clear && sudo git checkout main && sudo git checkout -f $CI_COMMIT_TAG && sudo git pull --all"
+    - ssh -oStrictHostKeyChecking=accept-new airflow@$PRODUCTION_IP "sudo /home/airflow/data-logistics-service/scripts/deployment.sh /home/airflow /home/airflow/data-logistics-service $PRODUCTION_DOMAIN $AIRFLOW__SECRETS__BACKEND $AIRFLOW__SECRETS__BACKEND_KWARGS"
 
-test-testingdeployment_webserver:
+test-production-webserver:
   cache: {}
   stage: test-deployment 
   only:
@@ -126,11 +131,11 @@ test-testingdeployment_webserver:
     - echo "This is a simple check if the deployment was successful and dags get executed"
     # ensure that the docker containers are up and running before testing the airflow deployment; timeout in 16 to 17 minutes
     - SECONDS=0
-    - 'while [ $SECONDS -le 1000 ] ; do if output=$(curl --insecure --max-time 10 -I -H "Accept: application/json" $TESTING_URL) ; then break; else sleep 30; fi ; done'
-    - 'curl --insecure -I -H "Accept: application/json" $TESTING_URL'
-    - 'curl -X GET -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" $TESTING_IP:7001/api/v1/dags'
-    - 'curl -X GET -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" $TESTING_IP:7001/api/v1/connections'
-    - 'curl -X POST -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" --data {} $TESTING_IP:7001/api/v1/dags/testdag/dagRuns'
+    - 'while [ $SECONDS -le 1000 ] ; do if output=$(curl --insecure --max-time 10 -I -H "Accept: application/json" $PRODUCTION_URL/home) ; then break; else sleep 30; fi ; done'
+    - 'curl --insecure -I -H "Accept: application/json" $PRODUCTION_URL/home'
+    - 'curl -X GET -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" $PRODUCTION_URL/api/v1/dags'
+    - 'curl -X GET -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" $PRODUCTION_URL/api/v1/connections'
+    - 'curl -X POST -u $AIRFLOW_TESTUSER:$AIRFLOW_TESTUSER_PASS -H "Content-Type: application/json" --data {} $PRODUCTION_URL/api/v1/dags/testdag/dagRuns'
 
 cleanup-successful-full-deployment:
   # check if there is an old prod or test instance, and delete it if present
diff --git a/dags/another-testdag.py b/dags/another-testdag.py
new file mode 100644
index 0000000000000000000000000000000000000000..c00489fc207af25223e505e7d643c22d95b5acaa
--- /dev/null
+++ b/dags/another-testdag.py
@@ -0,0 +1,21 @@
+from datetime import timedelta
+
+from airflow import DAG
+from airflow.operators.bash import BashOperator
+from airflow.utils.dates import days_ago
+
+def_args = {
+    'owner': 'airflow',
+    'depends_on_past': False,
+    'email_on_failure': False,
+    'email_on_retry': False,
+    'retries': 1,
+    'retry_delay': timedelta(minutes=5)
+
+}
+
+with DAG('another-testdag', default_args=def_args, description='simple testing dag', schedule_interval=timedelta(days=1), start_date=days_ago(2)) as dag:
+    t1 = BashOperator(task_id='print_date', bash_command='date')
+    t2 = BashOperator(task_id='do_nothing', bash_command='sleep 5')
+
+    t1 >> t2
diff --git a/dockers/docker-compose.yaml b/dockers/docker-compose.yaml
index a5b2ec3f2a4c80c7999e9f80c6713c9c04abede7..805ea8e6a66245406f623eaea9a9e0cfc9c6e4b2 100644
--- a/dockers/docker-compose.yaml
+++ b/dockers/docker-compose.yaml
@@ -62,7 +62,7 @@ x-airflow-common:
   volumes:
     - ./dags:/opt/airflow/dags
     - ./config/airflow.cfg:/opt/airflow/airflow.cfg
-    - ./logs:/opt/airflow/logs
+    - logs:/opt/airflow/logs
     - ./plugins:/opt/airflow/plugins
     - ./templates/main.html:/home/airflow/.local/lib/python3.7/site-packages/airflow/www/templates/airflow/main.html
     - ./templates/img/BMBF_gefoerdert_2017_en.jpg:/home/airflow/.local/lib/python3.7/site-packages/airflow/www/static/BMBF_gefoerdert_2017_en.jpg
@@ -75,6 +75,39 @@ x-airflow-common:
       condition: service_healthy
 
 services:
+
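+  # nginx-proxy builds the reverse-proxy configuration from the containers' VIRTUAL_HOST variables (read via the Docker socket);
+  # the letsencrypt companion obtains and renews certificates for containers that set LETSENCRYPT_HOST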
+  reverse-proxy:
+    image: "jwilder/nginx-proxy:alpine"
+    container_name: "reverse-proxy"
+    volumes:
+      - "html:/usr/share/nginx/html"
+      - "dhparam:/etc/nginx/dhparam"
+      - "vhost:/etc/nginx/vhost.d"
+      - "certs:/etc/nginx/certs"
+      - "/run/docker.sock:/tmp/docker.sock:ro"
+    restart: "always"
+    ports:
+      - "80:80"
+      - "443:443"
+
+  letsencrypt:
+    image: "jrcs/letsencrypt-nginx-proxy-companion:latest"
+    container_name: "letsencrypt-helper"
+    volumes:
+      - "html:/usr/share/nginx/html"
+      - "dhparam:/etc/nginx/dhparam"
+      - "vhost:/etc/nginx/vhost.d"
+      - "certs:/etc/nginx/certs"
+      - "/run/docker.sock:/var/run/docker.sock:ro"
+    environment:
+      NGINX_PROXY_CONTAINER: "reverse-proxy"
+      DEFAULT_EMAIL: "m.petrova@fz-juelich.de"
+    restart: "always"
+    depends_on:
+      - "reverse-proxy"
+
   postgres:
     image: postgres:13
     environment:
@@ -105,6 +136,13 @@ services:
     command: webserver
     ports:
       - 7001:8080
+
+    environment:
+      <<: *airflow-common-env
+      VIRTUAL_HOST: datalogistics.eflows4hpc.eu
+      LETSENCRYPT_HOST: datalogistics.eflows4hpc.eu
+      VIRTUAL_PORT: 8080
+
     healthcheck:
       test: ["CMD", "curl", "--fail", "http://localhost:8080/health"]
       interval: 60s
@@ -149,7 +187,7 @@ services:
     volumes:
       - ./dags:/opt/airflow/dags
       - ./config/airflow.cfg:/opt/airflow/airflow.cfg
-      - ./logs:/opt/airflow/logs
+      - logs:/opt/airflow/logs
       - ./tmp/:/work/
     depends_on:
       <<: *airflow-common-depends-on
@@ -284,4 +322,10 @@ services:
         condition: service_completed_successfully
 
 volumes:
+  logs:
   postgres-db-volume:
+  certs:
+  html:
+  vhost:
+  dhparam:
+
diff --git a/docs/apirequests.adoc b/docs/apirequests.adoc
index 6d4bd84e9d217edf17b96940e2b12fbb4e80d0cc..740eefee3a8761361ca901daa908db081782a0b4 100644
--- a/docs/apirequests.adoc
+++ b/docs/apirequests.adoc
@@ -36,7 +36,17 @@ Following parameters are expected:
 Those will be used to create a temporary connection (with randomized name). The connection will be deleted after the pipline run. 
 
 ==== Passing vault connection id ====
-(not implemented yet)
+Credentials stored in vault can be used to access resources. This is achieved by passing the id of the credentials in vault to the respective pipeline. The parameter is ```vault_id```; the credentials are read from the vault instance defined in the connection admin/connections/my_vault and are expected at the following location in vault: ```/secret/data/ssh-credentials/<vault-id-value>```
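+
+For illustration, such a run might be triggered through the Airflow REST API roughly as follows (```<dag_id>``` and ```<vault-id-value>``` are placeholders):
+
+[source,bash]
+----
+# example only: substitute the API user, DAG id and vault id of your deployment
+curl -X POST -u <user>:<password> -H "Content-Type: application/json" \
+     --data '{"conf": {"vault_id": "<vault-id-value>"}}' \
+     https://datalogistics.eflows4hpc.eu/api/v1/dags/<dag_id>/dagRuns
+----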
 
 
 === Starting data transfer ===
diff --git a/plugins/eFlows_menu_link.py b/plugins/eFlows_menu_link.py
index 937ad23e784fb873550d41e54b6c0e39fedea4c9..da5b6603b96752fcd2f24773242b5fce75d891f0 100644
--- a/plugins/eFlows_menu_link.py
+++ b/plugins/eFlows_menu_link.py
@@ -14,9 +14,9 @@ class AirflowEFlowsPlugin(AirflowPlugin):
     appbuilder_menu_items = [appbuilder_eFlows]
 
 class AirflowDataCatPlugin(AirflowPlugin):
-    name = "Data Catalog"
+    name = "Data Catalogue"
     operators = []
     flask_blueprints = []
     hooks = []
     admin_views = []
-    appbuilder_menu_items = [{"name": "Data Catalog", "href": "https://datacatalog.fz-juelich.de/index.html"}]
+    appbuilder_menu_items = [{"name": "Data Catalogue", "href": "https://datacatalog.fz-juelich.de/index.html"}]
diff --git a/scripts/cloudinit.yml b/scripts/cloudinit.yml
index 8fdb0263996ed17fd810e2cd8b37c6adb61e61d6..56ad8cb9c708998b76f74aa2c487f79a40c53c86 100644
--- a/scripts/cloudinit.yml
+++ b/scripts/cloudinit.yml
@@ -46,6 +46,16 @@ users:
     ssh_authorized_keys:
       - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCeJcmX8ogs4/KDQwsQHdCwk5iZz7bYJdthvX0y0YIrslhkW1KfDbOJMdRRLBcKmSCIdPofRkWkgj1hFEW4MqDCNSftLfUDTsymxNdF0dZxaX0jM2K1K7sFr1JG2Lkuxao2g9AoRKqyydlVhApoY8UhwpDaNOAFx5q0Pa7FchWvFX37r0AU5JZTsDFSDM9NaSZll78vwK1wjhhHi4EEFAs2IkrySOGzJTkaHwZrqYMJgEIvtfHN14UJR9WDmnPvSIRe/GzRg1xz3Op4E+S0/jK2bR5jJTMJJka19FjShxlqMr0UbhwBgiKYRhpfqhy+MWGz9H5GvWNdiUbBV8jdlZQHjFujAlzErJ/Twmnn0YVNUX+9ZoyEa/NKMS2quKAUhhQaD04TbAIKNt0iFbOFcbrfpbBXw7PiruIeHGFgmcxug3FcOX6xmHVuPVi1Zm1CxMxaydS7QStUchZdHN0Rc9AF2MOB/ZuKQCUbJZ2pKpP5i90eLbwhZAYvH5DAn9g6w+/6CjuFj1fB4Ywgmv+TvZ0NwtrJRhARN/1nY9uxVSoFAvxrlx3FU0bw0c/3YojY9j+LBJYf+e3Y1R2ZBSVUYGn2eACF86fnGs6Bz/WmrZW6WWrSiEwxtElkQRnuAb35L/V5VFrZv+x0qtoMNl0EK0Rz6CKMD5HHrI6Z2FNqOO6bEQ== service@gitlab
 
+write_files:
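+  # relocate Docker's data-root to the attached persistent volume so images and named volumes survive a redeployment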
+  - path: /etc/docker/daemon.json
+    permissions: '0744'
+    owner: root
+    content: |
+      {
+        "data-root": "/persistent_data/docker_volumes"
+      }
+
 runcmd:
   - echo "Downloading latest version of docker-compose"
   - sudo pip3 install docker-compose
diff --git a/scripts/deployment.sh b/scripts/deployment.sh
index d7819825707a17dce56a0c6f6f0bf9895456ccae..a6143e4e20ef494dcae3795f75a44286a7c8991e 100755
--- a/scripts/deployment.sh
+++ b/scripts/deployment.sh
@@ -41,21 +41,18 @@ export AIRFLOW_UID=$(id -u)
 echo "Collecting requirements"
 reqs=`cat $GIT_REPO/requirements.txt | tr '\n' ' '`
 echo "Collected requirements: $reqs"
-# sudo sh -c "echo \"_PIP_ADDITIONAL_REQUIREMENTS=\"$reqs\"\" >> $GIT_REPO/dockers/.env"
+
 echo "_PIP_ADDITIONAL_REQUIREMENTS=\"$reqs\"" >> $GIT_REPO/dockers/.env
 pip install -r $GIT_REPO/requirements.txt
 
-# sed -i "s_datacatalog.fz-juelich.de_${SERVER_DOMAIN}_g" docker-compose.yml
+sed -i "s_datalogistics.eflows4hpc.eu_${SERVER_DOMAIN}_g" $GIT_REPO/dockers/docker-compose.yaml
 
 # it is at this point assumed that ip and volume are correctly assigned, and that dns is working properly
 echo "-----------Bringing up the docker containers-----------"
 docker-compose -f $GIT_REPO/dockers/docker-compose.yaml pull #  pull changed images (e.g. new latest, or specific tag)
 
-docker-compose -f $GIT_REPO/dockers/docker-compose.yaml --project-directory $AIRFLOW_DIR --verbose up airflow-init
+# skip airflow-init: the database and named volumes persist on the attached volume across deployments
+#docker-compose -f $GIT_REPO/dockers/docker-compose.yaml --project-directory $AIRFLOW_DIR --verbose up airflow-init
 docker-compose -f $GIT_REPO/dockers/docker-compose.yaml --project-directory $AIRFLOW_DIR up -d
 
-# docker-compose up -d # should only restart changed images, which will also update nginx and reverse-proxy image if needed
-
-# nohup docker-compose logs -f >/app/mnt/docker.log & # or similar to capture docker log TODO (seems to cause gitlab CI to hang)
-
 cd $OLD_DIR