diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9f1ba73f5a3b3fada8624dfd89fa6f12488a877b..f3ec1ab98cf8e46b97e2d803518ed57c6cfd4622 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -23,13 +23,71 @@ version:
     paths:
       - badges/
 
+### Tests (from scratch) ###
+tests (from scratch):
+  tags:
+    - base
+    - zam347
+  stage: test
+  only:
+    - master
+    - /^release.*$/
+    - develop
+  variables:
+    FAILURE_THRESHOLD: 100
+    TEST_TYPE: "scratch"
+  before_script:
+    - chmod +x ./CI/update_badge.sh
+    - ./CI/update_badge.sh > /dev/null
+  script:
+    - zypper --non-interactive install binutils libproj-devel gdal-devel
+    - zypper --non-interactive install proj geos-devel
+    - pip install -r requirements.txt
+    - chmod +x ./CI/run_pytest.sh
+    - ./CI/run_pytest.sh
+  after_script:
+    - ./CI/update_badge.sh > /dev/null
+  artifacts:
+    name: pages
+    when: always
+    paths:
+      - badges/
+      - test_results_scratch/
+
+### Tests (on GPU) ###
+tests (on GPU):
+  tags:
+    - gpu
+    - zam347
+  stage: test
+  only:
+    - master
+    - /^release.*$/
+    - develop
+  variables:
+    FAILURE_THRESHOLD: 100
+    TEST_TYPE: "gpu"
+  before_script:
+    - chmod +x ./CI/update_badge.sh
+    - ./CI/update_badge.sh > /dev/null
+  script:
+    - pip install -r requirements.txt
+    - chmod +x ./CI/run_pytest.sh
+    - ./CI/run_pytest.sh
+  after_script:
+    - ./CI/update_badge.sh > /dev/null
+  artifacts:
+    name: pages
+    when: always
+    paths:
+      - badges/
+      - test_results_gpu/
+
 ### Tests ###
 tests:
   tags:
-    - leap
+    - machinelearningtools
     - zam347
-    - base
-    - django
   stage: test
   variables:
     FAILURE_THRESHOLD: 100
@@ -51,10 +109,8 @@ tests:
 
 coverage:
   tags:
-    - leap
+    - machinelearningtools
     - zam347
-    - base
-    - django
   stage: test
   variables:
     FAILURE_THRESHOLD: 50
@@ -79,7 +135,6 @@ coverage:
 pages:
   stage: pages
   tags:
-    - leap
     - zam347
     - base
   script:
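The two new jobs differ from the existing `tests` job only in their runner tags, the extra system packages ("from scratch"), and the `TEST_TYPE` variable, which `CI/run_pytest.sh` turns into a suffix for the results directory. A minimal sketch of how the "from scratch" job could be reproduced locally, assuming an openSUSE-based image like the runner's (the package list is copied from the job above; the invocation itself is hypothetical):

    # hypothetical local dry-run of the "tests (from scratch)" job
    zypper --non-interactive install binutils libproj-devel gdal-devel proj geos-devel
    pip install -r requirements.txt
    chmod +x ./CI/run_pytest.sh
    TEST_TYPE="scratch" ./CI/run_pytest.sh  # results land in test_results_scratch/
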
diff --git a/CI/run_pytest.sh b/CI/run_pytest.sh
index a7d883dcc95e0c16541af00ed0891e2d31dee82c..baa7ef8e892fc2d9efdd30094917ca492017de3d 100644
--- a/CI/run_pytest.sh
+++ b/CI/run_pytest.sh
@@ -1,24 +1,28 @@
 #!/bin/bash
 
 # run pytest for all run_modules
-python3 -m pytest --html=report.html --self-contained-html test/ | tee test_results.out
+python3.6 -m pytest --html=report.html --self-contained-html test/ | tee test_results.out
 
 IS_FAILED=$?
 
+# prepend an underscore when TEST_TYPE is set, so results land in test_results_<type>/
+TEST_TYPE="${TEST_TYPE:+_${TEST_TYPE}}"
+
 # move html test report
-mkdir test_results/
+TEST_RESULTS_DIR="test_results${TEST_TYPE}/"
+mkdir "${TEST_RESULTS_DIR}"
 BRANCH_NAME=$( echo -e "${CI_COMMIT_REF_NAME////_}")
-mkdir test_results/${BRANCH_NAME}
-mkdir test_results/recent
-cp report.html test_results/${BRANCH_NAME}/.
-cp report.html test_results/recent/.
+mkdir "${TEST_RESULTS_DIR}/${BRANCH_NAME}"
+mkdir "${TEST_RESULTS_DIR}/recent"
+cp report.html "${TEST_RESULTS_DIR}/${BRANCH_NAME}/."
+cp report.html "${TEST_RESULTS_DIR}/recent/."
 if [[ "${CI_COMMIT_REF_NAME}" = "master" ]]; then
-    cp -r report.html test_results/.
+    cp -r report.html "${TEST_RESULTS_DIR}/."
 fi
 
 # exit 0 if no tests implemented
 RUN_NO_TESTS="$(grep -c 'no tests ran' test_results.out)"
-if [[ ${RUN_NO_TESTS} > 0 ]]; then
+if [[ ${RUN_NO_TESTS} -gt 0 ]]; then
     echo "no test available"
     echo "incomplete" > status.txt
     echo "no tests avail" > incomplete.txt
@@ -27,20 +31,20 @@ fi
 
 # extract if tests passed or not
 TEST_FAILED="$(grep -oP '(\d+\s{1}failed)' test_results.out)"
-TEST_FAILED="$(echo ${TEST_FAILED} | (grep -oP '\d*'))"
+TEST_FAILED="$(echo "${TEST_FAILED}" | (grep -oP '\d*'))"
 TEST_PASSED="$(grep -oP '\d+\s{1}passed' test_results.out)"
-TEST_PASSED="$(echo ${TEST_PASSED} | (grep -oP '\d*'))"
+TEST_PASSED="$(echo "${TEST_PASSED}" | (grep -oP '\d*'))"
 if [[ -z "$TEST_FAILED" ]]; then
     TEST_FAILED=0
 fi
-let "TEST_PASSED=${TEST_PASSED}-${TEST_FAILED}"
-
+(( TEST_PASSED=TEST_PASSED-TEST_FAILED ))
 # calculate metrics
-let "SUM=${TEST_FAILED}+${TEST_PASSED}"
-let "TEST_PASSED_RATIO=${TEST_PASSED}*100/${SUM}"
+(( SUM = TEST_FAILED + TEST_PASSED ))
+(( TEST_PASSED_RATIO = TEST_PASSED * 100 / SUM ))
 
 # report
-if [[ ${IS_FAILED} == 0 ]]; then
+if [[ ${IS_FAILED} -eq 0 ]]; then
     if [[ ${TEST_PASSED_RATIO} -lt 100 ]]; then
         echo "only ${TEST_PASSED_RATIO}% passed"
         echo "incomplete" > status.txt
diff --git a/CI/run_pytest_coverage.sh b/CI/run_pytest_coverage.sh
index 2157192d49a15baa048968b799aa264941152c1b..45916427f1521843923fb94e49dc661241dc0369 100644
--- a/CI/run_pytest_coverage.sh
+++ b/CI/run_pytest_coverage.sh
@@ -1,17 +1,16 @@
 #!/usr/bin/env bash
 
 # run coverage twice, 1) for html deploy 2) for success evaluation
-python3 -m pytest --cov=src --cov-report html test/
-python3 -m pytest --cov=src --cov-report term test/ | tee coverage_results.out
+python3.6 -m pytest --cov=src --cov-report term --cov-report html test/ | tee coverage_results.out
 
 IS_FAILED=$?
 
 # move html coverage report
 mkdir coverage/
 BRANCH_NAME=$( echo -e "${CI_COMMIT_REF_NAME////_}")
-mkdir coverage/${BRANCH_NAME}
-mkdir coverage/recent
-cp -r htmlcov/* coverage/${BRANCH_NAME}/.
+mkdir "coverage/${BRANCH_NAME}"
+mkdir "coverage/recent"
+cp -r htmlcov/* "coverage/${BRANCH_NAME}/."
 cp -r htmlcov/* coverage/recent/.
 if [[ "${CI_COMMIT_REF_NAME}" = "master" ]]; then
     cp -r htmlcov/* coverage/.
@@ -19,10 +18,10 @@ fi
 
 # extract coverage information
 COVERAGE_RATIO="$(grep -oP '\d+\%' coverage_results.out | tail -1)"
-COVERAGE_RATIO="$(echo ${COVERAGE_RATIO} | (grep -oP '\d*'))"
+COVERAGE_RATIO="$(echo "${COVERAGE_RATIO}" | (grep -oP '\d*'))"
 
 # report
-if [[ ${IS_FAILED} == 0 ]]; then
+if [[ ${IS_FAILED} -eq 0 ]]; then
     if [[ ${COVERAGE_RATIO} -lt ${COVERAGE_PASS_THRESHOLD} ]]; then
         echo "only ${COVERAGE_RATIO}% covered"
         echo "incomplete" > status.txt
diff --git a/src/helpers.py b/src/helpers.py
index 2589cfe88d187ac8ebdf488cc9ab84fb1598ada0..e977c9ae0da53c640405aa376a593281b485f46d 100644
--- a/src/helpers.py
+++ b/src/helpers.py
@@ -9,13 +9,12 @@ import logging
 import math
 import os
 import socket
-import sys
 import time
 
 import keras.backend as K
 import xarray as xr
 
-from typing import Dict, Callable
+from typing import Dict, Callable, Pattern, Union
 
 
 def to_list(arg):
@@ -99,6 +98,7 @@ class TimeTracking(object):
 
 def prepare_host(create_new=True, sampling="daily"):
     hostname = socket.gethostname()
+    runner_regex = re.compile(r"runner-.*-project-2411-concurrent-\d+")  # matches any project-2411 CI runner hostname
     try:
         user = os.getlogin()
     except OSError:
@@ -113,7 +113,7 @@ def prepare_host(create_new=True, sampling="daily"):
         path = f"/p/project/cjjsc42/{user}/DATA/toar_{sampling}/"
     elif (len(hostname) > 2) and (hostname[:2] == "jw"):
         path = f"/p/home/jusers/{user}/juwels/intelliaq/DATA/toar_{sampling}/"
-    elif "runner-6HmDp9Qd-project-2411-concurrent" in hostname:
+    elif runner_regex.match(hostname) is not None:
         path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
     else:
         raise OSError(f"unknown host '{hostname}'")
@@ -156,7 +156,7 @@ def set_bootstrap_path(bootstrap_path, data_path, sampling):
 class PyTestRegex:
     """Assert that a given string meets some expectations."""
 
-    def __init__(self, pattern: str, flags: int = 0):
+    def __init__(self, pattern: Union[str, Pattern], flags: int = 0):
         self._regex = re.compile(pattern, flags)
 
     def __eq__(self, actual: str) -> bool:
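The compiled pattern generalizes the old hard-coded hostname, so every concurrent GitLab runner of project 2411 maps to the same data path. For a quick check directly on a runner, the same test can be expressed in bash (a sketch; note the explicit `^`, since bash's `=~` searches anywhere while Python's `re.match` anchors at the start):

    # same hostname check as helpers.prepare_host, as a bash regex test
    if [[ "$(hostname)" =~ ^runner-.*-project-2411-concurrent-[0-9]+ ]]; then
        echo "recognized as a project-2411 CI runner"
    fi
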
diff --git a/test/test_helpers.py b/test/test_helpers.py
index 623c0bf8b1bec735ab422e31c0e5783b5c711a3a..9c71a53389344083e4e18a83a6aab5838ad678ca 100644
--- a/test/test_helpers.py
+++ b/test/test_helpers.py
@@ -131,7 +131,7 @@ class TestTimeTracking:
 class TestPrepareHost:
 
     @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent"])
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
     @mock.patch("os.getlogin", return_value="testUser")
     @mock.patch("os.path.exists", return_value=True)
     def test_prepare_host(self, mock_host, mock_user, mock_path):
@@ -163,7 +163,7 @@ class TestPrepareHost:
         assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]
 
     @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent"])
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
     @mock.patch("os.getlogin", side_effect=OSError)
     @mock.patch("os.path.exists", return_value=True)
     def test_os_error(self, mock_path, mock_user, mock_host):
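
To exercise just the tests touched here, a run like the following should suffice (hypothetical invocation, assuming pytest is installed):

    python3.6 -m pytest test/test_helpers.py::TestPrepareHost -v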