Commit 26d51f3d authored by lukas leufen

Merge branch 'lukas_issue095_tech_mlt-runner' into 'develop'

Resolve "new gitlab runner specialised for mlt"

See merge request toar/machinelearningtools!85
parents 333dd12d fff544da
3 merge requests: !90 WIP: new release update, !89 Resolve "release branch / CI on gpu", !85 Resolve "new gitlab runner specialised for mlt"
Pipeline #33891 passed
@@ -23,13 +23,71 @@ version:
    paths:
      - badges/
+### Tests (from scratch) ###
+tests (from scratch):
+  tags:
+    - base
+    - zam347
+  stage: test
+  only:
+    - master
+    - /^release.*$/
+    - develop
+  variables:
+    FAILURE_THRESHOLD: 100
+    TEST_TYPE: "scratch"
+  before_script:
+    - chmod +x ./CI/update_badge.sh
+    - ./CI/update_badge.sh > /dev/null
+  script:
+    - zypper --non-interactive install binutils libproj-devel gdal-devel
+    - zypper --non-interactive install proj geos-devel
+    - pip install -r requirements.txt
+    - chmod +x ./CI/run_pytest.sh
+    - ./CI/run_pytest.sh
+  after_script:
+    - ./CI/update_badge.sh > /dev/null
+  artifacts:
+    name: pages
+    when: always
+    paths:
+      - badges/
+      - test_results/
+
+### Tests (on GPU) ###
+tests (on GPU):
+  tags:
+    - gpu
+    - zam347
+  stage: test
+  only:
+    - master
+    - /^release.*$/
+    - develop
+  variables:
+    FAILURE_THRESHOLD: 100
+    TEST_TYPE: "gpu"
+  before_script:
+    - chmod +x ./CI/update_badge.sh
+    - ./CI/update_badge.sh > /dev/null
+  script:
+    - pip install -r requirements.txt
+    - chmod +x ./CI/run_pytest.sh
+    - ./CI/run_pytest.sh
+  after_script:
+    - ./CI/update_badge.sh > /dev/null
+  artifacts:
+    name: pages
+    when: always
+    paths:
+      - badges/
+      - test_results/
### Tests ###
tests:
  tags:
    - leap
    - machinelearningtools
    - zam347
    - base
    - django
  stage: test
  variables:
    FAILURE_THRESHOLD: 100
@@ -51,10 +109,8 @@ tests:
coverage:
  tags:
-    - leap
+    - machinelearningtools
    - zam347
-    - base
-    - django
  stage: test
  variables:
    FAILURE_THRESHOLD: 50
@@ -79,7 +135,6 @@ coverage:
pages:
  stage: pages
  tags:
-    - leap
    - zam347
    - base
  script:
......
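The two new jobs mirror the existing tests job; they differ only in the runner tags that route them (base vs. gpu on zam347) and the TEST_TYPE variable passed through to CI/run_pytest.sh. As a rough sketch, the "tests (from scratch)" step could be replayed outside CI roughly as follows, assuming a zypper-based image like the one the base-tagged runner provides (the exported variables are normally injected by GitLab):

#!/bin/bash
# sketch: replay the "tests (from scratch)" job steps locally
export FAILURE_THRESHOLD=100   # normally set in the job's variables: block
export TEST_TYPE="scratch"
# system packages a fresh (zypper-based) image is missing
zypper --non-interactive install binutils libproj-devel gdal-devel
zypper --non-interactive install proj geos-devel
pip install -r requirements.txt
chmod +x ./CI/run_pytest.sh
./CI/run_pytest.sh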
#!/bin/bash
# run pytest for all run_modules
-python3 -m pytest --html=report.html --self-contained-html test/ | tee test_results.out
+python3.6 -m pytest --html=report.html --self-contained-html test/ | tee test_results.out
IS_FAILED=$?
+if [ -z ${TEST_TYPE+x} ]; then
+    TEST_TYPE=""; else
+    TEST_TYPE="_"${TEST_TYPE};
+fi
# move html test report
-mkdir test_results/
+TEST_RESULTS_DIR="test_results${TEST_TYPE}/"
+mkdir ${TEST_RESULTS_DIR}
BRANCH_NAME=$( echo -e "${CI_COMMIT_REF_NAME////_}")
-mkdir test_results/${BRANCH_NAME}
-mkdir test_results/recent
-cp report.html test_results/${BRANCH_NAME}/.
-cp report.html test_results/recent/.
+mkdir "${TEST_RESULTS_DIR}/${BRANCH_NAME}"
+mkdir "${TEST_RESULTS_DIR}/recent"
+cp report.html "${TEST_RESULTS_DIR}/${BRANCH_NAME}/."
+cp report.html "${TEST_RESULTS_DIR}/recent/."
if [[ "${CI_COMMIT_REF_NAME}" = "master" ]]; then
-    cp -r report.html test_results/.
+    cp -r report.html ${TEST_RESULTS_DIR}/.
fi
# exit 0 if no tests implemented
RUN_NO_TESTS="$(grep -c 'no tests ran' test_results.out)"
-if [[ ${RUN_NO_TESTS} > 0 ]]; then
+if [[ ${RUN_NO_TESTS} -gt 0 ]]; then
echo "no test available"
echo "incomplete" > status.txt
echo "no tests avail" > incomplete.txt
@@ -27,20 +33,19 @@ fi
# extract if tests passed or not
TEST_FAILED="$(grep -oP '(\d+\s{1}failed)' test_results.out)"
TEST_FAILED="$(echo ${TEST_FAILED} | (grep -oP '\d*'))"
TEST_FAILED="$(echo "${TEST_FAILED}" | (grep -oP '\d*'))"
TEST_PASSED="$(grep -oP '\d+\s{1}passed' test_results.out)"
TEST_PASSED="$(echo ${TEST_PASSED} | (grep -oP '\d*'))"
TEST_PASSED="$(echo "${TEST_PASSED}" | (grep -oP '\d*'))"
if [[ -z "$TEST_FAILED" ]]; then
TEST_FAILED=0
fi
let "TEST_PASSED=${TEST_PASSED}-${TEST_FAILED}"
(( TEST_PASSED=TEST_PASSED-TEST_FAILED ))
# calculate metrics
let "SUM=${TEST_FAILED}+${TEST_PASSED}"
let "TEST_PASSED_RATIO=${TEST_PASSED}*100/${SUM}"
(( SUM=TEST_FAILED+TEST_PASSED ))
(( TEST_PASSED_RATIO=TEST_PASSED*100/SUM ))
# report
-if [[ ${IS_FAILED} == 0 ]]; then
+if [[ ${IS_FAILED} -eq 0 ]]; then
    if [[ ${TEST_PASSED_RATIO} -lt 100 ]]; then
        echo "only ${TEST_PASSED_RATIO}% passed"
        echo "incomplete" > status.txt
......
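Two idioms introduced above are easy to misread. ${TEST_TYPE+x} expands to x only if TEST_TYPE is set (even to the empty string), so the test cleanly distinguishes "unset" from "empty"; and (( ... )) is bash's arithmetic context, which unlike let needs no quoting and always compares numerically (the same reason > became -gt inside [[ ]]). A standalone sketch with made-up counts:

#!/bin/bash
# ${VAR+x}: expands to "x" iff VAR is set, so -z detects "unset" only
unset TEST_TYPE
[ -z ${TEST_TYPE+x} ] && echo "TEST_TYPE is unset"
TEST_TYPE="gpu"
[ -z ${TEST_TYPE+x} ] || echo "directory suffix: _${TEST_TYPE}"
# (( ... )): variables need no $, operators are numeric
TEST_FAILED=2; TEST_PASSED=10
(( SUM = TEST_FAILED + TEST_PASSED ))
(( TEST_PASSED_RATIO = TEST_PASSED * 100 / SUM ))
echo "${TEST_PASSED_RATIO}% passed"   # -> 83% passed (integer division)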
#!/usr/bin/env bash
# run coverage twice, 1) for html deploy 2) for success evaluation
-python3 -m pytest --cov=src --cov-report html test/
-python3 -m pytest --cov=src --cov-report term test/ | tee coverage_results.out
+python3.6 -m pytest --cov=src --cov-report term --cov-report html test/ | tee coverage_results.out
IS_FAILED=$?
# move html coverage report
mkdir coverage/
BRANCH_NAME=$( echo -e "${CI_COMMIT_REF_NAME////_}")
-mkdir coverage/${BRANCH_NAME}
-mkdir coverage/recent
-cp -r htmlcov/* coverage/${BRANCH_NAME}/.
+mkdir "coverage/${BRANCH_NAME}"
+mkdir "coverage/recent"
+cp -r htmlcov/* "coverage/${BRANCH_NAME}/."
cp -r htmlcov/* coverage/recent/.
if [[ "${CI_COMMIT_REF_NAME}" = "master" ]]; then
    cp -r htmlcov/* coverage/.
@@ -19,10 +18,10 @@ fi
# extract coverage information
COVERAGE_RATIO="$(grep -oP '\d+\%' coverage_results.out | tail -1)"
COVERAGE_RATIO="$(echo ${COVERAGE_RATIO} | (grep -oP '\d*'))"
COVERAGE_RATIO="$(echo "${COVERAGE_RATIO}" | (grep -oP '\d*'))"
# report
-if [[ ${IS_FAILED} == 0 ]]; then
+if [[ ${IS_FAILED} -eq 0 ]]; then
    if [[ ${COVERAGE_RATIO} -lt ${COVERAGE_PASS_THRESHOLD} ]]; then
        echo "only ${COVERAGE_RATIO}% covered"
        echo "incomplete" > status.txt
......
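The coverage number is still extracted in two grep -oP steps: the first keeps the last percentage printed by pytest-cov, the second drops the % sign. A sketch against a fabricated summary line (the real script reads coverage_results.out, and COVERAGE_PASS_THRESHOLD is injected by the CI job):

#!/bin/bash
SAMPLE_LINE="TOTAL    1523    260    83%"   # fabricated pytest-cov total line
COVERAGE_RATIO="$(grep -oP '\d+\%' <<< "${SAMPLE_LINE}" | tail -1)"   # "83%"
COVERAGE_RATIO="$(echo "${COVERAGE_RATIO}" | grep -oP '\d*')"         # "83"
COVERAGE_PASS_THRESHOLD=50   # assumed value; set by the CI job in reality
if [[ ${COVERAGE_RATIO} -lt ${COVERAGE_PASS_THRESHOLD} ]]; then
    echo "only ${COVERAGE_RATIO}% covered"
else
    echo "coverage ok (${COVERAGE_RATIO}%)"
fi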
@@ -9,13 +9,12 @@ import logging
import math
import os
import socket
-import sys
import time
import keras.backend as K
import xarray as xr
-from typing import Dict, Callable
+from typing import Dict, Callable, Pattern, Union
def to_list(arg):
@@ -99,6 +98,7 @@ class TimeTracking(object):
def prepare_host(create_new=True, sampling="daily"):
    hostname = socket.gethostname()
+    runner_regex = re.compile(r"runner-.*-project-2411-concurrent-\d+")
    try:
        user = os.getlogin()
    except OSError:
@@ -113,7 +113,7 @@ def prepare_host(create_new=True, sampling="daily"):
path = f"/p/project/cjjsc42/{user}/DATA/toar_{sampling}/"
elif (len(hostname) > 2) and (hostname[:2] == "jw"):
path = f"/p/home/jusers/{user}/juwels/intelliaq/DATA/toar_{sampling}/"
elif "runner-6HmDp9Qd-project-2411-concurrent" in hostname:
elif runner_regex.match(hostname) is not None:
path = f"/home/{user}/machinelearningtools/data/toar_{sampling}/"
else:
raise OSError(f"unknown host '{hostname}'")
@@ -156,7 +156,7 @@ def set_bootstrap_path(bootstrap_path, data_path, sampling):
class PyTestRegex:
    """Assert that a given string meets some expectations."""

-    def __init__(self, pattern: str, flags: int = 0):
+    def __init__(self, pattern: Union[str, Pattern], flags: int = 0):
        self._regex = re.compile(pattern, flags)

    def __eq__(self, actual: str) -> bool:
......
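The old literal check "runner-6HmDp9Qd-project-2411-concurrent" in hostname only recognized one registered runner; runner_regex accepts any runner token for project 2411 but requires the numeric concurrency suffix (-\d+) that real runner hostnames carry, which is why the test fixtures gain the "-01" below. The same logic sketched as a POSIX ERE in bash (the XyZ98765 token is made up; the leading ^ mirrors Python's re.match, which anchors at the start):

#!/bin/bash
# ERE equivalent of re.compile(r"runner-.*-project-2411-concurrent-\d+")
RUNNER_RE='^runner-.*-project-2411-concurrent-[0-9]+'
for HOST in "runner-6HmDp9Qd-project-2411-concurrent-01" \
            "runner-XyZ98765-project-2411-concurrent-3" \
            "runner-6HmDp9Qd-project-2411-concurrent" \
            "zam347"; do
    if [[ ${HOST} =~ ${RUNNER_RE} ]]; then
        echo "${HOST}: use runner data path"
    else
        echo "${HOST}: no match"
    fi
done
# the third hostname (no numeric suffix) no longer matches -- hence the
# updated side_effect values in the tests below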
@@ -131,7 +131,7 @@ class TestTimeTracking:
class TestPrepareHost:

    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent"])
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
    @mock.patch("os.getlogin", return_value="testUser")
    @mock.patch("os.path.exists", return_value=True)
    def test_prepare_host(self, mock_host, mock_user, mock_path):
@@ -163,7 +163,7 @@ class TestPrepareHost:
        assert PyTestRegex(r"path '.*' does not exist for host '.*'\.") == e.value.args[0]

    @mock.patch("socket.gethostname", side_effect=["linux-aa9b", "ZAM144", "zam347", "jrtest", "jwtest",
-                                                   "runner-6HmDp9Qd-project-2411-concurrent"])
+                                                   "runner-6HmDp9Qd-project-2411-concurrent-01"])
    @mock.patch("os.getlogin", side_effect=OSError)
    @mock.patch("os.path.exists", return_value=True)
    def test_os_error(self, mock_path, mock_user, mock_host):
......