diff --git a/01-MSA-hello-world/Makefile b/01-MSA-hello-world/.master/Makefile
similarity index 100%
rename from 01-MSA-hello-world/Makefile
rename to 01-MSA-hello-world/.master/Makefile
diff --git a/01-MSA-hello-world/README.md b/01-MSA-hello-world/.master/README.md
similarity index 100%
rename from 01-MSA-hello-world/README.md
rename to 01-MSA-hello-world/.master/README.md
diff --git a/01-MSA-hello-world/compile.sh b/01-MSA-hello-world/.master/compile.sh
similarity index 100%
rename from 01-MSA-hello-world/compile.sh
rename to 01-MSA-hello-world/.master/compile.sh
diff --git a/01-MSA-hello-world/.master/copy.mk b/01-MSA-hello-world/.master/copy.mk
new file mode 100755
index 0000000000000000000000000000000000000000..96f9690cc50990424468f3a4ca5c556283f20f4b
--- /dev/null
+++ b/01-MSA-hello-world/.master/copy.mk
@@ -0,0 +1,34 @@
+#!/usr/bin/make -f
+TASKDIR = ../tasks
+SOLUTIONDIR = ../solutions
+
+PROCESSFILES = job_msa_juwels.sh job_msa_jureca.sh
+COPYFILES = Makefile README.md mpi_hello_world.c compile.sh
+
+
+TASKPROCESSFILES = $(addprefix $(TASKDIR)/,$(PROCESSFILES))
+TASKCOPYFILES = $(addprefix $(TASKDIR)/,$(COPYFILES))
+SOLUTIONPROCESSFILES = $(addprefix $(SOLUTIONDIR)/,$(PROCESSFILES))
+SOLUTIONCOPYFILES = $(addprefix $(SOLUTIONDIR)/,$(COPYFILES))
+
+.PHONY: all task
+all: task
+task: ${TASKPROCESSFILES} ${TASKCOPYFILES} ${SOLUTIONPROCESSFILES} ${SOLUTIONCOPYFILES}
+
+
+${TASKPROCESSFILES}: $(PROCESSFILES)
+	mkdir -p $(TASKDIR)/
+	echo 'SOLUTION: false' | jinja2 --format yaml $(notdir $@) > $@
+
+${SOLUTIONPROCESSFILES}: $(PROCESSFILES)
+	mkdir -p $(SOLUTIONDIR)/
+	echo 'SOLUTION: true' | jinja2 --format yaml $(notdir $@) > $@
+
+
+${TASKCOPYFILES}: $(COPYFILES)
+	mkdir -p $(TASKDIR)/
+	cp $(notdir $@) $@
+
+${SOLUTIONCOPYFILES}: $(COPYFILES)
+	mkdir -p $(SOLUTIONDIR)/
+	cp $(notdir $@) $@
\ No newline at end of file
diff --git a/01-MSA-hello-world/.master/job_msa_jureca.sh b/01-MSA-hello-world/.master/job_msa_jureca.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5785e1010970c17331eeda3b657c48d76998c48f
--- /dev/null
+++ b/01-MSA-hello-world/.master/job_msa_jureca.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+#SBATCH --account=exalab
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+{%- if SOLUTION %}
+#SBATCH hetjob
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --partition=dc-gpu-devel
+{%- endif %}
+
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
+: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
+{% endif %}
diff --git a/01-MSA-hello-world/.master/job_msa_juwels.sh b/01-MSA-hello-world/.master/job_msa_juwels.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e0e7055cf6e7a662774f2439e5b36b83aa1ad005
--- /dev/null
+++ b/01-MSA-hello-world/.master/job_msa_juwels.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+#SBATCH --account=training2317
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+{%- if SOLUTION %}
+#SBATCH hetjob
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --partition=develbooster
+{%- endif %}
+
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
+: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
+{% endif %}
diff --git a/01-MSA-hello-world/mpi_hello_world.c b/01-MSA-hello-world/.master/mpi_hello_world.c
similarity index 100%
rename from 01-MSA-hello-world/mpi_hello_world.c
rename to 01-MSA-hello-world/.master/mpi_hello_world.c
diff --git a/01-MSA-hello-world/.master/requirements.txt b/01-MSA-hello-world/.master/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c12da848baea07a2cf9343fe35cc247a47030322
--- /dev/null
+++ b/01-MSA-hello-world/.master/requirements.txt
@@ -0,0 +1,4 @@
+# Hand-generated by Andreas
+jinja2
+PyYAML
+jinja2-cli[yaml]
\ No newline at end of file
diff --git a/01-MSA-hello-world/solutions/Makefile b/01-MSA-hello-world/solutions/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9921395f86a0367d329449a03837d7ca78fd6866
--- /dev/null
+++ b/01-MSA-hello-world/solutions/Makefile
@@ -0,0 +1,14 @@
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
+MPICC=mpicc
+
+EXECS=mpi_hello_world$(_SYS).out
+
+.PHONY: all clean
+all: ${EXECS}
+
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
+	${MPICC} -o $@ $<
+
+clean:
+	rm ${EXECS} *.out || true
diff --git a/01-MSA-hello-world/solutions/README.md b/01-MSA-hello-world/solutions/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c92a0bef97de8d6e00f227e73fe134960438874
--- /dev/null
+++ b/01-MSA-hello-world/solutions/README.md
@@ -0,0 +1,18 @@
+# MSA Hello World
+
+In this simple example, the Slurm heterogeneous job functionality is used to launch a job between JUWELS Cluster and Booster.
+
+Execute the following on JUWELS Booster
+
+```bash
+bash compile.sh
+```
+
+Execute the following on JUWELS Cluster
+
+```bash
+bash compile.sh
+sbatch job_msa_juwels.sh
+```
+
+Monitor your job with `squeue --me`. Once it has run successfully, have a look at the output in `slurm-out.N`, with `N` being your job number.
\ No newline at end of file
diff --git a/01-MSA-hello-world/solutions/compile.sh b/01-MSA-hello-world/solutions/compile.sh
new file mode 100755
index 0000000000000000000000000000000000000000..78e3228a4e51dc29b7ff9c24c4d1a7f2247e4f97
--- /dev/null
+++ b/01-MSA-hello-world/solutions/compile.sh
@@ -0,0 +1,13 @@
+if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+elif [[ "$SYSTEMNAME" == "juwels" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+else
+	echo "The system $SYSTEMNAME is not supported!"
+	echo "Please load a capable MPI and compile the executables on each system with"
+	echo "make SYS=PARTIONNAME"
+fi
\ No newline at end of file
diff --git a/01-MSA-hello-world/job_msa_jureca.sh b/01-MSA-hello-world/solutions/job_msa_jureca.sh
similarity index 68%
rename from 01-MSA-hello-world/job_msa_jureca.sh
rename to 01-MSA-hello-world/solutions/job_msa_jureca.sh
index 7e1323e2851b5ada58c0dc974129a59afd64051a..15b917ddccea4855e92307700acdf686ff36e306 100644
--- a/01-MSA-hello-world/job_msa_jureca.sh
+++ b/01-MSA-hello-world/solutions/job_msa_jureca.sh
@@ -6,9 +6,12 @@
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
 #SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=dc-gpu-devel
 
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
+
diff --git a/01-MSA-hello-world/job_msa_juwels.sh b/01-MSA-hello-world/solutions/job_msa_juwels.sh
similarity index 69%
rename from 01-MSA-hello-world/job_msa_juwels.sh
rename to 01-MSA-hello-world/solutions/job_msa_juwels.sh
index 48a492abf1395ed768e9f14abdf08cfff523bb7f..ac0173bcb1ffbd8ba9d0458ff15e266ba0282830 100644
--- a/01-MSA-hello-world/job_msa_juwels.sh
+++ b/01-MSA-hello-world/solutions/job_msa_juwels.sh
@@ -6,9 +6,12 @@
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
 #SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=develbooster
 
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
+
diff --git a/01-MSA-hello-world/solutions/mpi_hello_world.c b/01-MSA-hello-world/solutions/mpi_hello_world.c
new file mode 100644
index 0000000000000000000000000000000000000000..2c2a1bdc59fc6287d7b6baee125553ec977b4be7
--- /dev/null
+++ b/01-MSA-hello-world/solutions/mpi_hello_world.c
@@ -0,0 +1,27 @@
+#include <mpi.h>
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+    // Initialize the MPI environment
+    MPI_Init(NULL, NULL);
+
+    // Get the number of processes
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+    // Get the rank of the process
+    int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+    // Get the name of the processor, which is the hostname
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+    int name_len;
+    MPI_Get_processor_name(processor_name, &name_len);
+
+    // Print off a hello world message
+    printf("Hello world from processor %s, rank %d out of %d processors\n",
+           processor_name, world_rank, world_size);
+
+    // Finalize the MPI environment.
+    MPI_Finalize();
+}
diff --git a/01-MSA-hello-world/tasks/Makefile b/01-MSA-hello-world/tasks/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9921395f86a0367d329449a03837d7ca78fd6866
--- /dev/null
+++ b/01-MSA-hello-world/tasks/Makefile
@@ -0,0 +1,14 @@
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
+MPICC=mpicc
+
+EXECS=mpi_hello_world$(_SYS).out
+
+.PHONY: all clean
+all: ${EXECS}
+
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
+	${MPICC} -o $@ $<
+
+clean:
+	rm ${EXECS} *.out || true
diff --git a/01-MSA-hello-world/tasks/README.md b/01-MSA-hello-world/tasks/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c92a0bef97de8d6e00f227e73fe134960438874
--- /dev/null
+++ b/01-MSA-hello-world/tasks/README.md
@@ -0,0 +1,18 @@
+# MSA Hello World
+
+In this simple example, the Slurm heterogeneous job functionality is used to launch a job between JUWELS Cluster and Booster.
+
+Execute the following on JUWELS Booster
+
+```bash
+bash compile.sh
+```
+
+Execute the following on JUWELS Cluster
+
+```bash
+bash compile.sh
+sbatch job_msa_juwels.sh
+```
+
+Monitor your job with `squeue --me`. Once it has run successfully, have a look at the output in `slurm-out.N`, with `N` being your job number.
\ No newline at end of file
diff --git a/01-MSA-hello-world/tasks/compile.sh b/01-MSA-hello-world/tasks/compile.sh
new file mode 100755
index 0000000000000000000000000000000000000000..78e3228a4e51dc29b7ff9c24c4d1a7f2247e4f97
--- /dev/null
+++ b/01-MSA-hello-world/tasks/compile.sh
@@ -0,0 +1,13 @@
+if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+elif [[ "$SYSTEMNAME" == "juwels" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+else
+	echo "The system $SYSTEMNAME is not supported!"
+	echo "Please load a capable MPI and compile the executables on each system with"
+	echo "make SYS=PARTIONNAME"
+fi
\ No newline at end of file
diff --git a/01-MSA-hello-world/tasks/job_msa_jureca.sh b/01-MSA-hello-world/tasks/job_msa_jureca.sh
new file mode 100644
index 0000000000000000000000000000000000000000..07bc86d491f875de8b595ba1ed03f3a6e42e2b52
--- /dev/null
+++ b/01-MSA-hello-world/tasks/job_msa_jureca.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+#SBATCH --account=exalab
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out 
diff --git a/01-MSA-hello-world/tasks/job_msa_juwels.sh b/01-MSA-hello-world/tasks/job_msa_juwels.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ca48bf36ce5e96d0c2726e24212669f65c63ee4e
--- /dev/null
+++ b/01-MSA-hello-world/tasks/job_msa_juwels.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+#SBATCH --account=training2317
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out 
diff --git a/01-MSA-hello-world/tasks/mpi_hello_world.c b/01-MSA-hello-world/tasks/mpi_hello_world.c
new file mode 100644
index 0000000000000000000000000000000000000000..2c2a1bdc59fc6287d7b6baee125553ec977b4be7
--- /dev/null
+++ b/01-MSA-hello-world/tasks/mpi_hello_world.c
@@ -0,0 +1,27 @@
+#include <mpi.h>
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+    // Initialize the MPI environment
+    MPI_Init(NULL, NULL);
+
+    // Get the number of processes
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+    // Get the rank of the process
+    int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+    // Get the name of the processor, which is the hostname
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+    int name_len;
+    MPI_Get_processor_name(processor_name, &name_len);
+
+    // Print off a hello world message
+    printf("Hello world from processor %s, rank %d out of %d processors\n",
+           processor_name, world_rank, world_size);
+
+    // Finalize the MPI environment.
+    MPI_Finalize();
+}