From fb5b2b142d4ce85e00134bae570404fb5b4fbb5d Mon Sep 17 00:00:00 2001
From: Andreas Herten <a.herten@fz-juelich.de>
Date: Wed, 14 Jun 2023 22:53:03 +0200
Subject: [PATCH] Add TODO-ified version of Task 1 (entirely in the batch
 scripts)

The .master folder uses the copy.mk script from all our courses to generate solutions and tasks. But because we are not using C files, we cannot use the cppp (C partial pre-processor) which we use for the tasks in the other courses. BUT Jinja to the rescue. I found a jinja2-cli application which consumes a YAML file and processes an arbitrary other file. And it works! Look at the requirements.txt to set up your venv for the jinja2-cli app. That said, the pre-processed versions of the files are committed, so there is no need to install it.
---
 01-MSA-hello-world/{ => .master}/Makefile     |  0
 01-MSA-hello-world/{ => .master}/README.md    |  0
 01-MSA-hello-world/{ => .master}/compile.sh   |  0
 01-MSA-hello-world/.master/copy.mk            | 34 +++++++++++++++++++
 01-MSA-hello-world/.master/job_msa_jureca.sh  | 20 +++++++++++
 01-MSA-hello-world/.master/job_msa_juwels.sh  | 20 +++++++++++
 .../{ => .master}/mpi_hello_world.c           |  0
 01-MSA-hello-world/.master/requirements.txt   |  4 +++
 01-MSA-hello-world/solutions/Makefile         | 14 ++++++++
 01-MSA-hello-world/solutions/README.md        | 18 ++++++++++
 01-MSA-hello-world/solutions/compile.sh       | 13 +++++++
 .../{ => solutions}/job_msa_jureca.sh         |  3 ++
 .../{ => solutions}/job_msa_juwels.sh         |  3 ++
 .../solutions/mpi_hello_world.c               | 27 +++++++++++++++
 01-MSA-hello-world/tasks/Makefile             | 14 ++++++++
 01-MSA-hello-world/tasks/README.md            | 18 ++++++++++
 01-MSA-hello-world/tasks/compile.sh           | 13 +++++++
 01-MSA-hello-world/tasks/job_msa_jureca.sh    | 12 +++++++
 01-MSA-hello-world/tasks/job_msa_juwels.sh    | 12 +++++++
 01-MSA-hello-world/tasks/mpi_hello_world.c    | 27 +++++++++++++++
 20 files changed, 252 insertions(+)
 rename 01-MSA-hello-world/{ => .master}/Makefile (100%)
 rename 01-MSA-hello-world/{ => .master}/README.md (100%)
 rename 01-MSA-hello-world/{ => .master}/compile.sh (100%)
 create mode 100755 01-MSA-hello-world/.master/copy.mk
 create mode 100644 01-MSA-hello-world/.master/job_msa_jureca.sh
 create mode 100644 01-MSA-hello-world/.master/job_msa_juwels.sh
 rename 01-MSA-hello-world/{ => .master}/mpi_hello_world.c (100%)
 create mode 100644 01-MSA-hello-world/.master/requirements.txt
 create mode 100644 01-MSA-hello-world/solutions/Makefile
 create mode 100644 01-MSA-hello-world/solutions/README.md
 create mode 100755 01-MSA-hello-world/solutions/compile.sh
 rename 01-MSA-hello-world/{ => solutions}/job_msa_jureca.sh (68%)
 rename 01-MSA-hello-world/{ => solutions}/job_msa_juwels.sh (69%)
 create mode 100644 01-MSA-hello-world/solutions/mpi_hello_world.c
 create mode 100644 01-MSA-hello-world/tasks/Makefile
 create mode 100644 01-MSA-hello-world/tasks/README.md
 create mode 100755 01-MSA-hello-world/tasks/compile.sh
 create mode 100644 01-MSA-hello-world/tasks/job_msa_jureca.sh
 create mode 100644 01-MSA-hello-world/tasks/job_msa_juwels.sh
 create mode 100644 01-MSA-hello-world/tasks/mpi_hello_world.c

diff --git a/01-MSA-hello-world/Makefile b/01-MSA-hello-world/.master/Makefile
similarity index 100%
rename from 01-MSA-hello-world/Makefile
rename to 01-MSA-hello-world/.master/Makefile
diff --git a/01-MSA-hello-world/README.md b/01-MSA-hello-world/.master/README.md
similarity index 100%
rename from 01-MSA-hello-world/README.md
rename to 01-MSA-hello-world/.master/README.md
diff --git a/01-MSA-hello-world/compile.sh b/01-MSA-hello-world/.master/compile.sh
similarity index 100%
rename from 01-MSA-hello-world/compile.sh
rename to 01-MSA-hello-world/.master/compile.sh
diff --git a/01-MSA-hello-world/.master/copy.mk b/01-MSA-hello-world/.master/copy.mk
new file mode 100755
index 0000000..96f9690
--- /dev/null
+++ b/01-MSA-hello-world/.master/copy.mk
@@ -0,0 +1,34 @@
+#!/usr/bin/make -f
+# Generate task and solution variants of the course material.
+# PROCESSFILES are piped through jinja2 (the SOLUTION flag selects the
+# variant); COPYFILES are copied verbatim into both output directories.
+TASKDIR := ../tasks
+SOLUTIONDIR := ../solutions
+
+PROCESSFILES := job_msa_juwels.sh job_msa_jureca.sh
+COPYFILES := Makefile README.md mpi_hello_world.c compile.sh
+
+TASKPROCESSFILES := $(addprefix $(TASKDIR)/,$(PROCESSFILES))
+TASKCOPYFILES := $(addprefix $(TASKDIR)/,$(COPYFILES))
+SOLUTIONPROCESSFILES := $(addprefix $(SOLUTIONDIR)/,$(PROCESSFILES))
+SOLUTIONCOPYFILES := $(addprefix $(SOLUTIONDIR)/,$(COPYFILES))
+
+.PHONY: all task
+all: task
+task: $(TASKPROCESSFILES) $(TASKCOPYFILES) $(SOLUTIONPROCESSFILES) $(SOLUTIONCOPYFILES)
+
+# Static pattern rules: each output depends only on its own source file,
+# so touching one input no longer regenerates every file.
+$(TASKPROCESSFILES): $(TASKDIR)/%: %
+	mkdir -p $(@D)
+	echo 'SOLUTION: false' | jinja2 --format yaml $< > $@
+
+$(SOLUTIONPROCESSFILES): $(SOLUTIONDIR)/%: %
+	mkdir -p $(@D)
+	echo 'SOLUTION: true' | jinja2 --format yaml $< > $@
+
+$(TASKCOPYFILES): $(TASKDIR)/%: %
+	mkdir -p $(@D)
+	cp $< $@
+
+$(SOLUTIONCOPYFILES): $(SOLUTIONDIR)/%: %
+	mkdir -p $(@D)
+	cp $< $@
diff --git a/01-MSA-hello-world/.master/job_msa_jureca.sh b/01-MSA-hello-world/.master/job_msa_jureca.sh
new file mode 100644
index 0000000..5785e10
--- /dev/null
+++ b/01-MSA-hello-world/.master/job_msa_jureca.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+#SBATCH --account=exalab
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+{%- if SOLUTION %}
+#SBATCH hetjob
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --partition=dc-gpu-devel
+{%- endif %}
+
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
+: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
+{% endif %}
diff --git a/01-MSA-hello-world/.master/job_msa_juwels.sh b/01-MSA-hello-world/.master/job_msa_juwels.sh
new file mode 100644
index 0000000..e0e7055
--- /dev/null
+++ b/01-MSA-hello-world/.master/job_msa_juwels.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+#SBATCH --account=training2317
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+{%- if SOLUTION %}
+#SBATCH hetjob
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --partition=develbooster
+{%- endif %}
+
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
+: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
+{% endif %}
diff --git a/01-MSA-hello-world/mpi_hello_world.c b/01-MSA-hello-world/.master/mpi_hello_world.c
similarity index 100%
rename from 01-MSA-hello-world/mpi_hello_world.c
rename to 01-MSA-hello-world/.master/mpi_hello_world.c
diff --git a/01-MSA-hello-world/.master/requirements.txt b/01-MSA-hello-world/.master/requirements.txt
new file mode 100644
index 0000000..c12da84
--- /dev/null
+++ b/01-MSA-hello-world/.master/requirements.txt
@@ -0,0 +1,4 @@
+# Hand-generated by Andreas
+jinja2
+PyYAML
+jinja2-cli[yaml]
\ No newline at end of file
diff --git a/01-MSA-hello-world/solutions/Makefile b/01-MSA-hello-world/solutions/Makefile
new file mode 100644
index 0000000..9921395
--- /dev/null
+++ b/01-MSA-hello-world/solutions/Makefile
@@ -0,0 +1,14 @@
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
+MPICC=mpicc
+
+EXECS=mpi_hello_world$(_SYS).out
+
+.PHONY: all clean
+all: ${EXECS}
+
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
+	${MPICC} -o $@ $<
+
+clean:
+	rm ${EXECS} *.out || true
diff --git a/01-MSA-hello-world/solutions/README.md b/01-MSA-hello-world/solutions/README.md
new file mode 100644
index 0000000..5c92a0b
--- /dev/null
+++ b/01-MSA-hello-world/solutions/README.md
@@ -0,0 +1,18 @@
+# MSA Hello World
+
+In this simple example, the Slurm heterogeneous job functionality is used to launch a job between JUWELS Cluster and Booster.
+
+Execute the following on JUWELS Booster
+
+```bash
+bash compile.sh
+```
+
+Execute the following on JUWELS Cluster
+
+```bash
+bash compile.sh
+sbatch job_msa_juwels.sh
+```
+
+Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, where `N` is your job number.
\ No newline at end of file
diff --git a/01-MSA-hello-world/solutions/compile.sh b/01-MSA-hello-world/solutions/compile.sh
new file mode 100755
index 0000000..78e3228
--- /dev/null
+++ b/01-MSA-hello-world/solutions/compile.sh
@@ -0,0 +1,13 @@
+if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+elif [[ "$SYSTEMNAME" == "juwels" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+else
+	echo "The system $SYSTEMNAME is not supported!"
+	echo "Please load a capable MPI and compile the executables on each system with"
+	echo "make SYS=PARTITIONNAME"
+fi
\ No newline at end of file
diff --git a/01-MSA-hello-world/job_msa_jureca.sh b/01-MSA-hello-world/solutions/job_msa_jureca.sh
similarity index 68%
rename from 01-MSA-hello-world/job_msa_jureca.sh
rename to 01-MSA-hello-world/solutions/job_msa_jureca.sh
index 7e1323e..15b917d 100644
--- a/01-MSA-hello-world/job_msa_jureca.sh
+++ b/01-MSA-hello-world/solutions/job_msa_jureca.sh
@@ -6,9 +6,12 @@
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
 #SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=dc-gpu-devel
 
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
+
diff --git a/01-MSA-hello-world/job_msa_juwels.sh b/01-MSA-hello-world/solutions/job_msa_juwels.sh
similarity index 69%
rename from 01-MSA-hello-world/job_msa_juwels.sh
rename to 01-MSA-hello-world/solutions/job_msa_juwels.sh
index 48a492a..ac0173b 100644
--- a/01-MSA-hello-world/job_msa_juwels.sh
+++ b/01-MSA-hello-world/solutions/job_msa_juwels.sh
@@ -6,9 +6,12 @@
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
 #SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=develbooster
 
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
+
diff --git a/01-MSA-hello-world/solutions/mpi_hello_world.c b/01-MSA-hello-world/solutions/mpi_hello_world.c
new file mode 100644
index 0000000..2c2a1bd
--- /dev/null
+++ b/01-MSA-hello-world/solutions/mpi_hello_world.c
@@ -0,0 +1,27 @@
+#include <mpi.h>
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+    // Initialize the MPI environment
+    MPI_Init(NULL, NULL);
+
+    // Get the number of processes
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+    // Get the rank of the process
+    int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+    // Get the name of the processor, which is the hostname
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+    int name_len;
+    MPI_Get_processor_name(processor_name, &name_len);
+
+    // Print off a hello world message
+    printf("Hello world from processor %s, rank %d out of %d processors\n",
+           processor_name, world_rank, world_size);
+
+    // Finalize the MPI environment.
+    MPI_Finalize();
+}
diff --git a/01-MSA-hello-world/tasks/Makefile b/01-MSA-hello-world/tasks/Makefile
new file mode 100644
index 0000000..9921395
--- /dev/null
+++ b/01-MSA-hello-world/tasks/Makefile
@@ -0,0 +1,14 @@
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
+MPICC=mpicc
+
+EXECS=mpi_hello_world$(_SYS).out
+
+.PHONY: all clean
+all: ${EXECS}
+
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
+	${MPICC} -o $@ $<
+
+clean:
+	rm ${EXECS} *.out || true
diff --git a/01-MSA-hello-world/tasks/README.md b/01-MSA-hello-world/tasks/README.md
new file mode 100644
index 0000000..5c92a0b
--- /dev/null
+++ b/01-MSA-hello-world/tasks/README.md
@@ -0,0 +1,18 @@
+# MSA Hello World
+
+In this simple example, the Slurm heterogeneous job functionality is used to launch a job between JUWELS Cluster and Booster.
+
+Execute the following on JUWELS Booster
+
+```bash
+bash compile.sh
+```
+
+Execute the following on JUWELS Cluster
+
+```bash
+bash compile.sh
+sbatch job_msa_juwels.sh
+```
+
+Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, where `N` is your job number.
\ No newline at end of file
diff --git a/01-MSA-hello-world/tasks/compile.sh b/01-MSA-hello-world/tasks/compile.sh
new file mode 100755
index 0000000..78e3228
--- /dev/null
+++ b/01-MSA-hello-world/tasks/compile.sh
@@ -0,0 +1,13 @@
+if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+elif [[ "$SYSTEMNAME" == "juwels" ]]; then
+	echo "Building for $SYSTEMNAME"
+	ml GCC ParaStationMPI
+	make SYS=$SYSTEMNAME
+else
+	echo "The system $SYSTEMNAME is not supported!"
+	echo "Please load a capable MPI and compile the executables on each system with"
+	echo "make SYS=PARTITIONNAME"
+fi
\ No newline at end of file
diff --git a/01-MSA-hello-world/tasks/job_msa_jureca.sh b/01-MSA-hello-world/tasks/job_msa_jureca.sh
new file mode 100644
index 0000000..07bc86d
--- /dev/null
+++ b/01-MSA-hello-world/tasks/job_msa_jureca.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+#SBATCH --account=exalab
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=dc-cpu-devel
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out 
diff --git a/01-MSA-hello-world/tasks/job_msa_juwels.sh b/01-MSA-hello-world/tasks/job_msa_juwels.sh
new file mode 100644
index 0000000..ca48bf3
--- /dev/null
+++ b/01-MSA-hello-world/tasks/job_msa_juwels.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -x
+#SBATCH --account=training2317
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=1
+#SBATCH --output=slurm-out.%j
+#SBATCH --error=slurm-err.%j
+#SBATCH --time=00:15:00
+#SBATCH --partition=devel
+#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+
+#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out 
diff --git a/01-MSA-hello-world/tasks/mpi_hello_world.c b/01-MSA-hello-world/tasks/mpi_hello_world.c
new file mode 100644
index 0000000..2c2a1bd
--- /dev/null
+++ b/01-MSA-hello-world/tasks/mpi_hello_world.c
@@ -0,0 +1,27 @@
+#include <mpi.h>
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+    // Initialize the MPI environment
+    MPI_Init(NULL, NULL);
+
+    // Get the number of processes
+    int world_size;
+    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
+
+    // Get the rank of the process
+    int world_rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
+
+    // Get the name of the processor, which is the hostname
+    char processor_name[MPI_MAX_PROCESSOR_NAME];
+    int name_len;
+    MPI_Get_processor_name(processor_name, &name_len);
+
+    // Print off a hello world message
+    printf("Hello world from processor %s, rank %d out of %d processors\n",
+           processor_name, world_rank, world_size);
+
+    // Finalize the MPI environment.
+    MPI_Finalize();
+}
-- 
GitLab