Commit 779b2f1e authored by Sebastian Achilles

Merge branch 'ah-todo-ify-task1' into 'main'

Add TODO-ified version of Task 1 (entirely in the batch scripts)

See merge request !5
parents 9c2e006b fb5b2b14
Showing 252 additions and 0 deletions
#!/usr/bin/make -f
TASKDIR = ../tasks
SOLUTIONDIR = ../solutions

PROCESSFILES = job_msa_juwels.sh job_msa_jureca.sh
COPYFILES = Makefile README.md mpi_hello_world.c compile.sh

TASKPROCESSFILES = $(addprefix $(TASKDIR)/,$(PROCESSFILES))
TASKCOPYFILES = $(addprefix $(TASKDIR)/,$(COPYFILES))
SOLUTIONPROCESSFILES = $(addprefix $(SOLUTIONDIR)/,$(PROCESSFILES))
SOLUTIONCOPYFILES = $(addprefix $(SOLUTIONDIR)/,$(COPYFILES))

.PHONY: all task
all: task

task: ${TASKPROCESSFILES} ${TASKCOPYFILES} ${SOLUTIONPROCESSFILES} ${SOLUTIONCOPYFILES}

${TASKPROCESSFILES}: $(PROCESSFILES)
	mkdir -p $(TASKDIR)/
	echo 'SOLUTION: false' | jinja2 --format yaml $(notdir $@) > $@

${SOLUTIONPROCESSFILES}: $(PROCESSFILES)
	mkdir -p $(SOLUTIONDIR)/
	echo 'SOLUTION: true' | jinja2 --format yaml $(notdir $@) > $@

${TASKCOPYFILES}: $(COPYFILES)
	mkdir -p $(TASKDIR)/
	cp $(notdir $@) $@

${SOLUTIONCOPYFILES}: $(COPYFILES)
	mkdir -p $(SOLUTIONDIR)/
	cp $(notdir $@) $@
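For orientation, the two processing rules above boil down to the following jinja2-cli invocations (illustrated for job_msa_juwels.sh; the Makefile additionally creates the target directories and copies the unprocessed files):

```bash
# Task variant: the SOLUTION blocks are dropped, only the TODOs remain.
echo 'SOLUTION: false' | jinja2 --format yaml job_msa_juwels.sh > ../tasks/job_msa_juwels.sh
# Solution variant: the SOLUTION blocks are rendered into the script.
echo 'SOLUTION: true' | jinja2 --format yaml job_msa_juwels.sh > ../solutions/job_msa_juwels.sh
```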
#!/bin/bash -x
#SBATCH --account=exalab
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=dc-cpu-devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the dc-gpu-devel partition
{%- if SOLUTION %}
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=dc-gpu-devel
{%- endif %}
#TODO: Extend the following line with a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
{% endif %}
#!/bin/bash -x
#SBATCH --account=training2317
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the develbooster partition
{%- if SOLUTION %}
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=develbooster
{%- endif %}
#TODO: Extend the following line with a second heterogeneous job component (running on develbooster) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
: xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
{% endif %}
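Both templates implement the same generic Slurm heterogeneous-job pattern that the TODOs ask for. A minimal sketch of that pattern, with placeholder partition names and binaries (not taken from this repository):

```bash
#!/bin/bash
#SBATCH --nodes=1 --ntasks-per-node=1 --partition=<first-partition>
#SBATCH hetjob
#SBATCH --nodes=1 --ntasks-per-node=1 --partition=<second-partition>

# One srun call, two components separated by a colon: the part before the
# colon runs on het component 0, the part after it on het component 1.
srun ./component0.out : ./component1.out
```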
# Hand-generated by Andreas
jinja2
PyYAML
jinja2-cli[yaml]
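These three entries look like a pip requirements list for the template processing above; assuming that is what they are, the tools can be installed along the lines of:

```bash
# Assumed setup step: install the listed tools into a virtual environment.
python3 -m venv venv
source venv/bin/activate
pip install jinja2 PyYAML 'jinja2-cli[yaml]'
```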
SYS:=none
_SYS=$(addprefix .,$(SYS))
MPICC=mpicc
EXECS=mpi_hello_world$(_SYS).out

.PHONY: all clean

all: ${EXECS}

mpi_hello_world$(_SYS).out: mpi_hello_world.c
	${MPICC} -o $@ $<

clean:
	rm ${EXECS} *.out || true
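The SYS variable only selects the suffix of the produced binary, so typical invocations for the two JUWELS modules would be:

```bash
# Each job script expects the matching binary name.
make SYS=juwels          # builds mpi_hello_world.juwels.out
make SYS=juwelsbooster   # builds mpi_hello_world.juwelsbooster.out
make clean               # removes the built executables
```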
# MSA Hello World

In this simple example, the Slurm heterogeneous job functionality is used to launch a job that spans JUWELS Cluster and JUWELS Booster.

Execute the following on JUWELS Booster:

```bash
bash compile.sh
```

Execute the following on JUWELS Cluster:

```bash
bash compile.sh
sbatch job_msa_juwels.sh
```

Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, with `N` being your job ID.
if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
elif [[ "$SYSTEMNAME" == "juwels" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
else
echo "The system $SYSTEMNAME is not supported!"
echo "Please load a capable MPI and compile the executables on each system with"
echo "make SYS=PARTIONNAME"
fi
\ No newline at end of file
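The script depends on the $SYSTEMNAME environment variable provided on the JSC systems; on a supported machine it is equivalent to running:

```bash
# Manual equivalent of compile.sh on juwels or juwelsbooster.
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
```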
@@ -6,9 +6,12 @@
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=dc-cpu-devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the dc-gpu-devel partition
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=dc-gpu-devel
#TODO: Extend the following line with a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
@@ -6,9 +6,12 @@
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the develbooster partition
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=develbooster
#TODO: Extend the following line with a second heterogeneous job component (running on develbooster) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
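Once one of the solution scripts has been submitted, the heterogeneous job is usually listed as two separate components; a rough way to check on it (the job ID is a placeholder):

```bash
# Sketch of inspecting a submitted heterogeneous job.
sbatch job_msa_juwels.sh   # prints "Submitted batch job <jobid>"
squeue --me                # het components typically show up as <jobid>+0 and <jobid>+1
sacct -j <jobid>           # accounting view after the job has finished
```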
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(NULL, NULL);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Get the name of the processor, which is the hostname
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    // Print off a hello world message
    printf("Hello world from processor %s, rank %d out of %d processors\n",
           processor_name, world_rank, world_size);

    // Finalize the MPI environment.
    MPI_Finalize();
}
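If the two heterogeneous components end up in a common MPI_COMM_WORLD (which is the point of this MSA exercise with ParaStation MPI), the job output should look roughly like the lines below. The hostnames are invented; with an MPI stack that does not bridge the components, each rank would instead report `out of 1 processors`.

```
Hello world from processor jwc00n001.juwels, rank 0 out of 2 processors
Hello world from processor jwb0001.juwels, rank 1 out of 2 processors
```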
SYS:=none
_SYS=$(addprefix .,$(SYS))
MPICC=mpicc
EXECS=mpi_hello_world$(_SYS).out

.PHONY: all clean

all: ${EXECS}

mpi_hello_world$(_SYS).out: mpi_hello_world.c
	${MPICC} -o $@ $<

clean:
	rm ${EXECS} *.out || true
# MSA Hello World

In this simple example, the Slurm heterogeneous job functionality is used to launch a job that spans JUWELS Cluster and JUWELS Booster.

Execute the following on JUWELS Booster:

```bash
bash compile.sh
```

Execute the following on JUWELS Cluster:

```bash
bash compile.sh
sbatch job_msa_juwels.sh
```

Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, with `N` being your job ID.
if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
elif [[ "$SYSTEMNAME" == "juwels" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
else
echo "The system $SYSTEMNAME is not supported!"
echo "Please load a capable MPI and compile the executables on each system with"
echo "make SYS=PARTIONNAME"
fi
\ No newline at end of file
#!/bin/bash -x
#SBATCH --account=exalab
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=dc-cpu-devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the dc-gpu-devel partition
#TODO: Extend the following line with a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
#!/bin/bash -x
#SBATCH --account=training2317
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=devel
#TODO: Add a hetjob component with 1 node and 1 task per node on the develbooster partition
#TODO: Extend the following line with a second heterogeneous job component (running on develbooster) using the colon syntax
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(NULL, NULL);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Get the name of the processor, which is the hostname
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    // Print off a hello world message
    printf("Hello world from processor %s, rank %d out of %d processors\n",
           processor_name, world_rank, world_size);

    // Finalize the MPI environment.
    MPI_Finalize();
}