diff --git a/01-MSA-hello-world/Makefile b/01-MSA-hello-world/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..9921395f86a0367d329449a03837d7ca78fd6866
--- /dev/null
+++ b/01-MSA-hello-world/Makefile
@@ -0,0 +1,14 @@
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
+MPICC=mpicc
+
+EXECS=mpi_hello_world$(_SYS).out
+
+.PHONY: all clean
+all: ${EXECS}
+
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
+	${MPICC} -o $@ $<
+
+clean:
+	rm ${EXECS} *.out || true
diff --git a/01-MSA-hello-world/README.md b/01-MSA-hello-world/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5c92a0bef97de8d6e00f227e73fe134960438874
--- /dev/null
+++ b/01-MSA-hello-world/README.md
@@ -0,0 +1,18 @@
+# MSA Hello World
+
+In this simple example, the Slurm heterogeneous job functionality is used to launch a job across JUWELS Cluster and Booster.
+
+Execute the following on JUWELS Booster:
+
+```bash
+bash compile.sh
+```
+
+Execute the following on JUWELS Cluster:
+
+```bash
+bash compile.sh
+sbatch job_msa_juwels.sh
+```
+
+Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, with `N` being your job ID.
\ No newline at end of file
diff --git a/01-MSA-hello-world/compile.sh b/01-MSA-hello-world/compile.sh
new file mode 100755
index 0000000000000000000000000000000000000000..78e3228a4e51dc29b7ff9c24c4d1a7f2247e4f97
--- /dev/null
+++ b/01-MSA-hello-world/compile.sh
@@ -0,0 +1,13 @@
+if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
+    echo "Building for $SYSTEMNAME"
+    ml GCC ParaStationMPI
+    make SYS=$SYSTEMNAME
+elif [[ "$SYSTEMNAME" == "juwels" ]]; then
+    echo "Building for $SYSTEMNAME"
+    ml GCC ParaStationMPI
+    make SYS=$SYSTEMNAME
+else
+    echo "The system $SYSTEMNAME is not supported!"
+ echo "Please load a capable MPI and compile the executables on each system with" + echo "make SYS=PARTIONNAME" +fi \ No newline at end of file diff --git a/01-MSA-hello-world/job_msa_jureca.sh b/01-MSA-hello-world/job_msa_jureca.sh new file mode 100644 index 0000000000000000000000000000000000000000..7e1323e2851b5ada58c0dc974129a59afd64051a --- /dev/null +++ b/01-MSA-hello-world/job_msa_jureca.sh @@ -0,0 +1,14 @@ +#!/bin/bash -x +#SBATCH --account=exalab +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --output=slurm-out.%j +#SBATCH --error=slurm-err.%j +#SBATCH --time=00:15:00 +#SBATCH --partition=dc-cpu-devel +#SBATCH hetjob +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --partition=dc-gpu-devel + +srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out diff --git a/01-MSA-hello-world/job_msa_juwels.sh b/01-MSA-hello-world/job_msa_juwels.sh new file mode 100644 index 0000000000000000000000000000000000000000..48a492abf1395ed768e9f14abdf08cfff523bb7f --- /dev/null +++ b/01-MSA-hello-world/job_msa_juwels.sh @@ -0,0 +1,14 @@ +#!/bin/bash -x +#SBATCH --account=training2317 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --output=slurm-out.%j +#SBATCH --error=slurm-err.%j +#SBATCH --time=00:15:00 +#SBATCH --partition=devel +#SBATCH hetjob +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=1 +#SBATCH --partition=develbooster + +srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out diff --git a/01-MSA-hello-world/mpi_hello_world.c b/01-MSA-hello-world/mpi_hello_world.c new file mode 100644 index 0000000000000000000000000000000000000000..2c2a1bdc59fc6287d7b6baee125553ec977b4be7 --- /dev/null +++ b/01-MSA-hello-world/mpi_hello_world.c @@ -0,0 +1,27 @@ +#include <mpi.h> +#include <stdio.h> + +int main(int argc, char** argv) { + // Initialize the MPI environment + MPI_Init(NULL, NULL); + + // Get the number of processes + int world_size; + MPI_Comm_size(MPI_COMM_WORLD, &world_size); + + // Get the rank of the process + int world_rank; + MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); + + // Get the name of the processor, which is the hostname + char processor_name[MPI_MAX_PROCESSOR_NAME]; + int name_len; + MPI_Get_processor_name(processor_name, &name_len); + + // Print off a hello world message + printf("Hello world from processor %s, rank %d out of %d processors\n", + processor_name, world_rank, world_size); + + // Finalize the MPI environment. + MPI_Finalize(); +}