# Build the MPI hello world for a given system:
# `make SYS=<systemname>` produces mpi_hello_world.<systemname>.out
SYS:=none
_SYS=$(addprefix .,$(SYS))
MPICC=mpicc
EXECS=mpi_hello_world$(_SYS).out

.PHONY: all clean

all: ${EXECS}

mpi_hello_world$(_SYS).out: mpi_hello_world.c
	${MPICC} -o $@ $<

clean:
	rm ${EXECS} *.out || true
# MSA Hello World
In this simple example, Slurm's heterogeneous job functionality is used to launch a single job spanning JUWELS Cluster and JUWELS Booster.
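Conceptually, such a job script consists of two `#SBATCH` blocks separated by the `#SBATCH hetjob` directive, plus a single `srun` whose colon-separated parts launch one executable per component; `job_msa_juwels.sh` in this example is the full version. A minimal sketch, with placeholder binary names:

```bash
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --partition=devel          # component 0: JUWELS Cluster
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --partition=develbooster   # component 1: JUWELS Booster

# One srun launches both components; the colon separates the parts.
srun ./hello.cluster.out : ./hello.booster.out
```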
Execute the following on JUWELS Booster:
```bash
bash compile.sh
```
Execute the following on JUWELS Cluster:
```bash
bash compile.sh
sbatch job_msa_juwels.sh
```
Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, with `N` being your job ID.
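With one task on each module, the output file should contain two lines, produced by the `printf` in `mpi_hello_world.c`; the hostnames below are made up, but the shape should match:

```
Hello world from processor jwc00n001.juwels, rank 0 out of 2 processors
Hello world from processor jwb0001.juwels, rank 1 out of 2 processors
```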
#!/bin/bash
if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
    echo "Building for $SYSTEMNAME"
    ml GCC ParaStationMPI
    make SYS=$SYSTEMNAME
elif [[ "$SYSTEMNAME" == "juwels" ]]; then
    echo "Building for $SYSTEMNAME"
    ml GCC ParaStationMPI
    make SYS=$SYSTEMNAME
else
    echo "The system $SYSTEMNAME is not supported!"
    echo "Please load a capable MPI and compile the executables on each system with"
    echo "make SYS=PARTITIONNAME"
fi

#!/bin/bash -x
# Heterogeneous job on JURECA-DC: a CPU-partition component plus a GPU-partition component
#SBATCH --account=exalab
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=dc-cpu-devel
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=dc-gpu-devel
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out

#!/bin/bash -x
# Heterogeneous job spanning JUWELS Cluster (first component) and JUWELS Booster (second component)
#SBATCH --account=training2317
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=devel
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=develbooster
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out

#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
    // Initialize the MPI environment
    MPI_Init(NULL, NULL);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // Get the name of the processor, which is the hostname
    char processor_name[MPI_MAX_PROCESSOR_NAME];
    int name_len;
    MPI_Get_processor_name(processor_name, &name_len);

    // Print off a hello world message
    printf("Hello world from processor %s, rank %d out of %d processors\n",
           processor_name, world_rank, world_size);

    // Finalize the MPI environment.
    MPI_Finalize();
}