Commit 3b1335b3 authored by Andreas Herten

Some reshuffling and documentation

Revamp how things are compiled and move the system name into the executable's name; make JUWELS the default and also add instructions for JUWELS
parent 74db8fc4
1 merge request: !1 (add 01 MSA hello-world)
Makefile (diff):
-EXECS=mpi_hello_world.out
+SYS:=none
+_SYS=$(addprefix .,$(SYS))
 MPICC=mpicc
+EXECS=mpi_hello_world$(_SYS).out
+.PHONY: all clean
 all: ${EXECS}
-mpi_hello_world.out: mpi_hello_world.c
+mpi_hello_world$(_SYS).out: mpi_hello_world.c
-	${MPICC} -o mpi_hello_world.out mpi_hello_world.c
+	${MPICC} -o $@ $<
 clean:
 	rm ${EXECS} *.out || true
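With this change, the target system is baked into the executable name via the `SYS` variable (`$(addprefix .,$(SYS))` turns `SYS=juwels` into the suffix `.juwels`). A quick sketch of the resulting invocations; the output names follow directly from the Makefile above, and `none` is the default when `SYS` is not given:

```bash
make                    # SYS defaults to none -> builds mpi_hello_world.none.out
make SYS=juwels         # builds mpi_hello_world.juwels.out
make SYS=juwelsbooster  # builds mpi_hello_world.juwelsbooster.out
```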
README.md:

# MSA Hello World

In this simple example, Slurm's heterogeneous job functionality is used to launch a single job that spans the JUWELS Cluster and the JUWELS Booster.
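For readers new to Slurm heterogeneous jobs: one batch script can request several independent resource blocks, separated by a `#SBATCH hetjob` line, and a single `srun` then launches one binary per block, separated by `:`. A minimal sketch, with account, partition, and binary names purely illustrative (not taken from this repository):

```bash
#!/bin/bash
#SBATCH --account=exampleproject  # hypothetical project account
#SBATCH --nodes=1
#SBATCH --partition=batch         # first component: Cluster
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --partition=booster       # second component: Booster

# One executable per job component, separated by ':'
srun ./hello.cluster.out : ./hello.booster.out
```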
Execute the following on JUWELS Booster:
```bash
bash compile.sh
```
Execute the following on JUWELS Cluster:
```bash
bash compile.sh
sbatch job_msa_juwels.sh
```
Monitor your job with `squeue --me`. Once it has run through successfully, have a look at the output in `slurm-out.N`, with `N` being your job number.
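The program prints one greeting per rank, including the hostname returned by `MPI_Get_processor_name` (see the source excerpt below), so a successful run shows hostnames from both modules. Roughly, assuming the classic hello-world print format; the exact wording and hostnames will differ:

```bash
$ cat slurm-out.N
Hello world from processor jwc00n001, rank 0 out of 2 processors  # Cluster node (illustrative)
Hello world from processor jwb0001, rank 1 out of 2 processors    # Booster node (illustrative)
```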
if [[ "$SYSTEMNAME" == "juwelsbooster" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
elif [[ "$SYSTEMNAME" == "juwels" ]]; then
echo "Building for $SYSTEMNAME"
ml GCC ParaStationMPI
make SYS=$SYSTEMNAME
else
echo "The system $SYSTEMNAME is not supported!"
echo "Please load a capable MPI and compile the executables on each system with"
echo "make SYS=PARTIONNAME"
fi
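`$SYSTEMNAME` is an environment variable that JSC machines set to the system's name (e.g. `juwels`, `juwelsbooster`), which is what lets the same script pick the right build on either module. Usage, with the artifact names following from the Makefile above:

```bash
# on a JUWELS Cluster login node ($SYSTEMNAME=juwels):
bash compile.sh    # -> mpi_hello_world.juwels.out
# on a JUWELS Booster login node ($SYSTEMNAME=juwelsbooster):
bash compile.sh    # -> mpi_hello_world.juwelsbooster.out
```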
The former per-system compile scripts are removed in favour of compile.sh; each built the single default target and renamed it afterwards:

Cluster (removed):
ml GCC ParaStationMPI
make
mv mpi_hello_world.out mpi_hello_world_cluster.out

Booster (removed):
ml GCC ParaStationMPI
make
mv mpi_hello_world.out mpi_hello_world_booster.out
Job script for the JURECA variant (excerpt, @@ -11,4 +11,4 @@):
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=dc-gpu-devel
-srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world_cluster.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world_booster.out
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
Job script referenced in the README as job_msa_juwels.sh (excerpt, @@ -11,4 +11,4 @@):
 #SBATCH --ntasks-per-node=1
 #SBATCH --partition=develbooster
-srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world_cluster.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world_booster.out
+srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
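The `xenv` wrapper seen in both job scripts sets up environment modules separately for each heterogeneous-job component before launching its binary. Judging from its use here, the call pattern is as sketched below; the reading of `-P` as "print the setup" is an assumption, not documented in this repository:

```bash
# Pattern: xenv [-P] -L <module> [-L <module> ...] <executable>
#   -L  load the named module for this job component (as used above)
#   -P  assumed: print the environment/module setup, useful for debugging
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : \
     xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
```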
mpi_hello_world.c (excerpt, @@ -13,7 +13,7 @@ int main(int argc, char** argv) {):
 int world_rank;
 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
-// Get the name of the processor
+// Get the name of the processor, which is the hostname
 char processor_name[MPI_MAX_PROCESSOR_NAME];
 int name_len;
 MPI_Get_processor_name(processor_name, &name_len);