Commit c43fe266 authored by Andreas Herten

Merge branch 'main' of gitlab.jsc.fz-juelich.de:herten1/msa-hello-world

parents 485d1a7a ae25efdc
Showing 15 changed files with 40 additions and 40 deletions
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 {% endif %}
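A note on the structure above: everything following the "#SBATCH hetjob" separator applies only to the second job component, and the Jinja2 "{%- if SOLUTION %}" / "{%- endif %}" guards let the same template render either the exercise or the solution variant of the script. A rough sketch of the pattern (partition names are the JURECA-DC ones from this commit; the program names are placeholders):

    #SBATCH --partition=dc-cpu    # options for heterogeneous component 0
    #SBATCH hetjob                # separator: component 1 starts here
    #SBATCH --nodes=1             # options below apply to component 1 only
    #SBATCH --ntasks-per-node=1
    #SBATCH --partition=dc-gpu
    srun ./prog_cpu : ./prog_gpu  # colon syntax: one command per component (placeholder binaries)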
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 {% endif %}
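Since this commit is essentially a partition rename (dc-cpu-devel to dc-cpu and dc-gpu-devel to dc-gpu on JURECA-DC; devel to batch and develbooster to booster on JUWELS), a quick sanity check of which partitions actually exist on the current system can be done with sinfo; this is standard Slurm, though the exact output differs per site:

    sinfo -s                  # one summary line per partition
    sinfo -o "%P %a %l %D"    # partition name, availability, time limit, node count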
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
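To try the solution script above, submission and inspection could look roughly like the sketch below; the script filename and the compute budget are placeholders, and the two components of a heterogeneous job usually appear in the queue as <jobid>+0 and <jobid>+1:

    sbatch --account=<budget> hello-world.jureca.sbatch   # filename is a placeholder
    squeue --me                                           # expect two entries, <jobid>+0 and <jobid>+1
    cat slurm-out.<jobid>                                 # stdout of the job script, per --output above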
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
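The colon syntax is not limited to batch scripts; Slurm also accepts heterogeneous resource requests separated by a colon directly on the command line. A sketch of an interactive equivalent for the JUWELS case (the xenv module handling from the scripts is omitted here for brevity, and the usual account/budget options would still be needed):

    salloc --nodes=1 --ntasks-per-node=1 --partition=batch : --nodes=1 --ntasks-per-node=1 --partition=booster
    srun ./mpi_hello_world.juwels.out : ./mpi_hello_world.juwelsbooster.out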
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
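When extending the srun line as the TODO asks, each component may also carry its own srun options in front of its command. The following sketch is not part of the exercise files; it merely illustrates giving the two components different task counts:

    srun --ntasks=2 xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : --ntasks=1 xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out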
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
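Inside an allocation with the same two components (for example one obtained with salloc as sketched earlier), individual steps can be restricted to a single component with srun's --het-group option. A quick way to check that component 1 really is a GPU node, assuming nvidia-smi is available there:

    srun --het-group=0 hostname       # hostnames of the CPU component
    srun --het-group=1 nvidia-smi -L  # list the GPUs of the Booster/GPU component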
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
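After such a ping-pong job has finished, it can be worth verifying that the two components really ran on the intended partitions; sacct can report this per component (the job id is a placeholder, the field names are standard sacct format fields):

    sacct -j <jobid> -o JobID,JobName,Partition,NodeList,Elapsed,State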
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out