Commit 10c863b8 authored by Sebastian Achilles

MSA batch jobs: update queue

parent 779b2f1e
Merge request !6: MSA batch jobs: update queue
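
This commit points the MSA exercise scripts at the production partitions instead of the devel ones: dc-cpu-devel/dc-gpu-devel become dc-cpu/dc-gpu on JURECA, and devel/develbooster become batch/booster on JUWELS. All of the scripts below share the same heterogeneous-job pattern: a #SBATCH hetjob separator splits the allocation into components on different partitions, and the colon syntax in the srun line starts one executable per component. A minimal standalone sketch of that pattern (account and executable names are placeholders, not taken from this commit):

    #!/bin/bash
    #SBATCH --account=<project>     # placeholder: your compute project
    #SBATCH --nodes=1
    #SBATCH --ntasks-per-node=1
    #SBATCH --partition=dc-cpu      # component 0: CPU partition
    #SBATCH hetjob                  # directives below apply to component 1
    #SBATCH --nodes=1
    #SBATCH --ntasks-per-node=1
    #SBATCH --partition=dc-gpu      # component 1: GPU partition

    # One executable per component, joined by a colon
    srun ./app.cpu.out : ./app.gpu.out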
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 {% endif %}

@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 {% endif %}

@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
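
For context, a rendered solution script like the one above is submitted with sbatch as usual; Slurm then lists the heterogeneous components under the same job ID with offset suffixes. A quick sketch (the script name is hypothetical):

    sbatch hello_hetjob.jureca.sh   # hypothetical filename for the rendered script
    squeue -u $USER                 # components appear as <jobid>+0 and <jobid>+1
    sacct -j <jobid>                # per-component accounting after the run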

@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out

@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out

@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out

@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out

@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out

@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
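
Note that each colon-separated part of the srun line carries its own xenv environment; that is how the GPU component additionally loads MPI-settings/CUDA for CUDA-aware communication while the CPU component does not. Inside an existing heterogeneous allocation, a single component can also be addressed directly via srun's --het-group option (a sketch; the command shown is only illustrative):

    # Run a step only on component 1 (the GPU/booster side)
    srun --het-group=1 hostname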