Commit 46dc6757 authored by Sebastian Achilles

Merge branch 'sa-queue' into 'main'

MSA batch jobs: update queue

See merge request !6
parents ecf40e1a 10c863b8
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 {% endif %}
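For reference, a sketch of how the rendered JURECA solution script might look once the Jinja SOLUTION flag is set. The hunk starts at file line 5, so the shebang, account, and the node/task directives of the first component are not shown in the diff and are assumed here for illustration:

#!/bin/bash
#SBATCH --account=<your-project>     # assumed, not part of this hunk
#SBATCH --nodes=2                    # assumed first-component size
#SBATCH --ntasks-per-node=2          # assumed first-component size
#SBATCH --output=slurm-out.%j
#SBATCH --error=slurm-err.%j
#SBATCH --time=00:15:00
#SBATCH --partition=dc-cpu
# Second heterogeneous job component: 1 node, 1 task on the GPU partition
#SBATCH hetjob
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --partition=dc-gpu
# The colon separates the commands run by the two het-job components
srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : \
     xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out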
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 {%- endif %}
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 {% endif %}
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
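A heterogeneous job defined this way is submitted like any other batch script; the script file name below is hypothetical. Slurm typically lists the two components of a het job with a +0/+1 suffix on the job ID:

# Submit the heterogeneous job (file name is hypothetical)
sbatch ping-pong.sbatch
# List your jobs; the het-job components appear as <jobid>+0 and <jobid>+1
squeue -u $USER
# stdout/stderr end up in slurm-out.<jobid> and slurm-err.<jobid>,
# per the --output/--error directives above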