From 10c863b88bdd8afb803747173b8fa47ecf3ddd4c Mon Sep 17 00:00:00 2001
From: Sebastian Achilles <s.achilles@fz-juelich.de>
Date: Thu, 15 Jun 2023 18:01:58 +0200
Subject: [PATCH] MSA batch jobs: update queue

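Move the exercise batch scripts from the devel queues to the regular
production partitions: dc-cpu-devel/dc-gpu-devel become dc-cpu/dc-gpu on
JURECA, and devel/develbooster become batch/booster on JUWELS. The TODO
hints in the task templates are updated to match.

As a rough usage sketch (assuming a compute budget is already
configured, e.g. via --account), the updated heterogeneous jobs are
submitted as before and now land on the production partitions:

    # submit the CPU+GPU ping-pong example on JURECA
    sbatch 03-MSA-ping-pong/job_msa_jureca.sh

    # inspect the partitions/state of both hetjob components
    # (<jobid> is the id printed by sbatch)
    sacct -j <jobid> --format=JobID,Partition,State
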
---
 01-MSA-hello-world/.master/job_msa_jureca.sh   | 8 ++++----
 01-MSA-hello-world/.master/job_msa_juwels.sh   | 8 ++++----
 01-MSA-hello-world/solutions/job_msa_jureca.sh | 8 ++++----
 01-MSA-hello-world/solutions/job_msa_juwels.sh | 8 ++++----
 01-MSA-hello-world/tasks/job_msa_jureca.sh     | 6 +++---
 01-MSA-hello-world/tasks/job_msa_juwels.sh     | 6 +++---
 02-MSA-hello-world-gpu/job_msa_juwels.sh       | 4 ++--
 03-MSA-ping-pong/job_msa_jureca.sh             | 4 ++--
 03-MSA-ping-pong/job_msa_juwels.sh             | 4 ++--
 9 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/01-MSA-hello-world/.master/job_msa_jureca.sh b/01-MSA-hello-world/.master/job_msa_jureca.sh
index 5785e10..5fcb967 100644
--- a/01-MSA-hello-world/.master/job_msa_jureca.sh
+++ b/01-MSA-hello-world/.master/job_msa_jureca.sh
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 {%- endif %}
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 {% endif %}
diff --git a/01-MSA-hello-world/.master/job_msa_juwels.sh b/01-MSA-hello-world/.master/job_msa_juwels.sh
index e0e7055..86ce31e 100644
--- a/01-MSA-hello-world/.master/job_msa_juwels.sh
+++ b/01-MSA-hello-world/.master/job_msa_juwels.sh
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 {%- endif %}
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 {% endif %}
diff --git a/01-MSA-hello-world/solutions/job_msa_jureca.sh b/01-MSA-hello-world/solutions/job_msa_jureca.sh
index 15b917d..87726a0 100644
--- a/01-MSA-hello-world/solutions/job_msa_jureca.sh
+++ b/01-MSA-hello-world/solutions/job_msa_jureca.sh
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 
diff --git a/01-MSA-hello-world/solutions/job_msa_juwels.sh b/01-MSA-hello-world/solutions/job_msa_juwels.sh
index ac0173b..f81b610 100644
--- a/01-MSA-hello-world/solutions/job_msa_juwels.sh
+++ b/01-MSA-hello-world/solutions/job_msa_juwels.sh
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 
diff --git a/01-MSA-hello-world/tasks/job_msa_jureca.sh b/01-MSA-hello-world/tasks/job_msa_jureca.sh
index 07bc86d..f040d6c 100644
--- a/01-MSA-hello-world/tasks/job_msa_jureca.sh
+++ b/01-MSA-hello-world/tasks/job_msa_jureca.sh
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out 
diff --git a/01-MSA-hello-world/tasks/job_msa_juwels.sh b/01-MSA-hello-world/tasks/job_msa_juwels.sh
index ca48bf3..5e76cab 100644
--- a/01-MSA-hello-world/tasks/job_msa_juwels.sh
+++ b/01-MSA-hello-world/tasks/job_msa_juwels.sh
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend the following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out 
diff --git a/02-MSA-hello-world-gpu/job_msa_juwels.sh b/02-MSA-hello-world-gpu/job_msa_juwels.sh
index 32bd369..62d47a7 100644
--- a/02-MSA-hello-world-gpu/job_msa_juwels.sh
+++ b/02-MSA-hello-world-gpu/job_msa_juwels.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
diff --git a/03-MSA-ping-pong/job_msa_jureca.sh b/03-MSA-ping-pong/job_msa_jureca.sh
index d328271..d786610 100644
--- a/03-MSA-ping-pong/job_msa_jureca.sh
+++ b/03-MSA-ping-pong/job_msa_jureca.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
diff --git a/03-MSA-ping-pong/job_msa_juwels.sh b/03-MSA-ping-pong/job_msa_juwels.sh
index e6f2ba4..5d068bc 100644
--- a/03-MSA-ping-pong/job_msa_juwels.sh
+++ b/03-MSA-ping-pong/job_msa_juwels.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
-- 
GitLab