diff --git a/01-MSA-hello-world/.master/job_msa_jureca.sh b/01-MSA-hello-world/.master/job_msa_jureca.sh
index 5785e1010970c17331eeda3b657c48d76998c48f..5fcb967f07f0763538967822643cf3f35710fdfd 100644
--- a/01-MSA-hello-world/.master/job_msa_jureca.sh
+++ b/01-MSA-hello-world/.master/job_msa_jureca.sh
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 {%- endif %}
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 {% endif %}
diff --git a/01-MSA-hello-world/.master/job_msa_juwels.sh b/01-MSA-hello-world/.master/job_msa_juwels.sh
index e0e7055cf6e7a662774f2439e5b36b83aa1ad005..86ce31ed20ddc0d94d0c9ab79638ed886a64d643 100644
--- a/01-MSA-hello-world/.master/job_msa_juwels.sh
+++ b/01-MSA-hello-world/.master/job_msa_juwels.sh
@@ -5,16 +5,16 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 {%- if SOLUTION %}
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 {%- endif %}
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out {% if SOLUTION -%}
 : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 {% endif %}
diff --git a/01-MSA-hello-world/solutions/job_msa_jureca.sh b/01-MSA-hello-world/solutions/job_msa_jureca.sh
index 15b917ddccea4855e92307700acdf686ff36e306..87726a09343dc4f90f441898275e32638bb4d09f 100644
--- a/01-MSA-hello-world/solutions/job_msa_jureca.sh
+++ b/01-MSA-hello-world/solutions/job_msa_jureca.sh
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
 
diff --git a/01-MSA-hello-world/solutions/job_msa_juwels.sh b/01-MSA-hello-world/solutions/job_msa_juwels.sh
index ac0173bcb1ffbd8ba9d0458ff15e266ba0282830..f81b610779e4abd322d98c5ac3097d2e8c9c9ec2 100644
--- a/01-MSA-hello-world/solutions/job_msa_juwels.sh
+++ b/01-MSA-hello-world/solutions/job_msa_juwels.sh
@@ -5,13 +5,13 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out : xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwelsbooster.out
 
diff --git a/01-MSA-hello-world/tasks/job_msa_jureca.sh b/01-MSA-hello-world/tasks/job_msa_jureca.sh
index 07bc86d491f875de8b595ba1ed03f3a6e42e2b52..f040d6cbdc3c884c523a33c813de91dbc435f3c5 100644
--- a/01-MSA-hello-world/tasks/job_msa_jureca.sh
+++ b/01-MSA-hello-world/tasks/job_msa_jureca.sh
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
-#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu-devel partition
+#SBATCH --partition=dc-cpu
+#TODO: Add hetjob with 1 node, 1 task per node on dc-gpu partition
 
-#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu-devel) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on dc-gpu) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.jureca.out
diff --git a/01-MSA-hello-world/tasks/job_msa_juwels.sh b/01-MSA-hello-world/tasks/job_msa_juwels.sh
index ca48bf36ce5e96d0c2726e24212669f65c63ee4e..5e76cab5d7107ca15357d81de8be93718927e3c3 100644
--- a/01-MSA-hello-world/tasks/job_msa_juwels.sh
+++ b/01-MSA-hello-world/tasks/job_msa_juwels.sh
@@ -5,8 +5,8 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
-#TODO: Add hetjob with 1 node, 1 task per node on develbooster partition
+#SBATCH --partition=batch
+#TODO: Add hetjob with 1 node, 1 task per node on booster partition
 
-#TODO: Extend following line for a second heterogeneous job component (running on develbooster) using the colon syntax
+#TODO: Extend following line for a second heterogeneous job component (running on booster) using the colon syntax
 srun xenv -P -L GCC -L ParaStationMPI ./mpi_hello_world.juwels.out
diff --git a/02-MSA-hello-world-gpu/job_msa_juwels.sh b/02-MSA-hello-world-gpu/job_msa_juwels.sh
index 32bd3696ee2e173aaa8207ee2d0c3466e2ffafc6..62d47a74369608f0b22728a02d3c329da8fcb888 100644
--- a/02-MSA-hello-world-gpu/job_msa_juwels.sh
+++ b/02-MSA-hello-world-gpu/job_msa_juwels.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
 srun xenv -P -L GCC -L ParaStationMPI ./hello.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./hello.gpu.out
diff --git a/03-MSA-ping-pong/job_msa_jureca.sh b/03-MSA-ping-pong/job_msa_jureca.sh
index d328271e5fdf8031af52d77d9b9d1dc7911bdac9..d786610cb7da25e04ed6e539656193d1c5b00cdb 100644
--- a/03-MSA-ping-pong/job_msa_jureca.sh
+++ b/03-MSA-ping-pong/job_msa_jureca.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=dc-cpu-devel
+#SBATCH --partition=dc-cpu
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=dc-gpu-devel
+#SBATCH --partition=dc-gpu
 
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
diff --git a/03-MSA-ping-pong/job_msa_juwels.sh b/03-MSA-ping-pong/job_msa_juwels.sh
index e6f2ba4bdb4fd07222f526aecf43a57f0040a878..5d068bc90b37ededb518caa80ef36234076b26d1 100644
--- a/03-MSA-ping-pong/job_msa_juwels.sh
+++ b/03-MSA-ping-pong/job_msa_juwels.sh
@@ -5,10 +5,10 @@
 #SBATCH --output=slurm-out.%j
 #SBATCH --error=slurm-err.%j
 #SBATCH --time=00:15:00
-#SBATCH --partition=devel
+#SBATCH --partition=batch
 #SBATCH hetjob
 #SBATCH --nodes=1
 #SBATCH --ntasks-per-node=1
-#SBATCH --partition=develbooster
+#SBATCH --partition=booster
 
 srun xenv -P -L GCC -L ParaStationMPI ./ping-pong.cpu.out : xenv -P -L GCC -L ParaStationMPI -L MPI-settings/CUDA ./ping-pong.gpu.out
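
Not part of the diff: a minimal usage sketch of how these heterogeneous job scripts are submitted and inspected, assuming the standard Slurm command-line tools and that the binaries referenced above have already been built (file path taken from the diff; no additional sbatch options assumed).

    # Submit one of the heterogeneous job scripts, e.g. the ping-pong case on JUWELS
    sbatch 03-MSA-ping-pong/job_msa_juwels.sh
    # Slurm lists each component of a heterogeneous job separately,
    # e.g. JOBID+0 (batch partition) and JOBID+1 (booster partition)
    squeue -u $USER

The colon in the srun line is the heterogeneous-job separator: everything before it runs in the first job component (CPU partition), everything after it in the second (GPU partition), within a single MPI_COMM_WORLD.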