diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 59b7beb8b53b6d8e0d9e698e00201e8458c7a482..0633072e6a3de4314f7a46348dbb0f84001e462b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -62,6 +62,9 @@ check_style:
     - elif [[ $(basename $i) == *Turbomole* ]]; then
     - sg turbomol -c "eb --check-style --check-contrib $i" || faulty="$faulty $i"
     - sg turbomol -c "eb --dry-run $i" || faulty="$faulty $i"
+    - elif [[ $(basename $i) == *MATLAB* ]]; then
+    - sg matlab -c "eb --check-style --check-contrib $i" || faulty="$faulty $i"
+    - sg matlab -c "eb --dry-run $i" || faulty="$faulty $i"
     - else
     - eb --check-style --check-contrib $i || faulty="$faulty $i"
     - eb --dry-run $i || faulty="$faulty $i"
@@ -107,6 +110,9 @@ full_check_style:
     - elif [[ $(basename $i) == *Turbomole* ]]; then
     - sg turbomol -c "eb --check-style --check-contrib $i" || faulty="$faulty $i"
     - sg turbomol -c "eb --dry-run $i" || faulty="$faulty $i"
+    - elif [[ $(basename $i) == *MATLAB* ]]; then
+    - sg matlab -c "eb --check-style --check-contrib $i" || faulty="$faulty $i"
+    - sg matlab -c "eb --dry-run $i" || faulty="$faulty $i"
     - else
     - eb --check-style --check-contrib $i || faulty="$faulty $i"
     - eb --dry-run $i || faulty="$faulty $i"
@@ -134,7 +140,7 @@ update_acls:
       - SYSTEM:
         - juwels
         - juwels_booster
-        - jurecadc
+        - jureca
         - jusuf
         - hdfml
   tags:
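Note on the MATLAB branch added above: as with the existing Turbomole case, easyconfigs for group-restricted software are checked under the owning Unix group via sg, so that eb can read the license-protected files. A minimal Python sketch of that dispatch, with a hypothetical GROUP_PATTERNS mapping (illustrative, not part of the CI):

    import os
    import subprocess

    # hypothetical mapping: easyconfig name pattern -> Unix group owning the software
    GROUP_PATTERNS = {'Turbomole': 'turbomol', 'MATLAB': 'matlab'}

    def check_easyconfig(path):
        """Run the style check and dry run, switching group via 'sg' when needed."""
        name = os.path.basename(path)
        group = next((g for pat, g in GROUP_PATTERNS.items() if pat in name), None)
        for eb_args in ('--check-style --check-contrib', '--dry-run'):
            cmd = 'eb %s %s' % (eb_args, path)
            if group:
                # 'sg <group> -c <cmd>' runs <cmd> with <group> as the effective group
                cmd = "sg %s -c '%s'" % (group, cmd)
            if subprocess.call(cmd, shell=True) != 0:
                return False
        return True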
diff --git a/Custom_EasyBlocks/cpmd.py b/Custom_EasyBlocks/cpmd.py
index 25b38296877dd66301ef190bc1d46b1c8a5f3c3c..8885f4b26da9e8646fc4f11f6e61632d14724894 100644
--- a/Custom_EasyBlocks/cpmd.py
+++ b/Custom_EasyBlocks/cpmd.py
@@ -36,7 +36,8 @@ EasyBuild support for building and installing CPMD, implemented as an easyblock
 #the pseudo-fix is to remove installdir/obj as postinstallcmds from the easyconfig file
 #the documentation was missing and is now added in a somewhat weird procedure
 #to the builddir and subsequently copied to the installdir/doc via postinstallcmds from the easyconfig file
-#the hybrid MPI/OMP version can solely installed through the openmp toolchain option
+#the hybrid MPI/OMP version is now installed via prefix_opt = '-omp -DEST=', independent of the toolchain options in the easyconfig file
+#
 #the cuda implementation is ignored
 #
 
@@ -68,6 +69,7 @@ class EB_CPMD(ConfigureMake):
         """Custom easyconfig parameters for CPMD."""
         extra_vars = {
             'base_configuration': [None, "Base configuration from which to start (file name)", CUSTOM],
+            'MIT': [None, "Open-source version of CPMD (MIT licence)", CUSTOM],
         }
         return ConfigureMake.extra_options(extra_vars)
 
@@ -89,7 +91,12 @@ class EB_CPMD(ConfigureMake):
         config_file_candidates = []
 
         for confdirname in ["configure", "CONFIGURE"]:
-            config_file_prefix = os.path.join(self.builddir, "CPMD", confdirname)
+            if self.cfg['MIT']:
+                cpmdname = 'CPMD-%s' % self.version
+            else:
+                cpmdname = 'CPMD'
+
+            config_file_prefix = os.path.join(self.builddir, cpmdname, confdirname)
             if os.path.isdir(config_file_prefix):
                 break
         else:
@@ -149,6 +156,8 @@ class EB_CPMD(ConfigureMake):
                 (r"^(\s*CC=.*)", r"#\1"),
                 (r"^(\s*FC=.*)", r"#\1"),
                 (r"^(\s*LD)=.*", r"\1='$(FC)'"),
+                (r" -openmp ", r" -qopenmp "),
+                (r"-mkl=", r"-qmkl="),
             ])
         except IOError as err:
             raise EasyBuildError("Failed to patch %s: %s", selected_base_config, err)
@@ -174,8 +183,8 @@ class EB_CPMD(ConfigureMake):
         options = [self.cfg['configopts']]
 
         # enable OpenMP support if desired
-        if self.toolchain.options.get('openmp', None) and LooseVersion(self.version) >= LooseVersion('4.0'):
-            options.append("-omp")
+#       if self.toolchain.options.get('openmp', None) and LooseVersion(self.version) >= LooseVersion('4.0'):
+#           options.append("-omp")
 
         # This "option" has to come last as it's the chief argument, coming after
         # all flags and so forth.
@@ -240,11 +249,12 @@ class EB_CPMD(ConfigureMake):
             if preproc_flag is None:
                 preproc_flag = ''
 
+            # pass a fallback to os.getenv() so an unset variable does not inject the literal string 'None'
             apply_regex_substitutions(makefile, [
-                (r"^(\s*CPPFLAGS\s*=.*)", r"\1 {0}".format(os.getenv('CPPFLAGS'))),
-                (r"^(\s*CFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('CFLAGS'))),
-                (r"^(\s*FFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('FFLAGS'))),
-                (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('LDFLAGS'))),
+                (r"^(\s*CPPFLAGS\s*=.*)", r"\1 {0}".format(os.getenv('CPPFLAGS','  '))),
+                (r"^(\s*CFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('CFLAGS','  '))),
+                (r"^(\s*FFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('FFLAGS','  '))),
+                (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('LDFLAGS','  '))),
 
                 # Allow to define own XFLAGS
                 (r"# CPPFLAGS =", r"CPPFLAGS +="),
@@ -259,14 +269,14 @@ class EB_CPMD(ConfigureMake):
             ])
             if self.toolchain.options.get('openmp', None):
                 apply_regex_substitutions(makefile, [
-                    (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0} {1}".format(os.getenv('LIBLAPACK_MT'), os.getenv('LIBBLAS_MT')))
+                    (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0} {1}".format(os.getenv('LIBLAPACK_MT',' '), os.getenv('LIBBLAS_MT',' ')))
                 ])
             else:
                 apply_regex_substitutions(makefile, [
-                    (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0} {1}".format(os.getenv('LIBLAPACK'), os.getenv('LIBBLAS')))
+                    (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0} {1}".format(os.getenv('LIBLAPACK',' '), os.getenv('LIBBLAS',' ')))
                 ])
             apply_regex_substitutions(makefile, [
-                (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('LIBFFT'))),
+                (r"^(\s*LFLAGS\s*=.*)",   r"\1 {0}".format(os.getenv('LIBFFT',' '))),
             ])
 
             if get_software_root('imkl'):
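Why the getenv fallbacks added above matter: os.getenv() returns None for an unset variable, and str.format() then writes the literal string 'None' into the patched Makefile. A small demonstration (illustrative, not part of the easyblock):

    import os

    os.environ.pop('LDFLAGS', None)                       # simulate an unset variable
    broken = r"\1 {0}".format(os.getenv('LDFLAGS'))       # '\1 None' -- corrupts LFLAGS
    fixed = r"\1 {0}".format(os.getenv('LDFLAGS', '  '))  # '\1   '   -- harmless whitespace
    print(repr(broken), repr(fixed))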
diff --git a/Custom_EasyBlocks/nvhpc.py b/Custom_EasyBlocks/nvhpc.py
index ea6bd75dce5f4e08d71cfb7cd822e596798573cc..7744209d1ff1ff5c573baea9878223d92f6ffdb1 100644
--- a/Custom_EasyBlocks/nvhpc.py
+++ b/Custom_EasyBlocks/nvhpc.py
@@ -1,6 +1,6 @@
 ##
-# Copyright 2015-2022 Bart Oldeman
-# Copyright 2016-2022 Forschungszentrum Juelich
+# Copyright 2015-2023 Bart Oldeman
+# Copyright 2016-2023 Forschungszentrum Juelich
 #
 # This file is triple-licensed under GPLv2 (see below), MIT, and
 # BSD three-clause licenses.
@@ -164,7 +164,10 @@ class EB_NVHPC(PackedBinary):
             line = re.sub(r"^PATH=/", r"#PATH=/", line)
             sys.stdout.write(line)
 
-        cmd = "%s -x %s -g77 gfortran" % (makelocalrc_filename, compilers_subdir)
+        if LooseVersion(self.version) >= LooseVersion('22.9'):
+            cmd = f"%s -x %s -cuda {default_cuda_version} -stdpar {default_compute_capability}" % (makelocalrc_filename, os.path.join(compilers_subdir, "bin"))
+        else:
+            cmd = "%s -x %s -g77 /" % (makelocalrc_filename, compilers_subdir)
         run_cmd(cmd, log_all=True, simple=True)
 
         # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so
@@ -306,4 +309,3 @@ class EB_NVHPC(PackedBinary):
             if not self.cfg['module_add_cuda'] and get_software_root('CUDA'):
                 txt += self.module_generator.set_environment('NVHPC_CUDA_HOME', os.getenv('CUDA_HOME'))
         return txt
-
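The version gate above depends on LooseVersion comparing release strings component-wise rather than lexicographically; default_cuda_version and default_compute_capability are assumed to be defined earlier in the easyblock. A quick check of the comparison semantics (illustrative):

    from distutils.version import LooseVersion  # EasyBuild ships an equivalent LooseVersion

    assert '22.10' < '22.9'                              # plain string compare gets it wrong
    assert LooseVersion('22.10') > LooseVersion('22.9')  # component-wise compare is correct

    for version in ('21.11', '22.9', '23.1'):
        new_style = LooseVersion(version) >= LooseVersion('22.9')
        print(version, '->', 'new (-cuda/-stdpar)' if new_style else 'old (-g77 /)')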
diff --git a/Custom_EasyBlocks/nvidia_driver.py b/Custom_EasyBlocks/nvidia_driver.py
index 48d74f48553a2bb6ebc9d716db570dc17220327e..75ca2bcc32e6148ed5c08bfa36fba50e9c4416a9 100644
--- a/Custom_EasyBlocks/nvidia_driver.py
+++ b/Custom_EasyBlocks/nvidia_driver.py
@@ -32,6 +32,7 @@ class EB_nvidia_minus_driver(Binary):
         """Support for generic 'default' modules with specific real versions"""
         extra_vars = {
             'realversion': [None, "Real version to be used when version = 'default'", CUSTOM],
+            'just_GL_libs': [False, "Install only the GL-related libraries", CUSTOM],
         }
         return extra_vars
 
@@ -68,33 +69,46 @@ class EB_nvidia_minus_driver(Binary):
         "Install NVIDIA libs simply by copying files. We can't use the installer because it requires root privileges."
 
         # list of libs
-        libs = expand_glob_paths([os.path.join(self.libsdir, 'lib*.so*')])
-        try:
-            libs += expand_glob_paths([os.path.join(self.libsdir, '*.la')])
-        except EasyBuildError:
-            self.log.info("No *.la files found. Proceeding without them.")
-        libs += [os.path.join(self.libsdir, 'nvidia_drv.so')]
-
-        # list of binaries
-        binaries = ['nvidia-bug-report.sh',
-                    'nvidia-cuda-mps-control',
-                    'nvidia-cuda-mps-server',
-                    'nvidia-debugdump',
-                    'nvidia-settings',
-                    'nvidia-smi',
-                    'nvidia-xconfig']
-        binaries = [os.path.join(self.libsdir, x) for x in binaries]
-
-        # list of manpages
-        manpages = ['nvidia-settings.1.gz',
-                    'nvidia-cuda-mps-control.1.gz',
-                    'nvidia-xconfig.1.gz',
-                    'nvidia-smi.1.gz']
-        manpages = [os.path.join(self.libsdir, x) for x in manpages]
+        if not self.cfg['just_GL_libs']:
+            libs = expand_glob_paths([os.path.join(self.libsdir, 'lib*.so*')])
+            try:
+                libs += expand_glob_paths([os.path.join(self.libsdir, '*.la')])
+            except EasyBuildError:
+                self.log.info("No *.la files found. Proceeding without them.")
+            libs += [os.path.join(self.libsdir, 'nvidia_drv.so')]
+        else:
+            libs = expand_glob_paths([os.path.join(self.libsdir, 'libEGL*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libGL*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libOpenGL.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libnvidia-egl*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libnvidia-gl*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libnvidia-rtcore*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libnvidia-tls*.so*')])
+            libs += expand_glob_paths([os.path.join(self.libsdir, 'libnvidia-vulkan*.so*')])
+
+
+        if not self.cfg['just_GL_libs']:
+            # list of binaries
+            binaries = ['nvidia-bug-report.sh',
+                        'nvidia-cuda-mps-control',
+                        'nvidia-cuda-mps-server',
+                        'nvidia-debugdump',
+                        'nvidia-settings',
+                        'nvidia-smi',
+                        'nvidia-xconfig']
+            binaries = [os.path.join(self.libsdir, x) for x in binaries]
+
+            # list of manpages
+            manpages = ['nvidia-settings.1.gz',
+                        'nvidia-cuda-mps-control.1.gz',
+                        'nvidia-xconfig.1.gz',
+                        'nvidia-smi.1.gz']
+            manpages = [os.path.join(self.libsdir, x) for x in manpages]
+
+            copy(binaries, os.path.join(self.installdir, 'bin'))
+            copy(manpages, os.path.join(self.installdir, 'man', 'man1'))
 
         copy(libs, os.path.join(self.installdir, 'lib64'))
-        copy(binaries, os.path.join(self.installdir, 'bin'))
-        copy(manpages, os.path.join(self.installdir, 'man', 'man1'))
 
     def post_install_step(self):
         """Generate the appropriate symlinks"""
@@ -104,12 +118,13 @@ class EB_nvidia_minus_driver(Binary):
         # Run ldconfig to create missing symlinks (libcuda.so.1, etc)
         run_cmd("/usr/sbin/ldconfig -N %s" % libdir)
 
-        # Create an extra symlink for libcuda.so, otherwise PGI 19.X breaks
-        # Create an extra symlink for libnvidia-ml.so, otherwise MVAPICH2 doesn't find it if it doesn't rely on stubs
-        missing_links = ['libcuda.so', 'libnvidia-ml.so']
-        for missing_link in missing_links:
-            run_cmd("ln -s %s/%s.1 %s/%s" %
-                    (libdir, missing_link, libdir, missing_link))
+        if not self.cfg['just_GL_libs']:
+            # Create an extra symlink for libcuda.so, otherwise PGI 19.X breaks
+            # Create an extra symlink for libnvidia-ml.so, otherwise MVAPICH2 doesn't find it if it doesn't rely on stubs
+            missing_links = ['libcuda.so', 'libnvidia-ml.so']
+            for missing_link in missing_links:
+                run_cmd("ln -s %s/%s.1 %s/%s" %
+                        (libdir, missing_link, libdir, missing_link))
 
         super(EB_nvidia_minus_driver, self).post_install_step()
 
@@ -120,13 +135,26 @@ class EB_nvidia_minus_driver(Binary):
 
         chk_libdir = ["lib64"]
 
-        nvlibs = ["cuda"]
+        if not self.cfg['just_GL_libs']:
+            nvlibs = ["cuda"]
+            binaries = [os.path.join("bin", x) for x in ["nvidia-smi"]]
+            libs = [os.path.join("%s", "lib%s.%s.1") % (x, y, shlib_ext)
+                    for x in chk_libdir for y in nvlibs]
+        else:
+            nvlibs_0_suffix = ["EGL_nvidia", "GLX_nvidia", "OpenGL"]
+            nvlibs_1_suffix = ["GLESv1_CM_nvidia"]
+            nvlibs_2_suffix = ["GLESv2_nvidia"]
+            binaries = []
+            libs = [os.path.join("%s", "lib%s.%s.0") % (x, y, shlib_ext)
+                    for x in chk_libdir for y in nvlibs_0_suffix]
+            libs += [os.path.join("%s", "lib%s.%s.1") % (x, y, shlib_ext)
+                     for x in chk_libdir for y in nvlibs_1_suffix]
+            libs += [os.path.join("%s", "lib%s.%s.2") % (x, y, shlib_ext)
+                     for x in chk_libdir for y in nvlibs_2_suffix]
+
         custom_paths = {
-            'files': [os.path.join("bin", x) for x in ["nvidia-smi"]] +
-            [os.path.join("%s", "lib%s.%s.1") % (x, y, shlib_ext)
-             for x in chk_libdir for y in nvlibs],
+            'files': binaries + libs,
             'dirs': [''],
         }
-
         super(EB_nvidia_minus_driver, self).sanity_check_step(
             custom_paths=custom_paths)
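The GL-only sanity check above distinguishes the libraries by their SONAME major version (.so.0, .so.1, .so.2). How the path templates expand (illustrative, not part of the easyblock):

    import os

    shlib_ext = 'so'        # normally obtained via get_shared_lib_ext()
    chk_libdir = ['lib64']
    nvlibs_0_suffix = ['EGL_nvidia', 'GLX_nvidia', 'OpenGL']

    libs = [os.path.join('%s', 'lib%s.%s.0') % (x, y, shlib_ext)
            for x in chk_libdir for y in nvlibs_0_suffix]
    print(libs)  # ['lib64/libEGL_nvidia.so.0', 'lib64/libGLX_nvidia.so.0', 'lib64/libOpenGL.so.0']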
diff --git a/Golden_Repo/a/AMBER/AMBER-22-gpsmkl-2022a.eb b/Golden_Repo/a/AMBER/AMBER-22-gpsmkl-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..a286e14a83f55df35363d25b44f2991005e97a94
--- /dev/null
+++ b/Golden_Repo/a/AMBER/AMBER-22-gpsmkl-2022a.eb
@@ -0,0 +1,112 @@
+easyblock = 'CMakeMake'
+
+name = 'AMBER'
+version = '22'
+versionsuffix = '-AmberTools-23-plumed'
+
+homepage = 'http://ambermd.org'
+description = """
+AMBER: 'Assisted Model Building with Energy Refinement' is a set of molecular
+mechanics force fields and a package of molecular simulation programs.
+
+Citation:
+D.A. Case, H.M. Aktulga, K. Belfon, I.Y. Ben-Shalom, J.T. Berryman,
+S.R. Brozell, D.S. Cerutti, T.E. Cheatham, III, G.A. Cisneros,
+V.W.D. Cruzeiro, T.A. Darden, N. Forouzesh, G. Giambaşu, T. Giese,
+M.K. Gilson, H. Gohlke, A.W. Goetz, J. Harris, S. Izadi, S.A. Izmailov,
+K. Kasavajhala, M.C. Kaymak, E. King, A. Kovalenko, T. Kurtzman,
+T.S. Lee, P. Li, C. Lin, J. Liu, T. Luchko, R. Luo, M. Machado,
+V. Man, M. Manathunga, K.M. Merz, Y. Miao, O. Mikhailovskii, G. Monard,
+H. Nguyen, K.A. O’Hearn, A. Onufriev, F. Pan, S. Pantano, R. Qi,
+A. Rahnamoun, D.R. Roe, A. Roitberg, C. Sagui, S. Schott-Verdugo,
+A. Shajan, J. Shen, C.L. Simmerling, N.R. Skrynnikov, J. Smith,
+J. Swails, R.C. Walker, J. Wang, J. Wang, H. Wei, X. Wu, Y. Wu,
+Y. Xiong, Y. Xue, D.M. York, S. Zhao, Q. Zhu, and P.A. Kollman
+(2023), Amber 2023, University of California, San Francisco.
+"""
+
+toolchain = {'name': 'gpsmkl', 'version': '2022a'}
+toolchainopts = {'pic': True, 'openmp': True,
+                 'usempi': True}
+
+builddependencies = [
+    ('CMake', '3.23.1'),
+    ('binutils', '2.38'),
+]
+dependencies = [
+    #    ('FFTW.MPI', '3.3.10'),
+    ('Boost', '1.79.0'),
+    ('flex', '2.6.4'),
+    ('NCCL', 'default', '-CUDA-11.7'),
+    ('netCDF', '4.9.0'),
+    ('netCDF-Fortran', '4.6.0'),
+    ('PnetCDF', '1.12.3'),
+    ('Python', '3.10.4'),
+    ('matplotlib', '3.5.2', '', ('gcccoremkl', '11.3.0-2022.1.0')),
+    #    ('mpi4py', '3.1.4', '', ('gompi', '2022a'))
+    ('PLUMED', '2.8.1'),
+]
+
+sources = [
+    'AmberTools23.tar.bz2',
+    'Amber22.tar.bz2',
+]
+patches = [
+    'AmberTools-21_CMake-FlexiBLAS.patch',
+    'pmemd_xray_non_bulk_no_implicit_type.patch',
+]
+
+checksums = [
+    'debb52e6ef2e1b4eaa917a8b4d4934bd2388659c660501a81ea044903bf9ee9d',
+    '3c887ccbad690fc76ff0b120a3448eae023c08e76582aac07900d4a9708ebd16',
+    '9543812c24c4b7842f64f1f8abaf2c92b5c4c0fadcdbd9811e76b81a778f0d36',
+    '1dc2c70c597b19736b86d9f0e00743282da68eeeb1b57650db44dc3a5f4da898',
+]
+
+separate_build_dir = True
+local_build_mpi_parts = "TRUE"
+local_build_cuda_parts = "TRUE"
+local_build_cuda_nccl = "TRUE"
+
+preconfigopts = "CC=gcc && CXX=g++ && COMPILER=GNU "
+preconfigopts += " && cd %(builddir)s/amber22_src && "
+preconfigopts += " ./update_amber --update && cd ../easybuild_obj && "
+
+configopts = "-DCOMPILER=GNU -DCHECK_UPDATES=OFF -DAPPLY_UPDATES=OFF -DBUILD_GUI=FALSE "
+configopts += " -DINSTALL_TESTS=TRUE -DOPENMP=TRUE -DMPI=%s " % local_build_mpi_parts
+configopts += " -DDOWNLOAD_MINICONDA=FALSE -DTRUST_SYSTEM_LIBS=TRUE "
+configopts += " -DCUDA=%s " % local_build_cuda_parts
+configopts += " -DNCCL=%s " % local_build_cuda_nccl
+configopts += " -DBLA_VENDOR=FlexiBLAS "
+configopts += " -DFORCE_EXTERNAL_LIBS='boost;netcdf;pnetcdf' "
+configopts += " -DUSE_FFT=TRUE "
+buildopts = 'NVCC_GENCODE="-gencode=arch=compute_70,code=sm_70 \
+                           -gencode=arch=compute_75,code=sm_75 \
+                           -gencode=arch=compute_80,code=sm_80"'
+
+modextravars = {
+    'AMBERHOME': '%(installdir)s/',
+}
+modextrapaths = {'PYTHONPATH': ['lib/python%(pyshortver)s/site-packages']}
+
+modluafooter = '''
+add_property("arch","gpu")
+'''
+
+group = "amber"
+
+modloadmsg = '''
+
+Info: (1) Check the loaded modules to see if loading the AMBER module
+succeeded. If it did, ignore the rest of this message. (2) If AMBER
+didn't load, one possible reason is that "amber" is not currently
+your primary group. You can temporarily change your primary group by
+typing "newgrp amber". (3) If that didn't work, you are probably
+not a member of the group "amber"; you first have to add yourself
+to that group. Visit "https://judoor.fz-juelich.de/", follow the
+link "Request access to restricted software", enable "amber" for
+your account, wait 15-20 minutes and then try "newgrp amber" again.
+
+'''
+
+moduleclass = 'bio'
diff --git a/Golden_Repo/a/AMBER/AmberTools-21_CMake-FlexiBLAS.patch b/Golden_Repo/a/AMBER/AmberTools-21_CMake-FlexiBLAS.patch
new file mode 100644
index 0000000000000000000000000000000000000000..669c7e3d3055a88d2a82d5a934d025aee421b597
--- /dev/null
+++ b/Golden_Repo/a/AMBER/AmberTools-21_CMake-FlexiBLAS.patch
@@ -0,0 +1,91 @@
+make CMake scripts that picks BLAS/LAPACK library aware of FlexiBLAS
+author: Kenneth Hoste (HPC-UGent)
+--- cmake/patched-cmake-modules/FindBLASFixed.cmake.orig	2022-05-04 18:53:42.410384491 +0200
++++ cmake/patched-cmake-modules/FindBLASFixed.cmake	2022-05-04 18:54:39.034612008 +0200
+@@ -36,7 +36,7 @@
+ #   BLA_F95     if set on tries to find the f95 interfaces for BLAS/LAPACK
+ #
+ # ######### ## List of vendors (BLA_VENDOR) valid in this module #
+-# Goto,OpenBLAS,ATLAS PhiPACK,CXML,DXML,SunPerf,SCSL,SGIMATH,IBMESSL,
++# Goto,OpenBLAS,FlexiBLAS ATLAS PhiPACK,CXML,DXML,SunPerf,SCSL,SGIMATH,IBMESSL,
+ # ACML,ACML_MP,ACML_GPU,Apple, NAS, Generic
+ 
+ include(CMakePushCheckState)
+@@ -49,8 +49,8 @@
+ 	set(BLA_VENDOR_DEFAULT "All")
+ endif ()
+ 
+-set(BLA_VENDOR ${BLA_VENDOR_DEFAULT} CACHE STRING "BLAS vendor to use for BLAS and Lapack.  Valid values: All, Goto, OpenBLAS, ATLAS, PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
+-validate_configuration_enum(BLA_VENDOR  All Goto OpenBLAS ATLAS PhiPACK CXML DXML SunPerf SCSL SGIMATH IBMESSL ACML ACML_MP ACML_GPU Apple NAS Generic)
++set(BLA_VENDOR ${BLA_VENDOR_DEFAULT} CACHE STRING "BLAS vendor to use for BLAS and Lapack.  Valid values: All, Goto, OpenBLAS, FlexiBLAS, ATLAS, PhiPACK, CXML, DXML, SunPerf, SCSL, SGIMATH, IBMESSL, ACML, ACML_MP, ACML_GPU, Apple, NAS, Generic")
++validate_configuration_enum(BLA_VENDOR  All Goto OpenBLAS FlexiBLAS ATLAS PhiPACK CXML DXML SunPerf SCSL SGIMATH IBMESSL ACML ACML_MP ACML_GPU Apple NAS Generic)
+ 
+ if(DEFINED BLAS_FIND_QUIETLY)
+ 	set(CMAKE_REQUIRED_QUIET ${BLAS_FIND_QUIETLY})
+@@ -159,6 +159,20 @@
+  endif()
+ endif ()
+ 
++if (BLA_VENDOR STREQUAL "FlexiBLAS" OR BLA_VENDOR STREQUAL "All")
++ if(NOT BLAS_LIBRARIES)
++	 # FlexiBLAS (https://github.com/mpimd-csc/flexiblas)
++	check_fortran_libraries(
++	BLAS_LIBRARIES
++	BLAS
++	sgemm
++	""
++	"flexiblas"
++	""
++	)
++ endif()
++endif ()
++
+ if (BLA_VENDOR STREQUAL "OpenBLAS" OR BLA_VENDOR STREQUAL "All")
+  if(NOT BLAS_LIBRARIES)
+ 	# OpenBLAS (http://www.openblas.net)
+@@ -524,4 +538,4 @@
+ endif()
+ 
+ cmake_pop_check_state()
+-set(CMAKE_FIND_LIBRARY_SUFFIXES ${_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+\ No newline at end of file
++set(CMAKE_FIND_LIBRARY_SUFFIXES ${_blas_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+--- cmake/patched-cmake-modules/FindLAPACKFixed.cmake.orig	2022-05-04 18:56:14.315407989 +0200
++++ cmake/patched-cmake-modules/FindLAPACKFixed.cmake	2022-05-04 18:56:39.930772703 +0200
+@@ -36,7 +36,7 @@
+ #   BLA_F95     if set on tries to find the f95 interfaces for BLAS/LAPACK
+ #
+ # ## List of vendors (BLA_VENDOR) valid in this module:
+-# OpenBLAS, ACML,Apple, NAS, Generic
++# OpenBLAS, FlexiBLAS, ACML,Apple, NAS, Generic
+ 
+ set(_lapack_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+ 
+@@ -173,6 +173,20 @@
+ 		endif()
+ 	endif ()
+ 	
++	if(BLA_VENDOR STREQUAL "FlexiBLAS" OR BLA_VENDOR STREQUAL "All")
++		if(NOT LAPACK_LIBRARIES)
++			check_lapack_libraries(
++			LAPACK_LIBRARIES
++			LAPACK
++			cheev
++			""
++			"flexiblas"
++			"${BLAS_LIBRARIES}"
++			""
++			)
++		endif()
++	endif()
++	
+ 	if(BLA_VENDOR STREQUAL "OpenBLAS" OR BLA_VENDOR STREQUAL "All")
+ 		if(NOT LAPACK_LIBRARIES)
+ 			check_lapack_libraries(
+@@ -289,4 +303,4 @@
+ endif()
+ 
+ cmake_pop_check_state()
+-set(CMAKE_FIND_LIBRARY_SUFFIXES ${_lapack_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
+\ No newline at end of file
++set(CMAKE_FIND_LIBRARY_SUFFIXES ${_lapack_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES})
diff --git a/Golden_Repo/a/AMBER/pmemd_xray_non_bulk_no_implicit_type.patch b/Golden_Repo/a/AMBER/pmemd_xray_non_bulk_no_implicit_type.patch
new file mode 100644
index 0000000000000000000000000000000000000000..834c94690589257f847944e804853b5aa01c6e1e
--- /dev/null
+++ b/Golden_Repo/a/AMBER/pmemd_xray_non_bulk_no_implicit_type.patch
@@ -0,0 +1,19 @@
+diff -ruN amber22_src/src/pmemd/src/xray/src/xray_non_bulk_impl_cpu.F90 amber22_src1/src/pmemd/src/xray/src/xray_non_bulk_impl_cpu.F90
+--- amber22_src/src/pmemd/src/xray/src/xray_non_bulk_impl_cpu.F90	2022-04-09 03:11:46.000000000 +0200
++++ amber22_src1/src/pmemd/src/xray/src/xray_non_bulk_impl_cpu.F90	2023-08-25 20:30:12.828529000 +0200
+@@ -90,7 +90,7 @@
+     call check_precondition(size(frac, 2) == size(scatter_type_index))
+     call check_precondition(size(hkl, 2) == size(atomic_scatter_factor, 1))
+     
+-    !$omp parallel do private(ihkl,i,f,angle)
++    !$omp parallel do private(ihkl,f,angle)
+     do ihkl = 1, size(hkl, 2)
+       
+       ! Fhkl = SUM( fj * exp(2 * M_PI * i * (h * xj + k * yj + l * zj)) ),
+@@ -127,4 +127,4 @@
+   end subroutine calc_f_non_bulk
+ 
+ 
+-end module xray_non_bulk_impl_cpu_module
+\ No newline at end of file
++end module xray_non_bulk_impl_cpu_module
diff --git a/Golden_Repo/a/Advisor/Advisor-2023.0.0.eb b/Golden_Repo/a/Advisor/Advisor-2023.0.0.eb
index 187fcafa1806ed28a940d4caef54cd832a603389..e0123c4fd3db076fdaaf22f080f0ec859a1805ff 100644
--- a/Golden_Repo/a/Advisor/Advisor-2023.0.0.eb
+++ b/Golden_Repo/a/Advisor/Advisor-2023.0.0.eb
@@ -17,6 +17,11 @@ checksums = ['5d8ef163f70ee3dc42b13642f321d974f49915d55914ba1ca9177ed29b100b9d']
 
 dontcreateinstalldir = True
 
+modextrapaths = {
+    'PATH': '%(namelower)s/%(version)s/bin64',
+    'MANPATH': '%(namelower)s/%(version)s/man'
+}
+
 sanity_check_paths = {
     'files': ['%(namelower)s/%(version)s/bin64/advisor'],
     'dirs': ['%(namelower)s/%(version)s/bin64',
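The modextrapaths entries added above are relative to the install prefix; since the Intel installer nests everything under %(namelower)s/%(version)s, the module has to point PATH and MANPATH into that subtree. Roughly what the module generator emits, for a hypothetical prefix (illustrative):

    import os

    installdir = '/p/software/advisor/2023.0.0'   # hypothetical install prefix
    modextrapaths = {'PATH': 'advisor/2023.0.0/bin64', 'MANPATH': 'advisor/2023.0.0/man'}
    for var, subdir in modextrapaths.items():
        # Lua module syntax; the real generator also handles templating and lists
        print('prepend_path("%s", "%s")' % (var, os.path.join(installdir, subdir)))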
diff --git a/Golden_Repo/a/Autoconf/Autoconf-2.71.eb b/Golden_Repo/a/Autoconf/Autoconf-2.71.eb
new file mode 100644
index 0000000000000000000000000000000000000000..b562522d9877fa7ff92dc74b2d12a24009db02fb
--- /dev/null
+++ b/Golden_Repo/a/Autoconf/Autoconf-2.71.eb
@@ -0,0 +1,32 @@
+##
+# Author:    Robert Mijakovic <robert.mijakovic@lxp.lu>
+##
+easyblock = 'ConfigureMake'
+
+name = 'Autoconf'
+version = '2.71'
+
+homepage = 'https://www.gnu.org/software/autoconf/'
+description = """Autoconf is an extensible package of M4 macros that produce shell scripts
+ to automatically configure software source code packages. These scripts can adapt the
+ packages to many kinds of UNIX-like systems without manual user intervention. Autoconf
+ creates a configuration script for a package from a template file that lists the
+ operating system features that the package can use, in the form of M4 macro calls."""
+
+toolchain = SYSTEM
+
+source_urls = [GNU_SOURCE]
+sources = [SOURCELOWER_TAR_XZ]
+checksums = ['f14c83cfebcc9427f2c3cea7258bd90df972d92eb26752da4ddad81c87a0faa4']
+
+dependencies = [('M4', '1.4.19')]
+
+sanity_check_paths = {
+    'files': ["bin/%s" % x for x in ["autoconf", "autoheader", "autom4te", "autoreconf", "autoscan",
+                                     "autoupdate", "ifnames"]],
+    'dirs': [],
+}
+
+hidden = True
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/a/Automake/Automake-1.16.5.eb b/Golden_Repo/a/Automake/Automake-1.16.5.eb
new file mode 100644
index 0000000000000000000000000000000000000000..d06ea66d141bf21870416cf1ca933bb5ffea7017
--- /dev/null
+++ b/Golden_Repo/a/Automake/Automake-1.16.5.eb
@@ -0,0 +1,44 @@
+easyblock = 'ConfigureMake'
+
+name = 'Automake'
+version = '1.16.5'
+
+homepage = 'https://www.gnu.org/software/automake/automake.html'
+
+description = "Automake: GNU Standards-compliant Makefile generator"
+
+toolchain = SYSTEM
+
+source_urls = [GNU_SOURCE]
+sources = [SOURCELOWER_TAR_GZ]
+patches = ['Automake-1.16.5_fix-help2man-error.patch']
+checksums = [
+    {'automake-1.16.5.tar.gz': '07bd24ad08a64bc17250ce09ec56e921d6343903943e99ccf63bbf0705e34605'},
+    {'Automake-1.16.5_fix-help2man-error.patch': 'ebcd629aefcf6b7dbb3bc3a8abcdf71d4f7605ecda6c6eae2f93d73271df6930'},
+]
+
+dependencies = [
+    ('Autoconf', '2.71'),
+]
+
+osdependencies = [
+    # Thread::Queue is already available by default in Ubuntu
+    # providing a package that is automatically installed as workaround
+    ('perl-Thread-Queue', 'debianutils'),
+]
+
+preconfigopts = "export PERL='/usr/bin/env perl' && "
+
+sanity_check_paths = {
+    'files': ['bin/aclocal', 'bin/automake'],
+    'dirs': []
+}
+
+sanity_check_commands = [
+    "aclocal --help",
+    "automake --help",
+]
+
+hidden = True
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/a/Automake/Automake-1.16.5_fix-help2man-error.patch b/Golden_Repo/a/Automake/Automake-1.16.5_fix-help2man-error.patch
new file mode 100644
index 0000000000000000000000000000000000000000..396cf962b88348b931b5d0a170188c3f06128979
--- /dev/null
+++ b/Golden_Repo/a/Automake/Automake-1.16.5_fix-help2man-error.patch
@@ -0,0 +1,15 @@
+fix for:
+  help2man: can't get --help info from automake-1.16
+
+based on https://github.com/xbmc/xbmc/pull/18584
+--- a/Makefile.in	2020-03-16 19:11:10.000000000 -0700
++++ b/Makefile.in	2020-10-22 08:06:24.606751367 -0700
+@@ -699,7 +699,7 @@
+ update_mans = \
+   $(AM_V_GEN): \
+     && $(MKDIR_P) doc \
+-    && ./pre-inst-env $(PERL) $(srcdir)/doc/help2man --output=$@
++    && ./pre-inst-env $(PERL) $(srcdir)/doc/help2man --output=$@ --no-discard-stderr
+ 
+ amhello_sources = \
+   doc/amhello/configure.ac \
diff --git a/Golden_Repo/a/Autotools/Autotools-20220317.eb b/Golden_Repo/a/Autotools/Autotools-20220317.eb
new file mode 100644
index 0000000000000000000000000000000000000000..b45786412f1fae88af357c43d0ce094d6db1757e
--- /dev/null
+++ b/Golden_Repo/a/Autotools/Autotools-20220317.eb
@@ -0,0 +1,23 @@
+easyblock = 'Bundle'
+
+name = 'Autotools'
+version = '20220317'  # date of the most recent change
+
+homepage = 'https://autotools.io'
+
+description = """
+ This bundle collect the standard GNU build tools: Autoconf, Automake
+ and libtool
+"""
+
+toolchain = SYSTEM
+
+dependencies = [
+    ('Autoconf', '2.71'),    # 20210128
+    ('Automake', '1.16.5'),  # 20211003
+    ('libtool', '2.4.7'),    # 20220317
+]
+
+hidden = True
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/c/CMake/CMake-3.26.3-GCCcore-11.3.0.eb b/Golden_Repo/c/CMake/CMake-3.26.3-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..62315100324b8ac31088c58532dab7ecbe074489
--- /dev/null
+++ b/Golden_Repo/c/CMake/CMake-3.26.3-GCCcore-11.3.0.eb
@@ -0,0 +1,30 @@
+name = 'CMake'
+version = '3.26.3'
+
+homepage = 'https://www.cmake.org'
+
+description = """
+ CMake, the cross-platform, open-source build system.  CMake is a family of
+ tools designed to build, test and package software.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+source_urls = ['https://www.cmake.org/files/v%(version_major_minor)s']
+sources = [SOURCELOWER_TAR_GZ]
+checksums = ['bbd8d39217509d163cb544a40d6428ac666ddc83e22905d3e52c925781f0f659']
+
+builddependencies = [
+    ('binutils', '2.38'),
+]
+
+dependencies = [
+    ('ncurses', '6.3'),
+    ('zlib', '1.2.12'),
+    ('bzip2', '1.0.8'),
+    ('cURL', '7.83.0'),
+    ('libarchive', '3.6.1'),
+    ('OpenSSL', '1.1', '', True),
+]
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/c/CPMD/CPMD-4.3-intel-para-2022a_MIT.eb b/Golden_Repo/c/CPMD/CPMD-4.3-intel-para-2022a_MIT.eb
new file mode 100644
index 0000000000000000000000000000000000000000..3085814b6d28470fcfc7ead35a9a0db02212c346
--- /dev/null
+++ b/Golden_Repo/c/CPMD/CPMD-4.3-intel-para-2022a_MIT.eb
@@ -0,0 +1,58 @@
+name = 'CPMD'
+version = '4.3'
+versionsuffix = '_MIT'
+
+homepage = 'https://github.com/CPMD-code'
+description = """The CPMD code is a parallelized plane wave / pseudopotential
+implementation of Density Functional Theory, particularly designed for
+ab-initio molecular dynamics. This is version 4.3 released under MIT licence in January 2023.
+"""
+
+toolchain = {'name': 'intel-para', 'version': '2022a'}
+toolchainopts = {'usempi': True}
+
+sources = [
+    {'download_filename': 'archive/refs/tags/%(version)s.tar.gz', 'filename': 'cpmd-4.3MIT.tar.gz'},
+    'cpmd4.3_manual.pdf',
+]
+
+source_urls = [
+    'https://github.com/CPMD-code/CPMD',
+]
+
+patches = [
+    'cppflags.patch',
+    '%(namelower)s-v%(version)s-config.patch'
+]
+
+checksums = [
+    'e0290f9da0d255f90a612e60662b14a97ca53003f89073c6af84fa7bc8739f65',
+    '2bfe01db05df1cb21cc8eae500da92b7744c786beeef25e6b2c86116ffc2e135',
+    '36c57801d5643c5e07f81ce7d4e973ae2e3100fb61220bccbbe4de3629c20d8c',
+    '45719bf7ca0c567c9c78b3f23201976fceda565d47fea2d1bc998b72fdc53caa',
+]
+
+# the cleanup that cpmd.py applies to the results of the configure step
+# is rather error-prone
+
+preconfigopts = 'pwd && chmod u+x scripts/configure.sh && '
+
+prefix_opt = '-DEST='
+
+MIT = True
+
+postinstallcmds = [
+    'rm -rf %(installdir)s/obj',
+    'mkdir %(installdir)s/doc',
+    'cp %(builddir)s/cpmd4.3_manual.pdf %(installdir)s/doc'
+]
+
+
+sanity_check_paths = {
+    'files': ['bin/cpmd.x', 'lib/libcpmd.a'],
+    'dirs': ['bin', 'lib'],
+}
+
+modloadmsg = 'MPI version: cpmd.x\n'
+
+moduleclass = 'chem'
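How this easyconfig ties into the cpmd.py changes above: MIT = True makes the easyblock look for the versioned unpack directory, and prefix_opt is prepended to the install directory on the configure command line. A rough sketch under those assumptions (illustrative, not the easyblock itself):

    import os

    builddir, version, mit = '/tmp/build', '4.3', True    # hypothetical values
    cpmdname = 'CPMD-%s' % version if mit else 'CPMD'
    print(os.path.join(builddir, cpmdname, 'configure'))  # /tmp/build/CPMD-4.3/configure

    installdir = '/p/software/cpmd/4.3_MIT'               # hypothetical install prefix
    print('./scripts/configure.sh -DEST=%s <base-config>' % installdir)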
diff --git a/Golden_Repo/c/Colmap/Colmap-3.8-gcccoremkl-11.3.0-2022.1.0.eb b/Golden_Repo/c/Colmap/Colmap-3.8-gcccoremkl-11.3.0-2022.1.0.eb
index 1bea63bc85f7c8367e0e92af2092008e8fb1e452..2781a05b1cf5ad1488fcb63785f738cf07fd9d1b 100644
--- a/Golden_Repo/c/Colmap/Colmap-3.8-gcccoremkl-11.3.0-2022.1.0.eb
+++ b/Golden_Repo/c/Colmap/Colmap-3.8-gcccoremkl-11.3.0-2022.1.0.eb
@@ -14,7 +14,11 @@ toolchainopts = {'cstd': 'c++14'}
 github_account = 'colmap'
 source_urls = [GITHUB_SOURCE]
 sources = ['%(version)s.tar.gz']
-checksums = ['02288f8f61692fe38049d65608ed832b31246e7792692376afb712fa4cef8775']
+patches = ['unpatch-5695733.patch']
+checksums = [
+    {'3.8.tar.gz': '02288f8f61692fe38049d65608ed832b31246e7792692376afb712fa4cef8775'},
+    {'unpatch-5695733.patch': '37aad48aa547f412d9d8be050b7032c168d8512cda995ac773dce1b70772156b'},
+]
 
 builddependencies = [
     ('binutils', '2.38'),
@@ -35,12 +39,12 @@ dependencies = [
     ('CUDA', '11.7', '', SYSTEM),
     ('OpenGL', '2022a'),
     ('Qt5', '5.15.5'),
+    ('tbb', '2021.5.0'),
 ]
 
-configopts = "-DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF "
+configopts = "-DBUILD_TESTING=OFF "
 configopts += "-DCUDA_ENABLED=ON "
 configopts += "-DCMAKE_CUDA_ARCHITECTURES=all-major "
-configopts += "-DCUDA_NVCC_FLAGS='--std c++14' "
 
 sanity_check_paths = {
     'files': ['bin/colmap', 'lib/colmap/libcolmap.a'],
diff --git a/Golden_Repo/c/Colmap/unpatch-5695733.patch b/Golden_Repo/c/Colmap/unpatch-5695733.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c9467622f8034041fe754c42f04a120f4399c24e
--- /dev/null
+++ b/Golden_Repo/c/Colmap/unpatch-5695733.patch
@@ -0,0 +1,3011 @@
+diff -Naur colmap-3.8.orig/src/feature/extraction.cc colmap-3.8/src/feature/extraction.cc
+--- colmap-3.8.orig/src/feature/extraction.cc	2023-01-31 16:18:47.000000000 +0100
++++ colmap-3.8/src/feature/extraction.cc	2023-08-19 09:24:47.427261849 +0200
+@@ -212,9 +212,9 @@
+     }
+ 
+     if (sift_options_.max_image_size > 0) {
+-      CHECK(resizer_queue_->Push(std::move(image_data)));
++      CHECK(resizer_queue_->Push(image_data));
+     } else {
+-      CHECK(extractor_queue_->Push(std::move(image_data)));
++      CHECK(extractor_queue_->Push(image_data));
+     }
+   }
+ 
+@@ -316,9 +316,9 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& image_data = input_job.Data();
++      auto image_data = input_job.Data();
+ 
+       if (image_data.status == ImageReader::Status::SUCCESS) {
+         if (static_cast<int>(image_data.bitmap.Width()) > max_image_size_ ||
+@@ -336,7 +336,7 @@
+         }
+       }
+ 
+-      output_queue_->Push(std::move(image_data));
++      output_queue_->Push(image_data);
+     } else {
+       break;
+     }
+@@ -383,9 +383,9 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& image_data = input_job.Data();
++      auto image_data = input_job.Data();
+ 
+       if (image_data.status == ImageReader::Status::SUCCESS) {
+         bool success = false;
+@@ -421,7 +421,7 @@
+ 
+       image_data.bitmap.Deallocate();
+ 
+-      output_queue_->Push(std::move(image_data));
++      output_queue_->Push(image_data);
+     } else {
+       break;
+     }
+diff -Naur colmap-3.8.orig/src/feature/extraction.cc.orig colmap-3.8/src/feature/extraction.cc.orig
+--- colmap-3.8.orig/src/feature/extraction.cc.orig	1970-01-01 01:00:00.000000000 +0100
++++ colmap-3.8/src/feature/extraction.cc.orig	2023-01-31 16:18:47.000000000 +0100
+@@ -0,0 +1,531 @@
++// Copyright (c) 2023, ETH Zurich and UNC Chapel Hill.
++// All rights reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are met:
++//
++//     * Redistributions of source code must retain the above copyright
++//       notice, this list of conditions and the following disclaimer.
++//
++//     * Redistributions in binary form must reproduce the above copyright
++//       notice, this list of conditions and the following disclaimer in the
++//       documentation and/or other materials provided with the distribution.
++//
++//     * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
++//       its contributors may be used to endorse or promote products derived
++//       from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++// POSSIBILITY OF SUCH DAMAGE.
++//
++// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
++
++#include "feature/extraction.h"
++
++#include <numeric>
++
++#include "SiftGPU/SiftGPU.h"
++#include "feature/sift.h"
++#include "util/cuda.h"
++#include "util/misc.h"
++
++namespace colmap {
++namespace {
++
++void ScaleKeypoints(const Bitmap& bitmap, const Camera& camera,
++                    FeatureKeypoints* keypoints) {
++  if (static_cast<size_t>(bitmap.Width()) != camera.Width() ||
++      static_cast<size_t>(bitmap.Height()) != camera.Height()) {
++    const float scale_x = static_cast<float>(camera.Width()) / bitmap.Width();
++    const float scale_y = static_cast<float>(camera.Height()) / bitmap.Height();
++    for (auto& keypoint : *keypoints) {
++      keypoint.Rescale(scale_x, scale_y);
++    }
++  }
++}
++
++void MaskKeypoints(const Bitmap& mask, FeatureKeypoints* keypoints,
++                   FeatureDescriptors* descriptors) {
++  size_t out_index = 0;
++  BitmapColor<uint8_t> color;
++  for (size_t i = 0; i < keypoints->size(); ++i) {
++    if (!mask.GetPixel(static_cast<int>(keypoints->at(i).x),
++                       static_cast<int>(keypoints->at(i).y), &color) ||
++        color.r == 0) {
++      // Delete this keypoint by not copying it to the output.
++    } else {
++      // Retain this keypoint by copying it to the output index (in case this
++      // index differs from its current position).
++      if (out_index != i) {
++        keypoints->at(out_index) = keypoints->at(i);
++        for (int col = 0; col < descriptors->cols(); ++col) {
++          (*descriptors)(out_index, col) = (*descriptors)(i, col);
++        }
++      }
++      out_index += 1;
++    }
++  }
++
++  keypoints->resize(out_index);
++  descriptors->conservativeResize(out_index, descriptors->cols());
++}
++
++}  // namespace
++
++SiftFeatureExtractor::SiftFeatureExtractor(
++    const ImageReaderOptions& reader_options,
++    const SiftExtractionOptions& sift_options)
++    : reader_options_(reader_options),
++      sift_options_(sift_options),
++      database_(reader_options_.database_path),
++      image_reader_(reader_options_, &database_) {
++  CHECK(reader_options_.Check());
++  CHECK(sift_options_.Check());
++
++  std::shared_ptr<Bitmap> camera_mask;
++  if (!reader_options_.camera_mask_path.empty()) {
++    camera_mask = std::make_shared<Bitmap>();
++    if (!camera_mask->Read(reader_options_.camera_mask_path,
++                           /*as_rgb*/ false)) {
++      std::cerr << "  ERROR: Cannot read camera mask file: "
++                << reader_options_.camera_mask_path
++                << ". No mask is going to be used." << std::endl;
++      camera_mask.reset();
++    }
++  }
++
++  const int num_threads = GetEffectiveNumThreads(sift_options_.num_threads);
++  CHECK_GT(num_threads, 0);
++
++  // Make sure that we only have limited number of objects in the queue to avoid
++  // excess in memory usage since images and features take lots of memory.
++  const int kQueueSize = 1;
++  resizer_queue_ = std::make_unique<JobQueue<internal::ImageData>>(kQueueSize);
++  extractor_queue_ =
++      std::make_unique<JobQueue<internal::ImageData>>(kQueueSize);
++  writer_queue_ = std::make_unique<JobQueue<internal::ImageData>>(kQueueSize);
++
++  if (sift_options_.max_image_size > 0) {
++    for (int i = 0; i < num_threads; ++i) {
++      resizers_.emplace_back(std::make_unique<internal::ImageResizerThread>(
++          sift_options_.max_image_size, resizer_queue_.get(),
++          extractor_queue_.get()));
++    }
++  }
++
++  if (!sift_options_.domain_size_pooling &&
++      !sift_options_.estimate_affine_shape && sift_options_.use_gpu) {
++    std::vector<int> gpu_indices = CSVToVector<int>(sift_options_.gpu_index);
++    CHECK_GT(gpu_indices.size(), 0);
++
++#ifdef CUDA_ENABLED
++    if (gpu_indices.size() == 1 && gpu_indices[0] == -1) {
++      const int num_cuda_devices = GetNumCudaDevices();
++      CHECK_GT(num_cuda_devices, 0);
++      gpu_indices.resize(num_cuda_devices);
++      std::iota(gpu_indices.begin(), gpu_indices.end(), 0);
++    }
++#endif  // CUDA_ENABLED
++
++    auto sift_gpu_options = sift_options_;
++    for (const auto& gpu_index : gpu_indices) {
++      sift_gpu_options.gpu_index = std::to_string(gpu_index);
++      extractors_.emplace_back(
++          std::make_unique<internal::SiftFeatureExtractorThread>(
++              sift_gpu_options, camera_mask, extractor_queue_.get(),
++              writer_queue_.get()));
++    }
++  } else {
++    if (sift_options_.num_threads == -1 &&
++        sift_options_.max_image_size ==
++            SiftExtractionOptions().max_image_size &&
++        sift_options_.first_octave == SiftExtractionOptions().first_octave) {
++      std::cout
++          << "WARNING: Your current options use the maximum number of "
++             "threads on the machine to extract features. Extracting SIFT "
++             "features on the CPU can consume a lot of RAM per thread for "
++             "large images. Consider reducing the maximum image size and/or "
++             "the first octave or manually limit the number of extraction "
++             "threads. Ignore this warning, if your machine has sufficient "
++             "memory for the current settings."
++          << std::endl;
++    }
++
++    auto custom_sift_options = sift_options_;
++    custom_sift_options.use_gpu = false;
++    for (int i = 0; i < num_threads; ++i) {
++      extractors_.emplace_back(
++          std::make_unique<internal::SiftFeatureExtractorThread>(
++              custom_sift_options, camera_mask, extractor_queue_.get(),
++              writer_queue_.get()));
++    }
++  }
++
++  writer_ = std::make_unique<internal::FeatureWriterThread>(
++      image_reader_.NumImages(), &database_, writer_queue_.get());
++}
++
++void SiftFeatureExtractor::Run() {
++  PrintHeading1("Feature extraction");
++
++  for (auto& resizer : resizers_) {
++    resizer->Start();
++  }
++
++  for (auto& extractor : extractors_) {
++    extractor->Start();
++  }
++
++  writer_->Start();
++
++  for (auto& extractor : extractors_) {
++    if (!extractor->CheckValidSetup()) {
++      return;
++    }
++  }
++
++  while (image_reader_.NextIndex() < image_reader_.NumImages()) {
++    if (IsStopped()) {
++      resizer_queue_->Stop();
++      extractor_queue_->Stop();
++      resizer_queue_->Clear();
++      extractor_queue_->Clear();
++      break;
++    }
++
++    internal::ImageData image_data;
++    image_data.status =
++        image_reader_.Next(&image_data.camera, &image_data.image,
++                           &image_data.bitmap, &image_data.mask);
++
++    if (image_data.status != ImageReader::Status::SUCCESS) {
++      image_data.bitmap.Deallocate();
++    }
++
++    if (sift_options_.max_image_size > 0) {
++      CHECK(resizer_queue_->Push(std::move(image_data)));
++    } else {
++      CHECK(extractor_queue_->Push(std::move(image_data)));
++    }
++  }
++
++  resizer_queue_->Wait();
++  resizer_queue_->Stop();
++  for (auto& resizer : resizers_) {
++    resizer->Wait();
++  }
++
++  extractor_queue_->Wait();
++  extractor_queue_->Stop();
++  for (auto& extractor : extractors_) {
++    extractor->Wait();
++  }
++
++  writer_queue_->Wait();
++  writer_queue_->Stop();
++  writer_->Wait();
++
++  GetTimer().PrintMinutes();
++}
++
++FeatureImporter::FeatureImporter(const ImageReaderOptions& reader_options,
++                                 const std::string& import_path)
++    : reader_options_(reader_options), import_path_(import_path) {}
++
++void FeatureImporter::Run() {
++  PrintHeading1("Feature import");
++
++  if (!ExistsDir(import_path_)) {
++    std::cerr << "  ERROR: Import directory does not exist." << std::endl;
++    return;
++  }
++
++  Database database(reader_options_.database_path);
++  ImageReader image_reader(reader_options_, &database);
++
++  while (image_reader.NextIndex() < image_reader.NumImages()) {
++    if (IsStopped()) {
++      break;
++    }
++
++    std::cout << StringPrintf("Processing file [%d/%d]",
++                              image_reader.NextIndex() + 1,
++                              image_reader.NumImages())
++              << std::endl;
++
++    // Load image data and possibly save camera to database.
++    Camera camera;
++    Image image;
++    Bitmap bitmap;
++    if (image_reader.Next(&camera, &image, &bitmap, nullptr) !=
++        ImageReader::Status::SUCCESS) {
++      continue;
++    }
++
++    const std::string path = JoinPaths(import_path_, image.Name() + ".txt");
++
++    if (ExistsFile(path)) {
++      FeatureKeypoints keypoints;
++      FeatureDescriptors descriptors;
++      LoadSiftFeaturesFromTextFile(path, &keypoints, &descriptors);
++
++      std::cout << "  Features:       " << keypoints.size() << std::endl;
++
++      DatabaseTransaction database_transaction(&database);
++
++      if (image.ImageId() == kInvalidImageId) {
++        image.SetImageId(database.WriteImage(image));
++      }
++
++      if (!database.ExistsKeypoints(image.ImageId())) {
++        database.WriteKeypoints(image.ImageId(), keypoints);
++      }
++
++      if (!database.ExistsDescriptors(image.ImageId())) {
++        database.WriteDescriptors(image.ImageId(), descriptors);
++      }
++    } else {
++      std::cout << "  SKIP: No features found at " << path << std::endl;
++    }
++  }
++
++  GetTimer().PrintMinutes();
++}
++
++namespace internal {
++
++ImageResizerThread::ImageResizerThread(const int max_image_size,
++                                       JobQueue<ImageData>* input_queue,
++                                       JobQueue<ImageData>* output_queue)
++    : max_image_size_(max_image_size),
++      input_queue_(input_queue),
++      output_queue_(output_queue) {}
++
++void ImageResizerThread::Run() {
++  while (true) {
++    if (IsStopped()) {
++      break;
++    }
++
++    auto input_job = input_queue_->Pop();
++    if (input_job.IsValid()) {
++      auto& image_data = input_job.Data();
++
++      if (image_data.status == ImageReader::Status::SUCCESS) {
++        if (static_cast<int>(image_data.bitmap.Width()) > max_image_size_ ||
++            static_cast<int>(image_data.bitmap.Height()) > max_image_size_) {
++          // Fit the down-sampled version exactly into the max dimensions.
++          const double scale =
++              static_cast<double>(max_image_size_) /
++              std::max(image_data.bitmap.Width(), image_data.bitmap.Height());
++          const int new_width =
++              static_cast<int>(image_data.bitmap.Width() * scale);
++          const int new_height =
++              static_cast<int>(image_data.bitmap.Height() * scale);
++
++          image_data.bitmap.Rescale(new_width, new_height);
++        }
++      }
++
++      output_queue_->Push(std::move(image_data));
++    } else {
++      break;
++    }
++  }
++}
++
++SiftFeatureExtractorThread::SiftFeatureExtractorThread(
++    const SiftExtractionOptions& sift_options,
++    const std::shared_ptr<Bitmap>& camera_mask,
++    JobQueue<ImageData>* input_queue, JobQueue<ImageData>* output_queue)
++    : sift_options_(sift_options),
++      camera_mask_(camera_mask),
++      input_queue_(input_queue),
++      output_queue_(output_queue) {
++  CHECK(sift_options_.Check());
++
++#ifndef CUDA_ENABLED
++  if (sift_options_.use_gpu) {
++    opengl_context_ = std::make_unique<OpenGLContextManager>();
++  }
++#endif
++}
++
++void SiftFeatureExtractorThread::Run() {
++  std::unique_ptr<SiftGPU> sift_gpu;
++  if (sift_options_.use_gpu) {
++#ifndef CUDA_ENABLED
++    CHECK(opengl_context_);
++    CHECK(opengl_context_->MakeCurrent());
++#endif
++
++    sift_gpu = std::make_unique<SiftGPU>();
++    if (!CreateSiftGPUExtractor(sift_options_, sift_gpu.get())) {
++      std::cerr << "ERROR: SiftGPU not fully supported." << std::endl;
++      SignalInvalidSetup();
++      return;
++    }
++  }
++
++  SignalValidSetup();
++
++  while (true) {
++    if (IsStopped()) {
++      break;
++    }
++
++    auto input_job = input_queue_->Pop();
++    if (input_job.IsValid()) {
++      auto& image_data = input_job.Data();
++
++      if (image_data.status == ImageReader::Status::SUCCESS) {
++        bool success = false;
++        if (sift_options_.estimate_affine_shape ||
++            sift_options_.domain_size_pooling) {
++          success = ExtractCovariantSiftFeaturesCPU(
++              sift_options_, image_data.bitmap, &image_data.keypoints,
++              &image_data.descriptors);
++        } else if (sift_options_.use_gpu) {
++          success = ExtractSiftFeaturesGPU(
++              sift_options_, image_data.bitmap, sift_gpu.get(),
++              &image_data.keypoints, &image_data.descriptors);
++        } else {
++          success = ExtractSiftFeaturesCPU(sift_options_, image_data.bitmap,
++                                           &image_data.keypoints,
++                                           &image_data.descriptors);
++        }
++        if (success) {
++          ScaleKeypoints(image_data.bitmap, image_data.camera,
++                         &image_data.keypoints);
++          if (camera_mask_) {
++            MaskKeypoints(*camera_mask_, &image_data.keypoints,
++                          &image_data.descriptors);
++          }
++          if (image_data.mask.Data()) {
++            MaskKeypoints(image_data.mask, &image_data.keypoints,
++                          &image_data.descriptors);
++          }
++        } else {
++          image_data.status = ImageReader::Status::FAILURE;
++        }
++      }
++
++      image_data.bitmap.Deallocate();
++
++      output_queue_->Push(std::move(image_data));
++    } else {
++      break;
++    }
++  }
++}
++
++FeatureWriterThread::FeatureWriterThread(const size_t num_images,
++                                         Database* database,
++                                         JobQueue<ImageData>* input_queue)
++    : num_images_(num_images), database_(database), input_queue_(input_queue) {}
++
++void FeatureWriterThread::Run() {
++  size_t image_index = 0;
++  while (true) {
++    if (IsStopped()) {
++      break;
++    }
++
++    auto input_job = input_queue_->Pop();
++    if (input_job.IsValid()) {
++      auto& image_data = input_job.Data();
++
++      image_index += 1;
++
++      std::cout << StringPrintf("Processed file [%d/%d]", image_index,
++                                num_images_)
++                << std::endl;
++
++      std::cout << StringPrintf("  Name:            %s",
++                                image_data.image.Name().c_str())
++                << std::endl;
++
++      if (image_data.status == ImageReader::Status::IMAGE_EXISTS) {
++        std::cout << "  SKIP: Features for image already extracted."
++                  << std::endl;
++      } else if (image_data.status == ImageReader::Status::BITMAP_ERROR) {
++        std::cout << "  ERROR: Failed to read image file format." << std::endl;
++      } else if (image_data.status ==
++                 ImageReader::Status::CAMERA_SINGLE_DIM_ERROR) {
++        std::cout << "  ERROR: Single camera specified, "
++                     "but images have different dimensions."
++                  << std::endl;
++      } else if (image_data.status ==
++                 ImageReader::Status::CAMERA_EXIST_DIM_ERROR) {
++        std::cout << "  ERROR: Image previously processed, but current image "
++                     "has different dimensions."
++                  << std::endl;
++      } else if (image_data.status == ImageReader::Status::CAMERA_PARAM_ERROR) {
++        std::cout << "  ERROR: Camera has invalid parameters." << std::endl;
++      } else if (image_data.status == ImageReader::Status::FAILURE) {
++        std::cout << "  ERROR: Failed to extract features." << std::endl;
++      }
++
++      if (image_data.status != ImageReader::Status::SUCCESS) {
++        continue;
++      }
++
++      std::cout << StringPrintf("  Dimensions:      %d x %d",
++                                image_data.camera.Width(),
++                                image_data.camera.Height())
++                << std::endl;
++      std::cout << StringPrintf("  Camera:          #%d - %s",
++                                image_data.camera.CameraId(),
++                                image_data.camera.ModelName().c_str())
++                << std::endl;
++      std::cout << StringPrintf("  Focal Length:    %.2fpx",
++                                image_data.camera.MeanFocalLength());
++      if (image_data.camera.HasPriorFocalLength()) {
++        std::cout << " (Prior)" << std::endl;
++      } else {
++        std::cout << std::endl;
++      }
++      if (image_data.image.HasTvecPrior()) {
++        std::cout << StringPrintf(
++                         "  GPS:             LAT=%.3f, LON=%.3f, ALT=%.3f",
++                         image_data.image.TvecPrior(0),
++                         image_data.image.TvecPrior(1),
++                         image_data.image.TvecPrior(2))
++                  << std::endl;
++      }
++      std::cout << StringPrintf("  Features:        %d",
++                                image_data.keypoints.size())
++                << std::endl;
++
++      DatabaseTransaction database_transaction(database_);
++
++      if (image_data.image.ImageId() == kInvalidImageId) {
++        image_data.image.SetImageId(database_->WriteImage(image_data.image));
++      }
++
++      if (!database_->ExistsKeypoints(image_data.image.ImageId())) {
++        database_->WriteKeypoints(image_data.image.ImageId(),
++                                  image_data.keypoints);
++      }
++
++      if (!database_->ExistsDescriptors(image_data.image.ImageId())) {
++        database_->WriteDescriptors(image_data.image.ImageId(),
++                                    image_data.descriptors);
++      }
++    } else {
++      break;
++    }
++  }
++}
++
++}  // namespace internal
++}  // namespace colmap
+diff -Naur colmap-3.8.orig/src/feature/matching.cc colmap-3.8/src/feature/matching.cc
+--- colmap-3.8.orig/src/feature/matching.cc	2023-01-31 16:18:47.000000000 +0100
++++ colmap-3.8/src/feature/matching.cc	2023-08-19 09:24:47.428261855 +0200
+@@ -118,7 +118,7 @@
+     visual_index->Query(query_options, keypoints, descriptors,
+                         &retrieval.image_scores);
+ 
+-    CHECK(retrieval_queue.Push(std::move(retrieval)));
++    CHECK(retrieval_queue.Push(retrieval));
+   };
+ 
+   // Initially, make all retrieval threads busy and continue with the matching.
+@@ -151,7 +151,7 @@
+     }
+ 
+     // Pop the next results from the retrieval queue.
+-    auto retrieval = retrieval_queue.Pop();
++    const auto retrieval = retrieval_queue.Pop();
+     CHECK(retrieval.IsValid());
+ 
+     const auto& image_id = retrieval.Data().image_id;
+@@ -363,13 +363,13 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& data = input_job.Data();
++      auto data = input_job.Data();
+ 
+       if (!cache_->ExistsDescriptors(data.image_id1) ||
+           !cache_->ExistsDescriptors(data.image_id2)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -378,7 +378,7 @@
+       MatchSiftFeaturesCPU(options_, *descriptors1, *descriptors2,
+                            &data.matches);
+ 
+-      CHECK(output_queue_->Push(std::move(data)));
++      CHECK(output_queue_->Push(data));
+     }
+   }
+ }
+@@ -420,13 +420,13 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& data = input_job.Data();
++      auto data = input_job.Data();
+ 
+       if (!cache_->ExistsDescriptors(data.image_id1) ||
+           !cache_->ExistsDescriptors(data.image_id2)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -437,7 +437,7 @@
+       MatchSiftFeaturesGPU(options_, descriptors1_ptr, descriptors2_ptr,
+                            &sift_match_gpu, &data.matches);
+ 
+-      CHECK(output_queue_->Push(std::move(data)));
++      CHECK(output_queue_->Push(data));
+     }
+   }
+ }
+@@ -473,13 +473,13 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& data = input_job.Data();
++      auto data = input_job.Data();
+ 
+       if (data.two_view_geometry.inlier_matches.size() <
+           static_cast<size_t>(options_.min_num_inliers)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -487,7 +487,7 @@
+           !cache_->ExistsKeypoints(data.image_id2) ||
+           !cache_->ExistsDescriptors(data.image_id1) ||
+           !cache_->ExistsDescriptors(data.image_id2)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -499,7 +499,7 @@
+                                  *descriptors1, *descriptors2,
+                                  &data.two_view_geometry);
+ 
+-      CHECK(output_queue_->Push(std::move(data)));
++      CHECK(output_queue_->Push(data));
+     }
+   }
+ }
+@@ -540,13 +540,13 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& data = input_job.Data();
++      auto data = input_job.Data();
+ 
+       if (data.two_view_geometry.inlier_matches.size() <
+           static_cast<size_t>(options_.min_num_inliers)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -554,7 +554,7 @@
+           !cache_->ExistsKeypoints(data.image_id2) ||
+           !cache_->ExistsDescriptors(data.image_id1) ||
+           !cache_->ExistsDescriptors(data.image_id2)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -569,7 +569,7 @@
+                                  descriptors1_ptr, descriptors2_ptr,
+                                  &sift_match_gpu, &data.two_view_geometry);
+ 
+-      CHECK(output_queue_->Push(std::move(data)));
++      CHECK(output_queue_->Push(data));
+     }
+   }
+ }
+@@ -621,12 +621,12 @@
+       break;
+     }
+ 
+-    auto input_job = input_queue_->Pop();
++    const auto input_job = input_queue_->Pop();
+     if (input_job.IsValid()) {
+-      auto& data = input_job.Data();
++      auto data = input_job.Data();
+ 
+       if (data.matches.size() < static_cast<size_t>(options_.min_num_inliers)) {
+-        CHECK(output_queue_->Push(std::move(data)));
++        CHECK(output_queue_->Push(data));
+         continue;
+       }
+ 
+@@ -649,7 +649,7 @@
+                                         two_view_geometry_options_);
+       }
+ 
+-      CHECK(output_queue_->Push(std::move(data)));
++      CHECK(output_queue_->Push(data));
+     }
+   }
+ }
+@@ -855,9 +855,9 @@
+     if (exists_matches) {
+       data.matches = cache_->GetMatches(image_pair.first, image_pair.second);
+       cache_->DeleteMatches(image_pair.first, image_pair.second);
+-      CHECK(verifier_queue_.Push(std::move(data)));
++      CHECK(verifier_queue_.Push(data));
+     } else {
+-      CHECK(matcher_queue_.Push(std::move(data)));
++      CHECK(matcher_queue_.Push(data));
+     }
+   }
+ 
+@@ -866,9 +866,9 @@
+   //////////////////////////////////////////////////////////////////////////////
+ 
+   for (size_t i = 0; i < num_outputs; ++i) {
+-    auto output_job = output_queue_.Pop();
++    const auto output_job = output_queue_.Pop();
+     CHECK(output_job.IsValid());
+-    auto& output = output_job.Data();
++    auto output = output_job.Data();
+ 
+     if (output.matches.size() < static_cast<size_t>(options_.min_num_inliers)) {
+       output.matches = {};
++    image_name_to_image.emplace(image.Name(), &image);
++  }
++
++  std::ifstream file(options_.match_list_path);
++  CHECK(file.is_open()) << options_.match_list_path;
++
++  std::string line;
++  while (std::getline(file, line)) {
++    if (IsStopped()) {
++      GetTimer().PrintMinutes();
++      return;
++    }
++
++    StringTrim(&line);
++    if (line.empty()) {
++      continue;
++    }
++
++    std::istringstream line_stream(line);
++
++    std::string image_name1, image_name2;
++    try {
++      line_stream >> image_name1 >> image_name2;
++    } catch (...) {
++      std::cerr << "ERROR: Could not read image pair." << std::endl;
++      break;
++    }
++
++    std::cout << StringPrintf("%s - %s", image_name1.c_str(),
++                              image_name2.c_str())
++              << std::endl;
++
++    if (image_name_to_image.count(image_name1) == 0) {
++      std::cout << StringPrintf("SKIP: Image %s not found in database.",
++                                image_name1.c_str())
++                << std::endl;
++      break;
++    }
++    if (image_name_to_image.count(image_name2) == 0) {
++      std::cout << StringPrintf("SKIP: Image %s not found in database.",
++                                image_name2.c_str())
++                << std::endl;
++      break;
++    }
++
++    const Image& image1 = *image_name_to_image[image_name1];
++    const Image& image2 = *image_name_to_image[image_name2];
++
++    bool skip_pair = false;
++    if (database_.ExistsInlierMatches(image1.ImageId(), image2.ImageId())) {
++      std::cout << "SKIP: Matches for image pair already exist in database."
++                << std::endl;
++      skip_pair = true;
++    }
++
++    FeatureMatches matches;
++    while (std::getline(file, line)) {
++      StringTrim(&line);
++
++      if (line.empty()) {
++        break;
++      }
++
++      std::istringstream line_stream(line);
++
++      FeatureMatch match;
++      try {
++        line_stream >> match.point2D_idx1 >> match.point2D_idx2;
++      } catch (...) {
++        std::cerr << "ERROR: Cannot read feature matches." << std::endl;
++        break;
++      }
++
++      matches.push_back(match);
++    }
++
++    if (skip_pair) {
++      continue;
++    }
++
++    const Camera& camera1 = cache_.GetCamera(image1.CameraId());
++    const Camera& camera2 = cache_.GetCamera(image2.CameraId());
++
++    if (options_.verify_matches) {
++      database_.WriteMatches(image1.ImageId(), image2.ImageId(), matches);
++
++      const auto keypoints1 = cache_.GetKeypoints(image1.ImageId());
++      const auto keypoints2 = cache_.GetKeypoints(image2.ImageId());
++
++      TwoViewGeometry two_view_geometry;
++      TwoViewGeometry::Options two_view_geometry_options;
++      two_view_geometry_options.min_num_inliers =
++          static_cast<size_t>(match_options_.min_num_inliers);
++      two_view_geometry_options.ransac_options.max_error =
++          match_options_.max_error;
++      two_view_geometry_options.ransac_options.confidence =
++          match_options_.confidence;
++      two_view_geometry_options.ransac_options.min_num_trials =
++          static_cast<size_t>(match_options_.min_num_trials);
++      two_view_geometry_options.ransac_options.max_num_trials =
++          static_cast<size_t>(match_options_.max_num_trials);
++      two_view_geometry_options.ransac_options.min_inlier_ratio =
++          match_options_.min_inlier_ratio;
++
++      two_view_geometry.Estimate(
++          camera1, FeatureKeypointsToPointsVector(*keypoints1), camera2,
++          FeatureKeypointsToPointsVector(*keypoints2), matches,
++          two_view_geometry_options);
++
++      database_.WriteTwoViewGeometry(image1.ImageId(), image2.ImageId(),
++                                     two_view_geometry);
++    } else {
++      TwoViewGeometry two_view_geometry;
++
++      if (camera1.HasPriorFocalLength() && camera2.HasPriorFocalLength()) {
++        two_view_geometry.config = TwoViewGeometry::CALIBRATED;
++      } else {
++        two_view_geometry.config = TwoViewGeometry::UNCALIBRATED;
++      }
++
++      two_view_geometry.inlier_matches = matches;
++
++      database_.WriteTwoViewGeometry(image1.ImageId(), image2.ImageId(),
++                                     two_view_geometry);
++    }
++  }
++
++  GetTimer().PrintMinutes();
++}
++
++}  // namespace colmap
+diff -Naur colmap-3.8.orig/src/mvs/meshing.cc colmap-3.8/src/mvs/meshing.cc
+--- colmap-3.8.orig/src/mvs/meshing.cc	2023-01-31 16:18:47.000000000 +0100
++++ colmap-3.8/src/mvs/meshing.cc	2023-08-19 09:24:47.428261855 +0200
+@@ -821,7 +821,7 @@
+       }
+     }
+ 
+-    CHECK(result_queue.Push(std::move(image_cell_graph_data)));
++    CHECK(result_queue.Push(image_cell_graph_data));
+   };
+ 
+   // Add first batch of images to the thread job queue.
+@@ -849,7 +849,7 @@
+     }
+ 
+     // Pop the next results from the queue.
+-    auto result = result_queue.Pop();
++    const auto result = result_queue.Pop();
+     CHECK(result.IsValid());
+ 
+     // Accumulate the weights of the image into the global graph.
+diff -Naur colmap-3.8.orig/src/util/threading.h colmap-3.8/src/util/threading.h
+--- colmap-3.8.orig/src/util/threading.h	2023-01-31 16:18:47.000000000 +0100
++++ colmap-3.8/src/util/threading.h	2023-08-19 09:24:47.429261861 +0200
+@@ -263,7 +263,7 @@
+   class Job {
+    public:
+     Job() : valid_(false) {}
+-    explicit Job(T data) : data_(std::move(data)), valid_(true) {}
++    explicit Job(const T& data) : data_(data), valid_(true) {}
+ 
+     // Check whether the data is valid.
+     bool IsValid() const { return valid_; }
+@@ -285,7 +285,7 @@
+   size_t Size();
+ 
+   // Push a new job to the queue. Waits if the number of jobs is exceeded.
+-  bool Push(T data);
++  bool Push(const T& data);
+ 
+   // Pop a job from the queue. Waits if there is no job in the queue.
+   Job Pop();
+@@ -361,7 +361,7 @@
+ }
+ 
+ template <typename T>
+-bool JobQueue<T>::Push(T data) {
++bool JobQueue<T>::Push(const T& data) {
+   std::unique_lock<std::mutex> lock(mutex_);
+   while (jobs_.size() >= max_num_jobs_ && !stop_) {
+     pop_condition_.wait(lock);
+@@ -369,7 +369,7 @@
+   if (stop_) {
+     return false;
+   } else {
+-    jobs_.push(std::move(data));
++    jobs_.push(data);
+     push_condition_.notify_one();
+     return true;
+   }
+@@ -384,13 +384,13 @@
+   if (stop_) {
+     return Job();
+   } else {
+-    Job job(std::move(jobs_.front()));
++    const T data = jobs_.front();
+     jobs_.pop();
+     pop_condition_.notify_one();
+     if (jobs_.empty()) {
+       empty_condition_.notify_all();
+     }
+-    return job;
++    return Job(data);
+   }
+ }
+ 
+diff -Naur colmap-3.8.orig/src/util/threading.h.orig colmap-3.8/src/util/threading.h.orig
+--- colmap-3.8.orig/src/util/threading.h.orig	1970-01-01 01:00:00.000000000 +0100
++++ colmap-3.8/src/util/threading.h.orig	2023-01-31 16:18:47.000000000 +0100
+@@ -0,0 +1,421 @@
++// Copyright (c) 2023, ETH Zurich and UNC Chapel Hill.
++// All rights reserved.
++//
++// Redistribution and use in source and binary forms, with or without
++// modification, are permitted provided that the following conditions are met:
++//
++//     * Redistributions of source code must retain the above copyright
++//       notice, this list of conditions and the following disclaimer.
++//
++//     * Redistributions in binary form must reproduce the above copyright
++//       notice, this list of conditions and the following disclaimer in the
++//       documentation and/or other materials provided with the distribution.
++//
++//     * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
++//       its contributors may be used to endorse or promote products derived
++//       from this software without specific prior written permission.
++//
++// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++// POSSIBILITY OF SUCH DAMAGE.
++//
++// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
++
++#ifndef COLMAP_SRC_UTIL_THREADING_
++#define COLMAP_SRC_UTIL_THREADING_
++
++#include <atomic>
++#include <climits>
++#include <functional>
++#include <future>
++#include <list>
++#include <queue>
++#include <unordered_map>
++#include <thread>
++
++#include "util/timer.h"
++
++namespace colmap {
++
++#ifdef __clang__
++#pragma clang diagnostic push
++#pragma clang diagnostic ignored "-Wkeyword-macro"
++#endif
++
++#ifdef __clang__
++#pragma clang diagnostic pop  // -Wkeyword-macro
++#endif
++
++// Helper class to create single threads with simple controls and timing, e.g.:
++//
++//      class MyThread : public Thread {
++//        enum {
++//          PROCESSED_CALLBACK,
++//        };
++//
++//        MyThread() { RegisterCallback(PROCESSED_CALLBACK); }
++//        void Run() {
++//          // Some setup routine... note that this is optional.
++//          if (setup_valid) {
++//            SignalValidSetup();
++//          } else {
++//            SignalInvalidSetup();
++//          }
++//
++//          // Some pre-processing...
++//          for (const auto& item : items) {
++//            BlockIfPaused();
++//            if (IsStopped()) {
++//              // Tear down...
++//              break;
++//            }
++//            // Process item...
++//            Callback(PROCESSED_CALLBACK);
++//          }
++//        }
++//      };
++//
++//      MyThread thread;
++//      thread.AddCallback(MyThread::PROCESSED_CALLBACK, []() {
++//        std::cout << "Processed item"; })
++//      thread.AddCallback(MyThread::STARTED_CALLBACK, []() {
++//        std::cout << "Start"; })
++//      thread.AddCallback(MyThread::FINISHED_CALLBACK, []() {
++//        std::cout << "Finished"; })
++//      thread.Start();
++//      // thread.CheckValidSetup();
++//      // Pause, resume, stop, ...
++//      thread.Wait();
++//      thread.Timer().PrintElapsedSeconds();
++//
++class Thread {
++ public:
++  enum {
++    STARTED_CALLBACK = INT_MIN,
++    FINISHED_CALLBACK,
++  };
++
++  Thread();
++  virtual ~Thread() = default;
++
++  // Control the state of the thread.
++  virtual void Start();
++  virtual void Stop();
++  virtual void Pause();
++  virtual void Resume();
++  virtual void Wait();
++
++  // Check the state of the thread.
++  bool IsStarted();
++  bool IsStopped();
++  bool IsPaused();
++  bool IsRunning();
++  bool IsFinished();
++
++  // To be called from inside the main run function. This blocks the main
++  // caller, if the thread is paused, until the thread is resumed.
++  void BlockIfPaused();
++
++  // To be called from outside. This blocks the caller until the thread is
++  // setup, i.e. it signaled that its setup was valid or not. If it never gives
++  // this signal, this call will block the caller infinitely. Check whether
++  // setup is valid. Note that the result is only meaningful if the thread gives
++  // a setup signal.
++  bool CheckValidSetup();
++
++  // Set callbacks that can be triggered within the main run function.
++  void AddCallback(const int id, const std::function<void()>& func);
++
++  // Get timing information of the thread, properly accounting for pause times.
++  const Timer& GetTimer() const;
++
++ protected:
++  // This is the main run function to be implemented by the child class. If you
++  // are looping over data and want to support the pause operation, call
++  // `BlockIfPaused` at appropriate places in the loop. To support the stop
++  // operation, check the `IsStopped` state and early return from this method.
++  virtual void Run() = 0;
++
++  // Register a new callback. Note that only registered callbacks can be
++  // set/reset and called from within the thread. Hence, this method should be
++  // called from the derived thread constructor.
++  void RegisterCallback(const int id);
++
++  // Call back to the function with the specified name, if it exists.
++  void Callback(const int id) const;
++
++  // Get the unique identifier of the current thread.
++  std::thread::id GetThreadId() const;
++
++  // Signal that the thread is setup. Only call this function once.
++  void SignalValidSetup();
++  void SignalInvalidSetup();
++
++ private:
++  // Wrapper around the main run function to set the finished flag.
++  void RunFunc();
++
++  std::thread thread_;
++  std::mutex mutex_;
++  std::condition_variable pause_condition_;
++  std::condition_variable setup_condition_;
++
++  Timer timer_;
++
++  bool started_;
++  bool stopped_;
++  bool paused_;
++  bool pausing_;
++  bool finished_;
++  bool setup_;
++  bool setup_valid_;
++
++  std::unordered_map<int, std::list<std::function<void()>>> callbacks_;
++};
++
++// A thread pool class to submit generic tasks (functors) to a pool of workers:
++//
++//    ThreadPool thread_pool;
++//    thread_pool.AddTask([]() { /* Do some work */ });
++//    auto future = thread_pool.AddTask([]() { /* Do some work */ return 1; });
++//    const auto result = future.get();
++//    for (int i = 0; i < 10; ++i) {
++//      thread_pool.AddTask([](const int i) { /* Do some work */ });
++//    }
++//    thread_pool.Wait();
++//
++class ThreadPool {
++ public:
++  static const int kMaxNumThreads = -1;
++
++  explicit ThreadPool(const int num_threads = kMaxNumThreads);
++  ~ThreadPool();
++
++  inline size_t NumThreads() const;
++
++  // Add new task to the thread pool.
++  template <class func_t, class... args_t>
++  auto AddTask(func_t&& f, args_t&&... args)
++      -> std::future<typename std::result_of<func_t(args_t...)>::type>;
++
++  // Stop the execution of all workers.
++  void Stop();
++
++  // Wait until tasks are finished.
++  void Wait();
++
++  // Get the unique identifier of the current thread.
++  std::thread::id GetThreadId() const;
++
++  // Get the index of the current thread. In a thread pool of size N,
++  // the thread index defines the 0-based index of the thread in the pool.
++  // In other words, there are the thread indices 0, ..., N-1.
++  int GetThreadIndex();
++
++ private:
++  void WorkerFunc(const int index);
++
++  std::vector<std::thread> workers_;
++  std::queue<std::function<void()>> tasks_;
++
++  std::mutex mutex_;
++  std::condition_variable task_condition_;
++  std::condition_variable finished_condition_;
++
++  bool stopped_;
++  int num_active_workers_;
++
++  std::unordered_map<std::thread::id, int> thread_id_to_index_;
++};
++
++// A job queue class for the producer-consumer paradigm.
++//
++//    JobQueue<int> job_queue;
++//
++//    std::thread producer_thread([&job_queue]() {
++//      for (int i = 0; i < 10; ++i) {
++//        job_queue.Push(i);
++//      }
++//    });
++//
++//    std::thread consumer_thread([&job_queue]() {
++//      for (int i = 0; i < 10; ++i) {
++//        const auto job = job_queue.Pop();
++//        if (job.IsValid()) { /* Do some work */ }
++//        else { break; }
++//      }
++//    });
++//
++//    producer_thread.join();
++//    consumer_thread.join();
++//
++template <typename T>
++class JobQueue {
++ public:
++  class Job {
++   public:
++    Job() : valid_(false) {}
++    explicit Job(T data) : data_(std::move(data)), valid_(true) {}
++
++    // Check whether the data is valid.
++    bool IsValid() const { return valid_; }
++
++    // Get reference to the data.
++    T& Data() { return data_; }
++    const T& Data() const { return data_; }
++
++   private:
++    T data_;
++    bool valid_;
++  };
++
++  JobQueue();
++  explicit JobQueue(const size_t max_num_jobs);
++  ~JobQueue();
++
++  // The number of pushed and not popped jobs in the queue.
++  size_t Size();
++
++  // Push a new job to the queue. Waits if the number of jobs is exceeded.
++  bool Push(T data);
++
++  // Pop a job from the queue. Waits if there is no job in the queue.
++  Job Pop();
++
++  // Wait for all jobs to be popped and then stop the queue.
++  void Wait();
++
++  // Stop the queue and return from all push/pop calls with false.
++  void Stop();
++
++  // Clear all pushed and not popped jobs from the queue.
++  void Clear();
++
++ private:
++  size_t max_num_jobs_;
++  std::atomic<bool> stop_;
++  std::queue<T> jobs_;
++  std::mutex mutex_;
++  std::condition_variable push_condition_;
++  std::condition_variable pop_condition_;
++  std::condition_variable empty_condition_;
++};
++
++// Return the number of logical CPU cores if num_threads <= 0,
++// otherwise return the input value of num_threads.
++int GetEffectiveNumThreads(const int num_threads);
++
++////////////////////////////////////////////////////////////////////////////////
++// Implementation
++////////////////////////////////////////////////////////////////////////////////
++
++size_t ThreadPool::NumThreads() const { return workers_.size(); }
++
++template <class func_t, class... args_t>
++auto ThreadPool::AddTask(func_t&& f, args_t&&... args)
++    -> std::future<typename std::result_of<func_t(args_t...)>::type> {
++  typedef typename std::result_of<func_t(args_t...)>::type return_t;
++
++  auto task = std::make_shared<std::packaged_task<return_t()>>(
++      std::bind(std::forward<func_t>(f), std::forward<args_t>(args)...));
++
++  std::future<return_t> result = task->get_future();
++
++  {
++    std::unique_lock<std::mutex> lock(mutex_);
++    if (stopped_) {
++      throw std::runtime_error("Cannot add task to stopped thread pool.");
++    }
++    tasks_.emplace([task]() { (*task)(); });
++  }
++
++  task_condition_.notify_one();
++
++  return result;
++}
++
++template <typename T>
++JobQueue<T>::JobQueue() : JobQueue(std::numeric_limits<size_t>::max()) {}
++
++template <typename T>
++JobQueue<T>::JobQueue(const size_t max_num_jobs)
++    : max_num_jobs_(max_num_jobs), stop_(false) {}
++
++template <typename T>
++JobQueue<T>::~JobQueue() {
++  Stop();
++}
++
++template <typename T>
++size_t JobQueue<T>::Size() {
++  std::unique_lock<std::mutex> lock(mutex_);
++  return jobs_.size();
++}
++
++template <typename T>
++bool JobQueue<T>::Push(T data) {
++  std::unique_lock<std::mutex> lock(mutex_);
++  while (jobs_.size() >= max_num_jobs_ && !stop_) {
++    pop_condition_.wait(lock);
++  }
++  if (stop_) {
++    return false;
++  } else {
++    jobs_.push(std::move(data));
++    push_condition_.notify_one();
++    return true;
++  }
++}
++
++template <typename T>
++typename JobQueue<T>::Job JobQueue<T>::Pop() {
++  std::unique_lock<std::mutex> lock(mutex_);
++  while (jobs_.empty() && !stop_) {
++    push_condition_.wait(lock);
++  }
++  if (stop_) {
++    return Job();
++  } else {
++    Job job(std::move(jobs_.front()));
++    jobs_.pop();
++    pop_condition_.notify_one();
++    if (jobs_.empty()) {
++      empty_condition_.notify_all();
++    }
++    return job;
++  }
++}
++
++template <typename T>
++void JobQueue<T>::Wait() {
++  std::unique_lock<std::mutex> lock(mutex_);
++  while (!jobs_.empty()) {
++    empty_condition_.wait(lock);
++  }
++}
++
++template <typename T>
++void JobQueue<T>::Stop() {
++  stop_ = true;
++  push_condition_.notify_all();
++  pop_condition_.notify_all();
++}
++
++template <typename T>
++void JobQueue<T>::Clear() {
++  std::unique_lock<std::mutex> lock(mutex_);
++  std::queue<T> empty_jobs;
++  std::swap(jobs_, empty_jobs);
++}
++
++}  // namespace colmap
++
++#endif  // COLMAP_SRC_UTIL_THREADING_
diff --git a/Golden_Repo/e/eudev/eudev-3.2.11-GCCcore-11.3.0.eb b/Golden_Repo/e/eudev/eudev-3.2.11-GCCcore-11.3.0.eb
index 5710764ae9d5d3c56f33b82e2b7e347ed1aee803..dbbfe278a963c7da58d470ae73548ba083b413c1 100644
--- a/Golden_Repo/e/eudev/eudev-3.2.11-GCCcore-11.3.0.eb
+++ b/Golden_Repo/e/eudev/eudev-3.2.11-GCCcore-11.3.0.eb
@@ -16,10 +16,14 @@ toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
 
 source_urls = [('https://github.com/eudev-project/eudev/archive')]
 sources = ['v%(version)s.tar.gz']
-patches = ['%(name)s-%(version)s_python3.patch']
+patches = [
+    '%(name)s-%(version)s_python3.patch',
+    '%(name)s_fix-xsltproc-path.patch',
+]
 checksums = [
     {'v3.2.11.tar.gz': 'dcfc482099f3fdfcfdb9aeabbc4d609877cf4d2d0407f50ab0c59d43fff44f92'},
     {'eudev-3.2.11_python3.patch': '846b1e72e12853c4146d3a4e312301001bbfb13110ce76de2afdf860f4d085a8'},
+    {'eudev_fix-xsltproc-path.patch': 'e1d0848c9af38e26af5773c38c7933a96471cac95b1243a1f9860a0eaa999f17'},
 ]
 
 builddependencies = [
@@ -28,11 +32,12 @@ builddependencies = [
     ('Python', '3.10.4'),
     ('Autotools', '20220317'),
     ('pkgconf', '1.8.0'),
+    ('libxslt', '1.1.34'),
 ]
 
 osdependencies = [('kernel-headers', 'linux-libc-dev')]
 
-preconfigopts = "./autogen.sh && "
+preconfigopts = 'autoreconf -f -i -s && '
 configopts = '--disable-blkid --disable-selinux --disable-manpages '
 
 runtest = 'check'
diff --git a/Golden_Repo/e/eudev/eudev_fix-xsltproc-path.patch b/Golden_Repo/e/eudev/eudev_fix-xsltproc-path.patch
new file mode 100644
index 0000000000000000000000000000000000000000..34c4b23a25e81c2d4fdbab69a7eb117c555484dc
--- /dev/null
+++ b/Golden_Repo/e/eudev/eudev_fix-xsltproc-path.patch
@@ -0,0 +1,12 @@
+diff -Naur eudev-3.2.11.orig/man/make.sh eudev-3.2.11/man/make.sh
+--- eudev-3.2.11.orig/man/make.sh       2021-12-14 19:57:01.000000000 +0000
++++ eudev-3.2.11/man/make.sh    2023-07-01 18:24:34.751268855 +0000
+@@ -2,7 +2,7 @@
+
+ set -e
+
+-XSLTPROC="/usr/bin/xsltproc"
++XSLTPROC="xsltproc"
+
+ XSLTPROC_FLAGS="--stringparam man.output.quietly 1 \
+ --stringparam funcsynopsis.style ansi \
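
The two eudev changes belong together: libxslt is added as a build dependency so xsltproc is available at build time, and the patch above drops the hard-coded /usr/bin/xsltproc so man/make.sh picks up whichever xsltproc is first on $PATH. As a minimal sketch only: the same substitution could also be applied from a custom easyblock via EasyBuild's filetools helper instead of a shipped patch (the fix_xsltproc_path helper below is hypothetical):

import os

from easybuild.tools.filetools import apply_regex_substitutions

def fix_xsltproc_path(start_dir):
    """Make man/make.sh use the xsltproc found on $PATH (from the libxslt module)."""
    apply_regex_substitutions(
        os.path.join(start_dir, 'man', 'make.sh'),
        [(r'XSLTPROC="/usr/bin/xsltproc"', 'XSLTPROC="xsltproc"')],
    )

Shipping the substitution as a patch file, as done here, keeps the change visible in the repo and under checksum control.
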
diff --git a/Golden_Repo/g/gpsmkl/gpsmkl-2022a.eb b/Golden_Repo/g/gpsmkl/gpsmkl-2022a.eb
index fff6c24e8961b5ebf09645e224923ccb2be2cacb..bc0eaa58dbf61072fd2a575f79835bf658d296c9 100644
--- a/Golden_Repo/g/gpsmkl/gpsmkl-2022a.eb
+++ b/Golden_Repo/g/gpsmkl/gpsmkl-2022a.eb
@@ -18,7 +18,7 @@ local_mkl_ver = '2022.1.0'
 # compiler toolchain dependencies
 dependencies = [
     local_compiler,
-    ('psmpi', '5.7.0-1', '', local_compiler),  # part of gpsmpi toolchain
+    ('psmpi', '5.7.1-1', '', local_compiler),  # part of gpsmpi toolchain
     ('imkl', local_mkl_ver, '', SYSTEM),
     ('imkl-FFTW', local_mkl_ver, '', local_comp_mpi_tc),
 ]
diff --git a/Golden_Repo/g/gpsmpi/gpsmpi-2022a.eb b/Golden_Repo/g/gpsmpi/gpsmpi-2022a.eb
index 41208be4cce57a0eda22b45e2f270a29ffccc745..a87cf8d856ea47f645348240a9c6fd2276f7872c 100644
--- a/Golden_Repo/g/gpsmpi/gpsmpi-2022a.eb
+++ b/Golden_Repo/g/gpsmpi/gpsmpi-2022a.eb
@@ -13,7 +13,7 @@ local_compiler = ('GCC', '11.3.0')
 
 dependencies = [
     local_compiler,
-    ('psmpi', '5.7.0-1', '', local_compiler),
+    ('psmpi', '5.7.1-1', '', local_compiler),
 ]
 
 moduleclass = 'toolchain'
diff --git a/Golden_Repo/h/hwloc/hwloc-2.7.1-GCCcore-11.3.0.eb b/Golden_Repo/h/hwloc/hwloc-2.7.1-GCCcore-11.3.0.eb
index bd30b42db4be7238f80ab6ad03830fa4ecde2f30..338e12aac6653becb1b6b70d1c22ec5e24e58f6a 100644
--- a/Golden_Repo/h/hwloc/hwloc-2.7.1-GCCcore-11.3.0.eb
+++ b/Golden_Repo/h/hwloc/hwloc-2.7.1-GCCcore-11.3.0.eb
@@ -30,10 +30,11 @@ dependencies = [
     ('numactl', '2.0.15'),
     ('libxml2', '2.9.13'),
     ('libpciaccess', '0.16'),
+    ('CUDA', '11.7', '', SYSTEM),
 ]
 
-configopts = "--enable-libnuma=$EBROOTNUMACTL "
-configopts += "--disable-cairo --disable-opencl --disable-cuda --disable-nvml --disable-gl --disable-libudev "
+configopts = "--enable-libnuma=$EBROOTNUMACTL --enable-cuda --enable-nvml --enable-opencl "
+configopts += "--disable-cairo --disable-gl --disable-libudev "
 
 sanity_check_paths = {
     'files': ['bin/lstopo', 'include/hwloc/linux.h',
diff --git a/Golden_Repo/hidden_deps.txt b/Golden_Repo/hidden_deps.txt
index cc52565cb3bc0d0eb622f665c47bd7b870643ddb..c1e86317d6b7b8adfd9410fb8e4eaf370111fe21 100644
--- a/Golden_Repo/hidden_deps.txt
+++ b/Golden_Repo/hidden_deps.txt
@@ -78,7 +78,6 @@ LittleCMS
 LuaJIT2-OpenResty
 M4
 MATIO
-MATLAB
 Mako
 NASM
 NLopt
diff --git a/Golden_Repo/i/intel-para/intel-para-2022a.eb b/Golden_Repo/i/intel-para/intel-para-2022a.eb
index 8f74fd6408f645655e79aa701ba06a56fbfe5d59..10f184ec1e809da37cc4ddc1b4da8dbeeaa9677e 100644
--- a/Golden_Repo/i/intel-para/intel-para-2022a.eb
+++ b/Golden_Repo/i/intel-para/intel-para-2022a.eb
@@ -18,7 +18,7 @@ local_comp_mpi_tc = ('ipsmpi', version)
 
 dependencies = [
     local_compiler,
-    ('psmpi', '5.7.0-1', '', local_compiler),
+    ('psmpi', '5.7.1-1', '', local_compiler),
     ('imkl', local_comp_ver, '', SYSTEM),
     ('imkl-FFTW', local_comp_ver, '', local_comp_mpi_tc),
 ]
diff --git a/Golden_Repo/i/ipsmpi/ipsmpi-2022a.eb b/Golden_Repo/i/ipsmpi/ipsmpi-2022a.eb
index 69a24d0030ea6e0d5e9ad0c84143bbedc1877266..99c7a5e95eab2fe35636ebe1e76cdf43848815fb 100644
--- a/Golden_Repo/i/ipsmpi/ipsmpi-2022a.eb
+++ b/Golden_Repo/i/ipsmpi/ipsmpi-2022a.eb
@@ -12,7 +12,7 @@ local_comp_ver = '2022.1.0'
 local_compiler = ('intel-compilers', local_comp_ver)
 dependencies = [
     local_compiler,
-    ('psmpi', '5.7.0-1', '', local_compiler),
+    ('psmpi', '5.7.1-1', '', local_compiler),
 ]
 
 moduleclass = 'toolchain'
diff --git a/Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-0.9.0-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-1.0.1-GCCcore-11.3.0-2023.3.6.eb
similarity index 63%
rename from Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-0.9.0-GCCcore-11.3.0-2023.3.6.eb
rename to Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-1.0.1-GCCcore-11.3.0-2023.3.6.eb
index a9a307a38ef19a385bf26877d54ddba87620a649..cd94f5390722c8d59b3be43cc896712fe1091a41 100644
--- a/Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-0.9.0-GCCcore-11.3.0-2023.3.6.eb
+++ b/Golden_Repo/j/JupyterExtension-jupyterai/JupyterExtension-jupyterai-1.0.1-GCCcore-11.3.0-2023.3.6.eb
@@ -1,7 +1,7 @@
 easyblock = 'PythonBundle'
 
 name = 'JupyterExtension-jupyterai'
-version = '0.9.0'
+version = '1.0.1'
 local_jupyterver = '2023.3.6'
 versionsuffix = '-' + local_jupyterver
 
@@ -41,33 +41,21 @@ exts_list = [
         'source_tmpl': '%(name)s-%(version)s-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl',
         'checksums': ['dca531952a2e3eac56f479ff22951af4715ee44788a3fe991d208d766d3f95f3'],
     }),
-    ('webcolors', '1.13', {
-        'checksums': ['c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a'],
-    }),
-    ('uri-template', '1.3.0', {
-        'checksums': ['0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7'],
-    }),
     ('typing_extensions', '4.5.0', {
         'checksums': ['5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb'],
     }),
     ('numexpr', '2.8.4', {
         'checksums': ['d5432537418d18691b9115d615d6daa17ee8275baef3edf1afbbf8bc69806147'],
     }),
-    ('jsonpointer', '2.4', {
-        'checksums': ['585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88'],
-    }),
     ('jsonpath-ng', '1.5.3', {
         'checksums': ['a273b182a82c1256daab86a313b937059261b5c5f8c4fa3fc38b882b344dd567'],
     }),
     ('importlib_metadata', '5.2.0', {
         'checksums': ['404d48d62bba0b7a77ff9d405efd91501bef2e67ff4ace0bed40a0cf28c3c7cd'],
     }),
-    ('grpcio', '1.51.3', {
+    ('grpcio', '1.56.2', {
         'modulename': False,
-        'checksums': ['be7b2265b7527bb12109a7727581e274170766d5b3c9258d4e466f4872522d7a'],
-    }),
-    ('fqdn', '1.5.1', {
-        'checksums': ['105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f'],
+        'checksums': ['0ff789ae7d8ddd76d2ac02e7d13bfef6fc4928ac01e1dcaa182be51b6bcc0aaa'],
     }),
     ('typing_inspect', '0.9.0', {
         'checksums': ['b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78'],
@@ -75,50 +63,66 @@ exts_list = [
     ('tiktoken', '0.4.0', {
         'checksums': ['59b20a819969735b48161ced9b92f05dc4519c17be4015cfb73b65270a243620'],
     }),
-    ('ray', '2.4.0', {
+    ('ray', '2.6.1', {
         'source_tmpl': '%(name)s-%(version)s-cp310-cp310-manylinux2014_x86_64.whl',
-        'checksums': ['5ed5a29795b122e9e2b832d5224ab9b1cc235beab700d2a413b23c63b3d3c80c'],
+        'checksums': ['c9b5aabf5f41fe05028e4f3a271dc89ca7cd9c210f48a4ed815b852210ebb5a8'],
     }),
-    ('pydantic', '1.10.10', {
-        'checksums': ['3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a'],
+    ('pydantic', '1.10.12', {
+        'checksums': ['0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303'],
     }),
     ('marshmallow', '3.19.0', {
         'checksums': ['90032c0fd650ce94b6ec6dc8dfeb0e3ff50c144586462c389b81a07205bedb78'],
     }),
-    ('arrow', '1.2.3', {
-        'checksums': ['3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1'],
-    }),
     ('openapi-schema-pydantic', '1.2.4', {
         'checksums': ['3e22cf58b74a69f752cc7e5f1537f6e44164282db2700cbbcd3bb99ddd065196'],
     }),
     ('openai', '0.27.8', {
         'checksums': ['2483095c7db1eee274cebac79e315a986c4e55207bb4fa7b82d185b3a2ed9536'],
     }),
+    ('huggingface_hub', '0.16.4', {
+        'checksums': ['608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14'],
+    }),
+    ('ai21', '1.2.2', {
+        'checksums': ['753639f579dcff96017af04048fac35c38927d1f969a11fe4699250bf7e6d356'],
+    }),
     ('marshmallow-enum', '1.5.1', {
         'checksums': ['38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58'],
     }),
-    ('isoduration', '20.11.0', {
-        'checksums': ['ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9'],
+    ('dataclasses_json', '0.5.13', {
+        'checksums': ['425810e1356fb6917eb7c323e3aaee0c9398fc55b5001d3532381679f727fc18'],
     }),
-    ('dataclasses-json', '0.5.9', {
-        'checksums': ['e9ac87b73edc0141aafbce02b44e93553c3123ad574958f0fe52a534b6707e8e'],
+    ('langchainplus_sdk', '0.0.20', {
+        'checksums': ['3d300e2e3290f68cc9d842c059f9458deba60e776c9e790309688cad1bfbb219'],
     }),
-    ('langchain', '0.0.159', {
+    ('langchain', '0.0.220', {
         'source_tmpl': '%(name)s-%(version)s-py3-none-any.whl',
-        'checksums': ['d9df75405d6f02cc683bef5b3e79fbf9ca3b22c5ff198dd823b5393c568e2ec4'],
+        'checksums': ['fcba303c0744f74a026eda97d65da51a90cd5b20ab1944ad766d95ee1eb68b75'],
     }),
     ('jupyter_ai_magics', version, {
-        'checksums': ['53d403df3015796c330b496c04609fac16590937be6670fe725d9349f0522059'],
+        'checksums': ['5704f5e3d73b26ff84fffa00718bb53a958494381e60656bacaaabb652a2bebe'],
     }),
     ('jupyter_ai', version, {
-        'checksums': ['ce3c37ae69c90dd62340ef1dc1df2e1cc35c7420eba2aba8b88e9ce97c3b336d'],
+        'patches': ['fix_rootdir_pr327.patch'],
+        'checksums': [
+            '05ea82653365cc2137a2de5576442badb8393c001f68692411e1feb0f5abe955',
+            'c358666fb2a4b72dd43ae81c9d8a043895d1633cacfc0930e5b8f528ee4a8958',
+        ],
     }),
 ]
 
 modextrapaths = {
+    'JUPYTER_PATH': ['share/jupyter'],
+    'JUPYTER_CONFIG_PATH': ['etc/jupyter'],
     'JUPYTER_EXTRA_LABEXTENSIONS_PATH': ['share/jupyter/labextensions'],
 }
 
+# Ensure the user-specific $HOME/.jupyter and $HOME/.local/share/jupyter come first in the search paths
+modluafooter = """
+setenv("JUPYTERAI_SAVEDIR", os.getenv("HOME"))
+prepend_path("JUPYTER_PATH", pathJoin(os.getenv("HOME"), ".local/share/jupyter"))
+prepend_path("JUPYTER_CONFIG_PATH", pathJoin(os.getenv("HOME"), ".jupyter"))
+"""
+
 sanity_check_paths = {
     'files': [],
     'dirs': [
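
Jupyter searches JUPYTER_PATH and JUPYTER_CONFIG_PATH front to back, so the prepends in the modluafooter above make the user-level directories take precedence over the module's share/jupyter and etc/jupyter. A minimal sanity-check sketch, assuming a Python session with the module loaded (illustrative only, not part of the easyconfig):

import os

home = os.environ['HOME']
cfg_dirs = os.environ['JUPYTER_CONFIG_PATH'].split(os.pathsep)
path_dirs = os.environ['JUPYTER_PATH'].split(os.pathsep)

# the modluafooter prepends the user-level directories, so they must come first
assert cfg_dirs[0] == os.path.join(home, '.jupyter')
assert path_dirs[0] == os.path.join(home, '.local/share/jupyter')
assert os.environ['JUPYTERAI_SAVEDIR'] == home
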
diff --git a/Golden_Repo/j/JupyterExtension-jupyterai/fix_rootdir_pr327.patch b/Golden_Repo/j/JupyterExtension-jupyterai/fix_rootdir_pr327.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7894086ea615d70599f7e15319bfe6ea6a733cf7
--- /dev/null
+++ b/Golden_Repo/j/JupyterExtension-jupyterai/fix_rootdir_pr327.patch
@@ -0,0 +1,145 @@
+From e362ada133be27f3fc845ce06f3a1bf3e3f96fc8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jens=20Henrik=20G=C3=B6bbert?= <goebbert1@jwlogin10.juwels>
+Date: Wed, 9 Aug 2023 12:34:19 +0200
+Subject: [PATCH 1/4] add alternatives to c.ServerApp.root_dir
+
+---
+ packages/jupyter-ai/jupyter_ai/extension.py | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/packages/jupyter-ai/jupyter_ai/extension.py b/packages/jupyter-ai/jupyter_ai/extension.py
+index f9d0fee5..1a22b5a2 100644
+--- a/packages/jupyter-ai/jupyter_ai/extension.py
++++ b/packages/jupyter-ai/jupyter_ai/extension.py
+@@ -1,4 +1,5 @@
+ import time
++from os import environ, access, W_OK, getcwd
+ 
+ from dask.distributed import Client as DaskClient
+ from jupyter_ai_magics.utils import get_em_providers, get_lm_providers
+@@ -71,6 +72,13 @@ def initialize_settings(self):
+         # consumers a Future that resolves to the Dask client when awaited.
+         dask_client_future = loop.create_task(self._get_dask_client())
+ 
++        # get root directory for read and writing
++        root_wdir = os.environ.get('JUPYTERAI_ROOTDIR')
++        if not root_wdir:
++            root_wdir = self.serverapp.root_dir
++            if not os.access(root_wdir, os.W_OK):
++                root_wdir = os.getcwd()
++
+         # initialize chat handlers
+         chat_handler_kwargs = {
+             "log": self.log,
+@@ -85,11 +93,11 @@ def initialize_settings(self):
+         )
+         generate_chat_handler = GenerateChatHandler(
+             **chat_handler_kwargs,
+-            root_dir=self.serverapp.root_dir,
++            root_dir=root_wdir,
+         )
+         learn_chat_handler = LearnChatHandler(
+             **chat_handler_kwargs,
+-            root_dir=self.serverapp.root_dir,
++            root_dir=root_wdir,
+             dask_client_future=dask_client_future,
+         )
+         help_chat_handler = HelpChatHandler(**chat_handler_kwargs)
+
+From daa4142dde81ce99f61e7b68542ddf2eaf173c6d Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jens=20Henrik=20G=C3=B6bbert?= <goebbert1@jwlogin10.juwels>
+Date: Wed, 9 Aug 2023 13:43:02 +0200
+Subject: [PATCH 2/4] rename env to JUPYTERAI_SAVEDIR
+
+---
+ packages/jupyter-ai/jupyter_ai/extension.py | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/packages/jupyter-ai/jupyter_ai/extension.py b/packages/jupyter-ai/jupyter_ai/extension.py
+index 1a22b5a2..ff62026c 100644
+--- a/packages/jupyter-ai/jupyter_ai/extension.py
++++ b/packages/jupyter-ai/jupyter_ai/extension.py
+@@ -73,11 +73,11 @@ def initialize_settings(self):
+         dask_client_future = loop.create_task(self._get_dask_client())
+ 
+         # get root directory for read and writing
+-        root_wdir = os.environ.get('JUPYTERAI_ROOTDIR')
+-        if not root_wdir:
+-            root_wdir = self.serverapp.root_dir
+-            if not os.access(root_wdir, os.W_OK):
+-                root_wdir = os.getcwd()
++        save_dir = os.environ.get('JUPYTERAI_SAVEDIR')
++        if not save_dir:
++            save_dir = self.serverapp.root_dir
++            if not os.access(save_dir, os.W_OK):
++                save_dir = os.getcwd()
+ 
+         # initialize chat handlers
+         chat_handler_kwargs = {
+@@ -93,11 +93,11 @@ def initialize_settings(self):
+         )
+         generate_chat_handler = GenerateChatHandler(
+             **chat_handler_kwargs,
+-            root_dir=root_wdir,
++            root_dir=save_dir,
+         )
+         learn_chat_handler = LearnChatHandler(
+             **chat_handler_kwargs,
+-            root_dir=root_wdir,
++            root_dir=save_dir,
+             dask_client_future=dask_client_future,
+         )
+         help_chat_handler = HelpChatHandler(**chat_handler_kwargs)
+
+From c5a2d5b6f534d49796c6c7d8d4fbf936db9885f2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jens=20Henrik=20G=C3=B6bbert?= <goebbert1@jwlogin10.juwels>
+Date: Wed, 9 Aug 2023 14:42:20 +0200
+Subject: [PATCH 3/4] fix 'import os'
+
+---
+ packages/jupyter-ai/jupyter_ai/extension.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/packages/jupyter-ai/jupyter_ai/extension.py b/packages/jupyter-ai/jupyter_ai/extension.py
+index ff62026c..d2b4ef96 100644
+--- a/packages/jupyter-ai/jupyter_ai/extension.py
++++ b/packages/jupyter-ai/jupyter_ai/extension.py
+@@ -1,5 +1,5 @@
+ import time
+-from os import environ, access, W_OK, getcwd
++import os
+ 
+ from dask.distributed import Client as DaskClient
+ from jupyter_ai_magics.utils import get_em_providers, get_lm_providers
+
+From 9891c2ff9df54a07a3e439340f6784af61dbf24b Mon Sep 17 00:00:00 2001
+From: "pre-commit-ci[bot]"
+ <66853113+pre-commit-ci[bot]@users.noreply.github.com>
+Date: Wed, 9 Aug 2023 13:00:13 +0000
+Subject: [PATCH 4/4] [pre-commit.ci] auto fixes from pre-commit.com hooks
+
+for more information, see https://pre-commit.ci
+---
+ packages/jupyter-ai/jupyter_ai/extension.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/packages/jupyter-ai/jupyter_ai/extension.py b/packages/jupyter-ai/jupyter_ai/extension.py
+index d2b4ef96..dbea8400 100644
+--- a/packages/jupyter-ai/jupyter_ai/extension.py
++++ b/packages/jupyter-ai/jupyter_ai/extension.py
+@@ -1,5 +1,5 @@
+-import time
+ import os
++import time
+ 
+ from dask.distributed import Client as DaskClient
+ from jupyter_ai_magics.utils import get_em_providers, get_lm_providers
+@@ -73,7 +73,7 @@ def initialize_settings(self):
+         dask_client_future = loop.create_task(self._get_dask_client())
+ 
+         # get root directory for read and writing
+-        save_dir = os.environ.get('JUPYTERAI_SAVEDIR')
++        save_dir = os.environ.get("JUPYTERAI_SAVEDIR")
+         if not save_dir:
+             save_dir = self.serverapp.root_dir
+             if not os.access(save_dir, os.W_OK):
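
Squashed, the four commits in fix_rootdir_pr327.patch make one behavioural change: jupyter-ai takes its working directory from $JUPYTERAI_SAVEDIR (set to $HOME by the modluafooter above), falls back to the server's root_dir only if that is writable, and otherwise uses the current working directory. A standalone sketch of that logic (the function name is hypothetical; the real code lives inline in initialize_settings):

import os

def resolve_save_dir(server_root_dir):
    """Prefer $JUPYTERAI_SAVEDIR, else a writable root_dir, else $PWD."""
    save_dir = os.environ.get("JUPYTERAI_SAVEDIR")
    if not save_dir:
        save_dir = server_root_dir
        if not os.access(save_dir, os.W_OK):
            save_dir = os.getcwd()
    return save_dir
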
diff --git a/Golden_Repo/j/JupyterKernel-Cling/JupyterKernel-Cling-20230205-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterKernel-Cling/JupyterKernel-Cling-20230205-GCCcore-11.3.0-2023.3.6.eb
index 9980ddaaee1994d28e66590977b523446155d33e..9ec2dbb7cf3d881027d3c7dcb05011557e1f6ab8 100644
--- a/Golden_Repo/j/JupyterKernel-Cling/JupyterKernel-Cling-20230205-GCCcore-11.3.0-2023.3.6.eb
+++ b/Golden_Repo/j/JupyterKernel-Cling/JupyterKernel-Cling-20230205-GCCcore-11.3.0-2023.3.6.eb
@@ -67,8 +67,9 @@ postinstallcmds = [
 
     # Ensure we remove the virtual environment to avoid wrong search path for python packages
     'rm -f %(installdir)s/pyvenv.cfg',
+    'rm -f %(installdir)s/Activate.ps1',
     'rm -f %(installdir)s/bin/python',
-    'rm -f %(installdir)s/bin/python3',
+    'rm -f %(installdir)s/bin/python3*',
     'rm -f %(installdir)s/bin/activate',
     'rm -f %(installdir)s/bin/activate*',
     'rm -f %(installdir)s/bin/easy_install*',
diff --git a/Golden_Repo/j/JupyterKernel-Julia/JupyterKernel-Julia-1.8.5-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterKernel-Julia/JupyterKernel-Julia-1.8.5-GCCcore-11.3.0-2023.3.6.eb
index 116e8204b611caa02ca8cbab50dabcaa08cccf17..2479b573345c635de2ebde9f3a063ebd5204198c 100644
--- a/Golden_Repo/j/JupyterKernel-Julia/JupyterKernel-Julia-1.8.5-GCCcore-11.3.0-2023.3.6.eb
+++ b/Golden_Repo/j/JupyterKernel-Julia/JupyterKernel-Julia-1.8.5-GCCcore-11.3.0-2023.3.6.eb
@@ -17,11 +17,11 @@ toolchainopts = {'pic': True}
 
 builddependencies = [
     ('binutils', '2.38'),
+    ('Julia', version, '', ('gcccoremkl', '11.3.0-2022.1.0')),
 ]
 
 dependencies = [
     ('Python', '3.10.4'),
-    ('Julia', version, '', ('gcccoremkl', '11.3.0-2022.1.0')),
 ]
 
 components = [
@@ -69,6 +69,9 @@ exts_list = [
     }),
 ]
 
+local_kernel_dir = 'julia-%(version_major_minor)s'
+local_kernel_name = 'Julia-%s' % version
+
 modextrapaths = {
     'PYTHONPATH': ['lib/python%(pyshortver)s/site-packages'],
     'JUPYTER_PATH': [local_jupyter_path],  # add search path for kernelspecs
@@ -94,13 +97,40 @@ postinstallcmds = [
         'EOF'
     ) % (local_jupyter_path),
 
+    # write kernel.sh
+    (
+        '{ cat > %%(installdir)s/share/jupyter/kernels/%s/kernel.sh; } << EOF\n'
+        '#!/bin/bash \n'
+        '\n'
+        '# Load required modules \n'
+        'module purge \n'
+        'module load Stages/${STAGE} \n'
+        'module load GCC/11.3.0 \n'
+        'module load ParaStationMPI \n'
+        '\n'
+        'module load Julia/%s \n'
+        'module load %s/.%s%s \n'
+        '\n'
+        'exec julia \$@\n'
+        '\n'
+        'EOF'
+    ) % (local_kernel_dir, version, name, version, versionsuffix),
+    'chmod +x %%(installdir)s/share/jupyter/kernels/%s/kernel.sh' % local_kernel_dir,
+
     # configure Python<->Julia bridge (of python package julia)
     'source %(builddir)s/env.sh && python -c "import julia; julia.install()"',
 
-    # Ensure we remove the virtuel environment to avoid wrong search path for python packages
+    # replace `[..]/bin/julia` with `kernel.sh` in kernel.json
+    (
+        'sed -i \'s#.*\/bin\/julia.*#    "%%(installdir)s/share/jupyter/kernels/%s/kernel.sh",#\' '
+        '    %%(installdir)s/share/jupyter/kernels/%s/kernel.json'
+    ) % (local_kernel_dir, local_kernel_dir),
+
+    # Ensure we remove the virtual environment to avoid wrong search path for python packages
     'rm -f %(installdir)s/pyvenv.cfg',
+    'rm -f %(installdir)s/Activate.ps1',
     'rm -f %(installdir)s/bin/python',
-    'rm -f %(installdir)s/bin/python3',
+    'rm -f %(installdir)s/bin/python3*',
     'rm -f %(installdir)s/bin/activate',
     'rm -f %(installdir)s/bin/activate*',
     'rm -f %(installdir)s/bin/easy_install*',
@@ -114,8 +144,9 @@ postinstallcmds = [
     'rm -rf %(installdir)s/lib/python%(pyshortver)s/site-packages/setuptools-*',
 ]
 
-# specify that Bundle easyblock should run a full sanity check, rather than just trying to load the module
-# full_sanity_check = True
+# Julia is only a build dependency and MUST NOT become a runtime dependency.
+# Hence, we need to disable the sanity check step.
+skipsteps = ['sanitycheck']
 sanity_check_paths = {
     'files': [
         'share/jupyter/kernels/julia-%(version_major_minor)s/kernel.json',
diff --git a/Golden_Repo/j/JupyterKernel-PyEarthSystem/JupyterKernel-PyEarthSystem-2023.5-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterKernel-PyEarthSystem/JupyterKernel-PyEarthSystem-2023.5-GCCcore-11.3.0-2023.3.6.eb
index bf9df673fa3b696e9e1ca4443a9c53ecb7c85809..00e75db8753ad1c5cf8639b38c82406f51d822b4 100644
--- a/Golden_Repo/j/JupyterKernel-PyEarthSystem/JupyterKernel-PyEarthSystem-2023.5-GCCcore-11.3.0-2023.3.6.eb
+++ b/Golden_Repo/j/JupyterKernel-PyEarthSystem/JupyterKernel-PyEarthSystem-2023.5-GCCcore-11.3.0-2023.3.6.eb
@@ -12,7 +12,6 @@ Project Jupyter exists to develop open-source software, open-standards, and serv
 for interactive computing across dozens of programming languages.
 """
 
-# toolchain = {'name': 'gcccoremkl', 'version': '11.3.0-2022.1.0'}
 toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
 toolchainopts = {'pic': True}
 
@@ -65,7 +64,7 @@ components = [
 exts_default_options = {
     'source_urls': [PYPI_SOURCE],
     'use_pip': True,
-    'sanity_pip_check': False,  # skip as it requires protobuf, TensorFlow
+    'sanity_pip_check': True,
     'download_dep_fail': True,
     'use_pip_for_deps': False,
 }
diff --git a/Golden_Repo/j/JupyterKernel-PyQuantum/JupyterKernel-PyQuantum-2023.5-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterKernel-PyQuantum/JupyterKernel-PyQuantum-2023.5-GCCcore-11.3.0-2023.3.6.eb
index 617a25379d850101a53994704226fa1af08dd647..e25a6d48efb573b9aaf9a84226f7a14bd183630b 100644
--- a/Golden_Repo/j/JupyterKernel-PyQuantum/JupyterKernel-PyQuantum-2023.5-GCCcore-11.3.0-2023.3.6.eb
+++ b/Golden_Repo/j/JupyterKernel-PyQuantum/JupyterKernel-PyQuantum-2023.5-GCCcore-11.3.0-2023.3.6.eb
@@ -28,7 +28,7 @@ builddependencies = [
     #    ('DWave', '6.3.0', '', ('gcccoremkl', '11.3.0-2022.4.0')),
     #    ('PyQuil', '3.3.3'),
     #    ('Qiskit', '0.41.0', '', ('gpsmkl', '2022')),
-    #    ('Qiskit-juqcs', '0.5.0', '', ('gpsmkl', '2022')),
+    #    ('Qiskit-juqcs', '0.8.0', '', ('gpsmkl', '2022')),
 ]
 
 dependencies = [
@@ -107,7 +107,7 @@ postinstallcmds = [
         'module load DWave/6.3.0 \n'
         'module load PyQuil/3.3.3 \n'
         'module load Qiskit/0.41.0 \n'
-        # 'module load Qiskit-juqcs/0.5.0 \n' # not yet compatible with current Qiskit
+        'module load Qiskit-juqcs/0.8.0 \n'
         'module load pulser/0.12.0 \n'
         'module load myqlm/1.7.3 \n'
         'module load %s/.%s%s \n'
diff --git a/Golden_Repo/j/JupyterLab/401.html b/Golden_Repo/j/JupyterLab/401.html
new file mode 100644
index 0000000000000000000000000000000000000000..635c4615c89442bc54b385eb71e2704d39c6cdd5
--- /dev/null
+++ b/Golden_Repo/j/JupyterLab/401.html
@@ -0,0 +1,131 @@
+<!DOCTYPE html>
+<html><head>
+  <meta http-equiv="Refresh" content="0; url=https://jupyter-jsc.fz-juelich.de/hub/logout?stopall=false&alldevices=false" />
+
+  <meta http-equiv="content-type" content="text/html; charset=UTF-8">
+  <meta charset="utf-8">
+
+  <title>jupyter-jsc</title>
+  <meta http-equiv="X-UA-Compatible" content="chrome=1">
+  <meta property="og:image" content="/hub/static/images/mini_website.jpg">
+  <meta property="og:locale" content="en_US">
+  <meta property="og:site_name" content="jupyter-jsc">
+  <meta property="og:title" content="jupyter-jsc">
+  <meta property="og:type" content="website">
+  <meta property="og:url" content="https://jupyter-jsc.fz-juelich.de/">
+
+  <link rel="stylesheet" href="/hub/static/css/style.min.css" type="text/css">
+  <link rel="stylesheet" href="/hub/static/css/j4j_font.min.htm" type="text/css">
+  <link rel="stylesheet" href="/hub/static/css/j4j_base.min.css" type="text/css">
+  <link rel="stylesheet" href="/hub/static/css/j4j_base_header.min.css" type="text/css">
+  <link rel="stylesheet" href="/hub/static/css/j4j_base_footer.min.css" type="text/css">
+  <link rel="icon" href="/hub//static/images/favicon.svg" type="jpg/png">
+  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jquery-confirm/3.3.2/jquery-confirm.min.css">  
+  <link rel="stylesheet" href="/hub/static/css/j4j_page_home.min.css" type="text/css">
+  <link rel="stylesheet" href="/hub/static/css/spawn_style.css" type="text/css">
+
+<body>
+
+<div id="container">
+
+  <div id="header-background">
+  <div id="header">
+  <nav class="navbar navbar-default">
+    <div class="container-fluid">
+      <div class="navbar-header">
+        <span id="jupyterhub-logo" class="pull-left"><a href="https://www.fz-juelich.de/jsc" target="_blank"><img src="/hub/static/images/jsc.png" alt="JupyterHub" class="jpy-logo" title="Home"></a></span>
+      </div>
+
+      <div id="thenavbar">
+        <ul class="nav navbar-nav">
+          
+          <li><a href="https://jupyter-jsc.fz-juelich.de/hub/start">Start</a></li>
+          
+          <li id="navbarbtn-links" class="main-menu-btn menu-btn"><a>Links</a>
+            <div id="navbarmenu-links" class="menu-box">
+              <ul>
+                <li id="navbarbtn-links-1" class="menu-btn"><a>jupyter-jsc</a>
+                  <div id="navbarmenu-links-1" class="menu-box menu-sub-box show-sub-header" style="">
+                    <ul>
+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/nbviewer/github/kreuzert/Jupyter-JSC/blob/master/Extensions.ipynb">Extensions at jupyter-jsc</a></li>
+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/nbviewer/github/kreuzert/Jupyter-JSC/blob/master/FAQ.ipynb">HDFCloud FAQ</a></li>
+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/static/files/projects.html">Link Projects to Home</a></li>
+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/static/files/kernel.html">Setup your own kernel</a></li>
+                      <li class=""><a target="_blank" href="https://www.unicore.eu/about-unicore/case-studies/jupyter-at-jsc/">jupyter-jsc at unicore.eu</a></li>
+                    </ul>
+                  </div>
+                </li>
+                <li id="navbarbtn-links-2" class="menu-btn"><a>Jupyter</a>
+                  <div id="navbarmenu-links-2" class="menu-box menu-sub-box show-sub-header" style="">
+                    <ul>
+                      <li class=""><a target="_blank" href="https://www-jupyter.org/">Home</a></li>
+                      <li class=""><a target="_blank" href="https://newsletter.jupyter.org/">Newsletter</a></li>
+                      <li class=""><a target="_blank" href="https://www.youtube.com/watch?v=HW29067qVWk">Introduction Video</a></li>
+                      <li class=""><a target="_blank" href="https://blog.jupyter.org/">Blog</a></li>
+                      <li class=""><a target="_blank" href="https://jupyter.org/documentation.html">Documentation</a></li>
+                      <li class=""><a target="_blank" href="https://www.oreilly.com/topics/jupyter">O'Reilly on Jupyter</a></li>
+                      <li class=""><a target="_blank" href="https://twitter.com/projectjupyter">Twitter</a></li>
+                      <li class=""><a target="_blank" href="https://github.com/trending/jupyter-notebook">Jupyter-Notebooks</a></li>
+                    </ul>
+                  </div>
+                </li>
+                <li id="navbarbtn-links-3" class="menu-btn"><a>JSC</a>
+                  <div id="navbarmenu-links-3" class="menu-box menu-sub-box show-sub-header" style="">
+                    <ul>
+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JUWELS/JUWELS_node.html">JUWELS</a></li>
+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JURECA/JURECA_node.html">JURECA</a></li>
+                      <li class=""><a target="_blank" href="https://hbp-hpc-platform.fz-juelich.de/?page_id=1073">JURON</a></li>
+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/News/Newsletter/newsletter_node.html">Newsletter</a></li>
+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/News/Events/events_node.html">Events</a></li>
+                      <li class=""><a target="_blank" href="https://twitter.com/fzj_jsc">Twitter</a></li>
+                    </ul>
+                  </div>
+                </li>
+              </ul>
+            </div>
+          </li>
+          
+        </ul>
+      </div>
+    </div>
+  </nav>
+  </div>
+  </div>
+
+<div id="body">
+<div class="background-wrapper">
+  <div class="content" id="JupyterLabs-div">
+    
+      <!--<center><h2 style="color:red">jupyter-jsc maintenance: 25-02-2020 - 26-02-2020</h2></center>-->
+      <h2>
+      The access token of your browser session to the running JupyterLab has expired.
+      </h2>
+      <p>
+        Unfortunately, you have to log out of Jupyter-JSC and log back in to regain access.<br>
+        <a href="https://jupyter-jsc.fz-juelich.de/hub/logout?stopall=false&alldevices=false"> Logout now </a>
+      </p>
+    
+  </div>
+</div>
+</div>
+
+<div class="footer">
+  <div class="footer-top-background">
+  </div>
+  <div class="footer-bottom-background">
+    <div class="footer-bottom">
+      <div class="footer-links">
+        <span>© Forschungszentrum Jülich</span>
+        <a href="https://jupyter-jsc.fz-juelich.de/hub/imprint">Imprint</a>
+        <a href="https://jupyter-jsc.fz-juelich.de/hub/privacy">Privacy Policy</a>
+        <a href="mailto:ds-support@fz-juelich.de?subject=jupyter-jsc Support&amp;body=Please describe your problem here. (english or german)">Support</a>
+        <a href="https://jupyter-jsc.fz-juelich.de/hub/terms">Terms of Service</a>
+      </div>
+      <a href="https://www.helmholtz.de/en/" target="_blank"><img class="helmholtz-logo" src="/hub/static/images/helmholtz.png"></a>
+    </div>
+  </div>
+</div>
+
+</div> <!-- container -->
+
+</body></html>
diff --git a/Golden_Repo/j/JupyterLab/401html.patch b/Golden_Repo/j/JupyterLab/401html.patch
deleted file mode 100644
index c3e71a800541a396377737132022c67a077d7564..0000000000000000000000000000000000000000
--- a/Golden_Repo/j/JupyterLab/401html.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-diff -Naur jupyterlab-2.2.9.orig/401.html jupyterlab-2.2.9/401.html
---- jupyterlab-2.2.9.orig/401.html	1970-01-01 01:00:00.000000000 +0100
-+++ jupyterlab-2.2.9/401.html	2020-12-11 23:24:45.301738818 +0100
-@@ -0,0 +1,131 @@
-+<!DOCTYPE html>
-+<html><head>
-+  <meta http-equiv="Refresh" content="0; url=https://jupyter-jsc.fz-juelich.de/hub/logout?stopall=false&alldevices=false" />
-+
-+  <meta http-equiv="content-type" content="text/html; charset=UTF-8">
-+  <meta charset="utf-8">
-+
-+  <title>jupyter-jsc</title>
-+  <meta http-equiv="X-UA-Compatible" content="chrome=1">
-+  <meta property="og:image" content="/hub/static/images/mini_website.jpg">
-+  <meta property="og:locale" content="en_US">
-+  <meta property="og:site_name" content="jupyter-jsc">
-+  <meta property="og:title" content="jupyter-jsc">
-+  <meta property="og:type" content="website">
-+  <meta property="og:url" content="https://jupyter-jsc.fz-juelich.de/">
-+
-+  <link rel="stylesheet" href="/hub/static/css/style.min.css" type="text/css">
-+  <link rel="stylesheet" href="/hub/static/css/j4j_font.min.htm" type="text/css">
-+  <link rel="stylesheet" href="/hub/static/css/j4j_base.min.css" type="text/css">
-+  <link rel="stylesheet" href="/hub/static/css/j4j_base_header.min.css" type="text/css">
-+  <link rel="stylesheet" href="/hub/static/css/j4j_base_footer.min.css" type="text/css">
-+  <link rel="icon" href="/hub//static/images/favicon.svg" type="jpg/png">
-+  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jquery-confirm/3.3.2/jquery-confirm.min.css">  
-+  <link rel="stylesheet" href="/hub/static/css/j4j_page_home.min.css" type="text/css">
-+  <link rel="stylesheet" href="/hub/static/css/spawn_style.css" type="text/css">
-+
-+<body>
-+
-+<div id="container">
-+
-+  <div id="header-background">
-+  <div id="header">
-+  <nav class="navbar navbar-default">
-+    <div class="container-fluid">
-+      <div class="navbar-header">
-+        <span id="jupyterhub-logo" class="pull-left"><a href="https://www.fz-juelich.de/jsc" target="_blank"><img src="/hub/static/images/jsc.png" alt="JupyterHub" class="jpy-logo" title="Home"></a></span>
-+      </div>
-+
-+      <div id="thenavbar">
-+        <ul class="nav navbar-nav">
-+          
-+          <li><a href="https://jupyter-jsc.fz-juelich.de/hub/start">Start</a></li>
-+          
-+          <li id="navbarbtn-links" class="main-menu-btn menu-btn"><a>Links</a>
-+            <div id="navbarmenu-links" class="menu-box">
-+              <ul>
-+                <li id="navbarbtn-links-1" class="menu-btn"><a>jupyter-jsc</a>
-+                  <div id="navbarmenu-links-1" class="menu-box menu-sub-box show-sub-header" style="">
-+                    <ul>
-+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/nbviewer/github/kreuzert/Jupyter-JSC/blob/master/Extensions.ipynb">Extensions at jupyter-jsc</a></li>
-+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/nbviewer/github/kreuzert/Jupyter-JSC/blob/master/FAQ.ipynb">HDFCloud FAQ</a></li>
-+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/static/files/projects.html">Link Projects to Home</a></li>
-+                      <li class=""><a href="https://jupyter-jsc.fz-juelich.de/static/files/kernel.html">Setup your own kernel</a></li>
-+                      <li class=""><a target="_blank" href="https://www.unicore.eu/about-unicore/case-studies/jupyter-at-jsc/">jupyter-jsc at unicore.eu</a></li>
-+                    </ul>
-+                  </div>
-+                </li>
-+                <li id="navbarbtn-links-2" class="menu-btn"><a>Jupyter</a>
-+                  <div id="navbarmenu-links-2" class="menu-box menu-sub-box show-sub-header" style="">
-+                    <ul>
-+                      <li class=""><a target="_blank" href="https://www-jupyter.org/">Home</a></li>
-+                      <li class=""><a target="_blank" href="https://newsletter.jupyter.org/">Newsletter</a></li>
-+                      <li class=""><a target="_blank" href="https://www.youtube.com/watch?v=HW29067qVWk">Introduction Video</a></li>
-+                      <li class=""><a target="_blank" href="https://blog.jupyter.org/">Blog</a></li>
-+                      <li class=""><a target="_blank" href="https://jupyter.org/documentation.html">Documentation</a></li>
-+                      <li class=""><a target="_blank" href="https://www.oreilly.com/topics/jupyter">O'Reilly on Jupyter</a></li>
-+                      <li class=""><a target="_blank" href="https://twitter.com/projectjupyter">Twitter</a></li>
-+                      <li class=""><a target="_blank" href="https://github.com/trending/jupyter-notebook">Jupyter-Notebooks</a></li>
-+                    </ul>
-+                  </div>
-+                </li>
-+                <li id="navbarbtn-links-3" class="menu-btn"><a>JSC</a>
-+                  <div id="navbarmenu-links-3" class="menu-box menu-sub-box show-sub-header" style="">
-+                    <ul>
-+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JUWELS/JUWELS_node.html">JUWELS</a></li>
-+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/Expertise/Supercomputers/JURECA/JURECA_node.html">JURECA</a></li>
-+                      <li class=""><a target="_blank" href="https://hbp-hpc-platform.fz-juelich.de/?page_id=1073">JURON</a></li>
-+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/News/Newsletter/newsletter_node.html">Newsletter</a></li>
-+                      <li class=""><a target="_blank" href="https://www.fz-juelich.de/ias/jsc/EN/News/Events/events_node.html">Events</a></li>
-+                      <li class=""><a target="_blank" href="https://twitter.com/fzj_jsc">Twitter</a></li>
-+                    </ul>
-+                  </div>
-+                </li>
-+              </ul>
-+            </div>
-+          </li>
-+          
-+        </ul>
-+      </div>
-+    </div>
-+  </nav>
-+  </div>
-+  </div>
-+
-+<div id="body">
-+<div class="background-wrapper">
-+  <div class="content" id="JupyterLabs-div">
-+    
-+      <!--<center><h2 style="color:red">jupyter-jsc maintenance: 25-02-2020 - 26-02-2020</h2></center>-->
-+      <h2>
-+      The access token of your browser session to the running JupyterLab has expired.
-+      </h2>
-+      <p>
-+        Unfortunately you have to log out and log in again from the Jupyter-JSC to regain access permission.<br>
-+        <a href="https://jupyter-jsc.fz-juelich.de/hub/logout?stopall=false&alldevices=false"> Logout now </a>
-+      </p>
-+    
-+  </div>
-+</div>
-+</div>
-+
-+<div class="footer">
-+  <div class="footer-top-background">
-+  </div>
-+  <div class="footer-bottom-background">
-+    <div class="footer-bottom">
-+      <div class="footer-links">
-+        <span>© Forschungszentrum Jülich</span>
-+        <a href="https://jupyter-jsc.fz-juelich.de/hub/imprint">Imprint</a>
-+        <a href="https://jupyter-jsc.fz-juelich.de/hub/privacy">Privacy Policy</a>
-+        <a href="mailto:ds-support@fz-juelich.de?subject=jupyter-jsc Support&amp;body=Please describe your problem here. (english or german)">Support</a>
-+        <a href="https://jupyter-jsc.fz-juelich.de/hub/terms">Terms of Service</a>
-+      </div>
-+      <a href="https://www.helmholtz.de/en/" target="_blank"><img class="helmholtz-logo" src="/hub/static/images/helmholtz.png"></a>
-+    </div>
-+  </div>
-+</div>
-+
-+</div> <!-- container -->
-+
-+</body></html>
diff --git a/Golden_Repo/j/JupyterLab/JupyterLab-2023.3.6-GCCcore-11.3.0.eb b/Golden_Repo/j/JupyterLab/JupyterLab-2023.3.6-GCCcore-11.3.0.eb
index 92502da744c167c6701a31d312c152ae20ae3df0..0687e456caa955fe19925ed4d1dc47b88d1ad8f4 100644
--- a/Golden_Repo/j/JupyterLab/JupyterLab-2023.3.6-GCCcore-11.3.0.eb
+++ b/Golden_Repo/j/JupyterLab/JupyterLab-2023.3.6-GCCcore-11.3.0.eb
@@ -116,12 +116,39 @@ exts_list = [
     ('deprecation', '2.1.0', {
         'checksums': ['72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff'],
     }),
+    # jsonschema ####################################
+    ('webcolors', '1.13', {
+        'checksums': ['c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a'],
+    }),
+    ('uri-template', '1.3.0', {
+        'checksums': ['0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7'],
+    }),
     ('rfc3339_validator', '0.1.4', {
         'checksums': ['138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b'],
     }),
     ('rfc3986_validator', '0.1.1', {
         'checksums': ['3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055'],
     }),
+    ('rfc3987', '1.3.8', {
+        'checksums': ['d3c4d257a560d544e9826b38bc81db676890c79ab9d7ac92b39c7a253d5ca733'],
+    }),
+    ('jsonpointer', '2.4', {
+        'checksums': ['585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88'],
+    }),
+    ('arrow', '1.2.3', {
+        'checksums': ['3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1'],
+    }),
+    ('isoduration', '20.11.0', {
+        'checksums': ['ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9'],
+    }),
+    ('fqdn', '1.5.1', {
+        'checksums': ['105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f'],
+    }),
+    ('jsonschema', '4.17.3', {
+        'checksums': ['0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d'],
+        'extras': ['format-nongpl'],
+    }),
+    # nbclassic #####################################
     ('jupyter_events', '0.6.3', {
         'checksums': ['9a6e9995f75d1b7146b436ea24d696ce3a35bfa8bfe45e0c33c334c79464d0b3'],
     }),
@@ -136,9 +163,6 @@ exts_list = [
     ('requests', '2.30.0', {
         'checksums': ['239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4'],
     }),
-    ('jsonschema', '4.17.3', {
-        'checksums': ['0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d'],
-    }),
     ('json5', '0.9.11', {
         'checksums': ['4f1e196acc55b83985a51318489f345963c7ba84aa37607e49073066c562e99b'],
     }),
@@ -171,12 +195,20 @@ exts_list = [
     # jupyterlab ####################################
     ('jupyterlab', local_jlab_version, {
         'patches': [
-            ('401html.patch', 1),
+            {
+                'name': 'jupyterlab-silencenotify.patch',
+                'copy': '.'
+            },
+            {
+                'name': '401.html',
+                'copy': '.'
+            },
             # 'jupyterlab-rendermime_p14618.patch'
         ],
         'checksums': [
             'ac0cb19756be1d1e14b2be1f23c603de46e0f0113960fce9888889ca55ae8923',
-            '094c89560472168c5ca24b3907d4a6a5b090352ef0c7657995d735246cc338f5',  # 401html.patch
+            'dd7b13106dab97cbbc23fdb5457025d0a28b9298da4b0f90e7ef93babe58b6b0',  # jupyterlab-silencenotify.patch
+            '5c1591daa5a428ac6ecb83f6b48dbb8e4e32c152f8ef308ccc4e235ab0dd4903',  # 401.html
         ],
     }),
     # jupyter-server-proxy ##########################
@@ -198,8 +230,8 @@ exts_list = [
     ('aiohttp', '3.8.4', {
         'checksums': ['bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c'],
     }),
-    ('simpervisor', '0.4', {
-        'checksums': ['cec79e13cdbd6edb04a5c98c1ff8d4bd9713e706c069226909a1ef0e89d393c5'],
+    ('simpervisor', '1.0.0', {
+        'checksums': ['7eb87ca86d5e276976f5bb0290975a05d452c6a7b7f58062daea7d8369c823c1'],
     }),
     ('jupyter_server_proxy', '4.0.0', {
         'patches': [
@@ -246,6 +278,10 @@ exts_list = [
             '5bdd4bd545aa66c8cb534975d77a0c4a23c9993f507e479b23a06ae9618f695f',
         ],
     }),
+    # jupyter-archive ####################################
+    ('jupyter-archive', '3.3.4', {
+        'checksums': ['4f420d5a63876735d60740254cf9e18391a17603ccef30783a07f14fc9b5d179'],
+    }),
     # ipyleaflet ####################################
     ('branca', '0.6.0', {
         'checksums': ['55949855214504c7583b71b9a03a84dce2e96a84027613bb53b42d04844ce24e'],
@@ -289,7 +325,7 @@ exts_list = [
         'patches': [('jupyter-resource-usage.config', '.')],
         'checksums': [
             'ab596a1f2f6ced9e5d063f56b772d88527d2539d61831fbfb80a37f940d3e9df',
-            '7634bf6c4d941e20dca4d910441315047147aedec073f620e15048ec6b31b053',
+            '6c784e1e9abafbacfa37b48dfe5a1b8383bbf209e5e452c7ddbbb37c67f7ebd2',
         ],
     }),
     ('jupyterlab-topbar', '0.6.1', {
@@ -573,17 +609,8 @@ exts_list = [
         'checksums': ['6f8f8a9b06b39677f207c09100c8d386bcf592f0cbbdda9f0f50e81445697627'],
     }),
     # dask ##########################################
-    ('dask_labextension', '6.1.0', {
-        'checksums': ['397cf6f2106650954c3f3bf84fe269a4fb72fb6bf56e6089c75cc3bbf9ff12ae'],
-        'preinstallopts': (
-            # https://github.com/jupyterlab/jupyterlab/issues/14335#issuecomment-1611586421
-            'jlpm install && '
-            'jlpm upgrade --caret --scope jupyterlab && '
-            # Minimize dependency versions
-            'jlpm add -D yarn-deduplicate@^5.0.0 && '
-            'jlpm yarn-deduplicate -s fewer && '
-            'jlpm install && '
-        )
+    ('dask_labextension', '6.2.0', {
+        'checksums': ['1c6864e57ea0f854e0d65649342d4bc8ca830ee255a087cf770f52ac56cda753'],
     }),
     # nbdev #########################################
     ('watchdog', '3.0.0', {
@@ -729,7 +756,7 @@ postinstallcmds = [
     #  https://jupyterlab-lsp.readthedocs.io/en/latest/Language%20Servers.html
     #  must be installed _after_ 'jupyter lab build' as this clears the prefix
     (
-        'source %(builddir)s/env.sh && npm install --prefix %(installdir)s/share/jupyter/lab/staging/ '
+        'cd %(installdir)s/share/jupyter/lab/staging/ && npm install '
         '        bash-language-server@2.1.0 '
         '        dockerfile-language-server-nodejs@0.9.0 '
         '        pyright@1.1.309 '
@@ -748,14 +775,35 @@ postinstallcmds = [
     'source %(builddir)s/env.sh && jupyter serverextension enable --py jupyterlab_iframe',
     'source %(builddir)s/env.sh && jupyter serverextension enable --py jupyterlab_s3_browser',
 
-    # STILL NEEDED ???
-    #  dask_labextension
+    # jupyter-archive server settings (kept commented out, so the defaults are used)
     # (
-    #    'cp %(builddir)s/dask_labextension/dask_labextension-6.1.0/'
-    #    'dask_labextension/labextension/schemas/dask-labextension/plugin.json '
-    #    '   %(installdir)s/share/jupyter/labextensions/dask-labextension/schemas/dask-labextension/plugin.json'
-    # ),
+    #     '{ cat >> %(installdir)s/etc/jupyter/jupyter_server_config.d/jupyter-archive.json; } << \'EOF\'\n'
+    #     '{\n'
+    #     '  "JupyterArchive": {\n'
+    #     '    "stream_max_buffer_size": 104857600, // max size of tornado IOStream buffer\n'
+    #     '    "handler_max_buffer_length": 10240, // max length of chunks in tornado RequestHandler\n'
+    #     '    "archive_download_flush_delay": 100 // delay in ms at which the chunk of data is send to the client\n'
+    #     '  }\n'
+    #     '}\n'
+    #     'EOF'
+    # )
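+    # (note: strict JSON does not allow the `//` comments above, so they would
+    #  have to be removed if this block is ever enabled)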
 
+    # jupyterlab-latex fix for https://github.com/jupyterlab/jupyterlab-latex/issues/110#issuecomment-1241460371
+    (
+        '{ cat >> %(installdir)s/etc/jupyter/jupyter_notebook_config.py; } << \'EOF\'\n'
+        'try:\n'
+        '    c.ServerApp.notebook_dir = c.ServerApp.root_dir\n'
+        'except:\n'
+        '    pass\n'
+        'EOF'
+    ),
+    # optimize JupyterLab for network file systems
+    (
+        '{ cat >> %(installdir)s/etc/jupyter/jupyter_notebook_config.py; } << \'EOF\'\n'
+        'c.NotebookNotary.db_file = \':memory:\'\n'
+        'c.FileManagerMixin.use_atomic_writing = False\n'
+        'EOF'
+    ),
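+    # (rationale: an in-memory notary DB avoids SQLite locking issues on shared
+    #  filesystems, and disabling atomic writing skips the write-to-temp-then-rename
+    #  step that is slow on network mounts)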
     #  Send2Trash
     (
         '{ cat >> %(installdir)s/etc/jupyter/jupyter_notebook_config.py; } << \'EOF\'\n'
@@ -818,6 +866,12 @@ postinstallcmds = [
         'EOF'
     ),
 
+    # disable notifications about new JupyterLab versions
+    (
+        'patch "%%(installdir)s/share/jupyter/lab/schemas/@jupyterlab/apputils-extension/notification.json" '
+        '      < %%(builddir)s/jupyterlab/jupyterlab-%s/jupyterlab-silencenotify.patch'
+    ) % local_jlab_version,
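+    # (the patch file is shipped with 'copy': '.' in exts_list rather than being
+    #  applied at source time, presumably because the schema file only exists in
+    #  the install tree after 'jupyter lab build')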
+
+    # add a static 401 page that redirects the user back to https://jupyter-jsc.fz-juelich.de
     'cp %%(builddir)s/jupyterlab/jupyterlab-%s/401.html %%(installdir)s/share/jupyter/lab/static/' % local_jlab_version,
 
@@ -875,7 +929,7 @@ postinstallcmds = [
     #  ###################################################
     #  'chmod -R g-w %(installdir)s ',          # software-group must not modify the installation on accident
     #  'chmod -R ugo-w %(installdir)s/share ', # Noone should add files/configs to the global share after install
-    #  'chmod -R ug-w ...../2023/software/Python/3.10.6-GCCcore-11.3.0/share ',  # Python module, too
+    #  'chmod -R ug-w ...../2023/software/Python/3.10.4-GCCcore-11.3.0/share ',  # Python module, too
 ]
 
 #  specify that Bundle easyblock should run a full sanity check, rather than just trying to load the module
diff --git a/Golden_Repo/j/JupyterLab/jupyter-resource-usage.config b/Golden_Repo/j/JupyterLab/jupyter-resource-usage.config
index 27da2299b6ef51b28f15f4a55c145dd232183b5a..e0b5b10d9d7553598e6d7617ed9331819488a93e 100644
--- a/Golden_Repo/j/JupyterLab/jupyter-resource-usage.config
+++ b/Golden_Repo/j/JupyterLab/jupyter-resource-usage.config
@@ -60,3 +60,4 @@ else:
 c.ResourceUseDisplay.track_cpu_percent = True
 c.ResourceUseDisplay.cpu_limit = cpu_limit
 c.ResourceUseDisplay.cpu_warning_threshold = 0.1
+
diff --git a/Golden_Repo/j/JupyterLab/jupyterlab-silencenotify.patch b/Golden_Repo/j/JupyterLab/jupyterlab-silencenotify.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f31f604b77a0e826a51755bb6cecda663b767332
--- /dev/null
+++ b/Golden_Repo/j/JupyterLab/jupyterlab-silencenotify.patch
@@ -0,0 +1,12 @@
+diff -Naur jupyterlab.orig/jupyterlab-3.6.5/jupyterlab/schemas/@jupyterlab/apputils-extension/notification.json jupyterlab/jupyterlab-3.6.5/jupyterlab/schemas/@jupyterlab/apputils-extension/notification.json
+--- jupyterlab.orig/jupyterlab-3.6.5/jupyterlab/schemas/@jupyterlab/apputils-extension/notification.json	2023-06-26 18:37:41.000000000 +0200
++++ jupyterlab/jupyterlab-3.6.5/jupyterlab/schemas/@jupyterlab/apputils-extension/notification.json	2023-08-16 13:22:27.187215117 +0200
+@@ -29,7 +29,7 @@
+       "title": "Check for JupyterLab updates",
+       "description": "Whether to check for newer version of JupyterLab or not. It requires `fechNews` to be `true` to be active. If `true`, it will make a request to a website.",
+       "type": "boolean",
+-      "default": true
++      "default": false
+     },
+     "doNotDisturbMode": {
+       "title": "Silence all notifications",
diff --git a/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.8.0-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.8.0-GCCcore-11.3.0-2023.3.6.eb
new file mode 100644
index 0000000000000000000000000000000000000000..cfacda0030863ab03bdd9fc8b75fb0e0f6a45fea
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.8.0-GCCcore-11.3.0-2023.3.6.eb
@@ -0,0 +1,141 @@
+easyblock = 'PythonBundle'
+
+name = 'JupyterProxy-Matlab'
+version = '0.8.0'
+local_jupyterver = '2023.3.6'
+versionsuffix = '-' + local_jupyterver
+
+# only users of the UNIX group 'matlab' should have access
+# group = 'matlab'
+
+homepage = 'https://github.com/mathworks/jupyter-matlab-proxy'
+description = """The MATLAB integration for Jupyter enables you to open a MATLAB desktop
+in a web browser tab, directly from your Jupyter environment.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+builddependencies = [
+    ('binutils', '2.38'),
+    # check for existence
+    # ('MATLAB', '2023a'),
+    # ('XServer', '21.1.6'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('JupyterLab', local_jupyterver),
+]
+
+exts_defaultclass = 'PythonPackage'
+exts_default_options = {
+    'filter': ('python -c "import %(ext_name)s"', ''),
+    'download_dep_fail': True,
+    'source_urls': [PYPI_SOURCE],
+    'use_pip': True,
+    'sanity_pip_check': True,
+    'use_pip_for_deps': False,
+}
+
+exts_list = [
+    # enable classic jupyter ####
+    ('jupyter_contrib_core', '0.4.2', {
+        'checksums': [('sha256', '1887212f3ca9d4487d624c0705c20dfdf03d5a0b9ea2557d3aaeeb4c38bdcabb')],
+    }),
+    ('jupyter_highlight_selected_word', '0.2.0', {
+        'checksums': [('sha256', '9fa740424859a807950ca08d2bfd28a35154cd32dd6d50ac4e0950022adc0e7b')],
+    }),
+    ('jupyter_nbextensions_configurator', '0.6.1', {
+        'checksums': [('sha256', '4b9e1270ccc1f8e0a421efb8979a737f586813023a4855b9453f61c3ca599b82')],
+    }),
+    ('jupyter_contrib_nbextensions', '0.7.0', {
+        'checksums': [('sha256', '06e33f005885eb92f89cbe82711e921278201298d08ab0d886d1ba09e8c3e9ca')],
+    }),
+    # core-packages ####
+    ('aiohttp-session', '2.12.0', {
+        'checksums': [('sha256', '0ccd11a7c77cb9e5a61f4daacdc9170d561112f9cfaf9e9a2d9867c0587d1950')],
+    }),
+    ('matlab-proxy', '0.8.0', {
+        'patches': [
+            'noVersionCheck.patch',
+            'matlabproxy_timeout.patch',
+        ],
+        'checksums': [
+            ('sha256', '965b0b7cfa6314638435089f211106c114b838e8f4945cb5f5dacb15c0cdf71f'),
+            ('sha256', 'c21ba8969da77c88a266c97dc73dc64dc7a8f4fc20391f311bb4a6992d6e812c'),
+            ('sha256', '4f7d9f67566496223f01156f168e02af61b6b0a441afcaf7258b08f6ec79762d'),
+        ],
+    }),
+    ('jupyter-matlab-proxy', '0.8.0', {
+        'source_urls': ['https://github.com/mathworks/jupyter-matlab-proxy/archive/'],
+        'source_tmpl': 'v%(version)s.tar.gz',
+        'patches': [
+            'jupytermatlabproxy_timeout.patch',
+        ],
+        'checksums': [
+            ('sha256', 'b1785c8bed32d187c2fa15d921d047dd7483ec20442cdc5f34c0aad1ca06bac5'),
+            ('sha256', '4d15cc52352dc0cdeb03cc01285cf01cfe01393f3eee77fe1c2e205d691a5b8d'),
+        ],
+    }),
+]
+
+postinstallcmds = [
+    (
+        '{ cat > %(installdir)s/bin/matlab; } << EOF \n'
+        '#!/bin/bash \n'
+        '\n'
+        '# Load required modules \n'
+        'module purge \n'
+        'module load Stages/${STAGE} \n'
+        'module load GCCcore/.11.3.0 \n'
+        'module load MATLAB \n'
+        'module load XServer/21.1.6 \n'
+        '\n'
+        'matlab "\$@" \n'
+        '\n'
+        'EOF'
+    ),
+    'chmod +x %(installdir)s/bin/matlab',
+    (
+        '{ cat > %(installdir)s/bin/Xvfb; } << EOF \n'
+        '#!/bin/bash \n'
+        '\n'
+        '# Load required modules \n'
+        '# and ensure stderr stays empty as matlab-proxy seems to fail if not \n'
+        'module purge 2>&1 \n'
+        'module load Stages/${STAGE} 2>&1 \n'
+        'module load GCCcore/.11.3.0 2>&1 \n'
+        'module load MATLAB 2>&1 \n'
+        'module load XServer/21.1.6 2>&1 \n'
+        '\n'
+        'Xvfb "\$@" 2>&1\n'
+        '\n'
+        'EOF'
+    ),
+    'chmod +x %(installdir)s/bin/Xvfb'
+]
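+
+# Note on the here-docs above: the EOF delimiter is unquoted, so `\$@` keeps
+# `"$@"` literal in the generated wrappers, while the unescaped ${STAGE} is
+# expanded when the postinstall command runs. The installed bin/matlab should
+# therefore look roughly like (sketch):
+#   #!/bin/bash
+#   module purge
+#   module load Stages/<stage> GCCcore/.11.3.0 MATLAB XServer/21.1.6
+#   matlab "$@"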
+
+
+modextravars = {
+    'MWI_USE_EXISTING_LICENSE': 'True',
+}
+
+# Jupyter-matlab-kernel DISABLED:
+# https://github.com/mathworks/jupyter-matlab-proxy/issues/62
+modextrapaths = {
+    # 'MWI_CUSTOM_MATLAB_ROOT': 'lib/python%(pyshortver)s/site-packages/jupyter_matlab_proxy/',
+    # 'JUPYTER_PATH': ['share/jupyter'],  # add search path for kernelspecs
+}
+
+# Ensure that the user-specific $HOME/.local/share/jupyter is the first entry in JUPYTER_PATH
+# modluafooter = """
+# prepend_path("JUPYTER_PATH", pathJoin(os.getenv("HOME"), ".local/share/jupyter"))
+# """
+
+sanity_check_paths = {
+    'files': [],
+    'dirs': ['lib/python%(pyshortver)s/site-packages'],
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.9.0-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.9.0-GCCcore-11.3.0-2023.3.6.eb
new file mode 100644
index 0000000000000000000000000000000000000000..a9163da038dd08343e9cfde7c9b0ca95df97ff52
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-Matlab/JupyterProxy-Matlab-0.9.0-GCCcore-11.3.0-2023.3.6.eb
@@ -0,0 +1,136 @@
+easyblock = 'PythonBundle'
+
+name = 'JupyterProxy-Matlab'
+version = '0.9.0'
+local_jupyterver = '2023.3.6'
+versionsuffix = '-' + local_jupyterver
+
+# only users of the UNIX group 'matlab' should have access
+# group = 'matlab'
+
+homepage = 'https://github.com/mathworks/jupyter-matlab-proxy'
+description = """The MATLAB integration for Jupyter enables you to open a MATLAB desktop
+in a web browser tab, directly from your Jupyter environment.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+builddependencies = [
+    ('binutils', '2.38'),
+    # check for existence
+    # ('MATLAB', '2023a'),
+    # ('XServer', '21.1.6'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('JupyterLab', local_jupyterver),
+]
+
+exts_defaultclass = 'PythonPackage'
+exts_default_options = {
+    'filter': ('python -c "import %(ext_name)s"', ''),
+    'download_dep_fail': True,
+    'source_urls': [PYPI_SOURCE],
+    'use_pip': True,
+    'sanity_pip_check': True,
+    'use_pip_for_deps': False,
+}
+
+exts_list = [
+    # enable classic jupyter ####
+    ('jupyter_contrib_core', '0.4.2', {
+        'checksums': [('sha256', '1887212f3ca9d4487d624c0705c20dfdf03d5a0b9ea2557d3aaeeb4c38bdcabb')],
+    }),
+    ('jupyter_highlight_selected_word', '0.2.0', {
+        'checksums': [('sha256', '9fa740424859a807950ca08d2bfd28a35154cd32dd6d50ac4e0950022adc0e7b')],
+    }),
+    ('jupyter_nbextensions_configurator', '0.6.1', {
+        'checksums': [('sha256', '4b9e1270ccc1f8e0a421efb8979a737f586813023a4855b9453f61c3ca599b82')],
+    }),
+    ('jupyter_contrib_nbextensions', '0.7.0', {
+        'checksums': [('sha256', '06e33f005885eb92f89cbe82711e921278201298d08ab0d886d1ba09e8c3e9ca')],
+    }),
+    # core-packages ####
+    ('aiohttp-session', '2.12.0', {
+        'checksums': [('sha256', '0ccd11a7c77cb9e5a61f4daacdc9170d561112f9cfaf9e9a2d9867c0587d1950')],
+    }),
+    ('matlab-proxy', '0.9.0', {
+        'checksums': [
+            ('sha256', 'd8e76ef8c5d89aab286ad5bad65ecbdde1dca8803e2101f7d13e6f3ee3cb5cb5'),
+        ],
+    }),
+    ('jupyter-matlab-proxy', '0.8.0', {
+        'source_urls': ['https://github.com/mathworks/jupyter-matlab-proxy/archive/'],
+        'source_tmpl': 'v%(version)s.tar.gz',
+        'patches': [
+            'jupytermatlabproxy_timeout.patch',
+        ],
+        'checksums': [
+            ('sha256', 'b1785c8bed32d187c2fa15d921d047dd7483ec20442cdc5f34c0aad1ca06bac5'),
+            ('sha256', '4d15cc52352dc0cdeb03cc01285cf01cfe01393f3eee77fe1c2e205d691a5b8d'),
+        ],
+    }),
+]
+
+postinstallcmds = [
+    (
+        '{ cat > %(installdir)s/bin/matlab; } << EOF \n'
+        '#!/bin/bash \n'
+        '\n'
+        '# Load required modules \n'
+        'module purge \n'
+        'module load Stages/${STAGE} \n'
+        'module load GCCcore/.11.3.0 \n'
+        'module load MATLAB \n'
+        'module load XServer/21.1.6 \n'
+        '\n'
+        'matlab "\$@" \n'
+        '\n'
+        'EOF'
+    ),
+    'chmod +x %(installdir)s/bin/matlab',
+    (
+        '{ cat > %(installdir)s/bin/Xvfb; } << EOF \n'
+        '#!/bin/bash \n'
+        '\n'
+        '# Load required modules \n'
+        '# and ensure stderr stays empty as matlab-proxy seems to fail if not \n'
+        'module purge 2>&1 \n'
+        'module load Stages/${STAGE} 2>&1 \n'
+        'module load GCCcore/.11.3.0 2>&1 \n'
+        'module load MATLAB 2>&1 \n'
+        'module load XServer/21.1.6 2>&1 \n'
+        '\n'
+        'Xvfb "\$@" 2>&1\n'
+        '\n'
+        'EOF'
+    ),
+    'chmod +x %(installdir)s/bin/Xvfb'
+]
+
+
+modextravars = {
+    'MWI_USE_EXISTING_LICENSE': 'True',
+    'MWI_PROCESS_START_TIMEOUT': '600',
+}
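+
+# matlab-proxy 0.9.0 reads MWI_PROCESS_START_TIMEOUT (set above), which
+# presumably replaces the hard-coded timeout patches that were needed for the
+# 0.8.0 easyconfig.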
+
+# Jupyter-matlab-kernel DISABLED:
+# https://github.com/mathworks/jupyter-matlab-proxy/issues/62
+modextrapaths = {
+    # 'MWI_CUSTOM_MATLAB_ROOT': 'lib/python%(pyshortver)s/site-packages/jupyter_matlab_proxy/',
+    # 'JUPYTER_PATH': ['share/jupyter'],  # add search path for kernelspecs
+}
+
+# Ensure that the user-specific $HOME/.local/share/jupyter is the first entry in JUPYTER_PATH
+# modluafooter = """
+# prepend_path("JUPYTER_PATH", pathJoin(os.getenv("HOME"), ".local/share/jupyter"))
+# """
+
+sanity_check_paths = {
+    'files': [],
+    'dirs': ['lib/python%(pyshortver)s/site-packages'],
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/j/JupyterProxy-Matlab/jupytermatlabproxy_timeout.patch b/Golden_Repo/j/JupyterProxy-Matlab/jupytermatlabproxy_timeout.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bd1e4cf7e819450ec9c34aecb1524698c8fc4926
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-Matlab/jupytermatlabproxy_timeout.patch
@@ -0,0 +1,12 @@
+diff -Naur 0.8.0.orig/GCCcore-11.3.0-2023.3.6/jupytermatlabproxy/jupyter-matlab-proxy-0.8.0/src/jupyter_matlab_proxy/__init__.py 0.8.0/GCCcore-11.3.0-2023.3.6/jupytermatlabproxy/jupyter-matlab-proxy-0.8.0/src/jupyter_matlab_proxy/__init__.py
+--- 0.8.0.orig/GCCcore-11.3.0-2023.3.6/jupytermatlabproxy/jupyter-matlab-proxy-0.8.0/src/jupyter_matlab_proxy/__init__.py       2023-10-16 13:08:25.138590609 +0200
++++ 0.8.0/GCCcore-11.3.0-2023.3.6/jupytermatlabproxy/jupyter-matlab-proxy-0.8.0/src/jupyter_matlab_proxy/__init__.py    2023-10-16 12:55:47.863830331 +0200
+@@ -93,7 +93,7 @@
+             "--config",
+             config["extension_name"],
+         ],
+-        "timeout": 100,
++        "timeout": 1800,
+         "environment": _get_env,
+         "absolute_url": True,
+         "launcher_entry": {"title": "Open MATLAB", "icon_path": icon_path},
diff --git a/Golden_Repo/j/JupyterProxy-Matlab/matlabproxy_timeout.patch b/Golden_Repo/j/JupyterProxy-Matlab/matlabproxy_timeout.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a3fdc0a6ad20357ef3b83d70d8fef5663ba3bb31
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-Matlab/matlabproxy_timeout.patch
@@ -0,0 +1,12 @@
+diff -Naur 0.8.0.orig/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.8.0/matlab_proxy/app_state.py 0.8.0/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.8.0/matlab_proxy/app_state.py
+--- 0.8.0.orig/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.8.0/matlab_proxy/app_state.py 2023-10-16 13:09:23.368725542 +0200
++++ 0.8.0/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.8.0/matlab_proxy/app_state.py      2023-10-16 12:57:02.458981541 +0200
+@@ -36,7 +36,7 @@
+     MATLAB_PORT_CHECK_DELAY_IN_SECONDS = 1
+     # The maximum amount of time in seconds the Embedded Connector can take
+     # for launching, before the matlab-proxy server concludes that something is wrong.
+-    EMBEDDED_CONNECTOR_MAX_STARTUP_DURATION_IN_SECONDS = 120
++    EMBEDDED_CONNECTOR_MAX_STARTUP_DURATION_IN_SECONDS = 600
+
+     def __init__(self, settings):
+         """Parameterized constructor for the AppState class.
diff --git a/Golden_Repo/j/JupyterProxy-Matlab/noVersionCheck.patch b/Golden_Repo/j/JupyterProxy-Matlab/noVersionCheck.patch
new file mode 100644
index 0000000000000000000000000000000000000000..baccdaa140e5e2ea42d7ed11a00da5014f49246e
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-Matlab/noVersionCheck.patch
@@ -0,0 +1,41 @@
+diff -Naur 0.7.1.orig/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.7.4/matlab_proxy/settings.py 0.7.1/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.7.4/matlab_proxy/settings.py
+--- 0.7.1.orig/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.7.4/matlab_proxy/settings.py	2023-08-24 13:52:26.000000000 +0200
++++ 0.7.1/GCCcore-11.3.0-2023.3.6/matlabproxy/matlab-proxy-0.7.4/matlab_proxy/settings.py	2023-08-27 22:54:27.491557356 +0200
+@@ -37,10 +37,10 @@
+     if custom_matlab_root_path:
+         matlab_root_path = Path(custom_matlab_root_path)
+ 
+-        # Terminate process if invalid Custom Path was provided!
+-        mwi.validators.terminate_on_invalid_matlab_root_path(
+-            matlab_root_path, is_custom_matlab_root=True
+-        )
++        ## Terminate process if invalid Custom Path was provided!
++        #mwi.validators.terminate_on_invalid_matlab_root_path(
++        #    matlab_root_path, is_custom_matlab_root=True
++        #)
+ 
+         # Generate executable path from root path
+         matlab_executable_path = matlab_root_path / "bin" / "matlab"
+@@ -57,9 +57,9 @@
+
+     if matlab_executable_path:
+         matlab_root_path = Path(matlab_executable_path).resolve().parent.parent
+-        mwi.validators.terminate_on_invalid_matlab_root_path(
+-            matlab_root_path, is_custom_matlab_root=False
+-        )
++        #mwi.validators.terminate_on_invalid_matlab_root_path(
++        #    matlab_root_path, is_custom_matlab_root=False
++        #)
+         logger.info(
+             f"Found MATLAB Executable: {matlab_executable_path} with Root: {matlab_root_path}"
+         )
+@@ -87,6 +87,9 @@
+         return None
+ 
+     version_info_file_path = Path(matlab_root_path) / VERSION_INFO_FILE_NAME
++    if not os.path.exists(version_info_file_path):
++        return None
++
+     tree = ET.parse(version_info_file_path)
+     root = tree.getroot()
+ 
diff --git a/Golden_Repo/j/JupyterProxy-NESTDesktop/JupyterProxy-NESTDesktop-0.3.4-GCCcore-11.3.0-2023.3.6.eb b/Golden_Repo/j/JupyterProxy-NESTDesktop/JupyterProxy-NESTDesktop-0.3.4-GCCcore-11.3.0-2023.3.6.eb
new file mode 100644
index 0000000000000000000000000000000000000000..c8130c0666a2775153d22eab7d3db9e8d663afc9
--- /dev/null
+++ b/Golden_Repo/j/JupyterProxy-NESTDesktop/JupyterProxy-NESTDesktop-0.3.4-GCCcore-11.3.0-2023.3.6.eb
@@ -0,0 +1,70 @@
+easyblock = 'PythonBundle'
+
+name = 'JupyterProxy-NESTDesktop'
+version = '0.3.4'
+local_jupyterver = '2023.3.6'
+versionsuffix = '-' + local_jupyterver
+
+homepage = 'https://nest-desktop.readthedocs.io/'
+description = """
+NEST Desktop is a web-based GUI application for NEST Simulator,
+an advanced simulation tool for computational neuroscience.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+builddependencies = [
+    ('binutils', '2.38'),
+    # check for existence
+    # ('nest-simulator', '3.6'),
+    ('nest-desktop', '3.3.0'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('JupyterLab', local_jupyterver),
+]
+
+exts_defaultclass = 'PythonPackage'
+exts_default_options = {
+    'download_dep_fail': True,
+    'source_urls': [PYPI_SOURCE],
+    'use_pip': True,
+}
+
+exts_list = [
+    ('jupyter-nestdesktop-proxy', version, {
+        'source_tmpl': 'v%(version)s.tar.gz',
+        'source_urls': ['https://github.com/jhgoebbert/jupyter-nestdesktop-proxy/archive/'],
+        'checksums': [
+            '58f87fdae12941409ee5f9454fa5a5533fded3720409068a465a462e1526cc13',
+        ],
+    }),
+]
+
+postinstallcmds = [
+    # write nest-desktop
+    (
+        'cp %(installdir)s/lib/python%(pyshortver)s/site-packages/jupyter_nestdesktop_proxy/bin/nest-desktop.in '
+        '   %(installdir)s/lib/python%(pyshortver)s/site-packages/jupyter_nestdesktop_proxy/bin/nest-desktop \n'
+        'prepenv="'
+        '  module purge\\n'
+        '  module load Stages/2023\\n'
+        '  module load GCC\\n'
+        '  module load Python\\n'
+        '  module load ParaStationMPI\\n'
+        '  module load nest-simulator\\n'
+        '  module load nest-desktop\\n"\n'
+        'sed -i "s#<MODULES>#$prepenv#g" '
+        '  "%(installdir)s/lib/python%(pyshortver)s/site-packages/jupyter_nestdesktop_proxy/bin/nest-desktop"'
+    ),
+    'chmod +x %(installdir)s/lib/python%(pyshortver)s/site-packages/jupyter_nestdesktop_proxy/bin/nest-desktop'
+]
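+
+# The sed call above substitutes the <MODULES> placeholder in nest-desktop.in,
+# so the generated wrapper should start roughly with (sketch):
+#   module purge
+#   module load Stages/2023
+#   module load GCC Python ParaStationMPI nest-simulator nest-desktop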
+
+sanity_check_paths = {
+    'files': [],
+    'dirs': ['lib/python%(pyshortver)s/site-packages'],
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/l/LinaroForge/LinaroForge-23.0.1.eb b/Golden_Repo/l/LinaroForge/LinaroForge-23.0.1.eb
new file mode 100644
index 0000000000000000000000000000000000000000..664308978290caf68faf87e941344d88951c0f89
--- /dev/null
+++ b/Golden_Repo/l/LinaroForge/LinaroForge-23.0.1.eb
@@ -0,0 +1,40 @@
+# Use $SYSTEMNAME to determine the license path; the 'local_' prefix appeases the easyconfig style checker.
+import os as local_os
+
+easyblock = 'EB_Allinea'
+
+name = 'LinaroForge'
+version = '23.0.1'
+
+homepage = 'https://www.linaroforge.com/'
+description = """
+Linaro Forge is the leading server and HPC development tool suite in research, industry, and academia
+for C, C++, Fortran, and Python high-performance code on Linux.
+
+Linaro Forge includes Linaro DDT, the best debugger for time-saving high-performance application debugging,
+Linaro MAP, the trusted performance profiler for invaluable optimization advice, and Linaro Performance Reports
+to help you analyze your HPC application runs.
+"""
+
+usage = """For more information, type "ddt -h", "map -h" or "perf-report -h"
+
+For the Linaro Forge User Guide, please see: "$EBROOTLINAROFORGE/doc/userguide.pdf"
+"""
+
+toolchain = SYSTEM
+
+source_urls = ['https://downloads.linaroforge.com/%(version)s/']
+sources = ['linaro-forge-%(version)s-linux-x86_64.tar']
+checksums = ['1d681891c0c725363f0f45584c9b79e669d5c9782158453b7d24b4b865d72755']
+
+start_dir = '%(builddir)s/linaro-forge-%(version)s-linux-x86_64/'
+install_cmd = "./textinstall.sh --accept-licence %(installdir)s"
+local_licdir = '/p/software/%s/licenses/linaroforge' % local_os.environ['SYSTEMNAME']
+license_file = ['%s/License.lic' % local_licdir]
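+# e.g. on a system where $SYSTEMNAME=juwels (hypothetical example value), this
+# resolves to /p/software/juwels/licenses/linaroforge/License.lic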
+
+sanity_check_paths = {
+    'files': ['bin/ddt', 'bin/map', 'bin/perf-report'],
+    'dirs': [],
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/l/lfortran/lfortran-0.19.0-GCCcore-11.3.0.eb b/Golden_Repo/l/lfortran/lfortran-0.19.0-GCCcore-11.3.0.eb
index ac36985bb13a3fd8836964c9eee4cfae81ab0dc3..654debf78debc140cbfa3acf6cb4f2bb118dc4aa 100644
--- a/Golden_Repo/l/lfortran/lfortran-0.19.0-GCCcore-11.3.0.eb
+++ b/Golden_Repo/l/lfortran/lfortran-0.19.0-GCCcore-11.3.0.eb
@@ -24,6 +24,7 @@ builddependencies = [
     ('binutils', '2.38'),
     ('CMake', '3.23.1'),
     ('Python', '3.10.4'),
+    ('Bison', '3.8.2'),
     ('pkgconf', '1.8.0'),
     ('re2c', '2.2'),
 ]
diff --git a/Golden_Repo/l/libneurosim/libneurosim-1.2.0-gpsmkl-2022a.eb b/Golden_Repo/l/libneurosim/libneurosim-1.2.0-gpsmkl-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..709fcee5e72ffc3d507d7c9f2693565256e8305f
--- /dev/null
+++ b/Golden_Repo/l/libneurosim/libneurosim-1.2.0-gpsmkl-2022a.eb
@@ -0,0 +1,38 @@
+easyblock = 'ConfigureMake'
+
+name = 'libneurosim'
+version = '1.2.0'
+
+homepage = 'https://github.com/INCF/libneurosim'
+
+description = """
+Common interfaces for neuronal simulators
+"""
+
+toolchain = {'name': 'gpsmkl', 'version': '2022a'}
+toolchainopts = {'pic': True, 'usempi': True}
+
+github_account = 'INCF'
+source_urls = [GITHUB_SOURCE]
+sources = ['v%(version)s.tar.gz']
+checksums = ['372fa0d8fb31950370f1d27bff4865e1200456239f4ea382c267d9a310175f83']
+
+builddependencies = [
+    ('binutils', '2.38'),
+    ('Autoconf', '2.71'),
+    ('pkgconf', '1.8.0'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+]
+
+preconfigopts = './autogen.sh && '
+configopts = '--with-python=3 '
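+# preconfigopts is prefixed to the configure command, i.e. EasyBuild runs
+# roughly: ./autogen.sh && ./configure --prefix=... --with-python=3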
+
+sanity_check_paths = {
+    'files': ['lib/libneurosim.%s' % x for x in ['a', SHLIB_EXT]],
+    'dirs': ['include', 'lib'],
+}
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/l/libtool/libtool-2.4.7.eb b/Golden_Repo/l/libtool/libtool-2.4.7.eb
new file mode 100644
index 0000000000000000000000000000000000000000..d1b87ebc0b9846a2355f390e5763d9ff9d4bbef8
--- /dev/null
+++ b/Golden_Repo/l/libtool/libtool-2.4.7.eb
@@ -0,0 +1,30 @@
+easyblock = 'ConfigureMake'
+
+name = 'libtool'
+version = '2.4.7'
+
+homepage = 'https://www.gnu.org/software/libtool'
+
+description = """
+ GNU libtool is a generic library support script. Libtool hides the complexity
+ of using shared libraries behind a consistent, portable interface.
+"""
+
+toolchain = SYSTEM
+
+source_urls = [GNU_SOURCE]
+sources = [SOURCELOWER_TAR_GZ]
+checksums = ['04e96c2404ea70c590c546eba4202a4e12722c640016c12b9b2f1ce3d481e9a8']
+
+dependencies = [
+    ('M4', '1.4.19'),
+]
+
+sanity_check_paths = {
+    'files': ['bin/libtool', 'bin/libtoolize', 'lib/libltdl.%s' % SHLIB_EXT],
+    'dirs': ['include/libltdl', 'share/libtool/loaders', 'share/man/man1'],
+}
+
+hidden = True
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/m/MATLAB/MATLAB-2023a-GCCcore-11.3.0.eb b/Golden_Repo/m/MATLAB/MATLAB-2023a-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..afdfd67a0fc5e7beb0c15abf735fb136de2ac7c5
--- /dev/null
+++ b/Golden_Repo/m/MATLAB/MATLAB-2023a-GCCcore-11.3.0.eb
@@ -0,0 +1,65 @@
+# Attention: before calling 'eb':
+# export EB_MATLAB_KEY as fileInstallationKey
+# or export EB_MATLAB_LICFILE as license file
+
+name = 'MATLAB'
+version = '2023a'
+
+# only users of the UNIX group 'matlab' should have access
+group = 'matlab'
+
+homepage = 'https://www.mathworks.com/products/matlab.html'
+description = """MATLAB is a high-level language and interactive environment
+that enables you to perform computationally intensive tasks faster than with
+traditional programming languages such as C, C++, and Fortran.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+sources = [SOURCELOWER_TAR_GZ]
+checksums = ['7ea4aef06d258af7762c683d8f5386a8917dfe87a4bda6b71f8a445656649f21']
+
+dependencies = [
+    ('X11', '20220504'),  # MATLAB requires libXt.so, which is not available on the compute nodes
+    ('Java', '11', '', SYSTEM)
+]
+
+java_options = '-Xmx2048m'
+
+postinstallcmds = [
+    # create a wrapper script to ensure we do not mess up the environment
+    # because MATLAB comes with its own libstdc++ and other system libs
+    # in $EBROOTMATLAB/sys/os/glnxa64/
+    'mv %(installdir)s/bin/matlab %(installdir)s/bin/matlab.bin ',
+    (
+        '{ cat > %(installdir)s/bin/matlab; } << EOF\n'
+        '#!/bin/bash\n'
+        '\n'
+        'MYPATH=\$(readlink -f \$0)\n'
+        'export MATLAB_PATH=\$(realpath \$(dirname "\$MYPATH")/..)\n'
+        '\n'
+        'export LD_LIBRARY_PATH=\$MATLAB_PATH/runtime/glnxa64:\$LD_LIBRARY_PATH\n'
+        'export LD_LIBRARY_PATH=\$MATLAB_PATH/bin/glnxa64:\$LD_LIBRARY_PATH\n'
+        'export LD_LIBRARY_PATH=\$MATLAB_PATH/sys/os/glnxa64:\$LD_LIBRARY_PATH\n'
+        '\n'
+        '"\$MATLAB_PATH/bin/matlab.bin" "\$@"\n'
+        'EOF'
+    ),
+    'chmod +x %(installdir)s/bin/matlab',
+]
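+
+# Sketch of the wrapper generated above (after the \$ escapes are resolved):
+#   #!/bin/bash
+#   MYPATH=$(readlink -f $0)
+#   export MATLAB_PATH=$(realpath $(dirname "$MYPATH")/..)
+#   export LD_LIBRARY_PATH=$MATLAB_PATH/sys/os/glnxa64:$MATLAB_PATH/bin/glnxa64:$MATLAB_PATH/runtime/glnxa64:$LD_LIBRARY_PATH
+#   "$MATLAB_PATH/bin/matlab.bin" "$@"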
+
+modloadmsg = """
+Attention: This software is RESTRICTED to ACADEMIC users who are members of the group matlab.
+"""
+
+modextravars = {
+    'MATLAB_ROOT': '%(installdir)s',
+    'MLM_LICENSE_FILE': '27000@zam2076.zam.kfa-juelich.de',
+}
+
+sanity_check_paths = {
+    'files': ['bin/matlab', 'bin/glnxa64/MATLAB'],
+    'dirs': [],
+}
+
+moduleclass = 'math'
diff --git a/Golden_Repo/m/MATLAB/MATLAB-2023b-GCCcore-11.3.0.eb b/Golden_Repo/m/MATLAB/MATLAB-2023b-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..ff2500d5bda552cd123ecdb9745a89d3cc9fba48
--- /dev/null
+++ b/Golden_Repo/m/MATLAB/MATLAB-2023b-GCCcore-11.3.0.eb
@@ -0,0 +1,72 @@
+# Attention: before calling 'eb':
+# export EB_MATLAB_KEY as fileInstallationKey
+# or export EB_MATLAB_LICFILE as license file
+
+name = 'MATLAB'
+version = '2023b'
+
+# only users of the UNIX group 'matlab' should have access
+group = 'matlab'
+
+homepage = 'https://www.mathworks.com/products/matlab.html'
+description = """MATLAB is a high-level language and interactive environment
+that enables you to perform computationally intensive tasks faster than with
+traditional programming languages such as C, C++, and Fortran.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+sources = [SOURCELOWER_TAR_GZ]
+checksums = ['d96b9b3b36fad31a68061b7e64f6e0c7c3c072f85881290877cc5fc675ccb06f']
+
+dependencies = [
+    ('X11', '20220504'),  # MATLAB requires libXt.so, which is not available on the compute nodes
+    ('Java', '11', '', SYSTEM)
+]
+
+java_options = '-Xmx2048m'
+
+# The commented-out wrapper below is only needed if we remove
+# ```
+# prepend_path("LD_LIBRARY_PATH", pathJoin(root, "runtime/glnxa64"))
+# prepend_path("LD_LIBRARY_PATH", pathJoin(root, "bin/glnxa64"))
+# prepend_path("LD_LIBRARY_PATH", pathJoin(root, "sys/os/glnxa64"))
+# ```
+# from the generated Lua module file (which needs a modification of the MATLAB easyblock):
+# postinstallcmds = [
+#    # create a wrapper script to ensure we do not mess up the environment
+#    # because MATLAB comes with its own libstdc++ and other system libs
+#    # in $EBROOTMATLAB/sys/os/glnxa64/
+#    'mv %(installdir)s/bin/matlab %(installdir)s/bin/matlab.bin ',
+#    (
+#        '{ cat > %(installdir)s/bin/matlab; } << EOF\n'
+#        '#!/bin/bash\n'
+#        '\n'
+#        'MYPATH=\$(readlink -f \$0)\n'
+#        'export MATLAB_PATH=\$(realpath \$(dirname "\$MYPATH")/..)\n'
+#        '\n'
+#        'export LD_LIBRARY_PATH=\$MATLAB_PATH/runtime/glnxa64:\$LD_LIBRARY_PATH\n'
+#        'export LD_LIBRARY_PATH=\$MATLAB_PATH/bin/glnxa64:\$LD_LIBRARY_PATH\n'
+#        'export LD_LIBRARY_PATH=\$MATLAB_PATH/sys/os/glnxa64:\$LD_LIBRARY_PATH\n'
+#        '\n'
+#        '"\$MATLAB_PATH/bin/matlab.bin" "\$@"\n'
+#        'EOF'
+#    ),
+#    'chmod +x %(installdir)s/bin/matlab',
+# ]
+
+modloadmsg = """
+Attention: This software is RESTRICTED to ACADEMIC users who are members of the group matlab.
+"""
+
+modextravars = {
+    'MATLAB_ROOT': '%(installdir)s',
+    'MLM_LICENSE_FILE': '27000@zam2076.zam.kfa-juelich.de',
+}
+
+sanity_check_paths = {
+    'files': ['bin/matlab', 'bin/glnxa64/MATLAB'],
+    'dirs': [],
+}
+
+moduleclass = 'math'
diff --git a/Golden_Repo/n/NVHPC/NVHPC-22.11.eb b/Golden_Repo/n/NVHPC/NVHPC-22.11.eb
deleted file mode 100644
index 5ecd0e379e323af3cb3c7006cafb7f553d063ee5..0000000000000000000000000000000000000000
--- a/Golden_Repo/n/NVHPC/NVHPC-22.11.eb
+++ /dev/null
@@ -1,73 +0,0 @@
-name = 'NVHPC'
-version = '22.11'
-local_gccver = '11.3.0'
-
-homepage = 'https://developer.nvidia.com/hpc-sdk/'
-description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
-
-toolchain = SYSTEM
-
-# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
-source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
-local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
-sources = [local_tarball_tmpl % '%(arch)s']
-checksums = [
-    {
-        local_tarball_tmpl % 'aarch64':
-            'e60e798657c33b06754d33dfd5ab3bea2882d4a9b9476102303edf2bbe3b7a95',
-        local_tarball_tmpl % 'ppc64le':
-            'ef800203cf6040b3a5df24f19944b272f62caee8362875bcb394e86dc1de2353',
-        local_tarball_tmpl % 'x86_64':
-            'cb91b3a04368457d5cfe3c0e9c0611591fdc8076b01ea977343fe7db7fdcfa3c',
-    }
-]
-
-dependencies = [
-    ('GCCcore', local_gccver),
-    ('binutils', '2.38', '', ('GCCcore', local_gccver)),
-    ('CUDA', '11.7', '', SYSTEM),
-    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
-    ('numactl', '2.0.15', '', ('GCCcore', local_gccver))
-]
-
-module_add_cuda = False
-
-# specify default CUDA version that should be used by NVHPC
-# should match one of the CUDA versions that are included with this NVHPC version
-# (see install_components/Linux_x86_64/22.3/cuda/)
-# for NVHPC 22.3, those are: 11.6, 11.0, 10.2;
-# this version can be tweaked from the EasyBuild command line with
-# --try-amend=default_cuda_version="11.0" (for example)
-default_cuda_version = '%(cudaver)s'
-
-# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
-# The following list gives examples for the easyconfig
-#
-# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
-# 1) Bundled CUDA
-#    If no easybuild dependency to CUDA is present, the bundled CUDA is taken. A version needs to be specified with
-#      default_cuda_version = "11.0"
-#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
-#      --try-amend=default_cuda_version="10.2"
-# 2) CUDA provided via EasyBuild
-#    Use CUDA as a dependency, for example
-#      dependencies = [('CUDA', '11.5.0')]
-#    The parameter default_cuda_version still can be set as above.
-#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
-#
-# Define a NVHPC-default Compute Capability
-#   cuda_compute_capabilities = "8.0"
-# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
-# Only single values supported, not lists of values!
-#
-# Options to add/remove things to/from environment module (defaults shown)
-#   module_byo_compilers = False  # Remove compilers from PATH (Bring-your-own compilers)
-#   module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
-#   module_add_math_libs = False  # Add NVHPC's math libraries (which should be there from CUDA anyway)
-#   module_add_profilers = False  # Add NVHPC's NVIDIA Profilers
-#   module_add_nccl = False       # Add NVHPC's NCCL library
-#   module_add_nvshmem = False    # Add NVHPC's NVSHMEM library
-#   module_add_cuda = False       # Add NVHPC's bundled CUDA
-
-# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
-moduleclass = 'compiler'
diff --git a/Golden_Repo/n/NVHPC/NVHPC-22.9.eb b/Golden_Repo/n/NVHPC/NVHPC-22.9.eb
deleted file mode 100644
index 61db7dae393bbdb0486c3233a48a254faec7f973..0000000000000000000000000000000000000000
--- a/Golden_Repo/n/NVHPC/NVHPC-22.9.eb
+++ /dev/null
@@ -1,73 +0,0 @@
-name = 'NVHPC'
-version = '22.9'
-local_gccver = '11.3.0'
-
-homepage = 'https://developer.nvidia.com/hpc-sdk/'
-description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
-
-toolchain = SYSTEM
-
-# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
-source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
-local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
-sources = [local_tarball_tmpl % '%(arch)s']
-checksums = [
-    {
-        local_tarball_tmpl % 'aarch64':
-            'bc4473f04b49bc9a26f08c17a72360650ddf48a3b6eefacdc525d79c8d730f30',
-        local_tarball_tmpl % 'ppc64le':
-            '9aac31d36bb09f6653544978021f5b78c272112e7748871566f7e930f5e7475b',
-        local_tarball_tmpl % 'x86_64':
-            'aebfeb826ace3dabf9699f72390ca0340f8789a8ef6fe4032e3c7b794f073ea3',
-    }
-]
-
-dependencies = [
-    ('GCCcore', local_gccver),
-    ('binutils', '2.38', '', ('GCCcore', local_gccver)),
-    ('CUDA', '11.7', '', SYSTEM),
-    # This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
-    ('numactl', '2.0.15', '', ('GCCcore', local_gccver))
-]
-
-module_add_cuda = False
-
-# specify default CUDA version that should be used by NVHPC
-# should match one of the CUDA versions that are included with this NVHPC version
-# (see install_components/Linux_x86_64/22.3/cuda/)
-# for NVHPC 22.3, those are: 11.6, 11.0, 10.2;
-# this version can be tweaked from the EasyBuild command line with
-# --try-amend=default_cuda_version="11.0" (for example)
-default_cuda_version = '%(cudaver)s'
-
-# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
-# The following list gives examples for the easyconfig
-#
-# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
-# 1) Bundled CUDA
-#    If no easybuild dependency to CUDA is present, the bundled CUDA is taken. A version needs to be specified with
-#      default_cuda_version = "11.0"
-#    in this easyconfig file; alternatively, it can be specified through the command line during installation with
-#      --try-amend=default_cuda_version="10.2"
-# 2) CUDA provided via EasyBuild
-#    Use CUDA as a dependency, for example
-#      dependencies = [('CUDA', '11.5.0')]
-#    The parameter default_cuda_version still can be set as above.
-#    If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
-#
-# Define a NVHPC-default Compute Capability
-#   cuda_compute_capabilities = "8.0"
-# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
-# Only single values supported, not lists of values!
-#
-# Options to add/remove things to/from environment module (defaults shown)
-#   module_byo_compilers = False  # Remove compilers from PATH (Bring-your-own compilers)
-#   module_nvhpc_own_mpi = False  # Add NVHPC's own pre-compiled OpenMPI
-#   module_add_math_libs = False  # Add NVHPC's math libraries (which should be there from CUDA anyway)
-#   module_add_profilers = False  # Add NVHPC's NVIDIA Profilers
-#   module_add_nccl = False       # Add NVHPC's NCCL library
-#   module_add_nvshmem = False    # Add NVHPC's NVSHMEM library
-#   module_add_cuda = False       # Add NVHPC's bundled CUDA
-
-# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
-moduleclass = 'compiler'
diff --git a/Golden_Repo/n/nest-desktop/nest-desktop-3.3.0-GCCcore-11.3.0.eb b/Golden_Repo/n/nest-desktop/nest-desktop-3.3.0-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..1d9a11aa3e65fdfcd3ff3b656fdcf309b391ca7b
--- /dev/null
+++ b/Golden_Repo/n/nest-desktop/nest-desktop-3.3.0-GCCcore-11.3.0.eb
@@ -0,0 +1,53 @@
+easyblock = 'PythonBundle'
+
+name = 'nest-desktop'
+version = '3.3.0'
+
+homepage = 'https://nest-desktop.readthedocs.io'
+description = """
+NEST Desktop is a web-based GUI application for NEST Simulator,
+an advanced simulation tool for computational neuroscience.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+builddependencies = [
+    ('binutils', '2.38'),
+    ('nodejs', '16.15.1'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+]
+
+exts_default_options = {
+    'source_urls': [PYPI_SOURCE],
+    'use_pip': True,
+    'sanity_pip_check': True,
+    'download_dep_fail': True,
+    'use_pip_for_deps': False,
+}
+
+exts_list = [
+    ('nest-desktop', version, {
+        'source_urls': ['https://github.com/nest-desktop/nest-desktop/archive/'],
+        'sources': ['v%s.tar.gz' % version],
+        'checksums': ['e82e7aeafe15e822e100b490ec3254ee6d2ec60f8539f44c4e37abc914c21989'],
+        'buildcmd': 'yarn install && yarn build',
+    }),
+]
+
+# specify that Bundle easyblock should run a full sanity check, rather than just trying to load the module
+# full_sanity_check = True
+sanity_check_paths = {
+    'files': [
+        'bin/nest-desktop',
+    ],
+    'dirs': [
+        'bin',
+        'lib/python%(pyshortver)s/site-packages',
+    ],
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/n/nest-simulator/nest-simulator-3.6-gpsmkl-2022a.eb b/Golden_Repo/n/nest-simulator/nest-simulator-3.6-gpsmkl-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..d281c1283ac228d7951f599f29b62e3bf8378f0a
--- /dev/null
+++ b/Golden_Repo/n/nest-simulator/nest-simulator-3.6-gpsmkl-2022a.eb
@@ -0,0 +1,110 @@
+easyblock = 'PythonBundle'
+
+name = 'nest-simulator'
+version = '3.6'
+
+homepage = 'https://www.nest-simulator.org/'
+description = """NEST is a simulator for spiking neural network models
+that focuses on the dynamics, size and structure of neural systems
+rather than on the exact morphology of individual neurons.
+"""
+
+toolchain = {'name': 'gpsmkl', 'version': '2022a'}
+toolchainopts = {'pic': True, 'usempi': True}
+
+builddependencies = [
+    ('CMake', '3.23.1'),
+    ('pkgconf', '1.8.0'),
+]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('libtool', '2.4.7'),
+    ('Boost', '1.79.0'),
+    ('HDF5', '1.12.2'),
+    ('SciPy-bundle', '2022.05', '', ('gcccoremkl', '11.3.0-2022.1.0')),
+    ('GSL', '2.7'),
+    ('mpi4py', '3.1.4'),
+    ('SIONlib', '1.7.7'),
+    ('libneurosim', '1.2.0'),
+]
+
+components = [
+    ('nest-simulator', '%s' % version, {
+        'easyblock': 'CMakeMake',
+        'source_urls': ['https://github.com/nest/nest-simulator/archive/'],
+        'sources': ['v%s.tar.gz' % version],
+        'checksums': ['68d6b11791e1284dc94fef35d84c08dd7a11322c0f1e1fc9b39c5e6882284922'],
+        'separate_build_dir': True,
+        'start_dir': 'nest-simulator-%s' % version,
+        'configopts': (
+            '-DCMAKE_VERBOSE_MAKEFILE=ON '
+            '-Dwith-libraries=OFF '
+            '-Dwith-warning=ON '
+            '-Dwith-userdoc=OFF '
+            '-Dwith-optimize=ON '
+            '-Dwith-boost=ON '
+            '-Dwith-python=ON '
+            '-Dwith-openmp=ON '
+            '-Dwith-mpi=ON '
+            '-Dwith-gsl=ON '
+            '-Dwith-hdf5=ON '
+            '-Dwith-libneurosim=ON '
+            '-Dwith-ltdl=ON '
+            '-Dwith-music=OFF '
+            '-Dwith-readline=ON '
+            '-Dwith-sionlib=ON '
+            '-DSIONLIB_LIBRARIES=${EBROOTSIONLIB}/lib/ '
+        ),
+        'preinstallopts': (
+            'ln -s %(installdir)s/lib64/ %(installdir)s/lib && '
+        )
+    })
+]
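+
+# (the lib -> lib64 symlink in preinstallopts above is created before
+#  'make install', presumably because CMake installs into lib64/ on this
+#  platform while the Python extensions and the sanity check below expect
+#  lib/python%(pyshortver)s/site-packages)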
+
+exts_default_options = {
+    'source_urls': [PYPI_SOURCE],
+    'use_pip': True,
+    'sanity_pip_check': False,  # skip as it requires protobuf, TensorFlow
+    'download_dep_fail': True,
+    'use_pip_for_deps': False,
+}
+
+exts_list = [
+    ('blinker', '1.6.2', {
+        'checksums': ['4afd3de66ef3a9f8067559fb7a1cbe555c17dcbe15971b05d1b625c3e7abe213'],
+    }),
+    ('Werkzeug', '2.2.3', {
+        'checksums': ['2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe'],
+    }),
+    ('itsdangerous', '2.1.2', {
+        'checksums': ['5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a'],
+    }),
+    ('Flask', '2.2.5', {
+        'modulename': 'flask',
+        'checksums': ['edee9b0a7ff26621bd5a8c10ff484ae28737a2410d99b0bb9a6850c7fb977aa0'],
+    }),
+    ('Flask-Cors', '4.0.0', {
+        'modulename': 'flask_cors',
+        'checksums': ['f268522fcb2f73e2ecdde1ef45e2fd5c71cc48fe03cffb4b441c6d1b40684eb0'],
+    }),
+    ('RestrictedPython', '6.1', {
+        'modulename': 'RestrictedPython',
+        'checksums': ['7e58ea15cc92a0b916e0c8ca295e8b2c6d40fee4d12e1a2b5063d86efc279a9c'],
+    }),
+    ('gunicorn', '21.2.0', {
+        'checksums': ['88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033'],
+    }),
+]
+
+sanity_check_paths = {
+    'files': [
+        'bin/nest', 'bin/nest-server',
+    ],
+    'dirs': [
+        'bin', 'include', 'lib', 'share',
+        'lib/python%(pyshortver)s/site-packages/nest',
+    ]
+}
+
+moduleclass = 'tools'
diff --git a/Golden_Repo/n/npsmpic/npsmpic-2022a.eb b/Golden_Repo/n/npsmpic/npsmpic-2022a.eb
index 863cd7d65002dacac9067cabdf7c07efe737f5d3..3649f770c340a4efbb114d1773ed13454ed83cac 100644
--- a/Golden_Repo/n/npsmpic/npsmpic-2022a.eb
+++ b/Golden_Repo/n/npsmpic/npsmpic-2022a.eb
@@ -13,7 +13,7 @@ local_compiler = ('NVHPC', '23.1')
 dependencies = [
     local_compiler,
     ('CUDA', '11.7', '', SYSTEM),
-    ('psmpi', '5.7.0-1', '', local_compiler),
+    ('psmpi', '5.7.1-1', '', local_compiler),
 ]
 
 moduleclass = 'toolchain'
diff --git a/Golden_Repo/n/nvidia-driver/nvidia-driver-default.eb b/Golden_Repo/n/nvidia-driver/nvidia-driver-default.eb
index 006b90db0a37ee07cdabb3e7eed8b5649ede84ad..1a3c6bfc1c4bf5702a2157cb202ed050449f9e6e 100644
--- a/Golden_Repo/n/nvidia-driver/nvidia-driver-default.eb
+++ b/Golden_Repo/n/nvidia-driver/nvidia-driver-default.eb
@@ -17,11 +17,6 @@ source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
 sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
 checksums = ['0492ddc5b5e65aa00cbc762e8d6680205c8d08e103b7131087a15126aee495e9']
 
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
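+# Install only the GL libraries from the driver bundle; NVML and the rest now
+# come from the host driver, which avoids the conflict the removed
+# postinstallcmds worked around (assumption: 'just_GL_libs' is an option of the
+# site-custom nvidia-driver easyblock).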
+just_GL_libs = True
 
 moduleclass = 'system'
diff --git a/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb b/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
index 690b2151204daca07ac4aa29e7bec0e2a493ac0e..179404a571ef490a750171b2d9d93963969555f0 100644
--- a/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
+++ b/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
@@ -35,7 +35,7 @@ dependencies = [
     ('UCX', 'default'),
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
-    ('PMIx', '3.2.3'),  # We rely on this version since it is the newest supported by psmgmt
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
     ('UCC', 'default'),
 ]
 
diff --git a/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb b/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
index 6bb054ba122071f808ba2a0f0f5f5faf57ed1b9b..884cd075687ff7bfc60c96beb1cbdec9d665c579 100644
--- a/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
+++ b/Golden_Repo/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
@@ -35,7 +35,7 @@ dependencies = [
     ('UCX', 'default'),
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
-    ('PMIx', '3.2.3'),  # We rely on this version since it is the newest supported by psmgmt
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
     ('UCC', 'default'),
 ]
 
diff --git a/Golden_Repo/p/PMIx/PMIx-4.2.6-GCCcore-11.3.0.eb b/Golden_Repo/p/PMIx/PMIx-4.2.6-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..842032b4248bcf73fcddbcafded1998693309486
--- /dev/null
+++ b/Golden_Repo/p/PMIx/PMIx-4.2.6-GCCcore-11.3.0.eb
@@ -0,0 +1,46 @@
+easyblock = 'ConfigureMake'
+
+name = 'PMIx'
+version = '4.2.6'
+
+homepage = 'https://pmix.org/'
+description = """Process Management for Exascale Environments
+PMI Exascale (PMIx) represents an attempt to
+provide an extended version of the PMI standard specifically designed
+to support clusters up to and including exascale sizes. The overall
+objective of the project is not to branch the existing pseudo-standard
+definitions - in fact, PMIx fully supports both of the existing PMI-1
+and PMI-2 APIs - but rather to (a) augment and extend those APIs to
+eliminate some current restrictions that impact scalability, and (b)
+provide a reference implementation of the PMI-server that demonstrates
+the desired level of scalability.
+"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+source_urls = ['https://github.com/openpmix/openpmix/releases/download/v%(version)s']
+sources = ['%(namelower)s-%(version)s.tar.bz2']
+checksums = ['10b0d5a7fca70272e9427c677557578ac452cea02aeb00e30dec2116d20c3cd0']
+
+builddependencies = [('binutils', '2.38')]
+
+dependencies = [
+    ('libevent', '2.1.12'),
+    ('zlib', '1.2.12'),
+    ('hwloc', '2.7.1'),
+]
+
+configopts = ' --with-libevent=$EBROOTLIBEVENT --with-zlib=$EBROOTZLIB'
+configopts += ' --with-hwloc=$EBROOTHWLOC'
+configopts += ' --enable-pmix-binaries'
+configopts += ' --with-munge'
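+# Note: $EBROOTLIBEVENT, $EBROOTZLIB and $EBROOTHWLOC above are set by the
+# corresponding dependency modules at build time, so configure resolves the
+# EasyBuild-provided libraries instead of any system copies.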
+
+buildopts = 'V=1'
+
+sanity_check_paths = {
+    'files': ['bin/pevent', 'bin/plookup', 'bin/pmix_info', 'bin/pps'],
+    'dirs': ['etc', 'include', 'lib', 'share']
+}
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/p/psmpi-settings/psmpi-settings-5-CUDA.eb b/Golden_Repo/p/psmpi-settings/psmpi-settings-5-CUDA.eb
index f1f0557309f4e69160bcac47c44921b0f323a853..e12b9ee8063c1ee17a49f6c7d56ced2810c28280 100644
--- a/Golden_Repo/p/psmpi-settings/psmpi-settings-5-CUDA.eb
+++ b/Golden_Repo/p/psmpi-settings/psmpi-settings-5-CUDA.eb
@@ -20,6 +20,7 @@ modextravars = {
     'PSP_CUDA': '1',
     'PSP_SHM': '0',
     'PSP_HARD_ABORT': '1',
+    'SLURM_MPI_TYPE': 'pspmi',
 }
 
 modluafooter = '''
diff --git a/Golden_Repo/p/psmpi-settings/psmpi-settings-5-UCX.eb b/Golden_Repo/p/psmpi-settings/psmpi-settings-5-UCX.eb
index 3f0f135e759d20f791754b5a4bbbeb136772a12d..48820298007581f16001279b01082ce936398fd3 100644
--- a/Golden_Repo/p/psmpi-settings/psmpi-settings-5-UCX.eb
+++ b/Golden_Repo/p/psmpi-settings/psmpi-settings-5-UCX.eb
@@ -16,6 +16,7 @@ modextravars = {
     'PSP_OPENIB': '0',
     'PSP_UCP': '1',
     'PSP_HARD_ABORT': '1',
+    'SLURM_MPI_TYPE': 'pspmi',
 }
 
 moduleclass = 'system'
diff --git a/Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.5.0-gpsmkl-2022a.eb b/Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.8.0-gpsmkl-2022a.eb
similarity index 67%
rename from Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.5.0-gpsmkl-2022a.eb
rename to Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.8.0-gpsmkl-2022a.eb
index 7f567c04e8b5b1e73f4ab1872ec6f2cc4f61e9d4..c7cac8092f7c770325d3f51bf5f3be87f10124c4 100644
--- a/Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.5.0-gpsmkl-2022a.eb
+++ b/Golden_Repo/q/Qiskit-juqcs/Qiskit-juqcs-0.8.0-gpsmkl-2022a.eb
@@ -1,7 +1,7 @@
 easyblock = 'PythonBundle'
 
 name = 'Qiskit-juqcs'
-version = '0.5.0'
+version = '0.8.0'
 
 homepage = 'https://jugit.fz-juelich.de/qip/juniq-platform/qiskit-juqcs/'
 description = """Qiskit provider for JUQCS (Juelich Universal Quantum Computer Simulator)."""
@@ -27,12 +27,14 @@ exts_list = [
         'modulename': 'jwt',
         'checksums': ['69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd'],
     }),
-    ('pyunicore', '0.14.1', {
-        'checksums': ['7efd4784d55ef02c4da4431232b54505814666577165c0f320c922ed2b32af01'],
+    ('pyunicore', '0.15.0', {
+        'checksums': ['dc57ef05b1681b20471e8a4a72067671f74b4d41601099fe80324293439175d5'],
     }),
     ('qiskit-juqcs', version, {
         'modulename': False,
-        'checksums': ['6d8800986d5924e2e07635a315d15bfbc48297649604c83a4ec282ea7d6ba737'],
+        'source_urls': ['https://jugit.fz-juelich.de/qip/juniq-platform/qiskit-juqcs/-/archive/%s/' % version],
+        'sources': ['qiskit-juqcs-%s.tar.gz' % version],
+        'checksums': ['8e2609913b4e00bd5752273b9248bf48986cd61c51b2f4d465b93ef108a89696'],
     }),
 ]
 
diff --git a/Golden_Repo/t/TotalView/TotalView-2023.2.15-GCCcore-11.3.0.eb b/Golden_Repo/t/TotalView/TotalView-2023.2.15-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..199a924d61a62df42ca13a5e8e5a3529c54a5cdb
--- /dev/null
+++ b/Golden_Repo/t/TotalView/TotalView-2023.2.15-GCCcore-11.3.0.eb
@@ -0,0 +1,64 @@
+# This is an easyconfig file for EasyBuild, see
+# https://github.com/hpcugent/easybuild
+# Copyright:: Copyright 2014 Juelich Supercomputing Centre, Germany
+# Authors::   Alexandre Strube <surak@surak.eti.br>
+# License::   New BSD
+#
+# This work is based from experiences from the UNITE project
+# http://apps.fz-juelich.de/unite/
+##
+
+name = "TotalView"
+version = "2023.2.15"
+
+homepage = 'http://www.roguewave.com/products-services/totalview'
+
+description = """TotalView breaks down barriers to understanding what's going
+ on with your high-performance computing (HPC) and supercomputing applications.
+ Purpose-built for multicore and parallel computing, TotalView provides a set
+ of tools providing unprecedented control over processes and thread execution,
+ along with deep visibility into program states and data.
+
+ By allowing the simultaneous debugging of many processes and threads in a
+ single window, you get complete control over program execution: running,
+ stepping, and halting line-by-line through code within a single thread or
+ within arbitrary groups of processes or threads. You can also work backwards
+ from failure through reverse debugging, isolating the root cause faster by
+ eliminating the need to repeatedly restart the application, reproduce and
+ troubleshoot difficult problems that can occur in concurrent programs that
+ take advantage of threads, OpenMP, MPI, GPUs, or coprocessors.
+
+ With customizable displays of the state of your running program, memory leaks,
+ deadlocks, and race conditions are things of the past. Whether you're a
+ scientific and technical computing veteran, or new to the development
+ challenges of multicore or parallel applications, TotalView gives you the
+ insight to find and correct errors quickly, validate prototypes early, verify
+ calculations accurately, and above all, certify code correctly.
+
+ TotalView works with C, C++, and Fortran applications written for Linux
+ (including the Cray and Blue Gene platforms), UNIX, Mac OS X, and Xeon Phi
+ coprocessor, and supports OpenMP, MPI, and OpenACC / CUDA.
+ """
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+dependencies = [
+    ('X11', '20220504'),
+]
+
+sources = [
+    '%(namelower)s_%(version)s_linux_x86-64.tar',
+    '%(namelower)s.%(version)s-doc.tar',
+]
+checksums = [
+    {'totalview_2023.2.15_linux_x86-64.tar': 'd5e19045b09015be48b5723bce3c16fd22751d54e72205c4d64bb8395c2242d4'},
+    {'totalview.2023.2.15-doc.tar': '06b4e46c4f0e4dfa77d6296a4a2697b92c8f5e9eb89c9e61718d966c3ac41dbd'},
+]
+
+sanity_check_paths = {
+    'files': ["toolworks/%(namelower)s.%(version)s/bin/totalview"],
+    'dirs': []
+}
+
+moduleclass = 'debugger'
diff --git a/Golden_Repo/u/UCX-settings/UCX-settings-DC-CUDA.eb b/Golden_Repo/u/UCX-settings/UCX-settings-DC-CUDA.eb
index d335c8261eb2457db6ff040f80958482ca89a53d..513c564e05224d748ce747a06d5ae13a010c698c 100644
--- a/Golden_Repo/u/UCX-settings/UCX-settings-DC-CUDA.eb
+++ b/Golden_Repo/u/UCX-settings/UCX-settings-DC-CUDA.eb
@@ -20,6 +20,9 @@ modextravars = {
     # It actually has the side effect of using Ethernet and IB ports on JUSUF, which end up saturating the ethernet
     # fabric and result in a slow down
     'UCX_MAX_RNDV_RAILS': '1',
+    # To make sure that transfers involving buffers allocated with cudaMallocManaged are done by the GPU. Important
+    # mostly for nodes with multiple GPUs per node
+    'UCX_RNDV_FRAG_MEM_TYPE': 'cuda',
 }
 
 moduleclass = 'system'
diff --git a/Golden_Repo/u/UCX-settings/UCX-settings-RC-CUDA.eb b/Golden_Repo/u/UCX-settings/UCX-settings-RC-CUDA.eb
index f3d28552145c2e7576c6060e8af92ca48897a126..d0be7425efc43b30b06c309756c11f7c4359ab1a 100644
--- a/Golden_Repo/u/UCX-settings/UCX-settings-RC-CUDA.eb
+++ b/Golden_Repo/u/UCX-settings/UCX-settings-RC-CUDA.eb
@@ -20,6 +20,9 @@ modextravars = {
     # It actually has the side effect of using Ethernet and IB ports on JUSUF, which end up saturating the ethernet
     # fabric and result in a slow down
     'UCX_MAX_RNDV_RAILS': '1',
+    # To make sure that transfers involving buffers allocated with cudaMallocManaged are done by the GPU. Important
+    # mostly for nodes with multiple GPUs per node
+    'UCX_RNDV_FRAG_MEM_TYPE': 'cuda',
 }
 
 moduleclass = 'system'
diff --git a/Golden_Repo/u/UCX-settings/UCX-settings-UD-CUDA.eb b/Golden_Repo/u/UCX-settings/UCX-settings-UD-CUDA.eb
index a2d8d373290e1f96e9aa4d79bb968a0b54ed583a..e581a757effba63bef1bcccb87935503dedc840a 100644
--- a/Golden_Repo/u/UCX-settings/UCX-settings-UD-CUDA.eb
+++ b/Golden_Repo/u/UCX-settings/UCX-settings-UD-CUDA.eb
@@ -20,6 +20,9 @@ modextravars = {
     # It actually has the side effect of using Ethernet and IB ports on JUSUF, which end up saturating the ethernet
     # fabric and result in a slow down
     'UCX_MAX_RNDV_RAILS': '1',
+    # To make sure that transfers involving buffers allocated with cudaMallocManaged are done by the GPU. Important
+    # mostly for nodes with multiple GPUs per node
+    'UCX_RNDV_FRAG_MEM_TYPE': 'cuda',
 }
 
 moduleclass = 'system'
diff --git a/Golden_Repo/x/XZ/XZ-5.2.5-GCCcore-11.3.0.eb b/Golden_Repo/x/XZ/XZ-5.2.5-GCCcore-11.3.0.eb
index 2ee4825b70094d182c1873e0e42d67dfab977b07..c24a57a45c369d03f740cb7bd93016338f922dc3 100644
--- a/Golden_Repo/x/XZ/XZ-5.2.5-GCCcore-11.3.0.eb
+++ b/Golden_Repo/x/XZ/XZ-5.2.5-GCCcore-11.3.0.eb
@@ -14,18 +14,20 @@ toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
 source_urls = ['https://tukaani.org/xz/']
 sources = [SOURCELOWER_TAR_BZ2]
 patches = [
-    'XZ-5.2.5_compat-libs.patch',
+    'xz-5.2.5_fix-symbols.patch',
     'xz-5.2.5-cve-2022-1271.patch',
 ]
 checksums = [
-    '5117f930900b341493827d63aa910ff5e011e0b994197c3b71c08a20228a42df',  # xz-5.2.5.tar.bz2
-    '9747c8fdf0b3c9501ac5479a807151d75b99bea7816a59565edea267230da195',  # XZ-5.2.5_compat-libs.patch
-    '98c6cb1042284fe704ec30083f3fc87364ce9ed2ea51f62bbb0ee9d3448717ec',  # xz-5.2.5-cve-2022-1271.patch
+    {'xz-5.2.5.tar.bz2': '5117f930900b341493827d63aa910ff5e011e0b994197c3b71c08a20228a42df'},
+    {'xz-5.2.5_fix-symbols.patch': 'ce80545e7bd88466fd9561a8abb9da87e62ebc71a1531c909a492fa2844da6d0'},
+    {'xz-5.2.5-cve-2022-1271.patch': '98c6cb1042284fe704ec30083f3fc87364ce9ed2ea51f62bbb0ee9d3448717ec'},
 ]
 
 builddependencies = [
     # use gettext built with system toolchain as build dep to avoid cyclic dependency (XZ -> gettext -> libxml2 -> XZ)
-    ('gettext', '0.21', '', True),
+    ('gettext', '0.21', '', SYSTEM),
+    # use Autotools built with system toolchain as build dep to avoid conflicting ncurses dependency
+    ('Autotools', '20220317', '', SYSTEM),
     ('binutils', '2.38'),
 ]
 
diff --git a/Golden_Repo/x/XZ/xz-5.2.5_fix-symbols.patch b/Golden_Repo/x/XZ/xz-5.2.5_fix-symbols.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fb5ac8f063d5b2589a5e9fecf8cf2101894799cb
--- /dev/null
+++ b/Golden_Repo/x/XZ/xz-5.2.5_fix-symbols.patch
@@ -0,0 +1,681 @@
+From 31d80c6b261b24220776dfaeb8a04f80f80e0a24 Mon Sep 17 00:00:00 2001
+From: Lasse Collin <lasse.collin@tukaani.org>
+Date: Sun, 4 Sep 2022 23:23:00 +0300
+Subject: [PATCH] liblzma: Vaccinate against an ill patch from RHEL/CentOS 7.
+
+RHEL/CentOS 7 shipped with 5.1.2alpha, including the threaded
+encoder that is behind #ifdef LZMA_UNSTABLE in the API headers.
+In 5.1.2alpha these symbols are under XZ_5.1.2alpha in liblzma.map.
+API/ABI compatibility tracking isn't done between development
+releases so newer releases didn't have XZ_5.1.2alpha anymore.
+
+Later RHEL/CentOS 7 updated xz to 5.2.2 but they wanted to keep
+the exported symbols compatible with 5.1.2alpha. After checking
+the ABI changes it turned out that >= 5.2.0 ABI is backward
+compatible with the threaded encoder functions from 5.1.2alpha
+(but not vice versa as fixes and extensions to these functions
+were made between 5.1.2alpha and 5.2.0).
+
+In RHEL/CentOS 7, XZ Utils 5.2.2 was patched with
+xz-5.2.2-compat-libs.patch to modify liblzma.map:
+
+  - XZ_5.1.2alpha was added with lzma_stream_encoder_mt and
+    lzma_stream_encoder_mt_memusage. This matched XZ Utils 5.1.2alpha.
+
+  - XZ_5.2 was replaced with XZ_5.2.2. It is clear that this was
+    an error; the intention was to keep using XZ_5.2 (XZ_5.2.2
+    has never been used in XZ Utils). So XZ_5.2.2 lists all
+    symbols that were listed under XZ_5.2 before the patch.
+    lzma_stream_encoder_mt and _mt_memusage are included too so
+    they are listed both here and under XZ_5.1.2alpha.
+
+The patch didn't add any __asm__(".symver ...") lines to the .c
+files. Thus the resulting liblzma.so exports the threaded encoder
+functions under XZ_5.1.2alpha only. Listing the two functions
+also under XZ_5.2.2 in liblzma.map has no effect without
+matching .symver lines.
+
+The lack of XZ_5.2 in RHEL/CentOS 7 means that binaries linked
+against unpatched XZ Utils 5.2.x won't run on RHEL/CentOS 7.
+This is unfortunate but this alone isn't too bad as the problem
+is contained within RHEL/CentOS 7 and doesn't affect users
+of other distributions. It could also be fixed internally in
+RHEL/CentOS 7.
+
+The second problem is more serious: In XZ Utils 5.2.2 the API
+headers don't have #ifdef LZMA_UNSTABLE for obvious reasons.
+This is true in RHEL/CentOS 7 version too. Thus now programs
+using new APIs can be compiled without an extra #define. However,
+the programs end up depending on symbol version XZ_5.1.2alpha
+(and possibly also XZ_5.2.2) instead of XZ_5.2 as they would
+with an unpatched XZ Utils 5.2.2. This means that such binaries
+won't run on other distributions shipping XZ Utils >= 5.2.0 as
+they don't provide XZ_5.1.2alpha or XZ_5.2.2; they only provide
+XZ_5.2 (and XZ_5.0). (This includes RHEL/CentOS 8 as the patch
+luckily isn't included there anymore with XZ Utils 5.2.4.)
+
+Binaries built by RHEL/CentOS 7 users get distributed and then
+people wonder why they don't run on some other distribution.
+Seems that people have found out about the patch and been copying
+it to some build scripts, seemingly curing the symptoms but
+actually spreading the illness further and outside RHEL/CentOS 7.
+
+The ill patch seems to be from late 2016 (RHEL 7.3) and in 2017 it
+had spread at least to EasyBuild. I heard about the events only
+recently. :-(
+
+This commit splits liblzma.map into two versions: one for
+GNU/Linux and another for other OSes that can use symbol versioning
+(FreeBSD, Solaris, maybe others). The Linux-specific file and the
+matching additions to .c files add full compatibility with binaries
+that have been built against a RHEL/CentOS-patched liblzma. Builds
+for OSes other than GNU/Linux won't get the vaccine as they should
+be immune to the problem (I really hope that no build script uses
+the RHEL/CentOS 7 patch outside GNU/Linux).
+
+The RHEL/CentOS compatibility symbols XZ_5.1.2alpha and XZ_5.2.2
+are intentionally put *after* XZ_5.2 in liblzma_linux.map. This way
+if one forgets to #define HAVE_SYMBOL_VERSIONS_LINUX when building,
+the resulting liblzma.so.5 will have lzma_stream_encoder_mt@@XZ_5.2
+since XZ_5.2 {...} is the first one that lists that function.
+Without HAVE_SYMBOL_VERSIONS_LINUX @XZ_5.1.2alpha and @XZ_5.2.2
+will be missing but that's still a minor problem compared to
+only having lzma_stream_encoder_mt@@XZ_5.1.2alpha!
+
+The "local: *;" line was moved to XZ_5.0 so that it doesn't need
+to be moved around. It doesn't matter where it is put.
+
+Having two similar liblzma_*.map files is a bit silly as it is,
+at least for now, easily possible to generate the generic one
+from the Linux-specific file. But that adds extra steps and
+increases the risk of mistakes when supporting more than one
+build system. So I rather maintain two files in parallel and let
+validate_map.sh check that they are in sync when "make mydist"
+is run.
+
+This adds .symver lines for lzma_stream_encoder_mt@XZ_5.2.2 and
+lzma_stream_encoder_mt_memusage@XZ_5.2.2 even though these
+weren't exported by RHEL/CentOS 7 (only @@XZ_5.1.2alpha was
+for these two). I added these anyway because someone might
+misunderstand the RHEL/CentOS 7 patch and think that @XZ_5.2.2
+(@@XZ_5.2.2) versions were exported too.
+
+At glance one could suggest using __typeof__ to copy the function
+prototypes when making aliases. However, this doesn't work trivially
+because __typeof__ won't copy attributes (lzma_nothrow, lzma_pure)
+and it won't change symbol visibility from hidden to default (done
+by LZMA_API()). Attributes could be copied with __copy__ attribute
+but that needs GCC 9 and a fallback method would be needed anyway.
+
+This uses __symver__ attribute with GCC >= 10 and
+__asm__(".symver ...") with everything else. The attribute method
+is required for LTO (-flto) support with GCC. Using -flto with
+GCC older than 10 is now broken on GNU/Linux and will not be fixed
+(can silently result in a broken liblzma build that has dangerously
+incorrect symbol versions). LTO builds with Clang seem to work
+with the traditional __asm__(".symver ...") method.
+
+Thanks to Boud Roukema for reporting the problem and discussing
+the details and testing the fix.
+---
+ configure.ac                                  |  23 +++-
+ src/liblzma/Makefile.am                       |  10 +-
+ src/liblzma/common/block_buffer_encoder.c     |  18 +++
+ src/liblzma/common/common.c                   |  14 ++
+ src/liblzma/common/common.h                   |  28 ++++
+ src/liblzma/common/hardware_cputhreads.c      |  12 ++
+ src/liblzma/common/stream_encoder_mt.c        |  42 ++++++
+ .../{liblzma.map => liblzma_generic.map}      |   6 +-
+ src/liblzma/liblzma_linux.map                 | 123 ++++++++++++++++++
+ src/liblzma/validate_map.sh                   | 113 ++++++++++++++--
+ 10 files changed, 374 insertions(+), 15 deletions(-)
+ rename src/liblzma/{liblzma.map => liblzma_generic.map} (100%)
+ create mode 100644 src/liblzma/liblzma_linux.map
+
+diff --git a/configure.ac b/configure.ac
+index 7945934..0167c09 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -493,7 +493,28 @@ if test "x$enable_symbol_versions" = xauto; then
+ 	esac
+ fi
+ AC_MSG_RESULT([$enable_symbol_versions])
+-AM_CONDITIONAL([COND_SYMVERS], [test "x$enable_symbol_versions" = xyes])
++
++# There are two variants for symbol versioning.
++# See src/liblzma/validate_map.sh for details.
++if test "x$enable_symbol_versions" = xyes; then
++	case $host_os in
++		linux*)
++			enable_symbol_versions=linux
++			AC_DEFINE([HAVE_SYMBOL_VERSIONS_LINUX], [1],
++				[Define to 1 to if GNU/Linux-specific details
++				are wanted for symbol versioning. This must
++				be used together with liblzma_linux.map.])
++			;;
++		*)
++			enable_symbol_versions=generic
++			;;
++	esac
++fi
++
++AM_CONDITIONAL([COND_SYMVERS_LINUX],
++	[test "x$enable_symbol_versions" = xlinux])
++AM_CONDITIONAL([COND_SYMVERS_GENERIC],
++	[test "x$enable_symbol_versions" = xgeneric])
+ 
+ 
+ ##############
+diff --git a/src/liblzma/Makefile.am b/src/liblzma/Makefile.am
+index ae8967c..cf2144d 100644
+--- a/src/liblzma/Makefile.am
++++ b/src/liblzma/Makefile.am
+@@ -26,10 +26,14 @@ liblzma_la_CPPFLAGS = \
+ 	-DTUKLIB_SYMBOL_PREFIX=lzma_
+ liblzma_la_LDFLAGS = -no-undefined -version-info 7:6:2
+ 
+-EXTRA_DIST += liblzma.map validate_map.sh
+-if COND_SYMVERS
++EXTRA_DIST += liblzma_generic.map liblzma_linux.map validate_map.sh
++if COND_SYMVERS_GENERIC
+ liblzma_la_LDFLAGS += \
+-	-Wl,--version-script=$(top_srcdir)/src/liblzma/liblzma.map
++	-Wl,--version-script=$(top_srcdir)/src/liblzma/liblzma_generic.map
++endif
++if COND_SYMVERS_LINUX
++liblzma_la_LDFLAGS += \
++	-Wl,--version-script=$(top_srcdir)/src/liblzma/liblzma_linux.map
+ endif
+ 
+ liblzma_la_SOURCES += ../common/tuklib_physmem.c
+diff --git a/src/liblzma/common/block_buffer_encoder.c b/src/liblzma/common/block_buffer_encoder.c
+index 39e263a..a47342e 100644
+--- a/src/liblzma/common/block_buffer_encoder.c
++++ b/src/liblzma/common/block_buffer_encoder.c
+@@ -325,6 +325,24 @@ lzma_block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
+ }
+ 
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++// This is for compatibility with binaries linked against liblzma that
++// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
++LZMA_SYMVER_API("lzma_block_uncomp_encode@XZ_5.2.2",
++	lzma_ret, lzma_block_uncomp_encode_522)(lzma_block *block,
++		const uint8_t *in, size_t in_size,
++		uint8_t *out, size_t *out_pos, size_t out_size)
++		lzma_nothrow lzma_attr_warn_unused_result
++		__attribute__((__alias__("lzma_block_uncomp_encode_52")));
++
++LZMA_SYMVER_API("lzma_block_uncomp_encode@@XZ_5.2",
++	lzma_ret, lzma_block_uncomp_encode_52)(lzma_block *block,
++		const uint8_t *in, size_t in_size,
++		uint8_t *out, size_t *out_pos, size_t out_size)
++		lzma_nothrow lzma_attr_warn_unused_result;
++
++#define lzma_block_uncomp_encode lzma_block_uncomp_encode_52
++#endif
+ extern LZMA_API(lzma_ret)
+ lzma_block_uncomp_encode(lzma_block *block,
+ 		const uint8_t *in, size_t in_size,
+diff --git a/src/liblzma/common/common.c b/src/liblzma/common/common.c
+index cf714e5..10fc884 100644
+--- a/src/liblzma/common/common.c
++++ b/src/liblzma/common/common.c
+@@ -366,6 +366,20 @@ lzma_end(lzma_stream *strm)
+ }
+ 
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++// This is for compatibility with binaries linked against liblzma that
++// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
++LZMA_SYMVER_API("lzma_get_progress@XZ_5.2.2",
++	void, lzma_get_progress_522)(lzma_stream *strm,
++		uint64_t *progress_in, uint64_t *progress_out) lzma_nothrow
++		__attribute__((__alias__("lzma_get_progress_52")));
++
++LZMA_SYMVER_API("lzma_get_progress@@XZ_5.2",
++	void, lzma_get_progress_52)(lzma_stream *strm,
++		uint64_t *progress_in, uint64_t *progress_out) lzma_nothrow;
++
++#define lzma_get_progress lzma_get_progress_52
++#endif
+ extern LZMA_API(void)
+ lzma_get_progress(lzma_stream *strm,
+ 		uint64_t *progress_in, uint64_t *progress_out)
+diff --git a/src/liblzma/common/common.h b/src/liblzma/common/common.h
+index b3d3b7a..6b659c6 100644
+--- a/src/liblzma/common/common.h
++++ b/src/liblzma/common/common.h
+@@ -34,6 +34,34 @@
+ 
+ #include "lzma.h"
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++// To keep link-time optimization (LTO, -flto) working with GCC,
++// the __symver__ attribute must be used instead of __asm__(".symver ...").
++// Otherwise the symbol versions may be lost, resulting in broken liblzma
++// that has wrong default versions in the exported symbol list!
++// The attribute was added in GCC 10; LTO with older GCC is not supported.
++//
++// To keep -Wmissing-prototypes happy, use LZMA_SYMVER_API only with function
++// declarations (including those with __alias__ attribute) and LZMA_API with
++// the function definitions. This means a little bit of silly copy-and-paste
++// between declarations and definitions though.
++//
++// As of GCC 12.2, the __symver__ attribute supports only @ and @@ but the
++// very convenient @@@ isn't supported (it's supported by GNU assembler
++// since 2000). When using @@ instead of @@@, the internal name must not be
++// the same as the external name to avoid problems in some situations. This
++// is why "#define foo_52 foo" is needed for the default symbol versions.
++#	if TUKLIB_GNUC_REQ(10, 0)
++#		define LZMA_SYMVER_API(extnamever, type, intname) \
++			extern __attribute__((__symver__(extnamever))) \
++					LZMA_API(type) intname
++#	else
++#		define LZMA_SYMVER_API(extnamever, type, intname) \
++			__asm__(".symver " #intname "," extnamever); \
++			extern LZMA_API(type) intname
++#	endif
++#endif
++
+ // These allow helping the compiler in some often-executed branches, whose
+ // result is almost always the same.
+ #ifdef __GNUC__
+diff --git a/src/liblzma/common/hardware_cputhreads.c b/src/liblzma/common/hardware_cputhreads.c
+index f468366..5d246d2 100644
+--- a/src/liblzma/common/hardware_cputhreads.c
++++ b/src/liblzma/common/hardware_cputhreads.c
+@@ -15,6 +15,18 @@
+ #include "tuklib_cpucores.h"
+ 
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++// This is for compatibility with binaries linked against liblzma that
++// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
++LZMA_SYMVER_API("lzma_cputhreads@XZ_5.2.2",
++	uint32_t, lzma_cputhreads_522)(void) lzma_nothrow
++		__attribute__((__alias__("lzma_cputhreads_52")));
++
++LZMA_SYMVER_API("lzma_cputhreads@@XZ_5.2",
++	uint32_t, lzma_cputhreads_52)(void) lzma_nothrow;
++
++#define lzma_cputhreads lzma_cputhreads_52
++#endif
+ extern LZMA_API(uint32_t)
+ lzma_cputhreads(void)
+ {
+diff --git a/src/liblzma/common/stream_encoder_mt.c b/src/liblzma/common/stream_encoder_mt.c
+index 2ab4d04..819b227 100644
+--- a/src/liblzma/common/stream_encoder_mt.c
++++ b/src/liblzma/common/stream_encoder_mt.c
+@@ -1078,6 +1078,31 @@ stream_encoder_mt_init(lzma_next_coder *next, const lzma_allocator *allocator,
+ }
+ 
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++// These are for compatibility with binaries linked against liblzma that
++// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
++// Actually that patch didn't create lzma_stream_encoder_mt@XZ_5.2.2
++// but it has been added here anyway since someone might misread the
++// RHEL patch and think both @XZ_5.1.2alpha and @XZ_5.2.2 exist.
++LZMA_SYMVER_API("lzma_stream_encoder_mt@XZ_5.1.2alpha",
++	lzma_ret, lzma_stream_encoder_mt_512a)(
++		lzma_stream *strm, const lzma_mt *options)
++		lzma_nothrow lzma_attr_warn_unused_result
++		__attribute__((__alias__("lzma_stream_encoder_mt_52")));
++
++LZMA_SYMVER_API("lzma_stream_encoder_mt@XZ_5.2.2",
++	lzma_ret, lzma_stream_encoder_mt_522)(
++		lzma_stream *strm, const lzma_mt *options)
++		lzma_nothrow lzma_attr_warn_unused_result
++		__attribute__((__alias__("lzma_stream_encoder_mt_52")));
++
++LZMA_SYMVER_API("lzma_stream_encoder_mt@@XZ_5.2",
++	lzma_ret, lzma_stream_encoder_mt_52)(
++		lzma_stream *strm, const lzma_mt *options)
++		lzma_nothrow lzma_attr_warn_unused_result;
++
++#define lzma_stream_encoder_mt lzma_stream_encoder_mt_52
++#endif
+ extern LZMA_API(lzma_ret)
+ lzma_stream_encoder_mt(lzma_stream *strm, const lzma_mt *options)
+ {
+@@ -1093,6 +1118,23 @@ lzma_stream_encoder_mt(lzma_stream *strm, const lzma_mt *options)
+ }
+ 
+ 
++#ifdef HAVE_SYMBOL_VERSIONS_LINUX
++LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@XZ_5.1.2alpha",
++	uint64_t, lzma_stream_encoder_mt_memusage_512a)(
++	const lzma_mt *options) lzma_nothrow lzma_attr_pure
++	__attribute__((__alias__("lzma_stream_encoder_mt_memusage_52")));
++
++LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@XZ_5.2.2",
++	uint64_t, lzma_stream_encoder_mt_memusage_522)(
++	const lzma_mt *options) lzma_nothrow lzma_attr_pure
++	__attribute__((__alias__("lzma_stream_encoder_mt_memusage_52")));
++
++LZMA_SYMVER_API("lzma_stream_encoder_mt_memusage@@XZ_5.2",
++	uint64_t, lzma_stream_encoder_mt_memusage_52)(
++	const lzma_mt *options) lzma_nothrow lzma_attr_pure;
++
++#define lzma_stream_encoder_mt_memusage lzma_stream_encoder_mt_memusage_52
++#endif
+ // This function name is a monster but it's consistent with the older
+ // monster names. :-( 31 chars is the max that C99 requires so in that
+ // sense it's not too long. ;-)
+diff --git a/src/liblzma/liblzma.map b/src/liblzma/liblzma_generic.map
+similarity index 100%
+rename from src/liblzma/liblzma.map
+rename to src/liblzma/liblzma_generic.map
+index f53a4ea..8cca05b 100644
+--- a/src/liblzma/liblzma.map
++++ b/src/liblzma/liblzma_generic.map
+@@ -93,6 +93,9 @@ global:
+ 	lzma_vli_decode;
+ 	lzma_vli_encode;
+ 	lzma_vli_size;
++
++local:
++	*;
+ };
+ 
+ XZ_5.2 {
+@@ -102,7 +105,4 @@ global:
+ 	lzma_get_progress;
+ 	lzma_stream_encoder_mt;
+ 	lzma_stream_encoder_mt_memusage;
+-
+-local:
+-	*;
+ } XZ_5.0;
+diff --git a/src/liblzma/liblzma_linux.map b/src/liblzma/liblzma_linux.map
+new file mode 100644
+index 0000000..4be882c
+--- /dev/null
++++ b/src/liblzma/liblzma_linux.map
+@@ -0,0 +1,123 @@
++XZ_5.0 {
++global:
++	lzma_alone_decoder;
++	lzma_alone_encoder;
++	lzma_auto_decoder;
++	lzma_block_buffer_bound;
++	lzma_block_buffer_decode;
++	lzma_block_buffer_encode;
++	lzma_block_compressed_size;
++	lzma_block_decoder;
++	lzma_block_encoder;
++	lzma_block_header_decode;
++	lzma_block_header_encode;
++	lzma_block_header_size;
++	lzma_block_total_size;
++	lzma_block_unpadded_size;
++	lzma_check_is_supported;
++	lzma_check_size;
++	lzma_code;
++	lzma_crc32;
++	lzma_crc64;
++	lzma_easy_buffer_encode;
++	lzma_easy_decoder_memusage;
++	lzma_easy_encoder;
++	lzma_easy_encoder_memusage;
++	lzma_end;
++	lzma_filter_decoder_is_supported;
++	lzma_filter_encoder_is_supported;
++	lzma_filter_flags_decode;
++	lzma_filter_flags_encode;
++	lzma_filter_flags_size;
++	lzma_filters_copy;
++	lzma_filters_update;
++	lzma_get_check;
++	lzma_index_append;
++	lzma_index_block_count;
++	lzma_index_buffer_decode;
++	lzma_index_buffer_encode;
++	lzma_index_cat;
++	lzma_index_checks;
++	lzma_index_decoder;
++	lzma_index_dup;
++	lzma_index_encoder;
++	lzma_index_end;
++	lzma_index_file_size;
++	lzma_index_hash_append;
++	lzma_index_hash_decode;
++	lzma_index_hash_end;
++	lzma_index_hash_init;
++	lzma_index_hash_size;
++	lzma_index_init;
++	lzma_index_iter_init;
++	lzma_index_iter_locate;
++	lzma_index_iter_next;
++	lzma_index_iter_rewind;
++	lzma_index_memusage;
++	lzma_index_memused;
++	lzma_index_size;
++	lzma_index_stream_count;
++	lzma_index_stream_flags;
++	lzma_index_stream_padding;
++	lzma_index_stream_size;
++	lzma_index_total_size;
++	lzma_index_uncompressed_size;
++	lzma_lzma_preset;
++	lzma_memlimit_get;
++	lzma_memlimit_set;
++	lzma_memusage;
++	lzma_mf_is_supported;
++	lzma_mode_is_supported;
++	lzma_physmem;
++	lzma_properties_decode;
++	lzma_properties_encode;
++	lzma_properties_size;
++	lzma_raw_buffer_decode;
++	lzma_raw_buffer_encode;
++	lzma_raw_decoder;
++	lzma_raw_decoder_memusage;
++	lzma_raw_encoder;
++	lzma_raw_encoder_memusage;
++	lzma_stream_buffer_bound;
++	lzma_stream_buffer_decode;
++	lzma_stream_buffer_encode;
++	lzma_stream_decoder;
++	lzma_stream_encoder;
++	lzma_stream_flags_compare;
++	lzma_stream_footer_decode;
++	lzma_stream_footer_encode;
++	lzma_stream_header_decode;
++	lzma_stream_header_encode;
++	lzma_version_number;
++	lzma_version_string;
++	lzma_vli_decode;
++	lzma_vli_encode;
++	lzma_vli_size;
++
++local:
++	*;
++};
++
++XZ_5.2 {
++global:
++	lzma_block_uncomp_encode;
++	lzma_cputhreads;
++	lzma_get_progress;
++	lzma_stream_encoder_mt;
++	lzma_stream_encoder_mt_memusage;
++} XZ_5.0;
++
++XZ_5.1.2alpha {
++global:
++	lzma_stream_encoder_mt;
++	lzma_stream_encoder_mt_memusage;
++} XZ_5.0;
++
++XZ_5.2.2 {
++global:
++	lzma_block_uncomp_encode;
++	lzma_cputhreads;
++	lzma_get_progress;
++	lzma_stream_encoder_mt;
++	lzma_stream_encoder_mt_memusage;
++} XZ_5.1.2alpha;
+diff --git a/src/liblzma/validate_map.sh b/src/liblzma/validate_map.sh
+index 3aee466..2bf6f8b 100644
+--- a/src/liblzma/validate_map.sh
++++ b/src/liblzma/validate_map.sh
+@@ -2,7 +2,79 @@
+ 
+ ###############################################################################
+ #
+-# Check liblzma.map for certain types of errors
++# Check liblzma_*.map for certain types of errors.
++#
++# liblzma_generic.map is for FreeBSD and Solaris and possibly others
++# except GNU/Linux.
++#
++# liblzma_linux.map is for GNU/Linux only. This and the matching extra code
++# in the .c files make liblzma >= 5.2.7 compatible with binaries that were
++# linked against ill-patched liblzma in RHEL/CentOS 7. By providing the
++# compatibility in official XZ Utils release will hopefully prevent people
++# from further copying the broken patch to other places when they want
++# compatibility with binaries linked on RHEL/CentOS 7. The long version
++# of the story:
++#
++#     RHEL/CentOS 7 shipped with 5.1.2alpha, including the threaded
++#     encoder that is behind #ifdef LZMA_UNSTABLE in the API headers.
++#     In 5.1.2alpha these symbols are under XZ_5.1.2alpha in liblzma.map.
++#     API/ABI compatibility tracking isn't done between development
++#     releases so newer releases didn't have XZ_5.1.2alpha anymore.
++#
++#     Later RHEL/CentOS 7 updated xz to 5.2.2 but they wanted to keep
++#     the exported symbols compatible with 5.1.2alpha. After checking
++#     the ABI changes it turned out that >= 5.2.0 ABI is backward
++#     compatible with the threaded encoder functions from 5.1.2alpha
++#     (but not vice versa as fixes and extensions to these functions
++#     were made between 5.1.2alpha and 5.2.0).
++#
++#     In RHEL/CentOS 7, XZ Utils 5.2.2 was patched with
++#     xz-5.2.2-compat-libs.patch to modify liblzma.map:
++#
++#       - XZ_5.1.2alpha was added with lzma_stream_encoder_mt and
++#         lzma_stream_encoder_mt_memusage. This matched XZ Utils 5.1.2alpha.
++#
++#       - XZ_5.2 was replaced with XZ_5.2.2. It is clear that this was
++#         an error; the intention was to keep using XZ_5.2 (XZ_5.2.2
++#         has never been used in XZ Utils). So XZ_5.2.2 lists all
++#         symbols that were listed under XZ_5.2 before the patch.
++#         lzma_stream_encoder_mt and _mt_memusage are included too so
++#         they are listed both here and under XZ_5.1.2alpha.
++#
++#     The patch didn't add any __asm__(".symver ...") lines to the .c
++#     files. Thus the resulting liblzma.so exports the threaded encoder
++#     functions under XZ_5.1.2alpha only. Listing the two functions
++#     also under XZ_5.2.2 in liblzma.map has no effect without
++#     matching .symver lines.
++#
++#     The lack of XZ_5.2 in RHEL/CentOS 7 means that binaries linked
++#     against unpatched XZ Utils 5.2.x won't run on RHEL/CentOS 7.
++#     This is unfortunate but this alone isn't too bad as the problem
++#     is contained within RHEL/CentOS 7 and doesn't affect users
++#     of other distributions. It could also be fixed internally in
++#     RHEL/CentOS 7.
++#
++#     The second problem is more serious: In XZ Utils 5.2.2 the API
++#     headers don't have #ifdef LZMA_UNSTABLE for obvious reasons.
++#     This is true in RHEL/CentOS 7 version too. Thus now programs
++#     using new APIs can be compiled without an extra #define. However,
++#     the programs end up depending on symbol version XZ_5.1.2alpha
++#     (and possibly also XZ_5.2.2) instead of XZ_5.2 as they would
++#     with an unpatched XZ Utils 5.2.2. This means that such binaries
++#     won't run on other distributions shipping XZ Utils >= 5.2.0 as
++#     they don't provide XZ_5.1.2alpha or XZ_5.2.2; they only provide
++#     XZ_5.2 (and XZ_5.0). (This includes RHEL/CentOS 8 as the patch
++#     luckily isn't included there anymore with XZ Utils 5.2.4.)
++#
++#     Binaries built by RHEL/CentOS 7 users get distributed and then
++#     people wonder why they don't run on some other distribution.
++#     Seems that people have found out about the patch and been copying
++#     it to some build scripts, seemingly curing the symptoms but
++#     actually spreading the illness further and outside RHEL/CentOS 7.
++#     Adding compatibility in an official XZ Utils release should work
++#     as a vaccine against this ill patch and stop it from spreading.
++#     The vaccine is kept GNU/Linux-only as other OSes should be immune
++#     (hopefully it hasn't spread via some build script to other OSes).
+ #
+ # Author: Lasse Collin
+ #
+@@ -18,11 +90,11 @@ STATUS=0
+ 
+ cd "$(dirname "$0")"
+ 
+-# Get the list of symbols that aren't defined in liblzma.map.
++# Get the list of symbols that aren't defined in liblzma_generic.map.
+ SYMS=$(sed -n 's/^extern LZMA_API([^)]*) \([a-z0-9_]*\)(.*$/\1;/p' \
+ 		api/lzma/*.h \
+ 	| sort \
+-	| grep -Fve "$(sed '/[{}:*]/d;/^$/d;s/^	//' liblzma.map)")
++	| grep -Fve "$(sed '/[{}:*]/d;/^$/d;s/^	//' liblzma_generic.map)")
+ 
+ # Check that there are no old alpha or beta versions listed.
+ VER=$(cd ../.. && sh build-aux/version.sh)
+@@ -30,21 +102,41 @@ NAMES=
+ case $VER in
+ 	*alpha | *beta)
+ 		NAMES=$(sed -n 's/^.*XZ_\([^ ]*\)\(alpha\|beta\) .*$/\1\2/p' \
+-			liblzma.map | grep -Fv "$VER")
++			liblzma_generic.map | grep -Fv "$VER")
+ 		;;
+ esac
+ 
+ # Check for duplicate lines. It can catch missing dependencies.
+-DUPS=$(sort liblzma.map | sed '/^$/d;/^global:$/d' | uniq -d)
++DUPS=$(sort liblzma_generic.map | sed '/^$/d;/^global:$/d' | uniq -d)
++
++# Check that liblzma_linux.map is in sync with liblzma_generic.map.
++# The RHEL/CentOS 7 compatibility symbols are in a fixed location
++# so it makes it easy to remove them for comparison with liblzma_generic.map.
++#
++# NOTE: Putting XZ_5.2 before the compatibility symbols XZ_5.1.2alpha
++# and XZ_5.2.2 in liblzma_linux.map is important: If liblzma_linux.map is
++# incorrectly used without #define HAVE_SYMBOL_VERSIONS_LINUX, only the first
++# occurrence of each function name will be used from liblzma_linux.map;
++# the rest are ignored by the linker. Thus having XZ_5.2 before the
++# compatibility symbols means that @@XZ_5.2 will be used for the symbols
++# listed under XZ_5.2 {...} and the same function names later in
++# the file under XZ_5.1.2alpha {...} and XZ_5.2.2 {...} will be
++# ignored (@XZ_5.1.2alpha or @XZ_5.2.2 won't be added at all when
++# the #define HAVE_SYMBOL_VERSIONS_LINUX isn't used).
++IN_SYNC=
++if ! sed '109,123d' liblzma_linux.map \
++		| cmp -s - liblzma_generic.map; then
++	IN_SYNC=no
++fi
+ 
+ # Print error messages if needed.
+-if test -n "$SYMS$NAMES$DUPS"; then
++if test -n "$SYMS$NAMES$DUPS$IN_SYNC"; then
+ 	echo
+-	echo 'validate_map.sh found problems from liblzma.map:'
++	echo 'validate_map.sh found problems from liblzma_*.map:'
+ 	echo
+ 
+ 	if test -n "$SYMS"; then
+-		echo 'liblzma.map lacks the following symbols:'
++		echo 'liblzma_generic.map lacks the following symbols:'
+ 		echo "$SYMS"
+ 		echo
+ 	fi
+@@ -61,6 +153,11 @@ if test -n "$SYMS$NAMES$DUPS"; then
+ 		echo
+ 	fi
+ 
++	if test -n "$IN_SYNC"; then
++		echo "liblzma_generic.map and liblzma_linux.map aren't in sync"
++		echo
++	fi
++
+ 	STATUS=1
+ fi
+ 
+-- 
+2.20.1
+
diff --git a/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb b/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
index 5f099296322b2e56b87c2530b0a72ab3fc36280c..25260176d568cc842cbdda6fdff4d213be036299 100644
--- a/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
+++ b/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
@@ -31,7 +31,7 @@ dependencies = [
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
     ('UCC', 'default'),
-    ('PMIx', '3.2.3'),
+    ('PMIx', '4.2.6'),
 ]
 
 configopts = '--enable-shared '
diff --git a/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb b/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
index 5bb7a448efc9018c201e7ffe08768e3833795e29..140398c52fd5f8cedfc92afba474b0eecae0991f 100644
--- a/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
+++ b/Overlays/deep_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
@@ -31,7 +31,7 @@ dependencies = [
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
     ('UCC', 'default'),
-    ('PMIx', '3.2.3'),
+    ('PMIx', '4.2.6'),
 ]
 
 configopts = '--enable-shared '
diff --git a/Overlays/hdfml_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/hdfml_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/hdfml_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb b/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
index 628afaded350470ca24258d66033303e71020ff1..daedd3f26c4a168d06db7f3a6b2e060afb625b5f 100644
--- a/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
+++ b/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
@@ -35,7 +35,7 @@ dependencies = [
     ('UCX', 'default'),
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
-    ('PMIx', '3.2.3'),  # We rely on this version since it is the newest supported by psmgmt
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
     ('UCC', 'default'),
 ]
 
diff --git a/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb b/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
index 894b648ec0ea3c94c8f2ff461c97be37bedae4d8..8130bad0612beba62aaf07fcc0f70854e66ba4de 100644
--- a/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
+++ b/Overlays/hdfml_overlay/o/OpenMPI/OpenMPI-4.1.4-NVHPC-23.1.eb
@@ -38,7 +38,7 @@ dependencies = [
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
     # We rely on this version since it is the newest supported by psmgmt
-    ('PMIx', '3.2.3'),
+    ('PMIx', '4.2.6'),
     ('UCC', 'default'),
 ]
 
diff --git a/Overlays/jureca_arm_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb b/Overlays/jureca_arm_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..f1f1285ec9aba9e1c5f20d3d05fddb9bb14a74f1
--- /dev/null
+++ b/Overlays/jureca_arm_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
@@ -0,0 +1,66 @@
+name = 'OpenMPI'
+version = '4.1.4'
+
+homepage = 'https://www.open-mpi.org/'
+description = """The Open MPI Project is an open source MPI-3 implementation."""
+
+toolchain = {'name': 'GCC', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
+sources = [SOURCELOWER_TAR_BZ2]
+patches = [
+    'OpenMPI-4.1.1_opal-datatype-cuda-performance.patch',
+]
+checksums = [
+    '92912e175fd1234368c8730c03f4996fe5942e7479bb1d10059405e7f2b3930d',  # openmpi-4.1.4.tar.bz2
+    # OpenMPI-4.1.1_opal-datatype-cuda-performance.patch
+    'b767c7166cf0b32906132d58de5439c735193c9fd09ec3c5c11db8d5fa68750e',
+]
+
+osdependencies = [
+    # needed for --with-verbs
+    ('libibverbs-dev', 'libibverbs-devel', 'rdma-core-devel'),
+]
+
+builddependencies = [
+    ('pkgconf', '1.8.0'),
+    ('Perl', '5.34.1'),
+    ('Autotools', '20220317'),
+]
+
+dependencies = [
+    ('zlib', '1.2.12'),
+    ('hwloc', '2.7.1'),
+    ('UCX', 'default'),
+    ('CUDA', '11.7', '', SYSTEM),
+    ('libevent', '2.1.12'),
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
+    ('UCC', 'default'),
+]
+
+# Update configure to include changes from the applied patches
+# by running a subset of autogen.pl sufficient to achieve this
+# without doing the full, long-running regeneration.
+preconfigopts = ' && '.join([
+    'cd config',
+    'autom4te --language=m4sh opal_get_version.m4sh -o opal_get_version.sh',
+    'cd ..',
+    'autoconf',
+    'autoheader',
+    'aclocal',
+    'automake',
+    ''
+])
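+# The ' && '.join(...) above expands to a single shell prefix, roughly:
+#   cd config && autom4te --language=m4sh opal_get_version.m4sh \
+#     -o opal_get_version.sh && cd .. && autoconf && autoheader && \
+#     aclocal && automake &&
+# The trailing empty list element yields the final '&& ' that chains into the
+# actual configure invocation.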
+
+configopts = '--without-orte '
+configopts += '--without-psm2 '
+configopts += '--disable-oshmem '
+# No IME on ARM
+# configopts += '--with-ime=/opt/ddn/ime '
+configopts += '--with-gpfs '
+
+# to enable SLURM integration (site-specific)
+configopts += '--with-slurm --with-pmix=external --with-libevent=external --with-ompi-pmix-rte'
+
+moduleclass = 'mpi'
diff --git a/Overlays/jureca_spr_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/jureca_spr_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/jureca_spr_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/Overlays/jureca_spr_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb b/Overlays/jureca_spr_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
index 49c9419c1eaa7ed650c95d3e37e88f289642688c..3024d08685f2f46326036a1cb2bc0b450981d8e6 100644
--- a/Overlays/jureca_spr_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
+++ b/Overlays/jureca_spr_overlay/o/OpenMPI/OpenMPI-4.1.4-GCC-11.3.0.eb
@@ -37,7 +37,7 @@ dependencies = [
     ('UCX', 'default'),
     ('CUDA', '12.0', '', SYSTEM),
     ('libevent', '2.1.12'),
-    ('PMIx', '3.2.3'),  # We rely on this version since it is the newest supported by psmgmt
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
     ('UCC', 'default'),
 ]
 
diff --git a/Overlays/jurecadc_overlay/b/BullMPI/BullMPI-4.1.4-GCC-11.3.0.eb b/Overlays/jurecadc_overlay/b/BullMPI/BullMPI-4.1.4-GCC-11.3.0.eb
index 8fadbbf13ce87fc0ce78a10ffda287fca76c53ac..a0d6e9e3a0e1c18c9b339264ba7a7db23ddfc0a4 100644
--- a/Overlays/jurecadc_overlay/b/BullMPI/BullMPI-4.1.4-GCC-11.3.0.eb
+++ b/Overlays/jurecadc_overlay/b/BullMPI/BullMPI-4.1.4-GCC-11.3.0.eb
@@ -29,7 +29,7 @@ dependencies = [
     ('UCX', 'default'),
     ('CUDA', '11.7', '', SYSTEM),
     ('libevent', '2.1.12'),
-    ('PMIx', '3.2.3'),  # We rely on this version since it is the newest supported by psmgmt
+    ('PMIx', '4.2.6'),  # We rely on this version since it is the newest supported by psmgmt
     ('UCC', 'default'),
 ]
 
diff --git a/Overlays/jurecadc_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/jurecadc_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/jurecadc_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/Overlays/jusuf_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/jusuf_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/jusuf_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/Overlays/juwels_overlay/i/impi-settings/impi-settings-2021-UCX.eb b/Overlays/juwels_overlay/i/impi-settings/impi-settings-2021-UCX.eb
index 45793958bf1aefc5aa52e02f5b6b6847dc65c28d..c6c7234dc0a1bfe4ae06aca319745e4a9b9abe02 100644
--- a/Overlays/juwels_overlay/i/impi-settings/impi-settings-2021-UCX.eb
+++ b/Overlays/juwels_overlay/i/impi-settings/impi-settings-2021-UCX.eb
@@ -16,6 +16,7 @@ sources = []
 modextravars = {
     'FI_PROVIDER': 'mlx',
     'I_MPI_PMI_VALUE_LENGTH_MAX': '900',
+    'SLURM_MPI_TYPE': 'pspmi',
 }
 
 moduleclass = 'system'
diff --git a/Overlays/juwels_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/juwels_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/juwels_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/Overlays/juwelsbooster_overlay/n/nvidia-driver/nvidia-driver-default.eb b/Overlays/juwelsbooster_overlay/n/nvidia-driver/nvidia-driver-default.eb
deleted file mode 100644
index 2900da59e4083c0d7fe1a0d7fd1cae2fd1bd0bf2..0000000000000000000000000000000000000000
--- a/Overlays/juwelsbooster_overlay/n/nvidia-driver/nvidia-driver-default.eb
+++ /dev/null
@@ -1,27 +0,0 @@
-name = 'nvidia-driver'
-version = 'default'
-realversion = '525.105.17'
-
-homepage = 'https://developer.nvidia.com/cuda-toolkit'
-description = f"""
-This is a set of libraries normally installed by the NVIDIA driver installer.
-
-The real version of this package is {realversion}.
-"""
-
-site_contacts = 'sc@fz-juelich.de'
-
-toolchain = SYSTEM
-
-source_urls = ['http://us.download.nvidia.com/tesla/%s/' % realversion]
-sources = ['NVIDIA-Linux-x86_64-%s.run' % realversion]
-checksums = ['c635a21a282c9b53485f19ebb64a0f4b536a968b94d4d97629e0bc547a58142a']
-
-# To avoid conflicts between NVML and the kernel driver
-postinstallcmds = ['rm %(installdir)s/lib64/libnvidia-ml.so*']
-
-modluafooter = '''
-add_property("arch","gpu")
-'''
-
-moduleclass = 'system'
diff --git a/acls.yml b/acls.yml
index a478f36b6dc0e5e1e8f7b395ee56b293b3c425ee..8307ade0745f732b8ea2ac2a0f02194d6164e080 100644
--- a/acls.yml
+++ b/acls.yml
@@ -232,6 +232,9 @@ software:
   - name: 'JupyterProxy-Matlab'
     owner: 'goebbert1'
     base: True
+  - name: 'JupyterProxy-NESTDesktop'
+    owner: 'goebbert1'
+    base: True
   - name: 'lfortran'
     owner: 'goebbert1'
     base: True
@@ -244,6 +247,9 @@ software:
   - name: 'libepoxy'
     owner: 'strube1'
     base: True
+  - name: 'libneurosim'
+    owner: 'goebbert1'
+    mpi: True
   - name: 'libspng'
     owner: 'goebbert1'
     base: True
@@ -277,6 +283,12 @@ software:
   - name: 'myqlm'
     owner: ['gonzalezcalaza1', 'goebbert1']
     base: True
+  - name: 'nest-desktop'
+    owner: 'goebbert1'
+    base: True
+  - name: 'nest-simulator'
+    owner: 'goebbert1'
+    mpi: True
   - name: 'nlohmann-json'
     owner: 'goebbert1'
     base: True
@@ -362,7 +374,7 @@ software:
     owner: ['gonzalezcalaza1', 'goebbert1']
     mpi: True
   - name: 'Qiskit-juqcs'
-    owner: 'gonzalezcalaza1'
+    owner: ['gonzalezcalaza1', 'goebbert1']
     mpi: True
   - name: 'Qt5'
     owner: 'goebbert1'
@@ -1439,4 +1451,7 @@ software:
     owner: 'griessbach1'
     base: True
     mpi: True
-    
+  - name: 'LinaroForge'
+    owner: 'knobloch1'
+    system: True
+