diff --git a/Custom_EasyBlocks/tensorflow.py b/Custom_EasyBlocks/tensorflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca13dd157c232d3e58f9d0a7a55699687fcef40f
--- /dev/null
+++ b/Custom_EasyBlocks/tensorflow.py
@@ -0,0 +1,1123 @@
+##
+# Copyright 2017-2022 Ghent University
+#
+# This file is part of EasyBuild,
+# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
+# with support of Ghent University (http://ugent.be/hpc),
+# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
+# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
+# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
+#
+# https://github.com/easybuilders/easybuild
+#
+# EasyBuild is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation v2.
+#
+# EasyBuild is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with EasyBuild.  If not, see <http://www.gnu.org/licenses/>.
+##
+"""
+EasyBuild support for building and installing TensorFlow, implemented as an easyblock
+
+@author: Kenneth Hoste (HPC-UGent)
+@author: Ake Sandgren (Umea University)
+@author: Damian Alvarez (Forschungszentrum Juelich GmbH)
+@author: Alexander Grund (TU Dresden)
+"""
+import glob
+import os
+import re
+import stat
+import tempfile
+from distutils.version import LooseVersion
+
+import easybuild.tools.environment as env
+import easybuild.tools.toolchain as toolchain
+from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_python_version
+from easybuild.easyblocks.python import EXTS_FILTER_PYTHON_PACKAGES
+from easybuild.framework.easyconfig import CUSTOM
+from easybuild.tools import run
+from easybuild.tools.build_log import EasyBuildError, print_warning
+from easybuild.tools.config import build_option, IGNORE
+from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, copy_file, mkdir, resolve_path
+from easybuild.tools.filetools import is_readable, read_file, which, write_file, remove_file
+from easybuild.tools.modules import get_software_root, get_software_version, get_software_libdir
+from easybuild.tools.run import run_cmd
+from easybuild.tools.systemtools import X86_64, get_cpu_architecture, get_os_name, get_os_version
+
+
+CPU_DEVICE = 'cpu'
+GPU_DEVICE = 'gpu'
+
+# Wrapper for Intel(MPI) compilers, where required environment variables
+# are hardcoded to make sure they are present;
+# this is required because Bazel resets the environment in which
+# compiler commands are executed...
+INTEL_COMPILER_WRAPPER = """#!/bin/bash
+
+export CPATH='%(cpath)s'
+
+# Only relevant for Intel compilers.
+export INTEL_LICENSE_FILE='%(intel_license_file)s'
+
+# Only relevant for MPI compiler wrapper (mpiicc/mpicc etc),
+# not for regular compiler.
+export I_MPI_ROOT='%(intel_mpi_root)s'
+
+# Exclude location of this wrapper from $PATH to avoid other potential
+# wrappers calling this wrapper.
+export PATH=$(echo $PATH | tr ':' '\n' | grep -v "^%(wrapper_dir)s$" | tr '\n' ':')
+
+%(compiler_path)s "$@"
+"""
+
+
+def split_tf_libs_txt(valid_libs_txt):
+    """Split the VALID_LIBS entry from the TF file into single names"""
+    entries = valid_libs_txt.split(',')
+    # Remove double quotes and whitespace
+    result = [entry.strip().strip('"') for entry in entries]
+    # Remove potentially trailing empty element due to trailing comma in the txt
+    if not result[-1]:
+        result.pop()
+    return result
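+# Illustrative example (made-up input): split_tf_libs_txt('"absl_py", "boringssl",')
+# returns ['absl_py', 'boringssl']; the trailing empty entry caused by the final comma is dropped.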
+
+
+def get_system_libs_from_tf(source_dir):
+    """Return the valid values for TF_SYSTEM_LIBS from the TensorFlow source directory"""
+    syslibs_path = os.path.join(source_dir, 'third_party', 'systemlibs', 'syslibs_configure.bzl')
+    result = []
+    if os.path.exists(syslibs_path):
+        txt = read_file(syslibs_path)
+        valid_libs_match = re.search(r'VALID_LIBS\s*=\s*\[(.*?)\]', txt, re.DOTALL)
+        if not valid_libs_match:
+            raise EasyBuildError('VALID_LIBS definition not found in %s', syslibs_path)
+        result = split_tf_libs_txt(valid_libs_match.group(1))
+    return result
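+# Illustrative (the actual list depends on the TF version): this returns names such as
+# 'absl_py', 'boringssl', 'com_google_protobuf', taken from VALID_LIBS in syslibs_configure.bzl.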
+
+
+def get_system_libs_for_version(tf_version, as_valid_libs=False):
+    """
+    Determine valid values for $TF_SYSTEM_LIBS for the given TF version
+
+    If as_valid_libs=False (default) then returns two dictionaries:
+        1: Mapping of <EB name> to <TF name>
+        2: Mapping of <package name> to <TF name> (for python extensions)
+    else returns a string formatted like the VALID_LIBS variable in third_party/systemlibs/syslibs_configure.bzl
+        Those can be used to check/diff against third_party/systemlibs/syslibs_configure.bzl by running:
+            python -c 'from easybuild.easyblocks.tensorflow import get_system_libs_for_version; \
+                        print(get_system_libs_for_version("2.1.0", as_valid_libs=True))'
+    """
+    tf_version = LooseVersion(tf_version)
+
+    def is_version_ok(version_range):
+        """Return True if the TF version to be installed matches the version_range"""
+        min_version, max_version = version_range.split(':')
+        result = True
+        if min_version and tf_version < LooseVersion(min_version):
+            result = False
+        if max_version and tf_version >= LooseVersion(max_version):
+            result = False
+        return result
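+    # Illustrative: with tf_version = 2.1.0, is_version_ok('2.0.0:2.2.0') is True,
+    # while is_version_ok('2.2.0:') is False (min bound is inclusive, max bound is exclusive).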
+
+    # For these lists check third_party/systemlibs/syslibs_configure.bzl --> VALID_LIBS
+    # Also verify third_party/systemlibs/<name>.BUILD or third_party/systemlibs/<name>/BUILD.system
+    # in case it does something "strange" (e.g. linking hardcoded headers)
+
+    # Software which is added as a dependency in the EC
+    available_system_libs = {
+        # Format: (<EB name>, <version range>): <TF name>
+        #         <version range> is '<min version>:<exclusive max version>'
+        ('Abseil', '2.9.0:'): 'com_google_absl',
+        ('cURL', '2.0.0:'): 'curl',
+        ('double-conversion', '2.0.0:'): 'double_conversion',
+        ('flatbuffers', '2.0.0:'): 'flatbuffers',
+        ('giflib', '2.0.0:2.1.0'): 'gif_archive',
+        ('giflib', '2.1.0:'): 'gif',
+        ('hwloc', '2.0.0:'): 'hwloc',
+        ('ICU', '2.0.0:'): 'icu',
+        ('JsonCpp', '2.0.0:'): 'jsoncpp_git',
+        ('libjpeg-turbo', '2.0.0:2.2.0'): 'jpeg',
+        ('libjpeg-turbo', '2.2.0:'): 'libjpeg_turbo',
+        ('libpng', '2.0.0:2.1.0'): 'png_archive',
+        ('libpng', '2.1.0:'): 'png',
+        ('LMDB', '2.0.0:'): 'lmdb',
+        ('NASM', '2.0.0:'): 'nasm',
+        ('nsync', '2.0.0:'): 'nsync',
+        ('PCRE', '2.0.0:2.6.0'): 'pcre',
+        ('protobuf', '2.0.0:'): 'com_google_protobuf',
+        ('pybind11', '2.2.0:'): 'pybind11',
+        ('snappy', '2.0.0:'): 'snappy',
+        ('SQLite', '2.0.0:'): 'org_sqlite',
+        ('SWIG', '2.0.0:2.4.0'): 'swig',
+        ('zlib', '2.0.0:2.2.0'): 'zlib_archive',
+        ('zlib', '2.2.0:'): 'zlib',
+    }
+    # Software recognized by TF but which is always disabled (usually because no EC is known)
+    # Format: <TF name>: <version range>
+    unused_system_libs = {
+        'boringssl': '2.0.0:',
+        'com_github_googleapis_googleapis': '2.0.0:2.5.0',
+        'com_github_googlecloudplatform_google_cloud_cpp': '2.0.0:',  # Not used due to $TF_NEED_GCP=0
+        'com_github_grpc_grpc': '2.2.0:',
+        'com_googlesource_code_re2': '2.0.0:',
+        'grpc': '2.0.0:2.2.0',
+    }
+    # Python packages installed as extensions or in the Python module
+    # Will be checked for availability
+    # Format: (<package name>, <version range>): <TF name>
+    python_system_libs = {
+        ('absl', '2.0.0:'): 'absl_py',
+        ('astor', '2.0.0:'): 'astor_archive',
+        ('astunparse', '2.2.0:'): 'astunparse_archive',
+        ('cython', '2.0.0:'): 'cython',  # Part of Python EC
+        ('dill', '2.4.0:'): 'dill_archive',
+        ('enum', '2.0.0:2.8.0'): 'enum34_archive',  # Part of Python3
+        ('flatbuffers', '2.4.0:'): 'flatbuffers',
+        ('functools', '2.0.0:'): 'functools32_archive',  # Part of Python3
+        ('gast', '2.0.0:'): 'gast_archive',
+        ('google.protobuf', '2.0.0:'): 'com_google_protobuf',
+        ('keras_applications', '2.0.0:2.2.0'): 'keras_applications_archive',
+        ('opt_einsum', '2.0.0:'): 'opt_einsum_archive',
+        ('pasta', '2.0.0:'): 'pasta',
+        ('six', '2.0.0:'): 'six_archive',  # Part of Python EC
+        ('tblib', '2.4.0:'): 'tblib_archive',
+        ('termcolor', '2.0.0:'): 'termcolor_archive',
+        ('typing_extensions', '2.4.0:'): 'typing_extensions_archive',
+        ('wrapt', '2.0.0:'): 'wrapt',
+    }
+
+    dependency_mapping = dict((dep_name, tf_name)
+                              for (dep_name, version_range), tf_name in available_system_libs.items()
+                              if is_version_ok(version_range))
+    python_mapping = dict((pkg_name, tf_name)
+                          for (pkg_name, version_range), tf_name in python_system_libs.items()
+                          if is_version_ok(version_range))
+
+    if as_valid_libs:
+        tf_names = [tf_name for tf_name, version_range in unused_system_libs.items()
+                    if is_version_ok(version_range)]
+        tf_names.extend(dependency_mapping.values())
+        tf_names.extend(python_mapping.values())
+        result = '\n'.join(['    "%s",' % name for name in sorted(tf_names)])
+    else:
+        result = dependency_mapping, python_mapping
+    return result
+
+
+class EB_TensorFlow(PythonPackage):
+    """Support for building/installing TensorFlow."""
+
+    @staticmethod
+    def extra_options():
+        extra_vars = {
+            'path_filter': [[], "List of patterns to be filtered out in paths in $CPATH and $LIBRARY_PATH", CUSTOM],
+            'with_jemalloc': [None, "Make TensorFlow use jemalloc (usually enabled by default). " +
+                                    "Unsupported starting at TensorFlow 1.12!", CUSTOM],
+            'with_mkl_dnn': [None, "Make TensorFlow use Intel MKL-DNN / oneDNN and configure with --config=mkl "
+                                   "(enabled by default where supported for TensorFlow versions before 2.4.0)",
+                             CUSTOM],
+            'with_xla': [None, "Enable XLA JIT compiler for possible runtime optimization of models", CUSTOM],
+            'test_script': [None, "Script to test TensorFlow installation with", CUSTOM],
+            'test_targets': [[], "List of Bazel targets which should be run during the test step", CUSTOM],
+            'test_tag_filters_cpu': ['', "Comma-separated list of tags to filter for during the CPU test step", CUSTOM],
+            'test_tag_filters_gpu': ['', "Comma-separated list of tags to filter for during the GPU test step", CUSTOM],
+            'testopts_gpu': ['', 'Test options for the GPU test step', CUSTOM],
+            'test_max_parallel': [None, "Maximum number of test jobs to run in parallel (GPU tests are limited by " +
+                                  "the number of GPUs). Use None (default) to automatically determine a value", CUSTOM],
+            'jvm_max_memory': [4096, "Maximum amount of memory in MB used for the JVM running Bazel. " +
+                               "Use None to not set a specific limit (uses a default value).", CUSTOM],
+        }
+
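+        # Illustrative easyconfig usage of these options (hypothetical values):
+        #   test_targets = ['//tensorflow/python/...']
+        #   test_tag_filters_cpu = '-gpu,-no_oss'
+        #   jvm_max_memory = 8192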
+        return PythonPackage.extra_options(extra_vars)
+
+    def __init__(self, *args, **kwargs):
+        """Initialize TensorFlow easyblock."""
+        super(EB_TensorFlow, self).__init__(*args, **kwargs)
+
+        with self.cfg.disable_templating():
+            self.cfg['exts_defaultclass'] = 'PythonPackage'
+
+            self.cfg['exts_default_options']['download_dep_fail'] = True
+            self.cfg['exts_default_options']['use_pip'] = True
+            self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES
+
+        self.system_libs_info = None
+
+        self.test_script = None
+
+        # locate test script (if specified)
+        if self.cfg['test_script']:
+            # try to locate test script via obtain_file (just like sources & patches files)
+            self.test_script = self.obtain_file(self.cfg['test_script'])
+            if self.test_script and os.path.exists(self.test_script):
+                self.log.info("Test script found: %s", self.test_script)
+            else:
+                raise EasyBuildError("Specified test script %s not found!", self.cfg['test_script'])
+
+    def python_pkg_exists(self, name):
+        """Check if the given python package exists/can be imported"""
+        cmd = self.python_cmd + " -c 'import %s'" % name
+        out, ec = run_cmd(cmd, log_ok=False)
+        self.log.debug('Existence check for %s returned %s with output: %s', name, ec, out)
+        return ec == 0
+
+    def handle_jemalloc(self):
+        """Figure out whether jemalloc support should be enabled or not."""
+        if self.cfg['with_jemalloc'] is None:
+            if LooseVersion(self.version) > LooseVersion('1.6'):
+                # jemalloc bundled with recent versions of TensorFlow does not work on RHEL 6 or derivatives,
+                # so disable it automatically if with_jemalloc was left unspecified
+                os_name = get_os_name().replace(' ', '')
+                rh_based_os = any(os_name.startswith(x) for x in ['centos', 'redhat', 'rhel', 'sl'])
+                if rh_based_os and get_os_version().startswith('6.'):
+                    self.log.info("Disabling jemalloc since bundled jemalloc does not work on RHEL 6 and derivatives")
+                    self.cfg['with_jemalloc'] = False
+
+            # if the above doesn't disable jemalloc support, then enable it by default
+            if self.cfg['with_jemalloc'] is None:
+                self.log.info("Enabling jemalloc support by default, since it was left unspecified")
+                self.cfg['with_jemalloc'] = True
+
+        else:
+            # if with_jemalloc was specified, stick to that
+            self.log.info("with_jemalloc was specified as %s, so sticking to it", self.cfg['with_jemalloc'])
+
+    def write_wrapper(self, wrapper_dir, compiler, i_mpi_root):
+        """Helper function to write a compiler wrapper."""
+        wrapper_txt = INTEL_COMPILER_WRAPPER % {
+            'compiler_path': which(compiler),
+            'intel_mpi_root': i_mpi_root,
+            'cpath': os.getenv('CPATH'),
+            'intel_license_file': os.getenv('INTEL_LICENSE_FILE', os.getenv('LM_LICENSE_FILE')),
+            'wrapper_dir': wrapper_dir,
+        }
+        wrapper = os.path.join(wrapper_dir, compiler)
+        write_file(wrapper, wrapper_txt)
+        if self.dry_run:
+            self.dry_run_msg("Wrapper for '%s' was put in place: %s", compiler, wrapper)
+        else:
+            adjust_permissions(wrapper, stat.S_IXUSR)
+            self.log.info("Using wrapper script for '%s': %s", compiler, which(compiler))
+
+    def verify_system_libs_info(self):
+        """Verifies that the stored info about $TF_SYSTEM_LIBS is complete"""
+        available_libs_src = set(get_system_libs_from_tf(self.start_dir))
+        available_libs_eb = set(split_tf_libs_txt(get_system_libs_for_version(self.version, as_valid_libs=True)))
+        # An empty available_libs_eb is not an error; e.g. it is not worth trying to make all old ECs work.
+        # So just log it so it can be verified manually if required
+        if not available_libs_eb:
+            self.log.warning('TensorFlow EasyBlock does not have any information for $TF_SYSTEM_LIBS stored. ' +
+                             'This means most dependencies will be downloaded at build time by TensorFlow.\n' +
+                             'Available $TF_SYSTEM_LIBS according to the TensorFlow sources: %s',
+                             sorted(available_libs_src))
+            return
+        # Those 2 sets should be equal. We determine the differences here to report better errors
+        missing_libs = available_libs_src - available_libs_eb
+        unknown_libs = available_libs_eb - available_libs_src
+        if missing_libs or unknown_libs:
+            if not available_libs_src:
+                msg = 'Failed to determine available $TF_SYSTEM_LIBS from the source'
+            else:
+                msg = 'Values for $TF_SYSTEM_LIBS in the TensorFlow EasyBlock are incomplete.\n'
+                if missing_libs:
+                    # Libs available according to TF sources but not listed in this EasyBlock
+                    msg += 'Missing entries for $TF_SYSTEM_LIBS: %s\n' % missing_libs
+                if unknown_libs:
+                    # Libs listed in this EasyBlock but not present in the TF sources -> Removed?
+                    msg += 'Unrecognized entries for $TF_SYSTEM_LIBS: %s\n' % unknown_libs
+                msg += 'The EasyBlock needs to be updated to fully work with TensorFlow version %s' % self.version
+            if build_option('strict') == run.ERROR:
+                raise EasyBuildError(msg)
+            else:
+                print_warning(msg)
+
+    def get_system_libs(self):
+        """
+        Get list of dependencies for $TF_SYSTEM_LIBS
+
+        Returns a tuple of lists: $TF_SYSTEM_LIBS names, include paths, library paths
+        """
+        dependency_mapping, python_mapping = get_system_libs_for_version(self.version)
+        # Some TF dependencies require both a (usually C++) dependency and a Python package
+        deps_with_python_pkg = set(tf_name for tf_name in dependency_mapping.values()
+                                   if tf_name in python_mapping.values())
+
+        system_libs = []
+        cpaths = []
+        libpaths = []
+        ignored_system_deps = []
+
+        # Check direct dependencies
+        dep_names = set(dep['name'] for dep in self.cfg.dependencies())
+        for dep_name, tf_name in sorted(dependency_mapping.items(), key=lambda i: i[0].lower()):
+            if dep_name in dep_names:
+                if tf_name in deps_with_python_pkg:
+                    pkg_name = next(cur_pkg_name for cur_pkg_name, cur_tf_name in python_mapping.items()
+                                    if cur_tf_name == tf_name)
+                    # Simply ignore. Error reporting is done in the other loop
+                    if not self.python_pkg_exists(pkg_name):
+                        continue
+                system_libs.append(tf_name)
+                # When using cURL (which uses the system OpenSSL), we also need to use "boringssl"
+                # which essentially resolves to using OpenSSL as the API and library names are compatible
+                if dep_name == 'cURL':
+                    system_libs.append('boringssl')
+                sw_root = get_software_root(dep_name)
+                # Dependency might be filtered via --filter-deps. In that case assume globally installed version
+                if not sw_root:
+                    continue
+                incpath = os.path.join(sw_root, 'include')
+                if os.path.exists(incpath):
+                    cpaths.append(incpath)
+                    if dep_name == 'JsonCpp' and LooseVersion(self.version) < LooseVersion('2.3'):
+                        # Need to add the install prefix or patch the sources:
+                        # https://github.com/tensorflow/tensorflow/issues/42303
+                        cpaths.append(sw_root)
+                    if dep_name == 'protobuf':
+                        if LooseVersion(self.version) < LooseVersion('2.4'):
+                            # Need to set INCLUDEDIR as TF wants to symlink files from there:
+                            # https://github.com/tensorflow/tensorflow/issues/37835
+                            env.setvar('INCLUDEDIR', incpath)
+                        else:
+                            env.setvar('PROTOBUF_INCLUDE_PATH', incpath)
+                libpath = get_software_libdir(dep_name)
+                if libpath:
+                    libpaths.append(os.path.join(sw_root, libpath))
+            else:
+                ignored_system_deps.append('%s (Dependency %s)' % (tf_name, dep_name))
+
+        for pkg_name, tf_name in sorted(python_mapping.items(), key=lambda i: i[0].lower()):
+            if self.python_pkg_exists(pkg_name):
+                # If it is in deps_with_python_pkg we already added it
+                if tf_name not in deps_with_python_pkg:
+                    system_libs.append(tf_name)
+            else:
+                ignored_system_deps.append('%s (Python package %s)' % (tf_name, pkg_name))
+
+        if ignored_system_deps:
+            print_warning('%d TensorFlow dependencies have not been resolved by EasyBuild. Check the log for details.',
+                          len(ignored_system_deps))
+            self.log.warning('For the following $TF_SYSTEM_LIBS dependencies TensorFlow will download a copy ' +
+                             'because an EB dependency was not found: \n%s\n' +
+                             'EC Dependencies: %s\n' +
+                             'Installed Python packages: %s\n',
+                             ', '.join(ignored_system_deps),
+                             ', '.join(dep_names),
+                             ', '.join(self.get_installed_python_packages()))
+        else:
+            self.log.info("All known TensorFlow $TF_SYSTEM_LIBS dependencies resolved via EasyBuild!")
+
+        return system_libs, cpaths, libpaths
+
+    def setup_build_dirs(self):
+        """Setup temporary build directories"""
+        # Path where Bazel will store its output, build artefacts etc.
+        self.output_user_root_dir = tempfile.mkdtemp(suffix='-bazel-tf', dir=self.builddir)
+        # Folder where wrapper binaries can be placed, where required. TODO: Replace by --action_env cmds
+        self.wrapper_dir = tempfile.mkdtemp(suffix='-wrapper_bin', dir=self.builddir)
+
+    def configure_step(self):
+        """Custom configuration procedure for TensorFlow."""
+
+        # Bazel seems to not be able to handle a large amount of parallel jobs, e.g. 176 on some Power machines,
+        # and will hang forever building the TensorFlow package.
+        # So limit to something high but still reasonable while allowing ECs to overwrite it
+        if self.cfg['maxparallel'] is None:
+            self.cfg['parallel'] = min(self.cfg['parallel'], 64)
+
+        binutils_root = get_software_root('binutils')
+        if not binutils_root:
+            raise EasyBuildError("Failed to determine installation prefix for binutils")
+        self.binutils_bin_path = os.path.join(binutils_root, 'bin')
+
+        # filter out paths from CPATH and LIBRARY_PATH. This is needed since bazel will pull some dependencies that
+        # might conflict with dependencies on the system and/or installed with EB. For example: protobuf
+        path_filter = self.cfg['path_filter']
+        if path_filter:
+            self.log.info("Filtering $CPATH and $LIBRARY_PATH with path filter %s", path_filter)
+            for var in ['CPATH', 'LIBRARY_PATH']:
+                path = os.getenv(var).split(os.pathsep)
+                self.log.info("$%s old value was %s" % (var, path))
+                filtered_path = os.pathsep.join(p for p in path if not any(fil in p for fil in path_filter))
+                env.setvar(var, filtered_path)
+
+        self.setup_build_dirs()
+
+        use_wrapper = False
+        if self.toolchain.comp_family() == toolchain.INTELCOMP:
+            # put wrappers for Intel C/C++ compilers in place (required to make sure license server is found)
+            # cfr. https://github.com/bazelbuild/bazel/issues/663
+            for compiler in ('icc', 'icpc'):
+                self.write_wrapper(self.wrapper_dir, compiler, 'NOT-USED-WITH-ICC')
+            use_wrapper = True
+
+        use_mpi = self.toolchain.options.get('usempi', False)
+        mpi_home = ''
+        if use_mpi:
+            impi_root = get_software_root('impi')
+            if impi_root:
+                # put wrappers for Intel MPI compiler wrappers in place
+                # (required to make sure license server and I_MPI_ROOT are found)
+                for compiler in (os.getenv('MPICC'), os.getenv('MPICXX')):
+                    self.write_wrapper(self.wrapper_dir, compiler, os.getenv('I_MPI_ROOT'))
+                use_wrapper = True
+                # set correct value for MPI_HOME
+                mpi_home = os.path.join(impi_root, 'intel64')
+            else:
+                self.log.debug("MPI module name: %s", self.toolchain.MPI_MODULE_NAME[0])
+                mpi_home = get_software_root(self.toolchain.MPI_MODULE_NAME[0])
+
+            self.log.debug("Derived value for MPI_HOME: %s", mpi_home)
+
+        if use_wrapper:
+            env.setvar('PATH', os.pathsep.join([self.wrapper_dir, os.getenv('PATH')]))
+
+        self.prepare_python()
+
+        self.verify_system_libs_info()
+        self.system_libs_info = self.get_system_libs()
+
+        # Options passed to the target (build/test), e.g. --config arguments
+        self.target_opts = []
+
+        cuda_root = get_software_root('CUDA')
+        cudnn_root = get_software_root('cuDNN')
+        opencl_root = get_software_root('OpenCL')
+        tensorrt_root = get_software_root('TensorRT')
+        nccl_root = get_software_root('NCCL')
+
+        self._with_cuda = bool(cuda_root)
+
+        config_env_vars = {
+            'CC_OPT_FLAGS': os.getenv('CXXFLAGS'),
+            'MPI_HOME': mpi_home,
+            'PYTHON_BIN_PATH': self.python_cmd,
+            'PYTHON_LIB_PATH': os.path.join(self.installdir, self.pylibdir),
+            'TF_CUDA_CLANG': '0',
+            'TF_DOWNLOAD_CLANG': '0',  # Still experimental in TF 2.1.0
+            'TF_ENABLE_XLA': ('0', '1')[bool(self.cfg['with_xla'])],  # XLA JIT support
+            'TF_NEED_CUDA': ('0', '1')[self._with_cuda],
+            'TF_NEED_OPENCL': ('0', '1')[bool(opencl_root)],
+            'TF_NEED_ROCM': '0',
+            'TF_NEED_TENSORRT': '0',
+            'TF_SET_ANDROID_WORKSPACE': '0',
+            'TF_SYSTEM_LIBS': ','.join(self.system_libs_info[0]),
+        }
+        if LooseVersion(self.version) < LooseVersion('1.10'):
+            config_env_vars['TF_NEED_S3'] = '0'  # Renamed to TF_NEED_AWS in 1.9.0-rc2 and 1.10, not 1.9.0
+        # Options removed in 1.12.0
+        if LooseVersion(self.version) < LooseVersion('1.12'):
+            self.handle_jemalloc()
+            config_env_vars.update({
+                'TF_NEED_AWS': '0',  # Amazon AWS Platform
+                'TF_NEED_GCP': '0',  # Google Cloud Platform
+                'TF_NEED_GDR': '0',
+                'TF_NEED_HDFS': '0',  # Hadoop File System
+                'TF_NEED_JEMALLOC': ('0', '1')[self.cfg['with_jemalloc']],
+                'TF_NEED_KAFKA': '0',  # Apache Kafka Platform
+                'TF_NEED_VERBS': '0',
+            })
+        elif self.cfg['with_jemalloc'] is True:
+            print_warning('Jemalloc is not supported in TensorFlow %s, the EC option with_jemalloc has no effect',
+                          self.version)
+        # Disable support of some features via config switch introduced in 1.12.1
+        if LooseVersion(self.version) >= LooseVersion('1.12.1'):
+            self.target_opts += ['--config=noaws', '--config=nogcp', '--config=nohdfs']
+            # Removed in 2.1
+            if LooseVersion(self.version) < LooseVersion('2.1'):
+                self.target_opts.append('--config=nokafka')
+        # MPI support removed in 2.1
+        if LooseVersion(self.version) < LooseVersion('2.1'):
+            config_env_vars['TF_NEED_MPI'] = ('0', '1')[bool(use_mpi)]
+        # SYCL support removed in 2.4
+        if LooseVersion(self.version) < LooseVersion('2.4'):
+            config_env_vars['TF_NEED_OPENCL_SYCL'] = '0'
+
+        if self._with_cuda:
+            cuda_version = get_software_version('CUDA')
+            cuda_maj_min_ver = '.'.join(cuda_version.split('.')[:2])
+
+            # $GCC_HOST_COMPILER_PATH should be set to path of the actual compiler (not the MPI compiler wrapper)
+            if use_mpi:
+                compiler_path = which(os.getenv('CC_SEQ'))
+            else:
+                compiler_path = which(os.getenv('CC'))
+
+            # the list of CUDA compute capabilities to use can be specified in two ways (where (2) overrules (1)):
+            # (1) in the easyconfig file, via the custom cuda_compute_capabilities;
+            # (2) in the EasyBuild configuration, via --cuda-compute-capabilities configuration option;
+            ec_cuda_cc = self.cfg['cuda_compute_capabilities']
+            cfg_cuda_cc = build_option('cuda_compute_capabilities')
+            cuda_cc = cfg_cuda_cc or ec_cuda_cc or []
+
+            if cfg_cuda_cc and ec_cuda_cc:
+                warning_msg = "cuda_compute_capabilities specified in easyconfig (%s) are overruled by " % ec_cuda_cc
+                warning_msg += "--cuda-compute-capabilities configuration option (%s)" % cfg_cuda_cc
+                print_warning(warning_msg)
+            elif not cuda_cc:
+                warning_msg = "No CUDA compute capabilities specified, so using TensorFlow default "
+                warning_msg += "(which may not be optimal for your system).\nYou should use "
+                warning_msg += "the --cuda-compute-capabilities configuration option or the cuda_compute_capabilities "
+                warning_msg += "easyconfig parameter to specify a list of CUDA compute capabilities to compile with."
+                print_warning(warning_msg)
+
+            # TensorFlow 1.12.1 requires compute capability >= 3.5
+            # see https://github.com/tensorflow/tensorflow/pull/25767
+            if LooseVersion(self.version) >= LooseVersion('1.12.1'):
+                faulty_comp_caps = [x for x in cuda_cc if LooseVersion(x) < LooseVersion('3.5')]
+                if faulty_comp_caps:
+                    error_msg = "TensorFlow >= 1.12.1 requires CUDA compute capabilities >= 3.5, "
+                    error_msg += "found one or more older ones: %s"
+                    raise EasyBuildError(error_msg, ', '.join(faulty_comp_caps))
+
+            if cuda_cc:
+                self.log.info("Compiling with specified list of CUDA compute capabilities: %s", ', '.join(cuda_cc))
+
+            config_env_vars.update({
+                'CUDA_TOOLKIT_PATH': cuda_root,
+                'GCC_HOST_COMPILER_PATH': compiler_path,
+                # This is the binutils bin folder: https://github.com/tensorflow/tensorflow/issues/39263
+                'GCC_HOST_COMPILER_PREFIX': self.binutils_bin_path,
+                'TF_CUDA_COMPUTE_CAPABILITIES': ','.join(cuda_cc),
+                'TF_CUDA_VERSION': cuda_maj_min_ver,
+            })
+
+            # for recent TensorFlow versions, $TF_CUDA_PATHS and $TF_CUBLAS_VERSION must also be set
+            if LooseVersion(self.version) >= LooseVersion('1.14'):
+
+                # figure out correct major/minor version for CUBLAS from cublas_api.h
+                cublas_api_header_glob_pattern = os.path.join(cuda_root, 'targets', '*', 'include', 'cublas_api.h')
+                matches = glob.glob(cublas_api_header_glob_pattern)
+                if len(matches) == 1:
+                    cublas_api_header_path = matches[0]
+                    cublas_api_header_txt = read_file(cublas_api_header_path)
+                else:
+                    raise EasyBuildError("Failed to isolate path to cublas_api.h: %s", matches)
+
+                cublas_ver_parts = []
+                for key in ['CUBLAS_VER_MAJOR', 'CUBLAS_VER_MINOR', 'CUBLAS_VER_PATCH']:
+                    regex = re.compile("^#define %s ([0-9]+)" % key, re.M)
+                    res = regex.search(cublas_api_header_txt)
+                    if res:
+                        cublas_ver_parts.append(res.group(1))
+                    else:
+                        raise EasyBuildError("Failed to find pattern '%s' in %s", regex.pattern, cublas_api_header_path)
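+                # Illustrative: cublas_api.h contains lines like '#define CUBLAS_VER_MAJOR 11',
+                # so for e.g. CUBLAS 11.2.1 this yields cublas_ver_parts == ['11', '2', '1'] -> '11.2.1'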
+
+                config_env_vars.update({
+                    'TF_CUDA_PATHS': cuda_root,
+                    'TF_CUBLAS_VERSION': '.'.join(cublas_ver_parts),
+                })
+
+            if cudnn_root:
+                cudnn_version = get_software_version('cuDNN')
+                cudnn_maj_min_patch_ver = '.'.join(cudnn_version.split('.')[:3])
+
+                config_env_vars.update({
+                    'CUDNN_INSTALL_PATH': cudnn_root,
+                    'TF_CUDNN_VERSION': cudnn_maj_min_patch_ver,
+                })
+            else:
+                raise EasyBuildError("TensorFlow has a strict dependency on cuDNN if CUDA is enabled")
+            if nccl_root:
+                nccl_version = get_software_version('NCCL')
+                # Ignore the PKG_REVISION identifier if it exists (i.e., report 2.4.6 for 2.4.6-1 or 2.4.6-2)
+                nccl_version = nccl_version.split('-')[0]
+
+                if nccl_version == "default":
+                    nccl_major = ''
+                    nccl_minor = ''
+                    nccl_patch = ''
+                    with open(nccl_root+'/include/nccl.h', 'r') as nccl_header:
+                        for line in nccl_header.readlines():
+                            if '#define NCCL_MAJOR' in line:
+                                nccl_major = line[line.rfind(' ')+1:-1]
+                            if '#define NCCL_MINOR' in line:
+                                nccl_minor = line[line.rfind(' ')+1:-1]
+                            if '#define NCCL_PATCH' in line:
+                                nccl_patch = line[line.rfind(' ')+1:-1]
+                    if nccl_major and nccl_minor and nccl_patch != '':
+                        nccl_version = nccl_major + '.' + nccl_minor + '.' + nccl_patch
+                config_env_vars.update({
+                    'NCCL_INSTALL_PATH': nccl_root,
+                })
+            else:
+                nccl_version = '1.3'  # Use simple downloadable version
+            config_env_vars.update({
+                'TF_NCCL_VERSION': nccl_version,
+            })
+            if tensorrt_root:
+                tensorrt_version = get_software_version('TensorRT')
+                config_env_vars.update({
+                    'TF_NEED_TENSORRT': '1',
+                    'TENSORRT_INSTALL_PATH': tensorrt_root,
+                    'TF_TENSORRT_VERSION': tensorrt_version,
+                })
+
+        configure_py_contents = read_file('configure.py')
+        for key, val in sorted(config_env_vars.items()):
+            if key.startswith('TF_') and key not in configure_py_contents:
+                self.log.warning('Did not find %s option in configure.py. Setting might not have any effect', key)
+            env.setvar(key, val)
+
+        # configure.py (called by configure script) already calls bazel to determine the bazel version
+        # Since 2.3.0 `bazel --version` is used, which doesn't extract Bazel; prior versions did extract it.
+        # Hence make sure it doesn't extract into $HOME/.cache/bazel
+        if LooseVersion(self.version) < LooseVersion('2.3.0'):
+            regex_subs = [(r"('bazel', '--batch')",
+                           r"\1, '--output_user_root=%s'" % self.output_user_root_dir)]
+            apply_regex_substitutions('configure.py', regex_subs)
+
+        cmd = self.cfg['preconfigopts'] + './configure ' + self.cfg['configopts']
+        run_cmd(cmd, log_all=True, simple=True)
+
+    def patch_crosstool_files(self):
+        """Patches the CROSSTOOL files to include EasyBuild provided compiler paths"""
+        inc_paths, lib_paths = [], []
+
+        gcc_root = get_software_root('GCCcore') or get_software_root('GCC')
+        if gcc_root:
+            gcc_lib64 = os.path.join(gcc_root, 'lib64')
+            lib_paths.append(gcc_lib64)
+
+            gcc_ver = get_software_version('GCCcore') or get_software_version('GCC')
+
+            # figure out location of GCC include files
+            # make sure we don't pick up the nvptx-none directory by looking for a specific include file
+            res = glob.glob(os.path.join(gcc_root, 'lib', 'gcc', '*', gcc_ver, 'include', 'immintrin.h'))
+            if res and len(res) == 1:
+                gcc_lib_inc = os.path.dirname(res[0])
+                inc_paths.append(gcc_lib_inc)
+            else:
+                raise EasyBuildError("Failed to pinpoint location of GCC include files: %s", res)
+
+            # make sure include-fixed directory is where we expect it to be
+            gcc_lib_inc_fixed = os.path.join(os.path.dirname(gcc_lib_inc), 'include-fixed')
+            if os.path.exists(gcc_lib_inc_fixed):
+                inc_paths.append(gcc_lib_inc_fixed)
+            else:
+                self.log.info("Derived directory %s does not exist, so discarding it", gcc_lib_inc_fixed)
+
+            # also check on location of include/c++/<gcc version> directory
+            gcc_cplusplus_inc = os.path.join(gcc_root, 'include', 'c++', gcc_ver)
+            if os.path.exists(gcc_cplusplus_inc):
+                inc_paths.append(gcc_cplusplus_inc)
+            else:
+                raise EasyBuildError("Derived directory %s does not exist", gcc_cplusplus_inc)
+        else:
+            raise EasyBuildError("Failed to determine installation prefix for GCC")
+
+        cuda_root = get_software_root('CUDA')
+        if cuda_root:
+            inc_paths.append(os.path.join(cuda_root, 'include'))
+            lib_paths.append(os.path.join(cuda_root, 'lib64'))
+
+        # fix hardcoded locations of compilers & tools
+        cxx_inc_dirs = ['cxx_builtin_include_directory: "%s"' % resolve_path(p) for p in inc_paths]
+        cxx_inc_dirs += ['cxx_builtin_include_directory: "%s"' % p for p in inc_paths]
+        regex_subs = [
+            (r'-B/usr/bin/', '-B%s %s' % (self.binutils_bin_path, ' '.join('-L%s/' % p for p in lib_paths))),
+            (r'(cxx_builtin_include_directory:).*', ''),
+            (r'^toolchain {', 'toolchain {\n' + '\n'.join(cxx_inc_dirs)),
+        ]
+        for tool in ['ar', 'cpp', 'dwp', 'gcc', 'gcov', 'ld', 'nm', 'objcopy', 'objdump', 'strip']:
+            path = which(tool)
+            if path:
+                regex_subs.append((os.path.join('/usr', 'bin', tool), path))
+            else:
+                raise EasyBuildError("Failed to determine path to '%s'", tool)
+
+        # -fPIE/-pie and -fPIC are not compatible, so patch out hardcoded occurrences of -fPIE/-pie if -fPIC is used
+        if self.toolchain.options.get('pic', None):
+            regex_subs.extend([('-fPIE', '-fPIC'), ('"-pie"', '"-fPIC"')])
+
+        # patch all CROSSTOOL* scripts to fix hardcoding of locations of binutils/GCC binaries
+        for path, dirnames, filenames in os.walk(os.getcwd()):
+            for filename in filenames:
+                if filename.startswith('CROSSTOOL'):
+                    full_path = os.path.join(path, filename)
+                    self.log.info("Patching %s", full_path)
+                    apply_regex_substitutions(full_path, regex_subs)
+
+    def build_step(self):
+        """Custom build procedure for TensorFlow."""
+
+        # pre-create target installation directory
+        mkdir(os.path.join(self.installdir, self.pylibdir), parents=True)
+
+        # This seems to be no longer required since at least 2.0, likely also for older versions
+        if LooseVersion(self.version) < LooseVersion('2.0'):
+            self.patch_crosstool_files()
+
+        # Options passed to the bazel command
+        self.bazel_opts = [
+            '--output_user_root=%s' % self.output_user_root_dir,
+        ]
+        jvm_max_memory = self.cfg['jvm_max_memory']
+        if jvm_max_memory:
+            jvm_startup_memory = min(512, int(jvm_max_memory))
+            self.bazel_opts.extend([
+                '--host_jvm_args=-Xms%sm' % jvm_startup_memory,
+                '--host_jvm_args=-Xmx%sm' % jvm_max_memory
+            ])
+
+        if self.toolchain.options.get('debug', None):
+            self.target_opts.append('--strip=never')
+            self.target_opts.append('--compilation_mode=dbg')
+            self.target_opts.append('--copt="-Og"')
+        else:
+            # build with optimization enabled
+            # cfr. https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode
+            self.target_opts.append('--compilation_mode=opt')
+
+            # select 'opt' config section (this is *not* the same as --compilation_mode=opt!)
+            # https://docs.bazel.build/versions/master/user-manual.html#flag--config
+            self.target_opts.append('--config=opt')
+
+        # make Bazel print full command line + make it verbose on failures
+        # https://docs.bazel.build/versions/master/user-manual.html#flag--subcommands
+        # https://docs.bazel.build/versions/master/user-manual.html#flag--verbose_failures
+        self.target_opts.extend(['--subcommands', '--verbose_failures'])
+
+        self.target_opts.append('--jobs=%s' % self.cfg['parallel'])
+
+        if self.toolchain.options.get('pic', None):
+            self.target_opts.append('--copt="-fPIC"')
+
+        # include install location of Python packages in $PYTHONPATH,
+        # and specify that value of $PYTHONPATH should be passed down into Bazel build environment;
+        # this is required to make sure that Python packages included as extensions are found at build time;
+        # see also https://github.com/tensorflow/tensorflow/issues/22395
+        pythonpath = os.getenv('PYTHONPATH', '')
+        env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, self.pylibdir), pythonpath]))
+
+        # Make TF find our modules. LD_LIBRARY_PATH gets automatically added by configure.py
+        cpaths, libpaths = self.system_libs_info[1:]
+        if cpaths:
+            self.target_opts.append("--action_env=CPATH='%s'" % ':'.join(cpaths))
+        if libpaths:
+            self.target_opts.append("--action_env=LIBRARY_PATH='%s'" % ':'.join(libpaths))
+        self.target_opts.append('--action_env=PYTHONPATH')
+        # Also export $EBPYTHONPREFIXES to handle the multi-deps python setup
+        # See https://github.com/easybuilders/easybuild-easyblocks/pull/1664
+        if 'EBPYTHONPREFIXES' in os.environ:
+            self.target_opts.append('--action_env=EBPYTHONPREFIXES')
+
+        # Ignore user environment for Python
+        self.target_opts.append('--action_env=PYTHONNOUSERSITE=1')
+
+        # Use the same configuration (i.e. environment) for compiling and using host tools
+        # This means that our action_envs are always passed
+        self.target_opts.append('--distinct_host_configuration=false')
+
+        # TF 2 (final) sets this in configure
+        if LooseVersion(self.version) < LooseVersion('2.0'):
+            if self._with_cuda:
+                self.target_opts.append('--config=cuda')
+
+        # note: using --config=mkl results in a significantly different build, with a different
+        # threading model (which may lead to thread oversubscription and significant performance loss,
+        # see https://github.com/easybuilders/easybuild-easyblocks/issues/2577) and different
+        # runtime behavior w.r.t. GPU vs CPU execution of functions like tf.matmul
+        # (see https://github.com/easybuilders/easybuild-easyconfigs/issues/14120),
+        # so make sure you really know you want to use this!
+
+        # auto-enable use of MKL-DNN/oneDNN and --config=mkl when possible if with_mkl_dnn is left unspecified;
+        # only do this for TensorFlow versions older than 2.4.0, since in more recent versions
+        # oneDNN is used automatically on x86_64 systems (and mkl-dnn is no longer a dependency);
+        if self.cfg['with_mkl_dnn'] is None and LooseVersion(self.version) < LooseVersion('2.4.0'):
+            cpu_arch = get_cpu_architecture()
+            if cpu_arch == X86_64:
+                # Supported on x86 since forever
+                self.cfg['with_mkl_dnn'] = True
+                self.log.info("Auto-enabled use of MKL-DNN on %s CPU architecture", cpu_arch)
+            else:
+                self.log.info("Not enabling use of MKL-DNN on %s CPU architecture", cpu_arch)
+
+        # if mkl-dnn is listed as a dependency it is used
+        mkl_root = get_software_root('mkl-dnn')
+        if mkl_root:
+            self.target_opts.append('--config=mkl')
+            env.setvar('TF_MKL_ROOT', mkl_root)
+        elif self.cfg['with_mkl_dnn']:
+            # this makes TensorFlow use mkl-dnn (cfr. https://github.com/01org/mkl-dnn),
+            # and download it if needed
+            self.target_opts.append('--config=mkl')
+
+        # Compose final command
+        cmd = (
+            [self.cfg['prebuildopts']]
+            + ['bazel']
+            + self.bazel_opts
+            + ['build']
+            + self.target_opts
+            + [self.cfg['buildopts']]
+            # specify target of the build command as last argument
+            + ['//tensorflow/tools/pip_package:build_pip_package']
+        )
+
+        run_cmd(' '.join(cmd), log_all=True, simple=True, log_ok=True)
+
+        # run generated 'build_pip_package' script to build the .whl
+        cmd = "bazel-bin/tensorflow/tools/pip_package/build_pip_package %s" % self.builddir
+        run_cmd(cmd, log_all=True, simple=True, log_ok=True)
+
+    def test_step(self):
+        """Run TensorFlow unit tests"""
+        # IMPORTANT: This code is experimental support for running the TF tests and may change
+        test_targets = self.cfg['test_targets']
+        if not test_targets:
+            self.log.info('No targets selected for tests. Set e.g. test_targets = ["//tensorflow/python/..."] '
+                          'to run TensorFlow tests.')
+            return
+        # Allow a string as the test_targets (useful for copy & pasting test targets from the TF sources)
+        if not isinstance(test_targets, list):
+            test_targets = test_targets.split(' ')
+
+        test_opts = self.target_opts[:]  # copy to avoid modifying the options used for the build step
+        test_opts.append('--test_output=errors')  # (Additionally) show logs from failed tests
+        test_opts.append('--build_tests_only')  # Don't build tests which won't be executed
+
+        # determine number of cores/GPUs to use for tests
+        max_num_test_jobs = int(self.cfg['test_max_parallel'] or self.cfg['parallel'])
+        if self._with_cuda:
+            if not which('nvidia-smi', on_error=IGNORE):
+                print_warning('Could not find nvidia-smi. Assuming a system without GPUs and skipping GPU tests!')
+                num_gpus_to_use = 0
+            elif os.environ.get('CUDA_VISIBLE_DEVICES') == '-1':
+                print_warning('GPUs explicitly disabled via CUDA_VISIBLE_DEVICES. Skipping GPU tests!')
+                num_gpus_to_use = 0
+            else:
+                # determine number of available GPUs via the nvidia-smi command; fall back to no GPUs on failure
+                # Note: Disable logging to also disable the error handling in run_cmd and do it explicitly below
+                (out, ec) = run_cmd("nvidia-smi --list-gpus", log_ok=False, log_all=False, regexp=False)
+                try:
+                    if ec != 0:
+                        raise RuntimeError("nvidia-smi returned exit code %s with output:\n%s" % (ec, out))
+                    else:
+                        self.log.info('nvidia-smi succeeded with output:\n%s' % out)
+                        gpu_ct = sum(line.startswith('GPU ') for line in out.strip().split('\n'))
+                except (RuntimeError, ValueError) as err:
+                    self.log.warning("Failed to get the number of GPUs on this system: %s", err)
+                    gpu_ct = 0
+
+                if gpu_ct == 0:
+                    print_warning('No GPUs found. Skipping GPU tests!')
+
+                num_gpus_to_use = min(max_num_test_jobs, gpu_ct)
+
+            # Can (likely) only run 1 test per GPU but don't need to limit CPU tests
+            num_test_jobs = {
+                CPU_DEVICE: max_num_test_jobs,
+                GPU_DEVICE: num_gpus_to_use,
+            }
+        else:
+            num_test_jobs = {
+                CPU_DEVICE: max_num_test_jobs,
+                GPU_DEVICE: 0,
+            }
+
+        cfg_testopts = {
+            CPU_DEVICE: self.cfg['testopts'],
+            GPU_DEVICE: self.cfg['testopts_gpu'],
+        }
+
+        devices = [CPU_DEVICE]
+        # Skip GPU tests if not built with CUDA or if no test jobs are set (e.g. due to no GPUs available)
+        if self._with_cuda and num_test_jobs[GPU_DEVICE]:
+            devices.append(GPU_DEVICE)
+
+        for device in devices:
+            # Determine tests to run
+            test_tag_filters_name = 'test_tag_filters_' + device
+            test_tag_filters = self.cfg[test_tag_filters_name]
+            if not test_tag_filters:
+                self.log.info('Skipping %s test because %s is not set', device, test_tag_filters_name)
+                continue
+            else:
+                self.log.info('Starting %s test', device)
+
+            current_test_opts = test_opts[:]
+            current_test_opts.append('--local_test_jobs=%s' % num_test_jobs[device])
+
+            # Add both build and test tag filters as done by the TF CI scripts
+            current_test_opts.extend("--%s_tag_filters='%s'" % (step, test_tag_filters) for step in ('test', 'build'))
+
+            # Disable all GPUs for the CPU tests, by setting $CUDA_VISIBLE_DEVICES to -1,
+            # otherwise TensorFlow will still use GPUs and fail.
+            # Only tests explicitly marked with the 'gpu' tag can run with GPUs visible;
+            # see https://github.com/tensorflow/tensorflow/issues/45664
+            if device == CPU_DEVICE:
+                current_test_opts.append("--test_env=CUDA_VISIBLE_DEVICES='-1'")
+            else:
+                # Propagate those environment variables to the GPU tests if they are set
+                important_cuda_env_vars = (
+                    'CUDA_CACHE_DISABLE',
+                    'CUDA_CACHE_MAXSIZE',
+                    'CUDA_CACHE_PATH',
+                    'CUDA_FORCE_PTX_JIT',
+                    'CUDA_DISABLE_PTX_JIT'
+                )
+                current_test_opts.extend(
+                    '--test_env=' + var_name
+                    for var_name in important_cuda_env_vars
+                    if var_name in os.environ
+                )
+
+                # These are used by the `parallel_gpu_execute` helper script from TF
+                current_test_opts.append('--test_env=TF_GPU_COUNT=%s' % num_test_jobs[GPU_DEVICE])
+                current_test_opts.append('--test_env=TF_TESTS_PER_GPU=1')
+
+            # Append user specified options last
+            current_test_opts.append(cfg_testopts[device])
+
+            # Compose final command
+            cmd = ' '.join(
+                [self.cfg['pretestopts']]
+                + ['bazel']
+                + self.bazel_opts
+                + ['test']
+                + current_test_opts
+                + ['--']
+                # specify targets to test as last argument
+                + test_targets
+            )
+
+            stdouterr, ec = run_cmd(cmd, log_ok=False, simple=False)
+            if ec:
+                fail_msg = 'Tests on %s (cmd: %s) failed with exit code %s and output:\n%s' % (
+                    device, cmd, ec, stdouterr)
+                self.log.warning(fail_msg)
+                # Try to enhance error message
+                failed_tests = []
+                failed_test_logs = dict()
+                # Bazel outputs failed tests like "//tensorflow/c:kernels_test   FAILED in[...]"
+                for match in re.finditer(r'^(//[a-zA-Z_/:]+)\s+FAILED', stdouterr, re.MULTILINE):
+                    test_name = match.group(1)
+                    failed_tests.append(test_name)
+                    # Logs are in a folder named after the test, e.g. tensorflow/c/kernels_test
+                    test_folder = test_name[2:].replace(':', '/')
+                    # Example file names:
+                    # <prefix>/k8-opt/testlogs/tensorflow/c/kernels_test/test.log
+                    # <prefix>/k8-opt/testlogs/tensorflow/c/kernels_test/shard_1_of_4/test_attempts/attempt_1.log
+                    test_log_re = re.compile(r'.*\n(.*\n)?\s*(/.*/testlogs/%s(/[^/]*)?/test.log)' % test_folder)
+                    log_match = test_log_re.match(stdouterr, match.end())
+                    if log_match:
+                        failed_test_logs[test_name] = log_match.group(2)
+                # When TF logs are found enhance the below error by additionally logging the details about failed tests
+                for test_name, log_path in failed_test_logs.items():
+                    if os.path.exists(log_path):
+                        self.log.warning('Test %s failed with output\n%s', test_name,
+                                         read_file(log_path, log_error=False))
+                if failed_tests:
+                    fail_msg = 'At least %s %s tests failed:\n%s' % (
+                        len(failed_tests), device, ', '.join(failed_tests))
+                self.report_test_failure(fail_msg)
+            else:
+                self.log.info('Tests on %s succeeded with output:\n%s', device, stdouterr)
+
+    def install_step(self):
+        """Custom install procedure for TensorFlow."""
+        # find .whl file that was built, and install it using 'pip install'
+        if ("-rc" in self.version):
+            whl_version = self.version.replace("-rc", "rc")
+        else:
+            whl_version = self.version
+
+        whl_paths = glob.glob(os.path.join(self.builddir, 'tensorflow-%s-*.whl' % whl_version))
+        if not whl_paths:
+            whl_paths = glob.glob(os.path.join(self.builddir, 'tensorflow-*.whl'))
+        if len(whl_paths) == 1:
+            # --ignore-installed is required to ensure *this* wheel is installed
+            cmd = "pip install --ignore-installed --prefix=%s %s" % (self.installdir, whl_paths[0])
+
+            # if extensions are listed, assume they will provide all required dependencies,
+            # so use --no-deps to prevent pip from downloading & installing them
+            if self.cfg['exts_list']:
+                cmd += ' --no-deps'
+
+            run_cmd(cmd, log_all=True, simple=True, log_ok=True)
+        else:
+            raise EasyBuildError("Failed to isolate built .whl in %s: %s", whl_paths, self.builddir)
+
+        # Fix for https://github.com/tensorflow/tensorflow/issues/6341 on Python < 3.3
+        # If the site-packages/google/__init__.py file is missing, make it an empty file.
+        # This fixes the "No module named google.protobuf" error that sometimes shows up during sanity_check
+        # For Python >= 3.3 the logic is reversed: The __init__.py must not exist.
+        # See e.g. http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html
+        google_protobuf_dir = os.path.join(self.installdir, self.pylibdir, 'google', 'protobuf')
+        google_init_file = os.path.join(self.installdir, self.pylibdir, 'google', '__init__.py')
+        if LooseVersion(det_python_version(self.python_cmd)) < LooseVersion('3.3'):
+            if os.path.isdir(google_protobuf_dir) and not is_readable(google_init_file):
+                self.log.debug("Creating (empty) missing %s", google_init_file)
+                write_file(google_init_file, '')
+        else:
+            if os.path.exists(google_init_file):
+                self.log.debug("Removing %s for Python >= 3.3", google_init_file)
+                remove_file(google_init_file)
+
+        # Fix cuda header paths
+        # This is needed for building custom TensorFlow ops
+        if LooseVersion(self.version) < LooseVersion('1.14'):
+            pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
+            regex_subs = [(r'#include "cuda/include/', r'#include "')]
+            base_path = os.path.join(self.installdir, 'lib', 'python%s' % pyshortver, 'site-packages', 'tensorflow',
+                                     'include', 'tensorflow')
+            for header in glob.glob(os.path.join(base_path, 'stream_executor', 'cuda', 'cuda*.h')) + glob.glob(
+                    os.path.join(base_path, 'core', 'util', 'cuda*.h')):
+                apply_regex_substitutions(header, regex_subs)
+
+    def sanity_check_step(self):
+        """Custom sanity check for TensorFlow."""
+        if self.python_cmd is None:
+            self.prepare_python()
+
+        custom_paths = {
+            'files': ['bin/tensorboard'],
+            'dirs': [self.pylibdir],
+        }
+
+        custom_commands = [
+            "%s -c 'import tensorflow'" % self.python_cmd,
+            # tf_should_use imports weakref.finalize, which requires backports.weakref for Python < 3.4
+            "%s -c 'from tensorflow.python.util import tf_should_use'" % self.python_cmd,
+        ]
+        res = super(EB_TensorFlow, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)
+
+        # test installation using MNIST tutorial examples
+        if self.cfg['runtest']:
+            pythonpath = os.getenv('PYTHONPATH', '')
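+            # prepend this installation's site-packages to $PYTHONPATH so the test scripts pick up the new TensorFlow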
+            env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, self.pylibdir), pythonpath]))
+
+            mnist_pys = []
+
+            if LooseVersion(self.version) < LooseVersion('2.0'):
+                mnist_pys.append('mnist_with_summaries.py')
+
+            if LooseVersion(self.version) < LooseVersion('1.13'):
+                # mnist_softmax.py was removed in TensorFlow 1.13.x
+                mnist_pys.append('mnist_softmax.py')
+
+            for mnist_py in mnist_pys:
+                datadir = tempfile.mkdtemp(suffix='-tf-%s-data' % os.path.splitext(mnist_py)[0])
+                logdir = tempfile.mkdtemp(suffix='-tf-%s-logs' % os.path.splitext(mnist_py)[0])
+                mnist_py = os.path.join(self.start_dir, 'tensorflow', 'examples', 'tutorials', 'mnist', mnist_py)
+                cmd = "%s %s --data_dir %s --log_dir %s" % (self.python_cmd, mnist_py, datadir, logdir)
+                run_cmd(cmd, log_all=True, simple=True, log_ok=True)
+
+            # run test script (if any)
+            if self.test_script:
+                # copy the test script to the build dir before running it, to prevent a file named 'tensorflow.py'
+                # (for example a customized TensorFlow easyblock) from breaking 'import tensorflow'
+                test_script = os.path.join(self.builddir, os.path.basename(self.test_script))
+                copy_file(self.test_script, test_script)
+
+                run_cmd("python %s" % test_script, log_all=True, simple=True, log_ok=True)
+
+        return res
diff --git a/Golden_Repo/f/flatbuffers-python/flatbuffers-python-2.0-GCCcore-11.3.0.eb b/Golden_Repo/f/flatbuffers-python/flatbuffers-python-2.0-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..7913bb7c2be21d7965d61fabf3a309fb06ccb78f
--- /dev/null
+++ b/Golden_Repo/f/flatbuffers-python/flatbuffers-python-2.0-GCCcore-11.3.0.eb
@@ -0,0 +1,27 @@
+easyblock = 'PythonPackage'
+
+name = 'flatbuffers-python'
+version = '2.0'
+
+homepage = 'https://github.com/google/flatbuffers/'
+description = """Python Flatbuffers runtime library."""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+source_urls = ['https://pypi.python.org/packages/source/f/flatbuffers']
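+# the upstream tarball is named 'flatbuffers-<version>.tar.gz'; store it locally under this easyconfig's name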
+sources = [{'download_filename': 'flatbuffers-%(version)s.tar.gz', 'filename': SOURCE_TAR_GZ}]
+checksums = ['12158ab0272375eab8db2d663ae97370c33f152b27801fa6024e1d6105fd4dd2']
+
+dependencies = [
+    ('binutils', '2.38'),
+    ('Python', '3.10.4'),
+]
+
+download_dep_fail = True
+use_pip = True
+sanity_pip_check = True
+
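+# flatbuffers' setup.py takes the release version from the $VERSION environment variable
+# (otherwise it generates a date-based development version number)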
+preinstallopts = 'VERSION=%(version)s '
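+# the package installs and is imported as 'flatbuffers', not 'flatbuffers-python'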
+options = {'modulename': 'flatbuffers'}
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/f/flatbuffers/flatbuffers-2.0.0-GCCcore-11.3.0.eb b/Golden_Repo/f/flatbuffers/flatbuffers-2.0.0-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..080188887fdcc55261c7aab80b923925bb837172
--- /dev/null
+++ b/Golden_Repo/f/flatbuffers/flatbuffers-2.0.0-GCCcore-11.3.0.eb
@@ -0,0 +1,33 @@
+##
+# Author:    Robert Mijakovic <robert.mijakovic@lxp.lu>
+##
+easyblock = 'CMakeNinja'
+
+name = 'flatbuffers'
+version = '2.0.0'
+
+homepage = 'https://github.com/google/flatbuffers/'
+description = """FlatBuffers: Memory Efficient Serialization Library"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+source_urls = ['https://github.com/google/flatbuffers/archive/v%(version)s/']
+sources = [SOURCE_TAR_GZ]
+checksums = ['9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4']
+
+builddependencies = [
+    ('binutils', '2.38'),
+    ('CMake', '3.23.1'),
+    ('Ninja', '1.10.2'),
+    ('Python', '3.10.4'),
+]
+
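+# enable precompiled headers (PCH) to speed up compilation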
+configopts = '-DFLATBUFFERS_ENABLE_PCH=ON '
+
+sanity_check_paths = {
+    'files': ['include/flatbuffers/flatbuffers.h', 'bin/flatc', 'lib/libflatbuffers.a'],
+    'dirs': ['lib/cmake'],
+}
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/h/HDF5/HDF5-1.12.2-GCCcore-11.3.0-serial.eb b/Golden_Repo/h/HDF5/HDF5-1.12.2-GCCcore-11.3.0-serial.eb
new file mode 100644
index 0000000000000000000000000000000000000000..11228af032d16da6a57d72b82632f5f3a6ecc807
--- /dev/null
+++ b/Golden_Repo/h/HDF5/HDF5-1.12.2-GCCcore-11.3.0-serial.eb
@@ -0,0 +1,26 @@
+name = 'HDF5'
+version = '1.12.2'
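+# the -serial versionsuffix marks this as a build without MPI support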
+versionsuffix = '-serial'
+
+homepage = 'https://portal.hdfgroup.org/display/support'
+description = """HDF5 is a data model, library, and file format for storing and managing data.
+ It supports an unlimited variety of datatypes, and is designed for flexible
+ and efficient I/O and for high volume and complex data."""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+toolchainopts = {'pic': True}
+
+source_urls = ['https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-%(version_major_minor)s/hdf5-%(version)s/src']
+sources = [SOURCELOWER_TAR_GZ]
+checksums = ['2a89af03d56ce7502dcae18232c241281ad1773561ec00c0f0e8ee2463910f14']
+
+builddependencies = [
+    ('binutils', '2.38'),
+]
+
+dependencies = [
+    ('zlib', '1.2.12'),
+    ('Szip', '2.1.1'),
+]
+
+moduleclass = 'data'
diff --git a/Golden_Repo/h/h5py/h5py-3.7.0-foss-2022a.eb b/Golden_Repo/h/h5py/h5py-3.7.0-foss-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..99fcb645de89faf2a2a2eb5659e7e2d60cf38f8a
--- /dev/null
+++ b/Golden_Repo/h/h5py/h5py-3.7.0-foss-2022a.eb
@@ -0,0 +1,35 @@
+easyblock = 'PythonPackage'
+
+name = 'h5py'
+version = '3.7.0'
+
+homepage = 'https://www.h5py.org/'
+description = """HDF5 for Python (h5py) is a general-purpose Python interface to the Hierarchical Data Format library,
+ version 5. HDF5 is a versatile, mature scientific software library designed for the fast, flexible storage of enormous
+ amounts of data."""
+
+toolchain = {'name': 'foss', 'version': '2022a'}
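+# build MPI-enabled h5py on top of a parallel HDF5 installation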
+toolchainopts = {'usempi': True}
+
+sources = [SOURCE_TAR_GZ]
+checksums = ['3fcf37884383c5da64846ab510190720027dca0768def34dd8dcb659dbe5cbf3']
+
+builddependencies = [('pkgconfig', '1.5.5', '-python')]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('mpi4py', '3.1.4'),
+    ('SciPy-bundle', '2022.05', '', ('gcccoremkl', '11.3.0-2022.1.0')),
+    ('HDF5', '1.12.2'),
+]
+
+use_pip = True
+sanity_pip_check = True
+download_dep_fail = True
+
+# h5py's setup.py disables setup_requires when H5PY_SETUP_REQUIRES is set to 0;
+# without this environment variable, pip would fetch the minimum numpy version supported by h5py during the install,
+# even though SciPy-bundle already provides a newer version that satisfies h5py's install_requires dependency.
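+# HDF5_MPI=ON enables h5py's MPI features, and HDF5_DIR points the build at the HDF5 installation provided by EasyBuild.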
+preinstallopts = 'HDF5_MPI=ON HDF5_DIR="$EBROOTHDF5" H5PY_SETUP_REQUIRES=0 '
+
+moduleclass = 'data'
diff --git a/Golden_Repo/j/JsonCpp/JsonCpp-1.9.5-GCCcore-11.3.0.eb b/Golden_Repo/j/JsonCpp/JsonCpp-1.9.5-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..76b697e431aaab3dd3edb23151e43e9d78337862
--- /dev/null
+++ b/Golden_Repo/j/JsonCpp/JsonCpp-1.9.5-GCCcore-11.3.0.eb
@@ -0,0 +1,29 @@
+easyblock = "CMakeNinja"
+
+name = 'JsonCpp'
+version = '1.9.5'
+
+homepage = 'https://open-source-parsers.github.io/jsoncpp-docs/doxygen/index.html'
+description = """ JsonCpp is a C++ library that allows manipulating JSON values,
+ including serialization and deserialization to and from strings. It can also preserve existing comment in
+ unserialization/serialization steps, making it a convenient format to store user input files. """
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+source_urls = ['https://github.com/open-source-parsers/jsoncpp/archive']
+sources = ['%(version)s.tar.gz']
+checksums = ['f409856e5920c18d0c2fb85276e24ee607d2a09b5e7d5f0a371368903c275da2']
+
+builddependencies = [
+    ('CMake', '3.23.1'),
+    ('Ninja', '1.10.2'),
+    ('pkgconf', '1.8.0'),
+    ('binutils', '2.38'),
+]
+
+sanity_check_paths = {
+    'files': ['include/json/json.h', 'lib/libjsoncpp.%s' % SHLIB_EXT],
+    'dirs': [],
+}
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/l/LMDB/LMDB-0.9.29-GCCcore-11.3.0.eb b/Golden_Repo/l/LMDB/LMDB-0.9.29-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..87af8c0082ceb55d27eed80a53c5949a34e94257
--- /dev/null
+++ b/Golden_Repo/l/LMDB/LMDB-0.9.29-GCCcore-11.3.0.eb
@@ -0,0 +1,34 @@
+easyblock = 'MakeCp'
+
+name = 'LMDB'
+version = '0.9.29'
+
+homepage = 'https://symas.com/lmdb'
+description = """LMDB is a fast, memory-efficient database. With memory-mapped files, it has the read performance
+ of a pure in-memory database while retaining the persistence of standard disk-based databases."""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+source_urls = ['https://github.com/LMDB/lmdb/archive/']
+sources = ['%(name)s_%(version)s.tar.gz']
+checksums = ['22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb']
+
+builddependencies = [('binutils', '2.38')]
+
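+# make LMDB's Makefile use the toolchain compiler and optimization flags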
+buildopts = 'CC="$CC" OPT="$CFLAGS"'
+
+runtest = 'test'
+
+files_to_copy = [
+    (['lmdb.h', 'midl.h'], 'include'),
+    (['mdb_copy', 'mdb_dump', 'mdb_load', 'mdb_stat'], 'bin'),
+    (['liblmdb.a', 'liblmdb.%s' % SHLIB_EXT], 'lib'),
+]
+
+sanity_check_paths = {
+    'files': ['bin/mdb_copy', 'bin/mdb_dump', 'bin/mdb_load', 'bin/mdb_stat', 'include/lmdb.h',
+              'include/midl.h', 'lib/liblmdb.a', 'lib/liblmdb.%s' % SHLIB_EXT],
+    'dirs': [],
+}
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/m/mpi4py/mpi4py-3.1.4-gompi-2022a.eb b/Golden_Repo/m/mpi4py/mpi4py-3.1.4-gompi-2022a.eb
new file mode 100644
index 0000000000000000000000000000000000000000..45422248566faa49f99085ba3827630787cf7b68
--- /dev/null
+++ b/Golden_Repo/m/mpi4py/mpi4py-3.1.4-gompi-2022a.eb
@@ -0,0 +1,24 @@
+easyblock = 'PythonPackage'
+
+name = 'mpi4py'
+version = '3.1.4'
+
+homepage = 'https://bitbucket.org/mpi4py/mpi4py'
+description = """MPI for Python (mpi4py) provides bindings of the Message Passing Interface (MPI) standard for
+ the Python programming language, allowing any Python program to exploit multiple processors.
+"""
+
+toolchain = {'name': 'gompi', 'version': '2022a'}
+
+source_urls = ['https://github.com/%(name)s/%(name)s/archive/']
+sources = ['%(version)s.tar.gz']
+checksums = ['0ac7cc1fbd1d026b519c4f439ffc15d8776e5615c262dd233a418d73539c352b']
+
+dependencies = [('Python', '3.10.4')]
+
+sanity_check_paths = {
+    'files': [],
+    'dirs': ['lib/python%(pyshortver)s/site-packages/mpi4py'],
+}
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/n/nsync/nsync-1.25.0-GCCcore-11.3.0.eb b/Golden_Repo/n/nsync/nsync-1.25.0-GCCcore-11.3.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..81bcc07cc76dcd5960a14b0767686e062d5e3fe4
--- /dev/null
+++ b/Golden_Repo/n/nsync/nsync-1.25.0-GCCcore-11.3.0.eb
@@ -0,0 +1,26 @@
+easyblock = 'CMakeNinja'
+
+name = 'nsync'
+version = '1.25.0'
+
+homepage = 'https://github.com/google/nsync'
+description = """nsync is a C library that exports various synchronization primitives, such as mutexes"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+source_urls = ['https://github.com/google/nsync/archive/v%(version)s/']
+sources = [SOURCE_TAR_GZ]
+checksums = ['2be9dbfcce417c7abcc2aa6fee351cd4d292518d692577e74a2c6c05b049e442']
+
+builddependencies = [
+    ('binutils', '2.38'),
+    ('CMake', '3.23.1'),
+    ('Ninja', '1.10.2'),
+]
+
+sanity_check_paths = {
+    'files': ['include/nsync.h', 'lib/libnsync.a', 'lib/libnsync_cpp.a'],
+    'dirs': [],
+}
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/p/pkgconfig/pkgconfig-1.5.5-GCCcore-11.3.0-python.eb b/Golden_Repo/p/pkgconfig/pkgconfig-1.5.5-GCCcore-11.3.0-python.eb
new file mode 100644
index 0000000000000000000000000000000000000000..568e0498ef7796bfd55558c09dcd0c0e392d384b
--- /dev/null
+++ b/Golden_Repo/p/pkgconfig/pkgconfig-1.5.5-GCCcore-11.3.0-python.eb
@@ -0,0 +1,28 @@
+easyblock = 'PythonPackage'
+
+name = 'pkgconfig'
+version = '1.5.5'
+# The -python versionsuffix is used to avoid confusion between
+# pkg-config (the tool) and pkgconfig (the Python wrappers)
+versionsuffix = '-python'
+
+homepage = 'https://github.com/matze/pkgconfig'
+description = """pkgconfig is a Python module to interface with the pkg-config command line tool"""
+
+toolchain = {'name': 'GCCcore', 'version': '11.3.0'}
+
+sources = [SOURCE_TAR_GZ]
+checksums = ['deb4163ef11f75b520d822d9505c1f462761b4309b1bb713d08689759ea8b899']
+
+builddependencies = [('binutils', '2.38')]
+
+dependencies = [
+    ('Python', '3.10.4'),
+    ('pkgconf', '1.8.0'),
+]
+
+use_pip = True
+download_dep_fail = True
+sanity_pip_check = True
+
+moduleclass = 'devel'
diff --git a/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1-foss-2022a-CUDA-11.7.0.eb b/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1-foss-2022a-CUDA-11.7.0.eb
new file mode 100644
index 0000000000000000000000000000000000000000..751fe38e9731d856b8a4d9ec2e101c0c7388845c
--- /dev/null
+++ b/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1-foss-2022a-CUDA-11.7.0.eb
@@ -0,0 +1,237 @@
+easyblock = 'PythonBundle'
+
+name = 'TensorFlow'
+version = '2.9.1'
+versionsuffix = '-CUDA-%(cudaver)s'
+
+homepage = 'https://www.tensorflow.org/'
+description = "An open-source software library for Machine Intelligence"
+
+toolchain = {'name': 'foss', 'version': '2022a'}
+toolchainopts = {'pic': True}
+
+builddependencies = [
+    ('Bazel', '5.1.1', '', ('GCCcore', '11.3.0')),
+    ('protobuf', '3.19.4'),
+    # git 2.x required, see also https://github.com/tensorflow/tensorflow/issues/29053
+    ('git', '2.36.0', '-nodocs'),
+    ('pybind11', '2.9.2'),
+    ('UnZip', '6.0'),
+    ('LLVM', '14.0.3'),  # for debugging with llvm-symbolizer, to be removed
+]
+dependencies = [
+    ('CUDA', '11.7.0', '', SYSTEM),
+    ('cuDNN', '8.6.0.163', versionsuffix, SYSTEM),
+    ('NCCL', 'default', versionsuffix),
+    ('Python', '3.10.4'),
+    ('h5py', '3.7.0'),
+    ('cURL', '7.83.0'),
+    ('double-conversion', '3.2.0'),
+    ('flatbuffers', '2.0.0'),
+    ('giflib', '5.2.1'),
+    ('hwloc', '2.7.1'),
+    ('ICU', '71.1'),
+    ('JsonCpp', '1.9.5'),
+    ('libjpeg-turbo', '2.1.3'),
+    ('LMDB', '0.9.29'),
+    ('NASM', '2.15.05'),
+    ('nsync', '1.25.0'),
+    ('SQLite', '3.38.3'),
+    ('protobuf-python', '3.19.4'),
+    ('flatbuffers-python', '2.0'),
+    ('libpng', '1.6.37'),
+    ('snappy', '1.1.9'),
+    ('zlib', '1.2.12'),
+    ('networkx', '2.8.4', '', ('gcccoremkl', '11.3.0-2022.1.0')),  # required for pythran
+]
+
+use_pip = True
+sanity_pip_check = True
+
+# Dependencies created and updated using findPythonDeps.sh:
+# https://gist.github.com/Flamefire/49426e502cd8983757bd01a08a10ae0d
+exts_list = [
+    ('wrapt', '1.14.1', {
+        'checksums': ['380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d'],
+    }),
+    ('termcolor', '1.1.0', {
+        'checksums': ['1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b'],
+    }),
+    ('tensorflow-io-gcs-filesystem', '0.26.0', {
+        'source_tmpl': 'tensorflow_io_gcs_filesystem-0.26.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl',
+        'checksums': ['5457eeef1f0f5f294225808b2290a251a2e4639ec66db9d32aa4ae62e807d7e8'],
+    }),
+    ('tensorflow-estimator', '2.9.0', {
+        'source_tmpl': 'tensorflow_estimator-2.9.0-py2.py3-none-any.whl',
+        'checksums': ['e9762bb302f51bc1eb2f35d19f0190a6a2d809d754d5def788c4328fe3746744'],
+    }),
+    ('Werkzeug', '2.2.2', {
+        'checksums': ['7ea2d48322cc7c0f8b3a215ed73eabd7b5d75d0b50e31ab006286ccff9e00b8f'],
+    }),
+    ('tensorboard-plugin-wit', '1.8.1', {
+        'source_tmpl': 'tensorboard_plugin_wit-1.8.1-py3-none-any.whl',
+        'checksums': ['ff26bdd583d155aa951ee3b152b3d0cffae8005dc697f72b44a8e8c2a77a8cbe'],
+    }),
+    ('tensorboard-data-server', '0.6.1', {
+        'source_tmpl': 'tensorboard_data_server-0.6.1-py3-none-manylinux2010_x86_64.whl',
+        'checksums': ['d8237580755e58eff68d1f3abefb5b1e39ae5c8b127cc40920f9c4fb33f4b98a'],
+    }),
+    ('Markdown', '3.4.1', {
+        'checksums': ['3b809086bb6efad416156e00a0da66fe47618a5d6918dd688f53f40c8e4cfeff'],
+    }),
+    ('grpcio', '1.47.0', {
+        'modulename': 'grpc',
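+        # build the gRPC C extension with the number of parallel jobs configured in EasyBuild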
+        'preinstallopts': "export GRPC_PYTHON_BUILD_EXT_COMPILER_JOBS=%(parallel)s && ",
+        'checksums': ['5dbba95fab9b35957b4977b8904fc1fa56b302f9051eff4d7716ebb0c087f801'],
+    }),
+    ('oauthlib', '3.2.0', {
+        'checksums': ['23a8208d75b902797ea29fd31fa80a15ed9dc2c6c16fe73f5d346f83f6fa27a2'],
+    }),
+    ('requests-oauthlib', '1.3.1', {
+        'checksums': ['75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a'],
+    }),
+    ('rsa', '4.9', {
+        'checksums': ['e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21'],
+    }),
+    ('pyasn1-modules', '0.2.8', {
+        'checksums': ['905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e'],
+    }),
+    ('cachetools', '5.2.0', {
+        'checksums': ['6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757'],
+    }),
+    ('google-auth', '2.10.0', {
+        'modulename': 'google.auth',
+        'checksums': ['7904dbd44b745c7323fef29565adee2fe7ff48473e2d94443aced40b0404a395'],
+    }),
+    ('google-auth-oauthlib', '0.4.6', {
+        'checksums': ['a90a072f6993f2c327067bf65270046384cda5a8ecb20b94ea9a687f1f233a7a'],
+    }),
+    ('absl-py', '1.2.0', {
+        'modulename': 'absl',
+        'checksums': ['f568809938c49abbda89826223c992b630afd23c638160ad7840cfe347710d97'],
+    }),
+    ('tensorboard', version, {
+        'source_tmpl': 'tensorboard-2.9.1-py3-none-any.whl',
+        'checksums': ['baa727f791776f9e5841d347127720ceed4bbd59c36b40604b95fb2ae6029276'],
+    }),
+    ('opt-einsum', '3.3.0', {
+        'source_tmpl': 'opt_einsum-3.3.0-py3-none-any.whl',
+        'checksums': ['2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147'],
+    }),
+    ('libclang', '14.0.6', {
+        'modulename': 'clang',
+        'checksums': ['9052a8284d8846984f6fa826b1d7460a66d3b23a486d782633b42b6e3b418789'],
+    }),
+    ('Keras-Preprocessing', '1.1.2', {
+        'source_tmpl': 'Keras_Preprocessing-1.1.2-py2.py3-none-any.whl',
+        'checksums': ['7b82029b130ff61cc99b55f3bd27427df4838576838c5b2f65940e4fcec99a7b'],
+    }),
+    ('keras', '2.9.0', {
+        'source_tmpl': 'keras-2.9.0-py2.py3-none-any.whl',
+        'checksums': ['55911256f89cfc9343c9fbe4b61ec45a2d33d89729cbe1ab9dcacf8b07b8b6ab'],
+    }),
+    ('google-pasta', '0.2.0', {
+        'modulename': 'pasta',
+        'checksums': ['c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e'],
+    }),
+    ('astor', '0.8.1'),
+    ('astunparse', '1.6.3', {
+        'checksums': ['5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872'],
+    }),
+    ('gast', '0.4.0', {
+        'checksums': ['40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1'],
+    }),
+    ('pythran', '0.9.11', {
+        'checksums': ['a317f91e2aade9f6550dc3bf40b5caeb45b7e012daf27e2b3e4ad928edb01667'],
+    }),
+    ('beniget', '0.3.0', {
+        'checksums': ['062c893be9cdf87c3144fb15041cce4d81c67107c1591952cd45fdce789a0ff1'],
+    }),
+    ('dill', '0.3.6'),
+    ('portpicker', '1.5.2', {
+        'checksums': ['c55683ad725f5c00a41bc7db0225223e8be024b1fa564d039ed3390e4fd48fb3'],
+    }),
+    ('tblib', '1.7.0'),
+    (name, version, {
+        'patches': [
+            'TensorFlow-2.1.0_fix-cuda-build.patch',
+            'TensorFlow-2.4.0_add-ldl.patch',
+            'TensorFlow-2.4.0_dont-use-var-lock.patch',
+            'TensorFlow-2.5.0_add-support-for-large-core-systems.patch',
+            'TensorFlow-2.5.0_disable-avx512-extensions.patch',
+            'TensorFlow-2.5.0-fix-alias-violation-in-absl.patch',
+            'TensorFlow-2.5.0_fix-arm-vector-intrinsics.patch',
+            'TensorFlow-2.5.0_fix-crash-on-shutdown.patch',
+            'TensorFlow-2.7.1_fix_cpu_count.patch',
+            'TensorFlow-2.9.1_fix-protobuf-include-def.patch',
+        ],
+        'source_tmpl': 'v%(version)s.tar.gz',
+        'source_urls': ['https://github.com/tensorflow/tensorflow/archive/'],
+        'test_script': 'TensorFlow-2.x_mnist-test.py',
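+        # Bazel tag filters selecting which test targets run during the CPU-only and GPU test phases
+        # (parameters handled by the custom TensorFlow easyblock)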
+        'test_tag_filters_cpu': '-gpu,-tpu,-no_cuda_on_cpu_tap,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only',
+        'test_tag_filters_gpu': 'gpu,-no_gpu,-nogpu,-gpu_cupti,-no_cuda11,-no_pip,-no_oss,-oss_serial,-benchmark-test,-v1only',
+        'test_targets': [
+            '//tensorflow/core/...',
+            '-//tensorflow/core:example_java_proto',
+            '-//tensorflow/core/example:example_protos_closure',
+            '//tensorflow/cc/...',
+            '//tensorflow/c/...',
+            '//tensorflow/python/...',
+            '-//tensorflow/c/eager:c_api_test_gpu',
+            '-//tensorflow/c/eager:c_api_distributed_test',
+            '-//tensorflow/c/eager:c_api_distributed_test_gpu',
+            '-//tensorflow/c/eager:c_api_cluster_test_gpu',
+            '-//tensorflow/c/eager:c_api_remote_function_test_gpu',
+            '-//tensorflow/c/eager:c_api_remote_test_gpu',
+            '-//tensorflow/core/common_runtime:collective_param_resolver_local_test',
+            '-//tensorflow/core/common_runtime:mkl_layout_pass_test',
+            '-//tensorflow/core/kernels/mkl:mkl_fused_ops_test',
+            '-//tensorflow/core/kernels/mkl:mkl_fused_batch_norm_op_test',
+            '-//tensorflow/core/ir/importexport/tests/roundtrip/...',
+        ],
+        'testopts': "--test_timeout=3600 --test_size_filters=small",
+        'testopts_gpu': "--test_timeout=3600 --test_size_filters=small --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute",
+        'with_xla': True,
+        'checksums': [
+            '6eaf86ead73e23988fe192da1db68f4d3828bcdd0f3a9dc195935e339c95dbdc',  # v2.9.1.tar.gz
+            '78c20aeaa7784b8ceb46238a81e8c2461137d28e0b576deeba8357d23fbe1f5a',  # TensorFlow-2.1.0_fix-cuda-build.patch
+            '917ee7282e782e48673596d8917c3207e60e0851bb9acf230a2a439b067af2e3',  # TensorFlow-2.4.0_add-ldl.patch
+            # TensorFlow-2.4.0_dont-use-var-lock.patch
+            'b14f2493fd2edf79abd1c4f2dde6c98a3e7d5cb9c25ab9386df874d5f072d6b5',
+            # TensorFlow-2.5.0_add-support-for-large-core-systems.patch
+            '915f3477d6407fafd48269fe1e684a05ce361d9b9b85e58686682df87760f636',
+            # TensorFlow-2.5.0_disable-avx512-extensions.patch
+            '3655ce24c97569ac9738c07cac85347ba6f5c815ada95b19b606ffa46d4dda03',
+            # TensorFlow-2.5.0-fix-alias-violation-in-absl.patch
+            '12454fda3330fb45cd380377e283f04488b40e0b8ae7378e786ddf731a581f75',
+            # TensorFlow-2.5.0_fix-arm-vector-intrinsics.patch
+            '6abfadc0f67ff3b510d70430843201cb46d7bd65db045ec9b482af70e0c8c0c8',
+            # TensorFlow-2.5.0_fix-crash-on-shutdown.patch
+            '578c7493221ebd3dc25ca43d63a72cbb28fdf4112b1e2baa7390f25781bd78fd',
+            '5427a4cff0afc2fe5b24776ae9ca3616c56a79c1fde0025b37bec24837bb0698',  # TensorFlow-2.7.1_fix_cpu_count.patch
+        ],
+    }),
+    ('promise', '2.3', {
+        'checksums': ['dfd18337c523ba4b6a58801c164c1904a9d4d1b1747c7d5dbf45b693a49d93d0'],
+    }),
+    ('googleapis-common-protos', '1.56.4', {
+        'modulename': 'google',
+        'checksums': ['c25873c47279387cfdcbdafa36149887901d36202cb645a0e4f29686bf6e4417'],
+    }),
+    ('tensorflow_metadata', '1.10.0', {
+        'source_tmpl': '%(name)s-%(version)s-py3-none-any.whl',
+        'checksums': ['e3ff528496105c0d73b2a402877525b1695635378fbe5c1b47ac7b3780816bb3'],
+    }),
+    ('etils', '0.8.0', {
+        'checksums': ['d1d5af7bd9c784a273c4e1eccfaa8feaca5e0481a08717b5313fa231da22a903'],
+    }),
+    ('tensorflow-datasets', '4.7.0', {
+        'source_tmpl': 'v%(version)s.tar.gz',
+        'source_urls': ['https://github.com/tensorflow/datasets/archive'],
+    }),
+]
+
+moduleclass = 'lib'
diff --git a/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1_fix-protobuf-include-def.patch b/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1_fix-protobuf-include-def.patch
new file mode 100644
index 0000000000000000000000000000000000000000..08fed2f77ff9d127c3dbbb1764f2408577fbf01c
--- /dev/null
+++ b/Golden_Repo/t/TensorFlow/TensorFlow-2.9.1_fix-protobuf-include-def.patch
@@ -0,0 +1,13 @@
+Fix an issue where google/protobuf/port_def.inc is not found.
+
+diff -ruN tensorflow-2.9.1_old/third_party/systemlibs/protobuf.BUILD tensorflow-2.9.1/third_party/systemlibs/protobuf.BUILD
+--- tensorflow-2.9.1_old/third_party/systemlibs/protobuf.BUILD	2022-11-10 16:57:13.649126750 +0100
++++ tensorflow-2.9.1/third_party/systemlibs/protobuf.BUILD	2022-11-10 17:00:42.548576599 +0100
+@@ -43,5 +43,6 @@
+         ],
+     ),
+     "wrappers": ("google/protobuf/wrappers.proto", []),
++    "port_def": ("google/protobuf/port_def.inc", []),
+ }
+
+ RELATIVE_WELL_KNOWN_PROTOS = [proto[1][0] for proto in WELL_KNOWN_PROTO_MAP.items()]
\ No newline at end of file