Commit eb859598 authored by Damian Alvarez

To fix a bug in NVHPC which resulted in it using the system GCC

parent 23a2c721
 ##
-# Copyright 2015-2022 Bart Oldeman
-# Copyright 2016-2022 Forschungszentrum Juelich
+# Copyright 2015-2023 Bart Oldeman
+# Copyright 2016-2023 Forschungszentrum Juelich
 #
 # This file is triple-licensed under GPLv2 (see below), MIT, and
 # BSD three-clause licenses.
@@ -164,7 +164,10 @@ class EB_NVHPC(PackedBinary):
             line = re.sub(r"^PATH=/", r"#PATH=/", line)
             sys.stdout.write(line)
-        cmd = "%s -x %s -g77 gfortran" % (makelocalrc_filename, compilers_subdir)
+        if LooseVersion(self.version) >= LooseVersion('22.9'):
+            cmd = f"%s -x %s -cuda {default_cuda_version} -stdpar {default_compute_capability}" % (makelocalrc_filename, os.path.join(compilers_subdir, "bin"))
+        else:
+            cmd = "%s -x %s -g77 /" % (makelocalrc_filename, compilers_subdir)
         run_cmd(cmd, log_all=True, simple=True)
         # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so
@@ -306,4 +309,3 @@ class EB_NVHPC(PackedBinary):
         if not self.cfg['module_add_cuda'] and get_software_root('CUDA'):
             txt += self.module_generator.set_environment('NVHPC_CUDA_HOME', os.getenv('CUDA_HOME'))
         return txt
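
For context, here is a minimal, self-contained sketch of the version-gated makelocalrc invocation this hunk introduces. All paths and version strings below are hypothetical placeholders; the real easyblock derives them from the EasyBuild build context and runs the resulting command via run_cmd.

# Minimal sketch of the makelocalrc command construction (placeholder values only).
import os
from distutils.version import LooseVersion  # stands in for EasyBuild's own LooseVersion

version = '22.11'                                    # hypothetical NVHPC version being installed
compilers_subdir = 'Linux_x86_64/22.11/compilers'    # hypothetical bundled-compilers subdirectory
makelocalrc_filename = os.path.join(compilers_subdir, 'bin', 'makelocalrc')
default_cuda_version = '11.7'                        # hypothetical default CUDA version
default_compute_capability = '80'                    # hypothetical compute capability value

if LooseVersion(version) >= LooseVersion('22.9'):
    # For NVHPC >= 22.9, makelocalrc is pointed at the bundled compilers' bin/ directory and
    # given explicit -cuda/-stdpar defaults; per this commit, that avoids the installation
    # falling back to the system GCC.
    cmd = "%s -x %s -cuda %s -stdpar %s" % (makelocalrc_filename,
                                            os.path.join(compilers_subdir, "bin"),
                                            default_cuda_version, default_compute_capability)
else:
    cmd = "%s -x %s -g77 /" % (makelocalrc_filename, compilers_subdir)

print(cmd)  # the easyblock executes this with run_cmd(cmd, log_all=True, simple=True)

The NVHPC 22.11 and 22.9 easyconfigs shown in this commit follow.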
name = 'NVHPC'
version = '22.11'
local_gccver = '11.3.0'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
{
local_tarball_tmpl % 'aarch64':
'e60e798657c33b06754d33dfd5ab3bea2882d4a9b9476102303edf2bbe3b7a95',
local_tarball_tmpl % 'ppc64le':
'ef800203cf6040b3a5df24f19944b272f62caee8362875bcb394e86dc1de2353',
local_tarball_tmpl % 'x86_64':
'cb91b3a04368457d5cfe3c0e9c0611591fdc8076b01ea977343fe7db7fdcfa3c',
}
]
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.38', '', ('GCCcore', local_gccver)),
('CUDA', '11.7', '', SYSTEM),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.15', '', ('GCCcore', local_gccver))
]
module_add_cuda = False
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/22.3/cuda/)
# for NVHPC 22.3, those are: 11.6, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="11.0" (for example)
default_cuda_version = '%(cudaver)s'
# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDA as a dependency, for example
# dependencies = [('CUDA', '11.5.0')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
#    cuda_compute_capabilities = "8.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False # Add NVHPC's NCCL library
# module_add_nvshmem = False # Add NVHPC's NVSHMEM library
# module_add_cuda = False # Add NVHPC's bundled CUDA
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
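
A note on the tarball template used above: local_tarball_tmpl doubles the % in %%(version_major)s and %%(version_minor)s because the string is %-formatted twice, first inside the easyconfig (to fill in the architecture) and then by EasyBuild's template resolution. The snippet below mimics that second stage with a plain dict purely for illustration; the dict substitution is an assumption about the effect, not about how EasyBuild implements it.

# Two-stage expansion of local_tarball_tmpl (illustration only).
local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'

# Stage 1: the easyconfig itself fills in the architecture; each '%%' collapses to '%'.
stage1 = local_tarball_tmpl % 'x86_64'
# stage1 == 'nvhpc_2022_%(version_major)s%(version_minor)s_Linux_x86_64_cuda_multi.tar.gz'

# Stage 2: EasyBuild resolves %(version_major)s / %(version_minor)s; emulated here with a dict.
stage2 = stage1 % {'version_major': '22', 'version_minor': '11'}
assert stage2 == 'nvhpc_2022_2211_Linux_x86_64_cuda_multi.tar.gz'

In the easyconfig the first stage actually uses the %(arch)s template rather than a literal architecture, which is why the checksums dict is keyed per architecture. The nearly identical NVHPC 22.9 easyconfig follows.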
name = 'NVHPC'
version = '22.9'
local_gccver = '11.3.0'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2022_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
checksums = [
{
local_tarball_tmpl % 'aarch64':
'bc4473f04b49bc9a26f08c17a72360650ddf48a3b6eefacdc525d79c8d730f30',
local_tarball_tmpl % 'ppc64le':
'9aac31d36bb09f6653544978021f5b78c272112e7748871566f7e930f5e7475b',
local_tarball_tmpl % 'x86_64':
'aebfeb826ace3dabf9699f72390ca0340f8789a8ef6fe4032e3c7b794f073ea3',
}
]
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.38', '', ('GCCcore', local_gccver)),
('CUDA', '11.7', '', SYSTEM),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.15', '', ('GCCcore', local_gccver))
]
module_add_cuda = False
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/22.3/cuda/)
# for NVHPC 22.3, those are: 11.6, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="11.0" (for example)
default_cuda_version = '%(cudaver)s'
# NVHPC EasyBlock supports some features, which can be set via CLI or this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) use the NVHPC-bundled CUDA, 2) use CUDA provided via EasyBuild
# 1) Bundled CUDA
#    If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDA as a dependency, for example
# dependencies = [('CUDA', '11.5.0')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define an NVHPC-default Compute Capability
#    cuda_compute_capabilities = "8.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values are supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False # Add NVHPC's NCCL library
# module_add_nvshmem = False # Add NVHPC's NVSHMEM library
# module_add_cuda = False # Add NVHPC's bundled CUDA
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
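
Finally, a rough sketch of how the two CUDA-related settings described in the comments interact: default_cuda_version falls back to the loaded CUDA module (exposed by EasyBuild as $EBVERSIONCUDA), and with module_add_cuda left at False the generated module points NVHPC at the external CUDA via NVHPC_CUDA_HOME, as in the make_module_extra hunk above. The helper functions and the Tcl-style setenv line are hypothetical illustrations, not the easyblock's actual API.

# Sketch of the CUDA fallbacks described above (hypothetical helpers, illustration only).
import os

def pick_default_cuda_version(configured=None):
    """Prefer an explicitly configured default_cuda_version; otherwise fall back to the
    version of the CUDA module loaded as a dependency ($EBVERSIONCUDA)."""
    if configured:
        return configured
    return os.getenv('EBVERSIONCUDA')  # None if no CUDA module is loaded

def nvhpc_cuda_home_line(module_add_cuda, cuda_home):
    """If the bundled CUDA is not added to the module and an external CUDA is available,
    emit a (Tcl-style) line pointing NVHPC at it, mirroring the make_module_extra hunk."""
    if not module_add_cuda and cuda_home:
        return 'setenv NVHPC_CUDA_HOME %s' % cuda_home
    return None

# Example usage with placeholder values:
print(pick_default_cuda_version('11.7'))                                  # -> '11.7'
print(nvhpc_cuda_home_line(False, os.getenv('CUDA_HOME', '/opt/cuda')))   # '/opt/cuda' is a placeholder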