Commit 23640523 authored by Sebastian Achilles

add easyconfigs: NVHPC-21.11.eb

parent f9cd6114
# NVHPC-21.11.eb
name = 'NVHPC'
version = '21.11'
local_gccver = '11.2.0'
homepage = 'https://developer.nvidia.com/hpc-sdk/'
description = """C, C++ and Fortran compilers included with the NVIDIA HPC SDK (previously: PGI)"""
toolchain = SYSTEM
# By downloading, you accept the HPC SDK Software License Agreement (https://docs.nvidia.com/hpc-sdk/eula/index.html)
accept_eula = True
source_urls = ['https://developer.download.nvidia.com/hpc-sdk/%(version)s/']
local_tarball_tmpl = 'nvhpc_2021_%%(version_major)s%%(version_minor)s_Linux_%s_cuda_multi.tar.gz'
sources = [local_tarball_tmpl % '%(arch)s']
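# note: the doubled %% above defers EasyBuild's template resolution, so after the %s substitution the
# source filename becomes 'nvhpc_2021_%(version_major)s%(version_minor)s_Linux_%(arch)s_cuda_multi.tar.gz',
# in which %(version_major)s, %(version_minor)s and %(arch)s are then filled in by EasyBuild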
checksums = [
{
local_tarball_tmpl % 'x86_64':
'd8d8ccd0e558d22bcddd955f2233219c96f7de56aa8e09e7be833e384d32d6aa',
}
]
dependencies = [
('GCCcore', local_gccver),
('binutils', '2.37', '', ('GCCcore', local_gccver)),
('CUDA', '11.5', '', SYSTEM),
# This is necessary to avoid cases where just libnuma.so.1 is present in the system and -lnuma fails
('numactl', '2.0.14', '', SYSTEM)
]
module_add_cuda = False
# specify default CUDA version that should be used by NVHPC
# should match one of the CUDA versions that are included with this NVHPC version
# (see install_components/Linux_x86_64/21.11/cuda/)
# for NVHPC 21.11, those are: 11.5, 11.0, 10.2;
# this version can be tweaked from the EasyBuild command line with
# --try-amend=default_cuda_version="10.2" (for example)
default_cuda_version = '11.5'
# The NVHPC easyblock supports several features, which can be set via the command line or in this easyconfig.
# The following list gives examples for the easyconfig
#
# NVHPC needs CUDA to work. Two options are available: 1) Use NVHPC-bundled CUDA, 2) use system CUDA
# 1) Bundled CUDA
# If no EasyBuild dependency on CUDA is present, the bundled CUDA is used. A version needs to be specified with
# default_cuda_version = "11.0"
# in this easyconfig file; alternatively, it can be specified through the command line during installation with
# --try-amend=default_cuda_version="10.2"
# 2) CUDA provided via EasyBuild
# Use CUDAcore as a dependency, for example
# dependencies = [('CUDAcore', '11.0.2')]
# The parameter default_cuda_version still can be set as above.
# If not set, it will be deduced from the CUDA module (via $EBVERSIONCUDA)
#
# Define a NVHPC-default Compute Capability
# cuda_compute_capabilities = "8.0"
# Can also be specified on the EasyBuild command line via --cuda-compute-capabilities=8.0
# Only single values supported, not lists of values!
#
# Options to add/remove things to/from environment module (defaults shown)
# module_byo_compilers = False # Remove compilers from PATH (Bring-your-own compilers)
# module_nvhpc_own_mpi = False # Add NVHPC's own pre-compiled OpenMPI
# module_add_math_libs = False # Add NVHPC's math libraries (which should be there from CUDA anyway)
# module_add_profilers = False # Add NVHPC's NVIDIA Profilers
# module_add_nccl = False # Add NVHPC's NCCL library
# module_add_nvshmem = False # Add NVHPC's NVSHMEM library
# module_add_cuda = False # Add NVHPC's bundled CUDA
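#
# A hypothetical combination of the options above (kept commented out here), using an
# EasyBuild-provided CUDA instead of the bundled one and exposing the profilers in the module:
# dependencies = [('CUDAcore', '11.0.2')]
# module_add_profilers = True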
# this bundle serves as a compiler-only toolchain, so it should be marked as compiler (important for HMNS)
moduleclass = 'compiler'
# npsmpic-2021.eb
easyblock = 'Toolchain'
name = 'npsmpic'
version = '2021'
homepage = '(none)'
description = 'NVHPC based compiler toolchain, including ParaStation MPI (MPICH-based) for MPI support.'
toolchain = SYSTEM
local_compiler = ('NVHPC', '21.11')
dependencies = [
local_compiler,
('CUDA', '11.5', '', SYSTEM),
('psmpi', '5.5.0-1', '', local_compiler),
]
moduleclass = 'toolchain'
# nvompic-2021b.eb
easyblock = 'Toolchain'
name = 'nvompic'
version = '2021b'
homepage = '(none)'
description = 'NVHPC based compiler toolchain, including OpenMPI for MPI support.'
toolchain = SYSTEM
local_compiler = ('NVHPC', '21.11')
dependencies = [
local_compiler,
('CUDA', '11.5', '', SYSTEM),
('OpenMPI', '4.1.1', '', local_compiler),
]
moduleclass = 'toolchain'
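# A downstream easyconfig would then build on this toolchain by declaring (hypothetical example):
# toolchain = {'name': 'nvompic', 'version': '2021b'}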
# OpenMPI-4.1.1-NVHPC-21.11.eb
easyblock = 'ConfigureMake'
name = 'OpenMPI'
version = '4.1.1'
homepage = 'https://www.open-mpi.org/'
description = """The Open MPI Project is an open source MPI-3 implementation."""
toolchain = {'name': 'NVHPC', 'version': '21.11'}
toolchainopts = {'pic': True}
source_urls = [
'https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
sources = [SOURCELOWER_TAR_BZ2]
checksums = ['e24f7a778bd11a71ad0c14587a7f5b00e68a71aa5623e2157bafee3d44c07cda']
osdependencies = [
# needed for --with-verbs
('libibverbs-dev', 'libibverbs-devel', 'rdma-core-devel'),
# needed for --with-pmix
('pmix-devel'),
]
builddependencies = [
('Autotools', '20210726', '', SYSTEM),
('pkg-config', '0.29.2'),
]
dependencies = [
('zlib', '1.2.11'),
('hwloc', '2.5.0'),
('UCX', '1.11.2', '', SYSTEM),
('CUDA', '11.5', '', SYSTEM),
('libevent', '2.1.12'),
]
configopts = '--enable-shared '
configopts += '--with-hwloc=$EBROOTHWLOC ' # hwloc support
configopts += '--with-ucx=$EBROOTUCX '
configopts += '--with-verbs '
configopts += '--with-libevent=$EBROOTLIBEVENT '
configopts += '--without-orte '
configopts += '--without-psm2 '
configopts += '--disable-oshmem '
configopts += '--with-cuda=$EBROOTCUDA '
configopts += '--with-ime=/opt/ddn/ime '
configopts += '--with-gpfs '
# to enable SLURM integration (site-specific)
configopts += '--with-slurm --with-pmix=external --with-libevent=external --with-ompi-pmix-rte'
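# For reference: the ConfigureMake easyblock concatenates all configopts above into a single
# configure invocation, roughly (sketch, options abbreviated):
# ./configure --prefix=<installdir> --enable-shared --with-hwloc=$EBROOTHWLOC --with-ucx=$EBROOTUCX ... --with-ompi-pmix-rte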
local_libs = ["mpi_mpifh", "mpi", "ompitrace", "open-pal", "open-rte"]
sanity_check_paths = {
'files': ["bin/%s" % local_binfile for local_binfile in ["ompi_info", "opal_wrapper"]] +
["lib/lib%s.%s" % (local_libfile, SHLIB_EXT) for local_libfile in local_libs] +
["include/%s.h" % x for x in ["mpi-ext", "mpif-config",
"mpif", "mpi", "mpi_portable_platform"]],
'dirs': [],
}
moduleclass = 'mpi'
# psmpi-5.5.0-1-NVHPC-21.11.eb
name = 'psmpi'
version = '5.5.0-1'
homepage = 'https://github.com/ParaStation/psmpi2'
description = """ParaStation MPI is an open source high-performance MPI 3.0 implementation,
based on MPICH v3. It provides extra low level communication libraries and integration with
various batch systems for tighter process control.
"""
toolchain = {'name': 'NVHPC', 'version': '21.11'}
sources = [SOURCE_TAR_BZ2]
source_urls = ['https://github.com/ParaStation/psmpi/archive/']
checksums = [
# psmpi-5.5.0-1.tar.bz2
'c178bf618f139857c1bc191938677145cf4fdbec5b8d3afa2ca1de666c791b48',
# psmpi-5.5.0-1_ime.patch
'c2418b9511560dca197242508de9c7b6b117122912b6d3a4aa18398834f465ff',
'978eb3223c978477c40987f745c07fda26ccbad2f468616faf92f0d71b81a156', # psmpi_shebang.patch
]
builddependencies = [
# needed for autogen.sh on CentOS 7
('Autotools', '20210726'),
# Autoconf > 2.69 generates a buggy configure script, so pin it to the version that works
('Autoconf', '2.69'),
]
dependencies = [
('pscom', '5.4-default', '', SYSTEM),
# needed due to the inclusion of hwloc
('libxml2', '2.9.10'),
# Including CUDA here to trigger the hook to add the gpu property, and because it is actually needed
('CUDA', '11.5', '', SYSTEM)
]
# note: patch order matches the checksums list above (source tarball, IME patch, shebang patch)
patches = [
'psmpi-5.5.0-1_ime.patch',
'psmpi_shebang.patch'
]
# mpich_opts = '--enable-static --with-file-system=ime+ufs+gpfs --enable-romio'
# We disable GPFS support, since it seems to be problematic under some circumstances. Alternatively, it can be
# disabled at runtime by setting ROMIO_FSTYPE_FORCE="ufs:", but then we lose IME support
mpich_opts = '--enable-static --with-file-system=ime+ufs --enable-romio'
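# If GPFS support were kept in the build, a hypothetical runtime workaround (at the cost of IME) would be:
# export ROMIO_FSTYPE_FORCE="ufs:"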
preconfigopts = "./autogen.sh && "
preconfigopts += 'export CFLAGS="-I/opt/ddn/ime/include $CFLAGS" && '
preconfigopts += 'export LDFLAGS="$LDFLAGS -L/opt/ddn/ime/lib -lim_client" && '
threaded = False
cuda = True
# We need this here since the hook does not consider the compiler toolchain when injecting these vars
# Add a family for our naming scheme
modluafooter = '''
add_property("arch","gpu")
family("mpi")
if not ( isloaded("mpi-settings/CUDA") ) then
load("mpi-settings/CUDA")
end
'''
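# The Lmod footer above tags the module with the 'gpu' architecture property, registers it in the
# 'mpi' module family, and auto-loads the site's mpi-settings/CUDA module if it is not already loaded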
moduleclass = 'mpi'