python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Container stage"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
from hpccm.primitives.baseimage import baseimage
class Stage(object):
    """Class for container stages.

    Docker may have one or more stages,
    Singularity will always have a single stage.

    # Parameters

    name: Name to use when refering to the stage (Docker specific).
    The default is an empty string.

    separator: Separator to insert between stages. The default is
    '\\n\\n'.
    """

    def __init__(self, **kwargs):
        """Initialize stage"""
        self.__layers = []
        self.name = kwargs.get('name', '')
        self.__separator = kwargs.get('separator', '\n\n')

    def __iadd__(self, layer):
        """Add the layer to the stage. Allows "+=" syntax.

        A list of layers is flattened into the stage; any other value
        is appended as a single layer.
        """
        # The name of the stage should reflect the name the user
        # provided in the baseimage primitive (via the _as parameter).
        # This violates the encapsulation of the baseimage primitive.
        if layer.__class__.__name__ == 'baseimage' and not self.name:
            self.name = layer._baseimage__as

        if isinstance(layer, list):
            self.__layers.extend(layer)
        else:
            self.__layers.append(layer)
        return self

    def __len__(self):
        """Return number of layers"""
        return len(self.__layers)

    def __str__(self):
        """String representation of the stage"""
        return self.__separator.join(str(x) for x in self.__layers)

    def baseimage(self, image, _distro=''):
        """Insert the baseimage as the first layer

        # Arguments

        image (string): The image identifier to use as the base image.
        The value is passed to the `baseimage` primitive.

        _distro: The underlying Linux distribution of the base image.
        The value is passed to the `baseimage` primitive.
        """
        if image:
            self.__layers.insert(0, baseimage(image=image, _as=self.name,
                                              _distro=_distro))

    def runtime(self, _from=None, exclude=None):
        """Generate the set of instructions to install the runtime specific
        components from a previous stage.

        This method invokes the runtime() method for every layer in
        the stage. If a layer does not have a runtime() method, then
        it is skipped.

        # Arguments

        _from: The name of the stage from which to copy the runtime.
        The default is `0`.

        exclude: List of building blocks to exclude when generating
        the runtime. The default is an empty list.

        # Examples

        ```python
        Stage0 += baseimage(image='nvidia/cuda:9.0-devel')
        Stage0 += gnu()
        Stage0 += boost()
        Stage0 += ofed()
        Stage0 += openmpi()
        ...
        Stage1 += baseimage(image='nvidia/cuda:9.0-base')
        Stage1 += Stage0.runtime(exclude=['boost'])
        ```
        """
        # Fix: the original signature used a mutable default argument
        # (exclude=[]).  Use a None sentinel instead; the observable
        # behavior ("exclude nothing" by default) is unchanged.
        if exclude is None:
            exclude = []

        # If the name of the stage is not explicitly specified, use
        # the name of the Stage if available, otherwise 0 (Docker's
        # default)
        if not _from and self.name:
            _from = self.name
        elif not _from:
            if hpccm.config.g_ctype == container_type.SINGULARITY:
                logging.warning('Multi-stage Singularity containers require a named first stage')
            _from = '0'

        instructions = []
        for layer in self.__layers:
            runtime = getattr(layer, 'runtime', None)
            if callable(runtime) and layer.__class__.__name__ not in exclude:
                inst = layer.runtime(_from=_from)
                if inst:
                    instructions.append(inst)
        return self.__separator.join(instructions)
| hpc-container-maker-master | hpccm/Stage.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-import
"""Container recipe"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six import raise_from
from distutils.version import StrictVersion
import logging
import os
import sys
import traceback
import hpccm
import hpccm.config
from hpccm.common import container_type
from hpccm.Stage import Stage
from hpccm.building_blocks import *
from hpccm.primitives import *
def include(recipe_file, _globals=None, _locals=None, prepend_path=True,
            raise_exceptions=False):
    """Include a recipe file

    # Arguments

    recipe_file: path to a recipe file (required)

    _globals: a dictionary representing the global symbol table

    _locals: a dictionary representing the local symbol table

    prepend_path: If True, prepend the path of the main recipe to the
    recipe_file. If the recipe_file is an absolute path, then the path
    is not prepended regardless of the value of this parameter.

    raise_exceptions: If False, do not print stack traces when an
    exception is raised. The default value is False.
    """
    if _locals is None:
        # caller's locals
        _locals = sys._getframe(1).f_locals
    if _globals is None:
        # caller's globals
        _globals = sys._getframe(1).f_globals

    # If a recipe file is included from another recipe file, some way
    # is needed to find the included recipe if it specified using a
    # relative path (relative to the including recipe file). Since
    # recipe files are exec'ed, the value of __file__ is this file,
    # not the recipe file. In order to make including recipes in other
    # recipes using relative paths more intuitive, prepend the path of
    # the base recipe file.
    if (prepend_path and hasattr(include, 'prepend_path')
        and not os.path.isabs(recipe_file)):
        # include.prepend_path is a function attribute set by recipe()
        recipe_file = os.path.join(include.prepend_path, recipe_file)
    try:
        with open(recipe_file) as f:
            # pylint: disable=exec-used
            exec(compile(f.read(), recipe_file, 'exec'), _globals, _locals)
    except Exception as e:
        if raise_exceptions:
            raise_from(e, e)
        else:
            traceback.print_exc()
            # Fix: use sys.exit() rather than the bare exit() builtin;
            # exit() is injected by the site module and is not
            # guaranteed to exist (e.g., under "python -S" or when
            # frozen).
            sys.exit(1)
def recipe(recipe_file, cpu_target=None, ctype=container_type.DOCKER,
           raise_exceptions=False, single_stage=False,
           singularity_version='2.6', userarg=None,
           working_directory='/var/tmp'):
    """Recipe builder

    Executes the recipe file and returns the rendered container
    specification as a single string (stages joined by blank lines).

    # Arguments

    recipe_file: path to a recipe file (required).

    cpu_target: A CPU microarchitecture string recognized by archspec.

    ctype: Enum representing the container specification format. The
    default is `container_type.DOCKER`.

    raise_exceptions: If False, do not print stack traces when an
    exception is raised. The default value is False.

    single_stage: If True, only print the first stage of a multi-stage
    recipe. The default is False.

    singularity_version: Version of the Singularity definition file
    format to use. Multi-stage support was added in version 3.2, but
    the changes are incompatible with earlier versions of Singularity.
    The default is '2.6'.

    userarg: A dictionary of key / value pairs provided to the recipe
    as the `USERARG` dictionary.

    working_directory: path to use as the working directory in the
    container specification
    """
    # Make user arguments available
    # NOTE: USERARG, Stage0, and Stage1 look unused here, but they are
    # intentionally local variables: locals() is passed to include()
    # below, which exec's the recipe file, so the recipe sees them by
    # these exact names.  Do not rename or remove them.
    USERARG = {} # pylint: disable=unused-variable
    if userarg:
        USERARG = userarg # alias

    # Consider just 2 stages for the time being
    stages = [Stage(), Stage()]
    Stage0 = stages[0] # alias # pylint: disable=unused-variable
    Stage1 = stages[1] # alias # pylint: disable=unused-variable

    # Set the CPU target
    hpccm.config.set_cpu_target(cpu_target)

    # Set the global container type
    hpccm.config.g_ctype = ctype

    # Set the global Singularity version
    hpccm.config.g_singularity_version = StrictVersion(singularity_version)

    # Set the global working directory
    hpccm.config.g_wd = working_directory

    # Any included recipes that are specified using relative paths will
    # need to prepend the path to the main recipe in order to be found.
    # Save the path to the main recipe.
    include.prepend_path = os.path.dirname(recipe_file)

    # Load in the recipe file
    include(recipe_file, _locals=locals(), _globals=globals(),
            prepend_path=False, raise_exceptions=raise_exceptions)

    # Only process the first stage of a recipe
    if single_stage:
        del stages[1:]
    elif len(Stage1) > 0:
        # A second stage was populated: check that the target container
        # format can actually express a multi-stage build.
        if (ctype == container_type.SINGULARITY and
            hpccm.config.g_singularity_version < StrictVersion('3.2')):
            # Singularity prior to version 3.2 did not support
            # multi-stage builds. If the Singularity version is not
            # sufficient to support multi-stage, provide advice to
            # specify a sufficient Singularity version or disable
            # multi-stage.
            logging.warning('This looks like a multi-stage recipe. '
                            'Singularity 3.2 or later is required for '
                            'multi-stage builds. Use '
                            '--singularity-version=3.2 to enable this '
                            'feature or --single-stage to get rid of this '
                            'warning. Only processing the first stage...')
            del stages[1:]
        elif ctype == container_type.BASH:
            logging.warning('This looks like a multi-stage recipe, but '
                            'bash does not support multi-stage builds. '
                            'Use --single-stage to get rid of this warning. '
                            'Only processing the first stage...')
            del stages[1:]

    # Render each remaining stage, separated by a single blank line.
    r = []
    for index, stage in enumerate(stages):
        if index >= 1:
            r.append('')
        r.append(str(stage))

    return '\n'.join(r)
| hpc-container-maker-master | hpccm/recipe.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""KNEM building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class knem(bb_base, hpccm.templates.envvars):
    """The `knem` building block install the headers from the
    [KNEM](http://knem.gforge.inria.fr) component.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    environment: Boolean flag to specify whether the environment
    (`CPATH`) should be modified to include knem. The default is True.

    ospackages: List of OS packages to install prior to installing.
    The default values are `ca-certificates` and `git`.

    prefix: The top level install location. The default value is
    `/usr/local/knem`.

    version: The version of KNEM source to download. The default
    value is `1.1.4`.

    # Examples

    ```python
    knem(prefix='/opt/knem/1.1.3', version='1.1.3')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(knem, self).__init__(**kwargs)

        # User-configurable options are popped so that the remaining
        # kwargs can be forwarded untouched to generic_build.
        self.__ospackages = kwargs.pop('ospackages',
                                       ['ca-certificates', 'git'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/knem')
        self.__repository = kwargs.pop(
            'repository', 'https://gitlab.inria.fr/knem/knem.git')
        self.__version = kwargs.pop('version', '1.1.4')

        # Only the headers are installed, so CPATH is the only
        # environment variable that needs to be extended.
        include_dir = posixpath.join(self.__prefix, 'include')
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(include_dir)

        # The "install" step simply copies the headers into the prefix.
        header_install = ['mkdir -p {}/include'.format(self.__prefix),
                          'cp common/*.h {}/include'.format(self.__prefix)]

        # Delegate the clone / install mechanics to generic_build
        self.__bb = generic_build(
            base_annotation=self.__class__.__name__,
            branch='knem-{}'.format(self.__version),
            comment=False,
            devel_environment=self.environment_variables,
            install=header_install,
            runtime_environment=self.environment_variables,
            prefix=self.__prefix,
            repository=self.__repository,
            **kwargs)

        # Container instructions
        self += comment('KNEM version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        k = knem(...)
        Stage0 += k
        Stage1 += k.runtime()
        ```
        """
        self.rt += comment('KNEM')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/knem.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""GNU compiler building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.ConfigureMake
import hpccm.templates.envvars
import hpccm.templates.git
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class gnu(bb_base, hpccm.templates.ConfigureMake, hpccm.templates.envvars,
          hpccm.templates.git, hpccm.templates.ldconfig, hpccm.templates.rm,
          hpccm.templates.tar, hpccm.templates.wget):
    """The `gnu` building block installs the GNU compilers from the
    upstream Linux distribution.

    As a side effect, a toolchain is created containing the GNU
    compilers. The toolchain can be passed to other operations that
    want to build using the GNU compilers.

    # Parameters

    cc: Boolean flag to specify whether to install `gcc`. The default
    is True.

    configure_opts: List of options to pass to `configure`. The
    default value is `--disable-multilib`. This option is only
    recognized if a source build is enabled.

    cxx: Boolean flag to specify whether to install `g++`. The
    default is True.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    the GNU compiler. The default is True.

    extra_repository: Boolean flag to specify whether to enable an
    extra package repository containing addition GNU compiler
    packages. For Ubuntu, setting this flag to True enables the
    `ppa:ubuntu-toolchain-r/test` repository. For RHEL-based Linux
    distributions, setting this flag to True enables the Software
    Collections (SCL) repository. The default is False.

    fortran: Boolean flag to specify whether to install `gfortran`.
    The default is True.

    ldconfig: Boolean flag to specify whether the GNU library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the GNU library
    directory. The default value is False. This option is only
    recognized if a source build is enabled.

    openacc: Boolean flag to control whether a OpenACC enabled
    compiler is built. If True, adds `--with-cuda-driver` and
    `--enable-offload-targets=nvptx-none` to the list of host compiler
    `configure` options and also builds the accelerator compiler and
    dependencies (`nvptx-tools` and `nvptx-newlib`). The default value
    is False. This option is only recognized if a source build is
    enabled.

    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `bzip2`, `file`,
    `gcc`, `g++`, `git`, `make`, `perl`, `tar`, `wget`, and
    `xz-utils`. For RHEL-based Linux distributions, the default
    values are `bzip2`, `file`, `gcc`, `gcc-c++`, `git`, `make`,
    `perl`, `tar`, `wget`, and `xz`. This option is only recognized if
    a source build is enabled.

    prefix: The top level install location. The default value is
    `/usr/local/gnu`. This option is only recognized if a source build
    is enabled.

    source: Boolean flag to control whether to build the GNU compilers
    from source. The default value is False.

    version: The version of the GNU compilers to install. Note that
    the version refers to the Linux distribution packaging, not the
    actual compiler version. For Ubuntu, the version is appended to
    the default package name, e.g., `gcc-7`. For RHEL-based Linux
    distributions, the version is inserted into the SCL Developer
    Toolset package name, e.g., `devtoolset-7-gcc`. For RHEL-based
    Linux distributions, specifying the version automatically sets
    `extra_repository` to True. If a source build is enabled, the
    version is the compiler tarball version on the GNU FTP site and
    the version must be specified. The default is an empty value.

    # Examples

    ```python
    gnu()
    ```

    ```python
    gnu(fortran=False)
    ```

    ```python
    gnu(extra_repository=True, version='7')
    ```

    ```python
    gnu(openacc=True, source=True, version='9.1.0')
    ```

    ```python
    g = gnu()
    openmpi(..., toolchain=g.toolchain, ...)
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(gnu, self).__init__(**kwargs)

        self.__baseurl = kwargs.get('baseurl', 'http://ftpmirror.gnu.org/gcc')
        self.__cc = kwargs.get('cc', True)
        self.configure_opts = kwargs.get('configure_opts',
                                         ['--disable-multilib'])
        self.__cxx = kwargs.get('cxx', True)
        self.__extra_repo = kwargs.get('extra_repository', False)
        self.__fortran = kwargs.get('fortran', True)
        self.__openacc = kwargs.get('openacc', False)
        self.__ospackages = kwargs.get('ospackages', [])
        self.prefix = kwargs.get('prefix', '/usr/local/gnu')
        self.__source = kwargs.get('source', False)
        self.__version = kwargs.get('version', None)
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Accumulators filled in by __build() / __repository() / __distro()
        self.__commands = [] # Filled in below
        self.__compiler_debs = [] # Filled in below
        self.__compiler_rpms = [] # Filled in below
        self.__extra_repo_apt = [] # Filled in below
        self.__runtime_debs = ['libgomp1']
        self.__runtime_rpms = ['libgomp']

        # Output toolchain
        self.toolchain = toolchain()

        # Source build and package install are mutually exclusive paths
        if self.__source:
            self.__build()
        else:
            self.__repository()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Fill in container instructions
        self.__instructions()

    def __configure_toolchain_on_build(self):
        """Point the output toolchain at the compilers installed under the
        source-build prefix."""
        directory = posixpath.join(self.prefix, 'bin')
        if self.__cc:
            self.toolchain.CC = posixpath.join(directory, 'gcc')
            self.toolchain.CFLAGS = hpccm.config.get_cpu_optimization_flags('gcc', self.__version)
        if self.__cxx:
            self.toolchain.CXX = posixpath.join(directory, 'g++')
            self.toolchain.CXXFLAGS = hpccm.config.get_cpu_optimization_flags('gcc', self.__version)
        if self.__fortran:
            # gfortran serves as the FC, F77, and F90 compiler
            self.toolchain.FC = posixpath.join(directory, 'gfortran')
            self.toolchain.F77 = posixpath.join(directory, 'gfortran')
            self.toolchain.F90 = posixpath.join(directory, 'gfortran')
            self.toolchain.FFLAGS = hpccm.config.get_cpu_optimization_flags('gcc', self.__version)
            self.toolchain.FCFLAGS = hpccm.config.get_cpu_optimization_flags('gcc', self.__version)
        # Propagate the library path (set by __build when ldconfig is off)
        if "LD_LIBRARY_PATH" in self.environment_variables:
            self.toolchain.LD_LIBRARY_PATH = self.environment_variables["LD_LIBRARY_PATH"]

    def __build(self):
        """Build compilers from source"""
        if not self.__version:
            raise RuntimeError('The compiler version must be specified when performing a source build')

        # Determine which compiler frontends to build
        languages = []
        if self.__cc:
            languages.append('c')
        if self.__cxx:
            languages.append('c++')
        if self.__fortran:
            languages.append('fortran')
        if self.__openacc:
            # lto is required for the nvptx offload compiler
            languages.append('lto')

        # Download source from web
        tarball = 'gcc-{0}.tar.xz'.format(self.__version)
        url = '{0}/gcc-{1}/{2}'.format(self.__baseurl, self.__version, tarball)
        self.__commands.append(self.download_step(url=url, directory=self.__wd))

        # Unpackage
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball),
            directory=self.__wd))

        # Download prerequisites (GMP, MPFR, MPC, etc.)
        self.__commands.append(
            'cd {} && ./contrib/download_prerequisites'.format(
                posixpath.join(self.__wd, 'gcc-{}'.format(self.__version))))

        # Configure accelerator compiler and dependencies
        if self.__openacc:
            # Build nvptx-tools
            # Download
            self.__commands.append(
                self.clone_step(repository='https://github.com/MentorEmbedded/nvptx-tools.git',
                                branch='master', path=self.__wd))
            # Configure
            nvptx_tools = hpccm.templates.ConfigureMake(prefix=self.prefix)
            self.__commands.append(nvptx_tools.configure_step(
                directory=posixpath.join(self.__wd, 'nvptx-tools')))
            # Build
            self.__commands.append(nvptx_tools.build_step())
            self.__commands.append(nvptx_tools.install_step())
            # Cleanup
            self.__commands.append(self.cleanup_step(
                items=[posixpath.join(self.__wd, 'nvptx-tools')]))

            # Setup nvptx-newlib: symlinked into the gcc source tree so
            # it is built as part of the accelerator compiler
            self.__commands.append('cd {}'.format(self.__wd))
            self.__commands.append(
                self.clone_step(repository='https://github.com/MentorEmbedded/nvptx-newlib',
                                branch='master', path=self.__wd))
            self.__commands.append('ln -s {0} {1}'.format(
                posixpath.join(self.__wd, 'nvptx-newlib', 'newlib'),
                posixpath.join(self.__wd, 'gcc-{}'.format(self.__version),
                               'newlib')))

            # Accelerator compiler
            # Configure
            accel = hpccm.templates.ConfigureMake(prefix=self.prefix)
            self.__commands.append(accel.configure_step(
                build_directory=posixpath.join(self.__wd, 'accel_objdir'),
                directory=posixpath.join(self.__wd,
                                         'gcc-{}'.format(self.__version)),
                opts=['--enable-languages={}'.format(','.join(languages)),
                      '--target=nvptx-none',
                      '--enable-as-accelerator-for=x86_64-pc-linux-gnu',
                      '--disable-sjlj-exceptions',
                      '--enable-newlib-io-long-long',
                      '--disable-multilib']))
            # Build
            self.__commands.append(accel.build_step())
            # Install
            self.__commands.append(accel.install_step())

        # Configure host compiler
        if self.__openacc:
            self.configure_opts.extend(['--with-cuda-driver=/usr/local/cuda',
                                        '--enable-offload-targets=nvptx-none={}/nvptx-none'.format(self.prefix)])
        self.configure_opts.append('--enable-languages={}'.format(','.join(languages)))
        self.__commands.append(self.configure_step(
            build_directory=posixpath.join(self.__wd, 'objdir'),
            directory=posixpath.join(self.__wd,
                                     'gcc-{}'.format(self.__version))))

        # Build
        self.__commands.append(self.build_step())

        # Install
        self.__commands.append(self.install_step())

        # Environment
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.prefix, 'bin'))
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(self.prefix, 'lib64')))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.prefix, 'lib64'))

        # Setup toolchain
        self.__configure_toolchain_on_build()

        # Cleanup
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, 'gcc-{}'.format(self.__version)),
                   posixpath.join(self.__wd, 'objdir')]))
        if self.__openacc:
            self.__commands.append(self.cleanup_step(
                items=[posixpath.join(self.__wd, 'accel_objdir'),
                       posixpath.join(self.__wd, 'nvptx-newlib')]))

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if self.__source:
            # Build dependencies
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__ospackages = ['bzip2', 'file', 'gcc', 'g++', 'git',
                                     'make', 'perl', 'tar', 'wget', 'xz-utils']
            elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
                self.__ospackages = ['bzip2', 'file', 'gcc', 'gcc-c++', 'git',
                                     'make', 'perl', 'tar', 'wget', 'xz']
            else: # pragma: no cover
                raise RuntimeError('Unknown Linux distribution')
        else:
            # Set libfortran version depending on the Ubuntu version
            if self.__fortran:
                if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                    if hpccm.config.g_linux_version >= StrictVersion('20.0'):
                        self.__runtime_debs.append('libgfortran5')
                    elif hpccm.config.g_linux_version >= StrictVersion('18.0'):
                        self.__runtime_debs.append('libgfortran4')
                    elif hpccm.config.g_linux_version >= StrictVersion('16.0'):
                        self.__runtime_debs.append('libgfortran3')
                    else: # pragma: no cover
                        raise RuntimeError('Unrecognized Ubuntu version')

            # Setup the environment so that the alternate compiler version
            # is the new default
            if self.__version:
                alternatives = {}
                if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                    if self.__cc:
                        alternatives['gcc'] = '$(which gcc-{})'.format(
                            self.__version)
                    if self.__cxx:
                        alternatives['g++'] = '$(which g++-{})'.format(
                            self.__version)
                    if self.__fortran:
                        alternatives['gfortran'] = '$(which gfortran-{})'.format(
                            self.__version)
                    alternatives['gcov'] = '$(which gcov-{})'.format(
                        self.__version)
                elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
                    # Default for CentOS 7
                    toolset_path = '/opt/rh/devtoolset-{}/root/usr/bin'.format(
                        self.__version)
                    if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                        # CentOS 8
                        toolset_path = '/opt/rh/gcc-toolset-{}/root/usr/bin'.format(self.__version)
                    if self.__cc:
                        alternatives['gcc'] = posixpath.join(toolset_path, 'gcc')
                    if self.__cxx:
                        alternatives['g++'] = posixpath.join(toolset_path, 'g++')
                    if self.__fortran:
                        alternatives['gfortran'] = posixpath.join(toolset_path,
                                                                  'gfortran')
                    alternatives['gcov'] = posixpath.join(toolset_path, 'gcov')
                else: # pragma: no cover
                    raise RuntimeError('Unknown Linux distribution')

                # sorted() keeps the generated commands deterministic
                for tool,alt in sorted(alternatives.items()):
                    self.__commands.append('update-alternatives --install {0} {1} {2} 30'.format(posixpath.join('/usr/bin', tool), tool, alt))

    def __instructions(self):
        """Fill in container instructions"""
        self += comment('GNU compiler')
        if self.__source:
            # Installing from source
            self += packages(ospackages=self.__ospackages)
        else:
            # Installing from package repository
            self += packages(apt=self.__compiler_debs,
                             apt_ppas=self.__extra_repo_apt,
                             release_stream=bool(self.__version), # True/False
                             scl=bool(self.__version), # True / False
                             yum=self.__compiler_rpms)
        if self.__commands:
            self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())

    def __repository(self):
        """Setup installation from a package repository"""
        if self.__cc:
            self.__compiler_debs.append('gcc')
            self.__compiler_rpms.append('gcc')
            self.toolchain.CC = 'gcc'
            self.toolchain.CFLAGS = hpccm.config.get_cpu_optimization_flags('gcc')
        if self.__cxx:
            self.__compiler_debs.append('g++')
            self.__compiler_rpms.append('gcc-c++')
            self.toolchain.CXX = 'g++'
            self.toolchain.CXXFLAGS = hpccm.config.get_cpu_optimization_flags('gcc')
        if self.__fortran:
            self.__compiler_debs.append('gfortran')
            # libgfortran runtime deb is set is __distro()
            self.__compiler_rpms.append('gcc-gfortran')
            self.__runtime_rpms.append('libgfortran')
            self.toolchain.F77 = 'gfortran'
            self.toolchain.F90 = 'gfortran'
            self.toolchain.FC = 'gfortran'
            self.toolchain.FFLAGS = hpccm.config.get_cpu_optimization_flags('gcc')
            self.toolchain.FCFLAGS = hpccm.config.get_cpu_optimization_flags('gcc')

        # Install an alternate version, i.e., not the default for
        # the Linux distribution
        if self.__version:
            if self.__extra_repo:
                self.__extra_repo_apt = ['ppa:ubuntu-toolchain-r/test']

            # Adjust package names based on specified version
            self.__compiler_debs = [
                '{0}-{1}'.format(x, self.__version)
                for x in self.__compiler_debs]
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                # CentOS 8
                self.__compiler_rpms = [
                    'gcc-toolset-{1}-{0}'.format(x, self.__version)
                    for x in self.__compiler_rpms]
            else:
                # CentOS 7
                self.__compiler_rpms = [
                    'devtoolset-{1}-{0}'.format(x, self.__version)
                    for x in self.__compiler_rpms]

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        g = gnu(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        self.rt += comment('GNU compiler runtime')
        if self.__source:
            # Source build: copy the built runtime libraries from the
            # previous stage and make them resolvable
            self.rt += copy(_from=_from,
                            dest=posixpath.join(self.prefix, 'lib64'),
                            src=posixpath.join(self.prefix, 'lib64'))
            if self.ldconfig:
                self.rt += shell(commands=[self.ldcache_step(
                    directory=posixpath.join(self.prefix, 'lib64'))])
            else:
                self.rt += environment(variables=self.environment_step(
                    include_only=['LD_LIBRARY_PATH']))
        else:
            # Package install: only the runtime packages are needed
            self.rt += packages(
                apt=self.__runtime_debs,
                apt_ppas=self.__extra_repo_apt,
                release_stream=bool(self.__version), # True / False
                scl=bool(self.__version), # True / False
                yum=self.__runtime_rpms)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/gnu.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""OFED building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import posixpath
import hpccm.config
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.shell import shell
class ofed(bb_base):
    """The `ofed` building block installs the OpenFabrics Enterprise
    Distribution packages that are part of the Linux distribution.

    For Ubuntu 16.04, the following packages are installed:
    `dapl2-utils`, `ibutils`, `ibverbs-utils`, `infiniband-diags`,
    `libdapl2`, `libdapl-dev`, `libibcm1`, `libibcm-dev`, `libibmad5`,
    `libibmad-dev`, `libibverbs1`, `libibverbs-dev`, `libmlx4-1`,
    `libmlx4-dev`, `libmlx5-1`, `libmlx5-dev`, `librdmacm1`,
    `librdmacm-dev`, and `rdmacm-utils`. For Ubuntu 16.04 and aarch64
    processors, the `dapl2-utils`, `libdapl2`, `libdapl-dev`,
    `libibcm1` and `libibcm-dev` packages are not installed because
    they are not available. For Ubuntu 16.04 and ppc64le processors,
    the `libibcm1` and `libibcm-dev` packages are not installed
    because they are not available.

    For Ubuntu 18.04, the following packages are installed:
    `dapl2-utils`, `ibutils`, `ibverbs-providers`, `ibverbs-utils`,
    `infiniband-diags`, `libdapl2`, `libdapl-dev`, `libibmad5`,
    `libibmad-dev`, `libibverbs1`, `libibverbs-dev`, `librdmacm1`,
    `librdmacm-dev`, and `rdmacm-utils`.

    For RHEL-based 7.x distributions, the following packages are
    installed: `dapl`, `dapl-devel`, `ibutils`, `libibcm`, `libibmad`,
    `libibmad-devel`, `libmlx5`, `libibumad`, `libibverbs`,
    `libibverbs-utils`, `librdmacm`, `rdma-core`, and
    `rdma-core-devel`.

    For RHEL-based 8.x distributions, the following packages are
    installed: `libibmad`, `libibmad-devel`, `libmlx5`, `libibumad`,
    `libibverbs`, `libibverbs-utils`, `librdmacm`, `rdma-core`, and
    `rdma-core-devel`.

    # Parameters

    prefix: The top level install location. Instead of installing the
    packages via the package manager, they will be extracted to this
    location. This option is useful if multiple versions of OFED need
    to be installed. The environment must be manually configured to
    recognize the OFED location, e.g., in the container entry
    point. The default value is empty, i.e., install via the package
    manager to the standard system locations.

    symlink: Boolean flag to specify whether library symlinks should
    be created in `<prefix>/lib` for the extracted libraries. Only
    recognized when `prefix` is set. The default value is False.

    # Examples

    ```python
    ofed()
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(ofed, self).__init__(**kwargs)

        self.__deppackages = []   # Filled in by __distro()
        self.__extra_opts = []    # Filled in by __distro()
        self.__ospackages = []    # Filled in by __distro()
        self.__powertools = False # enable the CentOS PowerTools repo
        self.__prefix = kwargs.get('prefix', None)
        self.__symlink = kwargs.get('symlink', False)
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Set the Linux distribution specific parameters
        self.__distro()

        # Fill in container instructions
        self.__instructions()

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            self.__deppackages = ['libnl-3-200', 'libnl-route-3-200',
                                  'libnuma1']
            if hpccm.config.g_linux_version >= StrictVersion('18.0'):
                # Give priority to packages from the Ubuntu repositories over
                # vendor repositories
                if hpccm.config.g_linux_version >= StrictVersion('22.0'):
                    self.__extra_opts = ['-t jammy']
                elif hpccm.config.g_linux_version >= StrictVersion('20.0'):
                    self.__extra_opts = ['-t focal']
                else:
                    self.__extra_opts = ['-t bionic']

                self.__ospackages = ['dapl2-utils', 'ibutils',
                                     'ibverbs-providers', 'ibverbs-utils',
                                     'infiniband-diags',
                                     'libdapl2', 'libdapl-dev',
                                     'libibmad5', 'libibmad-dev',
                                     'libibverbs1', 'libibverbs-dev',
                                     'librdmacm1', 'librdmacm-dev',
                                     'rdmacm-utils']
            else:
                # Give priority to packages from the Ubuntu repositories over
                # vendor repositories
                self.__extra_opts = ['-t xenial']

                self.__ospackages = ['dapl2-utils', 'ibutils', 'ibverbs-utils',
                                     'infiniband-diags',
                                     'libdapl2', 'libdapl-dev',
                                     'libibcm1', 'libibcm-dev',
                                     'libibmad5', 'libibmad-dev',
                                     'libibverbs1', 'libibverbs-dev',
                                     'libmlx4-1', 'libmlx4-dev',
                                     'libmlx5-1', 'libmlx5-dev',
                                     'librdmacm1', 'librdmacm-dev',
                                     'rdmacm-utils']

                if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
                    # Ubuntu 16.04 for ARM is missing these packages
                    for missing in ['dapl2-utils', 'libdapl2', 'libdapl-dev',
                                    'libibcm1', 'libibcm-dev']:
                        if missing in self.__ospackages:
                            self.__ospackages.remove(missing)
                elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
                    # Ubuntu 16.04 for Power is missing these packages
                    for missing in ['libibcm1', 'libibcm-dev']:
                        if missing in self.__ospackages:
                            self.__ospackages.remove(missing)

        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            # Skip any Mellanox repositories that may be configured so
            # only the distribution packages are considered
            self.__extra_opts = [r'--disablerepo=mlnx\*']

            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__deppackages = ['libnl3', 'numactl-libs']
                self.__ospackages = ['libibmad', 'libibmad-devel',
                                     'libibumad', 'libibverbs',
                                     'libibverbs-utils', 'libmlx5',
                                     'librdmacm',
                                     'rdma-core', 'rdma-core-devel']
                self.__powertools = True
            else:
                self.__deppackages = ['libnl', 'libnl3', 'numactl-libs']
                self.__ospackages = ['dapl', 'dapl-devel', 'ibutils',
                                     'libibcm', 'libibmad', 'libibmad-devel',
                                     'libmlx5', 'libibumad', 'libibverbs',
                                     'libibverbs-utils', 'librdmacm',
                                     'rdma-core', 'rdma-core-devel']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('OFED')

        if self.__prefix:
            commands = []

            # Extract to a prefix - not a "real" package manager install
            self += packages(ospackages=self.__deppackages)
            self += packages(download=True, extra_opts=self.__extra_opts,
                             extract=self.__prefix,
                             ospackages=self.__ospackages,
                             powertools=self.__powertools)

            # library symlinks
            if self.__symlink:
                self.__deppackages.append('findutils')

                commands.append('mkdir -p {0} && cd {0}'.format(
                    posixpath.join(self.__prefix, 'lib')))
                # Prune the symlink directory itself and any debug
                # libraries.  Use a raw string so that the trailing
                # "\;" (find's -exec terminator) is not treated as an
                # invalid Python escape sequence.
                commands.append(r'find .. -path ../lib -prune -o -name "*valgrind*" -prune -o -name "lib*.so*" -exec ln -s {} \;')
                commands.append('cd {0} && ln -s usr/bin bin && ln -s usr/include include'.format(
                    self.__prefix))

            # Suppress warnings from libibverbs
            commands.append('mkdir -p /etc/libibverbs.d')

            self += shell(commands=commands)
        else:
            # Install packages using package manager
            self += packages(extra_opts=self.__extra_opts,
                             ospackages=self.__ospackages,
                             powertools=self.__powertools)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        o = ofed(...)
        Stage0 += o
        Stage1 += o.runtime()
        ```
        """
        if self.__prefix:
            self.rt += comment('OFED')

            if self.__deppackages:
                self.rt += packages(ospackages=self.__deppackages)

            # Suppress warnings from libibverbs
            self.rt += shell(commands=['mkdir -p /etc/libibverbs.d'])

            self.rt += copy(_from=_from, dest=self.__prefix, src=self.__prefix)
            return str(self.rt)
        else:
            # Packages were installed to the standard system locations,
            # so the full install (including runtime files) is repeated
            return str(self)
| hpc-container-maker-master | hpccm/building_blocks/ofed.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""'multi' OFED building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.annotate
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.mlnx_ofed import mlnx_ofed
from hpccm.building_blocks.ofed import ofed
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.label import label
from hpccm.primitives.shell import shell
class multi_ofed(bb_base, hpccm.templates.annotate):
    """The `multi_ofed` building block downloads and installs multiple
    versions of the OpenFabrics Enterprise Distribution (OFED). Please
    refer to the [`mlnx_ofed`](#mlnx_ofed) and [`ofed`](#ofed)
    building blocks for more information.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    inbox: Boolean flag to specify whether to install the 'inbox' OFED
    distributed by the Linux distribution. The default is True.

    mlnx_oslabel: The Linux distribution label assigned by Mellanox to
    the tarball. Please see the corresponding
    [`mlnx_ofed`](#mlnx_ofed) parameter for more information.

    mlnx_packages: List of packages to install from Mellanox
    OFED. Please see the corresponding [`mlnx_ofed`](#mlnx_ofed)
    parameter for more information.

    mlnx_versions: A list of [Mellanox OpenFabrics Enterprise Distribution for Linux](http://www.mellanox.com/page/products_dyn?product_family=26)
    versions to install. The default values are `3.4-2.0.0.0`,
    `4.0-2.0.0.1`, `4.1-1.0.2.0`, `4.2-1.2.0.0`, `4.3-1.0.1.0`,
    `4.4-1.0.0.0`, `4.5-1.0.1.0`, `4.6-1.0.1.1`, `4.7-3.2.9.0`,
    `5.0-2.1.8.0`, and `5.1-2.3.7.1`.

    ospackages: List of OS packages to install prior to installing
    OFED. For Ubuntu, the default values are `libnl-3-200`,
    `libnl-route-3-200`, and `libnuma1`. For RHEL-based Linux
    distributions, the default values are `libnl`, `libnl3`, and
    `numactl-libs`.

    prefix: The top level install location. The OFED packages will be
    extracted to this location as subdirectories named for the
    respective Mellanox OFED version, or `inbox` for the 'inbox'
    OFED. The environment must be manually configured to recognize the
    desired OFED location, e.g., in the container entry point. The
    default value is `/usr/local/ofed`.

    symlink: Boolean flag passed through to the [`mlnx_ofed`](#mlnx_ofed)
    and [`ofed`](#ofed) building blocks to specify whether library
    symlinks should be created inside each OFED install location. The
    default is False.

    # Examples

    ```python
    multi_ofed(inbox=True, mlnx_versions=['4.5-1.0.1.0', '4.6-1.0.1.1'],
               prefix='/usr/local/ofed')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(multi_ofed, self).__init__(**kwargs)
        self.__inbox = kwargs.get('inbox', True)
        self.__mlnx_oslabel = kwargs.get('mlnx_oslabel', '')
        self.__mlnx_packages = kwargs.get('mlnx_packages', [])
        self.__mlnx_versions = kwargs.get('mlnx_versions',
                                          ['3.4-2.0.0.0', '4.0-2.0.0.1',
                                           '4.1-1.0.2.0', '4.2-1.2.0.0',
                                           '4.3-1.0.1.0', '4.4-1.0.0.0',
                                           '4.5-1.0.1.0', '4.6-1.0.1.1',
                                           '4.7-3.2.9.0', '5.0-2.1.8.0',
                                           '5.1-2.3.7.1'])
        self.__ospackages = kwargs.get('ospackages', [])
        self.__prefix = kwargs.get('prefix', '/usr/local/ofed')
        self.__symlink = kwargs.get('symlink', False)
        self.__commands = []
        # Set the Linux distribution specific parameters
        self.__distro()
        # Fill in container instructions
        self.__instructions()

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value (ospackages) overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['libnl-3-200', 'libnl-route-3-200',
                                     'libnuma1']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    self.__ospackages = ['libnl3', 'numactl-libs']
                else:
                    self.__ospackages = ['libnl', 'libnl3', 'numactl-libs']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __instructions(self):
        """Fill in container instructions"""
        # Mellanox OFED: one install per requested version, each
        # extracted to its own versioned subdirectory of the prefix
        for version in self.__mlnx_versions:
            self += mlnx_ofed(annotate=False,
                              oslabel=self.__mlnx_oslabel,
                              packages=self.__mlnx_packages,
                              prefix=posixpath.join(self.__prefix, version),
                              symlink=self.__symlink,
                              version=version)
        # Inbox OFED
        if self.__inbox:
            self += ofed(prefix=posixpath.join(self.__prefix, 'inbox'),
                         symlink=self.__symlink)
            # Also expose the inbox OFED under a version-style name
            # ("5.0-0"), presumably so version-based selection logic
            # can pick it up -- NOTE(review): confirm this alias is
            # still the intended version string
            self += shell(commands=['ln -s {0} {1}'.format(
                posixpath.join(self.__prefix, 'inbox'),
                posixpath.join(self.__prefix, '5.0-0'))])
        # Annotations
        self.add_annotation('mlnx_versions', ', '.join(self.__mlnx_versions))
        self.add_annotation('inbox', self.__inbox)
        self += label(metadata=self.annotate_step())

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        """
        self.rt += comment('OFED')
        if self.__ospackages:
            self.rt += packages(ospackages=self.__ospackages)
        # Suppress warnings from libibverbs
        self.rt += shell(commands=['mkdir -p /etc/libibverbs.d'])
        # Copy the entire multi-version install tree from the build stage
        self.rt += copy(_from=_from, dest=self.__prefix, src=self.__prefix)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/multi_ofed.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""PGI building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import logging # pylint: disable=unused-import
import re
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class pgi(bb_base, hpccm.templates.envvars, hpccm.templates.rm,
          hpccm.templates.tar, hpccm.templates.wget):
    """The `pgi` building block installs the PGI compiler from a
    manually downloaded package.

    Note: The [NVIDIA HPC SDK](https://developer.nvidia.com/hpc-sdk)
    has replaced the PGI compilers. The [nvhpc](#nvhpc) building
    block should be used instead of this building block.

    You must agree to the [PGI End-User License Agreement](https://www.pgroup.com/doc/LICENSE.txt) to use this
    building block.

    As a side effect, a toolchain is created containing the PGI
    compilers. The toolchain can be passed to other operations that
    want to build using the PGI compilers.

    # Parameters

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and potentially other variables)
    should be modified to include the PGI compiler. The default is
    True.

    eula: By setting this value to `True`, you agree to the [PGI End-User License Agreement](https://www.pgroup.com/doc/LICENSE.txt).
    The default value is `False`.

    extended_environment: Boolean flag to specify whether an extended
    set of environment variables should be defined. If True, the
    following environment variables will be defined: `CC`, `CPP`,
    `CXX`, `F77`, `F90`, `FC`, and `MODULEPATH`. In addition, if the
    PGI MPI component is selected then `PGI_OPTL_INCLUDE_DIRS` and
    `PGI_OPTL_LIB_DIRS` will also be defined and `PATH` and
    `LD_LIBRARY_PATH` will include the PGI MPI component. If False,
    then only `PATH` and `LD_LIBRARY_PATH` will be extended to include
    the PGI compiler. The default value is `False`.

    fix_ownership: Boolean flag to specify whether the ownership of
    the installed files should be changed to `root` (some files in the
    PGI package are owned by a non-root uid/gid). The default value is
    False.

    mpi: Boolean flag to specify whether the MPI component should be
    installed. If True, MPI will be installed. The default value is
    False.

    ospackages: List of OS packages to install prior to installing the
    PGI compiler. For Ubuntu, the default values are `gcc`, `g++`,
    `libnuma1` and `perl`, and also `wget` (if downloading the PGI
    compiler rather than using a tarball in the local build context).
    For RHEL-based Linux distributions, the default values are `gcc`,
    `gcc-c++`, `numactl-libs` and `perl`, and also `wget` (if
    downloading the PGI compiler rather than using a tarball in the
    local build context).

    prefix: The top level install prefix. The default value is
    `/opt/pgi`.

    system_cuda: Boolean flag to specify whether the PGI compiler
    should use the system CUDA. If False, the version(s) of CUDA
    bundled with the PGI compiler will be installed. The default
    value is False.

    system_libnuma: Boolean flag to specify whether the system libnuma
    library should be symlinked over the library bundled with the PGI
    compiler. The default value is True.

    tarball: Path to the PGI compiler tarball relative to the local
    build context. The default value is empty. This parameter is
    required.

    # Examples

    ```python
    pgi(eula=True, tarball='pgilinux-2019-1910-x86_64.tar.gz')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(pgi, self).__init__(**kwargs)

        self.__arch_directory = None # Filled in __cpu_arch()
        self.__arch_pkg = None # Filled in by __cpu_arch()
        self.__commands = [] # Filled in by __setup()
        self.__libnuma_path = '' # Filled in __distro()
        self.__runtime_commands = [] # Filled in by __setup()

        # By setting this value to True, you agree to the PGI End-User
        # License Agreement (https://www.pgroup.com/doc/LICENSE.txt)
        self.__eula = kwargs.get('eula', False)

        self.__extended_environment = kwargs.get('extended_environment', False)
        self.__fix_ownership = kwargs.get('fix_ownership', False)
        self.__mpi = kwargs.get('mpi', False)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__prefix = kwargs.get('prefix', '/opt/pgi')
        self.__system_cuda = kwargs.get('system_cuda', False)
        self.__system_libnuma = kwargs.get('system_libnuma', True)
        self.__tarball = kwargs.get('tarball', '')
        self.__version = '' # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Exposed toolchain so other building blocks can compile with PGI
        self.toolchain = toolchain(CC='pgcc', CXX='pgc++', F77='pgfortran',
                                   F90='pgfortran', FC='pgfortran')

        # tarball parameter is required
        if not self.__tarball:
            raise RuntimeError('PGI install package must be set')

        # Set the CPU architecture specific parameters
        self.__cpu_arch()

        # Set the Linux distribution specific parameters
        self.__distro()

        self.__basepath = posixpath.join(self.__prefix, self.__arch_directory)
        self.__basepath_llvm = posixpath.join(self.__prefix,
                                              '{}-llvm'.format(
                                                  self.__arch_directory))

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('PGI compiler version {}'.format(self.__version))
        # Use tarball from local build context
        self += copy(src=self.__tarball,
                     dest=posixpath.join(self.__wd,
                                         posixpath.basename(self.__tarball)))
        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())

    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""

        if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            self.__arch_directory = 'linuxpower'
            self.__arch_pkg = 'openpower'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            self.__arch_directory = 'linux86-64'
            self.__arch_pkg = 'x64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['gcc', 'g++', 'libnuma1', 'perl']
                if self.__mpi:
                    self.__ospackages.append('openssh-client')
            self.__runtime_ospackages = ['libnuma1']
            if self.__mpi:
                self.__runtime_ospackages.append('openssh-client')
            self.__libnuma_path = '/usr/lib/x86_64-linux-gnu'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['gcc', 'gcc-c++', 'numactl-libs', 'perl']
                if self.__mpi:
                    self.__ospackages.append('openssh-clients')
            self.__runtime_ospackages = ['numactl-libs']
            if self.__mpi:
                self.__runtime_ospackages.append('openssh-clients')
            self.__libnuma_path = '/usr/lib64'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __environment(self, runtime=False):
        """Define environment variables.  Returns a dict mapping variable
        names to values; the exact set depends on whether the runtime
        or development environment is requested, whether the MPI
        component is selected, and the extended_environment flag."""
        e = {}

        pgi_path = posixpath.join(self.__basepath, self.__version)
        mpi_path = posixpath.join(pgi_path, 'mpi', 'openmpi')
        if LooseVersion(self.__version) >= LooseVersion('19.4'):
            # The bundled Open MPI directory is versioned starting
            # with PGI 19.4
            mpi_path = posixpath.join(pgi_path, 'mpi', 'openmpi-3.1.3')

        if runtime:
            # Runtime environment
            if self.__mpi:
                # PGI MPI component is selected
                e['LD_LIBRARY_PATH'] = '{}:{}:$LD_LIBRARY_PATH'.format(
                    posixpath.join(mpi_path, 'lib'),
                    posixpath.join(pgi_path, 'lib'))
                e['PATH'] = '{}:$PATH'.format(
                    posixpath.join(mpi_path, 'bin'))
            else:
                # PGI MPI component is not selected
                e['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(
                    posixpath.join(pgi_path, 'lib'))
        else:
            # Development environment
            if self.__extended_environment:
                # Mirror the environment defined by the pgi environment module
                e = {'CC': posixpath.join(pgi_path, 'bin', 'pgcc'),
                     'CPP': '"{} -Mcpp"'.format(
                         posixpath.join(pgi_path, 'bin', 'pgcc')),
                     'CXX': posixpath.join(pgi_path, 'bin', 'pgc++'),
                     'F77': posixpath.join(pgi_path, 'bin', 'pgf77'),
                     'F90': posixpath.join(pgi_path, 'bin', 'pgf90'),
                     'FC': posixpath.join(pgi_path, 'bin', 'pgfortran'),
                     'MODULEPATH': '{}:$MODULEPATH'.format(
                         posixpath.join(self.__prefix, 'modulefiles'))}
                if self.__mpi:
                    # PGI MPI component is selected
                    e['LD_LIBRARY_PATH'] = '{}:{}:$LD_LIBRARY_PATH'.format(
                        posixpath.join(mpi_path, 'lib'),
                        posixpath.join(pgi_path, 'lib'))
                    e['PATH'] = '{}:{}:$PATH'.format(
                        posixpath.join(mpi_path, 'bin'),
                        posixpath.join(pgi_path, 'bin'))
                    e['PGI_OPTL_INCLUDE_DIRS'] = posixpath.join(
                        mpi_path, 'include')
                    e['PGI_OPTL_LIB_DIRS'] = posixpath.join(mpi_path, 'lib')
                else:
                    # PGI MPI component is not selected
                    e['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(
                        posixpath.join(pgi_path, 'lib'))
                    e['PATH'] = '{}:$PATH'.format(
                        posixpath.join(pgi_path, 'bin'))
            else:
                # Basic environment only
                if self.__mpi:
                    e['LD_LIBRARY_PATH'] = '{}:{}:$LD_LIBRARY_PATH'.format(
                        posixpath.join(mpi_path, 'lib'),
                        posixpath.join(pgi_path, 'lib'))
                    e['PATH'] = '{}:{}:$PATH'.format(
                        posixpath.join(mpi_path, 'bin'),
                        posixpath.join(pgi_path, 'bin'))
                else:
                    # PGI MPI component is not selected
                    e = {'PATH': '{}:$PATH'.format(posixpath.join(pgi_path,
                                                                  'bin')),
                         'LD_LIBRARY_PATH': '{}:$LD_LIBRARY_PATH'.format(
                             posixpath.join(pgi_path, 'lib'))}

        return e

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""

        # Use tarball from local build context
        tarball = posixpath.basename(self.__tarball)

        # Figure out the version from the tarball name, e.g.,
        # "pgilinux-2019-1910-x86_64.tar.gz" -> "19.10"
        match = re.match(r'pgilinux-\d+-(?P<year>\d\d)0?(?P<month>[1-9][0-9]?)',
                         tarball)
        if match and match.groupdict()['year'] and match.groupdict()['month']:
            self.__version = '{0}.{1}'.format(match.groupdict()['year'],
                                              match.groupdict()['month'])
        else:
            logging.warning('could not determine PGI version')
            self.__version = '19.10'

        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball),
            directory=posixpath.join(self.__wd, 'pgi')))

        # Environment variables consumed by the PGI silent installer
        flags = {'PGI_ACCEPT_EULA': 'accept',
                 'PGI_INSTALL_DIR': self.__prefix,
                 'PGI_INSTALL_MPI': 'false',
                 'PGI_INSTALL_NVIDIA': 'true',
                 'PGI_MPI_GPU_SUPPORT': 'false',
                 'PGI_SILENT': 'true'}
        if not self.__eula:
            # This will fail when building the container
            logging.warning('PGI EULA was not accepted')
            flags['PGI_ACCEPT_EULA'] = 'decline'
            flags['PGI_SILENT'] = 'false'
        if self.__system_cuda:
            flags['PGI_INSTALL_NVIDIA'] = 'false'
        if self.__mpi:
            flags['PGI_INSTALL_MPI'] = 'true'
            flags['PGI_MPI_GPU_SUPPORT'] = 'true'
        flag_string = ' '.join('{0}={1}'.format(key, val)
                               for key, val in sorted(flags.items()))

        self.__commands.append('cd {0} && {1} ./install'.format(
            posixpath.join(self.__wd, 'pgi'), flag_string))

        # Create siterc to specify use of the system CUDA
        siterc = posixpath.join(self.__basepath, self.__version, 'bin', 'siterc')
        if self.__system_cuda:
            self.__commands.append('echo "set CUDAROOT=/usr/local/cuda;" >> {}'.format(siterc))

        # Create siterc to respect LIBRARY_PATH
        # https://www.pgroup.com/support/faq.htm#lib_path_ldflags
        self.__commands.append(r'echo "variable LIBRARY_PATH is environment(LIBRARY_PATH);" >> {}'.format(siterc))
        self.__commands.append(r'echo "variable library_path is default(\$if(\$LIBRARY_PATH,\$foreach(ll,\$replace(\$LIBRARY_PATH,":",), -L\$ll)));" >> {}'.format(siterc))
        self.__commands.append(r'echo "append LDLIBARGS=\$library_path;" >> {}'.format(siterc))

        # Override the installer behavior and force the use of the
        # system libnuma library
        if self.__system_libnuma:
            self.__commands.append('ln -sf {0} {1}'.format(
                posixpath.join(self.__libnuma_path, 'libnuma.so.1'),
                posixpath.join(self.__basepath, self.__version, 'lib',
                               'libnuma.so')))
            self.__commands.append('ln -sf {0} {1}'.format(
                posixpath.join(self.__libnuma_path, 'libnuma.so.1'),
                posixpath.join(self.__basepath, self.__version, 'lib',
                               'libnuma.so.1')))

        # Some installed files are owned by uid 921 / gid 1004.
        # Fix it so that all files are owned by root.
        if self.__fix_ownership:
            self.__commands.append('chown -R root.root {}'.format(
                self.__prefix))

        # Cleanup
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, 'pgi')]))

        # libnuma.so and libnuma.so.1 must be symlinks to the system
        # libnuma library. They are originally symlinks, but Docker
        # "COPY -from" copies the file pointed to by the symlink,
        # converting them to files, so recreate the symlinks.
        self.__runtime_commands.append('ln -sf {0} {1}'.format(
            posixpath.join(self.__libnuma_path, 'libnuma.so.1'),
            posixpath.join(self.__basepath, self.__version, 'lib',
                           'libnuma.so')))
        self.__runtime_commands.append('ln -sf {0} {1}'.format(
            posixpath.join(self.__libnuma_path, 'libnuma.so.1'),
            posixpath.join(self.__basepath, self.__version, 'lib',
                           'libnuma.so.1')))

        # Set the environment
        self.environment_variables = self.__environment()
        self.runtime_environment_variables = self.__environment(runtime=True)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        p = pgi(...)
        Stage0 += p
        Stage1 += p.runtime()
        ```
        """
        self.rt += comment('PGI compiler')
        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)

        pgi_path = posixpath.join(self.__basepath, self.__version)

        src_path = pgi_path
        if (LooseVersion(self.__version) >= LooseVersion('19.4') and
            hpccm.config.g_cpu_arch == cpu_arch.X86_64):
            # Too many levels of symlinks for the Docker builder to
            # handle, so use the real path
            src_path = posixpath.join(self.__basepath_llvm, self.__version)

        self.rt += copy(_from=_from,
                        src=posixpath.join(src_path, 'REDIST', '*.so*'),
                        dest=posixpath.join(pgi_path, 'lib', ''))

        # REDIST workaround for incorrect libcudaforwrapblas.so
        # symlink
        if (LooseVersion(self.__version) >= LooseVersion('18.10') and
            LooseVersion(self.__version) < LooseVersion('19.10') and
            hpccm.config.g_cpu_arch == cpu_arch.X86_64):
            self.rt += copy(_from=_from,
                            src=posixpath.join(pgi_path, 'lib',
                                               'libcudaforwrapblas.so'),
                            dest=posixpath.join(pgi_path, 'lib',
                                                'libcudaforwrapblas.so'))

        if self.__mpi:
            mpi_path = posixpath.join(pgi_path, 'mpi', 'openmpi')
            if LooseVersion(self.__version) >= LooseVersion('19.4'):
                # Bundled Open MPI directory is versioned from 19.4 on
                mpi_path = posixpath.join(pgi_path, 'mpi', 'openmpi-3.1.3')
            self.rt += copy(_from=_from, src=mpi_path, dest=mpi_path)

        if self.__runtime_commands:
            self.rt += shell(commands=self.__runtime_commands)

        self.rt += environment(variables=self.environment_step(runtime=True))
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/pgi.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""SENSEI building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import hpccm.templates.git
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class sensei(bb_base, hpccm.templates.git):
    """The `sensei` building block configures, builds, and installs the
    [SENSEI](https://sensei-insitu.org) component.

    The [CMake](#cmake) building block should be installed prior to
    this building block.

    In most cases, one or both of the [Catalyst](#catalyst) or
    [Libsim](#libsim) building blocks should be installed.

    If GPU rendering will be used then a
    [cudagl](https://hub.docker.com/r/nvidia/cudagl) base image is
    recommended.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    branch: The branch of SENSEI to use. The default value is
    `v2.1.1`.

    catalyst: Flag to specify the location of the ParaView/Catalyst
    installation, e.g., `/usr/local/catalyst`. If set, then the
    [Catalyst](#catalyst) building block should be installed prior to
    this building block. The default value is empty.

    cmake_opts: List of options to pass to `cmake`. The default value
    is `-DENABLE_SENSEI=ON`.

    libsim: Flag to specify the location of the VisIt/Libsim
    installation, e.g., `/usr/local/visit`. If set, then the
    [Libsim](#libsim) building block should be installed prior to this
    building block. The `vtk` option should also be set. The default
    value is empty.

    miniapps: Boolean flag to specify whether the SENSEI mini-apps
    should be built and installed. The default is False.

    ospackages: List of OS packages to install prior to configuring
    and building. The default values are `ca-certificates`, `git`,
    and `make`.

    prefix: The top level install location. The default value is
    `/usr/local/sensei`.

    repository: The git repository to clone. The default is
    `https://gitlab.kitware.com/sensei/sensei.git`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    vtk: Flag to specify the location of the VTK installation. If
    `libsim` is defined, this option must be set to the Libsim VTK
    location, e.g.,
    `/usr/local/visit/third-party/vtk/6.1.0/linux-x86_64_gcc-5.4/lib/cmake/vtk-6.1`. Note
    that the compiler version is embedded in the Libsim VTK path. The
    compiler version may differ depending on which base image is used;
    version 5.4 corresponds to Ubuntu 16.04. The default value is
    empty.

    # Examples

    ```python
    sensei(branch='v2.1.1', catalyst='/usr/local/catalyst',
           prefix='/opt/sensei')
    ```

    ```python
    sensei(libsim='/usr/local/visit',
           vtk='/usr/local/visit/third-party/vtk/6.1.0/linux-x86_64_gcc-5.4/lib/cmake/vtk-6.1')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(sensei, self).__init__(**kwargs)

        # Note: kwargs.pop is used so these parameters are not also
        # forwarded to generic_cmake below
        self.__branch = kwargs.pop('branch', 'v2.1.1')
        self.__catalyst = kwargs.pop('catalyst', '')
        self.__cmake_opts = kwargs.pop('cmake_opts', ['-DENABLE_SENSEI=ON'])
        self.__libsim = kwargs.pop('libsim', '')
        self.__miniapps = kwargs.pop('miniapps', False)
        self.__ospackages = kwargs.pop('ospackages', ['ca-certificates', 'git',
                                                      'make'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/sensei')
        self.__repository = kwargs.pop('repository', 'https://gitlab.kitware.com/sensei/sensei.git')
        self.__vtk = kwargs.pop('vtk', '')

        # Set the cmake options
        self.__cmake()

        # Setup build configuration
        self.__bb = generic_cmake(
            base_annotation=self.__class__.__name__,
            branch=self.__branch,
            comment=False,
            cmake_opts=self.__cmake_opts,
            prefix=self.__prefix,
            repository=self.__repository,
            **kwargs)

        # Container instructions
        self += comment('SENSEI version {}'.format(self.__branch))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __cmake(self):
        """Setup cmake options based on users parameters"""

        # Configure
        if self.__catalyst:
            self.__cmake_opts.extend(
                ['-DENABLE_CATALYST=ON',
                 '-DParaView_DIR={}'.format(self.__catalyst)])
        if self.__libsim:
            self.__cmake_opts.extend(
                ['-DENABLE_LIBSIM=ON',
                 '-DLIBSIM_DIR={}'.format(self.__libsim)])
            if not self.__miniapps:
                # Disable the Libsim mini-apps
                self.__cmake_opts.extend(
                    ['-DENABLE_PARALLEL3D=OFF', '-DENABLE_OSCILLATORS=OFF'])
            else:
                # NOTE(review): C99 is forced only for the Libsim +
                # mini-apps combination -- presumably the mini-apps
                # require it; confirm against the SENSEI build docs
                self.__cmake_opts.append('-DCMAKE_C_STANDARD=99')
        if self.__vtk:
            self.__cmake_opts.append(
                '-DVTK_DIR={}'.format(self.__vtk))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        s = sensei(...)
        Stage0 += s
        Stage1 += s.runtime()
        ```
        """
        self.rt += comment('SENSEI')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/sensei.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""UCX building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six import string_types
from distutils.version import StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class ucx(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
          hpccm.templates.ldconfig):
    """The `ucx` building block configures, builds, and installs the
    [UCX](https://github.com/openucx/ucx) component.

    An InfiniBand building block ([OFED](#ofed) or [Mellanox
    OFED](#mlnx_ofed)) should be installed prior to this building
    block. One or all of the [gdrcopy](#gdrcopy), [KNEM](#knem), and
    [XPMEM](#xpmem) building blocks should also be installed prior to
    this building block.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.

    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.

    configure_opts: List of options to pass to `configure`. The
    default values are `--enable-optimizations`, `--disable-logging`,
    `--disable-debug`, `--disable-assertions`,
    `--disable-params-check`, and `--disable-doxygen-doc`.

    cuda: Flag to control whether a CUDA aware build is performed. If
    True, adds `--with-cuda=/usr/local/cuda` to the list of
    `configure` options. If a string, uses the value of the string as
    the CUDA path. If the toolchain specifies `CUDA_HOME`, then that
    path is used. If False, adds `--without-cuda` to the list of
    `configure` options. The default value is an empty string.

    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.

    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, and `PATH`) should be
    modified to include UCX. The default is True.

    gdrcopy: Flag to control whether gdrcopy is used by the build. If
    True, adds `--with-gdrcopy` to the list of `configure` options.
    If a string, uses the value of the string as the gdrcopy path,
    e.g., `--with-gdrcopy=/path/to/gdrcopy`. If False, adds
    `--without-gdrcopy` to the list of `configure` options. The
    default is an empty string, i.e., include neither `--with-gdrcopy`
    nor `--without-gdrcopy` and let `configure` try to automatically
    detect whether gdrcopy is present or not.

    knem: Flag to control whether KNEM is used by the build. If True,
    adds `--with-knem` to the list of `configure` options. If a
    string, uses the value of the string as the KNEM path, e.g.,
    `--with-knem=/path/to/knem`. If False, adds `--without-knem` to
    the list of `configure` options. The default is an empty string,
    i.e., include neither `--with-knem` nor `--without-knem` and let
    `configure` try to automatically detect whether KNEM is present or
    not.

    ldconfig: Boolean flag to specify whether the UCX library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the UCX library
    directory. The default value is False.

    ofed: Flag to control whether OFED is used by the build. If True,
    adds `--with-verbs` and `--with-rdmacm` to the list of `configure`
    options. If a string, uses the value of the string as the OFED
    path, e.g., `--with-verbs=/path/to/ofed`. If False, adds
    `--without-verbs` and `--without-rdmacm` to the list of
    `configure` options. The default is an empty string, i.e.,
    include neither `--with-verbs` nor `--without-verbs` and let
    `configure` try to automatically detect whether OFED is present or
    not.

    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `binutils-dev`,
    `file`, `libnuma-dev`, `make`, and `wget`. For RHEL-based Linux
    distributions, the default values are `binutils-devel`, `file`,
    `make`, `numactl-devel`, and `wget`. If the `repository`
    parameter is set, then `autoconf`, `automake`, `ca-certificates`,
    `git`, and `libtool` are also included.

    prefix: The top level install location. The default value is
    `/usr/local/ucx`.

    repository: The location of the git repository that should be used to build UCX. If True, then use the default `https://github.com/openucx/ucx.git`
    repository. The default is empty, i.e., use the release package
    specified by `version`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default value is empty.

    url: The location of the tarball that should be used to build UCX.
    The default is empty, i.e., use the release package specified by
    `version`.

    version: The version of UCX source to download. The default value
    is `1.9.0`.

    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.

    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.

    xpmem: Flag to control whether XPMEM is used by the build. If
    True, adds `--with-xpmem` to the list of `configure` options. If
    a string, uses the value of the string as the XPMEM path, e.g.,
    `--with-xpmem=/path/to/xpmem`. If False, adds `--without-xpmem`
    to the list of `configure` options. The default is an empty
    string, i.e., include neither `--with-xpmem` nor `--without-xpmem`
    and let `configure` try to automatically detect whether XPMEM is
    present or not.

    # Examples

    ```python
    ucx(cuda=False, prefix='/opt/ucx/1.4.0', version='1.4.0')
    ```

    ```python
    ucx(cuda='/usr/local/cuda', gdrcopy='/usr/local/gdrcopy',
        knem='/usr/local/knem', xpmem='/usr/local/xpmem')
    ```

    ```python
    ucx(repository='https://github.com/openucx/ucx.git')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(ucx, self).__init__(**kwargs)

        # Parameters
        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/openucx/ucx/releases/download')
        self.__configure_opts = kwargs.pop('configure_opts',
                                           ['--enable-optimizations',
                                            '--disable-logging',
                                            '--disable-debug',
                                            '--disable-assertions',
                                            '--disable-params-check',
                                            '--disable-doxygen-doc'])
        self.__cuda = kwargs.pop('cuda', True)
        self.__default_repository = 'https://github.com/openucx/ucx.git'
        self.__gdrcopy = kwargs.pop('gdrcopy', '')
        self.__knem = kwargs.pop('knem', '')
        self.__ofed = kwargs.pop('ofed', '')
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/ucx')
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '1.9.0')
        self.__xpmem = kwargs.pop('xpmem', '')

        # Set the configure options
        self.__configure()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the download specific parameters.  __download() may
        # rewrite self.repository / self.url, so propagate the final
        # values into kwargs for the generic_autotools building block.
        self.__download()
        kwargs['repository'] = self.repository
        kwargs['url'] = self.url

        # Setup the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, 'lib'))
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        # Only prepend to LD_LIBRARY_PATH when the library directory
        # is not registered in the dynamic linker cache
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))

        # Setup build configuration
        self.__bb = generic_autotools(
            annotations={'version': self.__version} if not self.repository else {},
            base_annotation=self.__class__.__name__,
            comment=False,
            configure_opts=self.__configure_opts,
            devel_environment=self.environment_variables,
            # A git checkout has no pre-generated configure script
            preconfigure=['./autogen.sh'] if self.repository else None,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            toolchain=self.__toolchain,
            **kwargs)

        # Container instructions
        if self.repository:
            if self.branch:
                self += comment('UCX {} {}'.format(self.repository,
                                                   self.branch))
            elif self.commit:
                self += comment('UCX {} {}'.format(self.repository,
                                                   self.commit))
            else:
                self += comment('UCX {}'.format(self.repository))
        else:
            self += comment('UCX version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __configure(self):
        """Setup configure options based on user parameters.

        Each optional dependency flag is three-valued: a string is an
        explicit path, True enables autodetection of the path, an
        explicit False disables the feature, and the default empty
        string adds no option at all (configure decides)."""

        # CUDA
        if self.__cuda:
            if isinstance(self.__cuda, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-cuda={}'.format(self.__cuda))
            elif self.__toolchain.CUDA_HOME:
                self.__configure_opts.append(
                    '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))
            else:
                # Default location
                self.__configure_opts.append('--with-cuda=/usr/local/cuda')
        else:
            self.__configure_opts.append('--without-cuda')

        # GDRCOPY
        if self.__gdrcopy:
            if isinstance(self.__gdrcopy, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-gdrcopy={}'.format(self.__gdrcopy))
            else:
                # Boolean, let UCX try to figure out where to find it
                self.__configure_opts.append('--with-gdrcopy')
        elif self.__gdrcopy == False:
            # Explicit False (not merely the default empty string)
            self.__configure_opts.append('--without-gdrcopy')

        # KNEM
        if self.__knem:
            if isinstance(self.__knem, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-knem={}'.format(self.__knem))
            else:
                # Boolean, let UCX try to figure out where to find it
                self.__configure_opts.append('--with-knem')
        elif self.__knem == False:
            # Explicit False (not merely the default empty string)
            self.__configure_opts.append('--without-knem')

        # OFED
        if self.__ofed:
            if isinstance(self.__ofed, string_types):
                # Use specified path
                self.__configure_opts.extend(
                    ['--with-verbs={}'.format(self.__ofed),
                     '--with-rdmacm={}'.format(self.__ofed)])
            else:
                # Boolean, let UCX try to figure out where to find it
                self.__configure_opts.extend(['--with-verbs', '--with-rdmacm'])
        elif self.__ofed == False:
            # Explicit False (not merely the default empty string)
            self.__configure_opts.extend(['--without-verbs',
                                          '--without-rdmacm'])

        # XPMEM
        if self.__xpmem:
            if isinstance(self.__xpmem, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-xpmem={}'.format(self.__xpmem))
            else:
                # Boolean, let UCX try to figure out where to find it
                self.__configure_opts.append('--with-xpmem')
        elif self.__xpmem == False:
            # Explicit False (not merely the default empty string)
            self.__configure_opts.append('--without-xpmem')

        # Workaround for format warning considered an error on Power
        if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            if not self.__toolchain.CFLAGS:
                self.__toolchain.CFLAGS = '-Wno-error=format'

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['binutils-dev', 'file', 'libnuma-dev',
                                     'make', 'wget']
                # A git build additionally needs the autotools chain
                if self.repository:
                    self.__ospackages.extend(['autoconf', 'automake',
                                              'ca-certificates', 'git',
                                              'libtool'])
            # The binutils runtime package was renamed in Ubuntu 18.04
            if hpccm.config.g_linux_version >= StrictVersion('18.0'):
                self.__runtime_ospackages = ['libbinutils']
            else:
                self.__runtime_ospackages = ['binutils']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['binutils-devel', 'file', 'make',
                                     'numactl-devel', 'wget']
                # A git build additionally needs the autotools chain
                if self.repository:
                    self.__ospackages.extend(['autoconf', 'automake',
                                              'ca-certificates', 'git',
                                              'libtool'])
            self.__runtime_ospackages = ['binutils']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __download(self):
        """Set download source based on user parameters"""

        # Use the default repository if set to True
        if self.repository is True:
            self.repository = self.__default_repository

        # Fall back to the versioned release tarball when neither a
        # repository nor an explicit URL was given
        if not self.repository and not self.url:
            tarball = 'ucx-{}.tar.gz'.format(self.__version)
            self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,
                                             tarball)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        u = ucx(...)
        Stage0 += u
        Stage1 += u.runtime()
        ```
        """
        self.rt += comment('UCX')
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/ucx.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MVAPICH2 building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.sed
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class mvapich2(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
               hpccm.templates.sed):
    """The `mvapich2` building block configures, builds, and installs the
    [MVAPICH2](http://mvapich.cse.ohio-state.edu) component.
    Depending on the parameters, the source will be downloaded from
    the web (default) or copied from a source directory in the local
    build context.

    An InfiniBand building block ([OFED](#ofed) or [Mellanox
    OFED](#mlnx_ofed)) should be installed prior to this building
    block.

    As a side effect, a toolchain is created containing the MPI
    compiler wrappers.  The tool can be passed to other operations
    that want to build using the MPI compiler wrappers.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    check: Boolean flag to specify whether the `make check` step
    should be performed.  The default is False.

    configure_opts: List of options to pass to `configure`.  The
    default value is `--disable-mcast`.

    cuda: Boolean flag to control whether a CUDA aware build is
    performed.  If True, adds `--enable-cuda --with-cuda` to the list
    of `configure` options, otherwise adds `--disable-cuda`.  If the
    toolchain specifies `CUDA_HOME`, then that path is used, otherwise
    `/usr/local/cuda` is used for the path.  The default value is
    True.

    directory: Path to the unpackaged source directory relative to
    the local build context.  The default value is empty.  If this is
    defined, the source in the local build context will be used rather
    than downloading the source from the web.

    disable_FEATURE: Flags to control disabling features when
    configuring.  For instance, `disable_foo=True` maps to
    `--disable-foo`.  Underscores in the parameter name are converted
    to dashes.

    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring.  For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    MVAPICH2. The default is True.

    gpu_arch: The GPU architecture to use.  Older versions of MVAPICH2
    (2.3b and previous) were hard-coded to use "sm_20".  This option
    has no effect on more recent MVAPICH2 versions.  The default value
    is to use the MVAPICH2 default.

    ldconfig: Boolean flag to specify whether the MVAPICH2 library
    directory should be added dynamic linker cache.  If False, then
    `LD_LIBRARY_PATH` is modified to include the MVAPICH2 library
    directory.  The default value is False.

    ospackages: List of OS packages to install prior to configuring
    and building.  For Ubuntu, the default values are `byacc`, `file`,
    `flex`, `make`, `openssh-client`, and `wget`.  For RHEL-based
    Linux distributions, the default values are `byacc`, `file`,
    `flex`, `make`, `openssh-clients`, and `wget`.

    prefix: The top level install location.  The default value is
    `/usr/local/mvapich2`.

    toolchain: The toolchain object.  This should be used if
    non-default compilers or other toolchain options are needed.  The
    default is empty.

    version: The version of MVAPICH2 source to download.  This value
    is ignored if `directory` is set.  The default value is `2.3.4`.

    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring.  For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`.  Underscores in the parameter name
    are converted to dashes.

    without_PACKAGE: Flags to control optional packages when
    configuring.  For instance `without_foo=True` maps to
    `--without-foo`.  Underscores in the parameter name are converted
    to dashes.

    # Examples

    ```python
    mvapich2(cuda=False, prefix='/opt/mvapich2/2.3a', version='2.3a')
    ```

    ```python
    mvapich2(directory='sources/mvapich2-2.3b')
    ```

    ```python
    n = nvhpc(eula=True)
    mvapich2(toolchain=n.toolchain)
    ```

    ```python
    mvapich2(configure_opts=['--disable-fortran', '--disable-mcast'])
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(mvapich2, self).__init__(**kwargs)

        self.__baseurl = kwargs.pop('baseurl',
                                    'http://mvapich.cse.ohio-state.edu/download/mvapich/mv2')
        self.__configure_opts = kwargs.pop('configure_opts', ['--disable-mcast'])
        self.__cuda = kwargs.pop('cuda', True)
        self.__gpu_arch = kwargs.pop('gpu_arch', None)
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__preconfigure = []
        self.__prefix = kwargs.pop('prefix', '/usr/local/mvapich2')
        self.__runtime_ospackages = [] # Filled in by __distro()
        # Input toolchain, i.e., what to use when building
        # Create a copy of the toolchain so that it can be modified
        self.__toolchain = _copy(kwargs.pop('toolchain', toolchain()))
        self.__version = kwargs.pop('version', '2.3.4')

        # MVAPICH2 does not accept F90
        self.toolchain_control = {'CC': True, 'CXX': True, 'F77': True,
                                  'F90': False, 'FC': True}

        # Output toolchain, i.e., the MPI compiler wrappers callers
        # can use to build MPI applications
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifort')

        # Set the configure options (also fills in self.__preconfigure)
        self.__configure()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Setup the environment variables
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        self.runtime_environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        # Only prepend to LD_LIBRARY_PATH when the library directory
        # is not registered in the dynamic linker cache
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
            self.runtime_environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        if self.__cuda:
            # Workaround for using compiler wrappers in the build stage
            self.environment_variables['PROFILE_POSTLIB'] = '"-L{} -lnvidia-ml -lcuda"'.format('/usr/local/cuda/lib64/stubs')

        # Setup build configuration
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            comment=False,
            configure_opts=self.__configure_opts,
            devel_environment=self.environment_variables,
            preconfigure=self.__preconfigure,
            prefix=self.__prefix,
            runtime_environment=self.runtime_environment_variables,
            toolchain=self.__toolchain,
            url='{0}/mvapich2-{1}.tar.gz'.format(self.__baseurl,
                                                 self.__version),
            **kwargs)

        # Container instructions
        self += comment('MVAPICH2 version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['byacc', 'file', 'flex', 'make',
                                     'openssh-client', 'wget']
            self.__runtime_ospackages = ['openssh-client']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['byacc', 'file', 'flex', 'make',
                                     'openssh-clients', 'wget']
            self.__runtime_ospackages = ['openssh-clients']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __set_gpu_arch(self):
        """Older versions of MVAPICH2 (2.3b and previous) were hard-coded to
        use the "sm_20" GPU architecture.  Use the specified value
        instead.

        NOTE(review): this method has no body beyond this docstring and
        is never invoked; the sed substitution it describes is actually
        performed in __configure().  Candidate for removal."""

    def __configure(self):
        """Setup configure options, toolchain flags, and preconfigure
        steps based on user parameters"""

        # MVAPICH2 does not accept F90
        self.__toolchain.F90 = ''

        # Workarounds when using the NV compilers
        if (self.__toolchain.CC and re.match('.*nvc', self.__toolchain.CC) and
            not self.__toolchain.CFLAGS):
            self.__toolchain.CFLAGS = '-fpic -DPIC'
            self.__configure_opts.append('ac_cv_c_compiler_gnu=no')
        if (self.__toolchain.F77 and
            re.match('.*nvfortran', self.__toolchain.F77) and
            not self.__toolchain.FFLAGS):
            self.__toolchain.FFLAGS = '-fpic -DPIC'
        if (self.__toolchain.FC and
            re.match('.*nvfortran', self.__toolchain.FC) and
            not self.__toolchain.FCFLAGS):
            self.__toolchain.FCFLAGS = '-fpic -DPIC'

        # CUDA
        if self.__cuda:
            cuda_home = "/usr/local/cuda"
            if self.__toolchain.CUDA_HOME:
                cuda_home = self.__toolchain.CUDA_HOME

            # The PGI compiler needs some special handling for CUDA.
            # http://mvapich.cse.ohio-state.edu/static/media/mvapich/mvapich2-2.0-userguide.html#x1-120004.5
            if self.__toolchain.CC and re.match('.*pgcc', self.__toolchain.CC):
                self.__configure_opts.append(
                    '--enable-cuda=basic --with-cuda={}'.format(cuda_home))

                # Work around issue when using PGI 19.4
                self.__configure_opts.append('--enable-fast=O1')

                if not self.__toolchain.CFLAGS:
                    self.__toolchain.CFLAGS = '-ta=tesla:nordc'

                if not self.__toolchain.CPPFLAGS:
                    self.__toolchain.CPPFLAGS = '-D__x86_64 -D__align__\(n\)=__attribute__\(\(aligned\(n\)\)\) -D__location__\(a\)=__annotate__\(a\) -DCUDARTAPI='

                if not self.__toolchain.LD_LIBRARY_PATH:
                    self.__toolchain.LD_LIBRARY_PATH = posixpath.join(
                        cuda_home, 'lib64', 'stubs') + ':$LD_LIBRARY_PATH'
            else:
                if not self.__toolchain.LD_LIBRARY_PATH:
                    self.__toolchain.LD_LIBRARY_PATH = posixpath.join(
                        cuda_home, 'lib64', 'stubs') + ':$LD_LIBRARY_PATH'
                self.__configure_opts.append(
                    '--enable-cuda --with-cuda={}'.format(cuda_home))

            # Workaround for using compiler wrappers in the build stage:
            # the stub libraries lack the ".so.1" names the linker wants
            self.__preconfigure.append('ln -s {0} {1}'.format(
                posixpath.join(cuda_home, 'lib64', 'stubs', 'libnvidia-ml.so'),
                posixpath.join(cuda_home, 'lib64', 'stubs',
                               'libnvidia-ml.so.1')))
            self.__preconfigure.append('ln -s {0} {1}'.format(
                posixpath.join(cuda_home, 'lib64', 'stubs', 'libcuda.so'),
                posixpath.join(cuda_home, 'lib64', 'stubs', 'libcuda.so.1')))

            # Older versions of MVAPICH2 (2.3b and previous) were
            # hard-coded to use the "sm_20" GPU architecture.  Use the
            # specified value instead.
            if self.__gpu_arch:
                self.__preconfigure.append(
                    self.sed_step(file='Makefile.in',
                                  patterns=[r's/-arch sm_20/-arch {}/g'.format(self.__gpu_arch)]))
        else:
            self.__configure_opts.append('--disable-cuda')

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        m = mvapich2(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        self.rt += comment('MVAPICH2')
        # TODO: move the definition of runtime ospackages
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/mvapich2.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""CMake building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import posixpath
import re
import hpccm.config
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from distutils.version import LooseVersion
from hpccm.primitives.comment import comment
from hpccm.primitives.shell import shell
from hpccm.primitives.environment import environment
class cmake(bb_base, hpccm.templates.rm, hpccm.templates.tar,
            hpccm.templates.wget):
    """The `cmake` building block downloads and installs the
    [CMake](https://cmake.org) component.

    # Parameters

    bootstrap_opts: List of options to pass to `bootstrap` when
    building from source.  The default is an empty list.

    eula: By setting this value to `True`, you agree to the [CMake End-User License Agreement](https://gitlab.kitware.com/cmake/cmake/raw/master/Copyright.txt).
    The default value is `False`.

    ospackages: List of OS packages to install prior to installing.
    The default values are `make` and `wget`.

    parallel: The number of build jobs to use when building from
    source.  The default value is `$(nproc)`, i.e., the number of
    available processors.

    prefix: The top level install location.  The default value is
    `/usr/local`.

    source: Boolean flag to specify whether to build CMake from
    source.  If True, includes the `libssl-dev` package in the list of
    OS packages for Ubuntu, and `openssl-devel` for RHEL-based
    distributions.  For x86_64 and aarch64 processors, the default is
    False, i.e., use the available pre-compiled package.  For all
    other processors, the default is True.

    version: The version of CMake to download.  The default value is
    `3.25.1`.

    wd: The working directory to use when downloading and unpacking.
    The default is the global HPCCM working directory.

    # Examples

    ```python
    cmake(eula=True)
    ```

    ```python
    cmake(eula=True, version='3.10.3')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(cmake, self).__init__(**kwargs)

        self.__baseurl = kwargs.get('baseurl', 'https://github.com/Kitware/CMake/releases/download')
        self.__bootstrap_opts = kwargs.get('bootstrap_opts', [])

        # By setting this value to True, you agree to the CMake
        # End-User License Agreement
        # (https://gitlab.kitware.com/cmake/cmake/raw/master/Copyright.txt)
        self.__eula = kwargs.get('eula', False)

        # NOTE(review): appends elsewhere in this class mutate this
        # list in place, so a caller-supplied 'ospackages' list is
        # modified as a side effect
        self.__ospackages = kwargs.get('ospackages', ['make', 'wget'])
        self.__parallel = kwargs.get('parallel', '$(nproc)')
        self.__prefix = kwargs.get('prefix', '/usr/local')
        self.__source = kwargs.get('source', False)
        self.__version = kwargs.get('version', '3.25.1')

        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""
        self += comment('CMake version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables={'PATH': '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))})

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        if not self.__source and hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            # Use the pre-compiled x86_64 binary
            self.__binary()
        elif not self.__source and hpccm.config.g_cpu_arch == cpu_arch.AARCH64 and LooseVersion(self.__version) >= LooseVersion('3.20'):
            # Use the pre-compiled aarch64 binary (only published for
            # CMake 3.20 and later)
            self.__binary()
        else:
            # Build from source
            self.__build()

    def __binary(self):
        """Install the pre-compiled binary"""
        # CMake renamed its installer from "Linux" to "linux" starting
        # with version 3.20
        runfile = 'cmake-{}-linux-x86_64.sh'
        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            runfile = 'cmake-{}-linux-aarch64.sh'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64 and LooseVersion(self.__version) < LooseVersion('3.20'):
            runfile = 'cmake-{}-Linux-x86_64.sh'
        runfile = runfile.format(self.__version)

        if LooseVersion(self.__version) < LooseVersion('3.1'):
            runfile = 'cmake-{}-Linux-i386.sh'.format(self.__version)

            # CMake releases of versions < 3.1 only include 32-bit
            # binaries:
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__ospackages.append('libc6-i386')
            elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
                self.__ospackages.append('glibc.i686')
            else: # pragma: no cover
                raise RuntimeError('Unknown Linux distribution')

        url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version, runfile)

        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        self.__commands.append('mkdir -p {}'.format(self.__prefix))

        # Run the runfile
        if self.__eula:
            self.__commands.append(
                '/bin/sh {0} --prefix={1} --skip-license'.format(
                    posixpath.join(self.__wd, runfile), self.__prefix))
        else:
            # This will fail when building the container
            logging.warning('CMake EULA was not accepted')
            self.__commands.append(
                '/bin/sh {0} --prefix={1}'.format(
                    posixpath.join(self.__wd, runfile), self.__prefix))

        # Cleanup runfile
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, runfile)]))

    def __build(self):
        """Build from source"""
        tarball = 'cmake-{}.tar.gz'.format(self.__version)
        url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version, tarball)

        # Include SSL packages
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            self.__ospackages.append('libssl-dev')
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            self.__ospackages.append('openssl-devel')
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))

        # Build and install
        if not self.__bootstrap_opts:
            self.__bootstrap_opts.append(
                '--parallel={}'.format(self.__parallel))
        self.__commands.append('cd {} && ./bootstrap --prefix={} {}'.format(
            posixpath.join(self.__wd, 'cmake-{}'.format(self.__version)),
            self.__prefix,
            ' '.join(self.__bootstrap_opts)))
        self.__commands.append('make -j{}'.format(self.__parallel))
        self.__commands.append('make install')

        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd,
                                  'cmake-{}'.format(self.__version))]))
| hpc-container-maker-master | hpccm/building_blocks/cmake.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MKL building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class mkl(bb_base, hpccm.templates.envvars, hpccm.templates.wget):
    """The `mkl` building block downloads and installs the [Intel Math
    Kernel Library](http://software.intel.com/mkl).
    You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement)
    to use this building block.
    # Parameters
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and other variables) should be
    modified to include MKL. The default is True.
    eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement).
    The default value is `False`.
    mklvars: MKL provides an environment script (`mklvars.sh`) to
    setup the MKL environment. If this value is `True`, the bashrc is
    modified to automatically source this environment script.
    However, the MKL environment is not automatically available to
    subsequent container image build steps; the environment is
    available when the container image is run. To set the MKL
    environment in subsequent build steps you can explicitly call
    `source /opt/intel/mkl/bin/mklvars.sh intel64` in each build step.
    If this value is set to `False`, then the environment is set such
    that the environment is visible to both subsequent container image
    build steps and when the container image is run. However, the
    environment may differ slightly from that set by `mklvars.sh`.
    The default value is `True`.
    ospackages: List of OS packages to install prior to installing
    MKL. For Ubuntu, the default values are `apt-transport-https`,
    `ca-certificates`, `gnupg`, and `wget`. For RHEL-based Linux
    distributions, the default is an empty list.
    version: The version of MKL to install. The default value is
    `2020.0-088`.
    # Examples
    ```python
    mkl(eula=True, version='2018.3-051')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(mkl, self).__init__(**kwargs)
        # By setting this value to True, you agree to the
        # corresponding Intel End User License Agreement
        # (https://software.intel.com/en-us/articles/end-user-license-agreement)
        self.__eula = kwargs.get('eula', False)
        self.__mklvars = kwargs.get('mklvars', True)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__version = kwargs.get('version', '2020.0-088')
        self.__year = '2019'  # Also used by 2018 and 2020 versions
        self.__bashrc = ''  # Filled in by __distro()
        # MKL only publishes x86_64 packages; warn (but do not fail) on
        # other architectures.
        if hpccm.config.g_cpu_arch != cpu_arch.X86_64: # pragma: no cover
            logging.warning('Using mkl on a non-x86_64 processor')
        # Set the Linux distribution specific parameters
        self.__distro()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('MKL version {}'.format(self.__version))
        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)
        # Installing MKL implies acceptance of the EULA, so refuse to
        # generate instructions unless the user explicitly accepted it.
        if not self.__eula:
            raise RuntimeError('Intel EULA was not accepted. To accept, see the documentation for this building block')
        # Enable the Intel package repository (apt or yum) and install
        # the versioned MKL package.
        self += packages(
            apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            apt_repositories=['deb https://apt.repos.intel.com/mkl all main'],
            ospackages=['intel-mkl-64bit-{}'.format(self.__version)],
            yum_keys=['https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            yum_repositories=['https://yum.repos.intel.com/mkl/setup/intel-mkl.repo'])
        # Set the environment
        if self.__mklvars:
            # Source the mklvars environment script when starting the
            # container, but the variables will not be available for
            # any subsequent build steps.
            self += shell(commands=['echo "source /opt/intel/mkl/bin/mklvars.sh intel64" >> {}'.format(self.__bashrc)])
        else:
            # Set the environment so that it will be available to
            # subsequent build steps and when starting the container,
            # but this may miss some things relative to the mklvars
            # environment script.
            self.environment_variables = {
                'CPATH': '/opt/intel/mkl/include:$CPATH',
                'LD_LIBRARY_PATH': '/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LD_LIBRARY_PATH',
                'LIBRARY_PATH': '/opt/intel/mkl/lib/intel64:/opt/intel/lib/intel64:$LIBRARY_PATH',
                'MKLROOT': '/opt/intel/mkl'}
            self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['apt-transport-https', 'ca-certificates',
                                     'gnupg', 'wget']
            self.__bashrc = '/etc/bash.bashrc'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = []
            self.__bashrc = '/etc/bashrc'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        m = mkl(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        # MKL is installed from OS packages, so the runtime instructions
        # are identical to the build instructions.
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/mkl.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""conda building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import logging
import posixpath
import hpccm.config
import hpccm.templates.rm
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.shell import shell
class conda(bb_base, hpccm.templates.rm, hpccm.templates.wget):
    """The `conda` building block installs Anaconda.
    You must agree to the [Anaconda End User License Agreement](https://docs.anaconda.com/anaconda/eula/) to use this building block.
    # Parameters
    channels: List of additional Conda channels to enable. The
    default is an empty list.
    environment: Path to the Conda environment file to clone. The
    default value is empty.
    eula: By setting this value to `True`, you agree to the [Anaconda End User License Agreement](https://docs.anaconda.com/anaconda/eula/).
    The default value is `False`.
    ospackages: List of OS packages to install prior to installing
    Conda. The default values are `ca-certificates` and `wget`.
    packages: List of Conda packages to install. The default is an
    empty list.
    prefix: The top level install location. The default value is
    `/usr/local/anaconda`.
    python2: Boolean flag to specify that the Python 2 version of
    Anaconda should be installed. The default is False.
    python_subversion: The Python version to install. This value is
    ignored if the Conda version is less than 4.8. The default is
    `py310` if using Python 3, and `py27` if using Python 2.
    version: The version of Anaconda to download. The default value
    is `23.1.0-1` if using Python 3, and `4.8.3` if using Python 2.
    # Examples
    ```python
    conda(packages=['numpy'])
    ```
    ```python
    conda(channels=['conda-forge', 'nvidia'], prefix='/opt/conda')
    ```
    ```python
    conda(environment='environment.yml')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(conda, self).__init__(**kwargs)
        self.__arch_pkg = ''  # Filled in by __cpu_arch()
        # Use HTTPS by default: the downloaded installer is executed
        # with "bash" in __setup(), so a plain-HTTP download would be
        # vulnerable to tampering in transit.
        self.__baseurl = kwargs.get('baseurl',
                                    'https://repo.anaconda.com/miniconda')
        self.__channels = kwargs.get('channels', [])
        self.__environment = kwargs.get('environment', None)
        # By setting this value to True, you agree to the
        # corresponding Anaconda End User License Agreement
        # https://docs.anaconda.com/anaconda/eula/
        self.__eula = kwargs.get('eula', False)
        self.__ospackages = kwargs.get('ospackages',
                                       ['ca-certificates', 'wget'])
        self.__packages = kwargs.get('packages', [])
        self.__prefix = kwargs.get('prefix', '/usr/local/anaconda')
        self.__python2 = kwargs.get('python2', False)
        self.__python_version = '2' if self.__python2 else '3'
        self.__python_subversion = kwargs.get(
            'python_subversion', 'py27' if self.__python2 else 'py310')
        self.__version = kwargs.get('version', '4.8.3' if self.__python2 else '23.1.0-1')
        self.__commands = []  # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd)  # working directory
        if not self.__eula:
            logging.warning('Anaconda EULA was not accepted. To accept, see the documentation for this building block')
        # Set the CPU architecture specific parameters
        self.__cpu_arch()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Anaconda')
        self += packages(ospackages=self.__ospackages)
        # Copy a user supplied environment file into the image so
        # "conda env update" can consume it.
        if self.__environment:
            self += copy(src=self.__environment, dest=posixpath.join(
                self.__wd, posixpath.basename(self.__environment)))
        self += shell(commands=self.__commands)
    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            self.__arch_pkg = 'ppc64le'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            self.__arch_pkg = 'x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # Starting with Miniconda 4.8 the installer file name includes
        # the Python subversion, e.g., Miniconda3-py310_23.1.0-1-...
        if LooseVersion(self.__version) >= LooseVersion('4.8'):
            miniconda = 'Miniconda{0}-{1}_{2}-Linux-{3}.sh'.format(
                self.__python_version, self.__python_subversion,
                self.__version, self.__arch_pkg)
        else:
            miniconda = 'Miniconda{0}-{1}-Linux-{2}.sh'.format(
                self.__python_version, self.__version, self.__arch_pkg)
        url = '{0}/{1}'.format(self.__baseurl, miniconda)
        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        # Install
        install_args = ['-p {}'.format(self.__prefix)]
        if self.__eula:
            # "-b" runs the installer in batch mode, which implies
            # acceptance of the EULA.
            install_args.append('-b')
        self.__commands.append('bash {0} {1}'.format(
            posixpath.join(self.__wd, miniconda),
            ' '.join(sorted(install_args))))
        # Initialize conda
        self.__commands.append('{0} init'.format(
            posixpath.join(self.__prefix, 'bin', 'conda')))
        self.__commands.append('ln -s {} /etc/profile.d/conda.sh'.format(
            posixpath.join(self.__prefix, 'etc', 'profile.d', 'conda.sh')))
        # Activate the base environment, but only when a follow-on step
        # (channels, environment file, or package install) needs it.
        if self.__channels or self.__environment or self.__packages:
            self.__commands.append('. {}'.format(
                posixpath.join(self.__prefix, 'etc', 'profile.d', 'conda.sh')))
            self.__commands.append('conda activate base')
        # Enable channels
        if self.__channels:
            self.__commands.append('conda config {}'.format(
                ' '.join(['--add channels {}'.format(x)
                          for x in sorted(self.__channels)])))
        # Install environment
        if self.__environment:
            self.__commands.append('conda env update -f {}'.format(
                posixpath.join(self.__wd,
                               posixpath.basename(self.__environment))))
            self.__commands.append(self.cleanup_step(
                items=[posixpath.join(
                    self.__wd, posixpath.basename(self.__environment))]))
        # Install conda packages
        if self.__packages:
            self.__commands.append('conda install -y {}'.format(
                ' '.join(sorted(self.__packages))))
        # Cleanup conda install
        self.__commands.append('{0} clean -afy'.format(
            posixpath.join(self.__prefix, 'bin', 'conda')))
        # Cleanup miniconda download file
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, miniconda)]))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        c = conda(...)
        Stage0 += c
        Stage1 += c.runtime()
        ```
        """
        self.rt += comment('Anaconda')
        # Copy the entire install prefix from the build stage and
        # re-initialize conda in the runtime stage.
        self.rt += copy(_from=_from, src=self.__prefix, dest=self.__prefix)
        self.rt += shell(commands=[
            '{0} init'.format(
                posixpath.join(self.__prefix, 'bin', 'conda')),
            'ln -s {0} /etc/profile.d/conda.sh'.format(
                posixpath.join(self.__prefix, 'etc', 'profile.d',
                               'conda.sh'))])
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/conda.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Charm++ building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.sed
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class charm(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
            hpccm.templates.rm, hpccm.templates.sed, hpccm.templates.tar,
            hpccm.templates.wget):
    """The `charm` building block downloads and install the
    [Charm++](http://charm.cs.illinois.edu/research/charm) component.
    # Parameters
    basedir: List of additional include and library paths for building
    Charm++. The default is an empty list.
    check: Boolean flag to specify whether the test cases should be
    run. The default is False.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and other variables) should be
    modified to include Charm++. The default is True.
    ldconfig: Boolean flag to specify whether the Charm++ library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the Charm++ library
    directory. The default value is False.
    options: List of additional options to use when building Charm++.
    The default values are `--build-shared`, and `--with-production`.
    ospackages: List of OS packages to install prior to configuring
    and building. The default values are `autoconf`, `automake`,
    `git`, `libtool`, `make`, and `wget`.
    parallel: The number of build jobs to pass to the Charm++ build
    script via `-j`. The default value is `$(nproc)`, i.e., the
    number of processors available at container build time.
    prefix: The top level install prefix. The default value is
    `/usr/local`.
    target: The target Charm++ framework to build. The default value
    is `charm++`.
    target_architecture: The target machine architecture to build.
    For x86_64 processors, the default value is
    `multicore-linux-x86_64`. For aarch64 processors, the default
    value is `multicore-arm8`. For ppc64le processors, the default is
    `multicore-linux-ppc64le`.
    version: The version of Charm++ to download. The default value is
    `6.10.2`.
    # Examples
    ```python
    charm(prefix='/opt', version='6.8.2')
    ```
    ```python
    charm(target_architecture='mpi-linux-x86_64')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block.

        Reads user options from kwargs, computes the install directory
        (which depends on the Charm++ version), and then fills in the
        container instructions."""
        super(charm, self).__init__(**kwargs)
        self.__basedir = kwargs.get('basedir', [])
        self.__baseurl = kwargs.get('baseurl',
                                    'https://github.com/UIUC-PPL/charm/archive')
        self.__check = kwargs.get('check', False)
        self.__options = kwargs.get('options', ['--build-shared',
                                                '--with-production'])
        self.__ospackages = kwargs.get('ospackages',
                                       ['autoconf', 'automake', 'git',
                                        'libtool', 'make', 'wget'])
        self.__parallel = kwargs.get('parallel', '$(nproc)')
        self.__prefix = kwargs.get('prefix', '/usr/local')
        self.__target = kwargs.get('target', 'charm++')
        self.__target_architecture = kwargs.get('target_architecture', '')
        self.__version = kwargs.get('version', '6.10.2')
        # Version 6.9.0 dropped the 'v' from directory name
        if LooseVersion(self.__version) >= LooseVersion('6.9.0'):
            self.__installdir = posixpath.join(
                self.__prefix, 'charm-{}'.format(self.__version))
        else:
            self.__installdir = posixpath.join(
                self.__prefix, 'charm-v{}'.format(self.__version))
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        self.__commands = [] # Filled in by __setup()
        # Set the CPU architecture specific parameters
        self.__cpu_arch()
        # Construct series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions: comment header, OS package
        prerequisites, build commands, and environment variables."""
        self += comment('Charm++ version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            if not self.__target_architecture:
                self.__target_architecture = 'multicore-arm8'
        elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            if not self.__target_architecture:
                self.__target_architecture = 'multicore-linux-ppc64le'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            if not self.__target_architecture:
                self.__target_architecture = 'multicore-linux-x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # GitHub archive tarballs are named after the tag, e.g. v6.10.2
        tarball = 'v{}.tar.gz'.format(self.__version)
        url = '{0}/{1}'.format(self.__baseurl, tarball)
        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        # Charm++ does not install nicely into a separate directory,
        # even with "--destination". So just untar it into the
        # destination directory prefix.
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball),
            directory=self.__prefix))
        # Charm++ is hard-coded to use pgCC rather than pgc++ when the
        # PGI compiler is selected. Replace pgCC with pgc++.
        # But... PGI is not really supported by Charm++:
        # https://charm.cs.illinois.edu/redmine/issues/234
        if 'pgcc' in self.__options:
            self.__commands.append(
                self.sed_step(
                    file=posixpath.join(self.__installdir, 'src', 'arch',
                                        'common', 'cc-pgcc.sh'),
                    patterns=[r's/pgCC/pgc++/g']))
        # Construct options string: user options first, then any
        # --basedir include/library paths
        options = []
        if self.__options:
            options.extend(self.__options)
        if self.__basedir:
            options.extend(['--basedir={}'.format(x) for x in self.__basedir])
        # Build
        self.__commands.append('cd {} && ./build {} {} {} -j{}'.format(
            self.__installdir, self.__target, self.__target_architecture,
            ' '.join(options), self.__parallel))
        # Set library path
        libpath = posixpath.join(self.__installdir, 'lib_so')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(directory=libpath))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)
        # Check the build
        if self.__check:
            self.__commands.append('cd {} && make test'.format(
                posixpath.join(self.__installdir, 'tests', 'charm++')))
        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball)]))
        # Set the environment
        self.environment_variables['CHARMBASE'] = self.__installdir
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__installdir, 'bin'))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Example
        ```python
        c = charm(...)
        Stage0 += c
        Stage1 += c.runtime()
        ```
        """
        self.rt += comment('Charm++')
        # Copy the whole install tree; Charm++ is built in place under
        # the install prefix (see __setup above).
        self.rt += copy(_from=_from, src=self.__installdir,
                        dest=self.__installdir)
        if self.ldconfig:
            # NOTE(review): this uses prefix/lib_so while __setup uses
            # installdir/lib_so -- confirm the intended directory.
            self.rt += shell(commands=[self.ldcache_step(
                directory=posixpath.join(self.__prefix, 'lib_so'))])
        self.rt += environment(variables=self.environment_step())
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/charm.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""NCCL building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import posixpath
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.config import get_cpu_architecture
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
class nccl(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
           hpccm.templates.ldconfig):
    """The `nccl` building block installs the
    [NCCL](https://developer.nvidia.com/nccl) component.
    # Parameters
    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.
    build: Boolean flag to specify whether NCCL should be built from
    source. The default value is False.
    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.
    cuda: Flag to specify the CUDA version of the package to download.
    The default is `11.6`. This option is ignored if build is True.
    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, and `PATH`) should be
    modified to include NCCL. The default is True. This option is
    ignored if build is False.
    make_variables: Dictionary of environment variables and values to
    set when building NCCL. The default is an empty dictionary. This
    option is ignored if build is False.
    ospackages: List of OS packages to install prior to building. The
    default values are `make` and `wget`.
    prefix: The top level install location. The default value is
    `/usr/local/nccl`. This option is ignored if build is False.
    repository: The location of the git repository that should be used to build NCCL. If True, then use the default `https://github.com/NVIDIA/nccl.git`
    repository. The default is empty, i.e., use the release package
    specified by `version`.
    version: The version of NCCL to install. The default value is
    `2.12.10-1`.
    # Examples
    ```python
    nccl(cuda='11.0', version='2.7.6-1')
    ```
    ```python
    nccl(build=True, version='2.7.6-1')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block.

        Two modes: install prebuilt distro packages (default), or
        build from source when `build=True`."""
        super(nccl, self).__init__(**kwargs)
        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/NVIDIA/nccl/archive')
        self.__build = kwargs.pop('build', False)
        self.__build_environment = '' # Filled in by __configure
        self.__default_repository = 'https://github.com/NVIDIA/nccl.git'
        self.__distro_label = '' # Filled in by __distro
        self.__cuda = kwargs.pop('cuda', '11.6')
        self.__make_variables = kwargs.pop('make_variables', {})
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/nccl')
        # Popped here so it is not forwarded to generic_build via
        # **kwargs below; the value itself is otherwise unused.
        self.__src_directory = kwargs.pop('src_directory', None)
        self.__version = kwargs.pop('version', '2.12.10-1')
        # NOTE(review): 'wd' is read with get() rather than pop(), so it
        # remains in kwargs and is also forwarded to generic_build --
        # confirm this is intended.
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        if not self.__build:
            # Install prebuild package
            # Set the Linux distribution specific parameters
            self.__distro()
            self += comment('NCCL {}'.format(self.__version))
            self += packages(ospackages=self.__ospackages)
            # Add the NVIDIA CUDA package repository (apt or yum) and
            # install the versioned runtime and development packages.
            self += packages(
                apt=['libnccl2={0}+cuda{1}'.format(self.__version,
                                                   self.__cuda),
                     'libnccl-dev={0}+cuda{1}'.format(self.__version,
                                                      self.__cuda)],
                apt_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/3bf863cc.pub'.format(self.__distro_label, get_cpu_architecture())],
                apt_repositories=['deb https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1} /'.format(self.__distro_label, get_cpu_architecture())],
                yum=['libnccl-{0}+cuda{1}'.format(self.__version, self.__cuda),
                     'libnccl-devel-{0}+cuda{1}'.format(self.__version,
                                                        self.__cuda)],
                yum_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/3bf863cc.pub'.format(self.__distro_label, get_cpu_architecture())],
                yum_repositories=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}'.format(self.__distro_label, get_cpu_architecture())])
        else:
            # Build from source
            # Set the build options
            self.__configure()
            self.__download()
            # Pass the resolved download source on to generic_build
            kwargs['repository'] = self.repository
            kwargs['url'] = self.url
            # Setup the environment variables
            self.environment_variables['CPATH'] = '{}:$CPATH'.format(
                posixpath.join(self.__prefix, 'include'))
            self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
                posixpath.join(self.__prefix, 'lib'))
            self.environment_variables['PATH'] = '{}:$PATH'.format(
                posixpath.join(self.__prefix, 'bin'))
            # Only set LD_LIBRARY_PATH when the library directory is
            # not added to the dynamic linker cache
            if not self.ldconfig:
                self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
            self.__bb = generic_build(
                base_annotation=self.__class__.__name__,
                build = ['{} make -j$(nproc) install'.format(
                    self.__build_environment)],
                comment=False,
                devel_environment=self.environment_variables,
                # Release tarballs unpack into nccl-<version>; git
                # clones use the repository's own directory name
                directory='nccl-{}'.format(self.__version) if not self.repository else None,
                prefix=self.__prefix,
                runtime_environment=self.environment_variables,
                **kwargs)
            self += comment('NCCL')
            self += packages(ospackages=self.__ospackages)
            self += self.__bb
    def __configure(self):
        """Setup build options based on user parameters.

        Produces a "KEY=VAL KEY=VAL ..." prefix string (sorted by key)
        that is prepended to the make command."""
        e = {}
        e['PREFIX'] = self.__prefix
        if self.__make_variables:
            e.update(self.__make_variables)
        l = []
        if e:
            for key, val in sorted(e.items()):
                l.append('{0}={1}'.format(key, val))
        self.__build_environment = ' '.join(l)
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['apt-transport-https', 'ca-certificates',
                                     'gnupg', 'wget']
            if hpccm.config.g_linux_version >= StrictVersion('18.0'):
                self.__distro_label = 'ubuntu1804'
            else:
                self.__distro_label = 'ubuntu1604'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__distro_label = 'rhel8'
            else:
                self.__distro_label = 'rhel7'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __download(self):
        """Set download source based on user parameters.

        Resolves `repository` (git clone) vs `url` (release tarball)
        and fills in the OS packages needed for the chosen method."""
        if not self.__ospackages:
            self.__ospackages = ['make', 'wget']
            if hpccm.config.g_linux_distro == linux_distro.CENTOS:
                self.__ospackages.append('which')
            if self.repository:
                self.__ospackages.append('git')
        # Use the default repository if set to True
        if self.repository is True:
            self.repository = self.__default_repository
        if not self.repository and not self.url:
            self.url = '{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version)
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        n = nccl(...)
        Stage0 += n
        Stage1 += n.runtime()
        ```
        """
        self.rt += comment('NCCL')
        if self.__build:
            # Source build: copy the install prefix from the previous
            # stage and re-export the environment
            self.rt += copy(_from=_from, src=self.__prefix, dest=self.__prefix)
            self.rt += environment(variables=self.environment_step())
        else:
            # Package install: only the runtime package (libnccl2 /
            # libnccl) is needed, not the -dev/-devel package
            self.rt += packages(ospackages=self.__ospackages)
            self.rt += packages(
                apt=['libnccl2={0}+cuda{1}'.format(self.__version,
                                                   self.__cuda)],
                apt_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/3bf863cc.pub'.format(self.__distro_label, get_cpu_architecture())],
                apt_repositories=['deb https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1} /'.format(self.__distro_label, get_cpu_architecture())],
                yum=['libnccl-{0}+cuda{1}'.format(self.__version, self.__cuda)],
                yum_keys=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}/3bf863cc.pub'.format(self.__distro_label, get_cpu_architecture())],
                yum_repositories=['https://developer.download.nvidia.com/compute/cuda/repos/{0}/{1}'.format(self.__distro_label, get_cpu_architecture())])
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/nccl.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Boost building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import re
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class boost(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
            hpccm.templates.rm, hpccm.templates.tar, hpccm.templates.wget):
    """The `boost` building block downloads and installs the
    [Boost](https://www.boost.org) component.

    # Parameters

    b2_opts: List of options to pass to `b2`. The default is an empty
    list.

    bootstrap_opts: List of options to pass to `bootstrap.sh`. The
    default is an empty list.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`) should be modified to include Boost. The
    default is True.

    ldconfig: Boolean flag to specify whether the Boost library
    directory should be added to the dynamic linker cache. If False,
    then `LD_LIBRARY_PATH` is modified to include the Boost library
    directory. The default value is False.

    ospackages: List of OS packages to install prior to building. For
    Ubuntu, the default values are `bzip2`, `libbz2-dev`, `tar`,
    `wget`, and `zlib1g-dev`. For RHEL-based Linux distributions the
    default values are `bzip2`, `bzip2-devel`, `tar`, `wget`, `which`,
    and `zlib-devel`.

    prefix: The top level installation location. The default value
    is `/usr/local/boost`.

    python: Boolean flag to specify whether Boost should be built with
    Python support. If enabled, the Python C headers need to be
    installed (typically this can be done by adding `python-dev` or
    `python-devel` to the list of OS packages). This flag is ignored
    if `bootstrap_opts` is set. The default is False.

    sourceforge: Boolean flag to specify whether Boost should be
    downloaded from SourceForge rather than the current Boost
    repository. For versions of Boost older than 1.63.0, the
    SourceForge repository should be used. The default is False.

    version: The version of Boost source to download. The default
    value is `1.76.0`.

    # Examples

    ```python
    boost(prefix='/opt/boost/1.67.0', version='1.67.0')
    ```

    ```python
    boost(sourceforge=True, version='1.57.0')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(boost, self).__init__(**kwargs)

        # Copy the user supplied option lists so that the appends in
        # __setup() do not mutate the caller's lists as a side effect
        # (e.g., reusing the same list for two instances would
        # otherwise accumulate '-j.../-q install' entries).
        self.__b2_opts = list(kwargs.get('b2_opts', []))
        # '__version__' is a placeholder substituted with the actual
        # version string in __setup()
        self.__baseurl = kwargs.get('baseurl',
                                    'https://boostorg.jfrog.io/artifactory/main/release/__version__/source')
        self.__bootstrap_opts = list(kwargs.get('bootstrap_opts', []))
        self.__ospackages = kwargs.get('ospackages', [])
        self.__parallel = kwargs.get('parallel', '$(nproc)')
        self.__prefix = kwargs.get('prefix', '/usr/local/boost')
        self.__python = kwargs.get('python', False)
        self.__sourceforge = kwargs.get('sourceforge', False)
        self.__version = kwargs.get('version', '1.76.0')

        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # SourceForge hosts older Boost releases; the URL also uses
        # the '__version__' placeholder
        if self.__sourceforge:
            self.__baseurl = 'https://sourceforge.net/projects/boost/files/boost/__version__'

        # Set the Linux distribution specific parameters
        self.__distro()

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Boost version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'libbz2-dev', 'tar', 'wget',
                                     'zlib1g-dev']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'bzip2-devel', 'tar', 'wget',
                                     'which', 'zlib-devel']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""

        # The download tarball name uses the version with underscores
        # (e.g., 1_76_0), so pull apart the full version to get the
        # individual components.
        match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)',
                         self.__version)
        v_underscore = '{0}_{1}_{2}'.format(match.groupdict()['major'],
                                            match.groupdict()['minor'],
                                            match.groupdict()['revision'])

        tarball = 'boost_{}.tar.bz2'.format(v_underscore)
        url = '{0}/{1}'.format(self.__baseurl, tarball)
        url = url.replace('__version__', self.__version)

        # Python support requires pyconfig.h which is not part of the
        # standard Python install.  It requires the development
        # package, python-dev or python-devel.  So skip Python unless
        # it's specifically enabled.
        if not self.__bootstrap_opts and not self.__python:
            self.__bootstrap_opts.append('--without-libraries=python')

        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))

        # Configure
        self.__commands.append(
            'cd {} && ./bootstrap.sh --prefix={} {}'.format(
                posixpath.join(self.__wd, 'boost_{}'.format(v_underscore)),
                self.__prefix,
                ' '.join(self.__bootstrap_opts)))

        # Build and install.  The 'cd' issued by the configure command
        # above is still in effect when './b2' runs, so it resolves
        # inside the extracted source directory.
        self.__b2_opts.append('-j{}'.format(self.__parallel))
        self.__b2_opts.append('-q install')
        self.__commands.append('./b2 {0}'.format(' '.join(self.__b2_opts)))

        # Set library path
        libpath = posixpath.join(self.__prefix, 'lib')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(directory=libpath))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)

        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd,
                                  'boost_{}'.format(v_underscore))]))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        b = boost(...)
        Stage0 += b
        Stage1 += b.runtime()
        ```
        """
        self.rt += comment('Boost')
        self.rt += copy(_from=_from, src=self.__prefix, dest=self.__prefix)
        if self.ldconfig:
            self.rt += shell(commands=[self.ldcache_step(
                directory=posixpath.join(self.__prefix, 'lib'))])
        self.rt += environment(variables=self.environment_step())
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/boost.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Catalyst building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import posixpath
import re
import hpccm.config
import hpccm.templates.CMakeBuild
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class catalyst(bb_base, hpccm.templates.CMakeBuild, hpccm.templates.envvars,
               hpccm.templates.ldconfig, hpccm.templates.rm,
               hpccm.templates.tar, hpccm.templates.wget):
    """The `catalyst` building block configures, builds, and installs the
    [ParaView Catalyst](https://www.paraview.org/in-situ/) component.
    The [CMake](#cmake) building block should be installed prior to
    this building block.
    A MPI building block should be installed prior to this building
    block.
    If GPU rendering will be used then a
    [cudagl](https://hub.docker.com/r/nvidia/cudagl) base image is
    recommended.
    # Parameters
    cmake_opts: List of options to pass to `cmake`. The default is an
    empty list.
    edition: The Catalyst edition to use. Valid choices are `Base`,
    `Base-Essentials`, `Base-Essentials-Extras`,
    `Base-Essentials-Extras-Rendering-Base`, `Base-Enable-Python`,
    `Base-Enable-Python-Essentials`,
    `Base-Enable-Python-Essentials-Extras`, and
    `Base-Enable-Python-Essentials-Extras-Rendering-Base`. If a
    Python edition is selected, then the [Python](#python) building
    block should be installed with development libraries prior to this
    building block. The default value is
    `Base-Enable-Python-Essentials-Extras-Rendering-Base`.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    ParaView Catalyst. The default is True.
    ldconfig: Boolean flag to specify whether the Catalyst library
    directory should be added to the dynamic linker cache. If False,
    then `LD_LIBRARY_PATH` is modified to include the Catalyst library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `git`, `gzip`,
    `make`, `tar`, and `wget`. If a rendering edition is selected
    then `libxau-dev`, `libxext-dev`, `libxt-dev`, `libice-dev`,
    `libsm-dev`, `libx11-dev`, `libgl1-mesa-dev` are also included.
    For RHEL-based Linux distributions, the default values are `git`,
    `gzip`, `make`, `tar`, `wget`, and `which`. If a rendering
    edition is selected then `libX11-devel`, `libXau-devel`,
    `libXext-devel`, `libXt-devel`, `libICE-devel`, `libSM-devel`,
    `libglvnd-devel`, `mesa-libGL-devel` are also included.
    prefix: The top level install location. The default value is
    `/usr/local/catalyst`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    version: The version of Catalyst source to download. The default
    value is `5.6.1`.
    # Examples
    ```python
    catalyst(prefix='/opt/catalyst/5.6.0', version='5.6.0')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(catalyst, self).__init__(**kwargs)
        # NOTE(review): cmake_opts and prefix are intentionally public
        # attributes, presumably read by the inherited CMakeBuild
        # template's configure_step -- confirm before renaming.
        self.cmake_opts = kwargs.get('cmake_opts', [])
        self.__edition = kwargs.get('edition', 'Base-Enable-Python-Essentials-Extras-Rendering-Base')
        self.__ospackages = kwargs.get('ospackages', [])
        self.prefix = kwargs.get('prefix', '/usr/local/catalyst')
        self.__runtime_ospackages = [] # Filled in by __distro()
        # Input toolchain, i.e., what to use when building
        self.__toolchain = kwargs.get('toolchain', toolchain())
        self.__version = kwargs.get('version', '5.6.1')
        # URL template: {0} is the vMAJOR.MINOR directory component and
        # {1} is the tarball filename.  The '&' characters are escaped
        # so the URL survives being embedded in a shell command.
        self.__url = r'https://www.paraview.org/paraview-downloads/download.php?submit=Download\&version={0}\&type=catalyst\&os=Sources\&downloadFile={1}'
        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        # Validate edition choice; an invalid edition falls back to
        # the minimal 'Base-Essentials' edition with a warning rather
        # than raising.
        if self.__edition not in [
                'Base', 'Base-Essentials', 'Base-Essentials-Extras',
                'Base-Essentials-Extras-Rendering-Base',
                'Base-Enable-Python', 'Base-Enable-Python-Essentials',
                'Base-Enable-Python-Essentials-Extras',
                'Base-Enable-Python-Essentials-Extras-Rendering-Base']:
            logging.warning('Invalid Catalyst edition "{0}", defaulting to '
                            'Base-Essentials'.format(self.__edition))
            self.__edition = 'Base-Essentials'
        # Basename of both the tarball (minus extension) and the
        # extracted source directory
        self.__basename = 'Catalyst-v{0}-{1}'.format(self.__version,
                                                     self.__edition)
        # Set the Linux distribution specific parameters
        self.__distro()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('ParaView Catalyst version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['git', 'gzip', 'make', 'tar', 'wget']
                # Rendering editions need the X11 / Mesa development
                # headers (only added when using default packages)
                if 'Rendering' in self.__edition:
                    self.__ospackages.extend([
                        'libxau-dev', 'libxext-dev', 'libxt-dev',
                        'libice-dev', 'libsm-dev', 'libx11-dev',
                        'libgl1-mesa-dev'])
            # Runtime stage needs the corresponding shared libraries
            # regardless of whether build packages were user-specified
            if 'Rendering' in self.__edition:
                self.__runtime_ospackages.extend([
                    'libxau6', 'libxext6', 'libxt6', 'libice6', 'libsm6',
                    'libx11-6', 'libgl1-mesa-glx'])
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['git', 'gzip', 'make', 'tar', 'wget',
                                     'which']
                if 'Rendering' in self.__edition:
                    self.__ospackages.extend([
                        'libX11-devel', 'libXau-devel', 'libXext-devel',
                        'libXt-devel', 'libICE-devel', 'libSM-devel',
                        'libglvnd-devel', 'mesa-libGL-devel'])
            if 'Rendering' in self.__edition:
                self.__runtime_ospackages.extend([
                    'libX11', 'libXau', 'libXext', 'libXt', 'libICE', 'libSM',
                    'libglvnd', 'libglvnd-opengl', 'mesa-libGL'])
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # The download URL path contains vMAJOR.MINOR while the
        # tarball contains MAJOR.MINOR.REVISION, so pull apart the
        # full version to get the MAJOR and MINOR components.
        match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', self.__version)
        major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],
                                        match.groupdict()['minor'])
        tarball = self.__basename + '.tar.gz'
        url = self.__url.format(major_minor, tarball)
        # Download source from web.  The download.php URL does not end
        # in the tarball name, so an explicit outfile is required.
        self.__commands.append(self.download_step(
            url=url,
            directory=self.__wd,
            outfile=posixpath.join(self.__wd, tarball)))
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))
        # Configure
        # Catalyst has a cmake.sh shell script that sets configuration
        # options. Use that in place of cmake.
        configure = self.configure_step(
            directory=posixpath.join(self.__wd, self.__basename),
            opts=self.cmake_opts, toolchain=self.__toolchain)
        # Rewrite the generated command line so the 'cmake' invocation
        # becomes a call to the source tree's cmake.sh wrapper
        configure = configure.replace('cmake', '{}/cmake.sh'.format(posixpath.join(self.__wd, self.__basename)))
        self.__commands.append(configure)
        # Build
        self.__commands.append(self.build_step())
        # Install
        self.__commands.append(self.build_step(target='install'))
        # Set library path
        libpath = posixpath.join(self.prefix, 'lib')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(directory=libpath))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)
        # Cleanup
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, self.__basename)]))
        # Set the environment
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.prefix, 'bin'))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        c = catalyst(...)
        Stage0 += c
        Stage1 += c.runtime()
        ```
        """
        self.rt += comment('ParaView Catalyst')
        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += copy(_from=_from, src=self.prefix, dest=self.prefix)
        if self.ldconfig:
            self.rt += shell(commands=[self.ldcache_step(
                directory=posixpath.join(self.prefix, 'lib'))])
        self.rt += environment(variables=self.environment_step())
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/catalyst.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""OpenMPI building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
from six import string_types
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class openmpi(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
              hpccm.templates.ldconfig):
    """The `openmpi` building block configures, builds, and installs the
    [OpenMPI](https://www.open-mpi.org) component.
    As a side effect, a toolchain is created containing the MPI
    compiler wrappers.  The toolchain can be passed to other operations
    that want to build using the MPI compiler wrappers.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.
    check: Boolean flag to specify whether the `make check` step
    should be performed. The default is False.
    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.
    configure_opts: List of options to pass to `configure`. The
    default values are `--disable-getpwuid` and
    `--enable-orterun-prefix-by-default`.
    cuda: Boolean flag to control whether a CUDA aware build is
    performed. If True, adds `--with-cuda` to the list of `configure`
    options, otherwise adds `--without-cuda`. If the toolchain
    specifies `CUDA_HOME`, then that path is used. The default value
    is True.
    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.
    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    OpenMPI. The default is True.
    infiniband: Boolean flag to control whether InfiniBand
    capabilities are included. If True, adds `--with-verbs` to the
    list of `configure` options, otherwise adds `--without-verbs`.
    The default value is True.
    ldconfig: Boolean flag to specify whether the OpenMPI library
    directory should be added to the dynamic linker cache. If False,
    then `LD_LIBRARY_PATH` is modified to include the OpenMPI library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `bzip2`, `file`,
    `hwloc`, `libnuma-dev`, `make`, `openssh-client`, `perl`, `tar`,
    and `wget`. For RHEL-based Linux distributions, the default
    values are `bzip2`, `file`, `hwloc`, `make`, `numactl-devel`,
    `openssh-clients`, `perl`, `tar`, and `wget`. If the `repository`
    parameter is set, then `autoconf`, `automake`, `ca-certificates`,
    `git`, and `libtool` are also included.
    pmi: Flag to control whether PMI is used by the build. If True,
    adds `--with-pmi` to the list of `configure` options. If a
    string, uses the value of the string as the PMI path, e.g.,
    `--with-pmi=/usr/local/slurm-pmi2`. If False, does nothing. The
    default is False.
    pmix: Flag to control whether PMIX is used by the build. If True,
    adds `--with-pmix` to the list of `configure` options. If a
    string, uses the value of the string as the PMIX path, e.g.,
    `--with-pmix=/usr/local/pmix`. If False, does nothing. The
    default is False.
    prefix: The top level install location. The default value is
    `/usr/local/openmpi`.
    repository: The location of the git repository that should be used
    to build OpenMPI. If True, then use the default
    `https://github.com/open-mpi/ompi.git` repository. The default is
    empty, i.e., use the release package specified by `version`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    ucx: Flag to control whether UCX is used by the build. If True,
    adds `--with-ucx` to the list of `configure` options. If a
    string, uses the value of the string as the UCX path, e.g.,
    `--with-ucx=/path/to/ucx`. If False, adds `--without-ucx` to the
    list of `configure` options. The default is False.
    url: The location of the tarball that should be used to build
    OpenMPI. The default is empty, i.e., use the release package
    specified by `version`.
    version: The version of OpenMPI source to download. This
    value is ignored if `directory` is set. The default value is
    `4.0.5`.
    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.
    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.
    # Examples
    ```python
    openmpi(cuda=False, infiniband=False, prefix='/opt/openmpi/2.1.2',
        version='2.1.2')
    ```
    ```python
    openmpi(repository='https://github.com/open-mpi/ompi.git')
    ```
    ```python
    n = nvhpc(eula=True)
    openmpi(toolchain=n.toolchain)
    ```
    ```python
    openmpi(configure_opts=['--disable-getpwuid', '--with-slurm'],
        ospackages=['file', 'hwloc', 'libslurm-dev'])
    ```
    ```python
    openmpi(pmi='/usr/local/slurm-pmi2', pmix='internal')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(openmpi, self).__init__(**kwargs)
        # kwargs.pop() (rather than get()) is used so the parameters
        # consumed here are removed before the remaining kwargs are
        # forwarded to generic_autotools below.
        self.__baseurl = kwargs.pop('baseurl',
                                    'https://www.open-mpi.org/software/ompi')
        self.__configure_opts = kwargs.pop('configure_opts',
                                           ['--disable-getpwuid',
                                            '--enable-orterun-prefix-by-default'])
        self.__cuda = kwargs.pop('cuda', True)
        self.__default_repository = 'https://github.com/open-mpi/ompi.git'
        self.__infiniband = kwargs.pop('infiniband', True)
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__pmi = kwargs.pop('pmi', False)
        self.__pmix = kwargs.pop('pmix', False)
        self.__prefix = kwargs.pop('prefix', '/usr/local/openmpi')
        self.__recursive = kwargs.pop('recursive', True)
        self.__runtime_ospackages = [] # Filled in by __distro()
        # Input toolchain, i.e., what to use when building
        # Create a copy of the toolchain so that it can be modified
        # without impacting the original
        self.__toolchain = _copy(kwargs.pop('toolchain', toolchain()))
        self.__version = kwargs.pop('version', '4.0.5')
        self.__ucx = kwargs.pop('ucx', False)
        # Output toolchain
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifort')
        # Set the configure options
        self.__configure()
        # Set the Linux distribution specific parameters
        self.__distro()
        # Set the download specific parameters
        self.__download()
        # self.repository and self.url are attributes inherited from
        # the downloader template and resolved by __download(); pass
        # the resolved values through to generic_autotools
        kwargs['repository'] = self.repository
        kwargs['url'] = self.url
        # Setup the environment variables
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        # Setup build configuration.  Version annotations only make
        # sense for release tarballs, not git checkouts; git builds
        # need the autogen.pl bootstrap step before configure.
        self.__bb = generic_autotools(
            annotations={'version': self.__version} if not self.repository else {},
            base_annotation=self.__class__.__name__,
            comment=False,
            configure_opts=self.__configure_opts,
            devel_environment=self.environment_variables,
            preconfigure=['./autogen.pl'] if self.repository else None,
            prefix=self.__prefix,
            recursive=self.__recursive,
            runtime_environment=self.environment_variables,
            toolchain=self.__toolchain,
            **kwargs)
        # Container instructions
        if self.repository:
            if self.branch:
                self += comment('OpenMPI {} {}'.format(self.repository,
                                                       self.branch))
            elif self.commit:
                self += comment('OpenMPI {} {}'.format(self.repository,
                                                       self.commit))
            else:
                self += comment('OpenMPI {}'.format(self.repository))
        else:
            self += comment('OpenMPI version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def __configure(self):
        """Setup configure options based on user parameters"""
        # CUDA
        if self.__cuda:
            if isinstance(self.__cuda, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-cuda={}'.format(self.__cuda))
            elif self.__toolchain.CUDA_HOME:
                self.__configure_opts.append(
                    '--with-cuda={}'.format(self.__toolchain.CUDA_HOME))
            else:
                # Default location
                self.__configure_opts.append('--with-cuda')
        else:
            self.__configure_opts.append('--without-cuda')
        # PMI
        if self.__pmi:
            if isinstance(self.__pmi, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-pmi={}'.format(self.__pmi))
            else:
                self.__configure_opts.append('--with-pmi')
        # PMIX
        if self.__pmix:
            if isinstance(self.__pmix, string_types):
                # Use specified path
                self.__configure_opts.append('--with-pmix={}'.format(
                    self.__pmix))
            else:
                self.__configure_opts.append('--with-pmix')
        # InfiniBand
        if self.__infiniband:
            self.__configure_opts.append('--with-verbs')
        else:
            self.__configure_opts.append('--without-verbs')
        # UCX
        if self.__ucx:
            if isinstance(self.__ucx, string_types):
                # Use specified path
                self.__configure_opts.append(
                    '--with-ucx={}'.format(self.__ucx))
            else:
                self.__configure_opts.append('--with-ucx')
        # Workaround when using the NVIDIA compilers
        if self.__toolchain.CC and re.match('.*nvc', self.__toolchain.CC):
            if not self.__toolchain.CFLAGS:
                self.__toolchain.CFLAGS = '-O1'
        # PIC workaround when using the NVIDIA compilers
        if self.__toolchain.FC and re.match('.*nvfortran',
                                            self.__toolchain.FC):
            if not self.__toolchain.FCFLAGS:
                self.__toolchain.FCFLAGS = '-fpic -DPIC'
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'file', 'hwloc', 'libnuma-dev',
                                     'make', 'openssh-client', 'perl',
                                     'tar', 'wget']
                # Building from git requires the autotools bootstrap chain
                if self.repository:
                    self.__ospackages.extend(['autoconf', 'automake',
                                              'ca-certificates', 'git',
                                              'libtool'])
            self.__runtime_ospackages = ['hwloc', 'openssh-client']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'file', 'hwloc', 'make',
                                     'numactl-devel', 'openssh-clients',
                                     'perl', 'tar', 'wget']
                if self.repository:
                    self.__ospackages.extend(['autoconf', 'automake',
                                              'ca-certificates', 'git',
                                              'libtool'])
            self.__runtime_ospackages = ['hwloc', 'openssh-clients']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __download(self):
        """Set download source based on user parameters"""
        # Use the default repository if set to True
        if self.repository is True:
            self.repository = self.__default_repository
        if not self.repository and not self.url:
            # The download URL path contains vMAJOR.MINOR while the
            # tarball contains MAJOR.MINOR.REVISION, so pull apart the
            # full version to get the MAJOR and MINOR components.
            match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', self.__version)
            major_minor = 'v{0}.{1}'.format(match.groupdict()['major'],
                                            match.groupdict()['minor'])
            tarball = 'openmpi-{}.tar.bz2'.format(self.__version)
            self.url = '{0}/{1}/downloads/{2}'.format(
                self.__baseurl, major_minor, tarball)
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        o = openmpi(...)
        Stage0 += o
        Stage1 += o.runtime()
        ```
        """
        self.rt += comment('OpenMPI')
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/openmpi.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""NVIDIA HPC SDK building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging
import re
import posixpath
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.rm
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class nvhpc(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
hpccm.templates.rm):
"""The `nvhpc` building block downloads and installs the [NVIDIA HPC
SDK](https://developer.nvidia.com/hpc-sdk). By default, the
NVIDIA HPC SDK is installed from a package repository.
Alternatively the tar package can be downloaded by specifying the
`tarball` parameter, or a local tar package may used instead by
specifying the `package` parameter.
You must agree to the [NVIDIA HPC SDK End-User License Agreement](https://docs.nvidia.com/hpc-sdk/eula) to use this
building block.
As a side effect, a toolchain is created containing the NVIDIA
compilers. The toolchain can be passed to other operations that want
to build using the NVIDIA compilers.
# Parameters
cuda: The default CUDA version to configure. The default is an
empty value, i.e., use the latest version supported by the NVIDIA
HPC SDK. This value is ignored if installing from the package
repository.
cuda_multi: Boolean flag to specify whether the NVIDIA HPC SDK
support for multiple CUDA versions should be installed. The
default value is `True`.
environment: Boolean flag to specify whether the environment
(`CPATH`, `LD_LIBRARY_PATH`, `MANPATH`, and `PATH`) should be
modified to include the NVIDIA HPC SDK. The default is True.
eula: By setting this value to `True`, you agree to the [NVIDIA HPC SDK End-User License Agreement](https://docs.nvidia.com/hpc-sdk/eula).
The default value is `False`.
extended_environment: Boolean flag to specify whether an extended
set of environment variables should be defined. If True, the
following environment variables `CC`, `CPP`, `CXX`, `F77`, `F90`,
and `FC`. If False, then only `CPATH`, `LD_LIBRARY_PATH`,
`MANPATH`, and `PATH` will be extended to include the NVIDIA HPC
SDK. The default value is `False`.
mpi: Boolean flag to specify whether MPI should be included in the
environment. The default value is `True`.
ospackages: List of OS packages to install prior to installing the
NVIDIA HPC SDK. The default value is `ca-certificates`, `gnupg`,
and `wget` for Ubuntu, and `ca-certificates` for RHEL-based Linux
distributions. If not installing from the package repository,
then for Ubuntu, the default values are `bc`, `debianutils`,
`gcc`, `g++`, `gfortran`, `libatomic1`, `libnuma1`,
`openssh-client`, and `wget`, and for RHEL-based Linux
distributions, the default values are `bc`, `gcc`, `gcc-c++`,
`gcc-gfortran`, `libatomic`, `numactl-libs`, `openssh-clients`,
`wget`, and `which`.
package: Path to the NVIDIA HPC SDK tar package file relative to
the local build context. The default value is empty.
prefix: The top level install prefix. The default value is
`/opt/nvidia/hpc_sdk`. This value is ignored when installing from
the package repository.
redist: The list of redistributable files to copy into the runtime
stage. The paths are relative to the `REDIST` directory and
wildcards are supported. The default is an empty list.
tarball: Boolean flag to specify whether the NVIDIA HPC SDK should
be installed by downloading the tar package file. If False,
install from the package repository. The default is False.
toolchain: The toolchain object to be used to configure the HPC
SDK with a specific GNU toolchain. The default is empty, i.e., use
the default GNU toolchain.
url: The location of the package that should be installed. The default value is `https://developer.download.nvidia.com/hpc-sdk/nvhpc_X_Y_Z_cuda_multi.tar.gz`, where `X`, `Y`, and `Z` are the year, version, and architecture whose values are automatically determined.
version: The version of the HPC SDK to use. Note when `package`
is set the version is determined automatically from the package
file name. The default value is `23.5`.
# Examples
```python
nvhpc(eula=True)
```
```python
nvhpc(eula=True, tarball=True)
```
```python
nvhpc(eula=True,
url='https://developer.download.nvidia.com/hpc-sdk/nvhpc_2020_207_Linux_x86_64_cuda_11.0.tar.gz')
```
```python
nvhpc(eula=True,
package='nvhpc_2020_207_Linux_x86_64_cuda_multi.tar.gz',
redist=['compilers/lib/*'])
```
```python
n = nvhpc(eula=True, ...)
openmpi(..., toolchain=n.toolchain, ...)
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(nvhpc, self).__init__(**kwargs)
self.__arch_directory = None # Filled in by __cpu_arch()
self.__arch_label = '' # Filled in by __cpu_arch()
self.__cuda_multi = kwargs.get('cuda_multi', True)
self.__cuda_version = kwargs.get('cuda', None)
self.__commands = [] # Filled in by __setup_tarball()
# By setting this value to True, you agree to the NVIDIA HPC
# SDK End-User License Agreement
# (https://docs.nvidia.com/hpc-sdk/eula)
self.__eula = kwargs.get('eula', False)
self.__extended_environment = kwargs.get('extended_environment', False)
self.__hpcx = kwargs.get('_hpcx', False)
self.__mpi = kwargs.get('mpi', True)
self.__nvhpc_package = '' # Filled in by __distro()
self.__ospackages = kwargs.get('ospackages', [])
self.__runtime_ospackages = [] # Filled in by __distro()
self.__prefix = kwargs.get('prefix', '/opt/nvidia/hpc_sdk')
self.__redist = kwargs.get('redist', [])
self.__stdpar_cudacc = kwargs.get('stdpar_cudacc', None)
self.__tarball = kwargs.get('tarball', False)
self.__toolchain = kwargs.get('toolchain', None)
self.__url = kwargs.get('url', None)
self.__version = kwargs.get('version', '23.5')
self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
self.__year = '' # Filled in by __get_version()
self.toolchain = toolchain(CC='nvc', CXX='nvc++', F77='nvfortran',
F90='nvfortran', FC='nvfortran')
if StrictVersion(self.__version) >= StrictVersion('23.5'):
self.__cuda_version_default = '12.1'
if StrictVersion(self.__version) >= StrictVersion('23.1'):
self.__cuda_version_default = '12.0'
elif StrictVersion(self.__version) >= StrictVersion('22.11'):
self.__cuda_version_default = '11.8'
elif StrictVersion(self.__version) >= StrictVersion('22.5'):
self.__cuda_version_default = '11.7'
elif StrictVersion(self.__version) >= StrictVersion('22.2'):
self.__cuda_version_default = '11.6'
elif StrictVersion(self.__version) >= StrictVersion('21.11'):
self.__cuda_version_default = '11.5'
elif StrictVersion(self.__version) >= StrictVersion('21.7'):
self.__cuda_version_default = '11.4'
elif StrictVersion(self.__version) >= StrictVersion('21.5'):
self.__cuda_version_default = '11.3'
elif StrictVersion(self.__version) >= StrictVersion('21.2'):
self.__cuda_version_default = '11.2'
elif StrictVersion(self.__version) >= StrictVersion('20.11'):
self.__cuda_version_default = '11.1'
else:
self.__cuda_version_default = '11.0'
# Set the CPU architecture specific parameters
self.__cpu_arch()
# Set the Linux distribution specific parameters
self.__distro()
# Figure out the version information
self.__get_version()
# Set paths used extensively
self.__basepath = posixpath.join(self.__prefix, self.__arch_directory,
self.__version)
# Construct the series of steps to execute
if self.package or self.__tarball or self.__url:
self.__setup_tarball()
# Set the environment
self.environment_variables = self.__environment()
# Fill in container instructions
self.__instructions()
    def __instructions(self):
        """Fill in container instructions: a header comment, the optional
        local package copy, prerequisite OS packages, then either the
        tarball install commands or the package repository install,
        followed by optional `localrc` regeneration and the environment
        setup."""
        self += comment('NVIDIA HPC SDK version {}'.format(self.__version))
        if self.package:
            # Use package from local build context
            self += copy(src=self.package,
                         dest=posixpath.join(self.__wd,
                                             posixpath.basename(self.package)))
        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)
        if self.package or self.__tarball or self.__url:
            # tarball install: run the commands assembled by
            # __setup_tarball()
            self += shell(commands=self.__commands)
        else:
            # repository install
            if StrictVersion(self.__version) >= StrictVersion('22.9'):
                # signed packages (repository metadata is GPG signed
                # starting with 22.9, so register the key explicitly)
                self += packages(
                    apt_keys=['https://developer.download.nvidia.com/hpc-sdk/ubuntu/DEB-GPG-KEY-NVIDIA-HPC-SDK'],
                    apt_repositories=['deb [signed-by=/usr/share/keyrings/DEB-GPG-KEY-NVIDIA-HPC-SDK.gpg] https://developer.download.nvidia.com/hpc-sdk/ubuntu/{} /'.format(self.__arch_label)],
                    ospackages=[self.__nvhpc_package],
                    yum_repositories=['https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo'],
                    _apt_key=False)
            else:
                # older releases: unsigned apt repository
                self += packages(
                    apt_repositories=['deb [trusted=yes] https://developer.download.nvidia.com/hpc-sdk/ubuntu/{} /'.format(self.__arch_label)],
                    ospackages=[self.__nvhpc_package],
                    yum_repositories=['https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo'])
        if self.__toolchain:
            # Regenerate the localrc using the compilers from the specified
            # toolchain.  Only the CC, CXX, and F77 settings are
            # forwarded to makelocalrc (-gcc / -gpp / -g77).
            compiler_bin = posixpath.join(self.__basepath, 'compilers', 'bin')

            args = ['-x']
            if self.__toolchain.CC:
                args.append('-gcc {}'.format(self.__toolchain.CC))
            if self.__toolchain.CXX:
                args.append('-gpp {}'.format(self.__toolchain.CXX))
            if self.__toolchain.F77:
                args.append('-g77 {}'.format(self.__toolchain.F77))

            self += shell(commands=['{0} {1} {2}'.format(
                posixpath.join(compiler_bin, 'makelocalrc'), compiler_bin,
                ' '.join(args))])
        self += environment(variables=self.environment_step())
def __cpu_arch(self):
"""Based on the CPU architecture, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
self.__arch_directory = 'Linux_aarch64'
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
self.__arch_label = 'arm64'
else:
self.__arch_label = 'aarch64'
if StrictVersion(self.__version) < StrictVersion('20.11'):
self.__cuda_multi = False # CUDA multi packages not available
elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
self.__arch_directory = 'Linux_ppc64le'
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
self.__arch_label = 'ppc64el'
else:
self.__arch_label = 'ppc64le'
elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
self.__arch_directory = 'Linux_x86_64'
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
self.__arch_label = 'amd64'
else:
self.__arch_label = 'x86_64'
else: # pragma: no cover
raise RuntimeError('Unknown CPU architecture')
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
version = self.__version.replace('.', '-')
if self.__cuda_multi:
self.__nvhpc_package = 'nvhpc-{}-cuda-multi'.format(version)
else:
self.__nvhpc_package = 'nvhpc-{}'.format(version)
if not self.__ospackages:
if self.package or self.__tarball:
self.__ospackages = ['bc', 'debianutils', 'gcc', 'g++',
'gfortran', 'libatomic1', 'libnuma1',
'openssh-client', 'wget']
else:
self.__ospackages = ['ca-certificates', 'gnupg', 'wget']
self.__runtime_ospackages = ['libatomic1', 'libnuma1',
'openssh-client']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if self.__cuda_multi:
self.__nvhpc_package = 'nvhpc-cuda-multi-{}'.format(self.__version)
else:
self.__nvhpc_package = 'nvhpc-{}'.format(self.__version)
if not self.__ospackages:
if self.package or self.__tarball:
self.__ospackages = ['bc', 'gcc', 'gcc-c++',
'gcc-gfortran', 'libatomic',
'openssh-clients', 'numactl-libs',
'wget', 'which']
else:
self.__ospackages = ['ca-certificates']
self.__runtime_ospackages = ['libatomic', 'numactl-libs',
'openssh-clients']
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def __environment(self):
"""Define environment variables"""
e = {}
# Development environment
if self.__extended_environment:
# Mirror the environment defined by the environment module
e['CC'] = posixpath.join(self.__basepath, 'compilers', 'bin',
'nvc')
e['CPP'] = 'cpp'
e['CXX'] = posixpath.join(self.__basepath, 'compilers', 'bin',
'nvc++')
e['F77'] = posixpath.join(self.__basepath, 'compilers', 'bin',
'nvfortran')
e['F90'] = posixpath.join(self.__basepath, 'compilers', 'bin',
'nvfortran')
e['FC'] = posixpath.join(self.__basepath, 'compilers', 'bin',
'nvfortran')
cpath = [
posixpath.join(self.__basepath, 'comm_libs', 'nvshmem', 'include'),
posixpath.join(self.__basepath, 'comm_libs', 'nccl', 'include'),
posixpath.join(self.__basepath, 'compilers', 'extras', 'qd',
'include', 'qd'),
posixpath.join(self.__basepath, 'math_libs', 'include')]
ld_library_path = [
posixpath.join(self.__basepath, 'comm_libs', 'nvshmem', 'lib'),
posixpath.join(self.__basepath, 'comm_libs', 'nccl', 'lib'),
posixpath.join(self.__basepath, 'math_libs', 'lib64'),
posixpath.join(self.__basepath, 'compilers', 'lib'),
posixpath.join(self.__basepath, 'cuda', 'lib64')]
path = [
posixpath.join(self.__basepath, 'comm_libs', 'nvshmem', 'bin'),
posixpath.join(self.__basepath, 'comm_libs', 'nccl', 'bin'),
posixpath.join(self.__basepath, 'profilers', 'bin'),
posixpath.join(self.__basepath, 'compilers', 'bin'),
posixpath.join(self.__basepath, 'cuda', 'bin')]
if self.__mpi:
cpath.append(
posixpath.join(self.__basepath, 'comm_libs', 'mpi', 'include'))
ld_library_path.append(
posixpath.join(self.__basepath, 'comm_libs', 'mpi', 'lib'))
path.append(
posixpath.join(self.__basepath, 'comm_libs', 'mpi', 'bin'))
elif self.__hpcx and StrictVersion(self.__version) >= StrictVersion('23.5'):
path.append(
posixpath.join(self.__basepath, 'comm_libs', 'hpcx', 'bin'))
elif self.__hpcx:
# Set environment for HPC-X
if StrictVersion(self.__version) >= StrictVersion('22.2'):
hpcx_version = 'latest'
elif StrictVersion(self.__version) >= StrictVersion('21.11'):
hpcx_version = 'hpcx-2.10.beta'
elif StrictVersion(self.__version) >= StrictVersion('21.9'):
hpcx_version = 'hpcx-2.9.0'
elif StrictVersion(self.__version) >= StrictVersion('21.7'):
hpcx_version = 'hpcx-2.8.1'
elif StrictVersion(self.__version) < StrictVersion('21.5'):
hpcx_version = 'hpcx-2.7.4'
hpcx_dir = posixpath.join(self.__basepath, 'comm_libs', 'hpcx',
hpcx_version)
hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx', 'mt')
#hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx')
hpcx_sharp_dir = posixpath.join(hpcx_dir, 'sharp')
hpcx_nccl_rdma_sharp_plugin_dir = posixpath.join(
hpcx_dir, 'nccl_rdma_sharp_plugin')
hpcx_hcoll_dir = posixpath.join(hpcx_dir, 'hcoll')
hpcx_mpi_dir = posixpath.join(hpcx_dir, 'ompi')
hpcx_oshmem_dir = hpcx_mpi_dir
cpath.append(':'.join([
posixpath.join(hpcx_hcoll_dir, 'include'),
posixpath.join(hpcx_mpi_dir, 'include'),
posixpath.join(hpcx_sharp_dir, 'include'),
posixpath.join(hpcx_ucx_dir, 'include'),
'$CPATH']))
e['HPCX_DIR'] = hpcx_dir
e['HPCX_HCOLL_DIR'] = hpcx_hcoll_dir
e['HPCX_MPI_DIR'] = hpcx_mpi_dir
e['HPCX_NCCL_RDMA_SHARP_PLUGIN_DIR'] = hpcx_nccl_rdma_sharp_plugin_dir
e['HPCX_OSHMEM_DIR'] = hpcx_oshmem_dir
e['HPCX_SHARP_DIR'] = hpcx_sharp_dir
e['HPCX_UCX_DIR'] = hpcx_ucx_dir
e['LIBRARY_PATH'] = ':'.join([
posixpath.join(hpcx_hcoll_dir, 'lib'),
posixpath.join(hpcx_mpi_dir, 'lib'),
posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
posixpath.join(hpcx_sharp_dir, 'lib'),
posixpath.join(hpcx_ucx_dir, 'lib'),
'$LIBRARY_PATH'])
ld_library_path.append(':'.join([
posixpath.join(hpcx_hcoll_dir, 'lib'),
posixpath.join(hpcx_mpi_dir, 'lib'),
posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
posixpath.join(hpcx_sharp_dir, 'lib'),
posixpath.join(hpcx_ucx_dir, 'lib'),
posixpath.join(hpcx_ucx_dir, 'lib', 'ucx'),
'$LD_LIBRARY_PATH']))
e['MPI_HOME'] = hpcx_mpi_dir
e['OMPI_HOME'] = hpcx_mpi_dir
e['OPAL_PREFIX'] = hpcx_mpi_dir
e['OSHMEM_HOME'] = hpcx_mpi_dir
path.append(':'.join([
posixpath.join(hpcx_hcoll_dir, 'bin'),
posixpath.join(hpcx_mpi_dir, 'bin'),
posixpath.join(hpcx_ucx_dir, 'bin'),
'$PATH']))
e['PKG_CONFIG_PATH'] = ':'.join([
posixpath.join(hpcx_hcoll_dir, 'lib', 'pkgconfig'),
posixpath.join(hpcx_mpi_dir, 'lib', 'pkgconfig'),
posixpath.join(hpcx_sharp_dir, 'lib', 'pkgconfig'),
posixpath.join(hpcx_ucx_dir, 'lib', 'pkgconfig'),
'$PKG_CONFIG_PATH'])
e['SHMEM_HOME'] = hpcx_mpi_dir
if cpath:
e['CPATH'] = '{}:$CPATH'.format(':'.join(cpath))
e['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(':'.join(
ld_library_path))
e['MANPATH'] = '{}:$MANPATH'.format(
posixpath.join(self.__basepath, 'compilers', 'man'))
e['PATH'] = '{}:$PATH'.format(':'.join(path))
return e
def __get_version(self):
"""Figure out the version information"""
if self.package:
# Figure out the version from the package name
match = re.search(r'nvhpc_\d+_(?P<year>\d\d)0?(?P<month>[1-9][0-9]?)',
self.package)
if (match and match.groupdict()['year'] and
match.groupdict()['month']):
self.__version = '{0}.{1}'.format(match.groupdict()['year'],
match.groupdict()['month'])
self.__year = '20' + match.groupdict()['year']
else:
raise RuntimeError('could not parse version from package name')
else:
match = re.search(r'(?P<year>\d\d)\.\d+', self.__version)
if match and match.groupdict()['year']:
self.__year = '20' + match.groupdict()['year']
    def __setup_tarball(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands, for a tarball based install: download (or use
        the local package), run the bundled installer, then clean up."""
        # Download / copy package
        if not self.package:
            if self.__url:
                self.url = self.__url
            else:
                # Default download URL; the '{{}}' placeholder survives
                # the first format() call and is filled in below with
                # either 'multi' or a specific CUDA version.
                baseurl = 'https://developer.download.nvidia.com/hpc-sdk/{0}/nvhpc_{1}_{2}_{3}_cuda_{{}}.tar.gz'.format(
                    self.__version, self.__year,
                    self.__version.replace('.', ''), self.__arch_directory)
                if self.__cuda_multi:
                    self.url = baseurl.format('multi')
                else:
                    self.url = baseurl.format(
                        self.__cuda_version if self.__cuda_version
                        else self.__cuda_version_default)

            self.__commands.append(self.download_step(wd=self.__wd))

        # Set installer flags
        flags = {'NVHPC_ACCEPT_EULA': 'accept',
                 'NVHPC_INSTALL_DIR': self.__prefix,
                 'NVHPC_SILENT': 'true'}
        if self.__cuda_version:
            flags['NVHPC_DEFAULT_CUDA'] = self.__cuda_version
        if self.__stdpar_cudacc:
            flags['NVHPC_STDPAR_CUDACC'] = self.__stdpar_cudacc
        if not self.__eula:
            # This will fail when building the container
            logging.warning('NVIDIA HPC SDK EULA was not accepted')
            flags['NVHPC_ACCEPT_EULA'] = 'decline'
            flags['NVHPC_SILENT'] = 'false'
        # Sort the flags so generated recipes are reproducible
        flag_string = ' '.join('{0}={1}'.format(key, val)
                               for key, val in sorted(flags.items()))

        # Install
        self.__commands.append('cd {0} && {1} ./install'.format(
            self.src_directory, flag_string))

        # Cleanup: remove the extracted source tree and the downloaded
        # (or copied) package file
        remove = [self.src_directory]
        if self.url:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.url)))
        elif self.package:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.package)))
        self.__commands.append(self.cleanup_step(items=remove))
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
n = nvhpc(redist=[...], ...)
Stage0 += n
Stage1 += n.runtime()
```
"""
if self.__redist:
self.rt += comment('NVIDIA HPC SDK')
if self.__runtime_ospackages:
self.rt += packages(ospackages=self.__runtime_ospackages)
redistpath = posixpath.join(self.__prefix,
self.__arch_directory,
self.__version, 'REDIST')
libdirs = {}
for r in self.__redist:
src = posixpath.join(redistpath, r)
if '*' in posixpath.basename(r):
# When using COPY with more than one source file,
# the destination must be a directory and end with
# a /
dest = posixpath.join(posixpath.dirname(redistpath),
posixpath.dirname(r)) + '/'
else:
dest = posixpath.join(posixpath.dirname(redistpath), r)
self.rt += copy(_from=_from, src=src, dest=dest)
# If the redist path looks like a library directory,
# add it to LD_LIBRARY_PATH
if '/lib' in posixpath.dirname(r):
libdirs[posixpath.join(posixpath.dirname(redistpath),
posixpath.dirname(r))] = True
if self.__redist and self.__mpi:
mpipath = posixpath.join(self.__basepath, 'comm_libs', 'mpi')
self.rt += copy(_from=_from, src=mpipath, dest=mpipath)
libdirs[posixpath.join(mpipath, 'lib')] = True
self.runtime_environment_variables['PATH'] = '{}:$PATH'.format(
posixpath.join(mpipath, 'bin'))
if libdirs:
liblist = sorted(libdirs.keys())
liblist.append('$LD_LIBRARY_PATH')
self.runtime_environment_variables['LD_LIBRARY_PATH'] = ':'.join(liblist)
self.rt += environment(
variables=self.runtime_environment_variables)
return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/nvhpc.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""CGNS building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
import hpccm.config
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class cgns(bb_base):
"""The `cgns` building block downloads and installs the
[CGNS](https://cgns.github.io/index.html) component.
The [HDF5](#hdf5) building block should be installed prior to this
building block.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
check: Boolean flag to specify whether the test cases should be
run. The default is False.
configure_opts: List of options to pass to `configure`. The
default value is `--with-hdf5=/usr/local/hdf5` and `--with-zlib`.
disable_FEATURE: Flags to control disabling features when
configuring. For instance, `disable_foo=True` maps to
`--disable-foo`. Underscores in the parameter name are converted
to dashes.
enable_FEATURE[=ARG]: Flags to control enabling features when
configuring. For instance, `enable_foo=True` maps to
`--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
Underscores in the parameter name are converted to dashes.
prefix: The top level install location. The default value is
`/usr/local/cgns`.
ospackages: List of OS packages to install prior to configuring
and building. For Ubuntu, the default values are `file`, `make`,
`wget`, and `zlib1g-dev`. For RHEL-based Linux distributions the
default values are `bzip2`, `file`, `make`, `wget` and
`zlib-devel`.
toolchain: The toolchain object. This should be used if
non-default compilers or other toolchain options are needed. The
default is empty.
version: The version of CGNS source to download. The default
value is `4.1.2`.
with_PACKAGE[=ARG]: Flags to control optional packages when
configuring. For instance, `with_foo=True` maps to `--with-foo`
and `with_foo='/usr/local/foo'` maps to
`--with-foo=/usr/local/foo`. Underscores in the parameter name
are converted to dashes.
without_PACKAGE: Flags to control optional packages when
configuring. For instance `without_foo=True` maps to
`--without-foo`. Underscores in the parameter name are converted
to dashes.
# Examples
```python
cgns(prefix='/opt/cgns/3.3.1', version='3.3.1')
```
"""
    def __init__(self, **kwargs):
        """Initialize building block.

        CGNS-specific keyword arguments are popped from kwargs so that
        the remaining keyword arguments can be forwarded verbatim to
        `generic_autotools`.
        """
        super(cgns, self).__init__(**kwargs)

        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/CGNS/CGNS/archive')
        self.__check = kwargs.pop('check', False)
        self.__configure_opts = kwargs.pop('configure_opts',
                                           ['--with-hdf5=/usr/local/hdf5',
                                            '--with-zlib'])
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/cgns')
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '4.1.2')

        # Set the configuration options (applies CGNS build workarounds
        # to a copy of the toolchain)
        self.__configure()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Setup build configuration; the configure script lives in the
        # 'src' subdirectory of the release tarball
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            comment=False,
            configure_opts=self.__configure_opts,
            directory=posixpath.join('CGNS-{}'.format(self.__version), 'src'),
            prefix=self.__prefix,
            toolchain=self.__toolchain,
            url='{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version),
            **kwargs)

        # Container instructions
        self += comment('CGNS version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
def __configure(self):
"""Setup configure options based on user parameters"""
# Create a copy of the toolchain so that it can be modified
# without impacting the original.
self.__toolchain = _copy(self.__toolchain)
# See https://cgns.github.io/download.html, Known Bugs
if not self.__toolchain.LIBS:
self.__toolchain.LIBS = '-Wl,--no-as-needed -ldl'
if not self.__toolchain.FLIBS:
self.__toolchain.FLIBS = '-Wl,--no-as-needed -ldl'
# See https://cgnsorg.atlassian.net/browse/CGNS-40
if (not self.__toolchain.FFLAGS and self.__toolchain.FC and
re.match('.*pgf.*', self.__toolchain.FC)):
self.__toolchain.FFLAGS = '-Mx,125,0x200'
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
if not self.__ospackages:
self.__ospackages = ['file', 'make', 'wget', 'zlib1g-dev']
self.__runtime_ospackages = ['zlib1g']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if not self.__ospackages:
self.__ospackages = ['bzip2', 'file', 'make', 'wget',
'zlib-devel']
if self.__check:
self.__ospackages.append('diffutils')
self.__runtime_ospackages = ['zlib']
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Example
```python
c = cgns(...)
Stage0 += c
Stage1 += c.runtime()
```
"""
self.rt += comment('CGNS')
self.rt += packages(ospackages=self.__runtime_ospackages)
self.rt += self.__bb.runtime(_from=_from)
return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/cgns.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
# Public names exported by `from hpccm.building_blocks import *`; keep
# this list in sync with the imports below.
__all__ = ['amgx',
           'apt_get',
           'arm_allinea_studio',
           'boost',
           'catalyst',
           'cgns',
           'charm',
           'cmake',
           'conda',
           'fftw',
           'gdrcopy',
           'generic_autotools',
           'generic_build',
           'generic_cmake',
           'gnu',
           'hdf5',
           'hpcx',
           'intel_mpi',
           'intel_psxe',
           'intel_psxe_runtime',
           'julia',
           'knem',
           'kokkos',
           'libsim',
           'llvm',
           'magma',
           'mkl',
           'mlnx_ofed',
           'mpich',
           'multi_ofed',
           'mvapich2_gdr',
           'mvapich2',
           'nccl',
           'netcdf',
           'nsight_compute',
           'nsight_systems',
           'nvhpc',
           'nvshmem',
           'ofed',
           'openblas',
           'openmpi',
           'packages',
           'pgi',
           'pip',
           'pmix',
           'pnetcdf',
           'python',
           'rdma_core',
           'scif',
           'sensei',
           'slurm_pmi2',
           'ucx',
           'xpmem',
           'yum']
from hpccm.building_blocks.amgx import amgx
from hpccm.building_blocks.apt_get import apt_get
from hpccm.building_blocks.arm_allinea_studio import arm_allinea_studio
from hpccm.building_blocks.boost import boost
from hpccm.building_blocks.catalyst import catalyst
from hpccm.building_blocks.cgns import cgns
from hpccm.building_blocks.charm import charm
from hpccm.building_blocks.cmake import cmake
from hpccm.building_blocks.conda import conda
from hpccm.building_blocks.fftw import fftw
from hpccm.building_blocks.gdrcopy import gdrcopy
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.gnu import gnu
from hpccm.building_blocks.hdf5 import hdf5
from hpccm.building_blocks.hpcx import hpcx
from hpccm.building_blocks.intel_mpi import intel_mpi
from hpccm.building_blocks.intel_psxe import intel_psxe
from hpccm.building_blocks.intel_psxe_runtime import intel_psxe_runtime
from hpccm.building_blocks.julia import julia
from hpccm.building_blocks.knem import knem
from hpccm.building_blocks.kokkos import kokkos
from hpccm.building_blocks.libsim import libsim
from hpccm.building_blocks.llvm import llvm
from hpccm.building_blocks.magma import magma
from hpccm.building_blocks.mkl import mkl
from hpccm.building_blocks.mlnx_ofed import mlnx_ofed
from hpccm.building_blocks.mpich import mpich
from hpccm.building_blocks.multi_ofed import multi_ofed
from hpccm.building_blocks.mvapich2_gdr import mvapich2_gdr
from hpccm.building_blocks.mvapich2 import mvapich2
from hpccm.building_blocks.nccl import nccl
from hpccm.building_blocks.netcdf import netcdf
from hpccm.building_blocks.nsight_compute import nsight_compute
from hpccm.building_blocks.nsight_systems import nsight_systems
from hpccm.building_blocks.nvhpc import nvhpc
from hpccm.building_blocks.nvshmem import nvshmem
from hpccm.building_blocks.ofed import ofed
from hpccm.building_blocks.openblas import openblas
from hpccm.building_blocks.openmpi import openmpi
from hpccm.building_blocks.packages import packages
from hpccm.building_blocks.pgi import pgi
from hpccm.building_blocks.pip import pip
from hpccm.building_blocks.pmix import pmix
from hpccm.building_blocks.pnetcdf import pnetcdf
from hpccm.building_blocks.python import python
from hpccm.building_blocks.rdma_core import rdma_core
from hpccm.building_blocks.scif import scif
from hpccm.building_blocks.sensei import sensei
from hpccm.building_blocks.slurm_pmi2 import slurm_pmi2
from hpccm.building_blocks.ucx import ucx
from hpccm.building_blocks.xpmem import xpmem
from hpccm.building_blocks.yum import yum
| hpc-container-maker-master | hpccm/building_blocks/__init__.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Intel Parallel Studio XE building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.rm
import hpccm.templates.sed
import hpccm.templates.tar
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.intel_psxe_runtime import intel_psxe_runtime
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class intel_psxe(bb_base, hpccm.templates.envvars, hpccm.templates.rm,
                 hpccm.templates.sed, hpccm.templates.tar):
    """The `intel_psxe` building block installs [Intel Parallel Studio
    XE](https://software.intel.com/en-us/parallel-studio-xe).

    You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement)
    to use this building block.

    # Parameters

    components: List of Intel Parallel Studio XE components to
    install. The default value is `DEFAULTS`. If only the Intel C++
    and Fortran compilers are desired, then use `intel-icc__x86_64`
    and `intel-ifort__x86_64`. Please note that the values are not
    consistent between versions; for a list of components, extract
    `pset/mediaconfig.xml` from the tarball and grep for `Abbr`.

    daal: Boolean flag to specify whether the Intel Data Analytics
    Acceleration Library environment should be configured when
    `psxevars` is False. This flag also controls whether to install
    the corresponding runtime in the `runtime` method. Note: this
    flag does not control whether the developer environment is
    installed; see `components`. The default is True.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and others) should be modified to
    include Intel Parallel Studio XE. `psxevars` has precedence. The
    default is True.

    eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement).
    The default value is `False`.

    icc: Boolean flag to specify whether the Intel C++ Compiler
    environment should be configured when `psxevars` is False. This
    flag also controls whether to install the corresponding runtime in
    the `runtime` method. Note: this flag does not control whether
    the developer environment is installed; see `components`. The
    default is True.

    ifort: Boolean flag to specify whether the Intel Fortran Compiler
    environment should be configured when `psxevars` is False. This
    flag also controls whether to install the corresponding runtime in
    the `runtime` method. Note: this flag does not control whether
    the developer environment is installed; see `components`. The
    default is True.

    ipp: Boolean flag to specify whether the Intel Integrated
    Performance Primitives environment should be configured when
    `psxevars` is False. This flag also controls whether to install
    the corresponding runtime in the `runtime` method. Note: this
    flag does not control whether the developer environment is
    installed; see `components`. The default is True.

    license: The license to use to activate Intel Parallel Studio XE.
    If the string contains a `@` the license is interpreted as a
    network license, e.g., `12345@lic-server`. Otherwise, the string
    is interpreted as the path to the license file relative to the
    local build context. The default value is empty. While this
    value is not required, the installation is unlikely to be
    successful without a valid license.

    mkl: Boolean flag to specify whether the Intel Math Kernel Library
    environment should be configured when `psxevars` is False. This
    flag also controls whether to install the corresponding runtime in
    the `runtime` method. Note: this flag does not control whether
    the developer environment is installed; see `components`. The
    default is True.

    mpi: Boolean flag to specify whether the Intel MPI Library
    environment should be configured when `psxevars` is False. This
    flag also controls whether to install the corresponding runtime in
    the `runtime` method. Note: this flag does not control whether
    the developer environment is installed; see `components`. The
    default is True.

    ospackages: List of OS packages to install prior to installing
    Intel MPI. For Ubuntu, the default values are `build-essential`
    and `cpio`. For RHEL-based Linux distributions, the default
    values are `gcc`, `gcc-c++`, `make`, and `which`.

    prefix: The top level install location. The default value is
    `/opt/intel`.

    psxevars: Intel Parallel Studio XE provides an environment script
    (`compilervars.sh`) to setup the environment. If this value is
    `True`, the bashrc is modified to automatically source this
    environment script. However, the Intel runtime environment is not
    automatically available to subsequent container image build steps;
    the environment is available when the container image is run. To
    set the Intel Parallel Studio XE environment in subsequent build
    steps you can explicitly call `source
    /opt/intel/compilers_and_libraries/linux/bin/compilervars.sh
    intel64` in each build step. If this value is set to `False`,
    then the environment is set such that the environment is visible
    to both subsequent container image build steps and when the
    container image is run. However, the environment may differ
    slightly from that set by `compilervars.sh`. This option will be
    used with the `runtime` method. The default value is
    `True`.

    runtime_version: The version of Intel Parallel Studio XE runtime
    to install via the `runtime` method. The runtime is installed
    using the [intel_psxe_runtime](#intel_psxe_runtime) building
    block. This value is passed as its `version` parameter. In
    general, the major version of the runtime should correspond to the
    tarball version. The default value is `2020.2-14`.

    tarball: Path to the Intel Parallel Studio XE tarball relative to
    the local build context. The default value is empty. This
    parameter is required.

    tbb: Boolean flag to specify whether the Intel Threading Building
    Blocks environment should be configured when `psxevars` is False.
    This flag also controls whether to install the corresponding
    runtime in the `runtime` method. Note: this flag does not control
    whether the developer environment is installed; see `components`.
    The default is True.

    # Examples

    ```python
    intel_psxe(eula=True, license='XXXXXXXX.lic',
               tarball='parallel_studio_xe_2018_update1_professional_edition.tgz')
    ```

    ```python
    i = intel_psxe(...)
    openmpi(..., toolchain=i.toolchain, ...)
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(intel_psxe, self).__init__(**kwargs)

        # By setting this value to True, you agree to the
        # corresponding Intel End User License Agreement
        # (https://software.intel.com/en-us/articles/end-user-license-agreement)
        self.__eula = kwargs.get('eula', False)

        self.__components = kwargs.get('components', ['DEFAULTS'])
        self.__daal = kwargs.get('daal', True)
        self.__icc = kwargs.get('icc', True)
        self.__ifort = kwargs.get('ifort', True)
        self.__ipp = kwargs.get('ipp', True)
        self.__license = kwargs.get('license', None)
        self.__mkl = kwargs.get('mkl', True)
        self.__mpi = kwargs.get('mpi', True)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__prefix = kwargs.get('prefix', '/opt/intel')
        self.__psxevars = kwargs.get('psxevars', True)
        self.__runtime_version = kwargs.get('runtime_version', '2020.2-14')
        self.__tarball = kwargs.get('tarball', None)
        self.__tbb = kwargs.get('tbb', True)
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Expose an Intel compiler toolchain so other building blocks
        # can build with icc / icpc / ifort
        self.toolchain = toolchain(CC='icc', CXX='icpc', F77='ifort',
                                   F90='ifort', FC='ifort')
        self.toolchain.CFLAGS = hpccm.config.get_cpu_optimization_flags('intel')
        self.toolchain.CXXFLAGS = hpccm.config.get_cpu_optimization_flags('intel')
        self.toolchain.FFLAGS = hpccm.config.get_cpu_optimization_flags('intel')
        self.toolchain.FCFLAGS = hpccm.config.get_cpu_optimization_flags('intel')

        self.__bashrc = '' # Filled in by __distro()
        self.__commands = [] # Filled in by __setup()

        if hpccm.config.g_cpu_arch != cpu_arch.X86_64: # pragma: no cover
            logging.warning('Using intel_psxe on a non-x86_64 processor')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('Intel Parallel Studio XE')
        self += packages(ospackages=self.__ospackages)
        # Copy the installer tarball into the working directory
        self += copy(src=self.__tarball,
                     dest=posixpath.join(self.__wd, self.__tarball_name))
        if self.__license and not '@' in self.__license:
            # License file (a value containing '@' is a network license
            # server, which does not need to be copied into the image)
            self += copy(src=self.__license,
                         dest=posixpath.join(self.__wd, 'license.lic'))
        self += shell(commands=self.__commands)

        if self.__psxevars:
            # Source the compilervars environment script when starting
            # the container, but the variables will not be available to
            # any subsequent build steps.
            self += shell(commands=['echo "source {0}/compilers_and_libraries/linux/bin/compilervars.sh intel64" >> {1}'.format(self.__prefix, self.__bashrc)])
        else:
            # Manually constructed environment (see __environment())
            self += environment(variables=self.environment_step())

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['build-essential', 'cpio']
            self.__bashrc = '/etc/bash.bashrc'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['gcc', 'gcc-c++', 'make', 'which']
            self.__bashrc = '/etc/bashrc'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __environment(self):
        """Manually construct the environment variables for the selected
        components.  Used when `psxevars` is False; the result may
        differ slightly from the environment set by compilervars.sh."""

        basepath = posixpath.join(self.__prefix, 'compilers_and_libraries',
                                  'linux')

        cpath = []
        ld_library_path = []
        library_path = []
        path = []
        env = {}

        if self.__daal:
            env['DAALROOT'] = posixpath.join(basepath, 'daal')
            cpath.append(posixpath.join(basepath, 'daal', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'daal', 'lib',
                                                  'intel64'))
            library_path.append(posixpath.join(basepath, 'daal', 'lib',
                                               'intel64'))

        if self.__icc:
            cpath.append(posixpath.join(basepath, 'pstl', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'compiler', 'lib',
                                                  'intel64'))
            path.append(posixpath.join(basepath, 'bin', 'intel64'))

        if self.__ifort:
            ld_library_path.append(posixpath.join(basepath, 'compiler', 'lib',
                                                  'intel64'))
            path.append(posixpath.join(basepath, 'bin', 'intel64'))

        if self.__ipp:
            env['IPPROOT' ] = posixpath.join(basepath, 'ipp')
            cpath.append(posixpath.join(basepath, 'ipp', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'ipp', 'lib',
                                                  'intel64'))
            library_path.append(posixpath.join(basepath, 'ipp', 'lib',
                                               'intel64'))

        if self.__mkl:
            env['MKLROOT'] = posixpath.join(basepath, 'mkl')
            cpath.append(posixpath.join(basepath, 'mkl', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'mkl', 'lib',
                                                  'intel64'))
            library_path.append(posixpath.join(basepath, 'mkl', 'lib',
                                               'intel64'))

        if self.__mpi:
            # TODO(review): the libfabric provider paths set by
            # compilervars.sh are not added here -- confirm whether they
            # are needed for this MPI version.
            env['I_MPI_ROOT' ] = posixpath.join(basepath, 'mpi')
            cpath.append(posixpath.join(basepath, 'mpi', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'mpi', 'intel64',
                                                  'lib'))
            path.append(posixpath.join(basepath, 'mpi', 'intel64', 'bin'))

        if self.__tbb:
            cpath.append(posixpath.join(basepath, 'tbb', 'include'))
            ld_library_path.append(posixpath.join(basepath, 'tbb', 'lib',
                                                  'intel64', 'gcc4.7'))
            library_path.append(posixpath.join(basepath, 'tbb', 'lib',
                                               'intel64', 'gcc4.7'))

        # Prepend the collected paths to any pre-existing values
        if cpath:
            cpath.append('$CPATH')
            env['CPATH'] = ':'.join(cpath)

        if library_path:
            library_path.append('$LIBRARY_PATH')
            env['LIBRARY_PATH'] = ':'.join(library_path)

        if ld_library_path:
            ld_library_path.append('$LD_LIBRARY_PATH')
            env['LD_LIBRARY_PATH'] = ':'.join(ld_library_path)

        if path:
            path.append('$PATH')
            env['PATH'] = ':'.join(path)

        return env

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
           self.__commands"""

        # tarball must be specified
        if not self.__tarball:
            raise RuntimeError('Intel PSXE tarball not specified')

        # Get the name of the directory that created when the tarball
        # is extracted.  Assume it is the same as the basename of the
        # tarball.
        self.__tarball_name = posixpath.basename(self.__tarball)
        basedir = posixpath.splitext(self.__tarball_name)[0]

        # Untar
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, self.__tarball_name),
            directory=(self.__wd)))

        # Configure silent install: set the component list and the
        # install prefix by editing the installer's silent.cfg
        silent_cfg=[
            r's/^#\?\(COMPONENTS\)=.*/\1={}/g'.format(
                ';'.join(self.__components)),
            r's|^#\?\(PSET_INSTALL_DIR\)=.*|\1={}|g'.format(self.__prefix)]

        # EULA acceptance
        if self.__eula:
            silent_cfg.append(r's/^#\?\(ACCEPT_EULA\)=.*/\1=accept/g')

        # License activation
        if self.__license and '@' in self.__license:
            # License server
            silent_cfg.append(r's/^#\?\(ACTIVATION_TYPE\)=.*/\1=license_server/g')
            silent_cfg.append(r's/^#\?\(ACTIVATION_LICENSE_FILE\)=.*/\1={}/g'.format(self.__license))
        elif self.__license:
            # License file
            silent_cfg.append(r's/^#\?\(ACTIVATION_TYPE\)=.*/\1=license_file/g')
            silent_cfg.append(r's|^#\?\(ACTIVATION_LICENSE_FILE\)=.*|\1={}|g'.format(posixpath.join(self.__wd, 'license.lic')))
        else:
            # No license, will most likely not work
            logging.warning('No Intel Parallel Studio XE license specified')

        # Update the silent config file
        self.__commands.append(self.sed_step(
            file=posixpath.join(self.__wd, basedir, 'silent.cfg'),
            patterns=silent_cfg))

        # Install
        self.__commands.append(
            'cd {} && ./install.sh --silent=silent.cfg'.format(
                posixpath.join(self.__wd, basedir)))

        # Cleanup the tarball and the extracted installer directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, self.__tarball_name),
                   posixpath.join(self.__wd, basedir)]))

        # Set the environment
        self.environment_variables = self.__environment()

    def runtime(self, _from='0'):
        """Install the runtime from a full build in a previous stage"""
        # Note: _from is accepted for API symmetry with other building
        # blocks but is not used here; the runtime is installed via the
        # intel_psxe_runtime building block rather than copied from the
        # previous stage.
        return str(intel_psxe_runtime(daal=self.__daal,
                                      eula=self.__eula,
                                      icc=self.__icc,
                                      ifort=self.__ifort,
                                      ipp=self.__ipp,
                                      mkl=self.__mkl,
                                      mpi=self.__mpi,
                                      psxevars=self.__psxevars,
                                      tbb=self.__tbb,
                                      version=self.__runtime_version))
| hpc-container-maker-master | hpccm/building_blocks/intel_psxe.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""packages building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
from hpccm.building_blocks.apt_get import apt_get
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.yum import yum
from hpccm.common import linux_distro
class packages(bb_base):
    """The `packages` building block specifies the set of operating system
    packages to install.  Based on the Linux distribution, the
    building block invokes either `apt-get` (Ubuntu) or `yum`
    (RHEL-based).

    This building block is preferred over directly using the
    [`apt_get`](#apt_get) or [`yum`](#yum) building blocks.

    # Parameters

    apt: A list of Debian packages to install.  The default is an
    empty list.

    aptitude: Boolean flag to specify whether `aptitude` should be
    used instead of `apt-get`.  The default is False.

    apt_keys: A list of GPG keys to add.  The default is an empty
    list.

    apt_ppas: A list of personal package archives to add.  The default
    is an empty list.

    apt_repositories: A list of apt repositories to add.  The default
    is an empty list.

    download: Boolean flag to specify whether to download the deb /
    rpm packages instead of installing them.  The default is False.

    download_directory: The deb package download location.  This
    parameter is ignored if `download` is False.  The default value is
    `/var/tmp/packages_download`.

    epel: Boolean flag to specify whether to enable the Extra Packages
    for Enterprise Linux (EPEL) repository.  The default is False.
    This parameter is ignored if the Linux distribution is not
    RHEL-based.

    extra_opts: List of additional options to pass to the package
    manager.  The default is an empty list.

    extract: Location where the downloaded packages should be
    extracted.  Note, this extracts and does not install the packages,
    i.e., the package manager is bypassed.  After the downloaded
    packages are extracted they are deleted.  This parameter is ignored
    if `download` is False.  If empty, then the downloaded packages are
    not extracted.  The default value is an empty string.

    force_add_repo: Boolean flag to specify whether adding a
    repository should be considered successful no matter the actual
    result.  This parameter is only valid for yum repositories.  The
    default value is False.

    ospackages: A list of packages to install.  The list is used for
    both Ubuntu and RHEL-based Linux distributions, therefore only
    packages with the consistent names across Linux distributions
    should be specified.  This parameter is ignored if `apt` or `yum`
    is specified.  The default value is an empty list.

    powertools: Boolean flag to specify whether to enable the
    PowerTools repository.  The default is False.  This parameter is
    ignored if the Linux distribution is not RHEL-based.

    release_stream: Boolean flag to specify whether to enable the [CentOS release stream](https://wiki.centos.org/Manuals/ReleaseNotes/CentOSStream)
    repository.  The default is False.  This parameter is only
    recognized if the Linux distribution is RHEL-based and the version
    is 8.x.

    scl: Boolean flag to specify whether to enable the Software
    Collections (SCL) repository.  The default is False.  This
    parameter is only recognized if the Linux distribution is
    RHEL-based and the version is 7.x.

    yum: A list of RPM packages to install.  The default value is an
    empty list.

    yum4: Boolean flag to specify whether `yum4` should be used
    instead of `yum`.  The default is False.  This parameter is only
    recognized if the CentOS version is 7.x.

    yum_keys: A list of GPG keys to import.  The default is an empty
    list.

    yum_repositories: A list of yum repositories to add.  The default
    is an empty list.

    # Examples

    ```python
    packages(ospackages=['make', 'wget'])
    ```

    ```python
    packages(apt=['zlib1g-dev'], yum=['zlib-devel'])
    ```

    ```python
    packages(apt=['python3'], yum=['python34'], epel=True)
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(packages, self).__init__()

        opt = kwargs.get

        # Options common to both package managers
        self.__download = opt('download', False)
        self.__download_directory = opt(
            'download_directory',
            posixpath.join(hpccm.config.g_wd, 'packages_download'))
        self.__extra_opts = opt('extra_opts', [])
        self.__extract = opt('extract', None)
        self.__ospackages = opt('ospackages', [])

        # apt-get (Ubuntu) specific options
        self.__apt = opt('apt', [])
        self.__apt_key = opt('_apt_key', True)
        self.__apt_keys = opt('apt_keys', [])
        self.__apt_ppas = opt('apt_ppas', [])
        self.__apt_repositories = opt('apt_repositories', [])
        self.__aptitude = opt('aptitude', False)

        # yum (RHEL-based) specific options
        self.__epel = opt('epel', False)
        self.__force_add_repo = opt('force_add_repo', False)
        self.__powertools = opt('powertools', False)
        self.__release_stream = opt('release_stream', False)
        self.__scl = opt('scl', False)
        self.__yum = opt('yum', [])
        self.__yum4 = opt('yum4', False)
        self.__yum_keys = opt('yum_keys', [])
        self.__yum_repositories = opt('yum_repositories', [])

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions by dispatching to the package
        manager building block matching the active Linux distribution"""

        distro = hpccm.config.g_linux_distro

        if distro == linux_distro.UBUNTU:
            # The distribution specific package list takes precedence
            # over the generic package list
            self += apt_get(
                _apt_key=self.__apt_key,
                aptitude=self.__aptitude,
                download=self.__download,
                download_directory=self.__download_directory,
                extra_opts=self.__extra_opts,
                extract=self.__extract,
                keys=self.__apt_keys,
                ospackages=self.__apt if self.__apt else self.__ospackages,
                ppas=self.__apt_ppas,
                repositories=self.__apt_repositories)
        elif distro == linux_distro.CENTOS:
            self += yum(
                download=self.__download,
                download_directory=self.__download_directory,
                extra_opts=self.__extra_opts,
                extract=self.__extract,
                epel=self.__epel,
                force_add_repo=self.__force_add_repo,
                keys=self.__yum_keys,
                ospackages=self.__yum if self.__yum else self.__ospackages,
                powertools=self.__powertools,
                release_stream=self.__release_stream,
                scl=self.__scl,
                repositories=self.__yum_repositories,
                yum4=self.__yum4)
        else:
            raise RuntimeError('Unknown Linux distribution')
| hpc-container-maker-master | hpccm/building_blocks/packages.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""HDF5 building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class hdf5(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `hdf5` building block downloads, configures, builds, and
    installs the [HDF5](http://www.hdfgroup.org) component.  Depending
    on the parameters, the source will be downloaded from the web
    (default) or copied from a source directory in the local build
    context.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels).  The default is False.

    check: Boolean flag to specify whether the `make check` step
    should be performed.  The default is False.

    configure_opts: List of options to pass to `configure`.  The
    default values are `--enable-cxx` and `--enable-fortran`.

    directory: Path to the unpackaged source directory relative to the
    local build context.  The default value is empty.  If this is
    defined, the source in the local build context will be used rather
    than downloading the source from the web.

    disable_FEATURE: Flags to control disabling features when
    configuring.  For instance, `disable_foo=True` maps to
    `--disable-foo`.  Underscores in the parameter name are converted
    to dashes.

    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring.  For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, `PATH`, and others)
    should be modified to include HDF5. The default is True.

    ldconfig: Boolean flag to specify whether the HDF5 library
    directory should be added dynamic linker cache.  If False, then
    `LD_LIBRARY_PATH` is modified to include the HDF5 library
    directory.  The default value is False.

    ospackages: List of OS packages to install prior to configuring
    and building.  For Ubuntu, the default values are `bzip2`, `file`,
    `make`, `wget`, and `zlib1g-dev`.  For RHEL-based Linux
    distributions the default values are `bzip2`, `file`, `make`,
    `wget` and `zlib-devel`.

    prefix: The top level install location.  The default value is
    `/usr/local/hdf5`.

    toolchain: The toolchain object.  This should be used if
    non-default compilers or other toolchain options are needed.  The
    default is empty.

    version: The version of HDF5 source to download.  This value is
    ignored if `directory` is set.  The default value is `1.12.0`.

    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring.  For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`.  Underscores in the parameter name
    are converted to dashes.

    without_PACKAGE: Flags to control optional packages when
    configuring.  For instance `without_foo=True` maps to
    `--without-foo`.  Underscores in the parameter name are converted
    to dashes.

    # Examples

    ```python
    hdf5(prefix='/opt/hdf5/1.10.1', version='1.10.1')
    ```

    ```python
    hdf5(directory='sources/hdf5-1.10.1')
    ```

    ```python
    n = nvhpc(eula=True)
    hdf5(toolchain=n.toolchain)
    ```

    ```python
    hdf5(check=True, configure_opts=['--enable-cxx', '--enable-fortran',
                                     '--enable-profiling=yes'])
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(hdf5, self).__init__(**kwargs)

        # Note: kwargs.pop is used (rather than get) so the remaining
        # keyword arguments can be forwarded to generic_autotools below
        self.__baseurl = kwargs.pop('baseurl', 'http://www.hdfgroup.org/ftp/HDF5/releases')
        self.__check = kwargs.pop('check', False)
        self.__configure_opts = kwargs.pop('configure_opts',
                                           ['--enable-cxx',
                                            '--enable-fortran'])
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/hdf5')
        # Create a copy of the toolchain so that it can be modified
        # without impacting the original
        self.__toolchain = _copy(kwargs.pop('toolchain', toolchain()))
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__version = kwargs.pop('version', '1.12.0')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the download specific parameters
        self.__download()

        # Set the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['HDF5_DIR'] = self.__prefix
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, 'lib'))
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        if not self.ldconfig:
            # Only needed when the library directory is not added to
            # the dynamic linker cache
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))

        # PIC workaround when using the NVIDIA compilers
        if self.__toolchain.FC and re.match('.*nvfortran',
                                            self.__toolchain.FC):
            if not self.__toolchain.FCFLAGS:
                self.__toolchain.FCFLAGS = '-fpic -DPIC'

        # Setup build configuration
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            configure_opts=self.__configure_opts,
            comment=False,
            devel_environment=self.environment_variables,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            toolchain=self.__toolchain,
            url=self.__url,
            **kwargs)

        # Container instructions
        self += comment('HDF5 version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'file', 'make', 'wget',
                                     'zlib1g-dev']
            self.__runtime_ospackages = ['zlib1g']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'file', 'make', 'wget',
                                     'zlib-devel']
                if self.__check:
                    # 'make check' needs diff
                    self.__ospackages.append('diffutils')
            self.__runtime_ospackages = ['zlib']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __download(self):
        """Set the source download URL, i.e., fill in self.__url, based on
        the version"""

        # The download URL path contains MAJOR.MINOR while the tarball
        # name contains MAJOR.MINOR.REVISION, so pull apart the full
        # version to get the MAJOR and MINOR components.
        match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', self.__version)
        major_minor = '{0}.{1}'.format(match.groupdict()['major'],
                                       match.groupdict()['minor'])
        tarball = 'hdf5-{}.tar.bz2'.format(self.__version)
        self.__url = '{0}/hdf5-{1}/hdf5-{2}/src/{3}'.format(
            self.__baseurl, major_minor, self.__version, tarball)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        h = hdf5(...)
        Stage0 += h
        Stage1 += h.runtime()
        ```
        """
        self.rt += comment('HDF5')
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/hdf5.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""NVIDIA Nsight Systems building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import hpccm.config
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
class nsight_systems(bb_base):
    """The `nsight_systems` building block downloads and installs the
    [NVIDIA Nsight Systems
    profiler](https://developer.nvidia.com/nsight-systems).

    # Parameters

    cli: Boolean flag to specify whether the command line only (CLI)
    package should be installed. The default is True.

    ospackages: List of OS packages to install prior to adding the
    NVIDIA developer tools repository. For Ubuntu, the default values
    are `apt-transport-https`, `ca-certificates`, `gnupg`, and `wget`.
    For RHEL-based Linux distributions, the default is an empty list.

    version: The version of Nsight Systems to install. The default
    value is `2022.5.1`.

    # Examples

    ```python
    nsight_systems(version='2020.5.1')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(nsight_systems, self).__init__(**kwargs)

        self.__arch_label = '' # Filled in __cpu_arch
        self.__cli = kwargs.get('cli', True)
        self.__distro_label = '' # Filled in by __distro
        self.__ospackages = kwargs.get('ospackages', [])
        self.__version = kwargs.get('version', '2022.5.1')

        # Set the CPU architecture specific parameters
        self.__cpu_arch()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""
        self += comment('NVIDIA Nsight Systems {}'.format(self.__version))

        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)

        if self.__cli:
            package = 'nsight-systems-cli-{}'.format(self.__version)
        else:
            package = 'nsight-systems-{}'.format(self.__version)

        # Install the package from the NVIDIA devtools repository
        # matching the image's distribution and CPU architecture
        self += packages(
            apt_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
            apt_repositories=['deb [signed-by=/usr/share/keyrings/nvidia.gpg] https://developer.download.nvidia.com/devtools/repos/{0}/{1}/ /'.format(self.__distro_label, self.__arch_label)],
            # https://github.com/NVIDIA/hpc-container-maker/issues/367
            force_add_repo=True,
            ospackages=[package],
            yum_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
            yum_repositories=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}'.format(self.__distro_label, self.__arch_label)],
            _apt_key=False)

    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            self.__arch_label = 'arm64'
        elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            # Debian and RPM repositories use different ppc64le labels
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__arch_label = 'ppc64el'
            else:
                self.__arch_label = 'ppc64le'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__arch_label = 'amd64'
            else:
                self.__arch_label = 'x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['apt-transport-https', 'ca-certificates',
                                     'gnupg', 'wget']
            if hpccm.config.g_linux_version >= StrictVersion('22.04'):
                self.__distro_label = 'ubuntu2204'
            elif hpccm.config.g_linux_version >= StrictVersion('20.04'):
                self.__distro_label = 'ubuntu2004'
            elif hpccm.config.g_linux_version >= StrictVersion('18.0'):
                self.__distro_label = 'ubuntu1804'
            else:
                self.__distro_label = 'ubuntu1604'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__distro_label = 'rhel8'
            else:
                self.__distro_label = 'rhel7'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
| hpc-container-maker-master | hpccm/building_blocks/nsight_systems.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""libsim building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class libsim(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
             hpccm.templates.rm, hpccm.templates.wget):
    """The `libsim` building block configures, builds, and installs the
    [VisIt
    Libsim](http://www.visitusers.org/index.php?title=Libsim_Batch)
    component.

    If GPU rendering will be used then a
    [cudagl](https://hub.docker.com/r/nvidia/cudagl) base image is
    recommended.

    # Parameters

    build_opts: List of VisIt build script options. The default values
    are `--xdb` and `--server-components-only`.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    Libsim. The default is True.

    ldconfig: Boolean flag to specify whether the Libsim library
    directories should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the Libsim library
    directories. The default value is False.

    mpi: Boolean flag to specify whether Libsim should be built with
    MPI support. VisIt uses MPI-1 routines that have been removed
    from the MPI standard; the MPI library may need to be built with
    special compatibility options, e.g., `--enable-mpi1-compatibility`
    for OpenMPI. If True, then the build script options `--parallel`
    and `--no-icet` are added and the environment variable
    `PAR_COMPILER` is set to `mpicc`. If True, a MPI library building
    block should be installed prior this building block. The default
    value is True.

    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `gzip`, `make`,
    `patch`, `tar`, `wget`, `zlib1g-dev`, `libxt-dev`,
    `libgl1-mesa-dev`, and `libglu1-mesa-dev`. For RHEL-based Linux
    distributions, the default values are `gzip`, `make`, `patch`,
    `tar`, `wget`, `which`, `zlib-devel`, `libXt-devel`,
    `libglvnd-devel`, `mesa-libGL-devel`, and `mesa-libGLU-devel`.

    parallel: The level of parallelism to use when building, passed
    to the build script as `--makeflags -j<parallel>`. The default
    value is `$(nproc)`.

    prefix: The top level install location. The default value is
    `/usr/local/visit`.

    system_cmake: Boolean flag to specify whether the system provided
    cmake should be used. If False, then the build script downloads a
    private copy of cmake. If True, then the build script option
    `--system-cmake` is added. If True, then the [cmake](#cmake)
    building block should be installed prior to this building block.
    The default is True.

    system_python: Boolean flag to specify whether the system provided
    python should be used. If False, then the build script downloads
    a private copy of python. If True, then the build script option
    `--system-python` is added. If True, then the [Python](#python)
    building block should be installed with development libraries
    prior to this building block. The default is True.

    thirdparty: Boolean flag to specify whether third-party components
    included by the build script should be retained. If True, then
    the build script option `--thirdparty-path` is added and set to
    `<prefix>/third-party`. The default is True.

    version: The version of Libsim source to download. The default
    value is `2.13.3`.

    wd: Working directory in which to download and build. The default
    value is `visit` under the HPCCM working directory.

    # Examples

    ```python
    libsim(prefix='/opt/libsim', version='2.13.3')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(libsim, self).__init__(**kwargs)

        self.__arch = None # Filled in by __cpu_arch()
        self.__buildscript = r'build_visit{0}'
        self.__mpi = kwargs.get('mpi', True)
        self.__opts = kwargs.get('build_opts',
                                 ['--xdb', '--server-components-only'])
        self.__ospackages = kwargs.get('ospackages', [])
        self.__parallel = kwargs.get('parallel', '$(nproc)')
        self.__prefix = kwargs.get('prefix', '/usr/local/visit')
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__system_cmake = kwargs.get('system_cmake', True)
        self.__system_python = kwargs.get('system_python', True)
        self.__thirdparty = kwargs.get('thirdparty', True)
        self.__version = kwargs.get('version', '2.13.3')
        self.__url = r'http://portal.nersc.gov/project/visit/releases/{0}/{1}'

        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', posixpath.join(
            hpccm.config.g_wd, 'visit')) # working directory

        # Set the CPU architecture specific parameters
        self.__cpu_arch()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('VisIt libsim version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())

    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            # Bug in the VisIt build config
            self.__arch = 'linux-intel'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            self.__arch = 'linux-x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['gzip', 'make', 'patch', 'tar', 'wget',
                                     'zlib1g-dev', 'libxt-dev',
                                     'libgl1-mesa-dev', 'libglu1-mesa-dev']
            self.__runtime_ospackages = ['libxt6', 'libgl1-mesa-glx',
                                         'libglu1-mesa', 'zlib1g']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['gzip', 'make', 'patch', 'tar', 'wget',
                                     'which', 'zlib-devel', 'libXt-devel',
                                     'libglvnd-devel', 'mesa-libGL-devel',
                                     'mesa-libGLU-devel']
            self.__runtime_ospackages = ['libXt', 'libglvnd', 'mesa-libGL',
                                         'mesa-libGLU', 'zlib']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""

        # The download URL format contains MAJOR.MINOR.REVISION and
        # MAJOR_MINOR_REVISION
        buildscript = self.__buildscript.format(
            self.__version.replace('.', '_'))
        url = self.__url.format(self.__version, buildscript)

        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))

        # Set options.  Work on a copy of the options so that the
        # user-supplied build_opts list (aliased by self.__opts) is not
        # modified as a side effect.
        env = []
        opts = list(self.__opts)
        if self.__mpi:
            env.append('PAR_COMPILER=mpicc')
            opts.extend(['--parallel', '--no-icet'])
        if self.__parallel:
            opts.append('--makeflags -j{}'.format(self.__parallel))
        if self.__prefix:
            opts.append('--prefix {}'.format(self.__prefix))
        if self.__system_cmake:
            opts.append('--system-cmake')
        if self.__system_python:
            opts.append('--system-python')
        if self.__thirdparty:
            thirdparty_path = posixpath.join(self.__prefix, 'third-party')
            opts.append('--thirdparty-path {}'.format(thirdparty_path))
            self.__commands.append('mkdir -p {}'.format(thirdparty_path))

        # Build
        self.__commands.append('cd {0} && {1} bash {2} {3}'.format(
            self.__wd, ' '.join(env), buildscript, ' '.join(opts)))

        # Set library path
        libpath = posixpath.join(self.__prefix, self.__version, self.__arch)
        suffix1 = 'lib'
        suffix2 = posixpath.join('libsim', 'V2', 'lib')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(libpath, suffix1)))
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(libpath, suffix2)))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{0}:{1}:$LD_LIBRARY_PATH'.format(posixpath.join(libpath, suffix1), posixpath.join(libpath, suffix2))

        # Cleanup
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd)]))

        # Set the environment
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        l = libsim(...)
        Stage0 += l
        Stage1 += l.runtime()
        ```
        """
        self.rt += comment('VisIt libsim')
        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += copy(_from=_from, src=self.__prefix, dest=self.__prefix)
        if self.ldconfig:
            libpath = posixpath.join(self.__prefix, self.__version,
                                     self.__arch)
            suffix1 = 'lib'
            suffix2 = posixpath.join('libsim', 'V2', 'lib')
            self.rt += shell(commands=[
                self.ldcache_step(
                    directory=posixpath.join(libpath, suffix1)),
                self.ldcache_step(
                    directory=posixpath.join(libpath, suffix2))])
        self.rt += environment(variables=self.environment_step())
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/libsim.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""LLVM compiler building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion, StrictVersion
import logging
import hpccm.config
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class llvm(bb_base, hpccm.templates.envvars):
    """The `llvm` building block installs the LLVM compilers (clang and
    clang++) from the upstream Linux distribution.

    As a side effect, a toolchain is created containing the LLVM
    compilers.  A toolchain can be passed to other operations that
    want to build using the LLVM compilers.

    # Parameters

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH` and `PATH`) should be modified to
    include the LLVM compilers when necessary. The default is True.

    extra_tools: Boolean flag to specify whether to also install
    `clang-format` and `clang-tidy`. The default is False.

    openmp: Boolean flag to specify whether to also install OpenMP
    support. The default is True.

    toolset: Boolean flag to specify whether to also install the
    full LLVM toolset. The default is False.

    upstream: Boolean flag to specify whether to use the [upstream LLVM packages](https://apt.llvm.org).
    This option is ignored if the base image is not Ubuntu-based.

    version: The version of the LLVM compilers to install. Note that
    the version refers to the Linux distribution packaging, not the
    actual compiler version. For RHEL-based 8.x Linux distributions,
    the version is ignored. The default is an empty value.

    # Examples

    ```python
    llvm()
    ```

    ```python
    llvm(version='7')
    ```

    ```python
    llvm(upstream=True, version='11')
    ```

    ```python
    l = llvm()
    openmpi(..., toolchain=l.toolchain, ...)
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(llvm, self).__init__(**kwargs)
        self.__apt_keys = [] # Filled in below
        self.__apt_repositories = [] # Filled in below
        self.__commands = [] # Filled in below
        self.__compiler_debs = [] # Filled in below
        self.__compiler_rpms = [] # Filled in below
        self.__extra_tools = kwargs.get('extra_tools', False)
        self.__openmp = kwargs.get('openmp', True)
        # NOTE(review): any user supplied 'ospackages' value is
        # unconditionally overwritten in __setup() below
        self.__ospackages = kwargs.get('ospackages', []) # Filled in below
        self.__runtime_debs = [] # Filled in below
        self.__runtime_ospackages = [] # Filled in below
        self.__runtime_rpms = [] # Filled in below
        self.__toolset = kwargs.get('toolset', False)
        # Current LLVM trunk version
        self.__trunk_version = kwargs.get('_trunk_version', '17')
        self.__upstream = kwargs.get('upstream', False)
        self.__version = kwargs.get('version', None)
        # Output toolchain
        self.toolchain = toolchain()
        self.toolchain.CC = 'clang'
        self.toolchain.CFLAGS = hpccm.config.get_cpu_optimization_flags('clang')
        self.toolchain.CXX = 'clang++'
        self.toolchain.CXXFLAGS = hpccm.config.get_cpu_optimization_flags('clang')
        # Set the packages to install based on the Linux distribution
        # and CPU architecture
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __setup(self):
        """Based on the Linux distribution and CPU architecture, set values
        accordingly."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            self.__ospackages = []
            # When using the upstream repositories with no explicit
            # version, default to the current trunk version
            if self.__upstream and not self.__version:
                self.__version = self.__trunk_version
            if self.__version:
                if LooseVersion(self.__version) <= LooseVersion('6.0'):
                    self.__compiler_debs = ['clang-{}'.format(self.__version)]
                    self.__runtime_debs = [
                        'libclang1-{}'.format(self.__version)]
                    # Versioned OpenMP libraries do not exist for
                    # older versions
                    if self.__openmp:
                        self.__compiler_debs.append('libomp-dev')
                        self.__runtime_debs.append('libomp5')
                else:
                    self.__compiler_debs = ['clang-{}'.format(self.__version)]
                    self.__runtime_debs = [
                        'libclang1-{}'.format(self.__version)]
                    if self.__openmp:
                        self.__compiler_debs.append(
                            'libomp-{}-dev'.format(self.__version))
                        self.__runtime_debs.append(
                            'libomp5-{}'.format(self.__version))
                if self.__upstream:
                    # Upstream packages from apt.llvm.org
                    if hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
                        raise RuntimeError('LLVM upstream builds are not available for ppc64le')
                    self.__apt_keys = ['https://apt.llvm.org/llvm-snapshot.gpg.key']
                    self.__apt_repositories = self.__upstream_package_repos()
                    self.__runtime_debs.append(
                        'llvm-{}-runtime'.format(self.__version))
                    self.__ospackages = ['apt-transport-https',
                                         'ca-certificates',
                                         'gnupg', 'wget']
                    self.__runtime_ospackages = self.__ospackages
                # Setup the environment so that the alternate compiler
                # version is the new default
                self.__commands.append('update-alternatives --install /usr/bin/clang clang $(which clang-{}) 30'.format(self.__version))
                self.__commands.append('update-alternatives --install /usr/bin/clang++ clang++ $(which clang++-{}) 30'.format(self.__version))
                # Install and configure clang-format and clang-tidy
                if self.__toolset or self.__extra_tools:
                    self.__compiler_debs.extend([
                        'clang-format-{}'.format(self.__version),
                        'clang-tidy-{}'.format(self.__version)])
                    self.__commands.append('update-alternatives --install /usr/bin/clang-format clang-format $(which clang-format-{}) 30'.format(self.__version))
                    self.__commands.append('update-alternatives --install /usr/bin/clang-tidy clang-tidy $(which clang-tidy-{}) 30'.format(self.__version))
                # Install and configure all packages
                if self.__toolset:
                    self.__compiler_debs.extend([
                        'clang-tools-{}'.format(self.__version),
                        'libc++-{}-dev'.format(self.__version),
                        'libc++1-{}'.format(self.__version),
                        'libc++abi1-{}'.format(self.__version),
                        'libclang-{}-dev'.format(self.__version),
                        'libclang1-{}'.format(self.__version),
                        'liblldb-{}-dev'.format(self.__version),
                        'lld-{}'.format(self.__version),
                        'lldb-{}'.format(self.__version),
                        'llvm-{}-dev'.format(self.__version),
                        'llvm-{}-runtime'.format(self.__version),
                        'llvm-{}'.format(self.__version)])
                    self.__commands.append('update-alternatives --install /usr/bin/lldb lldb $(which lldb-{}) 30'.format(self.__version))
                    self.__commands.append('update-alternatives --install /usr/bin/llvm-config llvm-config $(which llvm-config-{}) 30'.format(self.__version))
                    self.__commands.append('update-alternatives --install /usr/bin/llvm-cov llvm-cov $(which llvm-cov-{}) 30'.format(self.__version))
            else:
                # Distro default
                self.__compiler_debs = ['clang']
                self.__runtime_debs = ['libclang1']
                if self.__openmp:
                    self.__compiler_debs.append('libomp-dev')
                    self.__runtime_debs.append('libomp5')
                if self.__toolset or self.__extra_tools:
                    self.__compiler_debs.extend(['clang-format', 'clang-tidy'])
                if self.__toolset:
                    self.__compiler_debs.extend([
                        'libc++-dev',
                        'libc++1',
                        'libc++abi1',
                        'libclang-dev',
                        'libclang1',
                        'lldb',
                        'llvm-dev',
                        'llvm-runtime',
                        'llvm'])
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            # Dependencies on the GNU compiler
            self.__ospackages = ['gcc', 'gcc-c++']
            # Version that appears in paths below
            compiler_version = ''
            if self.__version:
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    # Multiple versions are not available for CentOS 8
                    self.__compiler_rpms = ['clang', 'llvm-libs']
                    self.__runtime_rpms = ['llvm-libs']
                    compiler_version = '8'
                    if self.__openmp:
                        self.__compiler_rpms.append('libomp')
                        self.__runtime_rpms.append('libomp')
                    if self.__toolset or self.__extra_tools:
                        self.__compiler_rpms.append('clang-tools-extra')
                    if self.__toolset:
                        self.__compiler_rpms.append('llvm-toolset')
                else:
                    # CentOS 7: versioned packages come from the
                    # llvm-toolset software collections (scl)
                    self.__compiler_rpms = [
                        'llvm-toolset-{}-clang'.format(self.__version)]
                    self.__runtime_rpms = [
                        'llvm-toolset-{}-runtime'.format(self.__version),
                        'llvm-toolset-{}-compiler-rt'.format(self.__version)]
                    compiler_version = '4.8.2'
                    if self.__openmp:
                        self.__compiler_rpms.append(
                            'llvm-toolset-{}-libomp-devel'.format(self.__version))
                        self.__runtime_rpms.append(
                            'llvm-toolset-{}-libomp'.format(self.__version))
                    if self.__toolset or self.__extra_tools:
                        self.__compiler_rpms.append('llvm-toolset-{}-clang-tools-extra'.format(self.__version))
                    if self.__toolset:
                        self.__compiler_rpms.append('llvm-toolset-{}'.format(self.__version))
                    # Setup environment for devtoolset
                    self.environment_variables['PATH'] = '/opt/rh/llvm-toolset-{}/root/usr/bin:$PATH'.format(self.__version)
                    self.environment_variables['LD_LIBRARY_PATH'] = '/opt/rh/llvm-toolset-{}/root/usr/lib64:$LD_LIBRARY_PATH'.format(self.__version)
            else:
                # Distro default
                self.__compiler_rpms = ['clang']
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    # CentOS 8
                    self.__runtime_rpms = ['llvm-libs']
                    compiler_version = '8'
                    if self.__openmp:
                        self.__runtime_rpms.append('libomp')
                    if self.__toolset or self.__extra_tools:
                        self.__compiler_rpms.append('clang-tools-extra')
                    if self.__toolset:
                        self.__compiler_rpms.append('llvm-toolset')
                else:
                    # CentOS 7
                    self.__runtime_rpms = ['llvm-libs', 'libgomp']
                    compiler_version = '4.8.2'
                    if self.__extra_tools: # pragma: no cover
                        logging.warning('llvm extra tools are not available for default CentOS 7, specify a LLVM version')
                    if self.__toolset:
                        self.__compiler_rpms.append('llvm')
            # The default llvm configuration for CentOS is unable to
            # locate some gcc components. Setup the necessary gcc
            # environment.
            if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
                self.environment_variables['COMPILER_PATH'] = '/usr/lib/gcc/aarch64-redhat-linux/{}:$COMPILER_PATH'.format(compiler_version)
                self.environment_variables['CPATH'] = '/usr/include/c++/{0}:/usr/include/c++/{0}/aarch64-redhat-linux:/usr/lib/gcc/aarch64-redhat-linux/{0}/include:$CPATH'.format(compiler_version)
                self.environment_variables['LIBRARY_PATH'] = '/usr/lib/gcc/aarch64-redhat-linux/{}'.format(compiler_version)
            elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
                self.environment_variables['CPATH'] = '/usr/lib/gcc/x86_64-redhat-linux/{}/include:$CPATH'.format(compiler_version)
            else:
                # Packages for CentOS + PPC64LE are not available
                raise RuntimeError('Unsupported processor architecture')
        else: # pragma: no cover
            raise RuntimeError('unknown Linux distribution')
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('LLVM compiler')
        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)
        self += packages(apt=self.__compiler_debs,
                         apt_keys=self.__apt_keys,
                         apt_repositories=self.__apt_repositories,
                         scl=bool(self.__version), # True / False
                         yum=self.__compiler_rpms)
        if self.__commands:
            self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __upstream_package_repos(self):
        """Return the package repositories for the given distro and llvm
        version.  The development branch repositories are not
        versioned and must be handled differently; the current
        development branch version is given by self.__trunk_version."""
        codename = 'xenial'
        codename_ver = 'xenial'
        if (hpccm.config.g_linux_version >= StrictVersion('22.0') and
            hpccm.config.g_linux_version < StrictVersion('23.0')):
            codename = 'jammy'
            if self.__version == self.__trunk_version:
                codename_ver = 'jammy'
            else:
                codename_ver = 'jammy-{}'.format(self.__version)
        elif (hpccm.config.g_linux_version >= StrictVersion('20.0') and
              hpccm.config.g_linux_version < StrictVersion('21.0')):
            codename = 'focal'
            if self.__version == self.__trunk_version:
                codename_ver = 'focal'
            else:
                codename_ver = 'focal-{}'.format(self.__version)
        elif (hpccm.config.g_linux_version >= StrictVersion('18.0') and
              hpccm.config.g_linux_version < StrictVersion('19.0')):
            codename = 'bionic'
            if self.__version == self.__trunk_version:
                codename_ver = 'bionic'
            else:
                codename_ver = 'bionic-{}'.format(self.__version)
        elif (hpccm.config.g_linux_version >= StrictVersion('16.0') and
              hpccm.config.g_linux_version < StrictVersion('17.0')):
            codename = 'xenial'
            if self.__version == self.__trunk_version:
                codename_ver = 'xenial'
            else:
                codename_ver = 'xenial-{}'.format(self.__version)
        else: # pragma: no cover
            raise RuntimeError('Unsupported Ubuntu version')
        return [
            'deb http://apt.llvm.org/{0}/ llvm-toolchain-{1} main'.format(codename, codename_ver),
            'deb-src http://apt.llvm.org/{0}/ llvm-toolchain-{1} main'.format(codename, codename_ver)]
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        l = llvm(...)
        Stage0 += l
        Stage1 += l.runtime()
        ```
        """
        self.rt += comment('LLVM compiler runtime')
        if self.__runtime_ospackages:
            self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += packages(apt=self.__runtime_debs,
                            apt_keys=self.__apt_keys,
                            apt_repositories=self.__apt_repositories,
                            scl=bool(self.__version), # True / False
                            yum=self.__runtime_rpms)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/llvm.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Mellanox OFED building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion, StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.annotate
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.label import label
from hpccm.primitives.shell import shell
class mlnx_ofed(bb_base, hpccm.templates.annotate, hpccm.templates.rm,
                hpccm.templates.tar, hpccm.templates.wget):
    """The `mlnx_ofed` building block downloads and installs the [Mellanox
    OpenFabrics Enterprise Distribution for
    Linux](http://www.mellanox.com/page/products_dyn?product_family=26).

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    oslabel: The Linux distribution label assigned by Mellanox to the
    tarball. For Ubuntu, the default value is `ubuntu16.04`. For
    RHEL-based Linux distributions, the default value is `rhel7.2` for
    x86_64 processors and `rhel7.6alternate` for aarch64 processors.

    ospackages: List of OS packages to install prior to installing
    OFED. For Ubuntu, the default values are `findutils`,
    `libnl-3-200`, `libnl-route-3-200`, `libnuma1`, and `wget`. For
    RHEL-based 7.x distributions, the default values are `findutils`,
    `libnl`, `libnl3`, `numactl-libs`, and `wget`. For RHEL-based 8.x
    distributions, the default values are `findutils`, `libnl3`,
    `numactl-libs`, and `wget`.

    packages: List of packages to install from Mellanox OFED. For
    version 5.0 and later on Ubuntu, `ibverbs-providers`,
    `ibverbs-utils` `libibmad-dev`, `libibmad5`, `libibumad3`,
    `libibumad-dev`, `libibverbs-dev` `libibverbs1`, `librdmacm-dev`,
    and `librdmacm1`. For earlier versions on Ubuntu, the default
    values are `libibverbs1`, `libibverbs-dev`, `libibmad`,
    `libibmad-devel`, `libibumad`, `libibumad-devel`, `libmlx4-1`,
    `libmlx4-dev`, `libmlx5-1`, `libmlx5-dev`, `librdmacm1`,
    `librdmacm-dev`, and `ibverbs-utils`. For version 5.0 and later
    on RHEL-based Linux distributions, the default values are
    `libibumad`, `libibverbs`, `libibverbs-utils`, `librdmacm`,
    `rdma-core`, and `rdma-core-devel`. For earlier versions on
    RHEL-based Linux distributions, the default values are
    `libibverbs`, `libibverbs-devel`, `libibverbs-utils`, `libibmad`,
    `libibmad-devel`, `libibumad`, `libibumad-devel`, `libmlx4`,
    `libmlx4-devel`, `libmlx5`, `libmlx5-devel`, `librdmacm`, and
    `librdmacm-devel`.

    prefix: The top level install location. Instead of installing the
    packages via the package manager, they will be extracted to this
    location. This option is useful if multiple versions of Mellanox
    OFED need to be installed. The environment must be manually
    configured to recognize the Mellanox OFED location, e.g., in the
    container entry point. The default value is empty, i.e., install
    via the package manager to the standard system locations.

    symlink: Boolean flag to specify whether to create `bin`,
    `include`, and `lib` symlinks inside `prefix` that point into the
    extracted package tree. Only applicable when `prefix` is set. The
    default value is False.

    version: The version of Mellanox OFED to download. The default
    value is `5.6-2.0.9.0`.

    # Examples

    ```python
    mlnx_ofed(version='4.2-1.0.0.0')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(mlnx_ofed, self).__init__(**kwargs)

        self.__deppackages = [] # Filled in by __distro()
        self.__key = 'https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox'
        self.__oslabel = kwargs.get('oslabel', '')
        self.__ospackages = kwargs.get('ospackages',
                                       ['ca-certificates', 'gnupg', 'wget'])
        self.__packages = kwargs.get('packages', [])
        self.__prefix = kwargs.get('prefix', None)
        self.__symlink = kwargs.get('symlink', False)
        self.__version = kwargs.get('version', '5.6-2.0.9.0')

        # Add annotation
        self.add_annotation('version', self.__version)

        # Set the Linux distribution specific parameters
        self.__distro()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('Mellanox OFED version {}'.format(self.__version))
        if self.__prefix:
            self += packages(ospackages=self.__deppackages + self.__ospackages)
        else:
            self += packages(ospackages=self.__ospackages)
        self += packages(
            apt_keys=[self.__key],
            apt_repositories=['https://linux.mellanox.com/public/repo/mlnx_ofed/{0}/{1}/mellanox_mlnx_ofed.list'.format(self.__version, self.__oslabel)],
            download=bool(self.__prefix),
            extract=self.__prefix,
            ospackages=self.__packages,
            yum_keys=[self.__key],
            yum_repositories=['https://linux.mellanox.com/public/repo/mlnx_ofed/{0}/{1}/mellanox_mlnx_ofed.repo'.format(self.__version, self.__oslabel)])
        if self.__prefix:
            commands = []
            if self.__symlink:
                commands.append('mkdir -p {0} && cd {0}'.format(
                    posixpath.join(self.__prefix, 'lib')))
                # Prune the symlink directory itself and any debug
                # libraries.  Use a raw string so that '\;' is not
                # treated as an (invalid) escape sequence.
                commands.append(r'find .. -path ../lib -prune -o -name "*valgrind*" -prune -o -name "lib*.so*" -exec ln -s {} \;')
                commands.append('cd {0} && ln -s usr/bin bin && ln -s usr/include include'.format(
                    self.__prefix))

            # Suppress warnings from libibverbs
            commands.append('mkdir -p /etc/libibverbs.d')

            self += shell(commands=commands)
        self += label(metadata=self.annotate_step())

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""

        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            self.__deppackages = ['libnl-3-200', 'libnl-route-3-200',
                                  'libnuma1']

            if not self.__oslabel:
                if hpccm.config.g_linux_version >= StrictVersion('22.0'):
                    self.__oslabel = 'ubuntu22.04'
                elif hpccm.config.g_linux_version >= StrictVersion('20.0'):
                    self.__oslabel = 'ubuntu20.04'
                elif hpccm.config.g_linux_version >= StrictVersion('18.0'):
                    self.__oslabel = 'ubuntu18.04'
                else:
                    self.__oslabel = 'ubuntu16.04'
            if not self.__packages:
                if LooseVersion(self.__version) >= LooseVersion('5.0'):
                    # Uses UPSTREAM libs
                    self.__packages = ['libibverbs1', 'libibverbs-dev',
                                       'ibverbs-providers', 'ibverbs-utils',
                                       'libibmad5', 'libibmad-dev',
                                       'libibumad3', 'libibumad-dev',
                                       'librdmacm-dev', 'librdmacm1']
                else:
                    # Uses MLNX_OFED libs
                    self.__packages = ['libibverbs1', 'libibverbs-dev',
                                       'ibverbs-utils',
                                       'libibmad', 'libibmad-devel',
                                       'libibumad', 'libibumad-devel',
                                       'libmlx4-1', 'libmlx4-dev',
                                       'libmlx5-1', 'libmlx5-dev',
                                       'librdmacm-dev', 'librdmacm1']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__deppackages = ['libnl3', 'numactl-libs']
            else:
                self.__deppackages = ['libnl', 'libnl3', 'numactl-libs']

            if not self.__oslabel:
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    self.__oslabel = 'rhel8.0'
                else:
                    if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
                        self.__oslabel = 'rhel7.6alternate'
                    else:
                        self.__oslabel = 'rhel7.2'
            if not self.__packages:
                if LooseVersion(self.__version) >= LooseVersion('5.0'):
                    # Uses UPSTREAM libs
                    self.__packages = ['libibverbs', 'libibverbs-utils',
                                       'libibumad', 'librdmacm',
                                       'rdma-core', 'rdma-core-devel']
                else:
                    # Uses MLNX_OFED libs
                    self.__packages = ['libibverbs', 'libibverbs-devel',
                                       'libibverbs-utils',
                                       'libibmad', 'libibmad-devel',
                                       'libibumad', 'libibumad-devel',
                                       'libmlx4', 'libmlx4-devel',
                                       'libmlx5', 'libmlx5-devel',
                                       'librdmacm-devel', 'librdmacm']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        m = mlnx_ofed(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        if self.__prefix:
            self.rt += comment('Mellanox OFED version {}'.format(
                self.__version))

            if self.__deppackages:
                self.rt += packages(ospackages=self.__deppackages)

            # Suppress warnings from libibverbs
            self.rt += shell(commands=['mkdir -p /etc/libibverbs.d'])

            self.rt += copy(_from=_from, dest=self.__prefix, src=self.__prefix)
            return str(self.rt)
        else:
            return str(self)
| hpc-container-maker-master | hpccm/building_blocks/mlnx_ofed.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Arm Allinea Studio building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging
import posixpath
import re
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class arm_allinea_studio(bb_base, hpccm.templates.envvars, hpccm.templates.rm,
                         hpccm.templates.tar, hpccm.templates.wget):
    """The `arm_allinea_studio` building block downloads and installs the
    [Arm Allinea
    Studio](https://developer.arm.com/tools-and-software/server-and-hpc/arm-architecture-tools/arm-allinea-studio).
    You must agree to the [Arm End User License Agreement](https://developer.arm.com/tools-and-software/server-and-hpc/arm-architecture-tools/arm-allinea-studio/licensing/eula)
    to use this building block.
    As a side effect, a toolchain is created containing the Arm
    Allinea Studio compilers. The toolchain can be passed to other
    operations that want to build using the Arm Allinea Studio
    compilers. However, the environment is not automatically
    configured for the Arm Allinea Studio compilers. The desired
    environment module must be manually loaded, e.g., `module load
    Generic-AArch64/RHEL/7/arm-linux-compiler/20.3`.
    # Parameters
    environment: Boolean flag to specify whether the environment
    (`MODULEPATH`) should be modified to include Arm Allinea
    Studio. The default is True.
    eula: By setting this value to `True`, you agree to the [Arm End User License Agreement](https://developer.arm.com/tools-and-software/server-and-hpc/arm-architecture-tools/arm-allinea-studio/licensing/eula).
    The default value is `False`.
    microarchitectures: List of microarchitectures to install.
    From 22.0 version, only `generic` is available.
    Available values are `generic`, `generic-sve` for version 21.1,
    and `neoverse-n1`, `thunderx2t99` are valid for versions <= 20.3.
    Irrespective of this setting, the generic implementation will
    always be installed.
    ospackages: List of OS packages to install prior to installing Arm
    Allinea Studio. For Ubuntu, the default values are `libc6-dev`,
    `lmod`, `python`, `tar`, `tcl`, and `wget`. For RHEL-based Linux
    distributions, the default values are `glibc-devel`, `Lmod`,
    `tar`, and `wget`.
    prefix: The top level install prefix. The default value is
    `/opt/arm`.
    tarball: Path to the Arm Allinea Studio tarball relative to the
    local build context. The default value is empty. If this is
    defined, the tarball in the local build context will be used
    rather than downloading the tarball from the web.
    version: The version of Arm Allinea Studio to install. The
    default value is `22.0`. Due to differences in the packaging
    scheme, versions prior to 20.2 are not supported.
    # Examples
    ```python
    arm_allinea_studio(eula=True,
                       microarchitectures=['generic', 'thunderx2t99'],
                       version='20.3')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(arm_allinea_studio, self).__init__(**kwargs)
        self.__baseurl = kwargs.get('baseurl',
                                    'https://developer.arm.com/-/media/Files/downloads/hpc/arm-allinea-studio')
        self.__commands = []  # Filled in by __setup()
        self.__directory_string = ''  # Filled in by __distro()
        # By setting this value to True, you agree to the
        # corresponding Arm Allinea Studio End User License Agreement
        # https://developer.arm.com/tools-and-software/server-and-hpc/arm-architecture-tools/arm-allinea-studio/licensing/eula
        self.__eula = kwargs.get('eula', False)
        self.__installer_template = ''  # Filled in by __distro()
        self.__microarchitectures = kwargs.get('microarchitectures',
                                               ['generic'])
        self.__ospackages = kwargs.get('ospackages', [])
        self.__package_string = ''  # Filled in by __distro()
        self.__prefix = kwargs.get('prefix', '/opt/arm')
        self.__tarball = kwargs.get('tarball', None)
        self.__version = kwargs.get('version', '22.0')
        self.__wd = kwargs.get('wd', hpccm.config.g_wd)  # working directory
        # Side effect: expose a toolchain so other building blocks can
        # compile with the Arm compilers (environment module still must
        # be loaded manually, see class docstring)
        self.toolchain = toolchain(CC='armclang', CXX='armclang++',
                                   F77='armflang', F90='armflang',
                                   FC='armflang')
        self.toolchain.CFLAGS = hpccm.config.get_cpu_optimization_flags('clang')
        self.toolchain.CXXFLAGS = hpccm.config.get_cpu_optimization_flags('clang')
        if hpccm.config.g_cpu_arch != cpu_arch.AARCH64: # pragma: no cover
            logging.warning('Using arm_allinea_studio on a non-aarch64 processor')
        # The EULA must be explicitly accepted; fail fast otherwise
        if not self.__eula:
            raise RuntimeError('Arm Allinea Studio EULA was not accepted. To accept, see the documentation for this building block')
        # Set the Linux distribution specific parameters
        self.__distro()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Arm Allinea Studio version {}'.format(self.__version))
        if self.__ospackages:
            # EPEL necessary for Lmod
            self += packages(epel=True, ospackages=self.__ospackages)
        if self.__tarball:
            # Use the tarball from the local build context instead of
            # downloading it
            self += copy(src=self.__tarball, dest=self.__wd)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        # Releases up to 20.3 used distribution-specific download URL
        # components (e.g., 'Ubuntu16.04', 'RHEL7'); newer releases use a
        # common 'ACfL' component.
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if StrictVersion(self.__version) <= StrictVersion('20.3'):
                self.__directory_string = 'Ubuntu-16.04'
                self.__package_string = 'Ubuntu-16.04'
                self.__url_string = 'Ubuntu16.04'
            elif hpccm.config.g_linux_version <= StrictVersion('18.04'):
                self.__directory_string = 'Ubuntu-18.04'
                self.__package_string = 'Ubuntu-18.04'
                self.__url_string = "ACfL"
            else:
                self.__directory_string = 'Ubuntu-20.04'
                self.__package_string = 'Ubuntu-20.04'
                self.__url_string = "ACfL"
            self.__installer_template = 'arm-compiler-for-linux_{{}}_{0}.sh'.format(self.__directory_string)
            # Ubuntu 22.04 renamed the Python 2 package to 'python2'
            if hpccm.config.g_linux_version >= StrictVersion('22.04'):
                python2_package = "python2"
            else:
                python2_package = "python"
            if not self.__ospackages:
                self.__ospackages = ['libc6-dev', 'lmod', python2_package, 'tar',
                                     'tcl', 'wget']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__directory_string = 'RHEL-8'
                self.__package_string = 'RHEL-8'
                if StrictVersion(self.__version) <= StrictVersion('20.3'):
                    self.__url_string = 'RHEL8'
                else:
                    self.__url_string = 'ACfL'
            else:
                self.__directory_string = 'RHEL-7'
                self.__package_string = 'RHEL-7'
                if StrictVersion(self.__version) <= StrictVersion('20.3'):
                    self.__url_string = 'RHEL7'
                else:
                    self.__url_string = 'ACfL'
            self.__installer_template = 'arm-compiler-for-linux_{{}}_{0}.sh'.format(self.__directory_string)
            if not self.__ospackages:
                self.__ospackages = ['Lmod', 'glibc-devel', 'tar', 'wget']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # Use a tarball. Figure out the version from the tarball name.
        if self.__tarball:
            tarball = posixpath.basename(self.__tarball)
            # Figure out the version from the tarball name, e.g.,
            # 'arm-compiler-for-linux_22.0...' -> '22.0' (a leading zero
            # in the minor component is dropped by the regex)
            match = re.match(r'arm-compiler-for-linux_(?P<year>\d\d)\.0?(?P<month>[0-9][0-9]?)',
                             tarball)
            if match and match.groupdict()['year'] and match.groupdict()['month']:
                self.__version = '{0}.{1}'.format(match.groupdict()['year'],
                                                  match.groupdict()['month'])
        else:
            # The download URL has the format MAJOR-MINOR in the path
            # and the tarball contains MAJOR.MINOR, so pull apart the
            # full version to get the individual components.
            match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)', self.__version)
            major_minor = '{0}-{1}'.format(match.groupdict()['major'],
                                           match.groupdict()['minor'])
            tarball = 'arm-compiler-for-linux_{0}_{1}_aarch64.tar'.format(
                self.__version, self.__package_string)
            url = '{0}/{1}/{2}/{3}'.format(self.__baseurl, major_minor,
                                           self.__url_string, tarball)
            # Download source from web
            self.__commands.append(self.download_step(url=url,
                                                      directory=self.__wd))
        # Untar package
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))
        # Install
        install_args = ['--install-to {}'.format(self.__prefix)]
        if self.__eula:
            # Pass the EULA acceptance through to the installer
            install_args.append('--accept')
        # The microarchitecture selection flag was removed from the
        # installer after 20.3
        if self.__microarchitectures and StrictVersion(self.__version) <= StrictVersion('20.3'):
            install_args.append('--only-install-microarchitectures={}'.format(
                ','.join(self.__microarchitectures)))
        # Starting with 21.1 the unpacked directory no longer carries an
        # '_aarch64' suffix
        if StrictVersion(self.__version) >= StrictVersion("21.1"):
            arch_string = ""
        else:
            arch_string = "_aarch64"
        package_directory = 'arm-compiler-for-linux_{0}_{1}{2}'.format(self.__version, self.__package_string, arch_string)
        self.__commands.append('cd {0} && ./{1} {2}'.format(
            posixpath.join(self.__wd, package_directory),
            self.__installer_template.format(self.__version),
            ' '.join(install_args)))
        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, package_directory)]))
        # Set environment: make the installed environment modules visible
        self.environment_variables['MODULEPATH'] = '{}:$MODULEPATH'.format(
            posixpath.join(self.__prefix, 'modulefiles'))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        a = arm_allinea_compiler(...)
        Stage0 += a
        Stage1 += a.runtime()
        ```
        """
        self.rt += comment('Arm Allinea Studio')
        paths = []
        # Redistributable libraries from redistributables.txt
        # The allowed list of redistributable libraries does not
        # include all Arm Allinea Studio libraries that get typically
        # linked; consider using '-static-arm-libs'.
        # OpenMP and Fortran runtime libraries
        compiler_redist_path = posixpath.join(
            self.__prefix,
            'arm-linux-compiler-{0}_Generic-AArch64_{1}_aarch64-linux'.format(
                self.__version, self.__directory_string),
            'lib')
        paths.append(compiler_redist_path)
        self.rt += copy(_from=_from,
                        src=[posixpath.join(compiler_redist_path, lib)
                             for lib in ['libgomp.so', 'libiomp5.so',
                                         'libomp.so', 'libflang.so',
                                         'libflangrti.so']],
                        dest=posixpath.join(compiler_redist_path, ''))
        # Performance libraries
        # Map of building block microarchitecture names to the directory
        # naming scheme used by each release.
        # NOTE(review): only versions 20.3, 21.1, and 22.0 have entries
        # here; calling runtime() with any other version (e.g. 20.2, which
        # the class docstring says is supported) raises KeyError -- confirm
        # whether additional version entries are needed.
        microarch_string = {
            '20.3': {
                'generic': 'Generic-AArch64',
                'generic-sve': 'Generic-SVE',
                'neoverse-n1': 'Neoverse-N1',
                'thunderx2t99': 'ThunderX2CN99'
            },
            '21.1': {
                'generic': 'AArch64',
                'generic-sve': 'AArch64-SVE'
            },
            '22.0': {
                'generic': 'AArch64'
            }
        }
        for microarch in self.__microarchitectures:
            # ArmPL ships one tree per (microarchitecture, compiler) pair;
            # copy the redistributable libraries from both the Arm compiler
            # and the GCC variants
            armpl_arm_redist_path = posixpath.join(
                self.__prefix,
                'armpl-{0}.0_{1}_{2}_arm-linux-compiler_aarch64-linux'.format(
                    self.__version, microarch_string[self.__version][microarch],
                    self.__directory_string),
                'lib')
            paths.append(armpl_arm_redist_path)
            self.rt += copy(_from=_from,
                            src=[posixpath.join(armpl_arm_redist_path, lib)
                                 for lib in ['libamath.so',
                                             'libastring.so']],
                            dest=posixpath.join(armpl_arm_redist_path, ''))
            armpl_gcc_redist_path = posixpath.join(
                self.__prefix,
                'armpl-{0}.0_{1}_{2}_gcc_aarch64-linux'.format(
                    self.__version, microarch_string[self.__version][microarch],
                    self.__directory_string),
                'lib')
            paths.append(armpl_gcc_redist_path)
            self.rt += copy(_from=_from,
                            src=[posixpath.join(armpl_gcc_redist_path, lib)
                                 for lib in ['libamath.so',
                                             'libastring.so']],
                            dest=posixpath.join(armpl_gcc_redist_path, ''))
        paths.append('$LD_LIBRARY_PATH') # tack on existing value at end
        self.runtime_environment_variables['LD_LIBRARY_PATH'] = ':'.join(paths)
        self.rt += environment(variables=self.environment_step(runtime=True))
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/arm_allinea_studio.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""yum building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging # pylint: disable=unused-import
import posixpath
import hpccm.config
from hpccm.building_blocks.base import bb_base
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.shell import shell
class yum(bb_base):
    """The `yum` building block specifies the set of operating system
    packages to install. This building block should only be used on
    images that use the Red Hat package manager (e.g., CentOS).
    In most cases, the [`packages` building block](#packages) should
    be used instead of `yum`.
    # Parameters
    download: Boolean flag to specify whether to download the rpm
    packages instead of installing them. The default is False.
    download_args: Additional arguments passed to `yumdownloader`
    when downloading packages. On x86_64 the default excludes 32-bit
    (i?86) packages; otherwise the default is an empty string. This
    parameter is ignored if `download` is False.
    download_directory: The rpm package download location. This
    parameter is ignored if `download` is False. The default value is
    `/var/tmp/yum_download`.
    epel: Boolean flag to specify whether to enable the Extra
    Packages for Enterprise Linux (EPEL) repository. The default is
    False.
    extra_opts: List of additional options to pass to `yum install`
    (also appended to the `yumdownloader` arguments when `download`
    is True). The default is an empty list.
    extract: Location where the downloaded packages should be
    extracted. Note, this extracts and does not install the packages,
    i.e., the package manager is bypassed. After the downloaded
    packages are extracted they are deleted. This parameter is ignored
    if `download` is False. If empty, then the downloaded packages are
    not extracted. The default value is an empty string.
    force_add_repo: Boolean flag to specify whether adding a
    repository should be considered successful no matter the actual
    result. The default value is False.
    keys: A list of GPG keys to import. The default is an empty list.
    ospackages: A list of packages to install. The default is an
    empty list.
    powertools: Boolean flag to specify whether to enable the
    PowerTools repository. The default is False. This parameter is
    only recognized if the distribution version is 8.x.
    release_stream: Boolean flag to specify whether to enable the [CentOS release stream](https://wiki.centos.org/Manuals/ReleaseNotes/CentOSStream)
    repository. The default is False. This parameter is only
    recognized if the distribution version is 8.x.
    repositories: A list of yum repositories to add. The default is
    an empty list.
    scl: Boolean flag to specify whether to enable the Software
    Collections (SCL) repository. The default is False. This
    parameter is only recognized if the distribution version is 7.x.
    yum4: Boolean flag to specify whether `yum4` should be used
    instead of `yum`. The default is False. This parameter is only
    recognized if the distribution version is 7.x.
    # Examples
    ```python
    yum(ospackages=['make', 'wget'])
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(yum, self).__init__()
        self.__commands = []  # Shell commands, filled in by __setup()
        self.__download = kwargs.get('download', False)
        self.__download_args = kwargs.get('download_args', '')
        self.__download_directory = kwargs.get(
            'download_directory',
            posixpath.join(hpccm.config.g_wd, 'yum_download'))
        self.__epel = kwargs.get('epel', False)
        self.__extra_opts = kwargs.get('extra_opts', [])
        self.__extract = kwargs.get('extract', None)
        self.__force_add_repo = kwargs.get('force_add_repo', False)
        self.__keys = kwargs.get('keys', [])
        self.__opts = ['-y']  # Options always passed to 'yum install'
        self.ospackages = kwargs.get('ospackages', [])
        self.__powertools = kwargs.get('powertools', False)
        self.__release_stream = kwargs.get('release_stream', False)
        self.__repositories = kwargs.get('repositories', [])
        self.__scl = kwargs.get('scl', False)
        self.__yum4 = kwargs.get('yum4', False)
        if hpccm.config.g_linux_distro != linux_distro.CENTOS: # pragma: no cover
            logging.warning('Using yum on a non-RHEL based Linux distribution')
        # Set the CPU architecture specific parameters
        self.__cpu_arch()
        # Construct the series of commands that form the building
        # block
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += shell(chdir=False, commands=self.__commands)
    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            if not self.__download_args:
                # Skip 32-bit (i?86) packages when downloading on x86_64
                self.__download_args = '-x \*i?86 --archlist=x86_64'
    def __setup(self):
        """Construct the series of commands to execute"""
        if self.__extra_opts:
            # Apply user options to both the install and download paths
            self.__download_args += ' ' + ' '.join(self.__extra_opts)
            self.__opts.extend(self.__extra_opts)
        # Use yum version 4 if requested. yum 4 is the default on
        # CentOS 8.
        yum = 'yum'
        if self.__yum4 and hpccm.config.g_linux_version < StrictVersion('8.0'):
            self.__commands.append('yum install -y nextgen-yum4')
            yum = 'yum4'
        if self.__keys:
            self.__commands.append('rpm --import {}'.format(
                ' '.join(self.__keys)))
        if self.__repositories:
            # Need yum-config-manager
            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                # CentOS 8
                self.__commands.append('yum install -y dnf-utils')
            else:
                # CentOS 7
                self.__commands.append('yum install -y yum-utils')
            for repo in self.__repositories:
                if self.__force_add_repo:
                    # Ignore failures when adding the repository
                    self.__commands.append(
                        '(yum-config-manager --add-repo {} || true)'.format(repo))
                else:
                    self.__commands.append(
                        'yum-config-manager --add-repo {}'.format(repo))
        if self.__epel:
            # This needs to be a discrete, preliminary step so that
            # packages from EPEL are available to be installed.
            self.__commands.append('yum install -y epel-release')
        if (self.__powertools and
            hpccm.config.g_linux_version >= StrictVersion('8.0')):
            # This needs to be a discrete, preliminary step so that
            # packages from PowerTools are available to be installed.
            if not self.__repositories:
                # dnf-utils will be installed above if repositories are
                # enabled
                self.__commands.append('yum install -y dnf-utils')
            self.__commands.append('yum-config-manager --set-enabled powertools')
        if (self.__release_stream and
            hpccm.config.g_linux_version >= StrictVersion('8.0')):
            # This needs to be a discrete, preliminary step so that
            # packages from release stream are available to be installed.
            self.__commands.append('yum install -y centos-release-stream')
        if (self.__scl and
            hpccm.config.g_linux_version < StrictVersion('8.0')):
            # This needs to be a discrete, preliminary step so that
            # packages from SCL are available to be installed.
            self.__commands.append('yum install -y centos-release-scl')
        if self.ospackages:
            # One package per continuation line, sorted for reproducibility
            packages = []
            for pkg in sorted(self.ospackages):
                packages.append(' {}'.format(pkg))
            if self.__download:
                # Download packages
                # Need yumdownloader
                self.__commands.append('yum install -y yum-utils')
                self.__commands.append('mkdir -p {0}'.format(
                    self.__download_directory))
                download = 'yumdownloader --destdir={0} {1} \\\n'.format(
                    self.__download_directory, self.__download_args)
                download = download + ' \\\n'.join(packages)
                self.__commands.append(download)
                if self.__extract:
                    # Extract the packages to a prefix - not a "real"
                    # package manager install
                    self.__commands.append('mkdir -p {0} && cd {0}'.format(
                        self.__extract))
                    # Match only the rpms of the requested packages
                    regex = posixpath.join(
                        self.__download_directory,
                        '(' + '|'.join(sorted(self.ospackages)) + ').*rpm')
                    self.__commands.append('find {0} -regextype posix-extended -type f -regex "{1}" -exec sh -c "rpm2cpio {{}} | cpio -idm" \;'.format(self.__download_directory, regex))
                    # Cleanup downloaded packages
                    self.__commands.append(
                        'rm -rf {}'.format(self.__download_directory))
            else:
                # Install packages
                install = '{0} install {1} \\\n'.format(yum,
                                                        ' '.join(self.__opts))
                install = install + ' \\\n'.join(packages)
                self.__commands.append(install)
        if self.__epel or self.ospackages:
            # Keep the image small by clearing the package cache
            self.__commands.append('rm -rf /var/cache/yum/*')
| hpc-container-maker-master | hpccm/building_blocks/yum.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""AmgX building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class amgx(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `amgx` building block downloads, configures, builds, and
    installs the [AMGX](https://developer.nvidia.com/amgx) component.
    The [CMake](#cmake) building block should be installed prior to
    this building block.
    Installing an MPI building block before this one is optional and
    will build the [AMGX](https://developer.nvidia.com/amgx) library
    with MPI support. Some Eigensolvers make use of the MAGMA and/or
    MKL libraries and are only available if the paths to these
    libraries is specified as shown below in the cmake_opts.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations (labels).
    The default is False.
    branch: The git branch to clone. AMGX releases are tagged, that
    is, specifying `branch='v2.1.0'` downloads a particular AMGX
    version. The default is `master`.
    cmake_opts: List of options to pass to `cmake`. The default value is an empty list. See the ["Building"](https://github.com/NVIDIA/AMGX#-building) section of the AMGX documentation of the specified library version for more details. Some options are `CMAKE_NO_MPI:Boolean` (default=`False`) - build without MPI support even if the `FindMPI` script finds an MPI library. `AMGX_NO_RPATH:Boolean` (default=`False`) - by default CMake adds `-rpath` flags to binaries, this option disables that. `MKL_ROOT_DIR:String`, `MAGMA_ROOT_DIR:String` - MAGMA/MKL are used to accelerate some of the Eigensolvers. These solvers will return "error 'not supported'" if AMGX was not build with MKL/MAGMA support.
    commit: The git commit to clone. The default is empty and uses
    the latest commit on the selected branch of the repository.
    directory: Build from an unpackaged source directory relative to
    the local build context instead of fetching AMGX sources from a
    git repository. This option is incompatible with
    `repository`/`branch`/ `commit`. The default is `None`.
    ospackages: List of OS packages to install prior to downloading,
    configuring, and building. The default value is `[git]`.
    prefix: The top level install location. The default is
    `/usr/local/amgx`.
    repository: The git repository to clone. The default is `https://github.com/NVIDIA/AMGX`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    # Examples
    ```python
    amgx(branch='v2.1.0')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(amgx, self).__init__(**kwargs)
        self.__branch = kwargs.pop('branch', 'master')
        self.__cmake_opts = kwargs.pop('cmake_opts', [])
        self.__ospackages = kwargs.pop('ospackages', ['git', 'make'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/amgx')
        self.__repository = kwargs.pop('repository',
                                       'https://github.com/NVIDIA/amgx')
        # Compiler and linker search paths under the install prefix
        incdir = posixpath.join(self.__prefix, 'include')
        libdir = posixpath.join(self.__prefix, 'lib')
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(incdir)
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            libdir)
        if not self.ldconfig:
            # Not refreshing the dynamic linker cache, so extend the
            # runtime library search path instead
            self.environment_variables['LD_LIBRARY_PATH'] = \
                '{}:$LD_LIBRARY_PATH'.format(libdir)
        # Delegate the clone / configure / build / install steps to the
        # generic CMake building block
        self.__bb = generic_cmake(
            annotations={'branch': self.__branch},
            base_annotation=self.__class__.__name__,
            branch=self.__branch,
            cmake_opts=self.__cmake_opts,
            comment=False,
            devel_environment=self.environment_variables,
            prefix=self.__prefix,
            repository=self.__repository,
            runtime_environment=self.environment_variables,
            **kwargs)
        # Container instructions
        self += comment('AMGX branch {}'.format(self.__branch))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        a = amgx(...)
        Stage0 += a
        Stage1 += a.runtime()
        ```
        """
        self.rt += comment('AMGX')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/amgx.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""NetCDF building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
class netcdf(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
"""The `netcdf` building block downloads, configures, builds, and
installs the
[NetCDF](https://www.unidata.ucar.edu/software/netcdf/) component.
The [HDF5](#hdf5) building block should be installed prior to this
building block.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
check: Boolean flag to specify whether the `make check` step
should be performed. The default is False.
configure_opts: List of options to pass to `configure`. The
default value is an empty list.
cxx: Boolean flag to specify whether the NetCDF C++ library should
be installed. The default is True.
disable_FEATURE: Flags to control disabling features when
configuring. For instance, `disable_foo=True` maps to
`--disable-foo`. Underscores in the parameter name are converted
to dashes.
enable_FEATURE[=ARG]: Flags to control enabling features when
configuring. For instance, `enable_foo=True` maps to
`--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
Underscores in the parameter name are converted to dashes.
environment: Boolean flag to specify whether the environment
(`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH` and `PATH`) should be
modified to include NetCDF. The default is True.
fortran: Boolean flag to specify whether the NetCDF Fortran
library should be installed. The default is True.
ldconfig: Boolean flag to specify whether the NetCDF library
directory should be added dynamic linker cache. If False, then
`LD_LIBRARY_PATH` is modified to include the NetCDF library
directory. The default value is False.
ospackages: List of OS packages to install prior to configuring
and building. For Ubuntu, the default values are
`ca-certificates`, `file`, `libcurl4-openssl-dev`, `m4`, `make`,
`wget`, and `zlib1g-dev`. For RHEL-based Linux distributions the
default values are `ca-certificates`, `file`, `libcurl-devel`
`m4`, `make`, `wget`, and `zlib-devel`.
prefix: The top level install location. The default location is
`/usr/local/netcdf`.
toolchain: The toolchain object. This should be used if
non-default compilers or other toolchain options are needed. The
default is empty.
version: The version of NetCDF to download. The default value is
`4.7.4`.
version_cxx: The version of NetCDF C++ to download. The default
value is `4.3.1`.
version_fortran: The version of NetCDF Fortran to download. The
default value is `4.5.3`.
with_PACKAGE[=ARG]: Flags to control optional packages when
configuring. For instance, `with_foo=True` maps to `--with-foo`
and `with_foo='/usr/local/foo'` maps to
`--with-foo=/usr/local/foo`. Underscores in the parameter name
are converted to dashes.
without_PACKAGE: Flags to control optional packages when
configuring. For instance `without_foo=True` maps to
`--without-foo`. Underscores in the parameter name are converted
to dashes.
# Examples
```python
netcdf(prefix='/opt/netcdf/4.6.1', version='4.6.1')
```
"""
    def __init__(self, **kwargs):
        """Initialize building block"""

        super(netcdf, self).__init__(**kwargs)

        # Download locations for the C, C++, and Fortran interfaces
        self.__baseurl_c = 'https://github.com/Unidata/netcdf-c/archive'
        self.__baseurl_cxx = 'https://github.com/Unidata/netcdf-cxx4/archive'
        self.__baseurl_fortran = 'https://github.com/Unidata/netcdf-fortran/archive'

        # These options are popped (not merely read) so that they are
        # not forwarded to the generic_autotools building blocks below
        # via **kwargs
        self.__check = kwargs.pop('check', False)
        self.__cxx = kwargs.pop('cxx', True)
        self.__fortran = kwargs.pop('fortran', True)
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/netcdf')
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__version = kwargs.pop('version', '4.7.4')
        self.__version_cxx = kwargs.pop('version_cxx', '4.3.1')
        self.__version_fortran = kwargs.pop('version_fortran', '4.5.3')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the download specific parameters
        self.__download()

        # Setup the environment variables so that installed headers,
        # libraries, and binaries are found
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, 'lib'))
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        if not self.ldconfig:
            # The dynamic linker cache will not be updated, so make the
            # library directory visible via LD_LIBRARY_PATH instead
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))

        # Setup build configuration for the core NetCDF C library
        comments = ['NetCDF version {}'.format(self.__version)]
        self.__bb = [generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            comment=False,
            devel_environment=self.environment_variables,
            directory=self.__directory_c,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url=self.__url_c,
            **kwargs)]

        # Setup optional CXX build configuration
        if self.__cxx:
            comments.append('NetCDF C++ version {}'.format(self.__version_cxx))
            self.__bb.append(generic_autotools(
                annotations={'version': self.__version_cxx},
                base_annotation='{}-cxx4'.format(self.__class__.__name__),
                check=self.__check,
                comment=False,
                directory='netcdf-cxx4-{}'.format(self.__version_cxx),
                # Checks fail when using parallel make. Disable it.
                parallel=1 if self.__check else '$(nproc)',
                prefix=self.__prefix,
                url='{0}/v{1}.tar.gz'.format(self.__baseurl_cxx,
                                             self.__version_cxx),
                **kwargs))

        # Setup optional Fortran build configuration
        if self.__fortran:
            comments.append('NetCDF Fortran version {}'.format(self.__version_fortran))
            self.__bb.append(generic_autotools(
                annotations={'version': self.__version_fortran},
                base_annotation='{}-fortran'.format(self.__class__.__name__),
                check=self.__check,
                comment=False,
                directory='netcdf-fortran-{}'.format(self.__version_fortran),
                # Checks fail when using parallel make. Disable it.
                parallel=1 if self.__check else '$(nproc)',
                prefix=self.__prefix,
                url='{0}/v{1}.tar.gz'.format(self.__baseurl_fortran,
                                             self.__version_fortran),
                **kwargs))

        # Container instructions: one comment summarizing all selected
        # components, the OS prerequisites, then each build
        self += comment(', '.join(comments))
        self += packages(ospackages=self.__ospackages)
        self += [bb for bb in self.__bb]
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
if not self.__ospackages:
self.__ospackages = ['ca-certificates', 'file',
'libcurl4-openssl-dev', 'm4', 'make',
'wget', 'zlib1g-dev']
self.__runtime_ospackages = ['zlib1g']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
if not self.__ospackages:
self.__ospackages = ['ca-certificates', 'file',
'libcurl-devel', 'm4', 'make',
'wget', 'zlib-devel']
if self.__check:
self.__ospackages.append('diffutils')
self.__runtime_ospackages = ['zlib']
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def __download(self):
"""Set download source based on user parameters"""
# Version 4.3.1 changed the package name
if LooseVersion(self.__version) >= LooseVersion('4.3.1'):
pkgname = 'netcdf-c'
tarball = 'v{0}.tar.gz'.format(self.__version)
else:
pkgname = 'netcdf'
tarball = '{0}-{1}.tar.gz'.format(pkgname, self.__version)
self.__directory_c = '{0}-{1}'.format(pkgname, self.__version)
self.__url_c = '{0}/{1}'.format(self.__baseurl_c, tarball)
def runtime(self, _from='0'):
"""Generate the set of instructions to install the runtime specific
components from a build in a previous stage.
# Examples
```python
n = netcdf(...)
Stage0 += n
Stage1 += n.runtime()
```
"""
self.rt += comment('NetCDF')
self.rt += packages(ospackages=self.__runtime_ospackages)
self.rt += self.__bb[0].runtime(_from=_from)
return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/netcdf.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Generic autotools building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import posixpath
import hpccm.templates.ConfigureMake
import hpccm.templates.annotate
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
from hpccm.building_blocks.base import bb_base
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.label import label
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class generic_autotools(bb_base, hpccm.templates.ConfigureMake,
                        hpccm.templates.annotate, hpccm.templates.downloader,
                        hpccm.templates.envvars, hpccm.templates.ldconfig,
                        hpccm.templates.rm):
    """The `generic_autotools` building block downloads, configures,
    builds, and installs a specified GNU Autotools enabled package.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    annotations: Dictionary of additional annotations to include. The
    default is an empty dictionary.

    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.

    build_directory: The location to build the package. The default
    value is the source code location.

    build_environment: Dictionary of environment variables and values
    to set when building the package. The default is an empty
    dictionary.

    check: Boolean flag to specify whether the `make check` step
    should be performed. The default is False.

    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.

    configure_opts: List of options to pass to `configure`. The
    default value is an empty list.

    devel_environment: Dictionary of environment variables and values,
    e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the development
    stage after the package is built and installed. The default is an
    empty dictionary.

    directory: The source code location. The default value is the
    basename of the downloaded package. If the value is not an
    absolute path, then the temporary working directory is prepended.

    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.

    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.

    environment: Boolean flag to specify whether the environment
    should be modified (see `devel_environment` and
    `runtime_environment`). The default is True.

    export_build_environment: Boolean flag to specify whether the
    build environment should be exported, or merely set on the
    configure command line. The default is False.

    install: Boolean flag to specify whether the `make install` step
    should be performed. The default is True.

    ldconfig: Boolean flag to specify whether the library directory
    should be added to the dynamic linker cache. The default value is
    False.

    libdir: The path relative to the install prefix to use when
    configuring the dynamic linker cache. The default value is `lib`.

    make: Boolean flag to specify whether the `make` step should be
    performed. The default is True.

    package: Path to the local source package relative to the local
    build context. One of this parameter or the `repository` or `url`
    parameters must be specified.

    postconfigure: List of shell commands to run after running
    `configure`. The working directory is the source code location.
    The default is an empty list.

    postinstall: List of shell commands to run after running 'make
    install'. The working directory is the install prefix. The
    default is an empty list.

    preconfigure: List of shell commands to run prior to running
    `configure`. The working directory is the source code location.
    The default is an empty list.

    prefix: The top level install location. The default value is
    `/usr/local`. It is highly recommended not to use this default
    and instead set the prefix to a package specific directory.

    recursive: Initialize and checkout git submodules. `repository` parameter
    must be specified. The default is False.

    repository: The git repository of the package to build. One of
    this parameter or the `package` or `url` parameters must be
    specified.

    _run_arguments: Specify additional [Dockerfile RUN arguments](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md) (Docker specific).

    runtime: The list of files / directories to copy into the runtime
    stage. The default is an empty list, i.e., copy the entire
    prefix.

    runtime_environment: Dictionary of environment variables and
    values, e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the runtime
    stage. The default is an empty dictionary.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    url: The URL of the package to build. One of this
    parameter or the `package` or `repository` parameters must be
    specified.

    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.

    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.

    # Examples

    ```python
    generic_autotools(directory='tcl8.6.9/unix',
                      prefix='/usr/local/tcl',
                      url='https://prdownloads.sourceforge.net/tcl/tcl8.6.9-src.tar.gz')
    ```

    ```python
    generic_autotools(preconfigure=['./autogen.sh'],
                      prefix='/usr/local/zeromq',
                      repository='https://github.com/zeromq/libzmq.git')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(generic_autotools, self).__init__(**kwargs)

        # User configurable build options; see the class docstring for
        # the meaning of each parameter
        self.__annotations = kwargs.get('annotations', {})
        self.__build_directory = kwargs.get('build_directory', None)
        self.__build_environment = kwargs.get('build_environment', {})
        self.__check = kwargs.get('check', False)
        self.__comment = kwargs.get('comment', True)
        self.configure_opts = kwargs.get('configure_opts', [])
        self.__directory = kwargs.get('directory', None)
        self.environment_variables = kwargs.get('devel_environment', {})
        self.__export_build_environment = kwargs.get('export_build_environment', False)
        self.__install = kwargs.get('install', True)
        self.__libdir = kwargs.get('libdir', 'lib')
        self.__make = kwargs.get('make', True)
        self.__postconfigure = kwargs.get('postconfigure', [])
        self.__postinstall = kwargs.get('postinstall', [])
        self.__preconfigure = kwargs.get('preconfigure', [])
        self.__recursive = kwargs.get('recursive', False)
        self.__run_arguments = kwargs.get('_run_arguments', None)
        self.__runtime = kwargs.get('runtime', [])
        self.runtime_environment_variables = kwargs.get('runtime_environment', {})
        self.__toolchain = kwargs.get('toolchain', toolchain())

        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        if self.__comment:
            # Describe the source, preferring a URL, then a git
            # repository, then a local package
            if self.url:
                self += comment(self.url, reformat=False)
            elif self.repository:
                self += comment(self.repository, reformat=False)
            elif self.package:
                self += comment(self.package, reformat=False)

        if self.package:
            # Stage the local package into the working directory
            self += copy(src=self.package,
                         dest=posixpath.join(self.__wd,
                                             os.path.basename(self.package)))

        self += shell(_arguments=self.__run_arguments,
                      commands=self.__commands)
        self += environment(variables=self.environment_step())
        self += label(metadata=self.annotate_step())

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""

        # Get source
        self.__commands.append(self.download_step(recursive=self.__recursive,
                                                  wd=self.__wd))

        # directory containing the unarchived package
        if self.__directory:
            if posixpath.isabs(self.__directory):
                self.src_directory = self.__directory
            else:
                self.src_directory = posixpath.join(self.__wd,
                                                    self.__directory)

        # sanity check
        # NOTE(review): when no directory was given, src_directory is
        # presumably set by download_step() above -- confirm against
        # the downloader template
        if not self.src_directory:
            raise RuntimeError('source directory is not defined')

        # Preconfigure setup
        if self.__preconfigure:
            # Assume the preconfigure commands should be run from the
            # source directory
            self.__commands.append('cd {}'.format(self.src_directory))
            self.__commands.extend(self.__preconfigure)

        # Configure
        build_environment = []
        if self.__build_environment:
            # Flatten the environment dictionary into KEY=VALUE
            # strings, sorted for reproducible output
            for key, val in sorted(self.__build_environment.items()):
                build_environment.append('{0}={1}'.format(key, val))
        self.__commands.append(self.configure_step(
            build_directory=self.__build_directory,
            directory=self.src_directory, environment=build_environment,
            export_environment=self.__export_build_environment,
            toolchain=self.__toolchain))

        # Post configure setup
        if self.__postconfigure:
            # Assume the postconfigure commands should be run from the
            # source directory
            self.__commands.append('cd {}'.format(self.src_directory))
            self.__commands.extend(self.__postconfigure)

        # Build
        if self.__make:
            self.__commands.append(self.build_step())

        # Check the build
        if self.__check:
            self.__commands.append(self.check_step())

        # Install
        if self.__install:
            self.__commands.append(self.install_step())

        if self.__postinstall:
            # Assume the postinstall commands should be run from the
            # install directory
            self.__commands.append('cd {}'.format(self.prefix))
            self.__commands.extend(self.__postinstall)

        # Set library path
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(self.prefix, self.__libdir)))

        # Add annotations
        for key,value in self.__annotations.items():
            self.add_annotation(key, value)

        # Cleanup the source tree and the downloaded tarball / package
        remove = [self.src_directory]
        if self.url:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.url)))
        elif self.package:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.package)))
        if self.__build_directory:
            if posixpath.isabs(self.__build_directory):
                # NOTE(review): only absolute build directories are
                # removed here; relative ones are presumably removed
                # along with the working directory -- confirm
                remove.append(self.__build_directory)
        self.__commands.append(self.cleanup_step(items=remove))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        g = generic_autotools(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        if self.prefix:
            if self.__comment:
                if self.url:
                    self.rt += comment(self.url, reformat=False)
                elif self.repository:
                    self.rt += comment(self.repository, reformat=False)

            if self.__runtime:
                # Copy only the selected files / directories
                for src in self.__runtime:
                    if '*' in posixpath.basename(src):
                        # When using COPY with more than one source file,
                        # the destination must be a directory and end with
                        # a /
                        dest = posixpath.dirname(src) + '/'
                    else:
                        dest = src

                    self.rt += copy(_from=_from, src=src, dest=dest)
            else:
                # Copy the entire prefix
                self.rt += copy(_from=_from, src=self.prefix,
                                dest=self.prefix)

            if self.ldconfig:
                self.rt += shell(commands=[self.ldcache_step(
                    directory=posixpath.join(self.prefix, self.__libdir))])

            if self.runtime_environment_variables:
                self.rt += environment(
                    variables=self.environment_step(runtime=True))

            if self.annotate:
                self.rt += label(metadata=self.annotate_step())

            return str(self.rt)
        else: # pragma: no cover
            return
| hpc-container-maker-master | hpccm/building_blocks/generic_autotools.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""GDRCOPY building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
from six.moves import shlex_quote
from distutils.version import LooseVersion
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class gdrcopy(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `gdrcopy` building block builds and installs the user space
    library from the [gdrcopy](https://github.com/NVIDIA/gdrcopy)
    component.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LIBRARY_PATH`, and `LD_LIBRARY_PATH`) should be
    modified to include the gdrcopy. The default is True.

    ldconfig: Boolean flag to specify whether the gdrcopy library
    directory should be added to the dynamic linker cache. If False,
    then `LD_LIBRARY_PATH` is modified to include the gdrcopy library
    directory. The default value is False.

    ospackages: List of OS packages to install prior to building. The
    default values are `make` and `wget`.

    prefix: The top level install location. The default value is
    `/usr/local/gdrcopy`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    version: The version of gdrcopy source to download. The default
    value is `2.2`.

    # Examples

    ```python
    gdrcopy(prefix='/opt/gdrcopy/2.1', version='2.1')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(gdrcopy, self).__init__(**kwargs)

        # Parameters are popped (not merely read) so that they are not
        # forwarded to the generic_build building block via **kwargs
        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/NVIDIA/gdrcopy/archive')
        self.__ospackages = kwargs.pop('ospackages', ['make', 'wget'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/gdrcopy')
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '2.2')

        # Since gdrcopy does not use autotools or CMake, the toolchain
        # requires special handling.
        make_opts = vars(self.__toolchain)
        if 'CFLAGS' in make_opts:
            # CFLAGS is derived from COMMONCFLAGS, so rename. See
            # https://github.com/NVIDIA/gdrcopy/blob/master/src/Makefile#L9
            make_opts['COMMONCFLAGS'] = make_opts.pop('CFLAGS')

        # Version 2.2 changed the flag to lowercase prefix and the lib directory
        if LooseVersion(self.__version) >= LooseVersion('2.2'):
            make_opts['prefix'] = self.__prefix
            libdir = 'lib'
        else:
            make_opts['PREFIX'] = self.__prefix
            libdir = 'lib64'

        # Flatten the make options into a shell-quoted "KEY=VALUE ..."
        # string, sorted for reproducible output
        make_opts_str = ' '.join(['{0}={1}'.format(key, shlex_quote(value))
                                  for key, value in sorted(make_opts.items())])

        # Setup the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, libdir))
        if not self.ldconfig:
            # The dynamic linker cache will not be updated, so make the
            # library directory visible via LD_LIBRARY_PATH instead
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(
                posixpath.join(self.__prefix, libdir))

        # Setup build configuration
        self.__bb = generic_build(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            # Work around "install -D" issue on CentOS
            build=['mkdir -p {0}/include {0}/{1}'.format(self.__prefix, libdir),
                   'make {} lib lib_install'.format(make_opts_str)],
            comment=False,
            devel_environment=self.environment_variables,
            directory='gdrcopy-{}'.format(self.__version),
            libdir=libdir,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version),
            **kwargs)

        # Container instructions
        self += comment('GDRCOPY version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        g = gdrcopy(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        self.rt += comment('GDRCOPY')
        # The generic_build building block stages the install prefix
        # and runtime environment into the runtime image
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/gdrcopy.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""NVIDIA Nsight Compute building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
from distutils.version import StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.building_blocks.generic_build import generic_build
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
class nsight_compute(bb_base, hpccm.templates.envvars):
    """The `nsight_compute` building block downloads and installs the
    [NVIDIA Nsight Compute
    profiler](https://developer.nvidia.com/nsight-compute).

    # Parameters

    eula: Required, by setting this value to `True`, you agree to the
    Nsight Compute End User License Agreement that is displayed when
    running the installer interactively. The default value is
    `False`.

    ospackages: List of OS packages to install prior to building.
    When using a runfile, the default values are `perl` and `wget`
    for Ubuntu and `perl`, `perl-Env`, and `wget` for RHEL-based
    Linux distributions. Otherwise, the default values are
    `apt-transport-https`, `ca-certificates`, `gnupg`, and `wget` for
    Ubuntu and an empty list for RHEL-based Linux distributions.

    prefix: The top level install prefix. The default value is
    `/usr/local/NVIDIA-Nsight-Compute`. This parameter is ignored
    unless `runfile` is set.

    runfile: Path or URL to Nsight Compute's `.run` file relative to
    the local build context. The default value is empty.

    version: the version of Nsight Compute to install. Note when
    `runfile` is set this parameter is ignored. The default value is
    `2022.4.0`.

    # Examples

    ```python
    nsight_compute(version='2020.4.0')
    ```

    ```python
    nsight_compute(eula=True, runfile='nsight-compute-linux-2020.2.0.18-28964561.run')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(nsight_compute, self).__init__(**kwargs)

        self.__arch_label = '' # Filled in __cpu_arch
        self.__distro_label = '' # Filled in by __distro
        self.__eula = kwargs.get('eula', False)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__prefix = kwargs.get('prefix',
                                   '/usr/local/NVIDIA-Nsight-Compute')
        self.__runfile = kwargs.get('runfile', None)
        self.__version = kwargs.get('version', '2022.4.0')
        self.__wd = kwargs.get('wd', posixpath.join(
            hpccm.config.g_wd, 'nsight_compute')) # working directory

        # Set the Linux distribution specific parameters
        self.__distro()

        # Disables deployment of section files to prevent warning
        # when there is no home or home is read-only:
        self.environment_variables[
            'NV_COMPUTE_PROFILER_DISABLE_STOCK_FILE_DEPLOYMENT'
        ] = '1'

        if self.__runfile:
            # Runfile based installation: the interactive installer
            # displays a EULA, so require explicit acceptance
            if not self.__eula:
                raise RuntimeError('Nsight Compute EULA was not accepted.')
            self.__instructions_runfile()

        else:
            # Package repository based installation

            # Set the CPU architecture specific parameters
            self.__cpu_arch()

            # Fill in container instructions
            self.__instructions_repository()

    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            self.__arch_label = 'arm64'
        elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
            # Debian-style vs RPM-style architecture labels
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__arch_label = 'ppc64el'
            else:
                self.__arch_label = 'ppc64le'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
                self.__arch_label = 'amd64'
            else:
                self.__arch_label = 'x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                if self.__runfile:
                    # The runfile installer is a perl-driven shell
                    # archive; wget is needed when the runfile is a URL
                    self.__ospackages = ['perl', 'wget']
                else:
                    self.__ospackages = ['apt-transport-https',
                                         'ca-certificates', 'gnupg', 'wget']

            if hpccm.config.g_linux_version >= StrictVersion('22.04'):
                self.__distro_label = 'ubuntu2204'
            elif hpccm.config.g_linux_version >= StrictVersion('20.04'):
                self.__distro_label = 'ubuntu2004'
            elif hpccm.config.g_linux_version >= StrictVersion('18.0'):
                self.__distro_label = 'ubuntu1804'
            else:
                self.__distro_label = 'ubuntu1604'

        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                if self.__runfile:
                    self.__ospackages = ['perl', 'perl-Env', 'wget']

            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                self.__distro_label = 'rhel8'
            else:
                self.__distro_label = 'rhel7'

        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __instructions_repository(self):
        """Fill in container instructions"""

        self += comment('NVIDIA Nsight Compute {}'.format(self.__version))

        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)

        # Install from the NVIDIA devtools package repository for this
        # distribution and architecture
        self += packages(
            apt_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
            apt_repositories=['deb [signed-by=/usr/share/keyrings/nvidia.gpg] https://developer.download.nvidia.com/devtools/repos/{0}/{1}/ /'.format(self.__distro_label, self.__arch_label)],
            # https://github.com/NVIDIA/hpc-container-maker/issues/367
            force_add_repo=True,
            ospackages=['nsight-compute-{}'.format(self.__version)],
            yum_keys=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}/nvidia.pub'.format(self.__distro_label, self.__arch_label)],
            yum_repositories=['https://developer.download.nvidia.com/devtools/repos/{0}/{1}'.format(self.__distro_label, self.__arch_label)],
            _apt_key=False)

        # The distro packages do not link nsight-compute binaries to
        # /usr/local/bin
        self.environment_variables['PATH'] = '/opt/nvidia/nsight-compute/{}:$PATH'.format(self.__version)

        self += environment(variables=self.environment_step())

    def __instructions_runfile(self):
        """Fill in container instructions"""

        pkg = os.path.basename(self.__runfile)

        # Unattended installation into the target prefix
        install_cmds = [
            'sh ./{} --nox11 -- -noprompt -targetpath={}'.format(
                pkg, self.__prefix)
        ]

        # Commands needed to predeploy target-specific files. When
        # connecting through the GUI on another machine to the
        # container, this removes the need to copy the files over.
        install_cmds += [
            'mkdir -p /tmp/var/target',
            'ln -sf {}/target/* /tmp/var/target'.format(self.__prefix),
            'ln -sf {}/sections /tmp/var/'.format(self.__prefix),
            'chmod -R a+w /tmp/var'
        ]

        kwargs = {}
        if self.__runfile.strip().startswith(('http://', 'https://')):
            # Remote runfile: download it
            kwargs['url'] = self.__runfile
        else:
            # Local runfile: copy it from the build context
            kwargs['package'] = self.__runfile

        self.__bb = generic_build(
            annotations={'runfile': pkg},
            base_annotation=self.__class__.__name__,
            comment = False,
            devel_environment={'PATH': '{}:$PATH'.format(self.__prefix)},
            directory=self.__wd,
            install=install_cmds,
            # The runfile is self-extracting, nothing to unpack first
            unpack=False,
            wd=self.__wd,
            **kwargs
        )

        self += comment('NVIDIA Nsight Compute {}'.format(pkg),
                        reformat=False)
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
        self += environment(variables=self.environment_variables)
| hpc-container-maker-master | hpccm/building_blocks/nsight_compute.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Intel MPI building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import logging
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class intel_mpi(bb_base, hpccm.templates.envvars, hpccm.templates.wget):
    """The `intel_mpi` building block downloads and installs the [Intel
    MPI Library](https://software.intel.com/en-us/intel-mpi-library).
    You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement)
    to use this building block.
    # Parameters
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and others) should be modified to
    include Intel MPI. `mpivars` has precedence. The default is True.
    eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement).
    The default value is `False`.
    mpivars: Intel MPI provides an environment script (`mpivars.sh`)
    to setup the Intel MPI environment. If this value is `True`, the
    bashrc is modified to automatically source this environment
    script. However, the Intel MPI environment is not automatically
    available to subsequent container image build steps; the
    environment is available when the container image is run. To set
    the Intel MPI environment in subsequent build steps you can
    explicitly call `source
    /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh
    intel64` in each build step. If this value is set to `False`,
    then the environment is set such that the environment is visible
    to both subsequent container image build steps and when the
    container image is run. However, the environment may differ
    slightly from that set by `mpivars.sh`. The default value is
    `True`.
    ospackages: List of OS packages to install prior to installing
    Intel MPI. For Ubuntu, the default values are
    `apt-transport-https`, `ca-certificates`, `gnupg`, `man-db`,
    `openssh-client`, and `wget`. For RHEL-based Linux distributions,
    the default values are `man-db` and `openssh-clients`.
    version: The version of Intel MPI to install. The default value
    is `2019.6-088`.
    # Examples
    ```python
    intel_mpi(eula=True, version='2018.3-051')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(intel_mpi, self).__init__(**kwargs)
        # By setting this value to True, you agree to the
        # corresponding Intel End User License Agreement
        # (https://software.intel.com/en-us/articles/end-user-license-agreement)
        self.__eula = kwargs.get('eula', False)
        self.__mpivars = kwargs.get('mpivars', True)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__version = kwargs.get('version', '2019.6-088')
        self.__year = '2019' # Also used by 2018 versions
        self.__bashrc = '' # Filled in by __distro()
        # Output toolchain: MPI compiler wrappers for downstream builds
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifc')
        # Intel MPI binary packages are x86_64 only
        if hpccm.config.g_cpu_arch != cpu_arch.X86_64: # pragma: no cover
            logging.warning('Using intel_mpi on a non-x86_64 processor')
        # Set the Linux distribution specific parameters
        self.__distro()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Intel MPI version {}'.format(self.__version))
        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)
        # The EULA must be explicitly accepted before anything is installed
        if not self.__eula:
            raise RuntimeError('Intel EULA was not accepted. To accept, see the documentation for this building block')
        # Register the Intel package repository (apt or yum) and install
        # the versioned Intel MPI package
        self += packages(
            apt_keys=['https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            apt_repositories=['deb https://apt.repos.intel.com/mpi all main'],
            ospackages=['intel-mpi-{}'.format(self.__version)],
            yum_keys=['https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-{}.PUB'.format(self.__year)],
            yum_repositories=['https://yum.repos.intel.com/mpi/setup/intel-mpi.repo'])
        # Set the environment
        if self.__mpivars:
            # Source the mpivars environment script when starting the
            # container; the variables will not be available for any
            # subsequent build steps.
            self += shell(commands=['echo "source /opt/intel/compilers_and_libraries/linux/mpi/intel64/bin/mpivars.sh intel64" >> {}'.format(self.__bashrc)])
        else:
            # Set the environment so that it will be available to
            # subsequent build steps and when starting the container,
            # but this may miss some things relative to the mpivars
            # environment script.
            if LooseVersion(self.__version) >= LooseVersion('2019.0'):
                # 2019+ bundles libfabric, so it needs additional paths
                self.environment_variables={
                    'FI_PROVIDER_PATH': '/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/lib/prov',
                    'I_MPI_ROOT': '/opt/intel/compilers_and_libraries/linux/mpi',
                    'LD_LIBRARY_PATH': '/opt/intel/compilers_and_libraries/linux/mpi/intel64/lib:/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/lib:$LD_LIBRARY_PATH',
                    'PATH': '/opt/intel/compilers_and_libraries/linux/mpi/intel64/bin:/opt/intel/compilers_and_libraries/linux/mpi/intel64/libfabric/bin:$PATH'}
            else:
                self.environment_variables={
                    'I_MPI_ROOT': '/opt/intel/compilers_and_libraries/linux/mpi',
                    'LD_LIBRARY_PATH': '/opt/intel/compilers_and_libraries/linux/mpi/intel64/lib:$LD_LIBRARY_PATH',
                    'PATH': '/opt/intel/compilers_and_libraries/linux/mpi/intel64/bin:$PATH'}
            self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['apt-transport-https', 'ca-certificates',
                                     'gnupg', 'man-db', 'openssh-client',
                                     'wget']
            self.__bashrc = '/etc/bash.bashrc'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['man-db', 'openssh-clients']
            self.__bashrc = '/etc/bashrc'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        Intel MPI is installed as a system package, so the runtime stage
        reuses the same instructions as the build stage.
        # Examples
        ```python
        i = intel_mpi(...)
        Stage0 += i
        Stage1 += i.runtime()
        ```
        """
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/intel_mpi.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MVAPICH2-GDR building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import posixpath
import re
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class mvapich2_gdr(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
                   hpccm.templates.rm, hpccm.templates.wget):
    """The `mvapich2_gdr` building block installs the
    [MVAPICH2-GDR](http://mvapich.cse.ohio-state.edu) component.
    Depending on the parameters, the package will be downloaded from
    the web (default) or copied from the local build context.
    MVAPICH2-GDR is distributed as a binary package, so certain
    dependencies need to be met and only certain combinations of
    recipe components are supported; please refer to the MVAPICH2-GDR
    documentation for more information.
    The [GNU compiler](#gnu) or [PGI compiler](#pgi) building blocks
    should be installed prior to this building block.
    The [Mellanox OFED](#mlnx_ofed) building block should be installed
    prior to this building block.
    The [gdrcopy](#gdrcopy) building block should be installed prior
    to this building block.
    As a side effect, a toolchain is created containing the MPI
    compiler wrappers.  The toolchain can be passed to other
    operations that want to build using the MPI compiler wrappers.
    Note: Using MVAPICH2-GDR on non-RHEL-based Linux distributions has
    several issues, including compiler version mismatches and libnuma
    incompatibilities.
    # Parameters
    arch: The processor architecture of the MVAPICH2-GDR package.  The
    default value is set automatically based on the processor
    architecture of the base image.
    cuda_version: The version of CUDA the MVAPICH2-GDR package was
    built against.  The version string format is X.Y.  The version
    should match the version of CUDA provided by the base image.  This
    value is ignored if `package` is set.  The default value is `10.2`.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    MVAPICH2-GDR. The default is True.
    gnu: Boolean flag to specify whether a GNU build should be used.
    The default value is True.
    ldconfig: Boolean flag to specify whether the MVAPICH2-GDR library
    directory should be added dynamic linker cache.  If False, then
    `LD_LIBRARY_PATH` is modified to include the MVAPICH2-GDR library
    directory.  The default value is False.
    mlnx_ofed_version: The version of Mellanox OFED the
    MVAPICH2-GDR package was built against.  The version string format
    is X.Y.  The version should match the version of Mellanox OFED
    installed by the `mlnx_ofed` building block.  This value is
    ignored if `package` is set.  The default value is `4.7`.
    ospackages: List of OS packages to install prior to installation.
    For Ubuntu, the default values are `cpio`, `libnuma1`,
    `openssh-client`, `rpm2cpio` and `wget`, plus `libgfortran3` if a
    GNU compiled package is selected.  For RHEL-based Linux
    distributions, the default values are `libpciaccess`,
    `numactl-libs`, `openssh-clients`, and `wget`, plus `libgfortran`
    if a GNU compiled package is selected.
    package: Specify the package name to download.  The package should
    correspond to the other recipe components (e.g., compiler version,
    CUDA version, Mellanox OFED version).  If specified, this option
    overrides all other building block options (e.g., compiler family,
    compiler version, CUDA version, Mellanox OFED version,
    MVAPICH2-GDR version).
    pgi: Boolean flag to specify whether a PGI build should be used.
    The default value is False.
    release: The release of MVAPICH2-GDR to download.  The value is
    ignored if `package` is set.  The default value is `1`.
    version: The version of MVAPICH2-GDR to download.  The value is
    ignored if `package` is set.  The default value is `2.3.4`.  Due
    to differences in the packaging scheme, versions prior to 2.3 are
    not supported.
    # Examples
    ```python
    mvapich2_gdr(version='2.3.1')
    ```
    ```python
    mvapich2_gdr(package='mvapich2-gdr-mcast.cuda10.0.mofed4.3.gnu4.8.5-2.3-1.el7.x86_64.rpm')
    ```
    """

    # Regular expression used to recover the CUDA, Mellanox OFED,
    # compiler, and MVAPICH2-GDR versions from a package filename
    __package_regex = r'(?P<cuda>cuda\d+\.\d+)\.(?P<mofed>mofed\d+\.\d+)\.(?P<compiler>(gnu\d+\.\d+\.\d+)|(pgi\d+\.\d+))-(?P<version>\d+\.\d+)'

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(mvapich2_gdr, self).__init__(**kwargs)
        self.__arch = kwargs.get('arch', hpccm.config.get_cpu_architecture())
        self.__baseurl = kwargs.get('baseurl',
                                    'http://mvapich.cse.ohio-state.edu/download/mvapich/gdr')
        self.__cuda_version = kwargs.get('cuda_version', '10.2')
        self.__gnu = kwargs.get('gnu', True)
        self.__gnu_version = kwargs.get('gnu_version', '4.8.5')
        self.__install_path_template = '/opt/mvapich2/gdr/{0}/mcast/no-openacc/{1}/{2}/mpirun/{3}'
        self.__mofed_version = kwargs.get('mlnx_ofed_version', '4.7')
        self.__ospackages = kwargs.get('ospackages', [])
        self.__package = kwargs.get('package', '')
        self.__package_template = 'mvapich2-gdr-mcast.{0}.{1}.{2}-{3}-{4}.el7.{5}.rpm'
        self.__pgi = kwargs.get('pgi', False)
        self.__pgi_version = kwargs.get('pgi_version', '19.10')
        self.__release = kwargs.get('release', '1')
        self.version = kwargs.get('version', '2.3.4')
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        # Output toolchain: MPI compiler wrappers for downstream builds
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifort')
        # Validate compiler choice: exactly one of GNU or PGI must be
        # in effect (ignored when an explicit package is given)
        if self.__gnu and self.__pgi and not self.__package:
            logging.warning('Multiple compilers selected, using PGI')
            self.__gnu = False
        elif not self.__gnu and not self.__pgi:
            logging.warning('No compiler selected, using GNU')
            self.__gnu = True
        self.__commands = [] # Filled in by __setup()
        self.__install_path = '' # Filled in by __setup()
        # Set the Linux distribution specific parameters
        self.__distro()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('MVAPICH2-GDR version {}'.format(self.version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['cpio', 'libnuma1', 'libpciaccess0',
                                     'openssh-client', 'rpm2cpio', 'wget']
                if self.__gnu:
                    self.__ospackages.append('libgfortran3')
            self.__runtime_ospackages = ['libnuma1', 'libpciaccess0',
                                         'openssh-client']
            if self.__gnu:
                self.__runtime_ospackages.append('libgfortran3')
            # Ubuntu has no native RPM support; extract the RPM payload
            # directly into the filesystem instead
            self.__installer_template = 'cd / && rpm2cpio {} | cpio -id'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['libpciaccess', 'numactl-libs',
                                     'openssh-clients', 'wget']
                if self.__gnu:
                    self.__ospackages.append('libgfortran')
            self.__runtime_ospackages = ['libpciaccess', 'numactl-libs',
                                         'openssh-clients']
            if self.__gnu:
                self.__runtime_ospackages.append('libgfortran')
            # The RPM has dependencies on some CUDA libraries that are
            # present, but not in the RPM database.  Use --nodeps as a
            # workaround.
            self.__installer_template = 'rpm --install --nodeps {}'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __setup(self):
        """Construct the series of shell commands and environment variables,
        i.e., fill in self.__commands and self.environment_variables

        Raises RuntimeError if a user supplied `package` name does not
        encode the expected CUDA / Mellanox OFED / compiler / version
        information."""
        if self.__package:
            # Override individual settings and just use the specified package
            package = self.__package
            # Deduce version strings from package name
            match = re.search(self.__package_regex, package)
            if not match:
                # Fail fast with a clear message rather than crashing
                # below with an opaque AttributeError on None
                raise RuntimeError(
                    'could not parse version information from MVAPICH2-GDR package name "{}"'.format(package))
            cuda_string = match.groupdict()['cuda']
            mofed_string = match.groupdict()['mofed']
            compiler_string = match.groupdict()['compiler']
            self.version = match.groupdict()['version']
        else:
            # Build the version strings based on the specified options
            if self.__gnu:
                compiler_string = 'gnu{}'.format(self.__gnu_version)
            elif self.__pgi:
                compiler_string = 'pgi{}'.format(self.__pgi_version)
            else:
                logging.error('Unknown compiler')
                compiler_string = 'unknown'
            cuda_string = 'cuda{}'.format(self.__cuda_version)
            mofed_string = 'mofed{}'.format(self.__mofed_version)
            # Package filename
            package = self.__package_template.format(
                cuda_string, mofed_string, compiler_string, self.version,
                self.__release, self.__arch)
        self.__install_path = self.__install_path_template.format(
            self.version, cuda_string, mofed_string, compiler_string)
        # Download source from web
        url = '{0}/{1}/{2}/{3}'.format(self.__baseurl, self.version,
                                       mofed_string, package)
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        # Install the package
        self.__commands.append(
            self.__installer_template.format(posixpath.join(self.__wd,
                                                            package)))
        # Workaround for bad path in the MPI compiler wrappers
        self.__commands.append('(test -f /usr/bin/bash || ln -s /bin/bash /usr/bin/bash)')
        # Workaround for using compiler wrappers in the build stage:
        # provide the NVML stub with the soname the wrappers link against
        cuda_home = '/usr/local/cuda'
        self.__commands.append('ln -s {0} {1}'.format(
            posixpath.join(cuda_home, 'lib64', 'stubs', 'nvidia-ml.so'),
            posixpath.join(cuda_home, 'lib64', 'stubs', 'nvidia-ml.so.1')))
        # Cleanup
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, package)]))
        # Setup environment variables
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__install_path, 'bin'))
        # Workaround for using compiler wrappers in the build stage
        self.environment_variables['PROFILE_POSTLIB'] = '"-L{} -lnvidia-ml"'.format('/usr/local/cuda/lib64/stubs')
        # Set library path
        libpath = posixpath.join(self.__install_path, 'lib64')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(directory=libpath))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        m = mvapich2_gdr(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        self.rt += comment('MVAPICH2-GDR')
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += copy(src=self.__install_path,
                        dest=self.__install_path, _from=_from)
        if self.ldconfig:
            self.rt += shell(commands=[self.ldcache_step(
                directory=posixpath.join(self.__install_path, 'lib64'))])
        # No need to workaround compiler wrapper issue for the runtime.
        self.rt += environment(
            variables=self.environment_step(exclude=['PROFILE_POSTLIB']))
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/mvapich2_gdr.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""HPC-X building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import posixpath
import re
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class hpcx(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
           hpccm.templates.rm, hpccm.templates.tar, hpccm.templates.wget):
    """The `hpcx` building block downloads and installs the [Mellanox
    HPC-X](https://developer.nvidia.com/networking/hpc-x)
    component.
    # Parameters
    buildlabel: The build label assigned by Mellanox to the tarball.
    For version 2.16 the default value is `cuda12-gdrcopy2-nccl2.18`.
    For version 2.15 the default value is `cuda12-gdrcopy2-nccl2.17`.
    For version 2.14 the default value is `cuda11-gdrcopy2-nccl2.16`.
    For versions 2.12 and 2.13 the default value is `cuda11-gdrcopy2-nccl2.12`.
    For versions 2.10 and 2.11 the default value is `cuda11-gdrcopy2-nccl2.11`.
    This value is ignored for HPC-X version 2.9 and earlier.
    environment: Boolean flag to specify whether the environment
    should be modified to include HPC-X. This option is only
    recognized if `hpcxinit` is False. The default is True.
    hpcxinit: Mellanox HPC-X provides an environment script
    (`hpcx-init.sh`) to setup the HPC-X environment. If this value is
    `True`, the bashrc is modified to automatically source this
    environment script. However, HPC-X is not automatically available
    to subsequent container image build steps; the environment is
    available when the container image is run. To set the HPC-X
    environment in subsequent build steps you can explicitly call
    `source /usr/local/hpcx/hpcx-init.sh && hpcx_load` in each build
    step. If this value is set to `False`, then the environment is
    set such that the environment is visible to both subsequent
    container image build steps and when the container image is run.
    However, the environment may differ slightly from that set by
    `hpcx-init.sh`. The default value is `True`.
    inbox: Boolean flag to specify whether to use Mellanox HPC-X built
    for Inbox OFED. If the value is `True`, use Inbox OFED. If the
    value is `False`, use Mellanox OFED. The default is `False`.
    ldconfig: Boolean flag to specify whether the Mellanox HPC-X
    library directories should be added dynamic linker cache. If
    False, then `LD_LIBRARY_PATH` is modified to include the HPC-X
    library directories. This value is ignored if `hpcxinit` is
    `True`. The default value is False.
    mlnx_ofed: The version of Mellanox OFED that should be matched.
    This value is ignored if Inbox OFED is selected. The default
    value is `5` for HPC-X version 2.10 and later, and `5.2-2.2.0.0`
    for earlier HPC-X versions.
    multi_thread: Boolean flag to specify whether the multi-threaded
    version of Mellanox HPC-X should be used. The default is `False`.
    ofedlabel: The Mellanox OFED label assigned by Mellanox to the
    tarball. For version 2.16 and later, the default value is
    `gcc-mlnx_ofed`. For earlier versions, the default value is
    `gcc-MLNX_OFED_LINUX-5`. This value is ignored if `inbox` is `True`.
    oslabel: The Linux distribution label assigned by Mellanox to the
    tarball. For Ubuntu, the default value is `ubuntu16.04` for
    Ubuntu 16.04, `ubuntu18.04` for Ubuntu 18.04, `ubuntu20.04` for
    Ubuntu 20.04, and `ubuntu22.04` for Ubuntu 22.04. For HPC-X
    version 2.10 and later and RHEL-based Linux distributions, the
    default value is `redhat7` for version 7 and `redhat8` for version
    8. For HPC-X version 2.9 and earlier and RHEL-based Linux
    distributions, the default value is `redhat7.6` for version 7 and
    `redhat8.0` for version 8.
    ospackages: List of OS packages to install prior to installing
    Mellanox HPC-X. For Ubuntu, the default values are `bzip2`,
    `libnuma1`, `openssh-client`, `tar`, and `wget`. For RHEL-based
    distributions the default values are `bzip2`, `numactl-libs`,
    `openssh-clients`, `tar`, and `wget`.
    prefix: The top level installation location. The default value is
    `/usr/local/hpcx`.
    version: The version of Mellanox HPC-X to install. The default
    value is `2.16`.
    # Examples
    ```python
    hpcx(prefix='/usr/local/hpcx', version='2.16')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(hpcx, self).__init__(**kwargs)
        self.__arch = hpccm.config.get_cpu_architecture()
        self.__baseurl = kwargs.get('baseurl',
                                    'https://content.mellanox.com/hpc/hpc-x')
        self.__bashrc = '' # Filled in by __distro()
        self.__buildlabel = kwargs.get('buildlabel', None)
        self.__hpcxinit = kwargs.get('hpcxinit', True)
        self.__inbox = kwargs.get('inbox', False)
        self.__mlnx_ofed = kwargs.get('mlnx_ofed', None)
        self.__multi_thread = kwargs.get('multi_thread', False)
        self.__ofedlabel = kwargs.get('ofedlabel', None)
        self.__oslabel = kwargs.get('oslabel', '') # Filled in by __distro()
        self.__ospackages = kwargs.get('ospackages', []) # Filled in by __distro()
        self.__packages = kwargs.get('packages', [])
        self.__prefix = kwargs.get('prefix', '/usr/local/hpcx')
        self.__version = kwargs.get('version', '2.16')
        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        # Default the build label based on the HPC-X version (see the
        # buildlabel parameter documentation above)
        if not self.__buildlabel:
            if StrictVersion(self.__version) >= StrictVersion('2.16'):
                self.__buildlabel = 'cuda12-gdrcopy2-nccl2.18'
            elif StrictVersion(self.__version) >= StrictVersion('2.15'):
                self.__buildlabel = 'cuda12-gdrcopy2-nccl2.17'
            elif StrictVersion(self.__version) >= StrictVersion('2.14'):
                self.__buildlabel = 'cuda11-gdrcopy2-nccl2.16'
            elif StrictVersion(self.__version) >= StrictVersion('2.12'):
                self.__buildlabel = 'cuda11-gdrcopy2-nccl2.12'
            elif StrictVersion(self.__version) >= StrictVersion('2.10'):
                self.__buildlabel = 'cuda11-gdrcopy2-nccl2.11'
        # Default the Mellanox OFED version based on the HPC-X version
        if not self.__mlnx_ofed:
            if StrictVersion(self.__version) >= StrictVersion('2.10'):
                self.__mlnx_ofed = '5'
            else:
                self.__mlnx_ofed = '5.2-2.2.0.0'
        # Default the Mellanox OFED label based on the HPC-X version
        if not self.__ofedlabel:
            if StrictVersion(self.__version) >= StrictVersion('2.16'):
                self.__ofedlabel = 'gcc-mlnx_ofed'
            else:
                self.__ofedlabel = 'gcc-MLNX_OFED_LINUX-{}'.format(self.__mlnx_ofed)
        # Output toolchain: MPI compiler wrappers for downstream builds
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifort')
        # Set the Linux distribution specific parameters
        self.__distro()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Mellanox HPC-X version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__oslabel:
                if hpccm.config.g_linux_version >= StrictVersion('22.0'):
                    self.__oslabel = 'ubuntu22.04'
                elif hpccm.config.g_linux_version >= StrictVersion('20.0'):
                    self.__oslabel = 'ubuntu20.04'
                elif hpccm.config.g_linux_version >= StrictVersion('18.0'):
                    self.__oslabel = 'ubuntu18.04'
                else:
                    self.__oslabel = 'ubuntu16.04'
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'libnuma1', 'openssh-client',
                                     'tar', 'wget']
            self.__bashrc = '/etc/bash.bashrc'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__oslabel:
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    if StrictVersion(self.__version) >= StrictVersion('2.10'):
                        self.__oslabel = 'redhat8'
                    else:
                        self.__oslabel = 'redhat8.0'
                else:
                    if StrictVersion(self.__version) >= StrictVersion('2.10'):
                        self.__oslabel = 'redhat7'
                    else:
                        self.__oslabel = 'redhat7.6'
            if not self.__ospackages:
                self.__ospackages = ['bzip2', 'numactl-libs',
                                     'openssh-clients', 'tar', 'wget']
            self.__bashrc = '/etc/bashrc'
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands, and (when `hpcxinit` is False) fill in
        self.environment_variables"""
        # For version 2.8 and earlier, the download URL has the format
        # MAJOR.MINOR in the path and the tarball contains
        # MAJOR.MINOR.REVISION, so pull apart the full version to get
        # the individual components.
        version_string = self.__version
        if StrictVersion(self.__version) <= StrictVersion('2.8'):
            match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)',
                             self.__version)
            version_string = '{0}.{1}'.format(match.groupdict()['major'],
                                              match.groupdict()['minor'])
        if self.__inbox:
            # Use inbox OFED
            if StrictVersion(self.__version) >= StrictVersion('2.10'):
                # Version 2.10 and later include an extra build label
                self.__label = 'hpcx-v{0}-gcc-inbox-{1}-{2}-{3}'.format(
                    self.__version, self.__oslabel, self.__buildlabel,
                    self.__arch)
            else:
                self.__label = 'hpcx-v{0}-gcc-inbox-{1}-{2}'.format(
                    self.__version, self.__oslabel, self.__arch)
        else:
            # Use MLNX OFED
            if StrictVersion(self.__version) >= StrictVersion('2.10'):
                # Version 2.10 and later include an extra build label
                self.__label = 'hpcx-v{0}-{1}-{2}-{3}-{4}'.format(
                    self.__version, self.__ofedlabel, self.__oslabel, self.__buildlabel, self.__arch)
            else:
                self.__label = 'hpcx-v{0}-{1}-{2}-{3}'.format(
                    self.__version, self.__ofedlabel, self.__oslabel, self.__arch)
        tarball = self.__label + '.tbz'
        url = '{0}/v{1}/{2}'.format(self.__baseurl, version_string, tarball)
        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        # "Install": HPC-X is a binary distribution, so unpack and copy
        # the tree into the installation prefix
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))
        self.__commands.append('cp -a {0} {1}'.format(
            posixpath.join(self.__wd, self.__label), self.__prefix))
        # Set the environment
        if self.__hpcxinit:
            # Use hpcxinit script: source it from the system bashrc so
            # the environment is loaded when the container is run
            if self.__multi_thread:
                self.__commands.append('echo "source {0}" >> {1}'.format(
                    posixpath.join(self.__prefix, 'hpcx-mt-init-ompi.sh'),
                    self.__bashrc))
            else:
                self.__commands.append('echo "source {0}" >> {1}'.format(
                    posixpath.join(self.__prefix, 'hpcx-init-ompi.sh'),
                    self.__bashrc))
            self.__commands.append('echo "hpcx_load" >> {0}'.format(
                self.__bashrc))
        else:
            # Set environment manually, mirroring the directories that
            # the hpcx-init script would add
            hpcx_dir = self.__prefix
            if self.__multi_thread:
                hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx', 'mt')
            else:
                hpcx_ucx_dir = posixpath.join(hpcx_dir, 'ucx')
            hpcx_sharp_dir = posixpath.join(hpcx_dir, 'sharp')
            hpcx_nccl_rdma_sharp_plugin_dir = posixpath.join(
                hpcx_dir, 'nccl_rdma_sharp_plugin')
            hpcx_hcoll_dir = posixpath.join(hpcx_dir, 'hcoll')
            hpcx_mpi_dir = posixpath.join(hpcx_dir, 'ompi')
            hpcx_oshmem_dir = hpcx_mpi_dir
            hpcx_mpi_tests_dir = posixpath.join(hpcx_mpi_dir, 'tests')
            # The bundled OSU micro-benchmark version depends on the
            # HPC-X version
            if StrictVersion(self.__version) >= StrictVersion('2.7'):
                hpcx_osu_dir = posixpath.join(hpcx_mpi_tests_dir,
                                              'osu-micro-benchmarks-5.6.2')
                hpcx_osu_cuda_dir = posixpath.join(
                    hpcx_mpi_tests_dir, 'osu-micro-benchmarks-5.6.2-cuda')
            else:
                hpcx_osu_dir = posixpath.join(hpcx_mpi_tests_dir,
                                              'osu-micro-benchmarks-5.3.2')
                hpcx_osu_cuda_dir = posixpath.join(
                    hpcx_mpi_tests_dir, 'osu-micro-benchmarks-5.3.2-cuda')
            hpcx_ipm_dir = posixpath.join(hpcx_mpi_tests_dir, 'ipm-2.0.6')
            hpcx_ipm_lib = posixpath.join(hpcx_ipm_dir, 'lib', 'libipm.so')
            hpcx_clusterkit_dir = posixpath.join(hpcx_dir, 'clusterkit')
            self.environment_variables = {
                'CPATH': ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'include'),
                    posixpath.join(hpcx_mpi_dir, 'include'),
                    posixpath.join(hpcx_sharp_dir, 'include'),
                    posixpath.join(hpcx_ucx_dir, 'include'),
                    '$CPATH']),
                'HPCX_CLUSTERKIT_DIR': hpcx_clusterkit_dir,
                'HPCX_DIR': hpcx_dir,
                'HPCX_HCOLL_DIR': hpcx_hcoll_dir,
                'HPCX_IPM_DIR': hpcx_ipm_dir,
                'HPCX_IPM_LIB': hpcx_ipm_lib,
                'HPCX_MPI_DIR': hpcx_mpi_dir,
                'HPCX_MPI_TESTS_DIR': hpcx_mpi_tests_dir,
                'HPCX_NCCL_RDMA_SHARP_PLUGIN_DIR': hpcx_nccl_rdma_sharp_plugin_dir,
                'HPCX_OSHMEM_DIR': hpcx_oshmem_dir,
                'HPCX_OSU_CUDA_DIR': hpcx_osu_cuda_dir,
                'HPCX_OSU_DIR': hpcx_osu_dir,
                'HPCX_SHARP_DIR': hpcx_sharp_dir,
                'HPCX_UCX_DIR': hpcx_ucx_dir,
                'LIBRARY_PATH': ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib'),
                    posixpath.join(hpcx_mpi_dir, 'lib'),
                    posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
                    posixpath.join(hpcx_sharp_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib'),
                    '$LIBRARY_PATH']),
                'MPI_HOME': hpcx_mpi_dir,
                'OMPI_HOME': hpcx_mpi_dir,
                'OPAL_PREFIX': hpcx_mpi_dir,
                'OSHMEM_HOME': hpcx_mpi_dir,
                'PATH': ':'.join([
                    posixpath.join(hpcx_clusterkit_dir, 'bin'),
                    posixpath.join(hpcx_hcoll_dir, 'bin'),
                    posixpath.join(hpcx_mpi_dir, 'bin'),
                    posixpath.join(hpcx_ucx_dir, 'bin'),
                    '$PATH']),
                'PKG_CONFIG_PATH': ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_mpi_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_sharp_dir, 'lib', 'pkgconfig'),
                    posixpath.join(hpcx_ucx_dir, 'lib', 'pkgconfig'),
                    '$PKG_CONFIG_PATH']),
                'SHMEM_HOME': hpcx_mpi_dir}
            # Set library path
            if self.ldconfig:
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_hcoll_dir, 'lib')))
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_mpi_dir, 'lib')))
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir,
                                             'lib')))
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_sharp_dir, 'lib')))
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_ucx_dir, 'lib')))
                self.__commands.append(self.ldcache_step(
                    directory=posixpath.join(hpcx_ucx_dir, 'lib', 'ucx')))
            else:
                self.environment_variables['LD_LIBRARY_PATH'] = ':'.join([
                    posixpath.join(hpcx_hcoll_dir, 'lib'),
                    posixpath.join(hpcx_mpi_dir, 'lib'),
                    posixpath.join(hpcx_nccl_rdma_sharp_plugin_dir, 'lib'),
                    posixpath.join(hpcx_sharp_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib'),
                    posixpath.join(hpcx_ucx_dir, 'lib', 'ucx'),
                    '$LD_LIBRARY_PATH'])
        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, self.__label)]))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        HPC-X is copied in its entirety, so the runtime stage reuses the
        same instructions as the build stage.
        # Examples
        ```python
        h = hpcx(...)
        Stage0 += h
        Stage1 += h.runtime()
        ```
        """
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/hpcx.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Generic build building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import posixpath
import hpccm.templates.annotate
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
from hpccm.building_blocks.base import bb_base
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.label import label
from hpccm.primitives.shell import shell
class generic_build(bb_base, hpccm.templates.annotate,
                    hpccm.templates.downloader, hpccm.templates.envvars,
                    hpccm.templates.ldconfig, hpccm.templates.rm):
    """The `generic_build` building block downloads and builds
    a specified package.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    annotations: Dictionary of additional annotations to include. The
    default is an empty dictionary.
    build: List of shell commands to run in order to build the
    package. The working directory is the source directory. The
    default is an empty list.
    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.
    comment: Boolean flag to specify whether to include a comment in
    the container specification identifying the source of the package
    (URL, repository, or local package path). The default is True.
    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.
    devel_environment: Dictionary of environment variables and values,
    e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the development
    stage after the package is built and installed. The default is an
    empty dictionary.
    directory: The source code location. The default value is the
    basename of the downloaded package. If the value is not an
    absolute path, then the temporary working directory is prepended.
    environment: Boolean flag to specify whether the environment
    should be modified (see `devel_environment` and
    `runtime_environment`). The default is True.
    install: List of shell commands to run in order to install the
    package. The working directory is the source directory. If
    `prefix` is defined, it will be automatically created if the list
    is non-empty. The default is an empty list.
    ldconfig: Boolean flag to specify whether the library directory
    should be added dynamic linker cache. The default value is False.
    libdir: The path relative to the install prefix to use when
    configuring the dynamic linker cache. The default value is `lib`.
    package: Path to the local source package relative to the local
    build context. One of this parameter or the `repository` or `url`
    parameters must be specified.
    prefix: The top level install location. The default value is
    empty. If defined then the location is copied as part of the
    runtime method.
    recursive: Initialize and checkout git submodules. `repository` parameter
    must be specified. The default is False.
    repository: The git repository of the package to build. One of
    this parameter or the `package` or `url` parameters must be
    specified.
    _run_arguments: Specify additional [Dockerfile RUN arguments](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md) (Docker specific).
    runtime: The list of files / directories to copy into the runtime
    stage. The default is an empty list, i.e., copy the entire
    prefix.
    runtime_environment: Dictionary of environment variables and
    values, e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the runtime
    stage. The default is an empty dictionary.
    unpack: Unpack the sources after downloading. Default is `True`.
    url: The URL of the package to build. One of this parameter or
    the `package` or `repository` or parameters must be specified.
    wd: The container working directory used for downloading and
    building the package. The default is the global HPCCM working
    directory.
    # Examples
    ```python
    generic_build(build=['make ARCH=sm_70'],
                  install=['cp stream /usr/local/bin/cuda-stream'],
                  repository='https://github.com/bcumming/cuda-stream')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(generic_build, self).__init__(**kwargs)
        # User-configurable parameters; see the class docstring for details
        self.__annotations = kwargs.get('annotations', {})
        self.__build = kwargs.get('build', [])
        self.__comment = kwargs.get('comment', True)
        self.__directory = kwargs.get('directory', None)
        self.environment_variables = kwargs.get('devel_environment', {})
        self.__install = kwargs.get('install', [])
        self.__libdir = kwargs.get('libdir', 'lib')
        self.__prefix = kwargs.get('prefix', None)
        self.__recursive = kwargs.get('recursive', False)
        self.__run_arguments = kwargs.get('_run_arguments', None)
        self.__runtime = kwargs.get('runtime', [])
        self.runtime_environment_variables = kwargs.get('runtime_environment', {})
        self.__unpack = kwargs.get('unpack', True)
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        self.__commands = [] # Filled in by __setup()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        # Identify the source of the package at the top of the block
        if self.__comment:
            if self.url:
                self += comment(self.url, reformat=False)
            elif self.repository:
                self += comment(self.repository, reformat=False)
            elif self.package:
                self += comment(self.package, reformat=False)
        # A local source package must first be copied from the build
        # context into the image working directory
        if self.package:
            self += copy(src=self.package,
                         dest=posixpath.join(self.__wd,
                                             os.path.basename(self.package)))
        self += shell(_arguments=self.__run_arguments,
                      commands=self.__commands)
        self += environment(variables=self.environment_step())
        self += label(metadata=self.annotate_step())
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # Get source
        # NOTE(review): when `directory` is not given, self.src_directory is
        # presumably set by download_step() from the downloader template --
        # verify in hpccm.templates.downloader
        self.__commands.append(self.download_step(recursive=self.__recursive,
                                                  wd=self.__wd, unpack=self.__unpack))
        # directory containing the unarchived package
        if self.__directory:
            if posixpath.isabs(self.__directory):
                self.src_directory = self.__directory
            else:
                self.src_directory = posixpath.join(self.__wd,
                                                    self.__directory)
        # sanity check
        if not self.src_directory:
            raise RuntimeError('source directory is not defined')
        # Build
        if self.__build:
            self.__commands.append('cd {}'.format(self.src_directory))
            self.__commands.extend(self.__build)
        # Install
        if self.__install:
            if self.__prefix:
                self.__commands.append('mkdir -p {}'.format(self.__prefix))
            self.__commands.append('cd {}'.format(self.src_directory))
            self.__commands.extend(self.__install)
        # Set library path
        # NOTE(review): assumes `prefix` is set whenever `ldconfig` is True;
        # posixpath.join would fail on a None prefix -- confirm callers
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(self.__prefix, self.__libdir)))
        # Add annotations
        for key,value in self.__annotations.items():
            self.add_annotation(key, value)
        # Cleanup: remove the source tree and, for url/package sources, the
        # downloaded archive itself
        remove = [self.src_directory]
        if self.url:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.url)))
        elif self.package:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.package)))
        self.__commands.append(self.cleanup_step(items=remove))
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        g = generic_build(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        # Without a prefix there is no fixed install location to copy from
        # the build stage, so there is nothing to generate
        if self.__prefix:
            if self.__comment:
                if self.url:
                    self.rt += comment(self.url, reformat=False)
                elif self.repository:
                    self.rt += comment(self.repository, reformat=False)
            if self.__runtime:
                # Copy only the user-specified files / directories
                for src in self.__runtime:
                    if '*' in posixpath.basename(src):
                        # When using COPY with more than one source file,
                        # the destination must be a directory and end with
                        # a /
                        dest = posixpath.dirname(src) + '/'
                    else:
                        dest = src
                    self.rt += copy(_from=_from, src=src, dest=dest)
            else:
                # Copy the entire prefix
                self.rt += copy(_from=_from, src=self.__prefix,
                                dest=self.__prefix)
            if self.ldconfig:
                self.rt += shell(commands=[self.ldcache_step(
                    directory=posixpath.join(self.__prefix, self.__libdir))])
            if self.runtime_environment_variables:
                self.rt += environment(
                    variables=self.environment_step(runtime=True))
            if self.annotate:
                self.rt += label(metadata=self.annotate_step())
            return str(self.rt)
        else: #pragma: no cover
            return
| hpc-container-maker-master | hpccm/building_blocks/generic_build.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MPICH building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
from copy import copy as _copy
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class mpich(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `mpich` building block configures, builds, and installs the
    [MPICH](https://www.mpich.org) component.
    As a side effect, a toolchain is created containing the MPI
    compiler wrappers. The toolchain can be passed to other operations
    that want to build using the MPI compiler wrappers.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    check: Boolean flag to specify whether the `make check` and `make
    testing` steps should be performed. The default is False.
    configure_opts: List of options to pass to `configure`. The
    default is an empty list.
    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.
    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    MPICH. The default is True.
    ldconfig: Boolean flag to specify whether the MPICH library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the MPICH library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `file`, `gzip`,
    `make`, `openssh-client`, `perl`, `tar`, and `wget`. For
    RHEL-based Linux distributions, the default values are `file`,
    `gzip`, `make`, `openssh-clients`, `perl`, `tar`, and `wget`.
    prefix: The top level install location. The default value is
    `/usr/local/mpich`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    version: The version of MPICH source to download. The default
    value is `3.3.2`.
    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.
    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.
    # Examples
    ```python
    mpich(prefix='/opt/mpich/3.3', version='3.3')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(mpich, self).__init__(**kwargs)
        self.__baseurl = kwargs.pop('baseurl',
                                    'https://www.mpich.org/static/downloads')
        self.__check = kwargs.pop('check', False)
        self.__configure_opts = kwargs.pop('configure_opts', [])
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/mpich')
        self.__runtime_ospackages = [] # Filled in by __distro()
        # Input toolchain, i.e., what to use when building
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '3.3.2')
        # Output toolchain, i.e., the MPI compiler wrappers that other
        # building blocks can use to compile against this MPICH install
        self.toolchain = toolchain(CC='mpicc', CXX='mpicxx', F77='mpif77',
                                   F90='mpif90', FC='mpifort')
        # Set the configuration options
        self.__configure()
        # Set the Linux distribution specific parameters
        self.__distro()
        # Set the environment variables
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        # Setup build configuration; the heavy lifting is delegated to the
        # generic_autotools building block
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            comment=False,
            configure_opts=self.__configure_opts,
            devel_environment=self.environment_variables,
            # Run test suite (must be after install)
            postinstall=['cd {0}/mpich-{1}'.format(hpccm.config.g_wd,
                                                   self.__version),
                         'RUNTESTS_SHOWPROGRESS=1 make testing'] if self.__check else None,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            toolchain=self.__toolchain,
            url='{0}/{1}/mpich-{1}.tar.gz'.format(self.__baseurl,
                                                  self.__version),
            **kwargs)
        # Container instructions
        self += comment('MPICH version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def __configure(self):
        """Setup configure options based on user parameters"""
        # Create a copy of the toolchain so that it can be modified
        # without impacting the original
        self.__toolchain = _copy(self.__toolchain)
        # MPICH does not accept F90
        self.__toolchain.F90 = ''
        # Workaround issue with the PGI compiler
        # https://lists.mpich.org/pipermail/discuss/2017-July/005235.html
        if self.__toolchain.CC and re.match('.*pgcc', self.__toolchain.CC):
            self.__configure_opts.append('--disable-fast')
    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['file', 'gzip', 'make', 'openssh-client',
                                     'perl', 'tar', 'wget']
            self.__runtime_ospackages = ['openssh-client']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['file', 'gzip', 'make',
                                     'openssh-clients', 'perl', 'tar', 'wget']
            self.__runtime_ospackages = ['openssh-clients']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        m = mpich(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        self.rt += comment('MPICH')
        # ssh client is needed at runtime to launch multi-node MPI jobs
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/mpich.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""XPMEM building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class xpmem(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `xpmem` building block builds and installs the user space
    library from the [XPMEM](https://github.com/hjelmn/xpmem)
    component.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    branch: The branch of XPMEM to use. The default value is
    `master`.
    configure_opts: List of options to pass to `configure`. The
    default values are `--disable-kernel-module`.
    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.
    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.
    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH` and `LIBRARY_PATH`) should be modified
    to include XPMEM. The default is True.
    ldconfig: Boolean flag to specify whether the XPMEM library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the XPMEM library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to configuring
    and building. The default value are `autoconf`, `automake`,
    `ca-certificates`, `file`, `git`, `libtool`, and `make`.
    prefix: The top level install location. The default value is
    `/usr/local/xpmem`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.
    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.
    # Examples
    ```python
    xpmem(prefix='/opt/xpmem', branch='master')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(xpmem, self).__init__(**kwargs)
        # Parameters
        self.__branch = kwargs.pop('branch', 'master')
        # Only the user space library is built; building the kernel module
        # is disabled by default
        self.__configure_opts = kwargs.pop('configure_opts',
                                           ['--disable-kernel-module'])
        self.__ospackages = kwargs.pop('ospackages', ['autoconf', 'automake',
                                                      'ca-certificates',
                                                      'file', 'git',
                                                      'libtool', 'make'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/xpmem')
        self.__repository = kwargs.pop('repository',
                                       'https://github.com/hjelmn/xpmem.git')
        # Setup the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, 'lib'))
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        # Setup build configuration; the heavy lifting is delegated to the
        # generic_autotools building block
        self.__bb = generic_autotools(
            base_annotation=self.__class__.__name__,
            branch=self.__branch,
            comment=False,
            configure_opts=self.__configure_opts,
            devel_environment=self.environment_variables,
            # Generate the configure script from the git checkout
            preconfigure=['autoreconf --install'],
            prefix=self.__prefix,
            repository=self.__repository,
            runtime_environment=self.environment_variables,
            **kwargs)
        # Container instructions
        self += comment('XPMEM branch {}'.format(self.__branch))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        x = xpmem(...)
        Stage0 += x
        Stage1 += x.runtime()
        ```
        """
        self.rt += comment('XPMEM')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/xpmem.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""SLURM PMI2 building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class slurm_pmi2(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `slurm_pmi2` building block configures, builds, and installs
    the PMI2 component from SLURM.
    Note: this building block does not install SLURM itself.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    configure_opts: List of options to pass to `configure`. The
    default is an empty list.
    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.
    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.
    environment: Boolean flag to specify whether the environment
    (`CPATH` and `LD_LIBRARY_PATH`) should be modified to include
    PMI2. The default is False.
    ldconfig: Boolean flag to specify whether the PMI2 library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the PMI2 library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to configuring
    and building. The default values are `bzip2`, `file`, `make`,
    `perl`, `tar`, and `wget`.
    prefix: The top level install location. The default value is
    `/usr/local/slurm-pmi2`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default value is empty.
    version: The version of SLURM source to download. The default
    value is `21.08.8`.
    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.
    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.
    # Examples
    ```python
    slurm_pmi2(prefix='/opt/pmi', version='20.11.9')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(slurm_pmi2, self).__init__(**kwargs)
        self.__baseurl = kwargs.pop('baseurl', 'https://download.schedmd.com/slurm')
        self.__environment = kwargs.pop('environment', False)
        self.__ospackages = kwargs.pop('ospackages', ['bzip2', 'file', 'make',
                                                      'perl', 'tar', 'wget'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/slurm-pmi2')
        self.__version = kwargs.pop('version', '21.08.8')
        # Setup the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include', 'slurm'))
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        # Setup build configuration; the heavy lifting is delegated to the
        # generic_autotools building block
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            comment=False,
            devel_environment=self.environment_variables,
            environment=self.__environment,
            # Skip the default "make" and "make install" steps; only the
            # PMI2 contrib is built and installed via postconfigure below,
            # not SLURM itself
            install=False,
            make=False,
            postconfigure=['make -C contribs/pmi2 install'],
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/slurm-{1}.tar.bz2'.format(self.__baseurl, self.__version),
            **kwargs)
        # Container instructions
        self += comment('SLURM PMI2 version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        p = slurm_pmi2(...)
        Stage0 += p
        Stage1 += p.runtime()
        ```
        """
        self.rt += comment('SLURM PMI2')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/slurm_pmi2.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Python building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import hpccm.config
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.shell import shell
class python(bb_base):
    """The `python` building block installs Python from the upstream Linux
    distribution package repositories.
    # Parameters
    alternatives: Boolean flag to specify whether to configure alternatives for `python` and `python-config` (if `devel` is enabled). RHEL-based 8.x distributions do not setup `python` by [default](https://developers.redhat.com/blog/2019/05/07/what-no-python-in-red-hat-enterprise-linux-8/). The default is False.
    devel: Boolean flag to specify whether to also install the Python
    development headers and libraries. The default is False.
    python2: Boolean flag to specify whether to install Python version
    2. The default is True.
    python3: Boolean flag to specify whether to install Python version
    3. The default is True.
    # Examples
    ```python
    python()
    ```
    ```python
    python(python3=False)
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(python, self).__init__(**kwargs)
        self.__alternatives = kwargs.get('alternatives', False)
        self.__devel = kwargs.get('devel', False)
        self.__python2 = kwargs.get('python2', True)
        self.__python3 = kwargs.get('python3', True)
        self.__debs = []  # deb packages to install
        self.__rpms = []  # rpm packages to install
        # Ubuntu 22.04 and newer renamed the Python 2 deb packages to
        # "python2*"; earlier Ubuntu releases use the unversioned names
        recent_ubuntu = (
            hpccm.config.g_linux_distro == linux_distro.UBUNTU and
            hpccm.config.g_linux_version >= StrictVersion('22.0'))
        if self.__python2:
            self.__debs.append('python2' if recent_ubuntu else 'python')
            self.__rpms.append('python2')
            if self.__devel:
                self.__debs.append(
                    'python2-dev' if recent_ubuntu else 'python-dev')
                self.__rpms.append('python2-devel')
        if self.__python3:
            self.__debs.append('python3')
            self.__rpms.append('python3')
            if self.__devel:
                self.__debs.append('python3-dev')
                self.__rpms.append('python3-devel')
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Python')
        self += packages(apt=self.__debs, yum=self.__rpms)
        # RHEL-based 8.x distributions need alternatives configured for the
        # unversioned "python" name to exist
        if self.__alternatives:
            cmds = ['alternatives --set python /usr/bin/python2']
            if self.__devel:
                cmds.append('alternatives --install /usr/bin/python-config python-config /usr/bin/python2-config 30')
            self += shell(commands=cmds)
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        The runtime stage instructions are identical to the build stage.
        # Examples
        ```python
        p = python(...)
        Stage0 += p
        Stage1 += p.runtime()
        ```
        """
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/python.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""OpenBLAS building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class openblas(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `openblas` building block builds and installs the
    [OpenBLAS](https://www.openblas.net) component.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    OpenBLAS. The default is True.

    ldconfig: Boolean flag to specify whether the OpenBLAS library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the OpenBLAS library
    directory. The default value is False.

    make_opts: List of options to pass to `make`. For aarch64
    processors, the default values are `TARGET=ARMV8` and
    `USE_OPENMP=1`. For ppc64le processors, the default values are
    `TARGET=POWER8` and `USE_OPENMP=1`. For x86_64 processors, the
    default value is `USE_OPENMP=1`.

    ospackages: List of OS packages to install prior to building. The
    default values are `make`, `perl`, `tar`, and `wget`.

    prefix: The top level installation location. The default value is
    `/usr/local/openblas`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    version: The version of OpenBLAS source to download. The default
    value is `0.3.21`.

    # Examples

    ```python
    openblas(prefix='/opt/openblas/0.3.1', version='0.3.1')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(openblas, self).__init__(**kwargs)

        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/xianyi/OpenBLAS/archive')
        self.__make_opts = kwargs.pop('make_opts',
                                      [])  # Filled in by __make()
        self.__ospackages = kwargs.pop('ospackages', ['make', 'perl', 'tar',
                                                      'wget'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/openblas')
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '0.3.21')

        # Set the make options (must happen after the toolchain is set)
        self.__make()

        # Setup the environment variables.  When ldconfig is used the
        # library directory is registered with the dynamic linker instead.
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))

        # Setup build configuration.  Remaining kwargs (e.g., annotate,
        # environment) are forwarded to the generic_build building block.
        self.__bb = generic_build(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            build=['make {}'.format(' '.join(self.__make_opts))],
            comment=False,
            directory='OpenBLAS-{}'.format(self.__version),
            devel_environment=self.environment_variables,
            install=['make install PREFIX={}'.format(self.__prefix)],
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version),
            **kwargs)

        # Container instructions
        self += comment('OpenBLAS version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __make(self):
        """Based on the CPU architecture, set values accordingly.  A user
        specified value overrides any defaults."""
        if not self.__make_opts:
            # Pass any non-default compilers through to make
            if self.__toolchain.CC:
                self.__make_opts.append('CC={}'.format(self.__toolchain.CC))
            if self.__toolchain.FC:
                self.__make_opts.append('FC={}'.format(self.__toolchain.FC))

            if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
                self.__make_opts.extend(['TARGET=ARMV8', 'USE_OPENMP=1'])
            elif hpccm.config.g_cpu_arch == cpu_arch.PPC64LE:
                self.__make_opts.extend(['TARGET=POWER8', 'USE_OPENMP=1'])
            elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
                self.__make_opts.extend(['USE_OPENMP=1'])
            else:  # pragma: no cover
                raise RuntimeError('Unknown CPU architecture')

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        o = openblas(...)
        Stage0 += o
        Stage1 += o.runtime()
        ```
        """
        self.rt += comment('OpenBLAS')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/openblas.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""PMIX building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
class pmix(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `pmix` building block configures, builds, and installs the
    [PMIX](https://github.com/openpmix/openpmix) component.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    check: Boolean flag to specify whether the `make check` step
    should be performed. The default is False.

    configure_opts: List of options to pass to `configure`. The
    default is an empty list.

    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.

    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH`, and `PATH`) should be modified to
    include PMIX. The default is True.

    ldconfig: Boolean flag to specify whether the PMIX library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the PMIX library
    directory. The default value is False.

    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `file`, `hwloc`,
    `libevent-dev`, `make`, `tar`, and `wget`. For RHEL-based Linux
    distributions, the default values are `file`, `hwloc`,
    `libevent-devel`, `make`, `tar`, and `wget`.

    prefix: The top level install location. The default value is
    `/usr/local/pmix`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default value is empty.

    version: The version of PMIX source to download. The default value
    is `4.1.2`.

    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.

    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.

    # Examples

    ```python
    pmix(prefix='/opt/pmix/3.1.4', version='3.1.4')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(pmix, self).__init__(**kwargs)

        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/openpmix/openpmix/releases/download')
        self.__check = kwargs.pop('check', False)
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/pmix')
        self.__runtime_ospackages = []  # Filled in by __distro()
        self.__version = kwargs.pop('version', '4.1.2')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the environment variables
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        # When ldconfig is used the library directory is registered with
        # the dynamic linker instead of modifying LD_LIBRARY_PATH.
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))

        # Setup build configuration.  Remaining kwargs (configure_opts,
        # enable_*/disable_*/with_*/without_*, toolchain, etc.) are
        # forwarded to the generic_autotools building block.
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            comment=False,
            devel_environment=self.environment_variables,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/v{1}/pmix-{1}.tar.gz'.format(self.__baseurl,
                                                  self.__version),
            **kwargs)

        # Container instructions
        self += comment('PMIX version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['file', 'hwloc', 'libevent-dev', 'make',
                                     'tar', 'wget']
                # The test suite is driven by perl scripts
                if self.__check:
                    self.__ospackages.append('perl')
            self.__runtime_ospackages = ['libevent-2.*',
                                         'libevent-pthreads-2.*']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['file', 'hwloc', 'libevent-devel', 'make',
                                     'tar', 'wget']
                # The test suite is driven by perl scripts
                if self.__check:
                    self.__ospackages.append('perl')
            self.__runtime_ospackages = ['libevent']
        else:  # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        p = pmix(...)
        Stage0 += p
        Stage1 += p.runtime()
        ```
        """
        self.rt += comment('PMIX')
        # PMIX needs the libevent shared libraries at runtime
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/pmix.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Intel Parallel Studio XE runtime building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import logging # pylint: disable=unused-import
import posixpath
import hpccm.config
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch, linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class intel_psxe_runtime(bb_base, hpccm.templates.envvars):
    """The `intel_psxe_runtime` building block downloads and installs the
    [Intel Parallel Studio XE runtime](https://software.intel.com/en-us/articles/intel-parallel-studio-xe-runtime-by-version).

    You must agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement)
    to use this building block.

    Note: this building block does *not* install development versions
    of the Intel software tools. Please see the
    [intel_psxe](#intel_psxe), [intel_mpi](#intel_mpi), or [mkl](#mkl)
    building blocks for development environments.

    # Parameters

    daal: Boolean flag to specify whether the Intel Data Analytics
    Acceleration Library runtime should be installed. The default is
    True.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`, `PATH`, and others) should be modified to
    include Intel Parallel Studio XE runtime. `psxevars` has
    precedence. The default is True.

    eula: By setting this value to `True`, you agree to the [Intel End User License Agreement](https://software.intel.com/en-us/articles/end-user-license-agreement).
    The default value is `False`.

    icc: Boolean flag to specify whether the Intel C++ Compiler
    runtime should be installed. The default is True.

    ifort: Boolean flag to specify whether the Intel Fortran Compiler
    runtime should be installed. The default is True.

    ipp: Boolean flag to specify whether the Intel Integrated
    Performance Primitives runtime should be installed. The default
    is True.

    mkl: Boolean flag to specify whether the Intel Math Kernel Library
    runtime should be installed. The default is True.

    mpi: Boolean flag to specify whether the Intel MPI Library runtime
    should be installed. The default is True.

    psxevars: Intel Parallel Studio XE provides an environment script
    (`psxevars.sh`) to setup the environment. If this value is
    `True`, the bashrc is modified to automatically source this
    environment script. However, the Intel runtime environment is not
    automatically available to subsequent container image build steps;
    the environment is available when the container image is run. To
    set the Intel Parallel Studio XE runtime environment in subsequent
    build steps you can explicitly call `source
    /opt/intel/psxe_runtime/linux/bin/psxevars.sh intel64` in each
    build step. If this value is to set `False`, then the environment
    is set such that the environment is visible to both subsequent
    container image build steps and when the container image is run.
    However, the environment may differ slightly from that set by
    `psxevars.sh`. The default value is `True`.

    ospackages: List of OS packages to install prior to installing
    Intel MPI. For Ubuntu, the default values are
    `apt-transport-https`, `ca-certificates`, `gcc`, `gnupg`,
    `man-db`, `openssh-client`, and `wget`. For RHEL-based Linux
    distributions, the default values are `man-db`, `openssh-clients`,
    and `which`.

    tbb: Boolean flag to specify whether the Intel Threading Building
    Blocks runtime should be installed. The default is True.

    version: The version of the Intel Parallel Studio XE runtime to
    install. The default value is `2020.2-14`.

    # Examples

    ```python
    intel_psxe_runtime(eula=True, version='2018.5-281')
    ```

    ```python
    intel_psxe_runtime(daal=False, eula=True, ipp=False, psxevars=False)
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(intel_psxe_runtime, self).__init__(**kwargs)

        # By setting this value to True, you agree to the
        # corresponding Intel End User License Agreement
        # (https://software.intel.com/en-us/articles/end-user-license-agreement)
        self.__eula = kwargs.get('eula', False)

        self.__daal = kwargs.get('daal', True)
        self.__icc = kwargs.get('icc', True)
        self.__ifort = kwargs.get('ifort', True)
        self.__ipp = kwargs.get('ipp', True)
        self.__mkl = kwargs.get('mkl', True)
        self.__mpi = kwargs.get('mpi', True)
        self.__psxevars = kwargs.get('psxevars', True)
        self.__ospackages = kwargs.get('ospackages', [])
        self.__tbb = kwargs.get('tbb', True)
        self.__version = kwargs.get('version', '2020.2-14')
        # Repository URLs are keyed by year, e.g., '2020.2-14' -> '2020'
        self.__year = self.__version.split('.')[0]

        self.__bashrc = ''  # Filled in by __distro()
        self.__apt = []  # Filled in by __setup()
        self.__yum = []  # Filled in by __setup()

        if hpccm.config.g_cpu_arch != cpu_arch.X86_64:  # pragma: no cover
            logging.warning('Using intel_psxe_runtime on a non-x86_64 processor')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Construct the list of runtime packages to install
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('Intel Parallel Studio XE runtime version {}'.format(self.__version))

        if self.__ospackages:
            self += packages(ospackages=self.__ospackages)

        if not self.__eula:
            raise RuntimeError('Intel EULA was not accepted. To accept, see the documentation for this building block')

        if int(self.__year) >= 2019:
            apt_repositories = ['deb https://apt.repos.intel.com/{0} intel-psxe-runtime main'.format(self.__year)]
        else:
            # The APT keys expired and had to be reissued. They were only
            # reissued for 2019 and later. Blindly (and insecurely!) trust
            # the 2018 and earlier repositories.
            apt_repositories = ['deb [trusted=yes] https://apt.repos.intel.com/{0} intel-psxe-runtime main'.format(self.__year)]

        self += packages(
            apt=self.__apt,
            apt_keys=['https://apt.repos.intel.com/{0}/GPG-PUB-KEY-INTEL-PSXE-RUNTIME-{0}'.format(self.__year)],
            apt_repositories=apt_repositories,
            aptitude=True,
            yum=self.__yum,
            yum_keys=['https://yum.repos.intel.com/{0}/setup/RPM-GPG-KEY-intel-psxe-runtime-{0}'.format(self.__year)],
            yum_repositories=['https://yum.repos.intel.com/{0}/setup/intel-psxe-runtime-{0}.repo'.format(self.__year)],
            yum4=True)

        # Set the environment
        if self.__psxevars:
            # Source the psxevars environment script when starting the
            # container, but the variables will not be available for any
            # subsequent build steps.
            self += shell(commands=['echo "source /opt/intel/psxe_runtime/linux/bin/psxevars.sh intel64" >> {}'.format(self.__bashrc)])
        else:
            # Set the environment so that it will be available to
            # subsequent build steps and when starting the container,
            # but this may miss some things relative to the psxevars
            # environment script.
            self += environment(variables=self.environment_step())

    def __distro(self):
        """Based on the Linux distribution, set values accordingly.  A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['apt-transport-https', 'ca-certificates',
                                     'gcc', 'gnupg', 'man-db',
                                     'openssh-client', 'wget']
            self.__bashrc = '/etc/bash.bashrc'
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['man-db', 'openssh-clients', 'which']
            self.__bashrc = '/etc/bashrc'
        else:  # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

    def __environment(self):
        """Manually set the environment as an alternative to psxevars.sh"""
        basepath = '/opt/intel/psxe_runtime/linux'
        ld_library_path = []
        path = []
        env = {}

        if self.__daal:
            env['DAALROOT'] = posixpath.join(basepath, 'daal')
            ld_library_path.append(posixpath.join(basepath, 'daal', 'lib',
                                                  'intel64'))
        if self.__icc:
            ld_library_path.append(posixpath.join(basepath, 'compiler', 'lib',
                                                  'intel64_lin'))
        if self.__ifort:
            ld_library_path.append(posixpath.join(basepath, 'compiler', 'lib',
                                                  'intel64_lin'))
        if self.__ipp:
            env['IPPROOT'] = posixpath.join(basepath, 'ipp')
            ld_library_path.append(posixpath.join(basepath, 'ipp', 'lib',
                                                  'intel64'))
        if self.__mkl:
            env['MKLROOT'] = posixpath.join(basepath, 'mkl')
            ld_library_path.append(posixpath.join(basepath, 'mkl', 'lib',
                                                  'intel64'))
        if self.__mpi:
            env['I_MPI_ROOT'] = posixpath.join(basepath, 'mpi')
            ld_library_path.append(posixpath.join(basepath, 'mpi', 'intel64',
                                                  'lib'))
            path.append(posixpath.join(basepath, 'mpi', 'intel64', 'bin'))

            # Starting with the 2019 release, Intel MPI bundles libfabric
            if LooseVersion(self.__version) >= LooseVersion('2019'):
                env['FI_PROVIDER_PATH'] = posixpath.join(
                    basepath, 'mpi', 'intel64', 'libfabric', 'lib', 'prov')
                ld_library_path.append(posixpath.join(
                    basepath, 'mpi', 'intel64', 'libfabric', 'lib'))
                path.append(posixpath.join(basepath, 'mpi', 'intel64',
                                           'libfabric', 'bin'))
            if LooseVersion(self.__version) >= LooseVersion('2020'):
                ld_library_path.append(posixpath.join(
                    basepath, 'mpi', 'intel64', 'lib', 'release'))

        if self.__tbb:
            if int(self.__year) >= 2020:
                ld_library_path.append(posixpath.join(basepath, 'tbb', 'lib',
                                                      'intel64', 'gcc4.8'))
            else:
                ld_library_path.append(posixpath.join(basepath, 'tbb', 'lib',
                                                      'intel64', 'gcc4.7'))

        if ld_library_path:
            ld_library_path.append('$LD_LIBRARY_PATH')
            env['LD_LIBRARY_PATH'] = ':'.join(ld_library_path)

        if path:
            path.append('$PATH')
            env['PATH'] = ':'.join(path)

        return env

    def __setup(self):
        """Construct the list of packages, i.e., fill in
        self.__apt and self.__yum"""

        if (self.__daal and self.__icc and self.__ifort and self.__ipp
            and self.__mkl and self.__mpi and self.__tbb):
            # Everything selected, so install the omnibus runtime package
            self.__apt = ['intel-psxe-runtime={}'.format(self.__version)]
            self.__yum = ['intel-psxe-runtime-{}'.format(self.__version)]
        else:
            if self.__daal:
                self.__apt.append(
                    'intel-daal-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-daal-runtime-64bit-{}'.format(self.__version))
            if self.__icc:
                self.__apt.append(
                    'intel-icc-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-icc-runtime-64bit-{}'.format(self.__version))
            if self.__ifort:
                self.__apt.append(
                    'intel-ifort-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-ifort-runtime-64bit-{}'.format(self.__version))
            if self.__ipp:
                self.__apt.append(
                    'intel-ipp-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-ipp-runtime-64bit-{}'.format(self.__version))
            if self.__mkl:
                self.__apt.append(
                    'intel-mkl-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-mkl-runtime-64bit-{}'.format(self.__version))
            if self.__mpi:
                self.__apt.append(
                    'intel-mpi-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-mpi-runtime-64bit-{}'.format(self.__version))
            if self.__tbb:
                self.__apt.append(
                    'intel-tbb-runtime={}'.format(self.__version))
                self.__yum.append(
                    'intel-tbb-runtime-64bit-{}'.format(self.__version))

        # Set the environment
        self.environment_variables = self.__environment()

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        i = intel_psxe_runtime(...)
        Stage0 += i
        Stage1 += i.runtime()
        ```
        """
        # This building block installs runtime components only, so the
        # runtime stage simply repeats the full set of instructions.
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/intel_psxe_runtime.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""pip building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging
import posixpath
import hpccm.config
import hpccm.templates.rm
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.shell import shell
class pip(bb_base, hpccm.templates.rm):
    """The `pip` building block installs Python packages from PyPi.

    # Parameters

    alternatives: Boolean flag to specify whether to configure alternatives for `python` and `pip`. RHEL-based 8.x distributions do not setup `python` by [default](https://developers.redhat.com/blog/2019/05/07/what-no-python-in-red-hat-enterprise-linux-8/). The default is False.

    args: List of arguments to pass to pip. The default is
    `--no-cache-dir`.

    ospackages: List of OS packages to install prior to installing
    PyPi packages. For Ubuntu, the default values are `python-pip`,
    `python-setuptools`, and `python-wheel` for Python 2.x and
    `python3-pip`, `python3-setuptools`, and `python3-wheel` for
    Python 3.x. For RHEL-based distributions, the default
    values are `python2-pip` for Python 2.x and `python3-pip` for
    Python 3.x.

    packages: List of PyPi packages to install. The default is
    an empty list.

    pip: The name of the `pip` tool to use. The default is `pip`.

    requirements: Path to pip requirements file. The default is
    empty.

    upgrade: Boolean flag to control whether pip itself should be
    upgraded prior to installing any PyPi packages. The default is
    False.

    # Examples

    ```python
    pip(packages=['hpccm'])
    ```

    ```python
    pip(packages=['hpccm'], pip='pip3')
    ```

    ```python
    pip(requirements='requirements.txt')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""

        super(pip, self).__init__(**kwargs)

        self.__alternatives = kwargs.get('alternatives', False)
        self.__args = kwargs.get('args', ['--no-cache-dir'])
        self.__epel = False
        self.__ospackages = kwargs.get('ospackages', None)
        self.__packages = kwargs.get('packages', [])
        self.__pip = kwargs.get('pip', 'pip')
        self.__requirements = kwargs.get('requirements', None)
        self.__upgrade = kwargs.get('upgrade', False)
        self.__wd = kwargs.get('wd', hpccm.config.g_wd)  # working directory

        self.__debs = []  # Filled in below
        self.__rpms = []  # Filled in below

        # Select OS packages based on the pip tool name: names starting
        # with "pip3" imply Python 3, anything else implies Python 2.
        if self.__ospackages is None:
            if self.__pip.startswith('pip3'):
                self.__debs.extend(['python3-pip', 'python3-setuptools',
                                    'python3-wheel'])
                self.__rpms.append('python3-pip')
            else:
                self.__debs.extend(['python-pip', 'python-setuptools',
                                    'python-wheel'])
                self.__rpms.append('python2-pip')

                # NOTE: StrictVersion comes from the deprecated distutils
                # module (removed in Python 3.12)
                if (hpccm.config.g_linux_distro == linux_distro.CENTOS and
                    hpccm.config.g_linux_version < StrictVersion('8.0')):
                    # python2-pip is an EPEL package in CentOS 7.x
                    self.__epel = True
                elif (hpccm.config.g_linux_distro == linux_distro.UBUNTU and
                      hpccm.config.g_linux_version >= StrictVersion('20.0')):
                    # python-pip is not supported in Ubuntu 20.04
                    logging.warning('pip2 is not supported in Ubuntu 20.04. Use pip3.')
        elif self.__ospackages:
            # User specified packages; use the same list for both apt and yum
            self.__debs = self.__ospackages
            self.__rpms = self.__ospackages

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""

        self += comment('pip')
        if self.__debs or self.__rpms:
            self += packages(apt=self.__debs, epel=self.__epel,
                             yum=self.__rpms)

        if self.__alternatives:
            self += shell(commands=[
                'alternatives --set python /usr/bin/python2',
                'alternatives --install /usr/bin/pip pip /usr/bin/pip2 30'])

        if self.__pip:
            # Fold the pip arguments into the pip command itself
            if self.__args:
                self.__pip = '{0} {1}'.format(self.__pip,
                                              ' '.join(self.__args))

            cmds = []

            if self.__upgrade:
                # pip version 21 and later no longer support Python 2
                if self.__pip.startswith('pip3'):
                    cmds.append('{0} install --upgrade pip'.format(self.__pip))
                else:
                    cmds.append('{0} install --upgrade "pip < 21.0"'.format(
                        self.__pip))

            if self.__requirements:
                # Copy the requirements file into the image, install from
                # it, and then remove the copy
                self += copy(src=self.__requirements,
                             dest=posixpath.join(
                                 self.__wd,
                                 posixpath.basename(self.__requirements)))
                cmds.append('{0} install -r {1}'.format(
                    self.__pip,
                    posixpath.join(self.__wd,
                                   posixpath.basename(self.__requirements))))
                cmds.append(self.cleanup_step(items=[
                    posixpath.join(self.__wd,
                                   posixpath.basename(self.__requirements))]))

            if self.__packages:
                cmds.append('{0} install {1}'.format(self.__pip,
                                                     ' '.join(self.__packages)))
            self += shell(commands=cmds)
| hpc-container-maker-master | hpccm/building_blocks/pip.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""apt-get building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import os
import posixpath
import hpccm.config
import hpccm.templates.sed
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.common import linux_distro
from hpccm.primitives.shell import shell
class apt_get(bb_base, hpccm.templates.sed, hpccm.templates.wget):
"""The `apt_get` building block specifies the set of operating system
packages to install. This building block should only be used on
images that use the Debian package manager (e.g., Ubuntu).
In most cases, the [`packages` building block](#packages) should be
used instead of `apt_get`.
# Parameters
aptitude: Boolean flag to specify whether `aptitude` should be
used instead of `apt-get`. The default is False.
download: Boolean flag to specify whether to download the deb
packages instead of installing them. The default is False.
download_directory: The deb package download location. This
parameter is ignored if `download` is False. The default value is
`/var/tmp/apt_get_download`.
extract: Location where the downloaded packages should be
extracted. Note, this extracts and does not install the packages,
i.e., the package manager is bypassed. After the downloaded
packages are extracted they are deleted. This parameter is ignored
if `download` is False. If empty, then the downloaded packages are
not extracted. The default value is an empty string.
keys: A list of GPG keys to add. The default is an empty list.
ospackages: A list of packages to install. The default is an
empty list.
ppas: A list of personal package archives to add. The default is
an empty list.
repositories: A list of apt repositories to add. The default is
an empty list.
# Examples
```python
apt_get(ospackages=['make', 'wget'])
```
"""
def __init__(self, **kwargs):
    """Initialize building block"""

    super(apt_get, self).__init__()

    # _apt_key selects the legacy "apt-key add" flow; when False, keys
    # are dearmored into /usr/share/keyrings instead
    self.__apt_key = kwargs.get('_apt_key', True)
    self.__aptitude = kwargs.get('aptitude', False)
    self.__commands = []  # Filled in by __setup()
    self.__download = kwargs.get('download', False)
    # NOTE(review): the class docstring says the default download
    # location is /var/tmp/apt_get_download; this relies on
    # hpccm.config.g_wd — presumably /var/tmp — confirm they agree
    self.__download_directory = kwargs.get(
        'download_directory',
        posixpath.join(hpccm.config.g_wd, 'apt_get_download'))
    self.__extra_opts = kwargs.get('extra_opts', [])
    self.__extract = kwargs.get('extract', None)
    self.__keys = kwargs.get('keys', [])
    self.__opts = ['-y', '--no-install-recommends']
    self.ospackages = kwargs.get('ospackages', [])
    self.__ppas = kwargs.get('ppas', [])
    self.__repositories = kwargs.get('repositories', [])

    if hpccm.config.g_linux_distro != linux_distro.UBUNTU:  # pragma: no cover
        logging.warning('Using apt-get on a non-Ubuntu Linux distribution')

    # Construct the series of commands that form the building
    # block
    self.__setup()

    # Fill in container instructions
    self.__instructions()
def __instructions(self):
    """Fill in container instructions"""
    # chdir=False: the package manager commands manage their own working
    # directory (e.g., the download step cds into the download directory)
    self += shell(chdir=False, commands=self.__commands)
def __setup(self):
"""Construct the series of commands to execute"""
if self.__extra_opts:
self.__opts.extend(self.__extra_opts)
apt_get_download = 'DEBIAN_FRONTEND=noninteractive apt-get download {}'.format(' '.join(self.__opts))
apt_get_install = 'DEBIAN_FRONTEND=noninteractive apt-get install {}'.format(' '.join(self.__opts))
if self.__keys:
for key in self.__keys:
if self.__apt_key:
self.__commands.append(
'wget -qO - {} | apt-key add -'.format(key))
else:
self.__commands.extend([
'mkdir -p /usr/share/keyrings',
'rm -f /usr/share/keyrings/{}.gpg'.format(
os.path.splitext(os.path.basename(key))[0]),
'wget -qO - {0} | gpg --dearmor -o /usr/share/keyrings/{1}.gpg'.format(
key, os.path.splitext(os.path.basename(key))[0])])
if self.__ppas:
# Need to install apt-add-repository
self.__commands.extend(['apt-get update -y',
apt_get_install + ' software-properties-common'])
for ppa in self.__ppas:
self.__commands.append('apt-add-repository {} -y'.format(ppa))
if self.__repositories:
for repo in self.__repositories:
if repo.startswith('http'):
# Repository is a URL to a repository configuration file
self.__commands.append(
self.download_step(directory='/etc/apt/sources.list.d',
url=repo))
else:
# Repository is a configuration string
self.__commands.append(
'echo "{}" >> /etc/apt/sources.list.d/hpccm.list'.format(repo))
if self.ospackages:
packages = []
for pkg in sorted(self.ospackages):
packages.append(' {}'.format(pkg))
self.__commands.append('apt-get update -y')
if self.__download:
# Download packages
# Assign mode 777 to work around warnings
# Ubuntu 16: Can't drop privileges for downloading as file
# Ubuntu 18: Download is performed unsandboxed as root as file
self.__commands.append('mkdir -m 777 -p {0} && cd {0}'.format(
self.__download_directory))
self.__commands.append(apt_get_download + ' \\\n' + ' \\\n'.join(packages))
if self.__extract:
# Extract the packages to a prefix - not a "real"
# package manager install
self.__commands.append('mkdir -p {0}'.format(
self.__extract))
regex = posixpath.join(
self.__download_directory,
'(' + '|'.join(sorted(self.ospackages)) + ').*deb')
self.__commands.append('find {0} -regextype posix-extended -type f -regex "{1}" -exec dpkg --extract {{}} {2} \;'.format(self.__download_directory, regex, self.__extract))
# Cleanup downloaded packages
self.__commands.append(
'rm -rf {}'.format(self.__download_directory))
# Cleanup repository file(s)
for repo in self.__repositories:
if repo.startswith('http'):
# Repository is a URL to a repository
# configuration file
self.__commands.append(
'rm -f {}'.format(
posixpath.join('/etc/apt/sources.list.d',
os.path.basename(repo))))
else:
# Repository is a configuration string
# Use '|' as separator to try to avoid conflicts or
# the need to escape the repo string.
self.__commands.append(
self.sed_step(
file='/etc/apt/sources.list.d/hpccm.list',
patterns=[r'\|^{}$|d'.format(repo)]))
else:
if self.__aptitude:
self.__commands.append(apt_get_install + ' aptitude')
install = 'aptitude install -y --without-recommends -o Aptitude::ProblemResolver::SolutionCost=\'100*canceled-actions,200*removals\' \\\n'
install = install + ' \\\n'.join(packages)
self.__commands.append(install)
else:
self.__commands.append(apt_get_install + ' \\\n' + ' \\\n'.join(packages))
self.__commands.append('rm -rf /var/lib/apt/lists/*')
| hpc-container-maker-master | hpccm/building_blocks/apt_get.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""FFTW building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
class fftw(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `fftw` building block downloads, configures, builds, and
    installs the [FFTW](http://www.fftw.org) component. Depending on
    the parameters, the source will be downloaded from the web
    (default) or copied from a source directory in the local build
    context.
    # Parameters
    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.
    check: Boolean flag to specify whether the `make check` step
    should be performed. The default is False.
    configure_opts: List of options to pass to `configure`. For
    x86_64 processors, the default values are `--enable-shared`,
    `--enable-openmp`, `--enable-threads`, and `--enable-sse2`. For
    other processors, the default values are `--enable-shared`,
    `--enable-openmp`, and `--enable-threads`.
    directory: Path to the unpackaged source directory relative to the
    local build context. The default value is empty. If this is
    defined, the source in the local build context will be used rather
    than downloading the source from the web.
    disable_FEATURE: Flags to control disabling features when
    configuring. For instance, `disable_foo=True` maps to
    `--disable-foo`. Underscores in the parameter name are converted
    to dashes.
    enable_FEATURE[=ARG]: Flags to control enabling features when
    configuring. For instance, `enable_foo=True` maps to
    `--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
    Underscores in the parameter name are converted to dashes.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH`) should be modified to include FFTW. The
    default is True.
    ldconfig: Boolean flag to specify whether the FFTW library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the FFTW library
    directory. The default value is False.
    mpi: Boolean flag to specify whether to build with MPI support
    enabled. The default is False.
    ospackages: List of OS packages to install prior to configuring
    and building. The default values are `file`, `make`, and `wget`.
    prefix: The top level install location. The default value is
    `/usr/local/fftw`.
    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.
    version: The version of FFTW source to download. This value is
    ignored if `directory` is set. The default value is `3.3.10`.
    with_PACKAGE[=ARG]: Flags to control optional packages when
    configuring. For instance, `with_foo=True` maps to `--with-foo`
    and `with_foo='/usr/local/foo'` maps to
    `--with-foo=/usr/local/foo`. Underscores in the parameter name
    are converted to dashes.
    without_PACKAGE: Flags to control optional packages when
    configuring. For instance `without_foo=True` maps to
    `--without-foo`. Underscores in the parameter name are converted
    to dashes.
    # Examples
    ```python
    fftw(prefix='/opt/fftw/3.3.7', version='3.3.7')
    ```
    ```python
    fftw(directory='sources/fftw-3.3.7')
    ```
    ```python
    n = nvhpc(eula=True)
    fftw(toolchain=n.toolchain)
    ```
    ```python
    fftw(check=True, configure_opts=['--enable-shared', '--enable-threads',
                                     '--enable-sse2', '--enable-avx'])
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(fftw, self).__init__(**kwargs)
        # pop (not get) so these keys are consumed before **kwargs is
        # forwarded to generic_autotools below
        self.__baseurl = kwargs.pop('baseurl', 'ftp://ftp.fftw.org/pub/fftw')
        self.__check = kwargs.pop('check', False)
        self.__configure_opts = kwargs.pop('configure_opts', [])
        # NOTE(review): 'directory' is documented above but is never
        # forwarded to generic_autotools below, so it appears to be
        # silently ignored — confirm whether local-source builds work
        self.__directory = kwargs.pop('directory', '')
        self.__mpi = kwargs.pop('mpi', False)
        self.__ospackages = kwargs.pop('ospackages', ['file', 'make', 'wget'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/fftw')
        self.__version = kwargs.pop('version', '3.3.10')
        # Set the configure options
        self.__configure()
        # Set the environment variables
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
        # Setup build configuration
        self.__bb = generic_autotools(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            check=self.__check,
            configure_opts=self.__configure_opts,
            comment=False,
            devel_environment=self.environment_variables,
            # PGI compiler needs a larger stack size
            postconfigure=['ulimit -s unlimited'] if self.__check else None,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/fftw-{1}.tar.gz'.format(self.__baseurl, self.__version),
            **kwargs)
        # Container instructions
        self += comment('FFTW version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb
    def __configure(self):
        """Setup configure options based on user parameters and CPU
        architecture"""
        if hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            # User-specified configure_opts override the defaults and
            # skip the CPU feature probing below
            if not self.__configure_opts:
                self.__configure_opts = ['--enable-shared', '--enable-openmp',
                                         '--enable-threads', '--enable-sse2']
                if hpccm.config.test_cpu_feature_flag('avx'):
                    self.__configure_opts.append('--enable-avx')
                if hpccm.config.test_cpu_feature_flag('avx2'):
                    self.__configure_opts.append('--enable-avx2')
                if hpccm.config.test_cpu_feature_flag('avx512'):
                    self.__configure_opts.append('--enable-avx512')
        else:
            if not self.__configure_opts:
                self.__configure_opts = ['--enable-shared', '--enable-openmp',
                                         '--enable-threads']
        # MPI support is appended regardless of architecture
        if self.__mpi:
            self.__configure_opts.append('--enable-mpi')
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        f = fftw(...)
        Stage0 += f
        Stage1 += f.runtime()
        ```
        """
        self.rt += comment('FFTW')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/fftw.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Julia building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import re
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
import hpccm.templates.wget
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.primitives.comment import comment
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class julia(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig,
            hpccm.templates.rm, hpccm.templates.tar, hpccm.templates.wget):
    """The `julia` building block downloads and installs the
    [Julia](https://julialang.org) programming environment.
    # Parameters
    cuda: Boolean flag to specify whether the JuliaGPU packages should
    be installed. If True, the `CUDAapi`, `CUDAdrv`, `CUDAnative`,
    and `CuArrays` packages are installed. Note that the `CUDAdrv`
    package must be rebuilt when the container is running to align
    with the host CUDA driver. The default is False.
    depot: Path to the location of "user" Julia package depot. The
    default is an empty string, i.e., `~/.julia`. The depot location
    needs to be writable by the user running the container.
    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    Julia. The default is True.
    history: Path to the Julia history file. The default value is an
    empty string, i.e., `~/.julia/logs/repl_history.jl`. The history
    location needs to be writable by the user running the container.
    ldconfig: Boolean flag to specify whether the Julia library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the Julia library
    directory. The default value is False.
    ospackages: List of OS packages to install prior to building. The
    default values are `tar` and `wget`.
    packages: List of Julia packages to install. The default is an
    empty list.
    prefix: The top level installation location. The default value
    is `/usr/local/julia`.
    version: The version of Julia to install. The default value is
    `1.5.1`.
    # Examples
    ```python
    julia(prefix='/usr/local/julia', version='1.3.1')
    ```
    ```python
    julia(depot='/tmp', history='/tmp/repl_history.jl')
    ```
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(julia, self).__init__(**kwargs)
        self.__arch_directory = None # Filled in by __cpu_arch()
        self.__arch_pkg = None # Filled in by __cpu_arch()
        self.__baseurl = kwargs.get('baseurl',
                                    'https://julialang-s3.julialang.org/bin/linux')
        self.__cuda = kwargs.get('cuda', False)
        self.__depot = kwargs.get('depot', None)
        self.__history = kwargs.get('history', None)
        self.__ospackages = kwargs.get('ospackages', ['tar', 'wget'])
        self.__packages = kwargs.get('packages', [])
        self.__prefix = kwargs.get('prefix', '/usr/local/julia')
        self.__version = kwargs.get('version', '1.5.1')
        self.__commands = [] # Filled in by __setup()
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
        # Set the CPU architecture specific parameters
        self.__cpu_arch()
        # Construct the series of steps to execute
        self.__setup()
        # Fill in container instructions
        self.__instructions()
    def __instructions(self):
        """Fill in container instructions"""
        self += comment('Julia version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += shell(commands=self.__commands)
        self += environment(variables=self.environment_step())
    def __cpu_arch(self):
        """Based on the CPU architecture, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_cpu_arch == cpu_arch.AARCH64:
            self.__arch_directory = 'aarch64'
            self.__arch_pkg = 'aarch64'
        elif hpccm.config.g_cpu_arch == cpu_arch.X86_64:
            self.__arch_directory = 'x64'
            self.__arch_pkg = 'x86_64'
        else: # pragma: no cover
            raise RuntimeError('Unknown CPU architecture')
    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""
        # The download URL has the format MAJOR.MINOR in the path and
        # the tarball contains MAJOR.MINOR.REVISION, so pull apart the
        # full version to get the individual components.
        match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<revision>\d+)',
                         self.__version)
        major_minor = '{0}.{1}'.format(match.groupdict()['major'],
                                       match.groupdict()['minor'])
        tarball = 'julia-{0}-linux-{1}.tar.gz'.format(self.__version,
                                                      self.__arch_pkg)
        url = '{0}/{1}/{2}/{3}'.format(self.__baseurl, self.__arch_directory,
                                       major_minor, tarball)
        # Download source from web
        self.__commands.append(self.download_step(url=url, directory=self.__wd))
        self.__commands.append(self.untar_step(
            tarball=posixpath.join(self.__wd, tarball), directory=self.__wd))
        # "Install"
        self.__commands.append('cp -a {0} {1}'.format(
            posixpath.join(self.__wd, 'julia-{}'.format(self.__version)),
            self.__prefix))
        # Install packages
        if self.__cuda:
            self.__packages.extend(['CUDAapi', 'CUDAdrv', 'CUDAnative',
                                    'CuArrays'])
        if self.__packages:
            # remove duplicates
            self.__packages = sorted(list(set(self.__packages)))
            # convert into PackageSpec() entries
            # (map returns a lazy iterator; it is consumed exactly once
            # by the join below)
            self.__packages = map(
                lambda pkg: 'PackageSpec(name="{0}")'.format(pkg)
                if not pkg.startswith('PackageSpec')
                else pkg, self.__packages)
            # Comma separated list of package
            packages_csv = ', '.join('{}'.format(pkg)
                                     for pkg in self.__packages)
            julia = posixpath.join(self.__prefix, 'bin', 'julia')
            # Install packages in the default location alongside Julia
            # itself.
            julia = 'JULIA_DEPOT_PATH={0} {1}'.format(
                posixpath.join(self.__prefix, 'share', 'julia'), julia)
            self.__commands.append(
                '{0} -e \'using Pkg; Pkg.add([{1}])\''.format(julia,
                                                              packages_csv))
        # Startup file
        if self.__depot:
            # The "user" depot path must be writable by the user
            # running the container. Modify the Julia startup file to
            # modify the "user" depot from ~/.julia to another
            # location.
            startup = posixpath.join(self.__prefix, 'etc', 'julia',
                                     'startup.jl')
            self.__commands.append('echo "DEPOT_PATH[1] = \\"{0}\\"" >> {1}'.format(
                self.__depot, startup))
        # Set library path
        libpath = posixpath.join(self.__prefix, 'lib')
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(directory=libpath))
        else:
            self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(libpath)
        # Cleanup tarball and directory
        self.__commands.append(self.cleanup_step(
            items=[posixpath.join(self.__wd, tarball),
                   posixpath.join(self.__wd, 'julia-{}'.format(self.__version))]))
        # Setup environment
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        if self.__history:
            self.environment_variables['JULIA_HISTORY'] = self.__history
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        # Examples
        ```python
        j = julia(...)
        Stage0 += j
        Stage1 += j.runtime()
        ```
        """
        # Julia is installed in place (no build artifacts to strip), so
        # the runtime stage reuses the full install instructions
        return str(self)
| hpc-container-maker-master | hpccm/building_blocks/julia.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""SCI-F building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import os
import posixpath
import hpccm.base_object
import hpccm.config
from hpccm.common import container_type
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.shell import shell
class scif(hpccm.base_object):
    """The `scif` building blocks installs components using the
    [Scientific Filesystem (SCI-F)](https://sci-f.github.io).
    Other building blocks and / or primitives should be added to
    the `scif` building block using the `+=` syntax.
    If not generating a Singularity definition file, SCI-F should be
    installed using the [`pip`](#pip) building block prior to this
    building block.
    If not generating a Singularity definition file, this module
    creates SCI-F recipe files in the current directory (see also the
    `file` parameter).
    # Parameters
    _arguments: Specify additional [Dockerfile RUN arguments](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md) (Docker specific).
    _env: Boolean flag to specify whether the general container
    environment should be also be loaded when executing a SCI-F
    `%appinstall` block. The default is False (Singularity specific).
    file: The SCI-F recipe file name. The default value is the name
    parameter with the `.scif` suffix.
    name: The name to use to label the SCI-F application. This
    parameter is required.
    _native: Boolean flag to specify whether to use the native
    Singularity support for SCI-F when generating Singularity
    definition files. The default is True (Singularity specific).
    # Examples
    ```python
    pip(packages=['scif'])
    s = scif(name='example')
    s += openmpi(prefix='/scif/apps/example')
    s += shell(commands=[...])
    ```
    """
    # Class-level flag shared by all instances so runtime() copies
    # /scif into the runtime stage only once
    __runtime_called = False
    def __init__(self, **kwargs):
        """Initialize scif building block"""
        super(scif, self).__init__(**kwargs)
        # Primitives collected per SCI-F section type; filled by __add()
        self.__appenv = []
        self.__appfiles = []
        self.__apphelp = []
        self.__appinstall = []
        self.__applabels = []
        self.__apprun = []
        self.__apptest = []
        self.__arguments = kwargs.get('_arguments')
        self.__env = kwargs.get('_env', False)
        self.__name = kwargs.get('name', None)
        if not self.__name:
            raise RuntimeError('"name" must be defined')
        self.__native = kwargs.get('_native', True)
        self.__scif_file = kwargs.get('file', '{}.scif'.format(self.__name))
    def __iadd__(self, item):
        """Add the item to the corresponding type list. Allows "+=" syntax."""
        if isinstance(item, list):
            for i in item:
                self.__add(i)
        else:
            self.__add(item)
        return self
    def __add(self, item):
        """Break the item down into its constituent primitives and append each
        primitive to the appropriate list"""
        primitives = self.__primitives(item)
        for p in primitives:
            # Dispatch on the primitive's class name to the matching
            # SCI-F section list
            ptype = p.__class__.__name__
            if ptype == 'comment':
                self.__apphelp.append(p)
            elif ptype == 'copy':
                self.__appfiles.append(p)
            elif ptype == 'environment':
                self.__appenv.append(p)
            elif ptype == 'label':
                self.__applabels.append(p)
            elif ptype == 'runscript':
                self.__apprun.append(p)
            elif ptype == 'shell':
                # shell primitives flagged with _test go to %apptest,
                # all others to %appinstall
                if p._test:
                    self.__apptest.append(p)
                else:
                    self.__appinstall.append(p)
            else:
                raise RuntimeError('unrecognized primitive type: {}'.format(ptype))
    def __scif_recipe(self):
        """Generate the SCI-F recipe instructions, merging primitives of the
        same type because SCI-F does not support duplicate
        sections."""
        recipe = []
        if self.__appenv:
            appenv = self.__appenv[0].merge(self.__appenv,
                                            _app=self.__name)
            recipe.append(appenv)
        if self.__appfiles:
            appfiles = self.__appfiles[0].merge(self.__appfiles,
                                                _app=self.__name)
            recipe.append(appfiles)
        if self.__apphelp:
            apphelp = self.__apphelp[0].merge(self.__apphelp,
                                              _app=self.__name)
            recipe.append(apphelp)
        if self.__appinstall:
            appinstall = self.__appinstall[0].merge(self.__appinstall,
                                                    _app=self.__name,
                                                    _appenv=self.__env)
            recipe.append(appinstall)
        if self.__applabels:
            applabels = self.__applabels[0].merge(self.__applabels,
                                                  _app=self.__name)
            recipe.append(applabels)
        if self.__apprun:
            apprun = self.__apprun[0].merge(self.__apprun,
                                            _app=self.__name)
            recipe.append(apprun)
        if self.__apptest:
            apptest = self.__apptest[0].merge(self.__apptest,
                                              _app=self.__name, _test=True)
            recipe.append(apptest)
        return recipe
    def __primitives(self, item):
        """Item is a building block or a primitive. A building block consists
        of one or more other building blocks or primitives.
        Ultimately, every building block consists of primitives.
        "Flatten" the item to a list of its constituent primitives.
        """
        return [i for i in self.__iter_flatten(item)]
    def __iter_flatten(self, iterable):
        """Recursively flatten"""
        try:
            for i in iter(iterable):
                for f in self.__iter_flatten(i):
                    yield f
        except TypeError:
            # not iterable
            yield iterable
    def __str__(self):
        """String representation of the building block"""
        scif_recipe = self.__scif_recipe()
        if (self.__native and
            hpccm.config.g_ctype == container_type.SINGULARITY):
            # Take advantage of Singularity's native support for SCI-F.
            return '\n'.join(str(x) for x in scif_recipe)
        else:
            # Generate an external SCI-F recipe file and manually call scif
            # Temporarily switch container format to Singularity to write
            # the SCI-F recipe file
            preserved_ctype = hpccm.config.g_ctype
            hpccm.config.set_container_format('singularity')
            logging.info('Writing {}'.format(self.__scif_file))
            with open(self.__scif_file, 'w') as f:
                f.write('\n\n'.join(str(x) for x in scif_recipe))
            # Restore original container format
            hpccm.config.g_ctype = preserved_ctype
            # Container instructions to copy the SCI-F recipe file
            # into the container and then run scif
            c_scif_file = posixpath.join('/scif/recipes',
                                         os.path.basename(self.__scif_file))
            instructions = []
            instructions.append(comment('SCI-F "{}"'.format(self.__name)))
            instructions.append(
                copy(src=self.__scif_file, dest=c_scif_file))
            instructions.append(
                shell(_arguments = self.__arguments,
                      chdir=False,
                      commands=['scif install {}'.format(c_scif_file)]))
            return '\n'.join(str(x) for x in instructions)
    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.
        The entire `/scif` directory is copied into the runtime stage
        on the first call. Subsequent calls do nothing.
        # Examples
        ```python
        s = scif(...)
        Stage0 += s
        Stage1 += s.runtime()
        ```
        """
        if not scif.__runtime_called:
            scif.__runtime_called = True
            return str(copy(_from=_from, src='/scif', dest='/scif'))
        else:
            return ''
| hpc-container-maker-master | hpccm/building_blocks/scif.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""PnetCDF building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import LooseVersion
import posixpath
import hpccm.config
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_autotools import generic_autotools
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class pnetcdf(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
"""The `pnetcdf` building block downloads, configures, builds, and
installs the
[PnetCDF](http://cucis.ece.northwestern.edu/projects/PnetCDF/index.html)
component.
# Parameters
annotate: Boolean flag to specify whether to include annotations
(labels). The default is False.
check: Boolean flag to specify whether the `make check` step
should be performed. The default is False.
configure_opts: List of options to pass to `configure`. The
default values are `--enable-shared`.
disable_FEATURE: Flags to control disabling features when
configuring. For instance, `disable_foo=True` maps to
`--disable-foo`. Underscores in the parameter name are converted
to dashes.
enable_FEATURE[=ARG]: Flags to control enabling features when
configuring. For instance, `enable_foo=True` maps to
`--enable-foo` and `enable_foo='yes'` maps to `--enable-foo=yes`.
Underscores in the parameter name are converted to dashes.
environment: Boolean flag to specify whether the environment
(`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, and `PATH`) should be
modified to include PnetCDF. The default is True.
ldconfig: Boolean flag to specify whether the PnetCDF library
directory should be added dynamic linker cache. If False, then
`LD_LIBRARY_PATH` is modified to include the PnetCDF library
directory. The default value is False.
ospackages: List of OS packages to install prior to configuring
and building. The default values are `file`, `m4`, `make`,
`perl`, `tar`, and `wget`.
prefix: The top level install location. The default value is
`/usr/local/pnetcdf`.
toolchain: The toolchain object. A MPI compiler toolchain must be
used. The default is to use the standard MPI compiler wrappers,
e.g., `CC=mpicc`, `CXX=mpicxx`, etc.
version: The version of PnetCDF source to download. The default
value is `1.12.1`.
with_PACKAGE[=ARG]: Flags to control optional packages when
configuring. For instance, `with_foo=True` maps to `--with-foo`
and `with_foo='/usr/local/foo'` maps to
`--with-foo=/usr/local/foo`. Underscores in the parameter name
are converted to dashes.
without_PACKAGE: Flags to control optional packages when
configuring. For instance `without_foo=True` maps to
`--without-foo`. Underscores in the parameter name are converted
to dashes.
# Examples
```python
pnetcdf(prefix='/opt/pnetcdf/1.10.0', version='1.10.0')
```
```python
ompi = openmpi(...)
pnetcdf(toolchain=ompi.toolchain, ...)
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(pnetcdf, self).__init__(**kwargs)
self.__baseurl = kwargs.get('baseurl', 'https://parallel-netcdf.github.io/Release')
self.__configure_opts = kwargs.pop('configure_opts',
['--enable-shared'])
self.__ospackages = kwargs.pop('ospackages', ['file', 'm4', 'make',
'perl', 'tar', 'wget'])
self.__prefix = kwargs.pop('prefix', '/usr/local/pnetcdf')
self.__runtime_ospackages = [] # Filled in by __distro()
self.__toolchain = kwargs.pop('toolchain',
toolchain(CC='mpicc', CXX='mpicxx',
F77='mpif77', F90='mpif90',
FC='mpifort'))
self.__url = None # Filled in by __download()
self.__version = kwargs.get('version', '1.12.1')
# Set the Linux distribution specific parameters
self.__distro()
# Set the download specific parameters
self.__download()
# Set the environment variables
self.environment_variables['CPATH'] = '{}:$CPATH'.format(
posixpath.join(self.__prefix, 'include'))
self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
posixpath.join(self.__prefix, 'lib'))
self.environment_variables['PATH'] = '{}:$PATH'.format(
posixpath.join(self.__prefix, 'bin'))
if not self.ldconfig:
self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
# Setup build configuration
self.__bb = generic_autotools(
annotations={'version': self.__version},
base_annotation=self.__class__.__name__,
comment=False,
configure_opts=self.__configure_opts,
devel_environment=self.environment_variables,
# For some compilers, --enable-shared leads to the following error:
# GEN libpnetcdf.la
# /usr/bin/ld: .libs/libpnetcdf.lax/libf77.a/strerrnof.o: relocation R_X86_64_32 against `.data' can not be used when making a shared object; recompile with -fPIC
# .libs/libpnetcdf.lax/libf77.a/strerrnof.o: error adding symbols: Bad value
# Apply the workaround
postconfigure=['sed -i -e \'s#pic_flag=""#pic_flag=" -fpic -DPIC"#\' -e \'s#wl=""#wl="-Wl,"#\' libtool'] if '--enable-shared' in self.__configure_opts else None,
prefix=self.__prefix,
runtime_environment=self.environment_variables,
toolchain=self.__toolchain,
url=self.__url,
**kwargs)
# Container instructions
self += comment('PnetCDF version {}'.format(self.__version))
self += packages(ospackages=self.__ospackages)
self += self.__bb
def __distro(self):
"""Based on the Linux distribution, set values accordingly. A user
specified value overrides any defaults."""
if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
self.__runtime_ospackages = ['libatomic1']
elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
pass
else: # pragma: no cover
raise RuntimeError('Unknown Linux distribution')
def __download(self):
"""Set download source based on user parameters"""
# Version 1.11.0 changed the package name
if LooseVersion(self.__version) >= LooseVersion('1.11.0'):
pkgname = 'pnetcdf'
else:
pkgname = 'parallel-netcdf'
tarball = '{0}-{1}.tar.gz'.format(pkgname, self.__version)
self.__url = '{0}/{1}'.format(self.__baseurl, tarball)
def runtime(self, _from='0'):
    """Install the runtime specific components from a build in a
    previous stage.

    # Examples

    ```python
    p = pnetcdf(...)
    Stage0 += p
    Stage1 += p.runtime()
    ```
    """
    instructions = [comment('PnetCDF')]
    if self.__runtime_ospackages:
        # Runtime OS dependencies (e.g., libatomic1 on Ubuntu)
        instructions.append(packages(ospackages=self.__runtime_ospackages))
    instructions.append(self.__bb.runtime(_from=_from))
    for instruction in instructions:
        self.rt += instruction
    return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/pnetcdf.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Kokkos building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
class kokkos(bb_base, hpccm.templates.downloader, hpccm.templates.envvars):
    """The `kokkos` building block downloads and installs the
    [Kokkos](https://github.com/kokkos/kokkos) component.

    The [CMake](#cmake) building block should be installed prior to
    this building block.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    arch: List of target architectures to build. If set adds
    `-DKokkos_ARCH_<value>=ON` to the list of CMake options. The
    default value is `VOLTA70`, i.e., sm_70. If a CUDA aware build is
    not selected, then a non-default value should be used.

    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.

    check: Boolean flag to specify whether the build should be
    checked. If True, adds `-DKokkos_ENABLE_TESTS=ON` to the list of
    CMake options. The default is False.

    cmake_opts: List of options to pass to `cmake`. The default is
    `-DCMAKE_BUILD_TYPE=RELEASE`.

    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.

    cuda: Flag to control whether a CUDA aware build is performed. If
    True, adds `-DKokkos_ENABLE_CUDA=ON` and
    `-DCMAKE_CXX_COMPILER=$(pwd)/../bin/nvcc_wrapper` to the list of
    CMake options. The default value is True.

    environment: Boolean flag to specify whether the environment
    (`LD_LIBRARY_PATH` and `PATH`) should be modified to include
    Kokkos. The default is True.

    hwloc: Flag to control whether a hwloc aware build is performed.
    If True, adds `-DKokkos_ENABLE_HWLOC=ON` to the list of CMake
    options. The default value is True.

    ospackages: List of OS packages to install prior to building. For
    Ubuntu, the default values are `gzip`, `libhwloc-dev`, `make`,
    `tar`, and `wget`. For RHEL-based Linux distributions the default
    values are `gzip`, `hwloc-devel`, `make`, `tar`, and `wget`.

    prefix: The top level installation location. The default value
    is `/usr/local/kokkos`.

    repository: The location of the git repository that should be used
    to build Kokkos. If True, then use the default
    `https://github.com/kokkos/kokkos.git` repository. The default is
    empty, i.e., use the release package specified by `version`.

    url: The location of the tarball that should be used to build
    Kokkos. The default is empty, i.e., use the release package
    specified by `version`.

    version: The version of Kokkos source to download. The default
    value is `3.2.00`.

    # Examples

    ```python
    kokkos(prefix='/opt/kokkos/3.1.01', version='3.1.01')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(kokkos, self).__init__(**kwargs)

        self.__arch = kwargs.pop('arch', ['VOLTA70'])
        self.__baseurl = kwargs.pop('baseurl',
                                    'https://github.com/kokkos/kokkos/archive')
        self.__check = kwargs.pop('check', False)
        self.__cmake_opts = kwargs.pop('cmake_opts',
                                       ['-DCMAKE_BUILD_TYPE=RELEASE'])
        self.__cuda = kwargs.pop('cuda', True)
        self.__default_repository = 'https://github.com/kokkos/kokkos.git'
        self.__hwloc = kwargs.pop('hwloc', True)
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__powertools = False # enable the CentOS PowerTools repo
        self.__prefix = kwargs.pop('prefix', '/usr/local/kokkos')
        self.__version = kwargs.pop('version', '3.2.00')

        # A git clone unpacks into its own directory, so no separate
        # source directory is needed in that case
        if self.repository:
            self.__directory = ''
        else:
            self.__directory = kwargs.pop('directory',
                                          'kokkos-{}'.format(self.__version))

        # Set the CMake options
        self.__cmake()

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the download specific parameters
        self.__download()
        # Forward the resolved source location to generic_cmake
        kwargs['repository'] = self.repository
        kwargs['url'] = self.url

        # Setup the environment variables
        self.environment_variables['PATH'] = '{}/bin:$PATH'.format(
            self.__prefix)

        # Setup build configuration
        self.__bb = generic_cmake(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            cmake_opts=self.__cmake_opts,
            comment=False,
            devel_environment=self.environment_variables,
            directory=self.__directory,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            **kwargs)

        # Container instructions
        self += comment('Kokkos version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages,
                         powertools=self.__powertools)
        self += self.__bb

    def __cmake(self):
        """Set CMake options based on user input"""
        # Set options
        if self.__arch:
            for arch in self.__arch:
                self.__cmake_opts.append('-DKokkos_ARCH_{}=ON'.format(
                    arch.upper()))
        if self.__check:
            self.__cmake_opts.append('-DKokkos_ENABLE_TESTS=ON')
        if self.__cuda:
            self.__cmake_opts.append('-DKokkos_ENABLE_CUDA=ON')
            # Kokkos requires building CUDA code through its nvcc_wrapper
            self.__cmake_opts.append(
                '-DCMAKE_CXX_COMPILER=$(pwd)/../bin/nvcc_wrapper')
        if self.__hwloc:
            self.__cmake_opts.append('-DKokkos_ENABLE_HWLOC=ON')

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['libhwloc-dev', 'make']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['hwloc-devel', 'make']

            if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                # hwloc-devel is in the CentOS powertools repository
                self.__powertools = True
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')

        # Tools for fetching the source: git for a repository clone,
        # wget/tar/gzip for a release tarball
        if self.repository:
            self.__ospackages.extend(['ca-certificates', 'git'])
        else:
            self.__ospackages.extend(['gzip', 'tar', 'wget'])

    def __download(self):
        """Set download source based on user parameters"""
        # Use the default repository if set to True
        if self.repository is True:
            self.repository = self.__default_repository

        if not self.repository and not self.url:
            self.url='{0}/{1}.tar.gz'.format(self.__baseurl, self.__version)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        k = kokkos(...)
        Stage0 += k
        Stage1 += k.runtime()
        ```
        """
        self.rt += comment('Kokkos')
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/kokkos.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""Generic cmake building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import posixpath
import hpccm.templates.CMakeBuild
import hpccm.templates.annotate
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
from hpccm.building_blocks.base import bb_base
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.label import label
from hpccm.primitives.shell import shell
from hpccm.toolchain import toolchain
class generic_cmake(bb_base, hpccm.templates.CMakeBuild,
                    hpccm.templates.annotate, hpccm.templates.downloader,
                    hpccm.templates.envvars, hpccm.templates.ldconfig,
                    hpccm.templates.rm):
    """The `generic_cmake` building block downloads, configures,
    builds, and installs a specified CMake enabled package.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    annotations: Dictionary of additional annotations to include. The
    default is an empty dictionary.

    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.

    build_directory: The location to build the package. The default
    value is a `build` subdirectory in the source code location.

    build_environment: Dictionary of environment variables and values
    to set when building the package. The default is an empty
    dictionary.

    check: Boolean flag to specify whether the `make check` step
    should be performed. The default is False.

    cmake_opts: List of options to pass to `cmake`. The default value
    is an empty list.

    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.

    devel_environment: Dictionary of environment variables and values,
    e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the development
    stage after the package is built and installed. The default is an
    empty dictionary.

    directory: The source code location. The default value is the
    basename of the downloaded package. If the value is not an
    absolute path, then the temporary working directory is prepended.

    environment: Boolean flag to specify whether the environment
    should be modified (see `devel_environment` and
    `runtime_environment`). The default is True.

    install: Boolean flag to specify whether the `make install` step
    should be performed. The default is True.

    ldconfig: Boolean flag to specify whether the library directory
    should be added dynamic linker cache. The default value is False.

    libdir: The path relative to the install prefix to use when
    configuring the dynamic linker cache. The default value is `lib`.

    make: Boolean flag to specify whether the `make` step should be
    performed. The default is True.

    package: Path to the local source package relative to the local
    build context. One of this parameter or the `repository` or `url`
    parameters must be specified.

    postinstall: List of shell commands to run after running 'make
    install'. The working directory is the install prefix. The
    default is an empty list.

    preconfigure: List of shell commands to run prior to running
    `cmake`. The working directory is the source code location. The
    default is an empty list.

    prefix: The top level install location. The default value is
    `/usr/local`. It is highly recommended not to use this default and
    instead set the prefix to a package specific directory.

    recursive: Initialize and checkout git submodules. `repository` parameter
    must be specified. The default is False.

    repository: The git repository of the package to build. One of
    this parameter or the `package` or `url` parameters must be
    specified.

    _run_arguments: Specify additional [Dockerfile RUN arguments](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md) (Docker specific).

    runtime: The list of files / directories to copy into the runtime
    stage. The default is an empty list, i.e., copy the entire
    prefix.

    runtime_environment: Dictionary of environment variables and
    values, e.g., `LD_LIBRARY_PATH` and `PATH`, to set in the runtime
    stage. The default is an empty dictionary.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    url: The URL of the package to build. One of this parameter or
    the `repository` or `package` parameters must be specified.

    # Examples

    ```python
    generic_cmake(cmake_opts=['-D CMAKE_BUILD_TYPE=Release',
                              '-D CUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda',
                              '-D GMX_BUILD_OWN_FFTW=ON',
                              '-D GMX_GPU=ON',
                              '-D GMX_MPI=OFF',
                              '-D GMX_OPENMP=ON',
                              '-D GMX_PREFER_STATIC_LIBS=ON',
                              '-D MPIEXEC_PREFLAGS=--allow-run-as-root'],
                  directory='gromacs-2018.2',
                  prefix='/usr/local/gromacs',
                  url='https://github.com/gromacs/gromacs/archive/v2018.2.tar.gz')
    ```

    ```python
    generic_cmake(branch='v0.8.0',
                  cmake_opts=['-D CMAKE_BUILD_TYPE=RELEASE',
                              '-D QUDA_DIRAC_CLOVER=ON',
                              '-D QUDA_DIRAC_DOMAIN_WALL=ON',
                              '-D QUDA_DIRAC_STAGGERED=ON',
                              '-D QUDA_DIRAC_TWISTED_CLOVER=ON',
                              '-D QUDA_DIRAC_TWISTED_MASS=ON',
                              '-D QUDA_DIRAC_WILSON=ON',
                              '-D QUDA_FORCE_GAUGE=ON',
                              '-D QUDA_FORCE_HISQ=ON',
                              '-D QUDA_GPU_ARCH=sm_70',
                              '-D QUDA_INTERFACE_MILC=ON',
                              '-D QUDA_INTERFACE_QDP=ON',
                              '-D QUDA_LINK_HISQ=ON',
                              '-D QUDA_MPI=ON'],
                  prefix='/usr/local/quda',
                  repository='https://github.com/lattice/quda.git')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(generic_cmake, self).__init__(**kwargs)

        self.__annotations = kwargs.get('annotations', {})
        self.__build_directory = kwargs.get('build_directory', 'build')
        self.__build_environment = kwargs.get('build_environment', {})
        self.__check = kwargs.get('check', False)
        # cmake_opts is public; presumably consumed by the CMakeBuild
        # template's configure_step -- confirm
        self.cmake_opts = kwargs.get('cmake_opts', [])
        self.__comment = kwargs.get('comment', True)
        self.__directory = kwargs.get('directory', None)
        self.environment_variables = kwargs.get('devel_environment', {})
        self.__install = kwargs.get('install', True)
        self.__libdir = kwargs.get('libdir', 'lib')
        self.__make = kwargs.get('make', True)
        self.__postinstall = kwargs.get('postinstall', [])
        self.__preconfigure = kwargs.get('preconfigure', [])
        self.__recursive = kwargs.get('recursive', False)
        self.__run_arguments = kwargs.get('_run_arguments', None)
        self.__runtime = kwargs.get('runtime', [])
        self.runtime_environment_variables = kwargs.get('runtime_environment', {})
        self.__toolchain = kwargs.get('toolchain', toolchain())

        self.__commands = [] # Filled in by __setup()
        # NOTE(review): hpccm.config is not explicitly imported in this
        # file; it appears to be reachable through the other hpccm
        # imports -- confirm
        self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory

        # Construct the series of steps to execute
        self.__setup()

        # Fill in container instructions
        self.__instructions()

    def __instructions(self):
        """Fill in container instructions"""
        # Optional comment identifying the source of the package
        if self.__comment:
            if self.url:
                self += comment(self.url, reformat=False)
            elif self.repository:
                self += comment(self.repository, reformat=False)
            elif self.package:
                self += comment(self.package, reformat=False)
        # A local source package must first be copied into the image
        if self.package:
            self += copy(src=self.package,
                         dest=posixpath.join(self.__wd,
                                             os.path.basename(self.package)))
        self += shell(_arguments=self.__run_arguments,
                      commands=self.__commands)
        self += environment(variables=self.environment_step())
        self += label(metadata=self.annotate_step())

    def __setup(self):
        """Construct the series of shell commands, i.e., fill in
        self.__commands"""

        # Get source
        self.__commands.append(self.download_step(recursive=self.__recursive,
                                                  wd=self.__wd))

        # directory containing the unarchived package
        if self.__directory:
            if posixpath.isabs(self.__directory):
                self.src_directory = self.__directory
            else:
                self.src_directory = posixpath.join(self.__wd,
                                                    self.__directory)

        # sanity check: when no explicit directory is given,
        # src_directory is presumably set by download_step -- confirm
        if not self.src_directory:
            raise RuntimeError('source directory is not defined')

        # Preconfigure setup
        if self.__preconfigure:
            # Assume the preconfigure commands should be run from the
            # source directory
            self.__commands.append('cd {}'.format(self.src_directory))
            self.__commands.extend(self.__preconfigure)

        # Configure
        build_environment = []
        if self.__build_environment:
            # Sorted so the generated command string is deterministic
            for key, val in sorted(self.__build_environment.items()):
                build_environment.append('{0}={1}'.format(key, val))
        self.__commands.append(self.configure_step(
            build_directory=self.__build_directory,
            directory=self.src_directory, environment=build_environment,
            toolchain=self.__toolchain))

        # Build
        if self.__make:
            self.__commands.append(self.build_step())

        # Check the build
        if self.__check:
            self.__commands.append(self.build_step(target='check'))

        # Install
        if self.__install:
            self.__commands.append(self.build_step(target='install'))

        if self.__postinstall:
            # Assume the postinstall commands should be run from the
            # install directory
            self.__commands.append('cd {}'.format(self.prefix))
            self.__commands.extend(self.__postinstall)

        # Set library path
        if self.ldconfig:
            self.__commands.append(self.ldcache_step(
                directory=posixpath.join(self.prefix, self.__libdir)))

        # Add any user specified annotations (labels)
        for key,value in self.__annotations.items():
            self.add_annotation(key, value)

        # Cleanup
        remove = [self.src_directory]
        if self.url:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.url)))
        elif self.package:
            remove.append(posixpath.join(self.__wd,
                                         posixpath.basename(self.package)))
        if self.__build_directory:
            # Only an out-of-tree (absolute) build directory needs
            # explicit removal; a relative one lives inside the
            # already-removed source tree
            if posixpath.isabs(self.__build_directory):
                remove.append(self.__build_directory)
        self.__commands.append(self.cleanup_step(items=remove))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        g = generic_cmake(...)
        Stage0 += g
        Stage1 += g.runtime()
        ```
        """
        if self.prefix:
            if self.__comment:
                if self.url:
                    self.rt += comment(self.url, reformat=False)
                elif self.repository:
                    self.rt += comment(self.repository, reformat=False)

            if self.__runtime:
                # Copy only the user specified files / directories
                for src in self.__runtime:
                    if '*' in posixpath.basename(src):
                        # When using COPY with more than one source file,
                        # the destination must be a directory and end with
                        # a /
                        dest = posixpath.dirname(src) + '/'
                    else:
                        dest = src
                    self.rt += copy(_from=_from, src=src, dest=dest)
            else:
                # Copy the entire prefix
                self.rt += copy(_from=_from, src=self.prefix, dest=self.prefix)

            if self.ldconfig:
                self.rt += shell(commands=[self.ldcache_step(
                    directory=posixpath.join(self.prefix, self.__libdir))])

            if self.runtime_environment_variables:
                self.rt += environment(
                    variables=self.environment_step(runtime=True))

            if self.annotate:
                self.rt += label(metadata=self.annotate_step())

            return str(self.rt)
        else: # pragma: no cover
            return
| hpc-container-maker-master | hpccm/building_blocks/generic_cmake.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""MAGMA building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import posixpath
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
class magma(bb_base, hpccm.templates.envvars, hpccm.templates.ldconfig):
    """The `magma` building block configures, builds, and installs the
    [MAGMA](https://icl.cs.utk.edu/magma) component.

    The [CMake](#cmake) building block should be installed prior to
    this building block.  Either the [MKL](#mkl) or the
    [OpenBLAS](#openblas) building block should also be installed.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    cmake_opts: List of options to pass to `cmake`. The default value
    is an empty list.

    gpu_target: List of GPU architectures to compile. The default
    values are `Pascal`, `Volta`, and `Turing`.

    ospackages: List of OS packages to install prior to configuring
    and building. The default values are `tar` and `wget`.

    prefix: The top level install location. The default value is
    `/usr/local/magma`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default is empty.

    version: The version of MAGMA source to download. The default
    value is `2.5.3`.

    # Examples

    ```python
    magma(prefix='/opt/magma', version='2.5.3')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        super(magma, self).__init__(**kwargs)

        self.__baseurl = kwargs.pop(
            'baseurl', 'http://icl.utk.edu/projectsfiles/magma/downloads')
        self.__cmake_opts = kwargs.pop('cmake_opts', [])
        self.__gpu_target = kwargs.pop('gpu_target',
                                       ['Pascal', 'Volta', 'Turing'])
        self.__ospackages = kwargs.pop('ospackages', ['tar', 'wget'])
        self.__prefix = kwargs.pop('prefix', '/usr/local/magma')
        self.__version = kwargs.pop('version', '2.5.3')

        # Translate the user parameters into CMake options
        self.__cmake()

        # Header and library search paths for the development and
        # runtime environments
        for envvar, subdir in (('CPATH', 'include'),
                               ('LIBRARY_PATH', 'lib')):
            self.environment_variables[envvar] = '{}:${}'.format(
                posixpath.join(self.__prefix, subdir), envvar)
        if not self.ldconfig:
            # The dynamic linker cache is not updated, so the library
            # directory must be on LD_LIBRARY_PATH instead
            self.environment_variables['LD_LIBRARY_PATH'] = \
                '{}:$LD_LIBRARY_PATH'.format(
                    posixpath.join(self.__prefix, 'lib'))

        # Delegate the download / configure / build / install steps
        self.__bb = generic_cmake(
            annotations={'version': self.__version},
            base_annotation=self.__class__.__name__,
            comment=False,
            cmake_opts=self.__cmake_opts,
            devel_environment=self.environment_variables,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            url='{0}/magma-{1}.tar.gz'.format(self.__baseurl,
                                              self.__version),
            **kwargs)

        # Container instructions
        self += comment('MAGMA version {}'.format(self.__version))
        self += packages(ospackages=self.__ospackages)
        self += self.__bb

    def __cmake(self):
        """Append CMake options derived from the user parameters"""
        # GPU architectures
        if self.__gpu_target:
            targets = ' '.join(self.__gpu_target)
            self.__cmake_opts.append('-DGPU_TARGET="{}"'.format(targets))

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        m = magma(...)
        Stage0 += m
        Stage1 += m.runtime()
        ```
        """
        for instruction in (comment('MAGMA'),
                            self.__bb.runtime(_from=_from)):
            self.rt += instruction
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/magma.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Building block base class"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import hpccm.base_object
class bb_instructions(hpccm.base_object):
    """Base class for building block instructions."""

    def __init__(self, **kwargs):
        """Initialize the (initially empty) list of instructions"""
        super(bb_instructions, self).__init__(**kwargs)
        self.__instructions_bb = []

    def __iadd__(self, instruction):
        """Append an instruction, or a list of instructions, to the
        building block.  Enables the "+=" syntax."""
        additions = (instruction if isinstance(instruction, list)
                     else [instruction])
        self.__instructions_bb.extend(additions)
        return self

    def __getitem__(self, key):
        """Return the specified element from the list of instructions"""
        return self.__instructions_bb[key]

    def __len__(self):
        """Return the number of instructions"""
        return len(self.__instructions_bb)

    def __str__(self):
        """Render the building block, one non-empty instruction per line"""
        rendered = [str(instruction)
                    for instruction in self.__instructions_bb]
        return '\n'.join(r for r in rendered if r)
class bb_base(bb_instructions):
    """Base class for building blocks.

    A building block carries two instruction lists: the "regular"
    instructions inherited from bb_instructions and a separate runtime
    stage list (`rt`).
    """
    def __init__(self, **kwargs):
        """Initialize building block"""
        super(bb_base, self).__init__(**kwargs)

        # Runtime instructions are kept in a separate list from the
        # "regular" instructions
        self.rt = bb_instructions()
| hpc-container-maker-master | hpccm/building_blocks/base.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""NVSHMEM building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import os
import posixpath
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
import hpccm.templates.rm
import hpccm.templates.tar
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_build import generic_build
from hpccm.building_blocks.packages import packages
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.shell import shell
class nvshmem(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
hpccm.templates.ldconfig, hpccm.templates.rm,
hpccm.templates.tar):
"""The `nvshmem` building block builds and installs the
[NVSHMEM](https://developer.nvidia.com/nvshmem) component.
# Parameters
binary_tarball: Path to NVSHMEM binary tarball relative to the
build context. The default value is empty. Either this parameter
or `package` must be specified.
cuda: Flag to specify the path to the CUDA installation. The
default is `/usr/local/cuda`.
environment: Boolean flag to specify whether the environment
(`CPATH`, `LIBRARY_PATH`, and `PATH`) should be modified to
include NVSHMEM. The default is True.
gdrcopy: Flag to specify the path to the GDRCOPY installation.
The default is empty.
hydra: Boolean flag to specify whether the Hydra process launcher
should be installed. If True, adds `automake` to the list of OS
packages. The default is False.
ldconfig: Boolean flag to specify whether the NVSHMEM library
directory should be added dynamic linker cache. If False, then
`LD_LIBRARY_PATH` is modified to include the NVSHMEM library
directory. The default value is False.
make_variables: Dictionary of environment variables and values to
set when building NVSHMEM. The default is an empty dictionary.
mpi: Flag to specify the path to the MPI installation. The
default is empty, i.e., do not build NVSHMEM with MPI support.
ospackages: List of OS packages to install prior to building. The
default values are `make` and `wget`.
package: Path to the NVSHMEM source package relative to the build
context. The default value is empty. Either this parameter or
`binary_tarball` must be specified.
prefix: The top level install location. The default value is
`/usr/local/nvshmem`.
shmem: Flag to specify the path to the SHMEM installation. The
default is empty, i.e., do not build NVSHMEM with SHMEM support.
version: The version of NVSHMEM source to download. The default
value is `2.2.1`.
# Examples
```python
nvshmem(mpi='/usr/local/openmpi', version='2.1.2')
```
"""
def __init__(self, **kwargs):
"""Initialize building block"""
super(nvshmem, self).__init__(**kwargs)
self.__binary_tarball = kwargs.pop('binary_tarball', None)
self.__cuda = kwargs.pop('cuda', '/usr/local/cuda')
self.__gdrcopy = kwargs.pop('gdrcopy', None)
self.__hydra = kwargs.pop('hydra', False)
self.__make_variables = kwargs.pop('make_variables', {})
self.__mpi = kwargs.pop('mpi', None)
self.__ospackages = kwargs.pop('ospackages', ['make', 'wget'])
self.__prefix = kwargs.pop('prefix', '/usr/local/nvshmem')
self.__release = kwargs.pop('release', '0')
self.__shmem = kwargs.pop('shmem', None)
self.__src_directory = kwargs.pop('src_directory', None)
self.__version = kwargs.pop('version', '2.2.1')
self.__wd = kwargs.get('wd', hpccm.config.g_wd) # working directory
# Set the download specific parameters
self.__download()
kwargs['url'] = self.url
if self.__src_directory:
kwargs['directory'] = self.__src_directory
# Setup the environment variables
self.environment_variables['CPATH'] = '{}:$CPATH'.format(
posixpath.join(self.__prefix, 'include'))
self.environment_variables['LIBRARY_PATH'] = '{}:$LIBRARY_PATH'.format(
posixpath.join(self.__prefix, 'lib'))
self.environment_variables['PATH'] = '{}:$PATH'.format(
posixpath.join(self.__prefix, 'bin'))
if not self.ldconfig:
self.environment_variables['LD_LIBRARY_PATH'] = '{}:$LD_LIBRARY_PATH'.format(posixpath.join(self.__prefix, 'lib'))
# Add packages
if self.__hydra:
self.__ospackages.append('automake')
if self.__version and not self.__binary_tarball and not self.package:
self += comment('NVSHMEM {}'.format(self.__version))
else:
self += comment('NVSHMEM')
self += packages(ospackages=self.__ospackages)
if self.__binary_tarball:
# Shorthand for the tarball file inside the container
tarball = posixpath.join(self.__wd,
os.path.basename(self.__binary_tarball))
self += copy(src=self.__binary_tarball, dest=tarball)
self += shell(commands=[
# Untar binary package
self.untar_step(
tarball=tarball,
# remove the leading directory, e.g., install in
# /usr/local/nvshmem not
# /usr/local/nvshmem/nvshmem_<version>_<arch>.
args=['--strip-components=1'],
directory=self.__prefix),
# Install Hydra process launcher
'{0}/scripts/install_hydra.sh {1} {0}'.format(
self.__prefix, self.__wd) if self.__hydra else None,
# Remove temporary files and cleanup
self.cleanup_step(items=[tarball])])
self += environment(variables=self.environment_variables)
else:
# Build from source
# Set the build options
self.__configure()
self.__bb = generic_build(
build = [
'{} make -j$(nproc) install'.format(
self.__build_environment),
'./scripts/install_hydra.sh {1} {0}'.format(
self.__prefix, self.__wd) if self.__hydra else None],
comment=False,
devel_environment=self.environment_variables,
prefix=self.__prefix,
runtime_environment=self.environment_variables,
**kwargs)
self += self.__bb
def __configure(self):
    """Assemble the NVSHMEM make environment string from user parameters.

    Populates self.__build_environment with space-separated KEY=value
    pairs, sorted by key for deterministic output.
    """
    env = {
        'NVSHMEM_PREFIX': self.__prefix,
        # MPI support defaults to off; enabled below when requested
        'NVSHMEM_MPI_SUPPORT': 0,
    }
    if self.__cuda:
        env['CUDA_HOME'] = self.__cuda
    if self.__gdrcopy:
        env['GDRCOPY_HOME'] = self.__gdrcopy
    if self.__mpi:
        env['NVSHMEM_MPI_SUPPORT'] = 1
        env['MPI_HOME'] = self.__mpi
    if self.__shmem:
        env['NVSHMEM_SHMEM_SUPPORT'] = 1
        env['SHMEM_HOME'] = self.__shmem
    if self.__make_variables:
        # User-provided make variables override / extend the defaults
        env.update(self.__make_variables)
    self.__build_environment = ' '.join(
        '{0}={1}'.format(key, val) for key, val in sorted(env.items()))
def __download(self):
    """Derive the default download URL when the user supplied none.

    The NVIDIA redist tarball URL is only used as a fallback; an
    explicit package, repository, or url parameter takes precedence.
    """
    if self.package or self.repository or self.url:
        return
    self.url = 'https://developer.download.nvidia.com/compute/redist/nvshmem/{0}/source/nvshmem_src_{0}-{1}.txz'.format(self.__version, self.__release)
def runtime(self, _from='0'):
    """Generate the set of instructions to install the runtime specific
    components from a build in a previous stage.

    # Examples

    ```python
    n = nvshmem(...)
    Stage0 += n
    Stage1 += n.runtime()
    ```
    """
    self.rt += comment('NVSHMEM')
    if not self.__binary_tarball:
        # Source build: delegate runtime generation to the generic
        # build building block
        self.rt += self.__bb.runtime(_from=_from)
    else:
        # Binary install: copy the install tree from the previous
        # stage and recreate the environment variables
        self.rt += copy(_from=_from, src=self.__prefix, dest=self.__prefix)
        self.rt += environment(variables=self.environment_step())
    return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/nvshmem.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
# pylint: disable=too-many-instance-attributes
"""rdma-core building block"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six import string_types
from distutils.version import StrictVersion
import posixpath
import hpccm.config
import hpccm.templates.downloader
import hpccm.templates.envvars
import hpccm.templates.ldconfig
from hpccm.building_blocks.base import bb_base
from hpccm.building_blocks.generic_cmake import generic_cmake
from hpccm.building_blocks.packages import packages
from hpccm.common import cpu_arch
from hpccm.common import linux_distro
from hpccm.primitives.comment import comment
from hpccm.toolchain import toolchain
class rdma_core(bb_base, hpccm.templates.downloader, hpccm.templates.envvars,
                hpccm.templates.ldconfig):
    """The `rdma_core` building block configures, builds, and installs the
    [RDMA Core](https://github.com/linux-rdma/rdma-core) component.

    The [CMake](#cmake) building block should be installed prior to
    this building block.

    # Parameters

    annotate: Boolean flag to specify whether to include annotations
    (labels). The default is False.

    branch: The git branch to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the default branch for the repository.

    commit: The git commit to clone. Only recognized if the
    `repository` parameter is specified. The default is empty, i.e.,
    use the latest commit on the default branch for the repository.

    environment: Boolean flag to specify whether the environment
    (`CPATH`, `LD_LIBRARY_PATH`, `LIBRARY_PATH`, and `PATH`) should be
    modified to include RDMA Core. The default is True.

    ldconfig: Boolean flag to specify whether the RDMA Core library
    directory should be added dynamic linker cache. If False, then
    `LD_LIBRARY_PATH` is modified to include the RDMA Core library
    directory. The default value is False.

    ospackages: List of OS packages to install prior to configuring
    and building. For Ubuntu, the default values are `libudev-dev`,
    `libnl-3-dev`, `libnl-route-3-dev`, `make`, `pkg-config`,
    `python3-docutils`, `pandoc`, and `wget`. For RHEL-based Linux
    distributions, the default values are `libnl3-devel`,
    `libudev-devel`, `make`, `pkgconfig`, `pandoc`, `python-docutils`,
    and `wget`. If the `repository` parameter is set, then
    `ca-certificates` and `git` are also included.

    prefix: The top level install location. The default value is
    `/usr/local/rdma-core`.

    repository: The location of the git repository that should be used to build RDMA Core. If True, then use the default `https://github.com/linux-rdma/rdma-core.git`
    repository. The default is empty, i.e., use the release package
    specified by `version`.

    toolchain: The toolchain object. This should be used if
    non-default compilers or other toolchain options are needed. The
    default value is empty.

    url: The location of the tarball that should be used to build RDMA
    Core. The default is empty, i.e., use the release package
    specified by `version`.

    version: The version of RDMA Core source to download. The default
    value is `31.2`.

    # Examples

    ```python
    rdma_core(prefix='/opt/rdma-core/31.2', version='31.2')
    ```

    ```python
    rdma_core(repository='https://github.com/linux-rdma/rdma-core.git')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize building block"""
        # The downloader mixin consumes repository/branch/commit/url
        # kwargs and exposes them as attributes (self.repository, etc.)
        super(rdma_core, self).__init__(**kwargs)

        # Parameters (pop so the remaining kwargs can be forwarded to
        # generic_cmake below)
        self.__baseurl = kwargs.pop('baseurl', 'https://github.com/linux-rdma/rdma-core/archive')
        self.__default_repository = 'https://github.com/linux-rdma/rdma-core.git'
        self.__ospackages = kwargs.pop('ospackages', [])
        self.__prefix = kwargs.pop('prefix', '/usr/local/rdma-core')
        self.__runtime_ospackages = [] # Filled in by __distro()
        self.__toolchain = kwargs.pop('toolchain', toolchain())
        self.__version = kwargs.pop('version', '31.2')

        # Set the Linux distribution specific parameters
        self.__distro()

        # Set the download specific parameters.  __download() resolves
        # repository=True to the default repository URL and derives the
        # release tarball URL when neither repository nor url was given.
        self.__download()
        # Forward the resolved download location to generic_cmake
        kwargs['repository'] = self.repository
        kwargs['url'] = self.url

        # Setup the environment variables.  RDMA Core installs into
        # both lib and lib64, so both are added to the library paths.
        self.environment_variables['CPATH'] = '{}:$CPATH'.format(
            posixpath.join(self.__prefix, 'include'))
        self.environment_variables['LIBRARY_PATH'] = '{0}:{1}:$LIBRARY_PATH'.format(
            posixpath.join(self.__prefix, 'lib'),
            posixpath.join(self.__prefix, 'lib64'))
        self.environment_variables['PATH'] = '{}:$PATH'.format(
            posixpath.join(self.__prefix, 'bin'))
        # Only set LD_LIBRARY_PATH when the library directories are not
        # registered with ldconfig
        if not self.ldconfig:
            self.environment_variables['LD_LIBRARY_PATH'] = '{0}:{1}:$LD_LIBRARY_PATH'.format(
                posixpath.join(self.__prefix, 'lib'),
                posixpath.join(self.__prefix, 'lib64'))

        # Setup build configuration.  A version annotation only makes
        # sense for release tarballs, not arbitrary git checkouts.
        self.__bb = generic_cmake(
            annotations={'version': self.__version} if not self.repository else {},
            base_annotation=self.__class__.__name__,
            comment=False,
            devel_environment=self.environment_variables,
            directory='rdma-core-{}'.format(self.__version) if self.url else None,
            prefix=self.__prefix,
            runtime_environment=self.environment_variables,
            toolchain=self.__toolchain,
            **kwargs)

        # Container instructions: a descriptive comment, the build
        # prerequisites, then the build itself
        if self.repository:
            if self.branch:
                self += comment('RDMA Core {} {}'.format(self.repository,
                                                         self.branch))
            elif self.commit:
                self += comment('RDMA Core {} {}'.format(self.repository,
                                                         self.commit))
            else:
                self += comment('RDMA Core {}'.format(self.repository))
        else:
            self += comment('RDMA Core version {}'.format(self.__version))
        # pandoc is in EPEL on CentOS 7 and PowerTools on CentOS 8
        self += packages(epel=True, ospackages=self.__ospackages,
                         powertools=True)
        self += self.__bb

    def __distro(self):
        """Based on the Linux distribution, set values accordingly. A user
        specified value overrides any defaults."""
        if hpccm.config.g_linux_distro == linux_distro.UBUNTU:
            if not self.__ospackages:
                self.__ospackages = ['libudev-dev', 'libnl-3-dev',
                                     'libnl-route-3-dev', 'make',
                                     'pkg-config', 'python3-docutils',
                                     'pandoc', 'wget']
            self.__runtime_ospackages = ['libnl-3-200', 'libnl-route-3-200',
                                         'libnuma1']
        elif hpccm.config.g_linux_distro == linux_distro.CENTOS:
            if not self.__ospackages:
                self.__ospackages = ['libnl3-devel', 'libudev-devel', 'make',
                                     'pkgconfig', 'pandoc', 'wget']
                # docutils is a python3 package starting with RHEL 8
                if hpccm.config.g_linux_version >= StrictVersion('8.0'):
                    self.__ospackages.append('python3-docutils')
                else:
                    self.__ospackages.append('python-docutils')
            self.__runtime_ospackages = ['libnl', 'libnl3', 'numactl-libs']
        else: # pragma: no cover
            raise RuntimeError('Unknown Linux distribution')
        # Cloning a git repository additionally needs git and TLS certs
        if self.repository:
            self.__ospackages.extend(['ca-certificates', 'git'])

    def __download(self):
        """Set download source based on user parameters"""
        # Use the default repository if set to True
        if self.repository is True:
            self.repository = self.__default_repository
        # Fall back to the GitHub release tarball for the requested version
        if not self.repository and not self.url:
            self.url = '{0}/v{1}.tar.gz'.format(self.__baseurl, self.__version)

    def runtime(self, _from='0'):
        """Generate the set of instructions to install the runtime specific
        components from a build in a previous stage.

        # Examples

        ```python
        r = rdma_core(...)
        Stage0 += r
        Stage1 += r.runtime()
        ```
        """
        self.rt += comment('RDMA Core')
        # The runtime stage only needs the shared library dependencies,
        # not the full build toolchain
        self.rt += packages(ospackages=self.__runtime_ospackages)
        self.rt += self.__bb.runtime(_from=_from)
        return str(self.rt)
| hpc-container-maker-master | hpccm/building_blocks/rdma_core.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""User primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class user(object):
    """The `user` primitive sets the user name to use for any subsequent
    steps.

    This primitive is the null operation for Singularity.

    # Parameters

    user: The user name to use. The default is an empty string.

    # Examples

    ```python
    user(user='ncognito')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        self.user = kwargs.get('user', '')

    def __str__(self):
        """String representation of the primitive"""
        # Guard: nothing to emit without a user name
        if not self.user:
            logging.error('No user specified')
            return ''
        ctype = hpccm.config.g_ctype
        if ctype == container_type.DOCKER:
            return 'USER {}'.format(self.user)
        if ctype in (container_type.SINGULARITY, container_type.BASH):
            # No equivalent instruction in these formats
            return ''
        raise RuntimeError('Unknown container type')
| hpc-container-maker-master | hpccm/primitives/user.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Comment primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import re
import textwrap
import hpccm.config
from hpccm.common import container_type
class comment(object):
    """The `comment` primitive inserts a comment into the corresponding
    place in the container specification file.

    # Parameters

    _app: String containing the
    [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
    identifier. This also causes the comment to be enclosed in a
    Singularity block named `%apphelp` (Singularity specific).

    reformat: Boolean flag to specify whether the comment string
    should be wrapped to fit into lines not exceeding 80 characters.
    The default is True.

    # Examples

    ```python
    comment('libfoo version X.Y')
    ```
    """

    def __init__(self, *args, **kwargs):
        """Initialize primitive"""
        # The comment text is the (optional) first positional argument
        self.__string = args[0] if args else ''
        self._app = kwargs.get('_app', False)  # Singularity specific
        self.__reformat = kwargs.get('reformat', True)

    def __str__(self):
        """String representation of the primitive"""
        if not self.__string:
            return ''
        # Comments are universal (so far...)
        if (self._app and
            hpccm.config.g_ctype == container_type.SINGULARITY):
            return '%apphelp {0}\n{1}'.format(self._app,
                                              self.__string)
        if self.__reformat:
            # Wrap the comment text at 70 characters
            return textwrap.fill(self.__string, initial_indent='# ',
                                 subsequent_indent='# ', width=70)
        # Just prepend '# ' to each line; apply no other formatting
        return re.sub('^', '# ', self.__string, flags=re.MULTILINE)

    def merge(self, lst, _app=None):
        """Merge one or more instances of the primitive into a single
        instance. Due to conflicts or option differences the merged
        primitive may not be exact merger.
        """
        if not lst:  # pragma: nocover
            raise RuntimeError('no items provided to merge')

        strings = []
        for item in lst:
            if item.__class__.__name__ != 'comment':  # pragma: nocover
                logging.warning('item is not the correct type, skipping...')
                continue
            strings.append(item._comment__string)

        # The merged text must not be re-wrapped, so reformat=False
        return comment('\n'.join(strings), reformat=False, _app=_app)
| hpc-container-maker-master | hpccm/primitives/comment.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Working directory primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
from hpccm.primitives.shell import shell
class workdir(object):
    """The `workdir` primitive sets the working directory for any
    subsequent operations.  As a side effect, if the directory does
    not exist, it is created.

    # Parameters

    directory: The directory path.

    # Examples

    ```python
    workdir(directory='/path/to/directory')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        self.directory = kwargs.get('directory', '')

    def __str__(self):
        """String representation of the primitive"""
        # Guard: a directory is required
        if not self.directory:
            logging.error('No directory specified')
            return ''
        ctype = hpccm.config.g_ctype
        if ctype == container_type.DOCKER:
            return 'WORKDIR {}'.format(self.directory)
        if ctype == container_type.SINGULARITY:
            # Emulate Docker's WORKDIR: create the directory, then cd
            # into it
            return str(shell(commands=['mkdir -p {}'.format(self.directory),
                                       'cd {}'.format(self.directory)]))
        if ctype == container_type.BASH:
            logging.warning('workdir primitive does not map into bash')
            return ''
        raise RuntimeError('Unknown container type')
| hpc-container-maker-master | hpccm/primitives/workdir.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Raw primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import hpccm.config
from hpccm.common import container_type
class raw(object):
    """The `raw` primitive inserts the specified string, without
    modification, into the corresponding place in the container
    specification file.

    Generally, the string should be functionally equivalent for each
    container format.

    Wherever possible, the raw primitive should be avoided and other,
    more portable, primitives should be used instead.

    # Parameters

    docker: String containing the Dockerfile instruction (Docker
    specific).

    singularity: String containing the Singularity instruction
    (Singularity specific).

    # Examples

    ```python
    raw(docker='COPY --from=0 /usr/local/openmpi /usr/local/openmpi',
        singularity='# no equivalent to --from')
    ```
    """

    def __init__(self, **kwargs):
        """Raw primitive"""
        self.__docker = kwargs.get('docker', '')            # Docker specific
        self.__singularity = kwargs.get('singularity', '')  # Singularity
                                                            # specific

    def __str__(self):
        """String representation of the primitive"""
        ctype = hpccm.config.g_ctype
        if ctype == container_type.DOCKER:
            return str(self.__docker)
        if ctype == container_type.SINGULARITY:
            return str(self.__singularity)
        if ctype == container_type.BASH:
            # No raw equivalent for plain shell scripts
            return ''
        raise RuntimeError('Unknown container type')
| hpc-container-maker-master | hpccm/primitives/raw.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Shell primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class shell(object):
    """The `shell` primitive specifies a series of shell commands to
    execute.

    # Parameters

    _app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
    identifier. This also causes the Singularity block to named
    `%appinstall` rather than `%post` (Singularity specific).

    _appenv: Boolean flag to specify whether the general container
    environment should be also be loaded when executing a SCI-F
    `%appinstall` block. The default is False.

    _arguments: Specify additional [Dockerfile RUN arguments](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md) (Docker specific).

    chdir: Boolean flag to specify whether to change the working
    directory to `/` before executing any commands. Docker
    automatically resets the working directory for each `RUN`
    instruction. Setting this option to True makes Singularity behave
    the same. This option is ignored for Docker. The default is
    True.

    commands: A list of commands to execute. The default is an empty
    list.

    _test: Boolean flag to specify whether to use `%test` instead of
    `%post` and `%apptest` instead of `%appinstall` as the Singularity
    section headings (Singularity specific).

    # Examples

    ```python
    shell(commands=['cd /path/to/src', './configure', 'make install'])
    ```

    ```python
    # Cache Go packages
    shell(_arguments='--mount=type=cache,target=/root/.cache/go-build',
          commands=['cd /path/to/go-src', 'go build'])
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        #super(wget, self).__init__()

        self._app = kwargs.get('_app', '') # Singularity specific
        self._appenv = kwargs.get('_appenv', False) # Singularity specific
        self._arguments = kwargs.get('_arguments', '') # Docker specific
        self.chdir = kwargs.get('chdir', True)
        self.commands = kwargs.get('commands', [])
        self._test = kwargs.get('_test', False) # Singularity specific

    def __str__(self):
        """String representation of the primitive.

        Renders the command list as a Dockerfile `RUN` instruction, a
        Singularity `%post`/`%appinstall` (or `%test`/`%apptest`)
        section, or plain shell lines, depending on the active
        container type.  Returns the empty string when there are no
        commands.  Raises RuntimeError for an unknown container type.
        """
        if self.commands:
            # Remove empty items from the list of commands
            # (in-place filter; idempotent across repeated renderings)
            self.commands = [x for x in self.commands if x]

            if hpccm.config.g_ctype == container_type.DOCKER:
                # Singularity-only options are ignored for Docker, with
                # a warning so the user knows
                if self._app:
                    logging.warning('The Singularity specific %app.. syntax '
                                    'was requested. Docker does not have an '
                                    'equivalent: using regular RUN!')
                if self._appenv:
                    logging.warning('The Singularity specific _appenv argument '
                                    'was given: ignoring argument!')
                # Format:
                # RUN cmd1 && \
                #     cmd2 && \
                #     cmd3
                s = ['RUN ']
                if self._arguments:
                    s[0] += self._arguments + ' '
                s[0] += self.commands[0]
                s.extend([' {}'.format(x) for x in self.commands[1:]])
                return ' && \\\n'.join(s)
            elif hpccm.config.g_ctype == container_type.SINGULARITY:
                # The Docker-only _arguments option is ignored here
                if self._arguments:
                    logging.warning('The Docker specific _arguments was given: '
                                    'ignoring statement!')
                # Format:
                # %post [OR %appinstall app_name]
                #     cmd1
                #     cmd2
                #     cmd3
                if self._app:
                    s = ['%appinstall {0}'.format(self._app)]
                    # _test switches the section heading to %apptest
                    if self._test:
                        s = ['%apptest {0}'.format(self._app)]
                    # Do not `cd /` here: Singularity %appinstall is already
                    # run in its own working directory at /scif/apps/[appname].

                    # %appinstall commands do not run in regular Singularity
                    # environment. If _appenv=True load environment.
                    if self._appenv:
                        s.append(' for f in /.singularity.d/env/*; do . $f; '
                                 'done')
                else:
                    if self._appenv:
                        logging.warning('The _appenv argument has to be used '
                                        'together with the _app argument: '
                                        'ignoring argument!')
                    s = ['%post']
                    # _test switches the section heading to %test
                    if self._test:
                        s = ['%test']
                    # For consistency with Docker. Docker resets the
                    # working directory to '/' at the beginning of each
                    # 'RUN' instruction.
                    if self.chdir:
                        s.append(' cd /')
                s.extend([' {}'.format(x) for x in self.commands])
                return '\n'.join(s)
            elif hpccm.config.g_ctype == container_type.BASH:
                # Plain shell script: emit the commands themselves
                s = []
                if self.chdir:
                    s.insert(0, 'cd /')
                s.extend(self.commands)
                return '\n'.join(s)
            else:
                raise RuntimeError('Unknown container type')
        else:
            return ''

    def merge(self, lst, _app=None, _appenv=False, _test=False):
        """Merge one or more instances of the primitive into a single
        instance. Due to conflicts or option differences the merged
        primitive may not be exact merger.
        """
        if not lst: # pragma: nocover
            raise RuntimeError('no items provided to merge')

        # Concatenate the command lists, skipping anything that is not
        # a shell primitive
        cmds = []
        for item in lst:
            if not item.__class__.__name__ == 'shell': # pragma: nocover
                logging.warning('item is not the correct type, skipping...')
                continue
            cmds.extend(item.commands)

        return shell(commands=cmds, _app=_app, _appenv=_appenv, _test=_test)
| hpc-container-maker-master | hpccm/primitives/shell.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Runscript primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import shlex
from six.moves import shlex_quote
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class runscript(object):
    """The `runscript` primitive specifies the commands to be invoked
    when the container starts.

    # Parameters

    _args: Boolean flag to specify whether `"$@"` should be appended
    to the command. If more than one command is specified, nothing is
    appended regardless of the value of this flag. The default is
    True (Singularity specific).

    _app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
    identifier. This also causes the Singularity block to be named
    `%apprun` rather than `%runscript` (Singularity specific).

    commands: A list of commands to execute. The default is an empty
    list.

    _exec: Boolean flag to specify whether `exec` should be inserted
    to preface the final command. The default is True (Singularity
    specific).

    # Examples

    ```python
    runscript(commands=['cd /workdir', 'source env.sh'])
    ```

    ```python
    runscript(commands=['/usr/local/bin/entrypoint.sh'])
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        self._args = kwargs.get('_args', True) # Singularity specific
        self._app = kwargs.get('_app', '') # Singularity specific
        self._exec = kwargs.get('_exec', True) # Singularity specific
        self.commands = kwargs.get('commands', [])

    def __str__(self):
        """String representation of the primitive.

        Returns the empty string when there are no commands.  Raises
        RuntimeError for an unknown container type.
        """
        if not self.commands:
            return ''
        if hpccm.config.g_ctype == container_type.DOCKER:
            if self._app:
                logging.warning('The Singularity specific %app.. syntax was '
                                'requested. Docker does not have an '
                                'equivalent: using regular ENTRYPOINT!')
            if len(self.commands) > 1:
                logging.warning('Multiple commands given to runscript. '
                                'Docker ENTRYPOINT supports just one cmd: '
                                'ignoring remaining commands!')
            # Format:
            # ENTRYPOINT ["cmd1", "arg1", "arg2", ...]
            s = []
            s.extend('"{}"'.format(shlex_quote(x))
                     for x in shlex.split(self.commands[0]))
            return 'ENTRYPOINT [' + ', '.join(s) + ']'
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            # BUGFIX: work on a copy of the command list.  The previous
            # implementation modified self.commands in place, so
            # rendering the same primitive more than once produced
            # 'exec exec cmd "$@" "$@"'.
            commands = list(self.commands)
            if self._exec:
                # prepend last command with exec
                commands[-1] = 'exec {0}'.format(commands[-1])
            if len(commands) == 1 and self._args:
                # append "$@" to singleton command
                commands[0] = '{} "$@"'.format(commands[0])
            # Format:
            # %runscript [OR %apprun app_name]
            #     cmd1
            #     cmd2
            #     exec cmd3
            if self._app:
                s = ['%apprun {0}'.format(self._app)]
            else:
                s = ['%runscript']
            s.extend([' {}'.format(x) for x in commands])
            return '\n'.join(s)
        elif hpccm.config.g_ctype == container_type.BASH:
            logging.warning('runscript primitive does not map into bash')
            return ''
        else:
            raise RuntimeError('Unknown container type')

    def merge(self, lst, _app=None):
        """Merge one or more instances of the primitive into a single
        instance. Due to conflicts or option differences the merged
        primitive may not be exact.
        """
        if not lst: # pragma: nocover
            raise RuntimeError('no items provided to merge')

        # Concatenate the command lists, skipping anything that is not
        # a runscript primitive
        cmds = []
        for item in lst:
            if not item.__class__.__name__ == 'runscript': # pragma: nocover
                logging.warning('item is not the correct type, skipping...')
                continue
            cmds.extend(item.commands)

        return runscript(commands=cmds, _app=_app)
| hpc-container-maker-master | hpccm/primitives/runscript.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
__all__ = ['baseimage', 'blob', 'comment', 'copy', 'environment', 'label',
'raw', 'runscript', 'shell', 'user', 'workdir']
from hpccm.primitives.baseimage import baseimage
from hpccm.primitives.blob import blob
from hpccm.primitives.comment import comment
from hpccm.primitives.copy import copy
from hpccm.primitives.environment import environment
from hpccm.primitives.label import label
from hpccm.primitives.raw import raw
from hpccm.primitives.runscript import runscript
from hpccm.primitives.shell import shell
from hpccm.primitives.user import user
from hpccm.primitives.workdir import workdir
| hpc-container-maker-master | hpccm/primitives/__init__.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Copy primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging # pylint: disable=unused-import
import posixpath
import hpccm.config
from hpccm.common import container_type
class copy(object):
"""The `copy` primitive copies files from the host to the container
image.
# Parameters
_app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
identifier. This also causes the Singularity block to named
`%appfiles` rather than `%files` (Singularity specific).
_chown: Set the ownership of the file(s) in the container image
(Docker specific).
dest: Path in the container image to copy the file(s)
files: A dictionary of file pairs, source and destination, to copy
into the container image. If specified, has precedence over
`dest` and `src`.
_from: Set the source location to a previous build stage rather
than the host filesystem (Docker specific).
_mkdir: Boolean flag specifying that the destination directory
should be created in a separate `%setup` step. This can be used
to work around the Singularity limitation that the destination
directory must exist in the container image prior to copying files
into the image. The default is False (Singularity specific).
_post: Boolean flag specifying that file(s) should be first copied
to `/` and then moved to the final destination by a `%post` step.
This can be used to work around the Singularity limitation that
the destination must exist in the container image prior to copying
files into the image. The default is False (Singularity
specific).
src: A file, or a list of files, to copy
# Examples
```python
copy(src='component', dest='/opt/component')
```
```python
copy(src=['a', 'b', 'c'], dest='/tmp')
```
```python
copy(files={'a': '/tmp/a', 'b': '/opt/b'})
```
"""
def __init__(self, **kwargs):
    """Initialize primitive"""
    # Singularity specific options
    self._app = kwargs.get('_app', '')
    self._mkdir = kwargs.get('_mkdir', '')
    self._post = kwargs.get('_post', '')
    # Docker specific options
    self.__chown = kwargs.get('_chown', '')
    self.__from = kwargs.get('_from', '')
    # Common options
    self.__dest = kwargs.get('dest', '')
    self.__files = kwargs.get('files', {})
    self.__src = kwargs.get('src', '')

    # Resolve conflicting Singularity workaround options
    if self._mkdir and self._post:
        logging.error('_mkdir and _post are mutually exclusive!')
        self._post = False  # prefer _mkdir
    if self._app and (self._mkdir or self._post):
        logging.error('_app cannot be used with _mkdir or _post!')
        self._mkdir = False  # prefer _app
        self._post = False
    def __str__(self):
        """String representation of the primitive.

        Generates a Docker COPY instruction (one per destination), a
        Singularity %files / %appfiles section (optionally preceded by a
        %setup section or followed by a %post section for the _mkdir /
        _post workarounds), or an empty string for bash.
        """
        # Build a list of files to make the logic a bit simpler below.
        # The items in the files list are dictionaries with keys 'src'
        # and 'dest'.
        files = []
        if self.__files:
            # Sort to make it deterministic
            files.extend([{'dest': dest, 'src': src}
                          for src, dest in sorted(self.__files.items())])
        elif self.__dest and self.__src:
            files.append({'dest': self.__dest, 'src': self.__src})
        else:
            # No files!
            return ''

        if hpccm.config.g_ctype == container_type.DOCKER:
            if self._app:
                logging.warning('The Singularity specific SCI-F syntax '
                                'was requested. Docker does not have an '
                                'equivalent: using regular COPY!')

            # Format:
            # COPY src1 \
            #     src2 \
            #     src3 \
            #     dest/
            # COPY src1 dest1
            # COPY src2 dest2
            # COPY src3 dest3
            base_inst = 'COPY '

            # Docker-only modifiers: file ownership and multi-stage source
            if self.__chown:
                base_inst = base_inst + '--chown={} '.format(self.__chown)

            if self.__from:
                base_inst = base_inst + '--from={} '.format(self.__from)

            # Docker does not have the notion of copying a set of
            # files to different locations inside the container in a
            # single instruction. So generate multiple COPY
            # instructions in that case.
            instructions = []
            for pair in files:
                dest = pair['dest']
                src = pair['src']
                c = [base_inst]

                if isinstance(src, list):
                    c[0] = c[0] + src[0]
                    c.extend([' {}'.format(x) for x in src[1:]])
                    # Docker requires a trailing slash. Add one if missing.
                    c.append(' {}'.format(posixpath.join(dest, '')))
                else:
                    c[0] = c[0] + '{0} {1}'.format(src, dest)

                instructions.append(' \\\n'.join(c))

            return '\n'.join(instructions)
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            # If any of the files are being staged in /tmp or /var/tmp,
            # issue a warning or error depending on the Singularity
            # version.
            # https://github.com/NVIDIA/hpc-container-maker/issues/345
            if (not self.__from and
                any(f['dest'].startswith(('/var/tmp', '/tmp')) for f in files)):
                msg = 'Singularity 3.6 and later no longer allow a temporary directory to be used to stage files into the container image. Modify the recipe or, in many cases, use --working-directory or hpccm.config.set_working_directory() to specify another location.'
                if hpccm.config.g_singularity_version >= StrictVersion('3.6'):
                    raise RuntimeError(msg)
                else:
                    logging.warning(msg)

            # Format:
            # %files
            #     src1 dest
            #     src2 dest
            #     src3 dest
            # %files
            #     src1 dest1
            #     src2 dest2
            #     src3 dest3
            section = '%files'
            if (self.__from and
                hpccm.config.g_singularity_version >= StrictVersion('3.2')):
                # "%files from <stage>" requires Singularity 3.2 or later
                section = section + ' from {}'.format(self.__from)
            if self._app:
                # SCIF appfiles does not support "from"
                section = '%appfiles {0}'.format(self._app)

            # Singularity will error if the destination does not
            # already exist in the container.  The workarounds are to
            # either 1) prior to copying the files, create the
            # destination directories with %setup or 2) copy the files
            # to a path guaranteed to exist, "/", and then move them
            # later with %post.  Option 1 is the "pre" approach,
            # option 2 is the "post" approach.
            flat_files = []
            post = [] # post actions if _post is enabled
            pre = [] # pre actions if _mkdir is enabled
            for pair in files:
                dest = pair['dest']
                src = pair['src']

                if self._post:
                    # Stage at "/" first; the real destination is
                    # reached via the %post mv generated below
                    dest = '/'

                if isinstance(src, list):
                    for s in src:
                        flat_files.append(' {0} {1}'.format(s, dest))
                        if self._post:
                            # NOTE(review): the mv target joins the
                            # original dest with the full source path
                            # "s", not just its basename — confirm this
                            # is the intended layout when "s" contains
                            # directory components
                            post.append(' mv /{0} {1}'.format(posixpath.basename(s), posixpath.join(pair['dest'], s)))
                    if (self._mkdir and
                        posixpath.dirname(dest) != '/' and
                        posixpath.basename(dest) != dest):
                        # When multiple files are to be copied to the
                        # same destination, assume the destination is
                        # a directory
                        pre.append(' mkdir -p ${{SINGULARITY_ROOTFS}}{0}'.format(dest))
                else:
                    flat_files.append(' {0} {1}'.format(src, dest))
                    if (self._mkdir and
                        posixpath.dirname(dest) != '/' and
                        posixpath.basename(dest) != dest):
                        # When a single file is to be copied to a
                        # destination, assume the destination is a
                        # file.
                        pre.append(' mkdir -p ${{SINGULARITY_ROOTFS}}{0}'.format(posixpath.dirname(dest)))
                    elif self._post:
                        post.append(' mv /{0} {1}'.format(posixpath.basename(src), pair['dest']))

            s = ''
            if pre:
                s += '%setup\n' + '\n'.join(pre) + '\n'
            s += section + '\n' + '\n'.join(flat_files)
            if post:
                s += '\n%post\n' + '\n'.join(post)

            return s
        elif hpccm.config.g_ctype == container_type.BASH:
            logging.warning('copy primitive does not map into bash')
            return ''
        else:
            raise RuntimeError('Unknown container type')
def merge(self, lst, _app=None):
"""Merge one or more instances of the primitive into a single
instance. Due to conflicts or option differences the merged
primitive may not be exact merger.
"""
if not lst: # pragma: nocover
raise RuntimeError('no items provided to merge')
files = {}
for item in lst:
if not item.__class__.__name__ == 'copy': # pragma: nocover
logging.warning('item is not the correct type, skipping...')
continue
if item._copy__files:
files.update(item._copy__files)
elif isinstance(item._copy__src, list):
# Build a files dictionary from src / dest options.
# src is a list.
for s in item._copy__src:
files.update({s: item._copy__dest})
else:
# Build a files dictionary from src / dest options.
files.update({item._copy__src: item._copy__dest})
return copy(files=files, _app=_app)
| hpc-container-maker-master | hpccm/primitives/copy.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Base image primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from distutils.version import StrictVersion
import logging # pylint: disable=unused-import
import re
import hpccm.config
from hpccm.common import container_type
from hpccm.primitives.comment import comment
from hpccm.primitives.shell import shell
class baseimage(object):
    """The `baseimage` primitive defines the base image to be used.

    # Parameters

    _arch: The underlying CPU architecture of the base image.  Valid
    values are `aarch64`, `ppc64le`, and `x86_64`.  By default, the
    primitive attempts to figure out the CPU architecture by inspecting
    the image identifier, and falls back to system architecture if
    unable to determine the CPU architecture automatically.

    _as: Name for the stage.  When using Singularity multi-stage
    recipes, this value must be specified.  The default value is
    empty.

    _bootstrap: The Singularity bootstrap agent.  This default value
    is `docker` (Singularity specific).

    _distro: The underlying Linux distribution of the base image.
    Valid values are `centos`, `centos7`, `centos8`, `redhat`, `rhel`,
    `rhel7`, `rhel8`, `rockylinux8`, `ubuntu`, `ubuntu16`, `ubuntu18`,
    `ubuntu20`, and `ubuntu22`.  By default, the primitive attempts to
    figure out the Linux distribution by inspecting the image
    identifier, and falls back to `ubuntu` if unable to determine the
    Linux distribution automatically.

    _docker_env: Boolean specifying whether to load the Docker base
    image environment, i.e., source
    `/.singularity.d/env/10-docker*.sh` (Singularity specific).  The
    default value is True.

    image: The image identifier to use as the base image.  The default value is `ubuntu:18.04`.

    AS: Name for the build stage (Docker specific).  The default value
    is empty.  This parameter is deprecated; use `_as` instead.

    # Examples

    ```python
    baseimage(image='nvidia/cuda:9.1-devel')
    ```

    """

    def __init__(self, **kwargs):
        """Initialize the primitive"""
        #super(baseimage, self).__init__()
        self.__arch = kwargs.get('_arch', '')
        # '_as' takes precedence over the deprecated 'AS' spelling
        self.__as = kwargs.get('AS', '') # Deprecated
        self.__as = kwargs.get('_as', self.__as)
        self.__bootstrap = kwargs.get('_bootstrap', 'docker')
        self.image = kwargs.get('image', 'ubuntu:18.04')
        self.__distro = kwargs.get('_distro', '')
        self.__docker_env = kwargs.get('_docker_env', True) # Singularity specific

        # Set the global CPU architecture.  Use the user specified
        # value if available, otherwise try to figure it out based on
        # the image name.
        self.__arch = self.__arch.lower()
        if self.__arch == 'aarch64':
            hpccm.config.set_cpu_architecture('aarch64')
        elif self.__arch == 'ppc64le':
            hpccm.config.set_cpu_architecture('ppc64le')
        elif self.__arch == 'x86_64':
            hpccm.config.set_cpu_architecture('x86_64')
        elif re.search(r'aarch64|arm64', self.image):
            hpccm.config.set_cpu_architecture('aarch64')
        elif re.search(r'ppc64le', self.image):
            hpccm.config.set_cpu_architecture('ppc64le')
        else:
            # Unable to figure out the architecture, so use the
            # default, which should be the architecture of the machine
            # running HPCCM
            pass

        # Set the global Linux distribution.  Use the user specified
        # value if available, otherwise try to figure it out based on
        # the image name.  The order of the regex checks below matters:
        # more specific patterns must precede more general ones.
        self.__distro = self.__distro.lower()
        if self.__distro == 'ubuntu':
            hpccm.config.set_linux_distro('ubuntu')
        elif self.__distro == 'ubuntu16':
            hpccm.config.set_linux_distro('ubuntu16')
        elif self.__distro == 'ubuntu18':
            hpccm.config.set_linux_distro('ubuntu18')
        elif self.__distro == 'ubuntu20':
            hpccm.config.set_linux_distro('ubuntu20')
        elif self.__distro == 'ubuntu22':
            hpccm.config.set_linux_distro('ubuntu22')
        elif self.__distro == 'centos':
            hpccm.config.set_linux_distro('centos')
        elif self.__distro == 'centos7':
            hpccm.config.set_linux_distro('centos7')
        elif self.__distro == 'centos8':
            hpccm.config.set_linux_distro('centos8')
        elif (self.__distro == 'rhel' or self.__distro == 'redhat'):
            hpccm.config.set_linux_distro('rhel')
        elif self.__distro == 'rhel7':
            hpccm.config.set_linux_distro('rhel7')
        elif self.__distro == 'rhel8':
            hpccm.config.set_linux_distro('rhel8')
        elif self.__distro == 'rockylinux8':
            hpccm.config.set_linux_distro('rockylinux8')
        elif re.search(r'centos:?7', self.image):
            hpccm.config.set_linux_distro('centos7')
        elif re.search(r'centos:?8', self.image):
            hpccm.config.set_linux_distro('centos8')
        elif re.search(r'rockylinux:?8', self.image):
            hpccm.config.set_linux_distro('rockylinux8')
        elif re.search(r'centos|rhel|redhat', self.image):
            # NOTE(review): this generic match precedes the ubi checks
            # below, so an image identifier containing 'redhat' (e.g. a
            # registry.access.redhat.com/ubi path) matches here and is
            # detected as centos — confirm this ordering is intended
            hpccm.config.set_linux_distro('centos')
        elif re.search(r'ubi:?7', self.image):
            hpccm.config.set_linux_distro('rhel7')
        elif re.search(r'ubi:?8', self.image):
            hpccm.config.set_linux_distro('rhel8')
        elif re.search(r'ubuntu:?16', self.image):
            hpccm.config.set_linux_distro('ubuntu16')
        elif re.search(r'ubuntu:?18', self.image):
            hpccm.config.set_linux_distro('ubuntu18')
        elif re.search(r'ubuntu:?20', self.image):
            hpccm.config.set_linux_distro('ubuntu20')
        elif re.search(r'ubuntu:?22', self.image):
            hpccm.config.set_linux_distro('ubuntu22')
        elif re.search(r'ubuntu', self.image):
            hpccm.config.set_linux_distro('ubuntu')
        else:
            logging.warning('Unable to determine the Linux distribution, defaulting to Ubuntu')
            hpccm.config.set_linux_distro('ubuntu')

    def __str__(self):
        """String representation of the primitive.

        Generates a Docker FROM instruction, a Singularity header
        (BootStrap / From, plus Stage for multi-stage recipes), or a
        bash shebang, depending on the active container type.
        """
        if hpccm.config.g_ctype == container_type.DOCKER:
            image = 'FROM {}'.format(self.image)

            if self.__as:
                image = image + ' AS {}'.format(self.__as)

            return image
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            image = 'BootStrap: {0}\nFrom: {1}'.format(self.__bootstrap,
                                                       self.image)

            if (self.__as and
                hpccm.config.g_singularity_version >= StrictVersion('3.2')):
                # Named stages require Singularity 3.2+; prepend a note
                # only when the Stage keyword is actually emitted
                image = image + '\nStage: {}'.format(self.__as)
                image = str(comment('NOTE: this definition file depends on features only available in Singularity 3.2 and later.')) + '\n' + image

            # Singularity does not inherit the environment from the
            # Docker base image automatically.  Do it manually.
            if self.__docker_env:
                docker_env = shell(
                    chdir=False,
                    commands=['. /.singularity.d/env/10-docker*.sh'])
                image = image + '\n' + str(docker_env)

            return image
        elif hpccm.config.g_ctype == container_type.BASH:
            return '#!/bin/bash -ex'
        else:
            raise RuntimeError('Unknown container type')
| hpc-container-maker-master | hpccm/primitives/baseimage.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Label primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class label(object):
    """The `label` primitive sets container metadata.

    # Parameters

    _app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
    identifier.  This also causes the Singularity block to be named
    `%applabels` rather than `%labels` (Singularity specific).

    metadata: A dictionary of key / value pairs.  The default is an
    empty dictionary.

    # Examples

    ```python
    label(metadata={'maintainer': 'jane@doe'})
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        self._app = kwargs.get('_app', '')  # Singularity specific
        self.__metadata = kwargs.get('metadata', {})

    def __str__(self):
        """String representation of the primitive"""
        if not self.__metadata:
            # Nothing to generate
            return ''

        # Sort for a deterministic, reproducible ordering
        pairs = sorted(self.__metadata.items())

        if hpccm.config.g_ctype == container_type.DOCKER:
            if self._app:
                logging.warning('The Singularity specific %app.. syntax '
                                'was requested. Docker does not have an '
                                'equivalent: using regular LABEL!')

            # Single LABEL instruction, one key=value pair per
            # continuation line
            keyvals = ['{0}={1}'.format(key, val) for key, val in pairs]
            lines = ['LABEL {}'.format(keyvals[0])]
            lines.extend([' {}'.format(x) for x in keyvals[1:]])
            return ' \\\n'.join(lines)
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            # %labels (or %applabels) section, one "key value" per line
            keyvals = ['{0} {1}'.format(key, val) for key, val in pairs]
            if self._app:
                lines = ['%applabels {0}'.format(self._app)]
            else:
                lines = ['%labels']
            lines.extend([' {}'.format(x) for x in keyvals])
            return '\n'.join(lines)
        elif hpccm.config.g_ctype == container_type.BASH:
            logging.warning('label primitive does not map into bash')
            return ''
        else:
            raise RuntimeError('Unknown container type')

    def merge(self, lst, _app=None):
        """Merge one or more instances of the primitive into a single
        instance.  Later items override earlier ones on key conflicts,
        so the merged primitive may not be exact.
        """
        if not lst: # pragma: nocover
            raise RuntimeError('no items provided to merge')

        combined = {}
        for item in lst:
            if item.__class__.__name__ != 'label': # pragma: nocover
                logging.warning('item is not the correct type, skipping...')
                continue
            combined.update(item._label__metadata)

        return label(metadata=combined, _app=_app)
| hpc-container-maker-master | hpccm/primitives/label.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Environment primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class environment(object):
    """The `environment` primitive sets the corresponding environment
    variables.  Note, for Singularity, this primitive may set
    environment variables for the container runtime but not for the
    container build process (see this
    [rationale](https://github.com/singularityware/singularity/issues/1053)).
    See the `_export` parameter for more information.

    # Parameters

    _app: String containing the [SCI-F](https://www.sylabs.io/guides/2.6/user-guide/reproducible_scif_apps.html)
    identifier.  This also causes the Singularity block to be named
    `%appenv` rather than `%environment` (Singularity specific).

    _export: A Boolean flag to specify whether the environment should
    also be set for the Singularity build context (Singularity
    specific).  Variables in the `%environment` section are only
    defined when the container is run; if this flag is true an
    identical `%post` section is also generated so the variables are
    available to subsequent build steps.  The default value is True.

    variables: A dictionary of key / value pairs.  The default is an
    empty dictionary.

    # Examples

    ```python
    environment(variables={'PATH': '/usr/local/bin:$PATH'})
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        self._app = kwargs.get('_app', '')  # Singularity specific
        # Singularity '%environment' variables exist only at runtime;
        # when _export is enabled an identical '%post' section is also
        # generated so the variables are set during the build as well.
        self.__export = kwargs.get('_export', True)  # Singularity specific
        self.__variables = kwargs.get('variables', {})

    def __str__(self):
        """String representation of the primitive"""
        if not self.__variables:
            # Nothing to generate
            return ''

        # Sort for a deterministic, reproducible ordering
        keyvals = ['{0}={1}'.format(key, val)
                   for key, val in sorted(self.__variables.items())]

        if hpccm.config.g_ctype == container_type.DOCKER:
            if self._app:
                logging.warning('The Singularity specific %app.. syntax '
                                'was requested. Docker does not have an '
                                'equivalent: using regular ENV!')

            # Single ENV instruction, one variable per continuation line
            environ = ['ENV {}'.format(keyvals[0])]
            environ.extend([' {}'.format(x) for x in keyvals[1:]])
            return ' \\\n'.join(environ)
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            # %environment (or %appenv) section, optionally followed by
            # an identical %post section for the build context
            if self._app:
                environ = ['%appenv {0}'.format(self._app)]
            else:
                environ = ['%environment']
            exports = [' export {}'.format(x) for x in keyvals]
            environ.extend(exports)
            if self.__export and not self._app:
                environ.append('%post')
                environ.extend(exports)
            return '\n'.join(environ)
        elif hpccm.config.g_ctype == container_type.BASH:
            return '\n'.join(['export {}'.format(x) for x in keyvals])
        else:
            raise RuntimeError('Unknown container type')

    def merge(self, lst, _app=None):
        """Merge one or more instances of the primitive into a single
        instance.  Later items override earlier ones on variable name
        conflicts, so the merged primitive may not be an exact merger.
        """
        if not lst: # pragma: nocover
            raise RuntimeError('no items provided to merge')

        combined = {}
        for item in lst:
            if item.__class__.__name__ != 'environment': # pragma: nocover
                logging.warning('item is not the correct type, skipping...')
                continue
            combined.update(item._environment__variables)

        return environment(variables=combined, _app=_app)
| hpc-container-maker-master | hpccm/primitives/environment.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""Blob primitive"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.config
from hpccm.common import container_type
class blob(object):
    """The `blob` primitive inserts a file, without modification, into the
    corresponding place in the container specification file. If a
    relative path is specified, the path is relative to current
    directory.

    Generally, the blob should be functionally equivalent for each
    container format.

    Wherever possible, the blob primitive should be avoided and other,
    more portable, operations should be used instead.

    # Parameters

    docker: Path to the file containing the Dockerfile blob (Docker
    specific).

    singularity: Path to the file containing the Singularity blob
    (Singularity specific).

    # Example

    ```python
    blob(docker='path/to/foo.docker', singularity='path/to/foo.singularity')
    ```
    """

    def __init__(self, **kwargs):
        """Initialize primitive"""
        #super(blob, self).__init__()
        self.__docker = kwargs.get('docker', {}) # Docker specific
        self.__singularity = kwargs.get('singularity', {}) # Singularity
                                                           # specific

    def __str__(self):
        """String representation of the primitive"""
        # Fix: the DOCKER test was a bare 'if' followed by a separate
        # if/elif/else chain; use a single if/elif chain like the other
        # primitives.  Behavior is unchanged since the DOCKER branch
        # always returns.
        if hpccm.config.g_ctype == container_type.DOCKER:
            return self.__read_blob(self.__docker)
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            return self.__read_blob(self.__singularity)
        elif hpccm.config.g_ctype == container_type.BASH:
            return ''
        else:
            raise RuntimeError('Unknown container type')

    def __read_blob(self, path):
        """Read the blob from a file and return its contents as a string.

        Logs a warning if no path was given and an error if the file
        cannot be opened; returns an empty string in both cases.
        """
        b = ''
        try:
            if path:
                with open(path, 'r') as f:
                    b = f.read()
            else:
                logging.warning('Blob file not specified')
        except IOError:
            logging.error('Error opening blob {}'.format(path))
        return b
| hpc-container-maker-master | hpccm/primitives/blob.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""zipfile template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class zipfile(hpccm.base_object):
    """Template for generating shell commands to extract zip archives."""

    def __init__(self, **kwargs):
        """Initialize zip template"""
        super(zipfile, self).__init__(**kwargs)

    def unzip_step(self, zipfile=None, directory=None):
        """Generate the unzip command line string.

        If `directory` is given, the command first creates it and
        extracts the archive there; otherwise it extracts into the
        current directory.  Returns an empty string (and logs an
        error) when no zip file is specified.
        """
        if not zipfile:
            logging.error('zip file is not defined')
            return ''

        if not directory:
            return 'unzip {}'.format(zipfile)

        return 'mkdir -p {0} && unzip -d {0} {1}'.format(directory,
                                                         zipfile)
| hpc-container-maker-master | hpccm/templates/zipfile.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""CMakeBuild template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six.moves import shlex_quote
import copy
import posixpath
import hpccm.base_object
class CMakeBuild(hpccm.base_object):
    """Template for cmake workflows.

    # Parameters

    opts: List of default CMake options used by `configure_step` when
    its `opts` argument is not given.  The default is an empty list.

    parallel: Number of parallel build jobs passed to the underlying
    build tool.  The default is `$(nproc)`.

    prefix: Value for `-DCMAKE_INSTALL_PREFIX`.  The default is
    `/usr/local`.

    toolchain_control: Dictionary of booleans controlling which
    compiler environment variables (CC, CXX, F77, F90, FC) are set
    from a toolchain.  All default to True.
    """

    def __init__(self, **kwargs):
        """Initialize CMakeBuild template"""
        super(CMakeBuild, self).__init__(**kwargs)

        # Set by configure_step() and consumed by build_step()
        self.__build_directory = None
        self.cmake_opts = kwargs.get('opts', [])
        self.parallel = kwargs.get('parallel', '$(nproc)')
        self.prefix = kwargs.get('prefix', '/usr/local')

        # Some components complain if some compiler variables are
        # enabled, e.g., MVAPICH2 with F90, so provide a way for the
        # caller to disable any of the compiler variables.
        self.toolchain_control = kwargs.get('toolchain_control',
                                            {'CC': True, 'CXX': True,
                                             'F77': True, 'F90': True,
                                             'FC': True})

    def build_step(self, target='all', parallel=None):
        """Generate cmake build command line string.

        Uses the build directory cached by the preceding
        configure_step() call.
        """
        if not parallel:
            parallel = self.parallel
        return 'cmake --build {0} --target {1} -- -j{2}'.format(
            self.__build_directory, target, parallel)

    def configure_step(self, build_directory='build', directory=None,
                       environment=[], opts=None, toolchain=None):
        """Generate cmake command line string.

        # Parameters

        build_directory: Out-of-source build directory (default 'build')
        directory: Source directory; if omitted, assumes the current
        working directory is already the build directory and the source
        is its parent
        environment: Extra KEY=VALUE strings to prepend to the command.
        NOTE: the mutable default list is safe here because it is
        copied below and never mutated in place.
        opts: CMake options; falls back to the instance-level opts
        toolchain: Optional toolchain object supplying compiler /
        flag environment variables
        """
        change_directory = ''
        if directory:
            src_directory = directory
            build_directory = posixpath.join(directory, build_directory)
            change_directory = "mkdir -p {0} && cd {0} && ".format(
                build_directory)
        else:
            # Assume the build directory is a subdirectory of the source
            # directory and we are already in the build directory
            src_directory = '..'

        # Cache this for the build step
        self.__build_directory = build_directory

        # Copy so the caller's list (or the shared default) is untouched
        e = copy.copy(environment)
        if toolchain:
            # Only set the compiler variables enabled in
            # toolchain_control; flags are shell-quoted
            if toolchain.CC and self.toolchain_control.get('CC'):
                e.append('CC={}'.format(toolchain.CC))

            if toolchain.CFLAGS:
                e.append('CFLAGS={}'.format(shlex_quote(toolchain.CFLAGS)))

            if toolchain.CPPFLAGS:
                e.append('CPPFLAGS={}'.format(shlex_quote(toolchain.CPPFLAGS)))

            if toolchain.CXX and self.toolchain_control.get('CXX'):
                e.append('CXX={}'.format(toolchain.CXX))

            if toolchain.CXXFLAGS:
                e.append('CXXFLAGS={}'.format(shlex_quote(
                    toolchain.CXXFLAGS)))

            if toolchain.F77 and self.toolchain_control.get('F77'):
                e.append('F77={}'.format(toolchain.F77))

            if toolchain.F90 and self.toolchain_control.get('F90'):
                e.append('F90={}'.format(toolchain.F90))

            if toolchain.FC and self.toolchain_control.get('FC'):
                e.append('FC={}'.format(toolchain.FC))

            if toolchain.FCFLAGS:
                e.append('FCFLAGS={}'.format(shlex_quote(toolchain.FCFLAGS)))

            if toolchain.FFLAGS:
                e.append('FFLAGS={}'.format(shlex_quote(toolchain.FFLAGS)))

            if toolchain.FLIBS:
                e.append('FLIBS={}'.format(shlex_quote(toolchain.FLIBS)))

            if toolchain.LD_LIBRARY_PATH:
                e.append('LD_LIBRARY_PATH={}'.format(shlex_quote(
                    toolchain.LD_LIBRARY_PATH)))

            if toolchain.LDFLAGS:
                e.append('LDFLAGS={}'.format(shlex_quote(toolchain.LDFLAGS)))

            if toolchain.LIBS:
                e.append('LIBS={}'.format(shlex_quote(toolchain.LIBS)))

        configure_env = ' '.join(e)
        if configure_env:
            configure_env += ' '

        configure_opts = ''
        if not opts and self.cmake_opts:
            opts = self.cmake_opts
        if opts:
            configure_opts = ' '.join(opts)
            configure_opts += ' '
        if self.prefix:
            configure_opts = '-DCMAKE_INSTALL_PREFIX={0:s} {1}'.format(
                self.prefix, configure_opts)

        cmd = '{0}{1}cmake {2}{3}'.format(
            change_directory, configure_env, configure_opts, src_directory)

        # Add an annotation if the caller inherits from the annotate template
        if callable(getattr(self, 'add_annotation', None)):
            # NOTE(review): the format string only references {1} and
            # {2}, so change_directory ({0}) and src_directory ({3})
            # are passed but ignored — presumably deliberate, to record
            # just the cmake environment and options; confirm intent
            self.add_annotation('cmake', '{1}cmake {2}'.format(
                change_directory, configure_env, configure_opts,
                src_directory).strip())

        return cmd.strip() # trim whitespace
| hpc-container-maker-master | hpccm/templates/CMakeBuild.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""git template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import posixpath
import re
import subprocess
import hpccm.base_object
class git(hpccm.base_object):
    """Template for working with git repositories.

    # Parameters

    opts: List of options passed to `git clone`.  The default is
    `['--depth=1']` (shallow clone); the depth option is stripped
    automatically when a specific commit is requested.
    """

    def __init__(self, **kwargs):
        """Initialize template"""
        super(git, self).__init__(**kwargs)

        self.git_opts = kwargs.get('opts', ['--depth=1'])

    def __verify(self, repository, branch=None):
        """Verify that the specific git branch and the remote repositories exist.

        Runs `git ls-remote` locally; returns None on success,
        otherwise an error message string describing the failure.

        NOTE(review): the command string is run through the shell with
        repository / branch interpolated directly — callers should pass
        only trusted values.
        """
        cmd = 'git ls-remote --exit-code --heads {0}'.format(repository)
        if branch is not None: cmd = 'git ls-remote --exit-code {0} | grep "{1}"'.format(repository, branch)
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        stdout,stderr = p.communicate()
        if p.returncode != 0:
            return 'git repository "{}" or branch "{}" do not exist\n cmd: "{}"\n stdout: "{}"\n stderr: "{}"'.format(
                repository, branch, cmd, stdout, stderr
            )
        return None

    def clone_step(self, branch=None, commit=None, directory='', path='/tmp',
                   repository=None, verify=None, lfs=False, recursive=False):
        """Clone a git repository.

        Returns a single shell command string (steps joined with
        ' && ') that clones `repository` under `path` and optionally
        checks out a specific commit.  `commit` takes precedence over
        `branch`.  `verify` may be True (warn on failure) or 'fatal'
        (raise RuntimeError).  Returns an empty string if no
        repository is specified.
        """
        if not repository:
            logging.warning('No git repository specified')
            return ''

        if branch and commit: # pragma: no cover
            logging.warning('Both branch and commit specified, ' +
                            'ignoring branch and using commit...')

        if not directory:
            # Use the final entry in the repository as the directory,
            # stripping off any '.git'.  This is the default git
            # behavior, but the directory may be explicitly needed
            # below.
            directory = posixpath.splitext(posixpath.basename(repository))[0]

        # Copy so not to modify the member variable
        opts = list(self.git_opts)

        # Commit has precedence over branch
        if branch and not commit:
            opts.append('--branch {}'.format(branch))

        # recursive clone with submodules
        if recursive:
            opts.append('--recursive')

        opt_string = ' '.join(opts)

        if commit:
            # Likely need the full repository history, so remove
            # '--depth' if present
            opt_string = re.sub(r'--depth=\d+\s*', '', opt_string).strip()

        # Verify the commit / branch is valid
        if verify == True or verify == 'fatal':
            error = self.__verify(repository, branch)
            if error is not None:
                if verify == 'fatal':
                    raise RuntimeError(error)
                else:
                    logging.warning(error)

        # If lfs=True use `git lfs clone`
        lfs_string = " "
        if lfs:
            lfs_string = " lfs "

        # Ensure the path exists
        # Would prefer to use 'git -C', but the ancient git included
        # with CentOS7 does not support that option.
        clone = ['mkdir -p {0}'.format(path),
                 'cd {0}'.format(path),
                 'git{0}clone {1} {2} {3}'.format(
                     lfs_string, opt_string, repository, directory).strip(),
                 'cd -']

        if commit:
            # Check out the requested commit inside the fresh clone,
            # then return to the previous directory
            clone.extend(['cd {0}'.format(posixpath.join(path, directory)),
                          'git checkout {0}'.format(commit),
                          'cd -'])

        # Add labels if the caller inherits from the labels template
        if callable(getattr(self, 'add_annotation', None)):
            self.add_annotation('repository', repository)
            if branch:
                self.add_annotation('branch', branch)
            if commit:
                self.add_annotation('commit', commit)

        return ' && '.join(clone)
| hpc-container-maker-master | hpccm/templates/git.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""downloader template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging
import posixpath
import re
import hpccm.base_object
import hpccm.config
from hpccm.common import container_type
class downloader(hpccm.base_object):
    """Template for downloading source code

    Exactly one of the 'package', 'repository', or 'url' constructor
    parameters must be given; 'download_step' raises RuntimeError
    otherwise.
    """

    def __init__(self, **kwargs):
        """Initialize template"""
        self.branch = kwargs.get('branch', None)          # git branch to clone
        self.commit = kwargs.get('commit', None)          # git commit to check out
        self.package = kwargs.get('package', None)        # path to a local archive
        self.repository = kwargs.get('repository', None)  # git repository URL
        # Set by download_step / __unpack once the source location is known;
        # stays None when the archive type cannot be inferred.
        self.src_directory = None
        self.url = kwargs.get('url', None)                # archive download URL

        super(downloader, self).__init__(**kwargs)

    def download_step(self, allow_unknown_filetype=True, recursive=False,
                      unpack=True, wd=hpccm.config.g_wd):
        """Get source code

        # Parameters

        allow_unknown_filetype: if True, attempt to untar archives with
        unrecognized extensions (src_directory is left as None); if
        False, raise RuntimeError for unrecognized extensions.

        recursive: clone git submodules recursively (repository only).

        unpack: unpack the downloaded / local archive (url and package
        only).

        wd: working directory for the download.
        NOTE(review): the default is captured from hpccm.config.g_wd at
        import time; later changes to g_wd do not affect the default.

        Returns the shell command string, joined with the continuation
        style of the active container type.
        """
        if not self.repository and not self.package and not self.url:
            raise RuntimeError('must specify a package, repository, or a URL')

        if sum([bool(self.package), bool(self.repository),
                bool(self.url)]) > 1:
            raise RuntimeError('must specify exactly one of a package, repository, or a URL')

        # Check if the caller inherits from the annotate template
        annotate = getattr(self, 'add_annotation', None)

        commands = []

        if self.url:
            # Download package
            commands.append(hpccm.templates.wget().download_step(
                url=self.url, directory=wd))

            if unpack:
                commands.append(self.__unpack(
                    self.url, wd,
                    allow_unknown_filetype=allow_unknown_filetype))

            if callable(annotate):
                self.add_annotation('url', self.url)

        elif self.package:
            # Use an already available package
            if unpack:
                commands.append(self.__unpack(
                    self.package, wd,
                    allow_unknown_filetype=allow_unknown_filetype))

            if callable(annotate):
                self.add_annotation('package', self.package)

        elif self.repository:
            # Clone git repository
            commands.append(hpccm.templates.git().clone_step(
                branch=self.branch, commit=self.commit, path=wd,
                recursive=recursive, repository=self.repository))

            # Set directory where to find source: the repository basename
            # without its extension (e.g. '.git')
            self.src_directory = posixpath.join(wd, posixpath.splitext(
                posixpath.basename(self.repository))[0])

            # Add annotations
            if callable(annotate):
                self.add_annotation('repository', self.repository)
                if self.branch:
                    self.add_annotation('branch', self.branch)
                if self.commit:
                    self.add_annotation('commit', self.commit)

        # Join the individual commands with the separator appropriate
        # for the container type being generated
        if hpccm.config.g_ctype == container_type.DOCKER:
            return ' && \\\n '.join(commands)
        elif hpccm.config.g_ctype == container_type.SINGULARITY:
            return '\n '.join(commands)
        elif hpccm.config.g_ctype == container_type.BASH:
            return '\n'.join(commands)
        else:
            raise RuntimeError('Unknown container type')

    def __unpack(self, package, wd, allow_unknown_filetype=True):
        """Unpack package and set source directory

        Returns the shell command string that unpacks the archive into
        wd.  Sets self.src_directory to the directory implied by the
        archive name, or None when the file type is unrecognized.
        """
        match_tar = re.search(r'(.*)(?:(?:\.tar)|(?:\.tar\.gz)|(?:\.txz)'
                              r'|(?:\.tgz)|(?:\.tar\.bz2)|(?:\.tar\.xz))$',
                              posixpath.basename(package))
        match_zip = re.search(r'(.*)(?:(?:\.zip))$',
                              posixpath.basename(package))

        if match_tar:
            # Set directory where to find source
            self.src_directory = posixpath.join(wd, match_tar.group(1))
            return hpccm.templates.tar().untar_step(
                posixpath.join(wd, posixpath.basename(package)), directory=wd)
        elif match_zip:
            self.src_directory = posixpath.join(wd, match_zip.group(1))
            return hpccm.templates.zipfile().unzip_step(
                posixpath.join(wd, posixpath.basename(package)), directory=wd)
        elif allow_unknown_filetype:
            # Unclear what the file type is. For instance, this can
            # happen if a site uses a URL redirector and the shortened
            # URL does not include the file extension. In most cases,
            # tar can figure it out. However, note that the src
            # directory is set to None since there is no way to infer
            # what the directory structure might be inside the
            # archive.
            logging.warning('unrecognized package format')
            self.src_directory = None
            return hpccm.templates.tar().untar_step(
                posixpath.join(wd, posixpath.basename(package)), directory=wd)
        else:
            raise RuntimeError('unrecognized package format')
| hpc-container-maker-master | hpccm/templates/downloader.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""sed template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six.moves import shlex_quote
import logging # pylint: disable=unused-import
import hpccm.base_object
class sed(hpccm.base_object):
    """sed template

    Generates a shell command line that applies one or more sed
    expressions to a file.

    # Parameters

    opts: List of options to pass to sed. The default is an empty list.
    """

    def __init__(self, **kwargs):
        """Initialize sed template"""
        super(sed, self).__init__(**kwargs)

        self.sed_opts = kwargs.get('opts', [])

    def sed_step(self, file=None, in_place=True, patterns=None):
        """Generate sed command line string

        # Parameters

        file: file to modify (required; logs an error and returns ''
        when missing)

        in_place: add '-i' so the file is edited in place (default True)

        patterns: list of sed expressions, each emitted as a quoted
        '-e <expr>' argument (required). The default is None rather
        than a mutable [] so the default list cannot be shared and
        mutated across calls.
        """
        if not file:
            logging.error('file is not defined')
            return ''

        if not patterns:
            logging.error('patterns is not defined')
            return ''

        # Copy so not to modify the member variable
        opts = list(self.sed_opts)

        if in_place:
            opts.append('-i')

        opt_string = ' '.join(opts)

        # Quote each expression; expressions after the first carry a
        # leading space so the backslash-continued lines stay aligned.
        quoted_patterns = ['-e {}'.format(shlex_quote(patterns[0]))]
        quoted_patterns.extend(' -e {}'.format(shlex_quote(x))
                               for x in patterns[1:])
        quoted_pattern_string = ' \\\n'.join(quoted_patterns)

        return 'sed {0} {1} {2}'.format(opt_string, quoted_pattern_string,
                                        file)
| hpc-container-maker-master | hpccm/templates/sed.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""tar template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import re
import hpccm.base_object
class tar(hpccm.base_object):
    """tar template"""

    # Archive suffix -> tar compression flag, checked in this order.
    # A flag of None means "plain tar, no compression option".
    __suffix_flags = (('.tar.bz2', '-j'),
                      ('.tar.gz', '-z'),
                      ('.tgz', '-z'),
                      ('.tar.xz', '-J'),
                      ('.txz', '-J'),
                      ('.tbz', '-j'),
                      ('.tar', None))

    def __init__(self, **kwargs):
        """Initialize tar template"""
        super(tar, self).__init__(**kwargs)

    def untar_step(self, tarball=None, directory=None, args=None):
        """Generate untar command line string"""
        if not tarball:
            logging.error('tarball is not defined')
            return ''

        opts = ['-x', '-f {}'.format(tarball)]
        if directory:
            opts.append('-C {}'.format(directory))

        # Pick the compression flag from the tarball's suffix
        for suffix, flag in self.__suffix_flags:
            if tarball.endswith(suffix):
                if flag:
                    opts.append(flag)
                break
        else:
            logging.warning('File type not recognized, trying anyway...')

        if args:
            opts.extend(args)

        command = 'tar {}'.format(' '.join(opts))
        if directory:
            # Ensure the extraction directory exists first
            command = 'mkdir -p {0} && {1}'.format(directory, command)
        return command
| hpc-container-maker-master | hpccm/templates/tar.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""environment variables template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import hpccm.base_object
class envvars(hpccm.base_object):
    """Template for setting environment variables"""

    def __init__(self, **kwargs):
        """Initialize template"""
        super(envvars, self).__init__(**kwargs)

        self.environment = kwargs.get('environment', True)
        self.environment_variables = {}
        # Use only if the runtime environment is incompatible with the
        # non-runtime environment, e.g., PATH contains different
        # values. Otherwise, try to use the filtering options.
        self.runtime_environment_variables = {}

    def environment_step(self, include_only=None, exclude=None, runtime=False):
        """Return dictionary of environment variables"""
        # Environment handling disabled entirely
        if not self.environment:
            return {}

        env = (self.runtime_environment_variables if runtime
               else self.environment_variables)

        if include_only:
            return {key: val for key, val in env.items()
                    if key in include_only}
        if exclude:
            return {key: val for key, val in env.items()
                    if key not in exclude}
        return env
| hpc-container-maker-master | hpccm/templates/envvars.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from hpccm.templates.CMakeBuild import CMakeBuild
from hpccm.templates.ConfigureMake import ConfigureMake
from hpccm.templates.annotate import annotate
from hpccm.templates.downloader import downloader
from hpccm.templates.envvars import envvars
from hpccm.templates.git import git
from hpccm.templates.ldconfig import ldconfig
from hpccm.templates.rm import rm
from hpccm.templates.sed import sed
from hpccm.templates.tar import tar
from hpccm.templates.wget import wget
from hpccm.templates.zipfile import zipfile
| hpc-container-maker-master | hpccm/templates/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""annotate template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six import string_types
from six.moves import shlex_quote
import hpccm.base_object
class annotate(hpccm.base_object):
    """Template for setting annotations"""

    def __init__(self, **kwargs):
        """Initialize template"""
        super(annotate, self).__init__(**kwargs)

        self.annotate = kwargs.get('annotate', False)
        self.base_annotation = kwargs.get('base_annotation', True)
        self.__labels = {}

    def add_annotation(self, key, value):
        # Namespace the key under 'hpccm.<scope>.' where <scope> is a
        # caller supplied string, or the most derived class name when
        # base_annotation is simply truthy.
        if isinstance(self.base_annotation, string_types):
            prefix = 'hpccm.' + self.base_annotation + '.'
        elif self.base_annotation:
            prefix = 'hpccm.' + self.__class__.__name__ + '.'
        else:
            prefix = ''
        self.__labels[prefix + key] = shlex_quote(str(value))

    def annotate_step(self):
        """Return dictionary of annotations"""
        return self.__labels if self.annotate else {}
| hpc-container-maker-master | hpccm/templates/annotate.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""rm template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class rm(hpccm.base_object):
    """Template for cleaning up files and directories"""

    def __init__(self, **kwargs):
        """Initialize template"""
        super(rm, self).__init__(**kwargs)

    def cleanup_step(self, items=None):
        """Cleanup files and directories"""
        # Nothing to remove is treated as an error, matching the other
        # templates' behavior of logging and returning an empty string.
        if items:
            return 'rm -rf {}'.format(' '.join(items))
        logging.error('items are not defined')
        return ''
| hpc-container-maker-master | hpccm/templates/rm.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""ldconfig template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import posixpath
import hpccm.base_object
class ldconfig(hpccm.base_object):
    """Template for manipulating the dynamic linker"""

    def __init__(self, **kwargs):
        """Initialize template"""
        super(ldconfig, self).__init__(**kwargs)

        self.ldconfig = kwargs.get('ldconfig', False)

    def ldcache_step(self, conf='hpccm.conf', directory=None):
        """Add a directory to the dynamic linker cache"""
        if not directory:
            logging.error('directory is not defined')
            return ''

        # Append the directory to a ld.so.conf.d drop-in file, then
        # refresh the linker cache.
        conf_path = posixpath.join('/etc/ld.so.conf.d', conf)
        return 'echo "{0}" >> {1} && ldconfig'.format(directory, conf_path)
| hpc-container-maker-master | hpccm/templates/ldconfig.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""ConfigureMake template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from six import string_types
from six.moves import shlex_quote
import copy
import hpccm.base_object
class ConfigureMake(hpccm.base_object):
    """Template for autotools configure / make / make install workflow

    # Parameters

    opts: list of configure command line options

    parallel: number of parallel make jobs (default '$(nproc)')

    prefix: installation prefix, always emitted as the first configure
    option (default '/usr/local')

    toolchain_control: dict enabling/disabling individual compiler
    environment variables (CC, CXX, F77, F90, FC)

    Any 'disable_*', 'enable_*', 'with_*', or 'without_*' keyword with a
    truthy value is converted into the corresponding '--disable-*' /
    '--enable-*' / '--with-*' / '--without-*' configure option.
    """

    def __init__(self, **kwargs):
        """Initialize ConfigureMake template"""
        super(ConfigureMake, self).__init__(**kwargs)

        self.configure_opts = kwargs.get('opts', [])
        self.parallel = kwargs.get('parallel', '$(nproc)')
        self.prefix = kwargs.get('prefix', '/usr/local')

        # Some components complain if some compiler variables are
        # enabled, e.g., MVAPICH2 with F90, so provide a way for the
        # caller to disable any of the compiler variables.
        self.toolchain_control = kwargs.get('toolchain_control',
                                            {'CC': True, 'CXX': True,
                                             'F77': True, 'F90': True,
                                             'FC': True})

        # Process --disable, --enable, --with, and --without options
        self.__opts = []
        for k in kwargs:
            # handles both --with and --without
            if (k.startswith('disable') or k.startswith('enable') or
                k.startswith('with')) and kwargs.get(k):
                opt = '--{}'.format(k.replace('_', '-'))
                if isinstance(kwargs.get(k), string_types):
                    # string value becomes '--opt=value'
                    self.__opts.append('{0}={1}'.format(opt, kwargs.get(k)))
                else:
                    # non-string truthy value becomes a bare flag
                    self.__opts.append(opt)

    def build_step(self, parallel=None):
        """Generate make command line string

        # Parameters

        parallel: number of parallel jobs (default: self.parallel)
        """
        if not parallel:
            parallel = self.parallel
        return 'make -j{}'.format(parallel)

    def check_step(self, parallel=None):
        """Generate make check command line string

        # Parameters

        parallel: number of parallel jobs (default: self.parallel)
        """
        if not parallel:
            parallel = self.parallel
        return 'make -j{} check'.format(parallel)

    def configure_step(self, build_directory=None, directory=None,
                       environment=[], export_environment=False, opts=[],
                       toolchain=None):
        """Generate configure command line string

        # Parameters

        build_directory: if set together with 'directory', configure
        out-of-tree from this separate build directory

        directory: source directory to configure in (or relative to)

        environment: list of 'VAR=value' strings prepended to the
        configure invocation
        NOTE: the mutable defaults for 'environment' and 'opts' are
        read-only here; 'environment' is copied before modification.

        export_environment: if True, emit 'export VAR=value ... &&'
        instead of prefixing the configure command with the variables

        opts: explicit configure options; when given they replace both
        the constructor 'opts' and the --enable/--disable/--with/
        --without keyword options

        toolchain: toolchain whose compiler variables and flags are
        added to the environment (subject to toolchain_control)
        """
        change_directory = ''
        src_directory = '.'
        if directory:
            if build_directory:
                # out-of-tree build: cd into the build directory and
                # point configure back at the source directory
                src_directory = directory
                change_directory = 'mkdir -p {0} && cd {0} && '.format(
                    build_directory)
            else:
                change_directory = 'cd {} && '.format(directory)

        # Copy so not to modify the caller's list
        e = copy.copy(environment)
        # Translate each toolchain compiler / flag setting into a
        # 'VAR=value' environment entry, honoring toolchain_control for
        # the compiler variables; flag values are shell-quoted.
        if toolchain:
            if toolchain.CC and self.toolchain_control.get('CC'):
                e.append('CC={}'.format(toolchain.CC))

            if toolchain.CFLAGS:
                e.append('CFLAGS={}'.format(shlex_quote(
                    toolchain.CFLAGS)))

            if toolchain.CPPFLAGS:
                e.append('CPPFLAGS={}'.format(shlex_quote(
                    toolchain.CPPFLAGS)))

            if toolchain.CXX and self.toolchain_control.get('CXX'):
                e.append('CXX={}'.format(toolchain.CXX))

            if toolchain.CXXFLAGS:
                e.append('CXXFLAGS={}'.format(shlex_quote(
                    toolchain.CXXFLAGS)))

            if toolchain.F77 and self.toolchain_control.get('F77'):
                e.append('F77={}'.format(toolchain.F77))

            if toolchain.F90 and self.toolchain_control.get('F90'):
                e.append('F90={}'.format(toolchain.F90))

            if toolchain.FC and self.toolchain_control.get('FC'):
                e.append('FC={}'.format(toolchain.FC))

            if toolchain.FCFLAGS:
                e.append('FCFLAGS={}'.format(shlex_quote(
                    toolchain.FCFLAGS)))

            if toolchain.FFLAGS:
                e.append('FFLAGS={}'.format(shlex_quote(
                    toolchain.FFLAGS)))

            if toolchain.FLIBS:
                e.append('FLIBS={}'.format(shlex_quote(
                    toolchain.FLIBS)))

            if toolchain.LD_LIBRARY_PATH:
                e.append('LD_LIBRARY_PATH={}'.format(shlex_quote(
                    toolchain.LD_LIBRARY_PATH)))

            if toolchain.LDFLAGS:
                e.append('LDFLAGS={}'.format(shlex_quote(
                    toolchain.LDFLAGS)))

            if toolchain.LIBS:
                e.append('LIBS={}'.format(shlex_quote(
                    toolchain.LIBS)))

        if export_environment:
            configure_env = 'export {} &&'.format(' '.join(e))
        else:
            configure_env = ' '.join(e)

        # Build set of configuration command line options
        optlist = []
        if not opts:
            if self.configure_opts:
                optlist.extend(self.configure_opts)
            if self.__opts:
                optlist.extend(self.__opts)
        else:
            optlist = opts

        # Remove duplicates and sort options
        configure_opts = ' '.join(sorted(list(set(optlist))))

        # Prefix is always the first option
        if self.prefix:
            configure_opts = '--prefix={0:s} {1}'.format(self.prefix,
                                                         configure_opts)

        cmd = '{0} {1} {3}/configure {2}'.format(change_directory,
                                                 configure_env, configure_opts,
                                                 src_directory)

        # Add an annotation if the caller inherits from the annotate template
        if callable(getattr(self, 'add_annotation', None)):
            self.add_annotation('configure', '{1} {3}/configure {2}'.format(
                change_directory, configure_env, configure_opts,
                src_directory).strip())

        return cmd.strip() # trim whitespace

    def install_step(self, parallel=None):
        """Generate make install command line string

        # Parameters

        parallel: number of parallel jobs (default: self.parallel)
        """
        if not parallel:
            parallel = self.parallel
        return 'make -j{} install'.format(parallel)
| hpc-container-maker-master | hpccm/templates/ConfigureMake.py |
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""wget template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class wget(hpccm.base_object):
    """wget template

    # Parameters

    opts: list of wget command line options.  The default is
    ['-q', '-nc', '--no-check-certificate'].
    """

    def __init__(self, **kwargs):
        """Initialize wget template"""
        super(wget, self).__init__(**kwargs)

        self.wget_opts = kwargs.get('opts', ['-q', '-nc',
                                             '--no-check-certificate'])

    def download_step(self, outfile=None, referer=None, url=None,
                      directory='/tmp'):
        """Generate wget command line string

        # Parameters

        outfile: optional output filename ('-O')

        referer: optional referer header ('--referer')

        url: URL to download (required; logs an error and returns ''
        when missing)

        directory: download directory, created if necessary
        """
        if not url:
            logging.error('url is not defined')
            return ''

        # Copy so not to modify the member variable.  The previous code
        # only aliased self.wget_opts, so per-call options such as '-O'
        # accumulated on the instance across calls.
        opts = list(self.wget_opts)

        if outfile:
            opts.append('-O {}'.format(outfile))
        if referer:
            opts.append('--referer {}'.format(referer))

        # Join the per-call options, not the member variable
        opt_string = ' '.join(opts)

        # Add annotation if the caller inherits from the annotate template
        if callable(getattr(self, 'add_annotation', None)):
            self.add_annotation('url', url)

        # Ensure the directory exists
        return 'mkdir -p {1} && wget {0} -P {1} {2}'.format(opt_string,
                                                            directory, url)
| hpc-container-maker-master | hpccm/templates/wget.py |
# BSD License
#
# Copyright (c) 2016-present, Miguel Gonzalez-Fierro. All rights reserved.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Miguel Gonzalez-Fierro nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import sklearn.metrics as sklm
from datasets import LearningTask
def get_metrics(data, pred):
    """Compute the metric set matching the dataset's learning task."""
    dispatch = {
        LearningTask.REGRESSION: regression_metrics,
        LearningTask.CLASSIFICATION: classification_metrics,
        LearningTask.MULTICLASS_CLASSIFICATION:
            classification_metrics_multilabel,
    }
    task = data.learning_task
    if task not in dispatch:
        raise ValueError("No metrics defined for learning task: " + str(task))
    return dispatch[task](data.y_test, pred)
def evaluate_metrics(y_true, y_pred, metrics):
    """Apply each named metric to (y_true, y_pred), casting values to float."""
    return {name: float(fn(y_true, y_pred)) for name, fn in metrics.items()}
def classification_metrics(y_true, y_prob, threshold=0.5):
    """Binary classification metrics from positive-class probabilities."""
    y_pred = np.where(y_prob > threshold, 1, 0)

    # Log_Loss and AUC deliberately score the raw probabilities captured
    # by the closure, not the thresholded labels they are handed.
    def _log_loss(real, _pred):
        return sklm.log_loss(real, y_prob, eps=1e-5)

    def _auc(real, _pred):
        return sklm.roc_auc_score(real, y_prob)

    metrics = {
        "Accuracy": sklm.accuracy_score,
        "Log_Loss": _log_loss,
        "AUC": _auc,
        "Precision": sklm.precision_score,
        "Recall": sklm.recall_score,
    }
    return evaluate_metrics(y_true, y_pred, metrics)
def classification_metrics_multilabel(y_true, y_pred):
    """Multiclass metrics; per-class scores are support-weighted averages."""
    def _weighted(score_fn):
        # Bind average="weighted" onto a two-argument metric callable
        return lambda real, pred: score_fn(real, pred, average="weighted")

    metrics = {
        "Accuracy": sklm.accuracy_score,
        "Precision": _weighted(sklm.precision_score),
        "Recall": _weighted(sklm.recall_score),
        "F1": _weighted(sklm.f1_score),
    }
    return evaluate_metrics(y_true, y_pred, metrics)
def regression_metrics(y_true, y_pred):
    """Regression error metrics (mean absolute, mean squared, median absolute)."""
    metrics = dict(MeanAbsError=sklm.mean_absolute_error,
                   MeanSquaredError=sklm.mean_squared_error,
                   MedianAbsError=sklm.median_absolute_error)
    return evaluate_metrics(y_true, y_pred, metrics)
| gbm-bench-master | metrics.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABC, abstractmethod
import time
import pandas as pd
import numpy as np
import dask.dataframe as dd
import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import xgboost as xgb
try:
import catboost as cat
except ImportError:
cat = None
try:
import lightgbm as lgb
except (ImportError, OSError):
lgb = None
try:
import dask_xgboost as dxgb
except ImportError:
dxgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier as skhgb
except ImportError:
skhgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor as skhgb_r
except ImportError:
skhgb_r = None
try:
from sklearn.ensemble import GradientBoostingClassifier as skgb
except ImportError:
skgb = None
try:
from sklearn.ensemble import GradientBoostingRegressor as skgb_r
except ImportError:
skgb_r = None
try:
from sklearn.ensemble import RandomForestClassifier as skrf
except ImportError:
skrf = None
try:
from sklearn.ensemble import RandomForestRegressor as skrf_r
except ImportError:
skrf_r = None
try:
from cuml.ensemble import RandomForestClassifier as cumlrf
except ImportError:
cumlrf = None
try:
from cuml.ensemble import RandomForestRegressor as cumlrf_r
except ImportError:
cumlrf_r = None
from datasets import LearningTask
class Timer:
    """Context manager that records the wall-clock duration of a block.

    Attributes (all None until set):
        start: perf_counter value on entry.
        end: perf_counter value on exit.
        interval: elapsed seconds, end - start.
    """

    def __init__(self):
        self.start = self.end = self.interval = None

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *exc_info):
        # Exception info is ignored; exceptions propagate normally.
        self.end = time.perf_counter()
        self.interval = self.end - self.start
class Algorithm(ABC):
    """Abstract interface for a benchmarkable learning algorithm.

    Subclasses implement fit/test and release resources in __exit__, so
    instances are intended to be used as context managers.
    """

    @staticmethod
    def create(name):  # pylint: disable=too-many-return-statements
        """Factory: return a new algorithm instance for the given name."""
        # Lambdas defer class lookup until a name is actually selected,
        # matching the original if-chain which only referenced the
        # chosen class.
        factories = {
            'xgb-gpu': lambda: XgbGPUHistAlgorithm(),
            'xgb-gpu-dask': lambda: XgbGPUHistDaskAlgorithm(),
            'xgb-gpu-dask-old': lambda: XgbGPUHistDaskOldAlgorithm(),
            'xgb-cpu': lambda: XgbCPUHistAlgorithm(),
            'lgbm-cpu': lambda: LgbmCPUAlgorithm(),
            'lgbm-gpu': lambda: LgbmGPUAlgorithm(),
            'cat-cpu': lambda: CatCPUAlgorithm(),
            'cat-gpu': lambda: CatGPUAlgorithm(),
            'skhgb': lambda: SkHistAlgorithm(),
            'skgb': lambda: SkGradientAlgorithm(),
            'skrf': lambda: SkRandomForestAlgorithm(),
            'cumlrf': lambda: CumlRfAlgorithm(),
        }
        if name not in factories:
            raise ValueError("Unknown algorithm: " + name)
        return factories[name]()

    def __init__(self):
        self.model = None

    @abstractmethod
    def fit(self, data, args):
        pass

    @abstractmethod
    def test(self, data):
        pass

    def __enter__(self):
        pass

    @abstractmethod
    def __exit__(self, exc_type, exc_value, traceback):
        pass
# learning parameters shared by all algorithms, using the xgboost convention
# (each algorithm copies this dict and removes/translates the keys that do
# not apply to it, e.g. random forests drop reg_lambda and learning_rate)
shared_params = {"max_depth": 8, "learning_rate": 0.1,
                 "reg_lambda": 1}
class CumlRfAlgorithm(Algorithm):
    """cuML random forest."""

    def configure(self, data, args):
        """Build parameters from the shared defaults; RFs have no
        boosting-specific knobs."""
        params = shared_params.copy()
        params.pop("reg_lambda")
        params.pop("learning_rate")
        params["n_estimators"] = args.ntrees
        params.update(args.extra)
        return params

    def fit(self, data, args):
        """Train the forest; returns the wall-clock training time."""
        params = self.configure(data, args)
        estimator = (cumlrf_r if data.learning_task == LearningTask.REGRESSION
                     else cumlrf)
        with Timer() as t:
            self.model = estimator(**params).fit(data.X_train, data.y_train)
        return t.interval

    def test(self, data):
        return self.model.predict(data.X_test)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class XgbAlgorithm(Algorithm):
    """XGBoost implementation; subclasses select the tree method."""

    def configure(self, data, args):
        """Translate the shared defaults into xgboost train() parameters."""
        params = shared_params.copy()
        params["nthread"] = args.cpus

        task = data.learning_task
        if task == LearningTask.REGRESSION:
            params["objective"] = "reg:squarederror"
        elif task == LearningTask.CLASSIFICATION:
            params["objective"] = "binary:logistic"
            # Weight positives by (total rows / positive rows)
            params["scale_pos_weight"] = \
                len(data.y_train) / np.count_nonzero(data.y_train)
        elif task == LearningTask.MULTICLASS_CLASSIFICATION:
            params["objective"] = "multi:softmax"
            params["num_class"] = np.max(data.y_test) + 1

        params.update(args.extra)
        return params

    def fit(self, data, args):
        """Train a booster; returns the wall-clock training time."""
        params = self.configure(data, args)
        dtrain = xgb.DMatrix(data.X_train, data.y_train)
        with Timer() as t:
            self.model = xgb.train(params, dtrain, args.ntrees)
        return t.interval

    def test(self, data):
        return self.model.predict(xgb.DMatrix(data.X_test, data.y_test))

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class XgbGPUHistAlgorithm(XgbAlgorithm):
    """XGBoost using the GPU 'gpu_hist' tree method on device 0."""

    def configure(self, data, args):
        params = super(XgbGPUHistAlgorithm, self).configure(data, args)
        params["tree_method"] = "gpu_hist"
        params["gpu_id"] = 0
        return params
class SkRandomForestAlgorithm(Algorithm):
    """scikit-learn random forest."""

    def configure(self, data, args):
        """Shared defaults minus the boosting-only keys."""
        params = shared_params.copy()
        params.pop("reg_lambda")
        params.pop("learning_rate")
        params["n_estimators"] = args.ntrees
        params.update(args.extra)
        return params

    def fit(self, data, args):
        """Train the forest; returns the wall-clock training time."""
        params = self.configure(data, args)
        estimator = (skrf_r if data.learning_task == LearningTask.REGRESSION
                     else skrf)
        with Timer() as t:
            self.model = estimator(**params).fit(data.X_train, data.y_train)
        return t.interval

    def test(self, data):
        return self.model.predict(data.X_test)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class SkGradientAlgorithm(Algorithm):
    """scikit-learn GradientBoosting benchmark wrapper (regressor or classifier)."""

    def configure(self, data, args):
        """Adapt the shared GBM params to the sklearn estimator interface."""
        gb_params = shared_params.copy()
        # These keys are not accepted by the sklearn estimators used here.
        for unsupported in ("reg_lambda", "learning_rate"):
            del gb_params[unsupported]
        gb_params["n_estimators"] = args.ntrees
        gb_params.update(args.extra)
        return gb_params

    def fit(self, data, args):
        """Fit the model and return wall-clock training time in seconds."""
        gb_params = self.configure(data, args)
        estimator = skgb_r if data.learning_task == LearningTask.REGRESSION else skgb
        with Timer() as t:
            self.model = estimator(**gb_params).fit(data.X_train, data.y_train)
        return t.interval

    def test(self, data):
        """Predict on the held-out test split."""
        return self.model.predict(data.X_test)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class SkHistAlgorithm(Algorithm):
    """scikit-learn HistGradientBoosting benchmark wrapper (regressor or classifier)."""

    def configure(self, data, args):
        """Adapt the shared GBM params to the histogram-based estimator interface."""
        hgb_params = shared_params.copy()
        # These keys are not accepted by the estimators used here.
        for unsupported in ("reg_lambda", "learning_rate"):
            del hgb_params[unsupported]
        hgb_params["n_estimators"] = args.ntrees
        hgb_params.update(args.extra)
        return hgb_params

    def fit(self, data, args):
        """Fit the model and return wall-clock training time in seconds."""
        hgb_params = self.configure(data, args)
        estimator = skhgb_r if data.learning_task == LearningTask.REGRESSION else skhgb
        with Timer() as t:
            self.model = estimator(**hgb_params).fit(data.X_train, data.y_train)
        return t.interval

    def test(self, data):
        """Predict on the held-out test split."""
        return self.model.predict(data.X_test)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class XgbGPUHistDaskAlgorithm(XgbAlgorithm):
    """XGBoost gpu_hist trained across multiple GPUs via xgb.dask + dask-cuda."""

    def configure(self, data, args):
        """Base XGBoost params plus gpu_hist; threading is delegated to dask."""
        params = super(XgbGPUHistDaskAlgorithm, self).configure(data, args)
        params.update({"tree_method": "gpu_hist"})
        del params['nthread'] # This is handled by dask
        return params

    def get_slices(self, n_slices, X, y):
        """Split X and y row-wise into n_slices roughly equal chunks."""
        n_rows_worker = int(np.ceil(len(y) / n_slices))
        indices = []
        count = 0
        # Collect n_slices-1 cut points; np.split then yields n_slices pieces.
        for _ in range(0, n_slices - 1):
            indices.append(min(count + n_rows_worker, len(y)))
            count += n_rows_worker
        return np.split(X, indices), np.split(y, indices)

    def fit(self, data, args):
        """Train on a LocalCUDACluster and return wall-clock train seconds."""
        params = self.configure(data, args)
        # args.gpus < 0 means "use all available GPUs" (dask-cuda default).
        n_workers = None if args.gpus < 0 else args.gpus
        cluster = LocalCUDACluster(n_workers=n_workers,
                                   local_directory=args.root)
        client = Client(cluster)
        n_partitions = len(client.scheduler_info()['workers'])
        X_sliced, y_sliced = self.get_slices(n_partitions,
                                             data.X_train, data.y_train)
        X = da.concatenate([da.from_array(sub_array) for sub_array in X_sliced])
        # Rechunk to one partition-sized block per worker.
        X = X.rechunk((X_sliced[0].shape[0], data.X_train.shape[1]))
        y = da.concatenate([da.from_array(sub_array) for sub_array in y_sliced])
        y = y.rechunk(X.chunksize[0])
        dtrain = xgb.dask.DaskDMatrix(client, X, y)
        with Timer() as t:
            output = xgb.dask.train(client, params, dtrain, num_boost_round=args.ntrees)
            self.model = output['booster']
        client.close()
        cluster.close()
        return t.interval

    def test(self, data):
        """Predict locally on one GPU using the gathered booster."""
        dtest = xgb.DMatrix(data.X_test, data.y_test)
        self.model.set_param({'predictor': 'gpu_predictor'})
        return self.model.predict(dtest)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class XgbGPUHistDaskOldAlgorithm(XgbAlgorithm):
    """XGBoost gpu_hist via the older dask-xgboost (dxgb) integration."""

    def configure(self, data, args):
        """Base XGBoost params plus gpu_hist; one thread per dask worker."""
        params = super(XgbGPUHistDaskOldAlgorithm, self).configure(data, args)
        params.update({"tree_method": "gpu_hist", "nthread": 1})
        return params

    def fit(self, data, args):
        """Train with dxgb.train on a LocalCUDACluster; returns train seconds."""
        params = self.configure(data, args)
        cluster = LocalCUDACluster(n_workers=None if args.gpus < 0 else args.gpus,
                                   local_directory=args.root)
        client = Client(cluster)
        partition_size = 1000
        if isinstance(data.X_train, np.ndarray):
            X = dd.from_array(data.X_train, partition_size)
            y = dd.from_array(data.y_train, partition_size)
        else:
            X = dd.from_pandas(data.X_train, partition_size)
            y = dd.from_pandas(data.y_train, partition_size)
        # String column names are assigned before training; test() mirrors this.
        X.columns = [str(i) for i in range(0, X.shape[1])]
        with Timer() as t:
            self.model = dxgb.train(client, params, X, y, num_boost_round=args.ntrees)
        client.close()
        return t.interval

    def test(self, data):
        """Predict locally; test columns renamed to match the training names."""
        if isinstance(data.X_test, np.ndarray):
            data.X_test = pd.DataFrame(data=data.X_test, columns=np.arange(0,
                                                                           data.X_test.shape[1]),
                                       index=np.arange(0, data.X_test.shape[0]))
        data.X_test.columns = [str(i) for i in range(0, data.X_test.shape[1])]
        dtest = xgb.DMatrix(data.X_test, data.y_test)
        return self.model.predict(dtest)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class XgbCPUHistAlgorithm(XgbAlgorithm):
    """XGBoost benchmark variant trained with the CPU 'hist' tree method."""

    def configure(self, data, args):
        """Extend the base XGBoost params with the histogram tree method."""
        base = super(XgbCPUHistAlgorithm, self).configure(data, args)
        base["tree_method"] = "hist"
        return base
class LgbmAlgorithm(Algorithm):
    """Benchmark wrapper around LightGBM (lgb.train) training/prediction."""

    def configure(self, data, args):
        """Build the LightGBM parameter dict for this dataset and CLI args."""
        params = shared_params.copy()
        params.update({"max_leaves": 256,
                       "nthread": args.cpus})
        if data.learning_task == LearningTask.REGRESSION:
            params["objective"] = "regression"
        elif data.learning_task == LearningTask.CLASSIFICATION:
            params["objective"] = "binary"
            # Rebalance classes: total sample count divided by positive count.
            params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
        elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
            params["objective"] = "multiclass"
            # assumes labels are 0..k-1 integers — num_class derived from test labels
            params["num_class"] = np.max(data.y_test) + 1
        # User-supplied overrides (-extra) win over everything above.
        params.update(args.extra)
        return params

    def fit(self, data, args):
        """Train and return the wall-clock training time in seconds."""
        dtrain = lgb.Dataset(data.X_train, data.y_train,
                             free_raw_data=False)
        params = self.configure(data, args)
        with Timer() as t:
            self.model = lgb.train(params, dtrain, args.ntrees)
        return t.interval

    def test(self, data):
        """Predict on the test split; argmax over class probs for multiclass."""
        if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
            prob = self.model.predict(data.X_test)
            return np.argmax(prob, axis=1)
        return self.model.predict(data.X_test)

    def __exit__(self, exc_type, exc_value, traceback):
        # Release the raw training data kept alive by free_raw_data=False.
        self.model.free_dataset()
        del self.model
class LgbmCPUAlgorithm(LgbmAlgorithm):
    # LightGBM trains on CPU with the base configuration; no overrides needed.
    pass
class LgbmGPUAlgorithm(LgbmAlgorithm):
    """LightGBM benchmark variant trained on the GPU device."""

    def configure(self, data, args):
        """Extend the base LightGBM params with the GPU device setting."""
        base = super(LgbmGPUAlgorithm, self).configure(data, args)
        base["device"] = "gpu"
        return base
class CatAlgorithm(Algorithm):
    """Benchmark wrapper around CatBoost training/prediction."""

    def configure(self, data, args):
        """Build the CatBoost parameter dict for this dataset and CLI args."""
        params = shared_params.copy()
        params.update({
            "thread_count": args.cpus})
        if args.gpus >= 0:
            # CatBoost takes a device range string, e.g. "0-2".
            params["devices"] = "0-" + str(args.gpus)
        if data.learning_task == LearningTask.REGRESSION:
            params["objective"] = "RMSE"
        elif data.learning_task == LearningTask.CLASSIFICATION:
            params["objective"] = "Logloss"
            # Rebalance classes: total sample count divided by positive count.
            params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
        elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
            params["objective"] = "MultiClassOneVsAll"
            # assumes labels are 0..k-1 integers — derived from test labels
            params["classes_count"] = np.max(data.y_test) + 1
        # User-supplied overrides (-extra) win over everything above.
        params.update(args.extra)
        return params

    def fit(self, data, args):
        """Train and return the wall-clock training time in seconds."""
        dtrain = cat.Pool(data.X_train, data.y_train)
        params = self.configure(data, args)
        params["iterations"] = args.ntrees
        self.model = cat.CatBoost(params)
        # Only the fit call is timed; Pool construction is excluded.
        with Timer() as t:
            self.model.fit(dtrain)
        return t.interval

    def test(self, data):
        """Predict on the test split; argmax over class scores for multiclass."""
        dtest = cat.Pool(data.X_test)
        if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
            prob = self.model.predict(dtest)
            return np.argmax(prob, axis=1)
        return self.model.predict(dtest)

    def __exit__(self, exc_type, exc_value, traceback):
        del self.model
class CatCPUAlgorithm(CatAlgorithm):
    """CatBoost benchmark variant forced onto the CPU."""

    def configure(self, data, args):
        """Extend the base CatBoost params with the CPU task type."""
        base = super(CatCPUAlgorithm, self).configure(data, args)
        base["task_type"] = "CPU"
        return base
class CatGPUAlgorithm(CatAlgorithm):
    """CatBoost benchmark variant forced onto the GPU."""

    def configure(self, data, args):
        """Extend the base CatBoost params with the GPU task type."""
        base = super(CatGPUAlgorithm, self).configure(data, args)
        base["task_type"] = "GPU"
        return base
| gbm-bench-master | algorithms.py |
#!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import argparse
import json
import ast
import psutil
import algorithms
from metrics import get_metrics
from datasets import prepare_dataset
def get_number_processors(args):
    """Return the requested CPU count, or the physical core count when it is 0."""
    requested = args.cpus
    if requested != 0:
        return requested
    # cpus == 0 means "auto": count physical (non-hyperthreaded) cores.
    return psutil.cpu_count(logical=False)
def print_sys_info(args):
    """Print versions of any installed GBM libraries, the Python runtime, and job count."""
    try:
        import xgboost  # pylint: disable=import-outside-toplevel
        print("Xgboost : %s" % xgboost.__version__)
    except ImportError:
        pass
    try:
        import lightgbm  # pylint: disable=import-outside-toplevel
        print("LightGBM: %s" % lightgbm.__version__)
    except (ImportError, OSError):
        # OSError covers a present package whose native library fails to load.
        pass
    try:
        import catboost  # pylint: disable=import-outside-toplevel
        print("Catboost: %s" % catboost.__version__)
    except ImportError:
        pass
    print("System : %s" % sys.version)
    print("#jobs : %d" % args.cpus)
def parse_args():
    """Parse the benchmark CLI arguments and return the argparse namespace."""
    parser = argparse.ArgumentParser(
        description="Benchmark xgboost/lightgbm/catboost on real datasets")
    parser.add_argument("-dataset", default="all", type=str,
                        help="The dataset to be used for benchmarking. 'all' for all datasets.")
    parser.add_argument("-root", default="/opt/gbm-datasets",
                        type=str, help="The root datasets folder")
    parser.add_argument("-algorithm", default="all", type=str,
                        help=("Comma-separated list of algorithms to run; "
                              "'all' run all"))
    parser.add_argument("-gpus", default=-1, type=int,
                        help=("#GPUs to use for the benchmarks; "
                              "ignored when not supported. Default is to use all."))
    parser.add_argument("-cpus", default=0, type=int,
                        help=("#CPUs to use for the benchmarks; "
                              "0 means psutil.cpu_count(logical=False)"))
    parser.add_argument("-output", default=sys.path[0] + "/results.json", type=str,
                        help="Output json file with runtime/accuracy stats")
    # NOTE(review): the effective default is the fixed 500 below, not a
    # per-dataset value as the help text claims — confirm intended wording.
    parser.add_argument("-ntrees", default=500, type=int,
                        help=("Number of trees. Default is as specified in "
                              "the respective dataset configuration"))
    parser.add_argument("-nrows", default=None, type=int,
                        help=(
                            "Subset of rows in the datasets to use. Useful for test running "
                            "benchmarks on small amounts of data. WARNING: Some datasets will "
                            "give incorrect accuracy results if nrows is specified as they have "
                            "predefined train/test splits."))
    parser.add_argument("-warmup", action="store_true",
                        help=("Whether to run a small benchmark (fraud) as a warmup"))
    parser.add_argument("-verbose", action="store_true", help="Produce verbose output")
    parser.add_argument("-extra", default='{}', help="Extra arguments as a python dictionary")
    args = parser.parse_args()
    # default value for output json file
    # (only reachable when the user passes an explicitly empty -output "")
    if not args.output:
        args.output = "%s.json" % args.dataset
    return args
# benchmarks a single dataset
def benchmark(args, dataset_folder, dataset):
    """Run every requested algorithm on one dataset; return {alg: timings/metrics}."""
    data = prepare_dataset(dataset_folder, dataset, args.nrows)
    results = {}
    # "all" runs all algorithms (note: mutates args.algorithm for later calls)
    if args.algorithm == "all":
        args.algorithm = "xgb-gpu,xgb-cpu,xgb-gpu-dask,lgbm-cpu,lgbm-gpu,cat-cpu,cat-gpu"
    for alg in args.algorithm.split(","):
        print("Running '%s' ..." % alg)
        runner = algorithms.Algorithm.create(alg)
        # Each runner is a context manager so its model is freed afterwards.
        with runner:
            train_time = runner.fit(data, args)
            pred = runner.test(data)
            results[alg] = {
                "train_time": train_time,
                "accuracy": get_metrics(data, pred),
            }
    return results
def main():
    """CLI entry point: run the requested benchmarks and write a JSON report."""
    args = parse_args()
    args.cpus = get_number_processors(args)
    # -extra is passed as a Python dict literal; literal_eval is safe for that.
    args.extra = ast.literal_eval(args.extra)
    print_sys_info(args)
    if args.warmup:
        # A quick run on the small fraud dataset to warm GPU/JIT/caches.
        benchmark(args, os.path.join(args.root, "fraud"), "fraud")
    if args.dataset == 'all':
        args.dataset = 'airline,bosch,fraud,higgs,year,epsilon,covtype,newsgroups'
    results = {}
    for dataset in args.dataset.split(","):
        folder = os.path.join(args.root, dataset)
        results[dataset] = benchmark(args, folder, dataset)
        # Print each dataset's results as soon as they are available.
        print(json.dumps({dataset: results[dataset]}, indent=2, sort_keys=True))
    output = json.dumps(results, indent=2, sort_keys=True)
    # 'with' guarantees the handle is closed even if the write fails.
    with open(args.output, "w") as output_file:
        output_file.write(output + "\n")
    print("Results written to file '%s'" % args.output)

if __name__ == "__main__":
    main()
| gbm-bench-master | runme.py |
# MIT License
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import os
from enum import Enum
import pickle
from urllib.request import urlretrieve
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import datasets
import pandas as pd
import tqdm
pbar = None  # module-level tqdm bar shared across reporthook invocations

def show_progress(block_num, block_size, total_size):
    """urlretrieve reporthook that renders download progress with tqdm."""
    global pbar
    if pbar is None:
        pbar = tqdm.tqdm(total=total_size / 1024, unit='kB')
    downloaded = block_num * block_size
    if downloaded < total_size:
        pbar.update(block_size / 1024)
    else:
        # Download finished: close and reset so the next download gets a fresh bar.
        pbar.close()
        pbar = None
def retrieve(url, filename=None):
    """Download ``url`` to ``filename``, showing a tqdm progress bar."""
    return urlretrieve(url, filename, reporthook=show_progress)
class LearningTask(Enum):
    """Kind of supervised problem a dataset represents."""
    REGRESSION = 1
    CLASSIFICATION = 2
    MULTICLASS_CLASSIFICATION = 3
class Data:  # pylint: disable=too-few-public-methods,too-many-arguments
    """Plain holder for a train/test split and its associated task type."""

    def __init__(self, X_train, X_test, y_train, y_test, learning_task, qid_train=None,
                 qid_test=None):
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test
        self.learning_task = learning_task
        # For ranking task
        self.qid_train = qid_train
        self.qid_test = qid_test
def prepare_dataset(dataset_folder, dataset, nrows):
    """Load the named dataset, dispatching to the matching ``prepare_<name>``.

    Parameters:
        dataset_folder (str): directory for downloads and cached pickles
                              (created if missing).
        dataset (str): dataset name, e.g. 'higgs'.
        nrows (int or None): optional row limit for quick test runs.

    Raises:
        ValueError: if no ``prepare_<dataset>`` loader exists in this module.
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(dataset_folder, exist_ok=True)
    try:
        prepare_function = globals()["prepare_" + dataset]
    except KeyError:
        # Clearer than the bare KeyError the user would otherwise see.
        raise ValueError("Unknown dataset: %s" % dataset) from None
    return prepare_function(dataset_folder, nrows)
def __prepare_airline(dataset_folder, nrows, regression=False):  # pylint: disable=too-many-locals
    """Load the airline on-time dataset with pickle caching.

    Shared implementation for the classification (ArrDelay > 0) and
    regression (raw ArrDelay) variants.
    """
    url = 'http://kt.ijs.si/elena_ikonomovska/datasets/airline/airline_14col.data.bz2'
    pkl_base_name = "airline"
    if regression:
        pkl_base_name += "-regression"
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    # Cache file name encodes the variant and any row limit.
    pickle_url = os.path.join(dataset_folder,
                              pkl_base_name
                              + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    if os.path.exists(pickle_url):
        return pickle.load(open(pickle_url, "rb"))
    if not os.path.isfile(local_url):
        retrieve(url, local_url)
    cols = [
        "Year", "Month", "DayofMonth", "DayofWeek", "CRSDepTime",
        "CRSArrTime", "UniqueCarrier", "FlightNum", "ActualElapsedTime",
        "Origin", "Dest", "Distance", "Diverted", "ArrDelay"
    ]
    # load the data as int16
    dtype = np.int16
    dtype_columns = {
        "Year": dtype, "Month": dtype, "DayofMonth": dtype, "DayofWeek": dtype,
        "CRSDepTime": dtype, "CRSArrTime": dtype, "FlightNum": dtype,
        "ActualElapsedTime": dtype, "Distance":
        dtype,
        "Diverted": dtype, "ArrDelay": dtype,
    }
    df = pd.read_csv(local_url,
                     names=cols, dtype=dtype_columns, nrows=nrows)
    # Encode categoricals as numeric
    for col in df.select_dtypes(['object']).columns:
        df[col] = df[col].astype("category").cat.codes
    # Turn into binary classification problem
    if not regression:
        df["ArrDelay"] = 1 * (df["ArrDelay"] > 0)
    X = df[df.columns.difference(["ArrDelay"])].to_numpy(dtype=np.float32)
    y = df["ArrDelay"].to_numpy(dtype=np.float32)
    del df
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
                                                        test_size=0.2,
                                                        )
    if regression:
        task = LearningTask.REGRESSION
    else:
        task = LearningTask.CLASSIFICATION
    data = Data(X_train, X_test, y_train, y_test, task)
    pickle.dump(data, open(pickle_url, "wb"), protocol=4)
    return data
def prepare_airline(dataset_folder, nrows):
    """Airline dataset as a binary classification problem (ArrDelay > 0)."""
    return __prepare_airline(dataset_folder, nrows, False)
def prepare_airline_regression(dataset_folder, nrows):
    """Airline dataset as a regression problem on the raw ArrDelay value."""
    return __prepare_airline(dataset_folder, nrows, True)
def prepare_bosch(dataset_folder, nrows):
    """Load the Kaggle Bosch production-line dataset (binary classification).

    Downloads with the kaggle CLI (requires configured credentials) and
    caches the resulting train/test split as a pickle.
    """
    filename = "train_numeric.csv.zip"
    local_url = os.path.join(dataset_folder, filename)
    pickle_url = os.path.join(dataset_folder,
                              "bosch" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    if os.path.exists(pickle_url):
        return pickle.load(open(pickle_url, "rb"))
    os.system("kaggle competitions download -c bosch-production-line-performance -f " +
              filename + " -p " + dataset_folder)
    X = pd.read_csv(local_url, index_col=0, compression='zip', dtype=np.float32,
                    nrows=nrows)
    # The last column is the label; everything before it is a feature.
    y = X.iloc[:, -1].to_numpy(dtype=np.float32)
    X.drop(X.columns[-1], axis=1, inplace=True)
    X = X.to_numpy(dtype=np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
                                                        test_size=0.2,
                                                        )
    data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
    pickle.dump(data, open(pickle_url, "wb"), protocol=4)
    return data
def prepare_fraud(dataset_folder, nrows):
    """Load the Kaggle credit-card fraud dataset (binary classification).

    Downloads with the kaggle CLI (requires configured credentials) and
    caches the resulting train/test split as a pickle.

    Parameters:
        dataset_folder (str): directory for downloads and the pickle cache.
        nrows (int or None): optional row limit for quick test runs.
    """
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(dataset_folder, exist_ok=True)
    filename = "creditcard.csv"
    local_url = os.path.join(dataset_folder, filename)
    pickle_url = os.path.join(dataset_folder,
                              "creditcard" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    if os.path.exists(pickle_url):
        with open(pickle_url, "rb") as cached:
            return pickle.load(cached)
    # Bug fix: the original concatenated '-f' directly onto the file name
    # ('-fcreditcard.csv'); keep the flag and its value separated, matching
    # the kaggle invocation in prepare_bosch.
    os.system("kaggle datasets download mlg-ulb/creditcardfraud -f " +
              filename + " -p " + dataset_folder)
    df = pd.read_csv(local_url + ".zip", dtype=np.float32, nrows=nrows)
    # Features are the columns named V*; 'Class' is the fraud label.
    X = df[[col for col in df.columns if col.startswith('V')]].to_numpy(dtype=np.float32)
    y = df['Class'].to_numpy(dtype=np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
                                                        test_size=0.2,
                                                        )
    data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
    with open(pickle_url, "wb") as out:
        pickle.dump(data, out, protocol=4)
    return data
def prepare_higgs(dataset_folder, nrows):
    """Load the UCI HIGGS dataset (binary classification) with pickle caching."""
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz'
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder,
                              "higgs" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    if os.path.exists(pickle_url):
        return pickle.load(open(pickle_url, "rb"))
    if not os.path.isfile(local_url):
        retrieve(url, local_url)
    # Column 0 is the label; the remaining columns are features.
    higgs = pd.read_csv(local_url, nrows=nrows)
    X = higgs.iloc[:, 1:].to_numpy(dtype=np.float32)
    y = higgs.iloc[:, 0].to_numpy(dtype=np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
                                                        test_size=0.2,
                                                        )
    data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
    pickle.dump(data, open(pickle_url, "wb"), protocol=4)
    return data
def prepare_year(dataset_folder, nrows):
    """Load the YearPredictionMSD dataset (regression) with pickle caching."""
    url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt' \
          '.zip'
    local_url = os.path.join(dataset_folder, os.path.basename(url))
    pickle_url = os.path.join(dataset_folder,
                              "year" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    if os.path.exists(pickle_url):
        return pickle.load(open(pickle_url, "rb"))
    if not os.path.isfile(local_url):
        retrieve(url, local_url)
    # Column 0 is the regression target; the remaining columns are features.
    year = pd.read_csv(local_url, nrows=nrows, header=None)
    X = year.iloc[:, 1:].to_numpy(dtype=np.float32)
    y = year.iloc[:, 0].to_numpy(dtype=np.float32)
    if nrows is None:
        # this dataset requires a specific train/test split,
        # with the specified number of rows at the start belonging to the train set,
        # and the rest being the test set
        X_train, X_test, y_train, y_test = train_test_split(X, y, shuffle=False,
                                                            train_size=463715,
                                                            test_size=51630)
    else:
        print(
            "Warning: nrows is specified, not using predefined test/train split for "
            "YearPredictionMSD.")
        X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=77,
                                                            test_size=0.2,
                                                            )
    data = Data(X_train, X_test, y_train, y_test, LearningTask.REGRESSION)
    pickle.dump(data, open(pickle_url, "wb"), protocol=4)
    return data
def prepare_epsilon(dataset_folder, nrows):
    """Load the LIBSVM 'epsilon' dataset (binary classification) with caching."""
    url_train = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
                '/epsilon_normalized.bz2'
    url_test = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary' \
               '/epsilon_normalized.t.bz2'
    pickle_url = os.path.join(dataset_folder,
                              "epsilon" + ("" if nrows is None else "-" + str(nrows)) + ".pkl")
    local_url_train = os.path.join(dataset_folder, os.path.basename(url_train))
    local_url_test = os.path.join(dataset_folder, os.path.basename(url_test))
    if os.path.exists(pickle_url):
        return pickle.load(open(pickle_url, "rb"))
    if not os.path.isfile(local_url_train):
        retrieve(url_train, local_url_train)
    if not os.path.isfile(local_url_test):
        retrieve(url_test, local_url_test)
    X_train, y_train = datasets.load_svmlight_file(local_url_train,
                                                   dtype=np.float32)
    X_test, y_test = datasets.load_svmlight_file(local_url_test,
                                                 dtype=np.float32)
    # Densify the sparse matrices and map non-positive labels to 0.
    X_train = X_train.toarray()
    X_test = X_test.toarray()
    y_train[y_train <= 0] = 0
    y_test[y_test <= 0] = 0
    if nrows is not None:
        print("Warning: nrows is specified, not using predefined test/train split for epsilon.")
        # Pool the predefined splits, truncate to nrows, then re-split randomly.
        X_train = np.vstack((X_train, X_test))
        y_train = np.append(y_train, y_test)
        X_train = X_train[:nrows]
        y_train = y_train[:nrows]
        X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, random_state=77,
                                                            test_size=0.2,
                                                            )
    data = Data(X_train, X_test, y_train, y_test, LearningTask.CLASSIFICATION)
    pickle.dump(data, open(pickle_url, "wb"), protocol=4)
    return data
def prepare_covtype(dataset_folder, nrows):  # pylint: disable=unused-argument
    """Fetch the Forest Covertype dataset (multiclass; not cached on disk)."""
    X, y = datasets.fetch_covtype(return_X_y=True)  # pylint: disable=unexpected-keyword-arg
    if nrows is not None:
        X, y = X[:nrows], y[:nrows]
    splits = train_test_split(X, y, random_state=77, test_size=0.2)
    return Data(*splits, LearningTask.MULTICLASS_CLASSIFICATION)
def prepare_newsgroups(dataset_folder, nrows):  # pylint: disable=unused-argument
    """Fetch the vectorized 20-newsgroups dataset (multiclass; not cached on disk)."""
    X, y = datasets.fetch_20newsgroups_vectorized(subset='all', return_X_y=True)  # pylint: disable=unexpected-keyword-arg
    if nrows is not None:
        X, y = X[:nrows], y[:nrows]
    splits = train_test_split(X, y, random_state=77, test_size=0.2)
    return Data(*splits, LearningTask.MULTICLASS_CLASSIFICATION)
#!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json
import os
import csv
# Column layout of the CSV report: timing columns first, then accuracy metrics.
TIMINGS = ["train_time", "test_time"]
METRICS = ["AUC", "Accuracy", "F1", "Precision", "Recall", "MeanAbsError", "MeanSquaredError",
           "MedianAbsError"]
ALLMETRICS = TIMINGS + METRICS  # full per-algorithm column order
def load_perf_data(json_file):
    """Load one benchmark-results JSON file and return the parsed object."""
    # 'with' guarantees the handle is closed even if json.load raises.
    with open(json_file, "r") as file:
        return json.load(file)
def load_all_perf_data(files):
    """Map dataset name (file basename without '.json') to its parsed results."""
    return {
        os.path.basename(json_file).replace(".json", ""): load_perf_data(json_file)
        for json_file in files
    }
def get_all_datasets(data):
    """Return a view of the dataset names present in the loaded results."""
    return data.keys()
def get_all_algos(data):
    """Collect algorithm names across all datasets, first-seen order, deduped."""
    seen = {}
    for dataset_results in data.values():
        for algo_name in dataset_results:
            seen[algo_name] = 1
    return seen.keys()
def read_from_dict(hashmap, key, def_val="-na-"):
    """Return hashmap[key], or def_val when the key is absent."""
    return hashmap.get(key, def_val)
def combine_perf_data(data, datasets, algos):
    """Flatten results into {dataset: [[algo, timings..., metrics...], ...]}.

    Missing entries are filled with the read_from_dict default ("-na-").
    """
    all_data = {}
    for dataset in datasets:
        dset = read_from_dict(data, dataset, {})
        rows = []
        for algo in algos:
            algo_data = read_from_dict(dset, algo, {})
            metric_data = read_from_dict(algo_data, "accuracy", {})
            row = [algo]
            row += [read_from_dict(algo_data, timing) for timing in TIMINGS]
            row += [read_from_dict(metric_data, metric) for metric in METRICS]
            rows.append(row)
        all_data[dataset] = rows
    return all_data
def write_csv(all_data, datasets):
    """Write the combined results table to stdout as CSV, datasets sorted by name."""
    writer = csv.writer(sys.stdout)
    writer.writerow(['dataset', 'algorithm'] + ALLMETRICS)
    for dataset in sorted(datasets):
        writer.writerows([dataset] + row for row in all_data[dataset])
def main():
    """CLI entry: convert one results JSON file (argv[1]) to CSV on stdout."""
    data = load_perf_data(sys.argv[1])
    datasets = get_all_datasets(data)
    algos = get_all_algos(data)
    table = combine_perf_data(data, datasets, algos)
    write_csv(table, datasets)

if __name__ == '__main__':
    main()
| gbm-bench-master | json2csv.py |
gbm-bench-master | 3rdparty/fast_retraining/experiments/__init__.py |
|
#Original source: https://github.com/miguelgfierro/codebase/blob/master/python/machine_learning/metrics.py
import numpy as np
from sklearn.metrics import roc_auc_score,accuracy_score, precision_score, recall_score, f1_score
def classification_metrics_binary(y_true, y_pred):
    """Return Accuracy/Precision/Recall/F1 for hard binary predictions."""
    return {
        'Accuracy': accuracy_score(y_true, y_pred),
        'Precision': precision_score(y_true, y_pred),
        'Recall': recall_score(y_true, y_pred),
        'F1': f1_score(y_true, y_pred),
    }
def classification_metrics_binary_prob(y_true, y_prob):
    """Return ranking metrics (AUC) computed from predicted probabilities."""
    return {'AUC': roc_auc_score(y_true, y_prob)}
def classification_metrics_multilabel(y_true, y_pred, labels):
    """Return weighted-average Accuracy/Precision/Recall/F1 for multiclass labels.

    Parameters:
        y_true, y_pred: true and predicted class labels.
        labels: the label set passed through to the sklearn metrics.
    """
    m_acc = accuracy_score(y_true, y_pred)
    # Pass labels/average by keyword: recent scikit-learn releases make the
    # parameters after y_pred keyword-only, so positional passing fails.
    m_f1 = f1_score(y_true, y_pred, labels=labels, average='weighted')
    m_precision = precision_score(y_true, y_pred, labels=labels, average='weighted')
    m_recall = recall_score(y_true, y_pred, labels=labels, average='weighted')
    report = {'Accuracy':m_acc, 'Precision':m_precision, 'Recall':m_recall, 'F1':m_f1}
    return report
def binarize_prediction(y, threshold=0.5):
    """Turn scores into hard 0/1 labels: 1 where y is strictly above threshold."""
    return np.where(y > threshold, 1, 0)
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/metrics.py |
#code based on https://github.com/miguelgfierro/codebase/
from timeit import default_timer
class Timer(object):
    """Wall-clock timer, usable directly or as a context manager.

    Examples:
        >>> t = Timer()
        >>> t.start()
        >>> for i in range(100000):
        ...     r = 1
        >>> t.stop()
        >>> t.interval >= 0
        True
        >>> with Timer() as t:
        ...     r = sum(range(100))
        >>> t.interval >= 0
        True

    Attributes:
        interval (float): elapsed seconds, set by stop() / __exit__.
    """

    def __init__(self):
        self._timer = default_timer

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *args):
        # Runs even if the body raised, so interval is always recorded.
        self.stop()

    def start(self):
        """Start (or restart) the timer."""
        # Bug fix: the original assigned the timestamp to ``self.start``,
        # clobbering this method and making the timer single-use.
        self._start = self._timer()

    def stop(self):
        """Stop the timer and record the elapsed seconds in ``interval``."""
        self._end = self._timer()
        self.interval = self._end - self._start
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/timer.py |
import pandas as pd
def _get_nominal_integer_dict(nominal_vals):
    """Map each distinct value to an integer code (0, 1, ...) in first-seen order.

    Parameters:
        nominal_vals (pd.Series): A series.

    Returns:
        dict: value -> consecutive integer code.
    """
    codes = {}
    for val in nominal_vals:
        if val not in codes:
            # Codes are consecutive from 0, so the next code is the dict size.
            codes[val] = len(codes)
    return codes
def _convert_to_integer(srs, d):
    """Map every element of the series through the dictionary.

    Parameters:
        srs (pd.Series): A series.
        d (dict): A dictionary mapping values to integers.

    Returns:
        pd.Series: integer-coded series (KeyError on values missing from d).
    """
    return srs.map(d.__getitem__)
def convert_cols_categorical_to_numeric(df, col_list=None):
    """Encode categorical (object-dtype) columns as integer codes.

    Numeric columns pass through unchanged unless explicitly listed in
    ``col_list``, which forces encoding.

    Parameters:
        df (pd.DataFrame): Dataframe.
        col_list (list): Columns to force-encode even if numeric.

    Returns:
        pd.DataFrame: dataframe with numeric values.

    Examples:
        >>> df = pd.DataFrame({'letters':['a','b','c'],'numbers':[1,2,3]})
        >>> print(convert_cols_categorical_to_numeric(df))
           letters  numbers
        0        0        1
        1        1        2
        2        2        3
    """
    forced = [] if col_list is None else col_list
    ret = pd.DataFrame()
    for name in df.columns:
        column = df[name]
        if column.dtype == 'object' or name in forced:
            ret[name] = _convert_to_integer(column, _get_nominal_integer_dict(column))
        else:
            ret[name] = column
    return ret
def convert_related_cols_categorical_to_numeric(df, col_list):
    """Encode several related categorical columns with one shared code dictionary.

    All columns in ``col_list`` share a single value-to-integer mapping, so
    equal values across those columns receive equal codes.

    Parameters:
        df (pd.DataFrame): Dataframe.
        col_list (list): Related columns to encode together.

    Returns:
        pd.DataFrame: dataframe with numeric values.

    Examples:
        >>> df = pd.DataFrame({'letters':['a','b','c'],'letters2':['c','d','e'],'numbers':[1,2,3]})
        >>> print(convert_related_cols_categorical_to_numeric(df, col_list=['letters','letters2']))
           letters  letters2  numbers
        0        0         2        1
        1        1         3        2
        2        2         4        3
    """
    # Pool the related columns, dedupe in first-seen order, and build one
    # shared dictionary for all of them.
    pooled = pd.concat([df[c] for c in col_list], axis=0)
    shared_codes = _get_nominal_integer_dict(pd.Series(pooled.unique()))
    ret = pd.DataFrame()
    for name in df.columns:
        if name in col_list:
            ret[name] = _convert_to_integer(df[name], shared_codes)
        else:
            ret[name] = df[name]
    return ret
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/conversion.py |
import os
import pandas as pd
import arff
import numpy as np
from functools import reduce
import sqlite3
import logging
from libs.planet_kaggle import (to_multi_label_dict, get_file_count, enrich_with_feature_encoding,
featurise_images, generate_validation_files)
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
_FRAUD_PATH = 'fraud_detection', 'credit_card_fraud_kaggle', 'creditcard.csv'
_IOT_PATH = 'iot', 'sensor_stream_berkeley', 'sensor.arff'
_AIRLINE_PATH = 'airline', 'airline_14col.data'
_FOOTBALL_PATH = 'football', 'database.sqlite'
_BCI_PATH = 'bci', 'data.npz'
_HIGGS_PATH = 'higgs', 'HIGGS.csv'
_KAGGLE_ROOT = 'planet'
_PLANET_KAGGLE_LABEL_CSV = 'train_v2.csv'
_PLANET_KAGGLE_TRAIN_DIR = 'train-jpg'
_PLANET_KAGGLE_VAL_DIR = 'validate-jpg'
def _get_datapath():
try:
datapath = os.environ['MOUNT_POINT']
except KeyError:
logger.info("MOUNT_POINT not found in environment. Defaulting to /fileshare")
datapath = '/fileshare'
return datapath
def load_fraud():
    """ Loads the credit card fraud data

    Transactions made by European credit-card holders in September 2013:
    284,807 transactions over two days, of which 492 (0.172%) are frauds.
    Features V1..V28 are PCA components; only 'Time' and 'Amount' are raw.
    'Class' is 1 for fraud, 0 otherwise.  Given the class imbalance, AUPRC
    is the recommended metric.

    Collected by Worldline and the ULB Machine Learning Group
    (http://mlg.ulb.ac.be).  Please cite: Dal Pozzolo et al., "Calibrating
    Probability with Undersampling for Unbalanced Classification", CIDM 2015.

    Returns
    -------
    pandas DataFrame
    """
    csv_path = reduce(os.path.join, _FRAUD_PATH, _get_datapath())
    return pd.read_csv(csv_path)
def load_iot():
    """ Loads iot data

    Sensor stream (temperature, humidity, light, sensor voltage) from 54
    sensors deployed in the Intel Berkeley Research Lab, recorded over two
    months (1 reading per 1-3 minutes).  The learning task is to identify
    the sensor ID (1 of 54) from the readings and recording time; concepts
    drift over time (e.g. lighting during working hours, meeting-room
    temperature).

    Returns
    -------
    pandas DataFrame
    """
    # FIX: the original opened the file without ever closing it; the
    # context manager releases the handle deterministically.
    with open(reduce(os.path.join, _IOT_PATH, _get_datapath())) as arff_file:
        dataset = arff.load(arff_file)
    columns = [attribute[0] for attribute in dataset['attributes']]
    return pd.DataFrame(dataset['data'], columns=columns)
def load_airline():
    """ Loads airline data

    Flight arrival and departure details for all commercial flights within
    the USA, October 1987 - April 2008 (~116M records, ~5.76 GB).  13
    attribute columns (Year, Month, DayofMonth, DayofWeek, CRS times,
    carrier, flight number, elapsed time, origin, destination, distance,
    diverted flag); the target is Arrival Delay in minutes (may be negative).
    Source: http://kt.ijs.si/elena_ikonomovska/data.html

    Returns
    -------
    pandas DataFrame
    """
    column_names = ['Year', 'Month', 'DayofMonth', 'DayofWeek', 'CRSDepTime',
                    'CRSArrTime', 'UniqueCarrier', 'FlightNum',
                    'ActualElapsedTime', 'Origin', 'Dest', 'Distance',
                    'Diverted', 'ArrDelay']
    data_file = reduce(os.path.join, _AIRLINE_PATH, _get_datapath())
    return pd.read_csv(data_file, names=column_names)
def load_football():
    """ Loads football data

    Football stats: 25,000+ matches and 10,000+ players from 11 European
    countries, seasons 2008-2016, with EA Sports FIFA player attributes,
    team line-ups, betting odds from up to 10 providers and detailed match
    events.  Column meanings: http://www.football-data.co.uk/notes.txt
    Table shapes: countries (11, 2), matches (25979, 115), leagues (11, 3),
    teams (299, 5), players (183978, 42).
    Source: https://www.kaggle.com/hugomathien/soccer

    Returns
    -------
    list of pandas DataFrame
    """
    database_path = reduce(os.path.join, _FOOTBALL_PATH, _get_datapath())
    with sqlite3.connect(database_path) as con:
        countries = pd.read_sql_query("SELECT * from Country", con)
        matches = pd.read_sql_query("SELECT * from Match", con)
        leagues = pd.read_sql_query("SELECT * from League", con)
        teams = pd.read_sql_query("SELECT * from Team", con)
        # Consistency fix: the players table used pd.read_sql while every
        # other table used pd.read_sql_query; behaviour is identical for a
        # SQL query, so unify on read_sql_query.
        players = pd.read_sql_query("SELECT * FROM Player_Attributes;", con)
    return countries, matches, leagues, teams, players
def load_bci():
    """ Loads BCI data

    Measurements from 64 EEG sensors on the scalp of a single participant,
    recorded to determine from brain activity when the participant is
    paying attention.

    Returns
    -------
    A tuple containing four numpy arrays:
    train features, train labels, test features, test labels
    """
    archive = np.load(reduce(os.path.join, _BCI_PATH, _get_datapath()))
    return (archive['train_X'], archive['train_y'],
            archive['test_X'], archive['test_y'])
def load_higgs():
    """ Loads HIGGS data

    11M Monte-Carlo-simulated particle measurements for classifying a
    Higgs-boson signal process vs. background.  Column 1 is the class label
    (1 signal, 0 background) followed by 21 low-level kinematic features
    and 7 physicist-derived high-level features.
    Source: https://archive.ics.uci.edu/ml/datasets/HIGGS

    Returns
    -------
    pandas DataFrame
    """
    jet_features = ['jet_{}_{}'.format(i, f)
                    for i in (1, 2, 3, 4)
                    for f in ('pt', 'eta', 'phi', 'b-tag')]
    column_names = (['boson', 'lepton_pT', 'lepton_eta', 'lepton_phi',
                     'missing_energy_magnitude', 'missing_energy_phi']
                    + jet_features
                    + ['m_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb'])
    csv_path = reduce(os.path.join, _HIGGS_PATH, _get_datapath())
    return pd.read_csv(csv_path, names=column_names)
def load_planet_kaggle():
    """ Loads Planet Kaggle data
    Dataset of satellite images of the Amazon. The objective of this dataset is to label satellite image chips
    with atmospheric conditions and various classes of land cover/land use. Resulting algorithms will help the
    global community better understand where, how, and why deforestation happens all over the world. The images
    use the GeoTiff format and each contain four bands of data: red, green, blue, and near infrared.
    To treat the images we used transfer learning with the CNN ResNet50. The images are featurized with this
    deep neural network. Once the features are generated we can use a boosted tree to classify them.
    Link to the source: https://www.kaggle.com/c/planet-understanding-the-amazon-from-space/data
    Returns
    -------
    A tuple containing four numpy arrays
        train_features
        y_train
        validation_features
        y_val
    """
    # Resolve dataset locations under the data mount point.
    csv_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_LABEL_CSV), _get_datapath())
    train_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_TRAIN_DIR), _get_datapath())
    val_path = reduce(os.path.join, (_KAGGLE_ROOT, _PLANET_KAGGLE_VAL_DIR), _get_datapath())
    assert os.path.isfile(csv_path)
    assert os.path.exists(train_path)
    # Build the validation split lazily: on first run, move the tail of the
    # training images into the (possibly freshly created) validation folder.
    if not os.path.exists(val_path): os.mkdir(val_path)
    if not os.listdir(val_path):
        logger.info('Validation folder is empty, moving files...')
        generate_validation_files(train_path, val_path)
    logger.info('Reading in labels')
    # One-hot encode the space-separated tag strings into label columns.
    labels_df = pd.read_csv(csv_path).pipe(enrich_with_feature_encoding)
    multi_label_dict = to_multi_label_dict(labels_df)
    nb_train_samples = get_file_count(os.path.join(train_path, '*.jpg'))
    nb_validation_samples = get_file_count(os.path.join(val_path, '*.jpg'))
    logger.debug('Number of training files {}'.format(nb_train_samples))
    logger.debug('Number of validation files {}'.format(nb_validation_samples))
    logger.debug('Loading model')
    # Headless ResNet50 (no classification head) used as a fixed feature
    # extractor for transfer learning.
    model = ResNet50(include_top=False)
    # Validation images keep their original 'train_{}' file names because
    # they were moved out of the training folder (see
    # generate_validation_files); their indices continue after the training
    # range, hence the shifted range below.
    train_features, train_names = featurise_images(model,
                                                   train_path,
                                                   'train_{}',
                                                   range(nb_train_samples),
                                                   desc='Featurising training images')
    validation_features, validation_names = featurise_images(model,
                                                             val_path,
                                                             'train_{}',
                                                             range(nb_train_samples, nb_train_samples + nb_validation_samples),
                                                             desc='Featurising validation images')
    # Prepare data: look up the multi-hot label vector for every featurised
    # image, in the same order as the feature rows.
    y_train = np.array([multi_label_dict[name] for name in train_names])
    y_val = np.array([multi_label_dict[name] for name in validation_names])
    return train_features, y_train, validation_features, y_val
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/loaders.py |
gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/__init__.py |
|
#code from https://www.kaggle.com/airback/match-outcome-prediction-in-football
import numpy as np
import pandas as pd
def get_fifa_stats(match, player_stats):
    ''' Aggregates fifa stats for a given match.

    Parameters:
        match (pd.Series): Row of the match table; must hold 'match_api_id',
            'date' and the 22 home/away player id columns.
        player_stats (pd.DataFrame): Player attribute history with
            'player_api_id', 'date' and 'overall_rating' columns.
    Returns:
        pd.Series: One '<player>_overall_rating' entry per player plus
        'match_api_id'.  Players with a missing id are rated 0.
    '''
    #Define variables
    match_id = match.match_api_id
    date = match['date']
    players = ['home_player_1', 'home_player_2', 'home_player_3', "home_player_4", "home_player_5",
               "home_player_6", "home_player_7", "home_player_8", "home_player_9", "home_player_10",
               "home_player_11", "away_player_1", "away_player_2", "away_player_3", "away_player_4",
               "away_player_5", "away_player_6", "away_player_7", "away_player_8", "away_player_9",
               "away_player_10", "away_player_11"]
    player_stats_new = pd.DataFrame()
    names = []
    #Loop through all players
    for player in players:
        #Get player ID
        player_id = match[player]
        #Get player stats
        stats = player_stats[player_stats.player_api_id == player_id]
        #Identify current stats: most recent rating strictly before the match
        current_stats = stats[stats.date < date].sort_values(by = 'date', ascending = False)[:1]
        if np.isnan(player_id) == True:
            # Unknown player -> neutral rating of 0
            overall_rating = pd.Series(0)
        else:
            current_stats.reset_index(inplace = True, drop = True)
            overall_rating = pd.Series(current_stats.loc[0, "overall_rating"])
        #Rename stat
        name = "{}_overall_rating".format(player)
        names.append(name)
        #Aggregate stats
        player_stats_new = pd.concat([player_stats_new, overall_rating], axis = 1)
    player_stats_new.columns = names
    player_stats_new['match_api_id'] = match_id
    player_stats_new.reset_index(inplace = True, drop = True)
    #Return player stats
    # FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
    # .iloc[0] keeps the original positional row access.
    return player_stats_new.iloc[0]
def get_fifa_data(matches, player_stats):
    ''' Gets fifa data for all matches.

    Applies get_fifa_stats row-by-row over the match table and stacks the
    resulting rating Series into one DataFrame.
    '''
    return matches.apply(lambda row: get_fifa_stats(row, player_stats), axis=1)
def get_match_label(match):
    ''' Derives a label for a given match.

    Returns a pd.Series with 'match_api_id' and 'label', where label is
    'Win', 'Draw' or 'Defeat' from the home team's perspective.
    '''
    home = match['home_team_goal']
    away = match['away_team_goal']
    if home > away:
        outcome = "Win"
    elif home == away:
        outcome = "Draw"
    else:
        outcome = "Defeat"
    label = pd.DataFrame()
    label.loc[0, 'match_api_id'] = match['match_api_id']
    label.loc[0, 'label'] = outcome
    return label.loc[0]
def get_overall_fifa_rankings(fifa, get_overall = False):
    ''' Get overall fifa rankings from fifa data.

    With get_overall=True only the '*overall_rating' columns (plus
    'match_api_id') are kept; otherwise all columns except the
    '*date_stat*' ones are returned.
    '''
    if get_overall == True:
        rankings = fifa.loc[:, fifa.columns.str.contains('overall_rating')]
        rankings.loc[:, 'match_api_id'] = fifa.loc[:, 'match_api_id']
        return rankings
    date_cols = fifa.loc[:, fifa.columns.str.contains('date_stat')].columns
    return fifa.drop(date_cols, axis=1)
def get_last_matches(matches, date, team, x = 10):
    ''' Get the last x matches of a given team.

    Selects matches where the team appeared home or away, keeps only those
    played strictly before `date`, and returns the x most recent ones.
    '''
    involved = ((matches['home_team_api_id'] == team)
                | (matches['away_team_api_id'] == team))
    history = matches[involved]
    before = history[history.date < date]
    return before.sort_values(by='date', ascending=False).iloc[0:x, :]
def get_last_matches_against_eachother(matches, date, home_team, away_team, x = 10):
    ''' Get the last x matches of two given teams.

    Parameters:
        matches (pd.DataFrame): Match table.
        date: Cut-off date; only earlier matches are considered.
        home_team, away_team: Team api ids (either venue counts).
        x (int): Maximum number of head-to-head matches returned.
    Returns:
        pd.DataFrame: Up to x most recent head-to-head matches.
    '''
    #Find matches of both teams, in either home/away configuration
    home_matches = matches[(matches['home_team_api_id'] == home_team) & (matches['away_team_api_id'] == away_team)]
    away_matches = matches[(matches['home_team_api_id'] == away_team) & (matches['away_team_api_id'] == home_team)]
    total_matches = pd.concat([home_matches, away_matches])
    # FIX: the original wrapped this in a bare try/except with an identical
    # fallback and a can't-happen shape check; .iloc[0:x] never raises on a
    # short frame (it just returns fewer rows), so the dead code is removed.
    before = total_matches[total_matches.date < date]
    last_matches = before.sort_values(by='date', ascending=False).iloc[0:x, :]
    return last_matches
def get_goals(matches, team):
    ''' Get the goals of a specfic team from a set of matches.

    Sums the team's goals over its home and away appearances.
    '''
    scored_at_home = matches.home_team_goal[matches.home_team_api_id == team].sum()
    scored_away = matches.away_team_goal[matches.away_team_api_id == team].sum()
    return int(scored_at_home) + int(scored_away)
def get_goals_conceided(matches, team):
    ''' Get the goals conceided of a specfic team from a set of matches.

    Sums the opponents' goals over the team's away and home appearances.
    '''
    conceded_away = matches.home_team_goal[matches.away_team_api_id == team].sum()
    conceded_at_home = matches.away_team_goal[matches.home_team_api_id == team].sum()
    return int(conceded_away) + int(conceded_at_home)
def get_wins(matches, team):
    ''' Get the number of wins of a specfic team from a set of matches.

    Counts matches the team won at home plus matches it won away.
    '''
    won_at_home = matches.home_team_goal[
        (matches.home_team_api_id == team)
        & (matches.home_team_goal > matches.away_team_goal)].count()
    won_away = matches.away_team_goal[
        (matches.away_team_api_id == team)
        & (matches.away_team_goal > matches.home_team_goal)].count()
    return int(won_at_home) + int(won_away)
def get_match_features(match, matches, x = 10):
    ''' Create match specific features for a given match.

    Parameters:
        match (pd.Series): Row of the match table.
        matches (pd.DataFrame): Full match table used as history.
        x (int): Number of most recent matches considered per team.
    Returns:
        pd.Series: Goal-difference, win-count and season features keyed by
        'match_api_id'.
    '''
    #Define variables
    date = match.date
    home_team = match.home_team_api_id
    away_team = match.away_team_api_id
    # FIX: the `x` parameter was accepted but ignored (both calls
    # hard-coded x = 10); pass the caller's value through.
    matches_home_team = get_last_matches(matches, date, home_team, x = x)
    matches_away_team = get_last_matches(matches, date, away_team, x = x)
    # The head-to-head window deliberately stays short (3 matches).
    last_matches_against = get_last_matches_against_eachother(matches, date, home_team, away_team, x = 3)
    #Create goal variables
    home_goals = get_goals(matches_home_team, home_team)
    away_goals = get_goals(matches_away_team, away_team)
    home_goals_conceided = get_goals_conceided(matches_home_team, home_team)
    away_goals_conceided = get_goals_conceided(matches_away_team, away_team)
    #Define result data frame
    result = pd.DataFrame()
    #Define ID features
    result.loc[0, 'match_api_id'] = match.match_api_id
    result.loc[0, 'league_id'] = match.league_id
    #Create match features
    result.loc[0, 'home_team_goals_difference'] = home_goals - home_goals_conceided
    result.loc[0, 'away_team_goals_difference'] = away_goals - away_goals_conceided
    result.loc[0, 'games_won_home_team'] = get_wins(matches_home_team, home_team)
    result.loc[0, 'games_won_away_team'] = get_wins(matches_away_team, away_team)
    result.loc[0, 'games_against_won'] = get_wins(last_matches_against, home_team)
    result.loc[0, 'games_against_lost'] = get_wins(last_matches_against, away_team)
    #Add season (first year of the 'YYYY/YYYY' season string)
    result.loc[0, 'season'] = int(match['season'].split('/')[0])
    #Return match features
    return result.loc[0]
def create_feables(matches, fifa, bookkeepers, get_overall = False, horizontal = True, x = 10, all_leagues = True, verbose = True):
    ''' Create and aggregate features and labels for all matches.

    Parameters:
        matches (pd.DataFrame): Match table.
        fifa (pd.DataFrame): Output of get_fifa_data().
        bookkeepers (list): Bookkeeper column prefixes (e.g. ['B365']).
        get_overall (bool): Restrict FIFA stats to overall ratings only.
        horizontal (bool): Aggregate bookkeeper data horizontally.
        x (int): Number of recent matches used for form features.
        all_leagues (bool): One-hot encode the league id.
        verbose (bool): Print progress messages.
    Returns:
        pd.DataFrame: Features merged with labels, NA rows dropped.
    '''
    #Get fifa stats features
    fifa_stats = get_overall_fifa_rankings(fifa, get_overall)
    if verbose == True:
        print("Generating match features...")
    # FIX: the original lambda parameter was named `x`, shadowing the
    # function's `x` argument and forcing a hard-coded x = 10; renaming the
    # lambda parameter lets the caller's value flow through.
    match_stats = matches.apply(lambda row: get_match_features(row, matches, x = x), axis = 1)
    #Create dummies for league ID feature
    if all_leagues:
        dummies = pd.get_dummies(match_stats['league_id']).rename(columns = lambda c: 'League_' + str(c))
        match_stats = pd.concat([match_stats, dummies], axis = 1)
        match_stats.drop(['league_id'], inplace = True, axis = 1)
    if verbose == True:
        print("Generating match labels...")
    #Create match labels
    labels = matches.apply(get_match_label, axis = 1)
    if verbose == True:
        print("Generating bookkeeper data...")
    # FIX: `horizontal` was ignored and hard-coded to True; honour the
    # parameter (the default keeps the previous behaviour).
    bk_data = get_bookkeeper_data(matches, bookkeepers, horizontal = horizontal)
    bk_data.loc[:,'match_api_id'] = matches.loc[:,'match_api_id']
    #Merges features and labels into one frame
    features = pd.merge(match_stats, fifa_stats, on = 'match_api_id', how = 'left')
    features = pd.merge(features, bk_data, on = 'match_api_id', how = 'left')
    feables = pd.merge(features, labels, on = 'match_api_id', how = 'left')
    #Drop NA values
    feables.dropna(inplace = True)
    #Return preprocessed data
    return feables
def convert_odds_to_prob(match_odds):
    """Convert bookkeeper decimal odds into normalised probabilities.

    Parameters:
        match_odds (pd.DataFrame): Must hold 'match_api_id', 'bookkeeper',
            'Win', 'Draw' and 'Defeat' columns with decimal odds.
    Returns:
        pd.DataFrame: Same identifier columns with 'Win'/'Draw'/'Defeat'
        replaced by probabilities that sum to 1 per row (the bookkeeper's
        overround margin is normalised away).
    """
    #Define variables
    match_id = match_odds.loc[:,'match_api_id']
    bookkeeper = match_odds.loc[:,'bookkeeper']
    win_odd = match_odds.loc[:,'Win']
    draw_odd = match_odds.loc[:,'Draw']
    loss_odd = match_odds.loc[:,'Defeat']
    #Converts odds to prob: the inverse of a decimal odd is the implied
    #(unnormalised) probability of that outcome
    win_prob = 1 / win_odd
    draw_prob = 1 / draw_odd
    loss_prob = 1 / loss_odd
    total_prob = win_prob + draw_prob + loss_prob
    probs = pd.DataFrame()
    #Define output format and scale probs by sum over all probs
    #(dividing by total_prob removes the bookkeeper's margin so rows sum to 1)
    probs.loc[:,'match_api_id'] = match_id
    probs.loc[:,'bookkeeper'] = bookkeeper
    probs.loc[:,'Win'] = win_prob / total_prob
    probs.loc[:,'Draw'] = draw_prob / total_prob
    probs.loc[:,'Defeat'] = loss_prob / total_prob
    #Return probs and meta data
    return probs
def get_bookkeeper_data(matches, bookkeepers, horizontal = True):
    ''' Aggregates bookkeeper data for all matches and bookkeepers.

    Parameters:
        matches (pd.DataFrame): Match table whose odds columns are prefixed
            by the bookkeeper name (e.g. 'B365H', 'B365D', 'B365A').
        bookkeepers (list): Bookkeeper column prefixes.
        horizontal (bool): True -> one row per match with
            '<bookkeeper>_Win/Draw/Defeat' probability columns side by side;
            False -> long format with one row per (match, bookkeeper) and
            raw 'Win'/'Draw'/'Defeat' odds.
    Returns:
        pd.DataFrame: Aggregated bookkeeper data.
    '''
    bk_data = pd.DataFrame()
    #Loop through bookkeepers
    for bookkeeper in bookkeepers:
        #Find columns containing data of bookkeeper
        temp_data = matches.loc[:, (matches.columns.str.contains(bookkeeper))]
        temp_data['bookkeeper'] = str(bookkeeper)
        temp_data['match_api_id'] = matches.loc[:, 'match_api_id']
        #Rename the three odds columns and convert them to numeric.
        # FIX: mutate via column reassignment instead of writing into
        # Index.values in place, which modern pandas does not honour.
        temp_data.columns = ['Win', 'Draw', 'Defeat'] + list(temp_data.columns[3:])
        for outcome in ('Win', 'Draw', 'Defeat'):
            temp_data[outcome] = pd.to_numeric(temp_data[outcome])
        #Check if data should be aggregated horizontally
        if (horizontal == True):
            #Convert data to probs
            temp_data = convert_odds_to_prob(temp_data)
            temp_data.drop('match_api_id', axis = 1, inplace = True)
            temp_data.drop('bookkeeper', axis = 1, inplace = True)
            #Rename columns with bookkeeper names
            win_name = bookkeeper + "_" + "Win"
            draw_name = bookkeeper + "_" + "Draw"
            defeat_name = bookkeeper + "_" + "Defeat"
            temp_data.columns = [win_name, draw_name, defeat_name]
            #Aggregate data side by side
            bk_data = pd.concat([bk_data, temp_data], axis = 1)
        else:
            # FIX: DataFrame.append was deprecated in pandas 1.4 and removed
            # in 2.0; pd.concat is the supported replacement.
            bk_data = pd.concat([bk_data, temp_data], ignore_index = True)
    #If horizontal add match api id to data
    if (horizontal == True):
        # FIX: the original assigned to temp_data, a frame whose contents
        # were already copied into bk_data, so the id column was silently
        # lost; assign to the aggregated frame instead.
        bk_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']
    #Return bookkeeper data
    return bk_data
def get_bookkeeper_probs(matches, bookkeepers, horizontal = False):
    ''' Get bookkeeper data and convert to probabilities for vertical aggregation.

    Note: the vertical (long) format is always requested from
    get_bookkeeper_data, matching the original behaviour.
    '''
    raw_odds = get_bookkeeper_data(matches, bookkeepers, horizontal=False)
    return convert_odds_to_prob(raw_odds)
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/football.py |
#Source: https://github.com/ianozsvald/ipython_memory_usage
"""Profile mem usage envelope of IPython commands and report interactively"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import time
import memory_profiler
from IPython import get_ipython
import threading
# Keep a global accounting of the last known memory usage, which is the
# reference point for the per-cell memory delta calculation.
previous_call_memory_usage = memory_profiler.memory_usage()[0]
t1 = time.time()  # start timestamp; reset by pre_run_cell before each command
keep_watching = True  # cleared by watch_memory after each cell completes
watching_memory = True  # controls whether watch_memory prints its report
input_cells = get_ipython().user_ns['In']  # IPython's input-cell history list
def start_watching_memory():
    """Register memory profiling tools to IPython instance."""
    global watching_memory
    watching_memory = True
    shell = get_ipython()
    # Hook both sides of cell execution: timing before, reporting after.
    shell.events.register("post_run_cell", watch_memory)
    shell.events.register("pre_run_cell", pre_run_cell)
def stop_watching_memory():
    """Unregister memory profiling tools from IPython instance."""
    global watching_memory
    watching_memory = False
    shell = get_ipython()
    for event, callback in (("post_run_cell", watch_memory),
                            ("pre_run_cell", pre_run_cell)):
        try:
            shell.events.unregister(event, callback)
        except ValueError:
            # Callback was not registered; nothing to remove.
            pass
def watch_memory():
    """Report memory and wall time used by the cell that just finished.

    Runs as an IPython ``post_run_cell`` callback: compares current process
    memory against the value recorded after the previous cell, computes the
    elapsed time since ``pre_run_cell`` stored ``t1``, and prints a one-line
    summary when ``watching_memory`` is enabled.
    """
    # bring in the global memory usage value from the previous iteration
    # FIX: the original `global` statement also listed `peak_memory_usage`,
    # a name that is never defined or assigned anywhere in this module.
    global previous_call_memory_usage, keep_watching, \
        watching_memory, input_cells
    new_memory_usage = memory_profiler.memory_usage()[0]
    memory_delta = new_memory_usage - previous_call_memory_usage
    keep_watching = False
    # calculate time delta using global t1 (from the pre-run event) and
    # current time
    time_delta_secs = time.time() - t1
    # The last entry of In[] is the cell that just ran.
    num_commands = len(input_cells) - 1
    cmd = "In [{}]".format(num_commands)
    # convert the results into a pretty string
    output_template = ("{cmd} used {memory_delta:0.4f} MiB RAM in "
                       "{time_delta:0.2f}s, total RAM usage "
                       "{memory_usage:0.2f} MiB")
    output = output_template.format(time_delta=time_delta_secs,
                                    cmd=cmd,
                                    memory_delta=memory_delta,
                                    memory_usage=new_memory_usage)
    if watching_memory:
        print(str(output))
    previous_call_memory_usage = new_memory_usage
def pre_run_cell():
    """Capture current time before we execute the current command"""
    # Stored globally so watch_memory (post_run_cell) can compute the
    # elapsed wall time of the cell.
    global t1
    t1 = time.time()
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/notebook_memory_management.py |
import os
import multiprocessing
def get_number_processors():
    """Return the number of CPUs available to this machine.

    Returns:
        int: CPU count, always >= 1.

    FIX: ``os.cpu_count()`` does not raise when the count cannot be
    determined — it returns ``None`` — so the original bare ``except``
    fallback never fired and the function could return ``None``.  Fall back
    to ``multiprocessing.cpu_count()`` (which raises ``NotImplementedError``
    instead of returning ``None``) only when needed.
    """
    num = os.cpu_count()
    if num is None:
        num = multiprocessing.cpu_count()
    return num
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/utils.py |
import os
import numpy as np
import glob
from tqdm import tqdm
import shutil
from keras.preprocessing import image
from keras.applications.imagenet_utils import preprocess_input
def labels_from(labels_df):
    """ Extracts the unique labels from the labels dataframe.

    Labels are the space-separated tokens of the 'tags' column; the returned
    list preserves first-seen order.
    """
    # A dict is insertion-ordered, so it deduplicates while keeping the
    # order in which labels first appear.
    seen = {}
    for tag_str in labels_df.tags.values:
        for label in tag_str.split(' '):
            seen.setdefault(label, None)
    return list(seen)
def enrich_with_feature_encoding(labels_df):
    """Append a one-hot 0/1 column for every label found in 'tags'.

    Mutates and returns `labels_df`.
    """
    for label in labels_from(labels_df):
        labels_df[label] = labels_df['tags'].apply(
            lambda tags: 1 if label in tags.split(' ') else 0)
    return labels_df
def to_multi_label_dict(enriched_labels_df):
    """Map each image name to its one-hot encoded label vector."""
    encoded = enriched_labels_df.set_index('image_name').drop('tags', axis=1)
    return {name: row for name, row in zip(encoded.index, encoded.values)}
def get_file_count(folderpath):
    """ Returns the number of files in a folder.

    `folderpath` is a glob pattern (e.g. '/data/train/*.jpg').
    """
    matched = glob.glob(folderpath)
    return len(matched)
def threshold_prediction(pred_y, threshold=0.5):
    """Binarise predictions: True where pred_y exceeds threshold.

    TODO: the threshold may need tuning per label.
    """
    return threshold < pred_y
def read_images(filepath, filenames):
    """ Read images in batches.

    Loads each '<name>.jpg' under `filepath`, resizes to 224x224, applies
    the ImageNet preprocessing, and stacks the results into one array.
    """
    batch = []
    for name in filenames:
        img = image.load_img(os.path.join(filepath, name + '.jpg'),
                             target_size=(224, 224))
        arr = np.expand_dims(image.img_to_array(img), axis=0)
        batch.append(preprocess_input(arr))
    return np.concatenate(batch)
def chunks(l, n):
    """Yield successive slices of `l` of length at most `n`."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def featurise_images(model, filepath, nameformat, num_iter, batch_size=32, desc=None):
    """ Use DL model to featurise images.

    Parameters:
        model: Keras model used via predict_on_batch.
        filepath (str): Folder holding the '<name>.jpg' files.
        nameformat (str): Format string producing a file stem from an index.
        num_iter (iterable): Image indices to featurise.
        batch_size (int): Images per prediction batch.
        desc (str): Progress-bar description.
    Returns:
        (np.ndarray, list): Feature array and the corresponding file stems.
    """
    features = list()
    img_names = list()
    num_list = list(num_iter)
    # FIX: np.ceil returns a float; tqdm's `total` should be an int.
    num_batches = int(np.ceil(len(num_list) / batch_size))
    for num_chunk in tqdm(chunks(num_list, batch_size), total=num_batches, desc=desc):
        filenames = [nameformat.format(index) for index in num_chunk]
        batch_images = read_images(filepath, filenames)
        img_names.extend(filenames)
        # NOTE(review): .squeeze() would collapse the batch axis for a
        # final batch of size 1, making extend() iterate the feature axis
        # instead of the batch axis — confirm batch sizes before relying
        # on this with len(num_list) % batch_size == 1.
        features.extend(model.predict_on_batch(batch_images).squeeze())
    return np.array(features), img_names
def generate_validation_files(train_path, val_path, num_train=35000):
    """ Creates the validation files from the train files.

    Moves 'train_<i>.jpg' for i in [num_train, total) from `train_path`
    into `val_path`, keeping the first `num_train` images for training.

    FIX: the original shelled out with bash brace expansion
    ('mv .../train_{a..b}.jpg'), which is silently a no-op when /bin/sh is
    not bash (os.system uses sh), and its inclusive range also addressed
    one index past the last existing file.  A shutil.move loop is portable
    and exact.
    """
    num_train_ini = len(glob.glob(os.path.join(train_path, '*.jpg')))
    assert num_train_ini > num_train
    for idx in range(num_train, num_train_ini):
        src = os.path.join(train_path, 'train_{}.jpg'.format(idx))
        if os.path.isfile(src):  # tolerate gaps in the numbering
            shutil.move(src, val_path)
| gbm-bench-master | 3rdparty/fast_retraining/experiments/libs/planet_kaggle.py |
from sklearn.metrics import (confusion_matrix, accuracy_score, roc_auc_score, f1_score, log_loss, precision_score,
recall_score, mean_squared_error, mean_absolute_error, r2_score)
import numpy as np
def classification_metrics_binary(y_true, y_pred):
    """Returns a report with different metrics for a binary classification problem.

    - Accuracy: ratio of correct predictions over all predictions.
    - Confusion matrix: C_ij counts observations known to be in group i but
      predicted in group j (TN=C_00, FN=C_10, TP=C_11, FP=C_01).
    - Precision: TP / (TP + FP) — ability not to label a negative as positive.
    - Recall: TP / (TP + FN) — ability to find all positive samples.
    - F1: harmonic mean of precision and recall,
      2 * precision * recall / (precision + recall).

    Args:
        y_true (list or array): True labels.
        y_pred (list or array): Predicted labels (binary).
    Returns:
        report (dict): Dictionary with metrics.
    Examples:
        >>> from collections import OrderedDict
        >>> y_true = [0,1,0,0,1]
        >>> y_pred = [0,1,0,1,1]
        >>> result = classification_metrics_binary(y_true, y_pred)
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('Accuracy', 0.8), ('Confusion Matrix', array([[2, 1],
               [0, 2]])), ('F1', 0.8), ('Precision', 0.6666666666666666), ('Recall', 1.0)])
    """
    return {
        'Accuracy': accuracy_score(y_true, y_pred),
        'Precision': precision_score(y_true, y_pred),
        'Recall': recall_score(y_true, y_pred),
        'F1': f1_score(y_true, y_pred),
        'Confusion Matrix': confusion_matrix(y_true, y_pred),
    }
def classification_metrics_multilabel(y_true, y_pred, labels):
    """Returns a report with different metrics for a multilabel classification problem.

    - Accuracy: ratio of correct predictions over all predictions.
    - Confusion matrix: true predictions on the diagonal, false predictions
      off-diagonal.
    - Precision / Recall / F1: weighted averages over the given labels.

    Args:
        y_true (list or array): True labels.
        y_pred (list or array): Predicted labels.
        labels (list): Label index or name.
    Returns:
        report (dict): Dictionary with metrics.
    Examples:
        >>> from collections import OrderedDict
        >>> y_true = [0,1,2,0,1]
        >>> y_pred = [0,1,0,1,1]
        >>> result = classification_metrics_multilabel(y_true, y_pred, [0,1,2])
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('Accuracy', 0.6), ('Confusion Matrix', array([[1, 1, 0],
               [0, 2, 0],
               [1, 0, 0]])), ('F1', 0.52), ('Precision', 0.4666666666666666), ('Recall', 0.6)])
    """
    m_acc = accuracy_score(y_true, y_pred)
    # FIX: scikit-learn made `labels` (and the parameters after y_pred)
    # keyword-only as of 1.0; passing them positionally now raises a
    # TypeError.
    m_f1 = f1_score(y_true, y_pred, labels=labels, average='weighted')
    m_precision = precision_score(y_true, y_pred, labels=labels, average='weighted')
    m_recall = recall_score(y_true, y_pred, labels=labels, average='weighted')
    m_conf = confusion_matrix(y_true, y_pred, labels=labels)
    report = {'Accuracy': m_acc, 'Precision': m_precision, 'Recall': m_recall, 'F1': m_f1, 'Confusion Matrix': m_conf}
    return report
def classification_metrics_binary_prob(y_true, y_prob):
    """Report probability-based metrics for a binary classification problem.

    - AUC: The Area Under the ROC Curve measures the ability to discriminate
      between the positive and negative classes. 1 is a perfect score and 0.5
      is equivalent to random guessing.
    - Log loss: Also known as logistic regression loss or cross-entropy loss.
      It penalizes confident but incorrect probabilistic predictions, so it
      rewards well-calibrated classifiers.

    Args:
        y_true (list or array): True labels.
        y_prob (list or array): Predicted labels (probability).
    Returns:
        report (dict): Dictionary with metrics.
    Examples:
        >>> from collections import OrderedDict
        >>> y_true = [0,1,0,0,1]
        >>> y_prob = [0.2,0.7,0.4,0.3,0.2]
        >>> result = classification_metrics_binary_prob(y_true, y_prob)
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('AUC', 0.5833333333333333), ('Log loss', 0.6113513950783531)])
        >>> y_prob = [0.2,0.7,0.4,0.3,0.3]
        >>> result = classification_metrics_binary_prob(y_true, y_prob)
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('AUC', 0.75), ('Log loss', 0.5302583734567203)])
    """
    # Both metrics consume raw probabilities, not thresholded labels.
    return {
        'AUC': roc_auc_score(y_true, y_prob),
        'Log loss': log_loss(y_true, y_prob),
    }
def regression_metrics(y_true, y_pred):
    """Report standard error metrics for a regression problem.

    - Mean Squared Error: MSE is the expected value of the squared (quadratic)
      error. It weights outliers heavily.
    - Mean Absolute Error: MAE is the expected value of the absolute error
      (L1 loss). It is less sensitive to outliers than MSE.
    - R Square: R2 measures how close the data are to the fitted regression
      line. The best possible score is 1.0; it can be negative (an arbitrarily
      bad model). A score of 0 means the variables are not linearly correlated.
    - Root Mean Squared Error: RMSE is the square root of MSE, so it is in the
      same units as the target while still penalizing large errors.

    Args:
        y_true (list or array): True values.
        y_pred (list or array): Predicted values.
    Returns:
        report (dict): Dictionary with metrics.
    Examples:
        >>> from collections import OrderedDict
        >>> y_true = [5,1,0,7,1]
        >>> y_pred = [6,0.7,0.4,10,20]
        >>> result = regression_metrics(y_true, y_pred)
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('MAE', 4.74), ('MSE', 74.25), ('R2', -9.088315217391303), ('RMSE', 8.616843969807043)])
        >>> y_true = [5,1,0,7,1]
        >>> y_pred = [6,0.7,0.4,10,2]
        >>> result = regression_metrics(y_true, y_pred)
        >>> OrderedDict(sorted(result.items()))
        OrderedDict([('MAE', 1.1400000000000001), ('MSE', 2.25), ('R2', 0.6942934782608696), ('RMSE', 1.5)])
    """
    # MSE is computed once and reused so RMSE is derived from the same value.
    squared_err = mean_squared_error(y_true, y_pred)
    return {
        'MSE': squared_err,
        'MAE': mean_absolute_error(y_true, y_pred),
        'R2': r2_score(y_true, y_pred),
        'RMSE': np.sqrt(squared_err),
    }
def precision_at_k(y_true, y_pred, k=None):
"""Precision at K.
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): precision at k (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> precision_at_k(y_true, y_pred, k=3)
1.0
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> precision_at_k(y_true, y_pred, k=3)
0.3333333333333333
"""
predictions = y_pred[:k]
num_hit = len(set(predictions).intersection(set(y_true)))
return float(num_hit) / len(predictions)
def recall_at_k(y_true, y_pred, k=None):
"""Recall at K.
Args:
y_true (list or array): True values.
y_pred (list or array): Predicted values.
k (int): Limit of predicted values.
Returns:
result (float): recall at k (max=1, min=0)
Examples:
>>> y_true = [5,1,0,7,2]
>>> y_pred = [2,5,0,1,7]
>>> recall_at_k(y_true, y_pred, k=3)
0.6
>>> y_true = np.array([5,1,0,7,2])
>>> y_pred = np.array([9,0,8,1,7])
>>> recall_at_k(y_true, y_pred, k=3)
0.2
"""
predictions = y_pred[:k]
num_hit = len(set(predictions).intersection(set(y_true)))
return float(num_hit) / len(y_true)
def discounted_cumulative_gain(y_true, y_pred, k=None):
    """Discounted Cumulative Gain (DCG).

    Sums the true relevance of the top-k items (ranked by predicted score),
    discounting each position i by log2(i + 2).
    Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain

    Args:
        y_true (list or array): True values.
        y_pred (list or array): Predicted values.
        k (int): Limit of predicted values.
    Returns:
        result (float): DCG
    Examples:
        >>> y_true = [5,1,0,7,2]
        >>> y_pred = [2,5,0,1,7]
        >>> discounted_cumulative_gain(y_true, y_pred, k=3)
        5.130929753571458
        >>> y_true = np.array([5,1,0,7,2])
        >>> y_pred = np.array([9,0,8,1,7])
        >>> discounted_cumulative_gain(y_true, y_pred, k=3)
        6.0
    """
    # Rank items by descending predicted score, keep the top k.
    ranked = np.argsort(y_pred)[::-1][:k]
    gains = np.take(y_true, ranked)
    # Position discounts: log2(2), log2(3), ...
    discounts = np.log2(np.arange(gains.shape[0]) + 2)
    return np.sum(gains / discounts)
def exponential_discounted_cumulative_gain(y_true, y_pred, k=None):
    """Exponential Discounted Cumulative Gain (eDCG).

    Like DCG but each position contributes (2**relevance - 1) instead of the
    raw relevance, emphasizing highly relevant items.
    Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain

    Args:
        y_true (list or array): True values.
        y_pred (list or array): Predicted values.
        k (int): Limit of predicted values.
    Returns:
        result (float): eDCG
    Examples:
        >>> y_true = [5,1,0,7,2]
        >>> y_pred = [2,5,0,1,7]
        >>> exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
        19.130929753571458
        >>> y_true = np.array([5,1,0,7,2])
        >>> y_pred = np.array([9,0,8,1,7])
        >>> exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
        32.5
    """
    # NOTE: the second doctest value was previously 32.0, which does not match
    # the implementation: top-3 gains are [5, 0, 2], so the result is
    # (2**5-1)/log2(2) + (2**0-1)/log2(3) + (2**2-1)/log2(4) = 31 + 0 + 1.5 = 32.5
    # (consistent with the 0.21950735... value in the nDCG doctest below).
    order = np.argsort(y_pred)[::-1]
    y_true = np.take(y_true, order[:k])
    return ((2 ** y_true - 1) / np.log2(np.arange(y_true.shape[0]) + 2)).sum()
def normalized_discounted_cumulative_gain(y_true, y_pred, k=None):
    """Normalized Discounted Cumulative Gain (nDCG).

    DCG of the predicted ranking divided by the ideal DCG (the DCG obtained by
    ranking the true values against themselves).
    Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain

    Args:
        y_true (list or array): True values.
        y_pred (list or array): Predicted values.
        k (int): Limit of predicted values.
    Returns:
        result (float): nDCG (max=1, min=0)
    Examples:
        >>> y_true = [5,1,0,7,2]
        >>> y_pred = [2,5,0,1,7]
        >>> normalized_discounted_cumulative_gain(y_true, y_pred, k=3)
        0.4599812921368268
        >>> y_true = np.array([5,1,0,7,2])
        >>> y_pred = np.array([9,0,8,1,7])
        >>> normalized_discounted_cumulative_gain(y_true, y_pred, k=3)
        0.537892328558952
    """
    actual = discounted_cumulative_gain(y_true, y_pred, k)
    # Ideal DCG: ranking the truth by itself gives the best achievable score.
    ideal = discounted_cumulative_gain(y_true, y_true, k)
    return actual / ideal
def normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=None):
    """Normalized Exponential Discounted Cumulative Gain (neDCG).

    eDCG of the predicted ranking divided by the ideal eDCG (the eDCG obtained
    by ranking the true values against themselves).
    Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain

    Args:
        y_true (list or array): True values.
        y_pred (list or array): Predicted values.
        k (int): Limit of predicted values.
    Returns:
        result (float): neDCG (max=1, min=0)
    Examples:
        >>> y_true = [5,1,0,7,2]
        >>> y_pred = [2,5,0,1,7]
        >>> normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
        0.1292116839006246
        >>> y_true = np.array([5,1,0,7,2])
        >>> y_pred = np.array([9,0,8,1,7])
        >>> normalized_exponential_discounted_cumulative_gain(y_true, y_pred, k=3)
        0.21950735175253772
    """
    # Parameter renamed from `y` to `y_true` for consistency with the sibling
    # metrics and with the documented signature (the docstring already said
    # `y_true`); positional callers are unaffected.
    return (exponential_discounted_cumulative_gain(y_true, y_pred, k)
            / exponential_discounted_cumulative_gain(y_true, y_true, k))
| gbm-bench-master | 3rdparty/codebase/python/machine_learning/metrics.py |
#!/usr/bin/env python3
#
# This script computes a table that maps each byte to its bitwise reverse.
def reverse_byte(v):
    """Return the 8-bit value `v` with its bit order reversed."""
    result = 0
    for bit in range(8):
        if v & (1 << bit):
            # Bit `bit` maps to mirrored position `7 - bit`.
            result |= 1 << (7 - bit)
    return result
# Emit the 256-entry table as a C array, 8 hex bytes per line.
tab = [reverse_byte(v) for v in range(256)]
print('static const u8 bitreverse_tab[256] = {')
for row_start in range(0, len(tab), 8):
    row = tab[row_start:row_start + 8]
    # Each entry keeps its trailing comma; entries are separated by one space.
    print('\t' + ' '.join(f'0x{v:02x},' for v in row))
print('};')
| libdeflate-master | scripts/gen_bitreverse_tab.py |
Subsets and Splits