diff --git a/HPC_setup/mlt_modules_hdfml.sh b/HPC_setup/mlt_modules_hdfml.sh
index d99000097ca7958d0269f79c64970bcc8ca39607..0105b75929fd069aaf39ad4689ae9a8ed8356e34 100644
--- a/HPC_setup/mlt_modules_hdfml.sh
+++ b/HPC_setup/mlt_modules_hdfml.sh
@@ -8,17 +8,14 @@
 module --force purge
 module use $OTHERSTAGES
 
-ml Stages/2019a
-ml GCCcore/.8.3.0
-ml Python/3.6.8
-ml TensorFlow/1.13.1-GPU-Python-3.6.8
-ml Keras/2.2.4-GPU-Python-3.6.8
-ml SciPy-Stack/2019a-Python-3.6.8
-# ml dask/1.1.5-Python-3.6.8
-ml dask/2.12.0-Python-3.6.8
-ml GEOS/3.7.1-Python-3.6.8
-ml Graphviz/2.40.1
-
-
+ml Stages/2020
+ml GCCcore/.10.3.0
+ml Jupyter/2021.3.1-Python-3.8.5
+ml Python/3.8.5
+ml TensorFlow/2.5.0-Python-3.8.5
+ml SciPy-Stack/2021-Python-3.8.5
+ml dask/2.22.0-Python-3.8.5
+ml GEOS/3.8.1-Python-3.8.5
+ml Graphviz/2.44.1
\ No newline at end of file
diff --git a/HPC_setup/mlt_modules_juwels.sh b/HPC_setup/mlt_modules_juwels.sh
index 01eecbab617f7b3042222e24e562901b302d401e..ffacfe6fc45302dfa60b108ca2493d9a27408df1 100755
--- a/HPC_setup/mlt_modules_juwels.sh
+++ b/HPC_setup/mlt_modules_juwels.sh
@@ -8,14 +8,13 @@
 module --force purge
 module use $OTHERSTAGES
 
-ml Stages/2019a
-ml GCCcore/.8.3.0
+ml Stages/2020
+ml GCCcore/.10.3.0
 
-ml Jupyter/2019a-Python-3.6.8
-ml Python/3.6.8
-ml TensorFlow/1.13.1-GPU-Python-3.6.8
-ml Keras/2.2.4-GPU-Python-3.6.8
-ml SciPy-Stack/2019a-Python-3.6.8
-ml dask/1.1.5-Python-3.6.8
-ml GEOS/3.7.1-Python-3.6.8
-ml Graphviz/2.40.1
+ml Jupyter/2021.3.1-Python-3.8.5
+ml Python/3.8.5
+ml TensorFlow/2.5.0-Python-3.8.5
+ml SciPy-Stack/2021-Python-3.8.5
+ml dask/2.22.0-Python-3.8.5
+ml GEOS/3.8.1-Python-3.8.5
+ml Graphviz/2.44.1
\ No newline at end of file
diff --git a/HPC_setup/requirements_HDFML_additionals.txt b/HPC_setup/requirements_HDFML_additionals.txt
index a0bbb3b21daaa5181d5180b22d69c40652e387d7..f55466bb25b376df3390b094a092054e19f6cb40 100644
--- a/HPC_setup/requirements_HDFML_additionals.txt
+++ b/HPC_setup/requirements_HDFML_additionals.txt
@@ -1,4 +1,8 @@
+tensorflow==2.5.0
+numpy==1.19.5
+six==1.15.0
 absl-py==0.11.0
+aiohttp==3.7.4
 appdirs==1.4.4
 astor==0.8.1
 astropy==4.1
@@ -7,26 +11,25 @@
 cached-property==1.5.2
 certifi==2020.12.5
 cftime==1.4.1
-chardet==4.0.0
+cchardet==2.1.7
 coverage==5.4
 cycler==0.10.0
-dask==2021.2.0
+dask==2.22.0
 dill==0.3.3
 fsspec==0.8.5
 gast==0.4.0
-grpcio==1.35.0
-h5py==2.10.0
+grpcio==1.34.0
+h5py==3.1.0
 idna==2.10
 importlib-metadata==3.4.0
 iniconfig==1.1.1
-
+ipython==7.28.0
 kiwisolver==1.3.1
 locket==0.2.1
 Markdown==3.3.3
 matplotlib==3.3.4
 mock==4.0.3
 netCDF4==1.5.5.1
-numpy==1.19.5
 ordered-set==4.0.2
 packaging==20.9
 pandas==1.1.5
@@ -35,6 +38,7 @@
 Pillow==8.1.0
 pluggy==0.13.1
 protobuf==3.15.0
+psutil==5.8.0
 py==1.10.0
 pydot==1.4.2
 pyparsing==2.4.7
@@ -44,15 +48,14 @@
 pytest-html==3.1.1
 pytest-lazy-fixture==0.6.3
 pytest-metadata==1.11.0
-pytest-sugar
+pytest-sugar==0.9.4
 python-dateutil==2.8.1
 pytz==2021.1
 PyYAML==5.4.1
 requests==2.25.1
-scipy==1.5.4
+scipy==1.5.2
 seaborn==0.11.1
 --no-binary shapely Shapely==1.7.0
-six==1.15.0
 statsmodels==0.12.2
 tabulate==0.8.8
 termcolor==1.1.0
@@ -65,3 +68,5 @@
 wget==3.2
 xarray==0.16.2
 zipp==3.4.0
+
+#Cartopy==0.18.0
\ No newline at end of file
diff --git a/HPC_setup/requirements_JUWELS_additionals.txt b/HPC_setup/requirements_JUWELS_additionals.txt
index a0bbb3b21daaa5181d5180b22d69c40652e387d7..ef0b752375a4174a0cccb200a59eb7134087b74f 100644
--- a/HPC_setup/requirements_JUWELS_additionals.txt
+++ b/HPC_setup/requirements_JUWELS_additionals.txt
@@ -1,59 +1,13 @@
-absl-py==0.11.0
-appdirs==1.4.4
-astor==0.8.1
 astropy==4.1
-attrs==20.3.0
 bottleneck==1.3.2
 cached-property==1.5.2
-certifi==2020.12.5
-cftime==1.4.1
-chardet==4.0.0
-coverage==5.4
-cycler==0.10.0
-dask==2021.2.0
-dill==0.3.3
-fsspec==0.8.5
-gast==0.4.0
-grpcio==1.35.0
-h5py==2.10.0
-idna==2.10
-importlib-metadata==3.4.0
 iniconfig==1.1.1
-
-kiwisolver==1.3.1
-locket==0.2.1
-Markdown==3.3.3
-matplotlib==3.3.4
-mock==4.0.3
-netCDF4==1.5.5.1
-numpy==1.19.5
 ordered-set==4.0.2
-packaging==20.9
-pandas==1.1.5
-partd==1.1.0
-patsy==0.5.1
-Pillow==8.1.0
-pluggy==0.13.1
-protobuf==3.15.0
-py==1.10.0
-pydot==1.4.2
-pyparsing==2.4.7
 pyshp==2.1.3
-pytest==6.2.2
-pytest-cov==2.11.1
 pytest-html==3.1.1
 pytest-lazy-fixture==0.6.3
 pytest-metadata==1.11.0
-pytest-sugar
-python-dateutil==2.8.1
-pytz==2021.1
-PyYAML==5.4.1
-requests==2.25.1
-scipy==1.5.4
-seaborn==0.11.1
---no-binary shapely Shapely==1.7.0
-six==1.15.0
-statsmodels==0.12.2
+pytest-sugar==0.9.4
 tabulate==0.8.8
 termcolor==1.1.0
 toml==0.10.2
@@ -63,5 +17,6 @@
 urllib3==1.26.3
 Werkzeug==1.0.1
 wget==3.2
-xarray==0.16.2
-zipp==3.4.0
+--no-binary shapely Shapely==1.7.0
+
+#Cartopy==0.18.0
diff --git a/HPC_setup/setup_venv_juwels.sh b/HPC_setup/setup_venv_juwels.sh
index b33a0baf8e89ec477f3278b9ae6088a021f6bba8..ba44900ee2db3e3cde63b4d38c05e643eb154d5c 100755
--- a/HPC_setup/setup_venv_juwels.sh
+++ b/HPC_setup/setup_venv_juwels.sh
@@ -29,10 +29,8 @@
 echo "##### START INSTALLING requirements_JUWELS_additionals.txt #####"
 pip install -r ${cur}/requirements_JUWELS_additionals.txt
 echo "##### FINISH INSTALLING requirements_JUWELS_additionals.txt #####"
 
-pip install -r ${cur}/requirements_JUWELS_additionals.txt
-pip install netcdf4
-pip install --ignore-installed matplotlib==3.2.0
-pip install --ignore-installed pandas==1.0.1
+# pip install --ignore-installed matplotlib==3.2.0
+# pip install --ignore-installed pandas==1.0.1
 pip install -U typing_extensions
 
 python -m pip install --ignore-installed "dask[complete]==2021.3.0"
diff --git a/mlair/data_handler/iterator.py b/mlair/data_handler/iterator.py
index 564bf3bfd6e4f5b814c9d090733cfbfbf26a850b..f2e3b689512ee99524eef8445f84a5a3bdb60f90 100644
--- a/mlair/data_handler/iterator.py
+++ b/mlair/data_handler/iterator.py
@@ -3,7 +3,7 @@
 __author__ = 'Lukas Leufen'
 __date__ = '2020-07-07'
 
 from collections import Iterator, Iterable
-import keras
+import tensorflow.keras as keras
 import numpy as np
 import math
 import os
diff --git a/mlair/helpers/__init__.py b/mlair/helpers/__init__.py
index 4671334c16267be819ab8ee0ad96b7135ee01531..bb30a594fca5b5b161571d2b3485b48467018900 100644
--- a/mlair/helpers/__init__.py
+++ b/mlair/helpers/__init__.py
@@ -3,4 +3,4 @@
 from .testing import PyTestRegex, PyTestAllEqual
 from .time_tracking import TimeTracking, TimeTrackingWrapper
 from .logger import Logger
-from .helpers import remove_items, float_round, dict_to_xarray, to_list, extract_value, select_from_dict
+from .helpers import remove_items, float_round, dict_to_xarray, to_list, extract_value, select_from_dict, make_keras_pickable
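Side note on the iterator.py hunk above: the untouched context line `from collections import Iterator, Iterable` only emits a DeprecationWarning on the Python 3.8.5 targeted by this migration, but it stops working on Python 3.10+. A future-proof spelling would be (a sketch, not part of this diff):

    # the ABCs live in collections.abc since Python 3.3; the bare
    # `from collections import Iterator` import is removed in Python 3.10
    from collections.abc import Iterator, Iterable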
diff --git a/mlair/helpers/helpers.py b/mlair/helpers/helpers.py
index ccf0250b0ac70e77b7b456b05883fbc7b8d2d34b..2f25972cf8490f5dbe0eaebd53f5b530a34d7914 100644
--- a/mlair/helpers/helpers.py
+++ b/mlair/helpers/helpers.py
@@ -18,6 +18,43 @@
 import dask.array as da
 
 from typing import Dict, Callable, Union, List, Any, Tuple
 
+from tensorflow.keras.models import Model
+from tensorflow.python.keras.layers import deserialize, serialize
+from tensorflow.python.keras.saving import saving_utils
+
+"""
+The following code is copied from: https://github.com/tensorflow/tensorflow/issues/34697#issuecomment-627193883
+and is a hotfix to make keras Model objects serializable/picklable.
+"""
+
+
+def unpack(model, training_config, weights):
+    restored_model = deserialize(model)
+    if training_config is not None:
+        restored_model.compile(
+            **saving_utils.compile_args_from_training_config(
+                training_config
+            )
+        )
+    restored_model.set_weights(weights)
+    return restored_model
+
+
+# Hotfix function
+def make_keras_pickable():
+
+    def __reduce__(self):
+        model_metadata = saving_utils.model_metadata(self)
+        training_config = model_metadata.get("training_config", None)
+        model = serialize(self)
+        weights = self.get_weights()
+        return (unpack, (model, training_config, weights))
+
+    cls = Model
+    cls.__reduce__ = __reduce__
+
+
+# end of hotfix
+
 
 def to_list(obj: Any) -> List:
     """
diff --git a/mlair/keras_legacy/__init__.py b/mlair/keras_legacy/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
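The hotfix above patches `Model.__reduce__` so that the standard `pickle`/`dill` machinery can round-trip compiled keras models, which TF 2.5 does not support out of the box. A minimal sketch of the intended effect (the toy model is illustrative, not part of MLAir):

    import pickle
    import tensorflow.keras as keras
    from mlair.helpers import make_keras_pickable

    make_keras_pickable()  # patch Model.__reduce__ once, e.g. at startup

    model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="adam", loss="mse")

    # round-trip through pickle only works after the patch: weights,
    # architecture and training config are restored via unpack()
    restored = pickle.loads(pickle.dumps(model))
    assert restored.get_weights()[0].shape == (4, 1)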
+ """ + kernel = np.asarray(kernel) + if not 3 <= kernel.ndim <= 5: + raise ValueError('Invalid kernel shape:', kernel.shape) + slices = [slice(None, None, -1) for _ in range(kernel.ndim)] + no_flip = (slice(None, None), slice(None, None)) + slices[-2:] = no_flip + return np.copy(kernel[slices]) + + +def conv_output_length(input_length, filter_size, + padding, stride, dilation=1): + """Determines output length of a convolution given input length. + + # Arguments + input_length: integer. + filter_size: integer. + padding: one of `"same"`, `"valid"`, `"full"`. + stride: integer. + dilation: dilation rate, integer. + + # Returns + The output length (integer). + """ + if input_length is None: + return None + assert padding in {'same', 'valid', 'full', 'causal'} + dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1) + if padding == 'same': + output_length = input_length + elif padding == 'valid': + output_length = input_length - dilated_filter_size + 1 + elif padding == 'causal': + output_length = input_length + elif padding == 'full': + output_length = input_length + dilated_filter_size - 1 + return (output_length + stride - 1) // stride + + +def conv_input_length(output_length, filter_size, padding, stride): + """Determines input length of a convolution given output length. + + # Arguments + output_length: integer. + filter_size: integer. + padding: one of `"same"`, `"valid"`, `"full"`. + stride: integer. + + # Returns + The input length (integer). + """ + if output_length is None: + return None + assert padding in {'same', 'valid', 'full'} + if padding == 'same': + pad = filter_size // 2 + elif padding == 'valid': + pad = 0 + elif padding == 'full': + pad = filter_size - 1 + return (output_length - 1) * stride - 2 * pad + filter_size + + +def deconv_length(dim_size, stride_size, kernel_size, padding, + output_padding, dilation=1): + """Determines output length of a transposed convolution given input length. + + # Arguments + dim_size: Integer, the input length. + stride_size: Integer, the stride along the dimension of `dim_size`. + kernel_size: Integer, the kernel size along the dimension of + `dim_size`. + padding: One of `"same"`, `"valid"`, `"full"`. + output_padding: Integer, amount of padding along the output dimension, + Can be set to `None` in which case the output length is inferred. + dilation: dilation rate, integer. + + # Returns + The output length (integer). + """ + assert padding in {'same', 'valid', 'full'} + if dim_size is None: + return None + + # Get the dilated kernel size + kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1) + + # Infer length if output padding is None, else compute the exact length + if output_padding is None: + if padding == 'valid': + dim_size = dim_size * stride_size + max(kernel_size - stride_size, 0) + elif padding == 'full': + dim_size = dim_size * stride_size - (stride_size + kernel_size - 2) + elif padding == 'same': + dim_size = dim_size * stride_size + else: + if padding == 'same': + pad = kernel_size // 2 + elif padding == 'valid': + pad = 0 + elif padding == 'full': + pad = kernel_size - 1 + + dim_size = ((dim_size - 1) * stride_size + kernel_size - 2 * pad + + output_padding) + + return dim_size diff --git a/mlair/keras_legacy/interfaces.py b/mlair/keras_legacy/interfaces.py new file mode 100644 index 0000000000000000000000000000000000000000..45a0e310cda87df3b3af238dc83405878b0d4746 --- /dev/null +++ b/mlair/keras_legacy/interfaces.py @@ -0,0 +1,668 @@ +"""Interface converters for Keras 1 support in Keras 2. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six +import warnings +import functools +import numpy as np + + +def generate_legacy_interface(allowed_positional_args=None, + conversions=None, + preprocessor=None, + value_conversions=None, + object_type='class'): + if allowed_positional_args is None: + check_positional_args = False + else: + check_positional_args = True + allowed_positional_args = allowed_positional_args or [] + conversions = conversions or [] + value_conversions = value_conversions or [] + + def legacy_support(func): + @six.wraps(func) + def wrapper(*args, **kwargs): + if object_type == 'class': + object_name = args[0].__class__.__name__ + else: + object_name = func.__name__ + if preprocessor: + args, kwargs, converted = preprocessor(args, kwargs) + else: + converted = [] + if check_positional_args: + if len(args) > len(allowed_positional_args) + 1: + raise TypeError('`' + object_name + + '` can accept only ' + + str(len(allowed_positional_args)) + + ' positional arguments ' + + str(tuple(allowed_positional_args)) + + ', but you passed the following ' + 'positional arguments: ' + + str(list(args[1:]))) + for key in value_conversions: + if key in kwargs: + old_value = kwargs[key] + if old_value in value_conversions[key]: + kwargs[key] = value_conversions[key][old_value] + for old_name, new_name in conversions: + if old_name in kwargs: + value = kwargs.pop(old_name) + if new_name in kwargs: + raise_duplicate_arg_error(old_name, new_name) + kwargs[new_name] = value + converted.append((new_name, old_name)) + if converted: + signature = '`' + object_name + '(' + for i, value in enumerate(args[1:]): + if isinstance(value, six.string_types): + signature += '"' + value + '"' + else: + if isinstance(value, np.ndarray): + str_val = 'array' + else: + str_val = str(value) + if len(str_val) > 10: + str_val = str_val[:10] + '...' + signature += str_val + if i < len(args[1:]) - 1 or kwargs: + signature += ', ' + for i, (name, value) in enumerate(kwargs.items()): + signature += name + '=' + if isinstance(value, six.string_types): + signature += '"' + value + '"' + else: + if isinstance(value, np.ndarray): + str_val = 'array' + else: + str_val = str(value) + if len(str_val) > 10: + str_val = str_val[:10] + '...' + signature += str_val + if i < len(kwargs) - 1: + signature += ', ' + signature += ')`' + warnings.warn('Update your `' + object_name + '` call to the ' + + 'Keras 2 API: ' + signature, stacklevel=2) + return func(*args, **kwargs) + wrapper._original_function = func + return wrapper + return legacy_support + + +generate_legacy_method_interface = functools.partial(generate_legacy_interface, + object_type='method') + + +def raise_duplicate_arg_error(old_arg, new_arg): + raise TypeError('For the `' + new_arg + '` argument, ' + 'the layer received both ' + 'the legacy keyword argument ' + '`' + old_arg + '` and the Keras 2 keyword argument ' + '`' + new_arg + '`. 
Stick to the latter!') + + +legacy_dense_support = generate_legacy_interface( + allowed_positional_args=['units'], + conversions=[('output_dim', 'units'), + ('init', 'kernel_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('W_constraint', 'kernel_constraint'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')]) + +legacy_dropout_support = generate_legacy_interface( + allowed_positional_args=['rate', 'noise_shape', 'seed'], + conversions=[('p', 'rate')]) + + +def embedding_kwargs_preprocessor(args, kwargs): + converted = [] + if 'dropout' in kwargs: + kwargs.pop('dropout') + warnings.warn('The `dropout` argument is no longer support in `Embedding`. ' + 'You can apply a `keras.layers.SpatialDropout1D` layer ' + 'right after the `Embedding` layer to get the same behavior.', + stacklevel=3) + return args, kwargs, converted + +legacy_embedding_support = generate_legacy_interface( + allowed_positional_args=['input_dim', 'output_dim'], + conversions=[('init', 'embeddings_initializer'), + ('W_regularizer', 'embeddings_regularizer'), + ('W_constraint', 'embeddings_constraint')], + preprocessor=embedding_kwargs_preprocessor) + +legacy_pooling1d_support = generate_legacy_interface( + allowed_positional_args=['pool_size', 'strides', 'padding'], + conversions=[('pool_length', 'pool_size'), + ('stride', 'strides'), + ('border_mode', 'padding')]) + +legacy_prelu_support = generate_legacy_interface( + allowed_positional_args=['alpha_initializer'], + conversions=[('init', 'alpha_initializer')]) + + +legacy_gaussiannoise_support = generate_legacy_interface( + allowed_positional_args=['stddev'], + conversions=[('sigma', 'stddev')]) + + +def recurrent_args_preprocessor(args, kwargs): + converted = [] + if 'forget_bias_init' in kwargs: + if kwargs['forget_bias_init'] == 'one': + kwargs.pop('forget_bias_init') + kwargs['unit_forget_bias'] = True + converted.append(('forget_bias_init', 'unit_forget_bias')) + else: + kwargs.pop('forget_bias_init') + warnings.warn('The `forget_bias_init` argument ' + 'has been ignored. Use `unit_forget_bias=True` ' + 'instead to initialize with ones.', stacklevel=3) + if 'input_dim' in kwargs: + input_length = kwargs.pop('input_length', None) + input_dim = kwargs.pop('input_dim') + input_shape = (input_length, input_dim) + kwargs['input_shape'] = input_shape + converted.append(('input_dim', 'input_shape')) + warnings.warn('The `input_dim` and `input_length` arguments ' + 'in recurrent layers are deprecated. 
' + 'Use `input_shape` instead.', stacklevel=3) + return args, kwargs, converted + +legacy_recurrent_support = generate_legacy_interface( + allowed_positional_args=['units'], + conversions=[('output_dim', 'units'), + ('init', 'kernel_initializer'), + ('inner_init', 'recurrent_initializer'), + ('inner_activation', 'recurrent_activation'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('U_regularizer', 'recurrent_regularizer'), + ('dropout_W', 'dropout'), + ('dropout_U', 'recurrent_dropout'), + ('consume_less', 'implementation')], + value_conversions={'consume_less': {'cpu': 0, + 'mem': 1, + 'gpu': 2}}, + preprocessor=recurrent_args_preprocessor) + +legacy_gaussiandropout_support = generate_legacy_interface( + allowed_positional_args=['rate'], + conversions=[('p', 'rate')]) + +legacy_pooling2d_support = generate_legacy_interface( + allowed_positional_args=['pool_size', 'strides', 'padding'], + conversions=[('border_mode', 'padding'), + ('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_pooling3d_support = generate_legacy_interface( + allowed_positional_args=['pool_size', 'strides', 'padding'], + conversions=[('border_mode', 'padding'), + ('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_global_pooling_support = generate_legacy_interface( + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_upsampling1d_support = generate_legacy_interface( + allowed_positional_args=['size'], + conversions=[('length', 'size')]) + +legacy_upsampling2d_support = generate_legacy_interface( + allowed_positional_args=['size'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_upsampling3d_support = generate_legacy_interface( + allowed_positional_args=['size'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + + +def conv1d_args_preprocessor(args, kwargs): + converted = [] + if 'input_dim' in kwargs: + if 'input_length' in kwargs: + length = kwargs.pop('input_length') + else: + length = None + input_shape = (length, kwargs.pop('input_dim')) + kwargs['input_shape'] = input_shape + converted.append(('input_shape', 'input_dim')) + return args, kwargs, converted + +legacy_conv1d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('filter_length', 'kernel_size'), + ('subsample_length', 'strides'), + ('border_mode', 'padding'), + ('init', 'kernel_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('W_constraint', 'kernel_constraint'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')], + preprocessor=conv1d_args_preprocessor) + + +def conv2d_args_preprocessor(args, kwargs): + converted = [] + if len(args) > 4: + raise TypeError('Layer can receive at most 3 positional arguments.') + elif len(args) == 4: + if isinstance(args[2], int) and isinstance(args[3], int): + new_keywords = ['padding', 'strides', 'data_format'] + for kwd in new_keywords: + if kwd in kwargs: + raise ValueError( + 'It seems that you are 
using the Keras 2 ' + 'and you are passing both `kernel_size` and `strides` ' + 'as integer positional arguments. For safety reasons, ' + 'this is disallowed. Pass `strides` ' + 'as a keyword argument instead.') + kernel_size = (args[2], args[3]) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'nb_row/nb_col')) + elif len(args) == 3 and isinstance(args[2], int): + if 'nb_col' in kwargs: + kernel_size = (args[2], kwargs.pop('nb_col')) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'nb_row/nb_col')) + elif len(args) == 2: + if 'nb_row' in kwargs and 'nb_col' in kwargs: + kernel_size = (kwargs.pop('nb_row'), kwargs.pop('nb_col')) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'nb_row/nb_col')) + elif len(args) == 1: + if 'nb_row' in kwargs and 'nb_col' in kwargs: + kernel_size = (kwargs.pop('nb_row'), kwargs.pop('nb_col')) + kwargs['kernel_size'] = kernel_size + converted.append(('kernel_size', 'nb_row/nb_col')) + return args, kwargs, converted + +legacy_conv2d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('subsample', 'strides'), + ('border_mode', 'padding'), + ('dim_ordering', 'data_format'), + ('init', 'kernel_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('W_constraint', 'kernel_constraint'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + preprocessor=conv2d_args_preprocessor) + + +def separable_conv2d_args_preprocessor(args, kwargs): + converted = [] + if 'init' in kwargs: + init = kwargs.pop('init') + kwargs['depthwise_initializer'] = init + kwargs['pointwise_initializer'] = init + converted.append(('init', 'depthwise_initializer/pointwise_initializer')) + args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs) + return args, kwargs, converted + _converted + +legacy_separable_conv2d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('subsample', 'strides'), + ('border_mode', 'padding'), + ('dim_ordering', 'data_format'), + ('b_regularizer', 'bias_regularizer'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + preprocessor=separable_conv2d_args_preprocessor) + + +def deconv2d_args_preprocessor(args, kwargs): + converted = [] + if len(args) == 5: + if isinstance(args[4], tuple): + args = args[:-1] + converted.append(('output_shape', None)) + if 'output_shape' in kwargs: + kwargs.pop('output_shape') + converted.append(('output_shape', None)) + args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs) + return args, kwargs, converted + _converted + +legacy_deconv2d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('subsample', 'strides'), + ('border_mode', 'padding'), + ('dim_ordering', 'data_format'), + ('init', 'kernel_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('W_constraint', 'kernel_constraint'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + 
preprocessor=deconv2d_args_preprocessor) + + +def conv3d_args_preprocessor(args, kwargs): + converted = [] + if len(args) > 5: + raise TypeError('Layer can receive at most 4 positional arguments.') + if len(args) == 5: + if all([isinstance(x, int) for x in args[2:5]]): + kernel_size = (args[2], args[3], args[4]) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'kernel_dim*')) + elif len(args) == 4 and isinstance(args[3], int): + if isinstance(args[2], int) and isinstance(args[3], int): + new_keywords = ['padding', 'strides', 'data_format'] + for kwd in new_keywords: + if kwd in kwargs: + raise ValueError( + 'It seems that you are using the Keras 2 ' + 'and you are passing both `kernel_size` and `strides` ' + 'as integer positional arguments. For safety reasons, ' + 'this is disallowed. Pass `strides` ' + 'as a keyword argument instead.') + if 'kernel_dim3' in kwargs: + kernel_size = (args[2], args[3], kwargs.pop('kernel_dim3')) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'kernel_dim*')) + elif len(args) == 3: + if all([x in kwargs for x in ['kernel_dim2', 'kernel_dim3']]): + kernel_size = (args[2], + kwargs.pop('kernel_dim2'), + kwargs.pop('kernel_dim3')) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'kernel_dim*')) + elif len(args) == 2: + if all([x in kwargs for x in ['kernel_dim1', 'kernel_dim2', 'kernel_dim3']]): + kernel_size = (kwargs.pop('kernel_dim1'), + kwargs.pop('kernel_dim2'), + kwargs.pop('kernel_dim3')) + args = [args[0], args[1], kernel_size] + converted.append(('kernel_size', 'kernel_dim*')) + elif len(args) == 1: + if all([x in kwargs for x in ['kernel_dim1', 'kernel_dim2', 'kernel_dim3']]): + kernel_size = (kwargs.pop('kernel_dim1'), + kwargs.pop('kernel_dim2'), + kwargs.pop('kernel_dim3')) + kwargs['kernel_size'] = kernel_size + converted.append(('kernel_size', 'nb_row/nb_col')) + return args, kwargs, converted + +legacy_conv3d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('subsample', 'strides'), + ('border_mode', 'padding'), + ('dim_ordering', 'data_format'), + ('init', 'kernel_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('W_constraint', 'kernel_constraint'), + ('b_constraint', 'bias_constraint'), + ('bias', 'use_bias')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + preprocessor=conv3d_args_preprocessor) + + +def batchnorm_args_preprocessor(args, kwargs): + converted = [] + if len(args) > 1: + raise TypeError('The `BatchNormalization` layer ' + 'does not accept positional arguments. ' + 'Use keyword arguments instead.') + if 'mode' in kwargs: + value = kwargs.pop('mode') + if value != 0: + raise TypeError('The `mode` argument of `BatchNormalization` ' + 'no longer exists. `mode=1` and `mode=2` ' + 'are no longer supported.') + converted.append(('mode', None)) + return args, kwargs, converted + + +def convlstm2d_args_preprocessor(args, kwargs): + converted = [] + if 'forget_bias_init' in kwargs: + value = kwargs.pop('forget_bias_init') + if value == 'one': + kwargs['unit_forget_bias'] = True + converted.append(('forget_bias_init', 'unit_forget_bias')) + else: + warnings.warn('The `forget_bias_init` argument ' + 'has been ignored. 
Use `unit_forget_bias=True` ' + 'instead to initialize with ones.', stacklevel=3) + args, kwargs, _converted = conv2d_args_preprocessor(args, kwargs) + return args, kwargs, converted + _converted + +legacy_convlstm2d_support = generate_legacy_interface( + allowed_positional_args=['filters', 'kernel_size'], + conversions=[('nb_filter', 'filters'), + ('subsample', 'strides'), + ('border_mode', 'padding'), + ('dim_ordering', 'data_format'), + ('init', 'kernel_initializer'), + ('inner_init', 'recurrent_initializer'), + ('W_regularizer', 'kernel_regularizer'), + ('U_regularizer', 'recurrent_regularizer'), + ('b_regularizer', 'bias_regularizer'), + ('inner_activation', 'recurrent_activation'), + ('dropout_W', 'dropout'), + ('dropout_U', 'recurrent_dropout'), + ('bias', 'use_bias')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + preprocessor=convlstm2d_args_preprocessor) + +legacy_batchnorm_support = generate_legacy_interface( + allowed_positional_args=[], + conversions=[('beta_init', 'beta_initializer'), + ('gamma_init', 'gamma_initializer')], + preprocessor=batchnorm_args_preprocessor) + + +def zeropadding2d_args_preprocessor(args, kwargs): + converted = [] + if 'padding' in kwargs and isinstance(kwargs['padding'], dict): + if set(kwargs['padding'].keys()) <= {'top_pad', 'bottom_pad', + 'left_pad', 'right_pad'}: + top_pad = kwargs['padding'].get('top_pad', 0) + bottom_pad = kwargs['padding'].get('bottom_pad', 0) + left_pad = kwargs['padding'].get('left_pad', 0) + right_pad = kwargs['padding'].get('right_pad', 0) + kwargs['padding'] = ((top_pad, bottom_pad), (left_pad, right_pad)) + warnings.warn('The `padding` argument in the Keras 2 API no longer' + 'accepts dict types. You can now input argument as: ' + '`padding=(top_pad, bottom_pad, left_pad, right_pad)`.', + stacklevel=3) + elif len(args) == 2 and isinstance(args[1], dict): + if set(args[1].keys()) <= {'top_pad', 'bottom_pad', + 'left_pad', 'right_pad'}: + top_pad = args[1].get('top_pad', 0) + bottom_pad = args[1].get('bottom_pad', 0) + left_pad = args[1].get('left_pad', 0) + right_pad = args[1].get('right_pad', 0) + args = (args[0], ((top_pad, bottom_pad), (left_pad, right_pad))) + warnings.warn('The `padding` argument in the Keras 2 API no longer' + 'accepts dict types. 
You can now input argument as: ' + '`padding=((top_pad, bottom_pad), (left_pad, right_pad))`', + stacklevel=3) + return args, kwargs, converted + +legacy_zeropadding2d_support = generate_legacy_interface( + allowed_positional_args=['padding'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}, + preprocessor=zeropadding2d_args_preprocessor) + +legacy_zeropadding3d_support = generate_legacy_interface( + allowed_positional_args=['padding'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_cropping2d_support = generate_legacy_interface( + allowed_positional_args=['cropping'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_cropping3d_support = generate_legacy_interface( + allowed_positional_args=['cropping'], + conversions=[('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_spatialdropout1d_support = generate_legacy_interface( + allowed_positional_args=['rate'], + conversions=[('p', 'rate')]) + +legacy_spatialdropoutNd_support = generate_legacy_interface( + allowed_positional_args=['rate'], + conversions=[('p', 'rate'), + ('dim_ordering', 'data_format')], + value_conversions={'dim_ordering': {'tf': 'channels_last', + 'th': 'channels_first', + 'default': None}}) + +legacy_lambda_support = generate_legacy_interface( + allowed_positional_args=['function', 'output_shape']) + + +# Model methods + +def generator_methods_args_preprocessor(args, kwargs): + converted = [] + if len(args) < 3: + if 'samples_per_epoch' in kwargs: + samples_per_epoch = kwargs.pop('samples_per_epoch') + if len(args) > 1: + generator = args[1] + else: + generator = kwargs['generator'] + if hasattr(generator, 'batch_size'): + kwargs['steps_per_epoch'] = samples_per_epoch // generator.batch_size + else: + kwargs['steps_per_epoch'] = samples_per_epoch + converted.append(('samples_per_epoch', 'steps_per_epoch')) + + keras1_args = {'samples_per_epoch', 'val_samples', + 'nb_epoch', 'nb_val_samples', 'nb_worker'} + if keras1_args.intersection(kwargs.keys()): + warnings.warn('The semantics of the Keras 2 argument ' + '`steps_per_epoch` is not the same as the ' + 'Keras 1 argument `samples_per_epoch`. ' + '`steps_per_epoch` is the number of batches ' + 'to draw from the generator at each epoch. ' + 'Basically steps_per_epoch = samples_per_epoch/batch_size. ' + 'Similarly `nb_val_samples`->`validation_steps` and ' + '`val_samples`->`steps` arguments have changed. 
' + 'Update your method calls accordingly.', stacklevel=3) + + return args, kwargs, converted + + +legacy_generator_methods_support = generate_legacy_method_interface( + allowed_positional_args=['generator', 'steps_per_epoch', 'epochs'], + conversions=[('samples_per_epoch', 'steps_per_epoch'), + ('val_samples', 'steps'), + ('nb_epoch', 'epochs'), + ('nb_val_samples', 'validation_steps'), + ('nb_worker', 'workers'), + ('pickle_safe', 'use_multiprocessing'), + ('max_q_size', 'max_queue_size')], + preprocessor=generator_methods_args_preprocessor) + + +legacy_model_constructor_support = generate_legacy_interface( + allowed_positional_args=None, + conversions=[('input', 'inputs'), + ('output', 'outputs')]) + +legacy_input_support = generate_legacy_interface( + allowed_positional_args=None, + conversions=[('input_dtype', 'dtype')]) + + +def add_weight_args_preprocessing(args, kwargs): + if len(args) > 1: + if isinstance(args[1], (tuple, list)): + kwargs['shape'] = args[1] + args = (args[0],) + args[2:] + if len(args) > 1: + if isinstance(args[1], six.string_types): + kwargs['name'] = args[1] + args = (args[0],) + args[2:] + return args, kwargs, [] + + +legacy_add_weight_support = generate_legacy_interface( + allowed_positional_args=['name', 'shape'], + preprocessor=add_weight_args_preprocessing) + + +def get_updates_arg_preprocessing(args, kwargs): + # Old interface: (params, constraints, loss) + # New interface: (loss, params) + if len(args) > 4: + raise TypeError('`get_update` call received more arguments ' + 'than expected.') + elif len(args) == 4: + # Assuming old interface. + opt, params, _, loss = args + kwargs['loss'] = loss + kwargs['params'] = params + return [opt], kwargs, [] + elif len(args) == 3: + if isinstance(args[1], (list, tuple)): + assert isinstance(args[2], dict) + assert 'loss' in kwargs + opt, params, _ = args + kwargs['params'] = params + return [opt], kwargs, [] + return args, kwargs, [] + +legacy_get_updates_support = generate_legacy_interface( + allowed_positional_args=None, + conversions=[], + preprocessor=get_updates_arg_preprocessing) diff --git a/mlair/model_modules/abstract_model_class.py b/mlair/model_modules/abstract_model_class.py index 5248b2634666a9405e37a09ac01f93daa739a228..e7d0437f2ff62b635146047496f09db9e7fcdd5c 100644 --- a/mlair/model_modules/abstract_model_class.py +++ b/mlair/model_modules/abstract_model_class.py @@ -2,11 +2,10 @@ import inspect from abc import ABC from typing import Any, Dict, Callable -import keras +import tensorflow.keras as keras import tensorflow as tf -from mlair.helpers import remove_items -from keras import backend as K +from mlair.helpers import remove_items, make_keras_pickable class AbstractModelClass(ABC): @@ -22,6 +21,7 @@ class AbstractModelClass(ABC): def __init__(self, input_shape, output_shape) -> None: """Predefine internal attributes for model and loss.""" + make_keras_pickable() self.__model = None self.model_name = self.__class__.__name__ self.__custom_objects = {} @@ -37,7 +37,7 @@ class AbstractModelClass(ABC): self.__compile_options_is_set = False self._input_shape = input_shape self._output_shape = self.__extract_from_tuple(output_shape) - self.avail_gpus = len(K.tensorflow_backend._get_available_gpus()) + # self.avail_gpus = len(K.tensorflow_backend._get_available_gpus()) def __getattr__(self, name: str) -> Any: """ @@ -141,6 +141,8 @@ class AbstractModelClass(ABC): for allow_k in self.__allowed_compile_options.keys(): if hasattr(self, allow_k): new_v_attr = getattr(self, allow_k) + if new_v_attr == list(): + 
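The decorators above rename Keras 1 keyword arguments to their Keras 2 counterparts and emit an "Update your call" warning before delegating to the wrapped function. A small illustration of the mechanism using `legacy_dense_support` (the `MyDense` class is hypothetical, for demonstration only):

    import tensorflow.keras as keras
    from mlair.keras_legacy.interfaces import legacy_dense_support

    class MyDense(keras.layers.Dense):
        @legacy_dense_support
        def __init__(self, units, **kwargs):
            super().__init__(units, **kwargs)

    # Keras 1 style call: `output_dim` is converted to `units`
    # and a UserWarning with the corrected signature is raised.
    layer = MyDense(output_dim=16)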
diff --git a/mlair/model_modules/abstract_model_class.py b/mlair/model_modules/abstract_model_class.py
index 5248b2634666a9405e37a09ac01f93daa739a228..e7d0437f2ff62b635146047496f09db9e7fcdd5c 100644
--- a/mlair/model_modules/abstract_model_class.py
+++ b/mlair/model_modules/abstract_model_class.py
@@ -2,11 +2,10 @@
 import inspect
 from abc import ABC
 from typing import Any, Dict, Callable
 
-import keras
+import tensorflow.keras as keras
 import tensorflow as tf
 
-from mlair.helpers import remove_items
-from keras import backend as K
+from mlair.helpers import remove_items, make_keras_pickable
 
 
 class AbstractModelClass(ABC):
@@ -22,6 +21,7 @@
 
     def __init__(self, input_shape, output_shape) -> None:
         """Predefine internal attributes for model and loss."""
+        make_keras_pickable()
         self.__model = None
         self.model_name = self.__class__.__name__
         self.__custom_objects = {}
@@ -37,7 +37,7 @@
         self.__compile_options_is_set = False
         self._input_shape = input_shape
         self._output_shape = self.__extract_from_tuple(output_shape)
-        self.avail_gpus = len(K.tensorflow_backend._get_available_gpus())
+        # self.avail_gpus = len(K.tensorflow_backend._get_available_gpus())
 
     def __getattr__(self, name: str) -> Any:
         """
@@ -141,6 +141,8 @@
         for allow_k in self.__allowed_compile_options.keys():
             if hasattr(self, allow_k):
                 new_v_attr = getattr(self, allow_k)
+                if new_v_attr == list():
+                    new_v_attr = None
             else:
                 new_v_attr = None
             if isinstance(value, dict):
@@ -149,8 +151,10 @@
                 new_v_dic = None
             else:
                 raise TypeError(f"`compile_options' must be `dict' or `None', but is {type(value)}.")
-            if (new_v_attr == new_v_dic or self.__compare_keras_optimizers(new_v_attr, new_v_dic)) or (
-                    (new_v_attr is None) ^ (new_v_dic is None)):
+            # self.__compare_keras_optimizers() is disabled for now because it does not work as expected
+            # if (new_v_attr == new_v_dic or self.__compare_keras_optimizers(new_v_attr, new_v_dic)) or (
+            #         (new_v_attr is None) ^ (new_v_dic is None)):
+            if (new_v_attr == new_v_dic) or ((new_v_attr is None) ^ (new_v_dic is None)):
                 if new_v_attr is not None:
                     self.__compile_options[allow_k] = new_v_attr
                 else:
@@ -173,18 +177,22 @@
 
         :return True if optimisers are interchangeable, or False if optimisers are distinguishable.
         """
-        if first.__class__ == second.__class__ and first.__module__ == 'keras.optimizers':
-            res = True
-            init = tf.global_variables_initializer()
-            with tf.Session() as sess:
-                sess.run(init)
-                for k, v in first.__dict__.items():
-                    try:
-                        res *= sess.run(v) == sess.run(second.__dict__[k])
-                    except TypeError:
-                        res *= v == second.__dict__[k]
-        else:
+        if isinstance(second, list):
             res = False
+        else:
+            if first.__class__ == second.__class__ and '.'.join(
+                    first.__module__.split('.')[0:4]) == 'tensorflow.python.keras.optimizer_v2':
+                res = True
+                init = tf.compat.v1.global_variables_initializer()
+                with tf.compat.v1.Session() as sess:
+                    sess.run(init)
+                    for k, v in first.__dict__.items():
+                        try:
+                            res *= sess.run(v) == sess.run(second.__dict__[k])
+                        except TypeError:
+                            res *= v == second.__dict__[k]
+            else:
+                res = False
         return bool(res)
 
     def get_settings(self) -> Dict:
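The session-based optimizer comparison above is carried over from the TF1 code path, which is why its call site in the `compile_options` setter is commented out. Under eager TF2 semantics the same intent is usually expressed by comparing optimizer configurations; a sketch of that alternative (not what MLAir currently does):

    import tensorflow.keras as keras

    def optimizers_equal(first, second) -> bool:
        """Compare two keras optimizers by class and hyperparameter config."""
        if not isinstance(second, keras.optimizers.Optimizer):
            return False
        return first.__class__ == second.__class__ and first.get_config() == second.get_config()

    assert optimizers_equal(keras.optimizers.Adam(), keras.optimizers.Adam())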
diff --git a/mlair/model_modules/advanced_paddings.py b/mlair/model_modules/advanced_paddings.py
index 3e64fa9a8c34d2307cc9cced1dfdddcd646520cd..2fc34c9b380091c9f0056bc21f3bcbd3212afbee 100644
--- a/mlair/model_modules/advanced_paddings.py
+++ b/mlair/model_modules/advanced_paddings.py
@@ -8,12 +8,88 @@
 from typing import Union, Tuple
 
 import numpy as np
 import tensorflow as tf
 
-from keras.backend.common import normalize_data_format
-from keras.layers import ZeroPadding2D
-from keras.layers.convolutional import _ZeroPadding
-from keras.legacy import interfaces
-from keras.utils import conv_utils
-from keras.utils.generic_utils import transpose_shape
+# from tensorflow.keras.backend.common import normalize_data_format
+from tensorflow.keras.layers import ZeroPadding2D
+# from tensorflow.keras.layers.convolutional import _ZeroPadding
+from tensorflow.keras.layers import Layer
+# from tensorflow.keras.legacy import interfaces
+from mlair.keras_legacy import interfaces
+# from tensorflow.keras.utils import conv_utils
+from mlair.keras_legacy import conv_utils
+# from tensorflow.keras.utils.generic_utils import transpose_shape
+# from mlair.keras_legacy.generic_utils import transpose_shape
+
+
+def transpose_shape(shape, target_format, spatial_axes):
+    """Converts a tuple or a list to the correct `data_format`.
+
+    It does so by switching the positions of its elements.
+
+    # Arguments
+        shape: Tuple or list, often representing shape,
+            corresponding to `'channels_last'`.
+        target_format: A string, either `'channels_first'` or `'channels_last'`.
+        spatial_axes: A tuple of integers.
+            Correspond to the indexes of the spatial axes.
+            For example, if you pass a shape
+            representing (batch_size, timesteps, rows, cols, channels),
+            then `spatial_axes=(2, 3)`.
+
+    # Returns
+        A tuple or list, with the elements permuted according
+        to `target_format`.
+
+    # Example
+    ```python
+        >>> # from keras.utils.generic_utils import transpose_shape
+        >>> transpose_shape((16, 128, 128, 32), 'channels_first', spatial_axes=(1, 2))
+        (16, 32, 128, 128)
+        >>> transpose_shape((16, 128, 128, 32), 'channels_last', spatial_axes=(1, 2))
+        (16, 128, 128, 32)
+        >>> transpose_shape((128, 128, 32), 'channels_first', spatial_axes=(0, 1))
+        (32, 128, 128)
+    ```
+
+    # Raises
+        ValueError: if `target_format` is neither `'channels_first'` nor `'channels_last'`.
+    """
+    if target_format == 'channels_first':
+        new_values = shape[:spatial_axes[0]]
+        new_values += (shape[-1],)
+        new_values += tuple(shape[x] for x in spatial_axes)
+
+        if isinstance(shape, list):
+            return list(new_values)
+        return new_values
+    elif target_format == 'channels_last':
+        return shape
+    else:
+        raise ValueError('The `data_format` argument must be one of '
+                         '"channels_first", "channels_last". Received: ' +
+                         str(target_format))
+
+
+def normalize_data_format(value):
+    """Checks that the value corresponds to a valid data format.
+
+    # Arguments
+        value: String or None. `'channels_first'` or `'channels_last'`.
+
+    # Returns
+        A string, either `'channels_first'` or `'channels_last'`.
+
+    # Example
+    ```python
+        >>> normalize_data_format(None)
+        'channels_last'
+        >>> normalize_data_format('channels_last')
+        'channels_last'
+    ```
+
+    # Raises
+        ValueError: if `value` is neither `'channels_first'` nor `'channels_last'`.
+    """
+    if value is None:
+        value = 'channels_last'
+    data_format = value.lower()
+    if data_format not in {'channels_first', 'channels_last'}:
+        raise ValueError('The `data_format` argument must be one of '
+                         '"channels_first", "channels_last". Received: ' +
+                         str(value))
+    return data_format
 
 
 class PadUtils:
@@ -118,6 +194,94 @@
         return normalized_padding
 
 
+class InputSpec(object):
+    """Specifies the ndim, dtype and shape of every input to a layer.
+
+    Every layer should expose (if appropriate) an `input_spec` attribute:
+    a list of instances of InputSpec (one per input tensor).
+
+    A None entry in a shape is compatible with any dimension,
+    a None shape is compatible with any shape.
+
+    # Arguments
+        dtype: Expected datatype of the input.
+        shape: Shape tuple, expected shape of the input
+            (may include None for unchecked axes).
+        ndim: Integer, expected rank of the input.
+        max_ndim: Integer, maximum rank of the input.
+        min_ndim: Integer, minimum rank of the input.
+        axes: Dictionary mapping integer axes to
+            a specific dimension value.
+    """
+
+    def __init__(self, dtype=None,
+                 shape=None,
+                 ndim=None,
+                 max_ndim=None,
+                 min_ndim=None,
+                 axes=None):
+        self.dtype = dtype
+        self.shape = shape
+        if shape is not None:
+            self.ndim = len(shape)
+        else:
+            self.ndim = ndim
+        self.max_ndim = max_ndim
+        self.min_ndim = min_ndim
+        self.axes = axes or {}
+
+    def __repr__(self):
+        spec = [('dtype=' + str(self.dtype)) if self.dtype else '',
+                ('shape=' + str(self.shape)) if self.shape else '',
+                ('ndim=' + str(self.ndim)) if self.ndim else '',
+                ('max_ndim=' + str(self.max_ndim)) if self.max_ndim else '',
+                ('min_ndim=' + str(self.min_ndim)) if self.min_ndim else '',
+                ('axes=' + str(self.axes)) if self.axes else '']
+        return 'InputSpec(%s)' % ', '.join(x for x in spec if x)
+
+
+class _ZeroPadding(Layer):
+    """Abstract nD ZeroPadding layer (private, used as implementation base).
+
+    # Arguments
+        padding: Tuple of tuples of two ints. Can be a tuple of ints when
+            rank is 1.
+        data_format: A string,
+            one of `"channels_last"` or `"channels_first"`.
+            The ordering of the dimensions in the inputs.
+            `"channels_last"` corresponds to inputs with shape
+            `(batch, ..., channels)` while `"channels_first"` corresponds to
+            inputs with shape `(batch, channels, ...)`.
+            It defaults to the `image_data_format` value found in your
+            Keras config file at `~/.keras/keras.json`.
+            If you never set it, then it will be "channels_last".
+    """
+    def __init__(self, padding, data_format=None, **kwargs):
+        # self.rank is 1 for ZeroPadding1D, 2 for ZeroPadding2D.
+        self.rank = len(padding)
+        self.padding = padding
+        self.data_format = normalize_data_format(data_format)
+        self.input_spec = tf.keras.layers.InputSpec(ndim=self.rank + 2)
+        super(_ZeroPadding, self).__init__(**kwargs)
+
+    def call(self, inputs):
+        raise NotImplementedError
+
+    def compute_output_shape(self, input_shape):
+        padding_all_dims = ((0, 0),) + self.padding + ((0, 0),)
+        spatial_axes = list(range(1, 1 + self.rank))
+        padding_all_dims = transpose_shape(padding_all_dims,
+                                           self.data_format,
+                                           spatial_axes)
+        output_shape = list(input_shape)
+        for dim in range(len(output_shape)):
+            if output_shape[dim] is not None:
+                output_shape[dim] += sum(padding_all_dims[dim])
+        return tuple(output_shape)
+
+    def get_config(self):
+        config = {'padding': self.padding,
+                  'data_format': self.data_format}
+        base_config = super(_ZeroPadding, self).get_config()
+        return dict(list(base_config.items()) + list(config.items()))
+
+
 class ReflectionPadding2D(_ZeroPadding):
     """
     Reflection padding layer for 2D input.
@@ -190,7 +354,7 @@
     def call(self, inputs, mask=None):
         """Call ReflectionPadding2D."""
         pattern = PadUtils.spatial_2d_padding(padding=self.padding, data_format=self.data_format)
-        return tf.pad(inputs, pattern, 'REFLECT')
+        return tf.pad(tensor=inputs, paddings=pattern, mode='REFLECT')
 
 
 class SymmetricPadding2D(_ZeroPadding):
@@ -264,7 +428,7 @@
     def call(self, inputs, mask=None):
         """Call SymmetricPadding2D."""
         pattern = PadUtils.spatial_2d_padding(padding=self.padding, data_format=self.data_format)
-        return tf.pad(inputs, pattern, 'SYMMETRIC')
+        return tf.pad(tensor=inputs, paddings=pattern, mode='SYMMETRIC')
 
 
 class Padding2D:
@@ -321,8 +485,8 @@
 
 if __name__ == '__main__':
-    from keras.models import Model
-    from keras.layers import Conv2D, Flatten, Dense, Input
+    from tensorflow.keras.models import Model
+    from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input
 
     kernel_1 = (3, 3)
     kernel_2 = (5, 5)
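With the vendored `_ZeroPadding` base in place, the two custom layers keep their public behaviour and differ only in the `mode` handed to `tf.pad`. A minimal usage sketch (input shape and padding chosen arbitrarily, assuming the constructor accepts a `(rows, cols)` padding tuple as in the file's own `__main__` example):

    import numpy as np
    import tensorflow as tf
    from mlair.model_modules.advanced_paddings import ReflectionPadding2D

    x = np.arange(16, dtype="float32").reshape(1, 4, 4, 1)
    layer = ReflectionPadding2D(padding=(1, 1))
    y = layer(tf.constant(x))
    assert y.shape == (1, 6, 6, 1)  # one reflected row/column added on each side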
diff --git a/mlair/model_modules/convolutional_networks.py b/mlair/model_modules/convolutional_networks.py
index 624cfa097a2ce562e9e2d2ae698a1e84bdef7309..be047eb7a1c92cbb8847328c157c874bfeca93ca 100644
--- a/mlair/model_modules/convolutional_networks.py
+++ b/mlair/model_modules/convolutional_networks.py
@@ -8,7 +8,7 @@
 from mlair.helpers import select_from_dict
 from mlair.model_modules.loss import var_loss, custom_loss
 from mlair.model_modules.advanced_paddings import PadUtils, Padding2D, SymmetricPadding2D
 
-import keras
+import tensorflow.keras as keras
 
 
 class CNN(AbstractModelClass):
@@ -21,7 +21,7 @@
     _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
                     "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
                     "prelu": keras.initializers.he_normal()}
-    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _optimizer = {"adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD}
     _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
     _dropout = {"selu": keras.layers.AlphaDropout}
diff --git a/mlair/model_modules/flatten.py b/mlair/model_modules/flatten.py
index dd1e8e21eeb96f75372add0208b03dc06f5dc25c..98a55bfcfbe51ff0757479704f8e30738f7db705 100644
--- a/mlair/model_modules/flatten.py
+++ b/mlair/model_modules/flatten.py
@@ -3,7 +3,7 @@
 __date__ = '2019-12-02'
 
 from typing import Union, Callable
 
-import keras
+import tensorflow.keras as keras
 
 
 def get_activation(input_to_activate: keras.layers, activation: Union[Callable, str], **kwargs):
diff --git a/mlair/model_modules/fully_connected_networks.py b/mlair/model_modules/fully_connected_networks.py
index 0338033315d294c2e54de8b038bba2123d2fee77..8536516e66cc1dda15972fd2e91d0ef67c70dda7 100644
--- a/mlair/model_modules/fully_connected_networks.py
+++ b/mlair/model_modules/fully_connected_networks.py
@@ -7,7 +7,7 @@
 from mlair.model_modules import AbstractModelClass
 from mlair.helpers import select_from_dict
 from mlair.model_modules.loss import var_loss, custom_loss, l_p_loss
 
-import keras
+import tensorflow.keras as keras
 
 
 class FCN(AbstractModelClass):
@@ -25,7 +25,7 @@
     _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
                     "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
                     "prelu": keras.initializers.he_normal()}
-    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _optimizer = {"adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD}
     _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
     _dropout = {"selu": keras.layers.AlphaDropout}
@@ -207,7 +207,7 @@
     _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
                     "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
                     "prelu": keras.initializers.he_normal()}
-    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _optimizer = {"adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD}
     _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
     _dropout = {"selu": keras.layers.AlphaDropout}
diff --git a/mlair/model_modules/inception_model.py b/mlair/model_modules/inception_model.py
index d7354c37899bbb7d8f80bc76b4cd9237c7df96dc..0387a5f2ca1d389f60adb3f63cde4e13d60eafc4 100644
--- a/mlair/model_modules/inception_model.py
+++ b/mlair/model_modules/inception_model.py
@@ -3,8 +3,8 @@
 __date__ = '2019-10-22'
 
 import logging
 
-import keras
-import keras.layers as layers
+import tensorflow.keras as keras
+import tensorflow.keras.layers as layers
 
 from mlair.model_modules.advanced_paddings import PadUtils, ReflectionPadding2D, Padding2D
diff --git a/mlair/model_modules/keras_extensions.py b/mlair/model_modules/keras_extensions.py
index e0f54282010e765fb3d8b0aca191a75c0b22fdf9..d890e7b0ff3beea812d8fc7766433a84d65a1ebe 100644
--- a/mlair/model_modules/keras_extensions.py
+++ b/mlair/model_modules/keras_extensions.py
@@ -11,8 +11,8 @@
 from typing_extensions import TypedDict
 from time import time
 
 import numpy as np
-from keras import backend as K
-from keras.callbacks import History, ModelCheckpoint, Callback
+from tensorflow.keras import backend as K
+from tensorflow.keras.callbacks import History, ModelCheckpoint, Callback
 
 from mlair import helpers
diff --git a/mlair/model_modules/loss.py b/mlair/model_modules/loss.py
index 2034c5a7795fad302d2a289e6fadbd5e295117cc..1a54bc1c1ae280d07a731aed2dd001c1c2c28af0 100644
--- a/mlair/model_modules/loss.py
+++ b/mlair/model_modules/loss.py
@@ -1,6 +1,6 @@
 """Collection of different customised loss functions."""
 
-from keras import backend as K
+from tensorflow.keras import backend as K
 
 from typing import Callable
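All optimizer tables above swap the lowercase alias `keras.optimizers.adam`, which no longer exists in TF2, for the class `keras.optimizers.Adam`. Note that in TF 2.5 the constructor still accepts the legacy `lr` keyword (as used in model_class.py below), although `learning_rate` is the documented spelling:

    import tensorflow.keras as keras

    opt = keras.optimizers.Adam(learning_rate=1e-3, amsgrad=True)
    # equivalent, but relies on the deprecated `lr` alias kept for
    # backwards compatibility:
    opt_legacy = keras.optimizers.Adam(lr=1e-3, amsgrad=True)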
diff --git a/mlair/model_modules/model_class.py b/mlair/model_modules/model_class.py
index 6a218990d34f8d7e4ad17cce86099d0ef5fabcdf..0604c777ad56d7bb3fbda4723e39e0bfb607b5bb 100644
--- a/mlair/model_modules/model_class.py
+++ b/mlair/model_modules/model_class.py
@@ -120,7 +120,7 @@
 import mlair.model_modules.keras_extensions
 
 __author__ = "Lukas Leufen, Felix Kleinert"
 __date__ = '2020-05-12'
 
-import keras
+import tensorflow.keras as keras
 
 from mlair.model_modules import AbstractModelClass
 from mlair.model_modules.inception_model import InceptionModelBase
@@ -346,7 +346,7 @@
         self.model = keras.Model(inputs=X_input, outputs=[out_main])
 
     def set_compile_options(self):
-        self.optimizer = keras.optimizers.adam(lr=self.initial_lr)
+        self.optimizer = keras.optimizers.Adam(lr=self.initial_lr)
         self.compile_options = {"loss": [keras.losses.mean_squared_error], "metrics": ["mse"]}
 
@@ -462,7 +462,7 @@
             print(f"Set multi_gpu model with {self.avail_gpus} GPUs")
 
     def set_compile_options(self):
-        self.compile_options = {"optimizer": keras.optimizers.adam(lr=self.initial_lr, amsgrad=True),
+        self.compile_options = {"optimizer": keras.optimizers.Adam(lr=self.initial_lr, amsgrad=True),
                                 "loss": [l_p_loss(4), keras.losses.mean_squared_error],
                                 "metrics": ['mse'],
                                 "loss_weights": [.01, .99]
diff --git a/mlair/model_modules/recurrent_networks.py b/mlair/model_modules/recurrent_networks.py
index 95c48bc8659354c7c669bb03a7591dafbbe9f262..59927e992d432207db5b5737289a6f4d671d92f3 100644
--- a/mlair/model_modules/recurrent_networks.py
+++ b/mlair/model_modules/recurrent_networks.py
@@ -7,7 +7,7 @@
 from mlair.model_modules import AbstractModelClass
 from mlair.helpers import select_from_dict
 from mlair.model_modules.loss import var_loss, custom_loss
 
-import keras
+import tensorflow.keras as keras
 
 
 class RNN(AbstractModelClass):
@@ -24,7 +24,7 @@
     _initializer = {"tanh": "glorot_uniform", "sigmoid": "glorot_uniform", "linear": "glorot_uniform",
                     "relu": keras.initializers.he_normal(), "selu": keras.initializers.lecun_normal(),
                     "prelu": keras.initializers.he_normal()}
-    _optimizer = {"adam": keras.optimizers.adam, "sgd": keras.optimizers.SGD}
+    _optimizer = {"adam": keras.optimizers.Adam, "sgd": keras.optimizers.SGD}
     _regularizer = {"l1": keras.regularizers.l1, "l2": keras.regularizers.l2, "l1_l2": keras.regularizers.l1_l2}
     _requirements = ["lr", "beta_1", "beta_2", "epsilon", "decay", "amsgrad", "momentum", "nesterov", "l1", "l2"]
     _dropout = {"selu": keras.layers.AlphaDropout}
diff --git a/mlair/plotting/training_monitoring.py b/mlair/plotting/training_monitoring.py
index 9cad9fd0ee2b9f3d81bd91810abcd4f6eeefb05f..b2b531b99c85bb43e4e758fd23045c9f0575cb24 100644
--- a/mlair/plotting/training_monitoring.py
+++ b/mlair/plotting/training_monitoring.py
@@ -5,7 +5,7 @@
 __date__ = '2019-12-11'
 
 from typing import Union, Dict, List
 
-import keras
+import tensorflow.keras as keras
 import matplotlib
 import matplotlib.pyplot as plt
 import pandas as pd
diff --git a/mlair/run_modules/model_setup.py b/mlair/run_modules/model_setup.py
index 83f4a2bd96314d6f8c53f8cc9407cbc12e7b9a16..0b9e8ec56592901d9feba15eb50b6b21a0c53560 100644
--- a/mlair/run_modules/model_setup.py
+++ b/mlair/run_modules/model_setup.py
@@ -8,7 +8,7 @@
 import os
 import re
 
 from dill.source import getsource
-import keras
+import tensorflow.keras as keras
 import pandas as pd
 import tensorflow as tf
diff --git a/mlair/run_modules/training.py b/mlair/run_modules/training.py
index 00e8eae1581453666d3ca11f48fcdaedf6a24ad0..0696c2e7b8daa75925cf16096e183de94c21fe85 100644
--- a/mlair/run_modules/training.py
+++ b/mlair/run_modules/training.py
@@ -8,8 +8,8 @@ import logging
 import os
 from typing import Union
 
-import keras
-from keras.callbacks import Callback, History
+import tensorflow.keras as keras
+from tensorflow.keras.callbacks import Callback, History
 import psutil
 import pandas as pd
@@ -99,7 +99,7 @@ class Training(RunEnvironment):
         workers. To prevent this, the function is pre-compiled. See discussion @
         https://stackoverflow.com/questions/40850089/is-keras-thread-safe/43393252#43393252
         """
-        self.model._make_predict_function()
+        self.model.make_predict_function()
 
     def _set_gen(self, mode: str) -> None:
         """
@@ -123,7 +123,7 @@ class Training(RunEnvironment):
     def train(self) -> None:
         """
-        Perform training using keras fit_generator().
+        Perform training using keras fit().
 
         Callbacks are stored locally in the experiment directory. Best model from training is saved for class
         variable model. If the file path of checkpoint is not empty, this method assumes, that this is not a new
@@ -137,14 +137,14 @@ class Training(RunEnvironment):
         checkpoint = self.callbacks.get_checkpoint()
         if not os.path.exists(checkpoint.filepath) or self._create_new_model:
-            history = self.model.fit_generator(generator=self.train_set,
-                                               steps_per_epoch=len(self.train_set),
-                                               epochs=self.epochs,
-                                               verbose=2,
-                                               validation_data=self.val_set,
-                                               validation_steps=len(self.val_set),
-                                               callbacks=self.callbacks.get_callbacks(as_dict=False),
-                                               workers=psutil.cpu_count(logical=False))
+            history = self.model.fit(self.train_set,
+                                     steps_per_epoch=len(self.train_set),
+                                     epochs=self.epochs,
+                                     verbose=2,
+                                     validation_data=self.val_set,
+                                     validation_steps=len(self.val_set),
+                                     callbacks=self.callbacks.get_callbacks(as_dict=False),
+                                     workers=psutil.cpu_count(logical=False))
         else:
             logging.info("Found locally stored model and checkpoints. Training is resumed from the last checkpoint.")
             self.callbacks.load_callbacks()
@@ -152,15 +152,15 @@ class Training(RunEnvironment):
             self.model = keras.models.load_model(checkpoint.filepath)
             hist: History = self.callbacks.get_callback_by_name("hist")
             initial_epoch = max(hist.epoch) + 1
-            _ = self.model.fit_generator(generator=self.train_set,
-                                         steps_per_epoch=len(self.train_set),
-                                         epochs=self.epochs,
-                                         verbose=2,
-                                         validation_data=self.val_set,
-                                         validation_steps=len(self.val_set),
-                                         callbacks=self.callbacks.get_callbacks(as_dict=False),
-                                         initial_epoch=initial_epoch,
-                                         workers=psutil.cpu_count(logical=False))
+            _ = self.model.fit(self.train_set,
+                               steps_per_epoch=len(self.train_set),
+                               epochs=self.epochs,
+                               verbose=2,
+                               validation_data=self.val_set,
+                               validation_steps=len(self.val_set),
+                               callbacks=self.callbacks.get_callbacks(as_dict=False),
+                               initial_epoch=initial_epoch,
+                               workers=psutil.cpu_count(logical=False))
             history = hist
         try:
             lr = self.callbacks.get_callback_by_name("lr")
@@ -178,7 +178,7 @@ class Training(RunEnvironment):
         """Save model in local experiment directory. Model is named as `<experiment_name>_<custom_model_name>.h5`."""
         model_name = self.data_store.get("model_name", "model")
         logging.debug(f"save best model to {model_name}")
-        self.model.save(model_name)
+        self.model.save(model_name, save_format='h5')
         self.data_store.set("best_model", self.model)
 
     def load_best_model(self, name: str) -> None:
@@ -261,7 +261,7 @@ class Training(RunEnvironment):
         tables.save_to_md(path, "training_settings.md", df=df)
 
         # calculate val scores
-        val_score = self.model.evaluate_generator(generator=self.val_set, use_multiprocessing=True, verbose=0)
+        val_score = self.model.evaluate(self.val_set, use_multiprocessing=True, verbose=0)
         path = self.data_store.get("model_path")
         with open(os.path.join(path, "val_scores.txt"), "a") as f:
             for index, item in enumerate(to_list(val_score)):
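
Note on the training.py changes above, which bundle three TF2 migrations: the
private Model._make_predict_function becomes the public make_predict_function,
fit_generator folds into Model.fit (with the same workers/validation
keywords), and Model.save gains save_format='h5' because TF2 otherwise writes
a SavedModel directory instead of a single .h5 file. A toy sketch of the
latter two (model, data and file name are illustrative):

    import numpy as np
    import tensorflow.keras as keras

    model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3), loss="mse")
    x, y = np.random.rand(64, 4), np.random.rand(64, 1)

    # fit() replaces fit_generator() and accepts Sequences directly.
    history = model.fit(x, y, epochs=1, validation_split=0.25, verbose=0)

    # Explicit HDF5 format keeps a single-file checkpoint as before.
    model.save("toy_model.h5", save_format="h5")
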
diff --git a/requirements.txt b/requirements.txt
index a2ccc2b04bcde470228bc8684f80f33f6a1945e8..886e086719a3978e6ad3125f04349d3f685a978e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,18 +10,15 @@ cftime==1.4.1
 chardet==4.0.0
 coverage==5.4
 cycler==0.10.0
-dask==2021.2.0
+dask==2.22.0
 dill==0.3.3
 fsspec==0.8.5
 gast==0.4.0
-grpcio==1.35.0
-h5py==2.10.0
+grpcio==1.34.0
+h5py==3.1.0
 idna==2.10
 importlib-metadata==3.4.0
 iniconfig==1.1.1
-Keras==2.2.4
-Keras-Applications==1.0.8
-Keras-Preprocessing==1.1.2
 kiwisolver==1.3.1
 locket==0.2.1
 Markdown==3.3.3
@@ -52,14 +49,13 @@ python-dateutil==2.8.1
 pytz==2021.1
 PyYAML==5.4.1
 requests==2.25.1
-scipy==1.5.4
+scipy==1.5.2
 seaborn==0.11.1
+--no-binary shapely Shapely==1.7.0
 six==1.15.0
 statsmodels==0.12.2
 tabulate==0.8.8
-tensorboard==1.13.1
-tensorflow==1.13.1
-tensorflow-estimator==1.13.0
+tensorflow==2.5.0
 termcolor==1.1.0
 toml==0.10.2
 toolz==0.11.1
@@ -71,5 +67,4 @@ wget==3.2
 xarray==0.16.2
 zipp==3.4.0
 
---no-binary shapely Shapely==1.7.0
-Cartopy==0.18.0
+#Cartopy==0.18.0
diff --git a/run.py b/run.py
index 11cc01257fdf4535845a2cfedb065dd27942ef66..5324e55a09b004352c4e35f23f5e2ea21a7451d6 100644
--- a/run.py
+++ b/run.py
@@ -3,9 +3,11 @@ __date__ = '2020-06-29'
 
 import argparse
 
 from mlair.workflows import DefaultWorkflow
+# from mlair.model_modules.recurrent_networks import RNN as chosen_model
 from mlair.helpers import remove_items
 from mlair.configuration.defaults import DEFAULT_PLOT_LIST
 import os
+import tensorflow as tf
 
 
 def load_stations():
@@ -20,13 +22,15 @@ def load_stations():
 
 
 def main(parser_args):
-    plots = remove_items(DEFAULT_PLOT_LIST, "PlotConditionalQuantiles")
+    # tf.compat.v1.disable_v2_behavior()
+    plots = remove_items(DEFAULT_PLOT_LIST, ["PlotConditionalQuantiles", "PlotPeriodogram"])
     workflow = DefaultWorkflow(  # stations=load_stations(),
                                # stations=["DEBW087","DEBW013", "DEBW107", "DEBW076"],
                                stations=["DEBW013", "DEBW087", "DEBW107", "DEBW076"],
                                train_model=False, create_new_model=True, network="UBA",
                                evaluate_feature_importance=False,  # plot_list=["PlotCompetitiveSkillScore"],
                                competitors=["test_model", "test_model2"],
+                               competitor_path=os.path.join(os.getcwd(), "data", "comp_test"),
                                **parser_args.__dict__, start_script=__file__)
     workflow.run()
diff --git a/run_climate_filter.py b/run_climate_filter.py
old mode 100755
new mode 100644
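
Note on the pins above: the grpcio, h5py and numpy versions track TensorFlow
2.5's own dependency constraints (grpcio~=1.34.0, h5py~=3.1.0, numpy~=1.19.2).
A quick sanity check after rebuilding the environment (expected values assume
a clean install from this requirements.txt):

    import tensorflow as tf
    import h5py
    import numpy as np

    print(tf.__version__)     # expected: 2.5.0
    print(h5py.__version__)   # expected: 3.1.x
    print(np.__version__)     # expected: 1.19.x (TF 2.5 needs ~=1.19.2)
    print(tf.config.list_physical_devices("GPU"))  # non-empty on GPU nodes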