diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml index b45cf0b402d7883d2c501b9af81acf02bcd8f964..b32490a60b44f0531ef80e4dc7366e069f475efe 100644 --- a/.github/workflows/ci_pipeline.yml +++ b/.github/workflows/ci_pipeline.yml @@ -23,7 +23,7 @@ jobs: uses: actions/checkout@v1 - name: Install Conda environment with Micromamba - uses: mamba-org/provision-with-micromamba@main + uses: mamba-org/setup-micromamba@v1 with: environment-file: ${{ env.YML }} @@ -70,10 +70,10 @@ jobs: uses: actions/checkout@v3 - name: Install Conda environment with Micromamba - uses: mamba-org/provision-with-micromamba@main + uses: mamba-org/setup-micromamba@v1 with: environment-file: "etc/environment-${{ matrix.env }}.yml" - extra-specs: | + create-args: >- python=${{ matrix.python }} - name: Run pytest for CPU stuff @@ -95,27 +95,71 @@ jobs: data_3.10 coverage_${{ matrix.env }}_3.10.dat + user_libpressio_tests: + runs-on: ubuntu-latest + + container: + image: brownbaerchen/libpressio:amd64_2 + volumes: + - ${{ github.workspace }}:/pySDC + + defaults: + run: + shell: bash -l {0} + + steps: + + - name: Checkout + uses: actions/checkout@v3 + + - name: Install pySDC and pytest + run: | + source /pySDC/pySDC/projects/compression/Docker/install_pySDC.sh + + - name: Run pytest + run: | + source /opt/spack/share/spack/setup-env.sh + spack load libpressio + + coverage run -m pytest --continue-on-collection-errors -v --durations=0 pySDC/tests -m libpressio + - name: Make coverage report + run: | + source /opt/spack/share/spack/setup-env.sh + spack load libpressio + + mv data data_libpressio + coverage combine + mv .coverage coverage_libpressio_3.10.dat + + - name: Upload artifacts + uses: actions/upload-artifact@v3 + with: + name: cpu-test-artifacts + path: | + data_libpressio + coverage_libpressio_3.10.dat + user_cpu_tests_macos: runs-on: macos-12 - + strategy: matrix: env: ['base', 'fenics', 'mpi4py', 'petsc'] - + defaults: run: shell: bash -l {0} - + steps: - name: Checkout uses: actions/checkout@v3 - + - name: Install Conda environment with Micromamba - uses: mamba-org/provision-with-micromamba@main + uses: mamba-org/setup-micromamba@v1 with: environment-file: "etc/environment-${{ matrix.env }}.yml" - + - name: Run pytest for CPU stuff run: | pytest --continue-on-collection-errors -v --durations=0 pySDC/tests -m ${{ matrix.env }} @@ -156,6 +200,7 @@ jobs: needs: - lint - user_cpu_tests_linux + - user_libpressio_tests # - wait_for_gitlab defaults: @@ -167,7 +212,7 @@ jobs: uses: actions/checkout@v3 - name: Install Conda environment with Micromamba - uses: mamba-org/provision-with-micromamba@main + uses: mamba-org/setup-micromamba@v1 with: environment-file: "etc/environment-base.yml" diff --git a/README.md b/README.md index ad59693b4adf91495dbdaf94ad6868a2873fcbee..1557b0256a1009759637e9685176487c6ca214a0 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,7 @@ and is now maintained and developed by a small community of scientists intereste Checkout the [Changelog](./CHANGELOG.md) to see pySDC's evolution since 2016. Any contribution is dearly welcome ! If you want to take part of this, please take the time to read our [Contribution Guidelines](./CONTRIBUTING.md) -(and don't forget to take a pick at our nice [Code of Conduct](./CODE_OF_CONDUCT.md) :wink:). +(and don't forget to take a peek at our nice [Code of Conduct](./CODE_OF_CONDUCT.md) :wink:). 
 ## Acknowledgements
diff --git a/docs/contrib/02_continuous_integration.md b/docs/contrib/02_continuous_integration.md
index a646aada170dc475e7053a19bc14ee3f8cdf83d0..1b4d5276cee50fc658bb98b3cee12b5211dfa6e1 100644
--- a/docs/contrib/02_continuous_integration.md
+++ b/docs/contrib/02_continuous_integration.md
@@ -19,7 +19,7 @@ black pySDC --check --diff --color
 flakeheaven lint --benchmark pySDC
 ```
 
-> :bell: To avoid any error about formatting (`black`), you can simply use this program to reformat directly your code using the command :
+> :bell: To avoid any error about formatting (`black`), you can simply use this program to reformat your code directly using the command :
 >
 > ```bash
 > black pySDC
@@ -30,6 +30,51 @@ Some style rules that are automatically enforced :
 - lines should be not longer than 120 characters
 - arithmetic operators (`+`, `*`, ...) should be separated with variables by one empty space
 
+You can automate linting somewhat by using git hooks.
+In order to run black automatically, we want to set up a pre-commit hook that re-adds the modified files to the commit after reformatting.
+To this end, just add the following to the file `<pySDC-root-directory>/.git/hooks/pre-commit`, creating it if it does not exist yet:
+
+```bash
+#!/bin/sh
+
+export files=$(git diff --staged --name-only HEAD | grep .py | sed -e "s,^,$(git rev-parse --show-toplevel)/,")
+
+if [[ $files != "" ]]
+then
+    black $files
+    git add $files
+fi
+```
+
+You may need to run `chmod +x` on the file to allow it to be executed.
+Be aware that the hook will alter files you may have opened in an editor whenever you make a commit, which may confuse you(r editor).
+
+To automate flakeheaven, we want to write a hook that alters the commit message in case any errors are detected. This gives us the choice of aborting the commit and fixing the issues, or going ahead with the commit and worrying about flakeheaven only when the time comes to open a pull request.
+To obtain this functionality, add the following to `<pySDC-root-directory>/.git/hooks/prepare-commit-msg`:
+
+```bash
+#!/bin/sh
+
+COMMIT_MSG_FILE=$1
+
+export files=$(git diff --staged --name-only HEAD | grep .py | sed -e "s,^,$(git rev-parse --show-toplevel)/,")
+
+if [[ $files != "" ]]
+then
+    export flakeheaven_output=$(flakeheaven lint --format default $files)
+    if [[ "$flakeheaven_output" != "" ]]
+    then
+        git interpret-trailers --in-place --trailer "$(echo "$flakeheaven_output" | sed -e 's/^/#/')" "$COMMIT_MSG_FILE"
+        git interpret-trailers --in-place --trailer "#!!!!!!!!!! WARNING: FLAKEHEAVEN FAILED !!!!!!!!!!" "$COMMIT_MSG_FILE"
+    fi
+fi
+
+```
+Don't forget to assign execution rights.
+
+As a final note, make sure to regularly update linting-related packages, as new releases constantly check more PEP8 guidelines.
+This might cause linting to fail in the GitHub action, which uses the most up-to-date versions available on the conda-forge channel, even though it passed locally.
+
 ## Code testing
 
 This is done using [pytest](https://docs.pytest.org/en/7.2.x/), and runs all the tests written in the `pySDC/tests` folder.
You can run those locally in the root folder of `pySDC` using : diff --git a/docs/source/index.rst b/docs/source/index.rst index 829792cab7203f1cab882452acb953ad90fc2553..14970ed97c857758be64085efe1ebab202df5c4e 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -48,8 +48,9 @@ Projects projects/AllenCahn_Bayreuth.rst projects/performance.rst projects/PinTSimE.rst - projects/Resilience.rst + projects/Resilience.rst projects/DAE.rst + projects/compression.rst API documentation @@ -69,4 +70,4 @@ Indices and tables : * :ref:`genindex` * :ref:`modindex` -* :ref:`search` \ No newline at end of file +* :ref:`search` diff --git a/docs/source/projects/compression.rst b/docs/source/projects/compression.rst new file mode 100644 index 0000000000000000000000000000000000000000..3e626d61abed48ba19931dd99ed8b14457066887 --- /dev/null +++ b/docs/source/projects/compression.rst @@ -0,0 +1 @@ +.. include:: /../../pySDC/projects/compression/README.rst diff --git a/etc/environment-mpi4py.yml b/etc/environment-mpi4py.yml index 6f7ba4cb30c9d8faf8a346452e52906c66205d14..e495181a85a93d8459c5039641bd8ec0b253bb53 100644 --- a/etc/environment-mpi4py.yml +++ b/etc/environment-mpi4py.yml @@ -11,4 +11,4 @@ dependencies: - mpi4py-fft>=2.0.2 - mpi4py>=3.0.0 - pytest - - pytest-cov \ No newline at end of file + - pytest-cov diff --git a/pySDC/core/ConvergenceController.py b/pySDC/core/ConvergenceController.py index 2a98d1c771cd8889bd88318b2a247cf8d47b6659..eb11cc31d7b3388daf9c5e9aee47a673d4c8ffdf 100644 --- a/pySDC/core/ConvergenceController.py +++ b/pySDC/core/ConvergenceController.py @@ -263,26 +263,7 @@ class ConvergenceController(object): controller (pySDC.Controller): The controller S (pySDC.Step): The current step size (int): The number of ranks - time (float): The current time - Tend (float): The final time - - Returns: - None - """ - pass - - def prepare_next_block_nonMPI(self, controller, MS, active_slots, time, Tend): - """ - This is an extension to the function `prepare_next_block`, which is only called in the non MPI controller and - is needed because there is no chance to communicate backwards otherwise. While you should not do this in the - first place, the first step in the new block comes after the last step in the last block, such that it is still - in fact forwards communication, even though it looks backwards. 
- - Args: - controller (pySDC.Controller): The controller - MS (list): All steps of the controller - active_slots (list): Index list of active steps - time (float): The current time + time (float): The current time will be list in nonMPI controller implementation Tend (float): The final time Returns: @@ -334,6 +315,8 @@ class ConvergenceController(object): # log what's happening for debug purposes self.logger.debug(f'Step {comm.rank} initiates send to step {dest}') + kwargs['tag'] = kwargs.get('tag', abs(self.params.control_order)) + if blocking: req = comm.send(data, dest=dest, **kwargs) else: @@ -358,6 +341,8 @@ class ConvergenceController(object): # log what's happening for debug purposes self.logger.debug(f'Step {comm.rank} initiates receive from step {source}') + kwargs['tag'] = kwargs.get('tag', abs(self.params.control_order)) + data = comm.recv(source=source, **kwargs) # log what's happening for debug purposes diff --git a/pySDC/core/Problem.py b/pySDC/core/Problem.py index 5e161c4fb438bcb949ff6dead6394148626dd7c5..bea486b92596066e2ec1ed32f17a009115e416b2 100644 --- a/pySDC/core/Problem.py +++ b/pySDC/core/Problem.py @@ -110,13 +110,13 @@ class ptype(RegisterParams): t_init (float): the starting time Returns: - numpy.ndarray: exact solution + numpy.ndarray: Reference solution """ import numpy as np from scipy.integrate import solve_ivp tol = 100 * np.finfo(float).eps - u_init = self.u_exact(t=0) if u_init is None else u_init + u_init = self.u_exact(t=0) if u_init is None else u_init * 1.0 t_init = 0 if t_init is None else t_init u_shape = u_init.shape diff --git a/pySDC/core/Sweeper.py b/pySDC/core/Sweeper.py index 339d5aeee79a972c8111a3b24b7185a221fe8a6e..7ab1c2fbecefc3a86d46566ffb0719de961673e2 100644 --- a/pySDC/core/Sweeper.py +++ b/pySDC/core/Sweeper.py @@ -15,6 +15,7 @@ class _Pars(FrozenClass): def __init__(self, pars): self.do_coll_update = False self.initial_guess = 'spread' + self.skip_residual_computation = () # gain performance at the cost of correct residual output for k, v in pars.items(): if k != 'collocation_class': @@ -331,14 +332,23 @@ class sweeper(object): L.status.unlocked = True L.status.updated = True - def compute_residual(self): + def compute_residual(self, stage=None): """ Computation of the residual using the collocation matrix Q + + Args: + stage (str): The current stage of the step the level belongs to """ # get current level and problem description L = self.level + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! + if stage in self.params.skip_residual_computation: + L.status.residual = 0.0 if L.status.residual is None else L.status.residual + return None + # check if there are new values (e.g. from a sweep) # assert L.status.updated diff --git a/pySDC/implementations/controller_classes/controller_MPI.py b/pySDC/implementations/controller_classes/controller_MPI.py index fc49884f1ffee66fbd463ce1fabce90e8b2fbabf..a924c4ff22f57ed548d697fac127408aeb2da366 100644 --- a/pySDC/implementations/controller_classes/controller_MPI.py +++ b/pySDC/implementations/controller_classes/controller_MPI.py @@ -62,7 +62,7 @@ class controller_MPI(controller): if num_levels == 1 and self.params.predict_type is not None: self.logger.warning( - 'you have specified a predictor type but only a single level.. ' 'predictor will be ignored' + 'you have specified a predictor type but only a single level.. 
predictor will be ignored' ) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: @@ -194,7 +194,7 @@ class controller_MPI(controller): # determine whether I am the first and/or last in line self.S.status.first = self.S.prev == size - 1 self.S.status.last = self.S.next == 0 - # intialize step with u0 + # initialize step with u0 self.S.init_step(u0) # reset some values self.S.status.done = False @@ -475,7 +475,7 @@ class controller_MPI(controller): self.S.levels[0].sweep.predict() if self.params.use_iteration_estimator: - # store pervious iterate to compute difference later on + # store previous iterate to compute difference later on self.S.levels[0].uold[1:] = self.S.levels[0].u[1:] # update stage @@ -603,7 +603,7 @@ class controller_MPI(controller): return None # compute the residual - self.S.levels[0].sweep.compute_residual() + self.S.levels[0].sweep.compute_residual(stage='IT_CHECK') if self.params.use_iteration_estimator: # TODO: replace with convergence controller @@ -632,7 +632,7 @@ class controller_MPI(controller): C.pre_iteration_processing(self, self.S, comm=comm) if self.params.use_iteration_estimator: - # store pervious iterate to compute difference later on + # store previous iterate to compute difference later on self.S.levels[0].uold[1:] = self.S.levels[0].u[1:] if len(self.S.levels) > 1: # MLSDC or PFASST @@ -693,7 +693,7 @@ class controller_MPI(controller): for hook in self.hooks: hook.pre_sweep(step=self.S, level_number=0) self.S.levels[0].sweep.update_nodes() - self.S.levels[0].sweep.compute_residual() + self.S.levels[0].sweep.compute_residual(stage='IT_FINE') for hook in self.hooks: hook.post_sweep(step=self.S, level_number=0) @@ -723,7 +723,7 @@ class controller_MPI(controller): for hook in self.hooks: hook.pre_sweep(step=self.S, level_number=l) self.S.levels[l].sweep.update_nodes() - self.S.levels[l].sweep.compute_residual() + self.S.levels[l].sweep.compute_residual(stage='IT_DOWN') for hook in self.hooks: hook.post_sweep(step=self.S, level_number=l) @@ -751,7 +751,7 @@ class controller_MPI(controller): % self.S.levels[-1].params.nsweeps ) self.S.levels[-1].sweep.update_nodes() - self.S.levels[-1].sweep.compute_residual() + self.S.levels[-1].sweep.compute_residual(stage='IT_COARSE') for hook in self.hooks: hook.post_sweep(step=self.S, level_number=len(self.S.levels) - 1) self.S.levels[-1].sweep.compute_end_point() @@ -793,7 +793,7 @@ class controller_MPI(controller): for hook in self.hooks: hook.pre_sweep(step=self.S, level_number=l - 1) self.S.levels[l - 1].sweep.update_nodes() - self.S.levels[l - 1].sweep.compute_residual() + self.S.levels[l - 1].sweep.compute_residual(stage='IT_UP') for hook in self.hooks: hook.post_sweep(step=self.S, level_number=l - 1) diff --git a/pySDC/implementations/controller_classes/controller_nonMPI.py b/pySDC/implementations/controller_classes/controller_nonMPI.py index 3c68f89eef1a33eb2f67814706c8d4f33d088985..9b0ec6b29ca2ed92ec7bf17e80888f4ec416a593 100644 --- a/pySDC/implementations/controller_classes/controller_nonMPI.py +++ b/pySDC/implementations/controller_classes/controller_nonMPI.py @@ -75,12 +75,12 @@ class controller_nonMPI(controller): if self.nlevels == 1 and self.params.predict_type is not None: self.logger.warning( - 'you have specified a predictor type but only a single level.. ' 'predictor will be ignored' + 'you have specified a predictor type but only a single level.. 
predictor will be ignored' ) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: C.reset_buffers_nonMPI(self) - C.setup_status_variables(self) + C.setup_status_variables(self, MS=self.MS) def run(self, u0, t0, Tend): """ @@ -153,8 +153,7 @@ class controller_nonMPI(controller): C.post_step_processing(self, S) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: - [C.prepare_next_block(self, S, len(active_slots), time, Tend) for S in self.MS] - C.prepare_next_block_nonMPI(self, self.MS, active_slots, time, Tend) + [C.prepare_next_block(self, S, len(active_slots), time, Tend, MS=MS_active) for S in self.MS] # setup the times of the steps for the next block for i in range(1, len(active_slots)): @@ -326,7 +325,7 @@ class controller_nonMPI(controller): switcher.get(stage, self.default)(MS_running) - return all([S.status.done for S in local_MS_active]) + return all(S.status.done for S in local_MS_active) def spread(self, local_MS_running): """ @@ -351,7 +350,7 @@ class controller_nonMPI(controller): S.status.stage = 'IT_CHECK' for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: - C.post_spread_processing(self, S) + C.post_spread_processing(self, S, MS=local_MS_running) def predict(self, local_MS_running): """ @@ -487,7 +486,7 @@ class controller_nonMPI(controller): # receive values self.recv_full(S, level=0) # compute current residual - S.levels[0].sweep.compute_residual() + S.levels[0].sweep.compute_residual(stage='IT_CHECK') for S in local_MS_running: if S.status.iter > 0: @@ -496,8 +495,8 @@ class controller_nonMPI(controller): # decide if the step is done, needs to be restarted and other things convergence related for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: - C.post_iteration_processing(self, S) - C.convergence_control(self, S) + C.post_iteration_processing(self, S, MS=local_MS_running) + C.convergence_control(self, S, MS=local_MS_running) for S in local_MS_running: if not S.status.first: @@ -511,7 +510,7 @@ class controller_nonMPI(controller): if self.params.all_to_done: for hook in self.hooks: hook.pre_comm(step=S, level_number=0) - S.status.done = all([T.status.done for T in local_MS_running]) + S.status.done = all(T.status.done for T in local_MS_running) for hook in self.hooks: hook.post_comm(step=S, level_number=0, add_to_stats=True) @@ -521,7 +520,7 @@ class controller_nonMPI(controller): for hook in self.hooks: hook.pre_iteration(step=S, level_number=0) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: - C.pre_iteration_processing(self, S) + C.pre_iteration_processing(self, S, MS=local_MS_running) if len(S.levels) > 1: # MLSDC or PFASST S.status.stage = 'IT_DOWN' @@ -565,7 +564,7 @@ class controller_nonMPI(controller): for hook in self.hooks: hook.pre_sweep(step=S, level_number=0) S.levels[0].sweep.update_nodes() - S.levels[0].sweep.compute_residual() + S.levels[0].sweep.compute_residual(stage='IT_FINE') for hook in self.hooks: hook.post_sweep(step=S, level_number=0) @@ -598,7 +597,7 @@ class controller_nonMPI(controller): for hook in self.hooks: hook.pre_sweep(step=S, level_number=l) S.levels[l].sweep.update_nodes() - S.levels[l].sweep.compute_residual() + S.levels[l].sweep.compute_residual(stage='IT_DOWN') for hook in self.hooks: hook.post_sweep(step=S, level_number=l) @@ -626,7 +625,7 @@ class controller_nonMPI(controller): for hook in self.hooks: hook.pre_sweep(step=S, level_number=len(S.levels) - 1) 
S.levels[-1].sweep.update_nodes() - S.levels[-1].sweep.compute_residual() + S.levels[-1].sweep.compute_residual(stage='IT_COARSE') for hook in self.hooks: hook.post_sweep(step=S, level_number=len(S.levels) - 1) @@ -665,7 +664,7 @@ class controller_nonMPI(controller): for hook in self.hooks: hook.pre_sweep(step=S, level_number=l - 1) S.levels[l - 1].sweep.update_nodes() - S.levels[l - 1].sweep.compute_residual() + S.levels[l - 1].sweep.compute_residual(stage='IT_UP') for hook in self.hooks: hook.post_sweep(step=S, level_number=l - 1) diff --git a/pySDC/implementations/convergence_controller_classes/adaptive_collocation.py b/pySDC/implementations/convergence_controller_classes/adaptive_collocation.py index 4c83395ae1827aae18f75a7e761d71b93ea05f16..743335bc51b0ba08842124cbc7d364ddfa24c75e 100644 --- a/pySDC/implementations/convergence_controller_classes/adaptive_collocation.py +++ b/pySDC/implementations/convergence_controller_classes/adaptive_collocation.py @@ -63,7 +63,7 @@ class AdaptiveCollocation(ConvergenceController): } # only these keys can be changed by this convergence controller - self.allowed_sweeper_keys = ['quad_type', 'num_nodes', 'node_type'] + self.allowed_sweeper_keys = ['quad_type', 'num_nodes', 'node_type', 'do_coll_update'] self.allowed_level_keys = ['restol'] # add the keys to lists so we know what we need to change later diff --git a/pySDC/implementations/convergence_controller_classes/adaptivity.py b/pySDC/implementations/convergence_controller_classes/adaptivity.py index 1aac5c55f0befb6bd00914fa99b2218c99ef089e..d824eb5e3494542b0a21f6634fbb1ad831825329 100644 --- a/pySDC/implementations/convergence_controller_classes/adaptivity.py +++ b/pySDC/implementations/convergence_controller_classes/adaptivity.py @@ -6,7 +6,6 @@ from pySDC.implementations.convergence_controller_classes.step_size_limiter impo from pySDC.implementations.convergence_controller_classes.basic_restarting import ( BasicRestartingNonMPI, ) -from pySDC.implementations.hooks.log_step_size import LogStepSize class AdaptivityBase(ConvergenceController): @@ -35,7 +34,10 @@ class AdaptivityBase(ConvergenceController): "control_order": -50, "beta": 0.9, } + from pySDC.implementations.hooks.log_step_size import LogStepSize + controller.add_hook(LogStepSize) + return {**defaults, **super().setup(controller, params, description, **kwargs)} def dependencies(self, controller, description, **kwargs): @@ -49,11 +51,11 @@ class AdaptivityBase(ConvergenceController): Returns: None """ + step_limiter_keys = ['dt_min', 'dt_max', 'dt_slope_min', 'dt_slope_max'] + available_keys = [me for me in step_limiter_keys if me in self.params.__dict__.keys()] - if "dt_min" in self.params.__dict__.keys() or "dt_max" in self.params.__dict__.keys(): - step_limiter_params = dict() - step_limiter_params["dt_min"] = self.params.__dict__.get("dt_min", 0) - step_limiter_params["dt_max"] = self.params.__dict__.get("dt_max", np.inf) + if len(available_keys) > 0: + step_limiter_params = {key: self.params.__dict__[key] for key in available_keys} controller.add_convergence_controller(StepSizeLimiter, params=step_limiter_params, description=description) return None @@ -121,14 +123,24 @@ class AdaptivityBase(ConvergenceController): # see if we try to avoid restarts if self.params.get('avoid_restarts'): more_iter_needed = max([L.status.iter_to_convergence for L in S.levels]) + k_final = S.status.iter + more_iter_needed rho = max([L.status.contraction_factor for L in S.levels]) + coll_order = S.levels[0].sweep.coll.order if rho > 1: S.status.restart 
= True self.log(f"Convergence factor = {rho:.2e} > 1 -> restarting", S) - elif S.status.iter + more_iter_needed > 2 * S.params.maxiter: + elif k_final > 2 * S.params.maxiter: + S.status.restart = True + self.log( + f"{more_iter_needed} more iterations needed for convergence -> restart is more efficient", S + ) + elif k_final > coll_order: S.status.restart = True - self.log(f"{more_iter_needed} more iterations needed for convergence -> restart", S) + self.log( + f"{more_iter_needed} more iterations needed for convergence -> restart because collocation problem would be over resolved", + S, + ) else: S.status.force_continue = True self.log(f"{more_iter_needed} more iterations needed for convergence -> no restart", S) @@ -158,6 +170,27 @@ class Adaptivity(AdaptivityBase): The behaviour in multi-step SDC is not well studied and it is unclear if anything useful happens there. """ + def setup(self, controller, params, description, **kwargs): + """ + Define default parameters here. + + Default parameters are: + - control_order (int): The order relative to other convergence controllers + - beta (float): The safety factor + + Args: + controller (pySDC.Controller): The controller + params (dict): The params passed for this specific convergence controller + description (dict): The description object used to instantiate the controller + + Returns: + (dict): The updated params dictionary + """ + defaults = { + "embedded_error_flavor": 'standard', + } + return {**defaults, **super().setup(controller, params, description, **kwargs)} + def dependencies(self, controller, description, **kwargs): """ Load the embedded error estimator. @@ -171,10 +204,10 @@ class Adaptivity(AdaptivityBase): """ from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError - super(Adaptivity, self).dependencies(controller, description) + super().dependencies(controller, description) controller.add_convergence_controller( - EstimateEmbeddedError.get_implementation("nonMPI" if not self.params.useMPI else "MPI"), + EstimateEmbeddedError.get_implementation(self.params.embedded_error_flavor, self.params.useMPI), description=description, ) @@ -268,28 +301,10 @@ class AdaptivityRK(Adaptivity): Adaptivity for Runge-Kutta methods. Basically, we need to change the order in the step size update """ - def check_parameters(self, controller, params, description, **kwargs): - """ - Check whether parameters are compatible with whatever assumptions went into the step size functions etc. - For adaptivity, we need to know the order of the scheme. - - Args: - controller (pySDC.Controller): The controller - params (dict): The params passed for this specific convergence controller - description (dict): The description object used to instantiate the controller - - Returns: - bool: Whether the parameters are compatible - str: The error message - """ - if "update_order" not in params.keys(): - return ( - False, - "Adaptivity needs an order for the update rule! 
Please set some up in \ -description['convergence_control_params']['update_order']!", - ) - - return super(AdaptivityRK, self).check_parameters(controller, params, description) + def setup(self, controller, params, description, **kwargs): + defaults = {} + defaults['update_order'] = params.get('update_order', description['sweeper_class'].get_update_order()) + return {**defaults, **super().setup(controller, params, description, **kwargs)} def get_new_step_size(self, controller, S, **kwargs): """ @@ -351,6 +366,7 @@ class AdaptivityResidual(AdaptivityBase): "e_tol_low": 0, "e_tol": np.inf, "max_restarts": 99 if "e_tol_low" in params else None, + "allowed_modifications": ['increase', 'decrease'], # what we are allowed to do with the step size } return {**defaults, **params} @@ -424,10 +440,10 @@ smaller than 0!", dt_planned = L.status.dt_new if L.status.dt_new is not None else L.params.dt - if res > self.params.e_tol: + if res > self.params.e_tol and 'decrease' in self.params.allowed_modifications: L.status.dt_new = min([dt_planned, L.params.dt / 2.0]) self.log(f'Adjusting step size from {L.params.dt:.2e} to {L.status.dt_new:.2e}', S) - elif res < self.params.e_tol_low: + elif res < self.params.e_tol_low and 'increase' in self.params.allowed_modifications: L.status.dt_new = max([dt_planned, L.params.dt * 2.0]) self.log(f'Adjusting step size from {L.params.dt:.2e} to {L.status.dt_new:.2e}', S) @@ -598,3 +614,124 @@ class AdaptivityCollocation(AdaptivityBase): if e_est >= self.params.e_tol: S.status.restart = True self.log(f"Restarting: e={e_est:.2e} >= e_tol={self.params.e_tol:.2e}", S) + + +class AdaptivityExtrapolationWithinQ(AdaptivityBase): + """ + Class to compute time step size adaptively based on error estimate obtained from extrapolation within the quadrature + nodes. + + This error estimate depends on solving the collocation problem exactly, so make sure you set a sufficient stopping criterion. + """ + + def setup(self, controller, params, description, **kwargs): + """ + Add a default value for control order to the parameters. + + Args: + controller (pySDC.Controller): The controller + params (dict): Parameters for the convergence controller + description (dict): The description object used to instantiate the controller + + Returns: + dict: Updated parameters + """ + defaults = { + **super().setup(controller, params, description, **kwargs), + } + return defaults + + def dependencies(self, controller, description, **kwargs): + """ + Load the error estimator. + + Args: + controller (pySDC.Controller): The controller + description (dict): The description object used to instantiate the controller + + Returns: + None + """ + from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import ( + EstimateExtrapolationErrorWithinQ, + ) + + super().dependencies(controller, description) + + controller.add_convergence_controller( + EstimateExtrapolationErrorWithinQ, + description=description, + ) + return None + + def check_parameters(self, controller, params, description, **kwargs): + """ + Check whether parameters are compatible with whatever assumptions went into the step size functions etc. + For adaptivity, we need to know the order of the scheme. 
+ + Args: + controller (pySDC.Controller): The controller + params (dict): The params passed for this specific convergence controller + description (dict): The description object used to instantiate the controller + + Returns: + bool: Whether the parameters are compatible + str: The error message + """ + if "e_tol" not in params.keys(): + return ( + False, + "Adaptivity needs a local tolerance! Please pass `e_tol` to the parameters for this convergence controller!", + ) + + return True, "" + + def get_new_step_size(self, controller, S, **kwargs): + """ + Determine a step size for the next step from the error estimate. + + Args: + controller (pySDC.Controller): The controller + S (pySDC.Step): The current step + + Returns: + None + """ + # check if the step is converged + from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence + + if CheckConvergence.check_convergence(S): + L = S.levels[0] + + # compute next step size + order = L.sweep.coll.num_nodes + 1 + + e_est = self.get_local_error_estimate(controller, S) + L.status.dt_new = self.compute_optimal_step_size( + self.params.beta, L.params.dt, self.params.e_tol, e_est, order + ) + + self.log( + f'Error target: {self.params.e_tol:.2e}, error estimate: {e_est:.2e}, update_order: {order}', + S, + level=10, + ) + self.log(f'Adjusting step size from {L.params.dt:.2e} to {L.status.dt_new:.2e}', S) + + # check if we need to restart + S.status.restart = e_est > self.params.e_tol + + return None + + def get_local_error_estimate(self, controller, S, **kwargs): + """ + Get the embedded error estimate of the finest level of the step. + + Args: + controller (pySDC.Controller): The controller + S (pySDC.Step): The current step + + Returns: + float: Embedded error estimate + """ + return S.levels[0].status.error_extrapolation_estimate diff --git a/pySDC/implementations/convergence_controller_classes/basic_restarting.py b/pySDC/implementations/convergence_controller_classes/basic_restarting.py index 863a6b87d1dce20fb972e0fe0857c539fb89fb7e..596e093168496e4f1e54ebf25ecb5be422240ab0 100644 --- a/pySDC/implementations/convergence_controller_classes/basic_restarting.py +++ b/pySDC/implementations/convergence_controller_classes/basic_restarting.py @@ -1,7 +1,6 @@ from pySDC.core.ConvergenceController import ConvergenceController, Pars from pySDC.implementations.convergence_controller_classes.spread_step_sizes import ( - SpreadStepSizesBlockwiseNonMPI, - SpreadStepSizesBlockwiseMPI, + SpreadStepSizesBlockwise, ) from pySDC.core.Errors import ConvergenceError @@ -42,7 +41,7 @@ class BasicRestarting(ConvergenceController): params (dict): Parameters for the convergence controller description (dict): The description object used to instantiate the controller """ - super(BasicRestarting, self).__init__(controller, params, description) + super().__init__(controller, params, description) self.buffers = Pars({"restart": False, "max_restart_reached": False}) def setup(self, controller, params, description, **kwargs): @@ -68,8 +67,13 @@ class BasicRestarting(ConvergenceController): "control_order": 95, "max_restarts": 10, "crash_after_max_restarts": True, + "step_size_spreader": SpreadStepSizesBlockwise.get_implementation(useMPI=params['useMPI']), } + from pySDC.implementations.hooks.log_restarts import LogRestarts + + controller.add_hook(LogRestarts) + return {**defaults, **super().setup(controller, params, description, **kwargs)} def setup_status_variables(self, controller, **kwargs): @@ -137,8 +141,8 @@ class 
BasicRestarting(ConvergenceController): Args: controller (pySDC.Controller): The controller - MS (list): List of the steps of the controller - active_slots (list): Index list of active steps + S (pySDC.Step): The current step + size (int): The number of ranks time (list): List containing the time of all the steps Tend (float): Final time of the simulation @@ -170,33 +174,6 @@ class BasicRestartingNonMPI(BasicRestarting): return None - def setup(self, controller, params, description, **kwargs): - """ - Define parameters here. - - Default parameters are: - - control_order (int): The order relative to other convergence controllers - - max_restarts (int): Maximum number of restarts we allow each step before we just move on with whatever we - have - - step_size_spreader (pySDC.ConvergenceController): A convergence controller that takes care of distributing - the steps sizes between blocks - - Args: - controller (pySDC.Controller): The controller - params (dict): The params passed for this specific convergence controller - description (dict): The description object used to instantiate the controller - - Returns: - (dict): The updated params dictionary - """ - defaults = { - "step_size_spreader": SpreadStepSizesBlockwiseNonMPI, - } - return { - **defaults, - **super(BasicRestartingNonMPI, self).setup(controller, params, description), - } - def determine_restart(self, controller, S, **kwargs): """ Restart all steps after the first one which wants to be restarted as well, but also check if we lost patience @@ -241,37 +218,10 @@ class BasicRestartingMPI(BasicRestarting): params (dict): Parameters for the convergence controller description (dict): The description object used to instantiate the controller """ - super(BasicRestartingMPI, self).__init__(controller, params, description) + super().__init__(controller, params, description) self.buffers = Pars({"restart": False, "max_restart_reached": False, 'restart_earlier': False}) - def setup(self, controller, params, description, **kwargs): - """ - Define parameters here. - - Default parameters are: - - control_order (int): The order relative to other convergence controllers - - max_restarts (int): Maximum number of restarts we allow each step before we just move on with whatever we - have - - step_size_spreader (pySDC.ConvergenceController): A convergence controller that takes care of distributing - the steps sizes between blocks - - Args: - controller (pySDC.Controller): The controller - params (dict): The params passed for this specific convergence controller - description (dict): The description object used to instantiate the controller - - Returns: - (dict): The updated params dictionary - """ - defaults = { - "step_size_spreader": SpreadStepSizesBlockwiseMPI, - } - return { - **defaults, - **super(BasicRestartingMPI, self).setup(controller, params, description), - } - - def determine_restart(self, controller, S, **kwargs): + def determine_restart(self, controller, S, comm, **kwargs): """ Restart all steps after the first one which wants to be restarted as well, but also check if we lost patience with the restarts and want to move on anyways. 
@@ -279,11 +229,11 @@ class BasicRestartingMPI(BasicRestarting): Args: controller (pySDC.Controller): The controller S (pySDC.Step): The current step + comm (mpi4py.MPI.Intracomm): Communicator Returns: None """ - comm = kwargs['comm'] assert S.status.slot == comm.rank if S.status.first: diff --git a/pySDC/implementations/convergence_controller_classes/check_convergence.py b/pySDC/implementations/convergence_controller_classes/check_convergence.py index d27b108e213de9dba03c22b5520ebd26660402eb..524c870a4a19b7dfe44c6af5f808423720415a97 100644 --- a/pySDC/implementations/convergence_controller_classes/check_convergence.py +++ b/pySDC/implementations/convergence_controller_classes/check_convergence.py @@ -47,7 +47,7 @@ class CheckConvergence(ConvergenceController): ) controller.add_convergence_controller( - EstimateEmbeddedError.get_implementation("nonMPI" if not self.params.useMPI else "MPI"), + EstimateEmbeddedError, description=description, ) @@ -67,7 +67,6 @@ class CheckConvergence(ConvergenceController): """ # do all this on the finest level L = S.levels[0] - L.sweep.compute_residual() # get residual and check against prescribed tolerance (plus check number of iterations) iter_converged = S.status.iter >= S.params.maxiter diff --git a/pySDC/implementations/convergence_controller_classes/estimate_contraction_factor.py b/pySDC/implementations/convergence_controller_classes/estimate_contraction_factor.py index 63a67a8bdbad45d038dc297b816dbd4c0e3b8f9e..f78c9c97914c6044925d8444888e2bcdec23888b 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_contraction_factor.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_contraction_factor.py @@ -35,7 +35,7 @@ class EstimateContractionFactor(ConvergenceController): None """ controller.add_convergence_controller( - EstimateEmbeddedError.get_implementation("nonMPI" if not self.params.useMPI else "MPI"), + EstimateEmbeddedError, description=description, ) diff --git a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py index 86568107638c297f58c522e1df2be5547ed0d79d..3ba6efc221a2afd88d7b08dce3e354386eb51802 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py @@ -2,7 +2,6 @@ import numpy as np from pySDC.core.ConvergenceController import ConvergenceController, Pars, Status from pySDC.implementations.convergence_controller_classes.store_uold import StoreUOld -from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate from pySDC.implementations.sweeper_classes.Runge_Kutta import RungeKutta @@ -15,21 +14,8 @@ class EstimateEmbeddedError(ConvergenceController): you make sure your preconditioner is compatible, which you have to just try out... """ - def __init__(self, controller, params, description, **kwargs): - """ - Initialisation routine. Add the buffers for communication. 
- - Args: - controller (pySDC.Controller): The controller - params (dict): Parameters for the convergence controller - description (dict): The description object used to instantiate the controller - """ - super(EstimateEmbeddedError, self).__init__(controller, params, description, **kwargs) - self.buffers = Pars({'e_em_last': 0.0}) - controller.add_hook(LogEmbeddedErrorEstimate) - @classmethod - def get_implementation(cls, flavor): + def get_implementation(cls, flavor='standard', useMPI=False): """ Retrieve the implementation for a specific flavor of this class. @@ -39,10 +25,15 @@ class EstimateEmbeddedError(ConvergenceController): Returns: cls: The child class that implements the desired flavor """ - if flavor == 'MPI': - return EstimateEmbeddedErrorMPI - elif flavor == 'nonMPI': - return EstimateEmbeddedErrorNonMPI + if flavor == 'standard': + return cls + elif flavor == 'linearized': + if useMPI: + return EstimateEmbeddedErrorLinearizedMPI + else: + return EstimateEmbeddedErrorLinearizedNonMPI + elif flavor == 'collocation': + return EstimateEmbeddedErrorCollocation else: raise NotImplementedError(f'Flavor {flavor} of EstimateEmbeddedError is not implemented!') @@ -67,7 +58,8 @@ class EstimateEmbeddedError(ConvergenceController): def dependencies(self, controller, description, **kwargs): """ - Load the convergence controller that stores the solution of the last sweep unless we are doing Runge-Kutta + Load the convergence controller that stores the solution of the last sweep unless we are doing Runge-Kutta. + Add the hook for recording the error. Args: controller (pySDC.Controller): The controller @@ -78,6 +70,10 @@ class EstimateEmbeddedError(ConvergenceController): """ if RungeKutta not in description["sweeper_class"].__bases__: controller.add_convergence_controller(StoreUOld, description=description) + + from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate + + controller.add_hook(LogEmbeddedErrorEstimate) return None def estimate_embedded_error_serial(self, L): @@ -125,8 +121,41 @@ class EstimateEmbeddedError(ConvergenceController): def reset_status_variables(self, controller, **kwargs): self.setup_status_variables(controller, **kwargs) + def post_iteration_processing(self, controller, S, **kwargs): + """ + Estimate the local error here. + + If you are doing MSSDC, this is the global error within the block in Gauss-Seidel mode. + In Jacobi mode, I haven't thought about what this is. + + Args: + controller (pySDC.Controller): The controller + S (pySDC.Step): The current step + + Returns: + None + """ + + if S.status.iter > 0 or self.params.sweeper_type == "RK": + for L in S.levels: + L.status.error_embedded_estimate = max([self.estimate_embedded_error_serial(L), np.finfo(float).eps]) + + return None + + +class EstimateEmbeddedErrorLinearizedNonMPI(EstimateEmbeddedError): + def __init__(self, controller, params, description, **kwargs): + """ + Initialisation routine. Add the buffers for communication. + + Args: + controller (pySDC.Controller): The controller + params (dict): Parameters for the convergence controller + description (dict): The description object used to instantiate the controller + """ + super().__init__(controller, params, description, **kwargs) + self.buffers = Pars({'e_em_last': 0.0}) -class EstimateEmbeddedErrorNonMPI(EstimateEmbeddedError): def reset_buffers_nonMPI(self, controller, **kwargs): """ Reset buffers for imitated communication. 
@@ -168,7 +197,19 @@ level" return None -class EstimateEmbeddedErrorMPI(EstimateEmbeddedError): +class EstimateEmbeddedErrorLinearizedMPI(EstimateEmbeddedError): + def __init__(self, controller, params, description, **kwargs): + """ + Initialisation routine. Add the buffers for communication. + + Args: + controller (pySDC.Controller): The controller + params (dict): Parameters for the convergence controller + description (dict): The description object used to instantiate the controller + """ + super().__init__(controller, params, description, **kwargs) + self.buffers = Pars({'e_em_last': 0.0}) + def post_iteration_processing(self, controller, S, **kwargs): """ Compute embedded error estimate on the last node of each level @@ -251,6 +292,9 @@ class EstimateEmbeddedErrorCollocation(ConvergenceController): controller.add_convergence_controller( AdaptiveCollocation, params=self.params.adaptive_coll_params, description=description ) + from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate + + controller.add_hook(LogEmbeddedErrorEstimate) def post_iteration_processing(self, controller, step, **kwargs): """ diff --git a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py index 689362a527384d96b5e6ad81bb9553a7dac206c1..faf0fbdbb3e882fc5687f57d16dcfc46a073af26 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py @@ -27,7 +27,7 @@ class EstimateExtrapolationErrorBase(ConvergenceController): """ self.prev = Status(["t", "u", "f", "dt"]) # store solutions etc. of previous steps here self.coeff = Status(["u", "f", "prefactor"]) # store coefficients for extrapolation here - super(EstimateExtrapolationErrorBase, self).__init__(controller, params, description) + super().__init__(controller, params, description) controller.add_hook(LogExtrapolationErrorEstimate) def setup(self, controller, params, description, **kwargs): @@ -52,8 +52,8 @@ class EstimateExtrapolationErrorBase(ConvergenceController): default_params = { "control_order": -75, - "use_adaptivity": any([me == Adaptivity for me in description.get("convergence_controllers", {})]), - "use_HotRod": any([me == HotRod for me in description.get("convergence_controllers", {})]), + "use_adaptivity": True in [me == Adaptivity for me in description.get("convergence_controllers", {})], + "use_HotRod": True in [me == HotRod for me in description.get("convergence_controllers", {})], "order_time_marching": description["step_params"]["maxiter"], } @@ -265,7 +265,7 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): Returns: dict: Updated parameters with default values """ - default_params = super(EstimateExtrapolationErrorNonMPI, self).setup(controller, params, description) + default_params = super().setup(controller, params, description) non_mpi_defaults = { "no_storage": False, @@ -283,7 +283,7 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): Returns: None """ - super(EstimateExtrapolationErrorNonMPI, self).setup_status_variables(controller, **kwargs) + super().setup_status_variables(controller, **kwargs) self.prev.t = np.array([None] * self.params.n) self.prev.dt = np.array([None] * self.params.n) @@ -311,7 +311,11 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): # compute the extrapolation 
coefficients if needed if ( - (None in self.coeff.u or self.params.use_adaptivity) + ( + None in self.coeff.u + or self.params.use_adaptivity + or (not self.params.no_storage and S.status.time_size > 1) + ) and None not in self.prev.t and t_eval > max(self.prev.t) ): @@ -327,21 +331,23 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): return None - def prepare_next_block_nonMPI(self, controller, MS, active_slots, time, Tend, **kwargs): + def prepare_next_block(self, controller, S, size, time, Tend, MS, **kwargs): """ If the no-memory-overhead version is used, we need to delete stuff that shouldn't be available. Otherwise, we need to store all the stuff that we can. Args: controller (pySDC.Controller): The controller - MS (list): All steps of the controller - active_slots (list): Index list of active steps + S (pySDC.step): The current step + size (int): Number of ranks time (float): The current time Tend (float): The final time + MS (list): Active steps Returns: None """ + # delete values that should not be available in the next step if self.params.no_storage: self.prev.t = np.array([None] * self.params.n) @@ -351,13 +357,12 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): else: # decide where we need to restart to store everything up to that point - MS_active = [MS[i] for i in range(len(MS)) if i in active_slots] - restarts = [S.status.restart for S in MS_active] - restart_at = np.where(restarts)[0][0] if True in restarts else len(MS_active) + restarts = [S.status.restart for S in MS] + restart_at = np.where(restarts)[0][0] if True in restarts else len(MS) # store values in the current block that don't need restarting - if restart_at > 0: - [self.store_values(S) for S in MS_active[:restart_at]] + if restart_at > S.status.slot: + self.store_values(S) return None @@ -410,4 +415,89 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): else: S.levels[0].status.error_extrapolation_estimate = None + +class EstimateExtrapolationErrorWithinQ(EstimateExtrapolationErrorBase): + """ + This convergence controller estimates the local error based on comparing the SDC solution to an extrapolated + solution within the quadrature matrix. Collocation methods compute a high order solution from a linear combination + of solutions at intermediate time points. While the intermediate solutions (a.k.a. stages) don't share the order of + accuracy with the solution at the end of the interval, for SDC we know that the order is equal to the number of + nodes + 1 (locally). + That means we can do a Taylor expansion around the end point of the interval to higher order and after cancelling + terms just like we are used to with the extrapolation based error estimate across multiple steps, we get an error + estimate that is of the order accuracy of the stages. + This can be used for adaptivity, for instance, with the nice property that it doesn't matter how we arrived at the + converged collocation solution, as long as we did. We don't rely on knowing the order of accuracy after every sweep, + only after convergence of the collocation problem has been achieved, which we can check from the residual. + """ + + def setup(self, controller, params, description, **kwargs): + """ + We need this convergence controller to become active after the check for convergence, because we need the step + to be converged. 
+ + Args: + controller (pySDC.Controller): The controller + params (dict): The params passed for this specific convergence controller + description (dict): The description object used to instantiate the controller + + Returns: + dict: Updated parameters with default values + """ + num_nodes = description['sweeper_params']['num_nodes'] + + default_params = { + 'Taylor_order': 2 * num_nodes, + 'n': num_nodes, + } + + return {**super().setup(controller, params, description, **kwargs), **default_params} + + def post_iteration_processing(self, controller, S, **kwargs): + """ + Compute the extrapolated error estimate here if the step is converged. + + Args: + controller (pySDC.Controller): The controller + S (pySDC.Step): The current step + + Returns: + None + """ + from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence + + if not CheckConvergence.check_convergence(S): + return None + + lvl = S.levels[0] + + nodes_ = lvl.sweep.coll.nodes * S.dt + nodes = S.time + np.append(0, nodes_[:-1]) + t_eval = S.time + nodes_[-1] + + dts = np.append(nodes_[0], nodes_[1:] - nodes_[:-1]) + self.params.Taylor_order = 2 * len(nodes) + self.params.n = len(nodes) + + # compute the extrapolation coefficients + # TODO: Maybe this can be reused + self.get_extrapolation_coefficients(nodes, dts, t_eval) + + # compute the extrapolated solution + if type(lvl.f[0]) == imex_mesh: + f = [me.impl + me.expl for me in lvl.f] + elif type(lvl.f[0]) == mesh: + f = lvl.f + else: + raise DataError( + f"Unable to store f from datatype {type(lvl.f[0])}, extrapolation based error estimate only\ + works with types imex_mesh and mesh" + ) + + u_ex = lvl.u[-1] * 0.0 + for i in range(self.params.n): + u_ex += self.coeff.u[i] * lvl.u[i] + self.coeff.f[i] * f[i] + + # store the error + lvl.status.error_extrapolation_estimate = abs(u_ex - lvl.u[-1]) * self.coeff.prefactor return None diff --git a/pySDC/implementations/convergence_controller_classes/hotrod.py b/pySDC/implementations/convergence_controller_classes/hotrod.py index eeefceb44f9822aec0c38cc1a7e47b18488e4704..5e49cf57017b1d8330549de1149a76b1866df39d 100644 --- a/pySDC/implementations/convergence_controller_classes/hotrod.py +++ b/pySDC/implementations/convergence_controller_classes/hotrod.py @@ -48,21 +48,17 @@ class HotRod(ConvergenceController): Returns: None """ - if not self.params.useMPI: - from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import ( - EstimateEmbeddedErrorNonMPI, - ) + from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError - controller.add_convergence_controller(EstimateEmbeddedErrorNonMPI, description=description) + controller.add_convergence_controller( + EstimateEmbeddedError.get_implementation(flavor='linearized', useMPI=self.params.useMPI), + description=description, + ) + if not self.params.useMPI: controller.add_convergence_controller( EstimateExtrapolationErrorNonMPI, description=description, params={'no_storage': self.params.no_storage} ) else: - from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import ( - EstimateEmbeddedErrorMPI, - ) - - controller.add_convergence_controller(EstimateEmbeddedErrorMPI, description=description) raise NotImplementedError("Don't know how to estimate extrapolated error with MPI") def check_parameters(self, controller, params, description, **kwargs): diff --git a/pySDC/implementations/convergence_controller_classes/inexactness.py 
b/pySDC/implementations/convergence_controller_classes/inexactness.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a6ec9719cb7afb863479b5aeea2f0a20cbc8825
--- /dev/null
+++ b/pySDC/implementations/convergence_controller_classes/inexactness.py
@@ -0,0 +1,46 @@
+from pySDC.core.ConvergenceController import ConvergenceController
+
+
+class NewtonInexactness(ConvergenceController):
+    """
+    Gradually refine Newton tolerance based on SDC residual.
+    Be aware that the problem needs a parameter called "newton_tol" which controls the tolerance for the Newton solver for this to work!
+    """
+
+    def setup(self, controller, params, description, **kwargs):
+        """
+        Define default parameters here.
+
+        Args:
+            controller (pySDC.Controller): The controller
+            params (dict): The params passed for this specific convergence controller
+            description (dict): The description object used to instantiate the controller
+
+        Returns:
+            (dict): The updated params dictionary
+        """
+        defaults = {
+            "control_order": 500,
+            "ratio": 1e-2,
+            "min_tol": 0,
+            "max_tol": 1e99,
+        }
+        return {**defaults, **super().setup(controller, params, description, **kwargs)}
+
+    def post_iteration_processing(self, controller, step, **kwargs):
+        """
+        Change the Newton tolerance after every iteration.
+
+        Args:
+            controller (pySDC.Controller): The controller
+            step (pySDC.Step): The current step
+
+        Returns:
+            None
+        """
+        for lvl in step.levels:
+            lvl.prob.newton_tol = max(
+                [min([lvl.status.residual * self.params.ratio, self.params.max_tol]), self.params.min_tol]
+            )
+
+            self.log(f'Changed Newton tolerance to {lvl.prob.newton_tol:.2e}', step)
diff --git a/pySDC/implementations/convergence_controller_classes/interpolate_between_restarts.py b/pySDC/implementations/convergence_controller_classes/interpolate_between_restarts.py
index 95e1f11bdee873db0e4bdaecb309fb708ebddf06..a80cb639d065fa507ee9d76eb9eb6c583d7f0701 100644
--- a/pySDC/implementations/convergence_controller_classes/interpolate_between_restarts.py
+++ b/pySDC/implementations/convergence_controller_classes/interpolate_between_restarts.py
@@ -1,7 +1,6 @@
 import numpy as np
 from pySDC.core.ConvergenceController import ConvergenceController, Status
 from pySDC.core.Lagrange import LagrangeApproximation
-from pySDC.core.Collocation import CollBase
 
 
 class InterpolateBetweenRestarts(ConvergenceController):
@@ -74,19 +73,20 @@ class InterpolateBetweenRestarts(ConvergenceController):
             controller (pySDC.Controller): The controller
             step (pySDC.Step.step): The current step
         """
-        if step.status.restart and all([level.status.dt_new for level in step.levels]):
+        if step.status.restart and all(level.status.dt_new for level in step.levels):
             for level in step.levels:
                 nodes_old = level.sweep.coll.nodes.copy()
                 nodes_new = level.sweep.coll.nodes.copy() * level.status.dt_new / level.params.dt
 
                 interpolator = LagrangeApproximation(points=np.append(0, nodes_old))
-                self.status.u_inter += [(interpolator.getInterpolationMatrix(np.append(0, nodes_new)) @ level.u[:])[:]]
-                self.status.f_inter += [(interpolator.getInterpolationMatrix(np.append(0, nodes_new)) @ level.f[:])[:]]
-
+                interpolation_matrix = interpolator.getInterpolationMatrix(np.append(0, nodes_new))
+                self.status.u_inter += [(interpolation_matrix @ level.u[:])[:]]
+                self.status.f_inter += [(interpolation_matrix @ level.f[:])[:]]
 
                 self.status.perform_interpolation = True
                 self.log(
                     f'Interpolating before restart from dt={level.params.dt:.2e} to dt={level.status.dt_new:.2e}', step
                 )
+        else:
self.status.perform_interpolation = False diff --git a/pySDC/implementations/convergence_controller_classes/spread_step_sizes.py b/pySDC/implementations/convergence_controller_classes/spread_step_sizes.py index 41e59422d1cfa3f6fd9c631e5059021738a2d266..85abfcd62090afb49cd5439a22b384cc6d77b59d 100644 --- a/pySDC/implementations/convergence_controller_classes/spread_step_sizes.py +++ b/pySDC/implementations/convergence_controller_classes/spread_step_sizes.py @@ -2,7 +2,7 @@ import numpy as np from pySDC.core.ConvergenceController import ConvergenceController -class SpreadStepSizesBlockwiseBase(ConvergenceController): +class SpreadStepSizesBlockwise(ConvergenceController): """ Take the step size from the last step in the block and spread it to all steps in the next block such that every step in a block always has the same step size. @@ -30,56 +30,87 @@ class SpreadStepSizesBlockwiseBase(ConvergenceController): return {**defaults, **super().setup(controller, params, description, **kwargs)} + @classmethod + def get_implementation(cls, useMPI, **kwargs): + """ + Get MPI or non-MPI version + + Args: + useMPI (bool): The implementation that you want + + Returns: + cls: The child class implementing the desired flavor + """ + if useMPI: + return SpreadStepSizesBlockwiseMPI + else: + return SpreadStepSizesBlockwiseNonMPI + -class SpreadStepSizesBlockwiseNonMPI(SpreadStepSizesBlockwiseBase): +class SpreadStepSizesBlockwiseNonMPI(SpreadStepSizesBlockwise): """ Non-MPI version """ - def prepare_next_block_nonMPI(self, controller, MS, active_slots, time, Tend, **kwargs): + def prepare_next_block(self, controller, S, size, time, Tend, MS, **kwargs): """ Spread the step size of the last step with no restarted predecessors to all steps and limit the step size based on Tend Args: controller (pySDC.Controller): The controller - MS (list): List of the steps of the controller - active_slots (list): Index list of active steps - time (list): List containing the time of all the steps + S (pySDC.step): The current step + size (int): The number of ranks + time (list): List containing the time of all the steps handled by the controller (or float in MPI implementation) Tend (float): Final time of the simulation + MS (list): Active steps Returns: None """ + # inactive steps don't need to participate + if S not in MS: + return None + # figure out where the block is restarted - restarts = [MS[p].status.restart for p in active_slots] + restarts = [me.status.restart for me in MS] if True in restarts: restart_at = np.where(restarts)[0][0] else: restart_at = len(restarts) - 1 # Compute the maximum allowed step size based on Tend. - dt_max = (Tend - time[0]) / len(active_slots) + dt_max = (Tend - time[0]) / size # record the step sizes to restart with from all the levels of the step - new_steps = [None] * len(MS[restart_at].levels) + new_steps = [None] * len(S.levels) for i in range(len(MS[restart_at].levels)): l = MS[restart_at].levels[i] # overrule the step size control to reach Tend if needed new_steps[i] = min( - [l.status.dt_new if l.status.dt_new is not None else l.params.dt, max([dt_max, l.params.dt_initial])] + [ + l.status.dt_new if l.status.dt_new is not None else l.params.dt, + max([dt_max, l.params.dt_initial]), + ] ) + if ( + new_steps[i] < (l.status.dt_new if l.status.dt_new is not None else l.params.dt) + and i == 0 + and l.status.dt_new is not None + ): + self.log( + f"Overwriting stepsize control to reach Tend: {Tend:.2e}! 
New step size: {new_steps[i]:.2e}", S
+                )
 
-        for p in active_slots:
-            # spread the step sizes to all levels
-            for i in range(len(MS[p].levels)):
-                MS[p].levels[i].params.dt = new_steps[i]
+        # spread the step sizes to all levels
+        for i in range(len(S.levels)):
+            S.levels[i].params.dt = new_steps[i]
 
         return None
 
 
-class SpreadStepSizesBlockwiseMPI(SpreadStepSizesBlockwiseBase):
-    def prepare_next_block(self, controller, S, size, time, Tend, **kwargs):
+class SpreadStepSizesBlockwiseMPI(SpreadStepSizesBlockwise):
+    def prepare_next_block(self, controller, S, size, time, Tend, comm, **kwargs):
         """
         Spread the step size of the last step with no restarted predecessors to all steps and limit the step size based
         on Tend
@@ -88,13 +119,13 @@ class SpreadStepSizesBlockwiseMPI(SpreadStepSizesBlockwiseBase):
             controller (pySDC.Controller): The controller
             S (pySDC.step): The current step
             size (int): The number of ranks
-            time (list): List containing the time of all the steps
+            time (float): Time of the step on this rank
             Tend (float): Final time of the simulation
+            comm (mpi4py.MPI.Intracomm): Communicator
 
         Returns:
             None
         """
-        comm = kwargs['comm']
 
         # figure out where the block is restarted
         restarts = comm.allgather(S.status.restart)
@@ -104,7 +135,7 @@ class SpreadStepSizesBlockwiseMPI(SpreadStepSizesBlockwiseBase):
             restart_at = len(restarts) - 1
 
         # Compute the maximum allowed step size based on Tend.
-        dt_max = (Tend - time) / size
+        dt_max = comm.bcast((Tend - time) / size, root=restart_at)
 
         # record the step sizes to restart with from all the levels of the step
         new_steps = [None] * len(S.levels)
@@ -119,8 +150,14 @@
                 ]
             )
 
-            if new_steps[i] < l.status.dt_new if l.status.dt_new is not None else l.params.dt:
-                self.log("Overwriting stepsize control to reach Tend", S)
+            # only warn when a step size suggested by the step size control is actually overruled
+            if l.status.dt_new is not None and new_steps[i] < l.status.dt_new:
+                self.log(
+                    f"Overwriting stepsize control to reach Tend: {Tend:.2e}! New step size: {new_steps[i]:.2e}", S
+                )
 
         new_steps = comm.bcast(new_steps, root=restart_at)
 
         # spread the step sizes to all levels
diff --git a/pySDC/implementations/convergence_controller_classes/step_size_limiter.py b/pySDC/implementations/convergence_controller_classes/step_size_limiter.py
index df92c4e4d22d771e071b6c21b75dee93d43d438f..f390864afff66a9563a5d4f34d26330192f2f54e 100644
--- a/pySDC/implementations/convergence_controller_classes/step_size_limiter.py
+++ b/pySDC/implementations/convergence_controller_classes/step_size_limiter.py
@@ -28,6 +28,29 @@ class StepSizeLimiter(ConvergenceController):
         }
         return {**defaults, **super().setup(controller, params, description, **kwargs)}
 
+    def dependencies(self, controller, description, **kwargs):
+        """
+        Load the slope limiter if needed.
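+        If dt_slope_min or dt_slope_max are supplied in the params, a separate StepSizeSlopeLimiter is added to the controller.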
+
+        Args:
+            controller (pySDC.Controller): The controller
+            description (dict): The description object used to instantiate the controller
+
+        Returns:
+            None
+        """
+        slope_limiter_keys = ['dt_slope_min', 'dt_slope_max']
+        available_keys = [me for me in slope_limiter_keys if me in self.params.__dict__.keys()]
+
+        if len(available_keys) > 0:
+            slope_limiter_params = {key: self.params.__dict__[key] for key in available_keys}
+            slope_limiter_params['control_order'] = self.params.control_order - 1
+            controller.add_convergence_controller(
+                StepSizeSlopeLimiter, params=slope_limiter_params, description=description
+            )
+
+        return None
+
     def get_new_step_size(self, controller, S, **kwargs):
         """
         Enforce an upper and lower limit to the step size here.
@@ -55,10 +78,70 @@
             L.status.dt_new = self.params.dt_min
         elif L.status.dt_new > self.params.dt_max:
             self.log(
-                f"Step size exceeds maximum, decreasing from {L.status.dt_new:.2e} to \
-{self.params.dt_max:.2e}",
+                f"Step size exceeds maximum, decreasing from {L.status.dt_new:.2e} to {self.params.dt_max:.2e}",
                 S,
             )
             L.status.dt_new = self.params.dt_max
 
         return None
+
+
+class StepSizeSlopeLimiter(ConvergenceController):
+    """
+    Class to set limits on the slope of the step size during adaptive step size computation at run time
+
+    Please supply dt_slope_min or dt_slope_max in the params to limit in either direction
+    """
+
+    def setup(self, controller, params, description, **kwargs):
+        """
+        Define parameters here
+
+        Args:
+            controller (pySDC.Controller): The controller
+            params (dict): The params passed for this specific convergence controller
+            description (dict): The description object used to instantiate the controller
+
+        Returns:
+            (dict): The updated params dictionary
+        """
+        defaults = {
+            "control_order": 91,
+            "dt_slope_min": 0,
+            "dt_slope_max": np.inf,
+        }
+        return {**defaults, **super().setup(controller, params, description, **kwargs)}
+
+    def get_new_step_size(self, controller, S, **kwargs):
+        """
+        Enforce an upper and lower limit to the slope of the step size here.
+        The final step is adjusted such that we reach Tend as best as possible, which might give step sizes below
+        the lower limit set here.
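+        For example, dt_slope_max equal to 2 means the step size can at most double from one step to the next.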
+
+        Args:
+            controller (pySDC.Controller): The controller
+            S (pySDC.Step): The current step
+
+        Returns:
+            None
+        """
+        for L in S.levels:
+            if L.status.dt_new is not None:
+                if L.status.dt_new / L.params.dt < self.params.dt_slope_min:
+                    dt_new = L.params.dt * self.params.dt_slope_min
+                    self.log(
+                        f"Step size slope is below minimum, increasing from {L.status.dt_new:.2e} to \
+{dt_new:.2e}",
+                        S,
+                    )
+                    L.status.dt_new = dt_new
+                elif L.status.dt_new / L.params.dt > self.params.dt_slope_max:
+                    dt_new = L.params.dt * self.params.dt_slope_max
+                    self.log(
+                        f"Step size slope exceeds maximum, decreasing from {L.status.dt_new:.2e} to \
+{dt_new:.2e}",
+                        S,
+                    )
+                    L.status.dt_new = dt_new
+
+        return None
diff --git a/pySDC/implementations/hooks/log_errors.py b/pySDC/implementations/hooks/log_errors.py
index 971d34ed5d8c1cea489dd0e81a9639887a1af56c..ebedcd4083eb1a76db5a86dc2e5e6a9c2718ce47 100644
--- a/pySDC/implementations/hooks/log_errors.py
+++ b/pySDC/implementations/hooks/log_errors.py
@@ -28,6 +28,8 @@ class LogError(hooks):
 
         L.sweep.compute_end_point()
 
+        u_ref = L.prob.u_exact(t=L.time + L.dt)
+
         self.add_to_stats(
             process=step.status.slot,
             time=L.time + L.dt,
@@ -35,7 +37,16 @@
             iter=step.status.iter,
             sweep=L.status.sweep,
             type=f'e_global{suffix}',
-            value=abs(L.prob.u_exact(t=L.time + L.dt) - L.uend),
+            value=abs(u_ref - L.uend),
+        )
+        self.add_to_stats(
+            process=step.status.slot,
+            time=L.time + L.dt,
+            level=L.level_index,
+            iter=step.status.iter,
+            sweep=L.status.sweep,
+            type=f'e_global_rel{suffix}',
+            value=abs((u_ref - L.uend) / u_ref),
         )
 
     def log_local_error(self, step, level_number, suffix=''):
@@ -54,6 +65,8 @@
 
         L.sweep.compute_end_point()
 
+        value = abs(L.prob.u_exact(t=L.time + L.dt, u_init=L.u[0] * 1.0, t_init=L.time) - L.uend)
+
         self.add_to_stats(
             process=step.status.slot,
             time=L.time + L.dt,
@@ -61,7 +74,19 @@
             iter=step.status.iter,
             sweep=L.status.sweep,
             type=f'e_local{suffix}',
-            value=abs(L.prob.u_exact(t=L.time + L.dt, u_init=L.u[0], t_init=L.time) - L.uend),
+            value=value,
+        )
+
+        self.logger.debug(
+            'Process %2i on time %8.6f at stage %15s: Level: %s -- Iteration: %2i -- Sweep: %2i -- '
+            'local_error: %12.8e',
+            step.status.slot,
+            L.time,
+            step.status.stage,
+            L.level_index,
+            step.status.iter,
+            L.status.sweep,
+            value,
         )
 
 
@@ -71,6 +96,25 @@
         self.log_global_error(step, level_number, '_post_step')
 
 
+class LogGlobalErrorPostIter(LogError):
+    """
+    Log the global error after each iteration
+    """
+
+    def post_iteration(self, step, level_number):
+        """
+        Args:
+            step (pySDC.Step.step): the current step
+            level_number (int): the current level number
+
+        Returns:
+            None
+        """
+        super().post_iteration(step, level_number)
+
+        self.log_global_error(step, level_number, suffix='_post_iteration')
+
+
 class LogGlobalErrorPostRun(hooks):
     """
     Compute the global error once after the run is finished.
@@ -79,6 +123,11 @@
     `post_run` functions of the hooks are called, which results in a mismatch of `L.time + L.dt` as corresponding to
     when the solution is computed and when the error is computed. The issue is resolved by recording the time at which
     the solution is computed in a private attribute of this class.
+
+    There is another issue: The MPI controller instantiates a step after the run is completed, meaning the final
+    solution is not accessed by computing the end point, but by using the initial value on the finest level.
+ Additionally, the number of restarts is reset, which we need to filter recomputed values in post processing. + For this reason, we need to mess with the private `__num_restarts` of the core Hooks class. """ def __init__(self): @@ -86,7 +135,8 @@ class LogGlobalErrorPostRun(hooks): Add an attribute for when the last solution was added. """ super().__init__() - self.__t_last_solution = 0 + self.t_last_solution = 0 + self.num_restarts = 0 def post_step(self, step, level_number): """ @@ -102,7 +152,8 @@ class LogGlobalErrorPostRun(hooks): None """ super().post_step(step, level_number) - self.__t_last_solution = step.levels[0].time + step.levels[0].dt + self.t_last_solution = step.levels[0].time + step.levels[0].dt + self.num_restarts = step.status.get('restarts_in_a_row', 0) def post_run(self, step, level_number): """ @@ -116,24 +167,63 @@ class LogGlobalErrorPostRun(hooks): None """ super().post_run(step, level_number) + self._hooks__num_restarts = self.num_restarts - if level_number == 0: + if level_number == 0 and step.status.last: L = step.levels[level_number] - e_glob = np.linalg.norm(L.prob.u_exact(t=self.__t_last_solution) - L.uend, np.inf) + u_num = self.get_final_solution(L) + u_ref = L.prob.u_exact(t=self.t_last_solution) - if step.status.last: - self.logger.info(f'Finished with a global error of e={e_glob:.2e}') + self.logger.info(f'Finished with a global error of e={abs(u_num-u_ref):.2e}') self.add_to_stats( process=step.status.slot, - time=L.time + L.dt, + time=self.t_last_solution, level=L.level_index, iter=step.status.iter, sweep=L.status.sweep, type='e_global_post_run', - value=e_glob, + value=abs(u_num - u_ref), ) + self.add_to_stats( + process=step.status.slot, + time=self.t_last_solution, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='e_global_rel_post_run', + value=abs((u_num - u_ref) / u_ref), + ) + + def get_final_solution(self, lvl): + """ + Get the final solution from the level + + Args: + lvl (pySDC.Level.level): The level + """ + return lvl.uend + + +class LogGlobalErrorPostRunMPI(LogGlobalErrorPostRun): + """ + The MPI controller shows slightly different behaviour which is why the final solution is stored in a different place + than in the nonMPI controller. + """ + + def post_step(self, step, level_number): + super().post_step(step, level_number) + self.num_restarts = self._hooks__num_restarts + + def get_final_solution(self, lvl): + """ + Get the final solution from the level + + Args: + lvl (pySDC.Level.level): The level + """ + return lvl.u[0] class LogLocalErrorPostStep(LogError): diff --git a/pySDC/implementations/hooks/log_restarts.py b/pySDC/implementations/hooks/log_restarts.py new file mode 100644 index 0000000000000000000000000000000000000000..cb251e84dcf914df55e17c7c676583b2a4e0db0a --- /dev/null +++ b/pySDC/implementations/hooks/log_restarts.py @@ -0,0 +1,29 @@ +from pySDC.core.Hooks import hooks + + +class LogRestarts(hooks): + """ + Record restarts as `restart` at the beginning of the step. + """ + + def post_step(self, step, level_number): + """ + Record here if the step was restarted. 
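+        The step's start time is used for the stats entry, so restarted steps can be identified in post-processing.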
+
+        Args:
+            step (pySDC.Step.step): Current step
+            level_number (int): Current level
+        """
+        super().post_step(step, level_number)
+
+        L = step.levels[level_number]
+
+        self.add_to_stats(
+            process=step.status.slot,
+            time=L.time,
+            level=L.level_index,
+            iter=step.status.iter,
+            sweep=L.status.sweep,
+            type='restart',
+            value=int(step.status.get('restart')),
+        )
diff --git a/pySDC/implementations/hooks/log_work.py b/pySDC/implementations/hooks/log_work.py
index cd873093a69cb264a6c3bb998d319b83c8bae854..af50530bb16888bc2d3c27bef01d4818a9a9f1a5 100644
--- a/pySDC/implementations/hooks/log_work.py
+++ b/pySDC/implementations/hooks/log_work.py
@@ -6,6 +6,13 @@ class LogWork(hooks):
     Log the increment of all work counters in the problem between steps
     """
 
+    def __init__(self):
+        """
+        Initialize the variables for the work recorded in the last step
+        """
+        super().__init__()
+        self.__work_last_step = {}
+
     def pre_step(self, step, level_number):
         """
         Store the current values of the work counters
@@ -18,7 +25,7 @@
             None
         """
         if level_number == 0:
-            self.__work_last_step = [
+            self.__work_last_step[step.status.slot] = [
                 {key: step.levels[i].prob.work_counters[key].niter for key in step.levels[i].prob.work_counters.keys()}
                 for i in range(len(step.levels))
             ]
@@ -35,7 +42,7 @@
             None
         """
         L = step.levels[level_number]
-        for key in self.__work_last_step[level_number].keys():
+        for key in self.__work_last_step[step.status.slot][level_number].keys():
             self.add_to_stats(
                 process=step.status.slot,
                 time=L.time + L.dt,
@@ -43,5 +50,5 @@
                 iter=step.status.iter,
                 sweep=L.status.sweep,
                 type=f'work_{key}',
-                value=L.prob.work_counters[key].niter - self.__work_last_step[level_number][key],
+                value=L.prob.work_counters[key].niter - self.__work_last_step[step.status.slot][level_number][key],
             )
diff --git a/pySDC/implementations/problem_classes/Battery.py b/pySDC/implementations/problem_classes/Battery.py
index bb98a5e5420d40439592d284ba4a71162a28e6a1..4693c98a7328f4f12de82e8746e10afa595eca9a 100644
--- a/pySDC/implementations/problem_classes/Battery.py
+++ b/pySDC/implementations/problem_classes/Battery.py
@@ -6,10 +6,55 @@ from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
 
 
 class battery_n_capacitors(ptype):
-    """
-    Example implementing the battery drain model with N capacitors, where N is an arbitrary integer greater than 0.
-    Attributes:
-        nswitches: number of switches
+    r"""
+    Example implementing the battery drain model with :math:`N` capacitors, where :math:`N` is an arbitrary integer greater than zero.
+    First, the capacitor :math:`C` serves as a battery and provides energy. When the voltage of a capacitor :math:`u_{C_n}` for
+    :math:`n=1,..,N` drops below its reference value :math:`V_{ref,n-1}`, the circuit switches to the next capacitor. If all capacitors
+    have dropped below their reference values, the voltage source :math:`V_s` provides further energy. The problem of simulating the
+    battery draining has :math:`N + 1` different states. Each of these states can be expressed as a nonhomogeneous linear system of
+    ordinary differential equations (ODEs)
+
+    .. math::
+        \frac{d u(t)}{dt} = A_k u(t) + f_k (t)
+
+    for :math:`k=1,..,N+1` using an initial condition.
+
+    Parameters
+    ----------
+    ncapacitors : int
+        Number of capacitors :math:`n_{capacitors}` in the circuit.
+    Vs : float
+        Voltage at the voltage source :math:`V_s`.
+    Rs : float
+        Resistance of the resistor :math:`R_s` at the voltage source.
+ C : np.ndarray + Capacitances of the capacitors. + R : float + Resistance for the load. + L : float + Inductance of inductor. + alpha : float + Factor greater than zero to describe the storage of the capacitor(s). + V_ref : np.ndarray + Array contains the reference values greater than zero for each capacitor to switch to the next energy source. + + Attributes + ---------- + A: matrix + Coefficients matrix of the linear system of ordinary differential equations (ODEs). + switch_A: dict + Dictionary that contains the coefficients for the coefficient matrix A. + switch_f: dict + Dictionary that contains the coefficients of the right-hand side f of the ODE system. + t_switch: float + Time point of the discrete event found by switch estimation. + nswitches: int + Number of switches found by switch estimation. + + Note + ---- + The array containing the capacitances :math:`C_n` and the array containing the reference values :math:`V_{ref, n-1}` + for each capacitor must be equal to the number of capacitors :math:`n_{capacitors}`. """ dtype_u = mesh @@ -32,24 +77,47 @@ class battery_n_capacitors(ptype): self.nswitches = 0 def eval_f(self, u, t): - """ - Routine to evaluate the RHS. No Switch Estimator is used: For N = 3 there are N + 1 = 4 different states of the battery: - 1. u[1] > V_ref[0] and u[2] > V_ref[1] and u[3] > V_ref[2] -> C1 supplies energy - 2. u[1] <= V_ref[0] and u[2] > V_ref[1] and u[3] > V_ref[2] -> C2 supplies energy - 3. u[1] <= V_ref[0] and u[2] <= V_ref[1] and u[3] > V_ref[2] -> C3 supplies energy - 4. u[1] <= V_ref[0] and u[2] <= V_ref[1] and u[3] <= V_ref[2] -> Vs supplies energy - max_index is initialized to -1. List "switch" contains a True if u[k] <= V_ref[k-1] is satisfied. - - Is no True there (i.e. max_index = -1), we are in the first case. - - max_index = k >= 0 means we are in the (k+1)-th case. - So, the actual RHS has key max_index-1 in the dictionary self.switch_f. - In case of using the Switch Estimator, we count the number of switches which illustrates in which case of voltage source we are. - - Args: - u (dtype_u): current values - t (float): current time - - Returns: - dtype_f: the RHS + r""" + Routine to evaluate the right-hand side of the problem. Let :math:`v_k:=v_{C_k}` be the voltage of capacitor :math:`C_k` for :math:`k=1,..,N` + with reference value :math:`V_{ref, k-1}`. No switch estimator is used: For :math:`N = 3` there are :math:`N + 1 = 4` different states of the battery: + + :math:`C_1` supplies energy if: + + .. math:: + v_1 > V_{ref,0}, v_2 > V_{ref,1}, v_3 > V_{ref,2}, + + :math:`C_2` supplies energy if: + + .. math:: + v_1 \leq V_{ref,0}, v_2 > V_{ref,1}, v_3 > V_{ref,2}, + + :math:`C_3` supplies energy if: + + .. math:: + v_1 \leq V_{ref,0}, v_2 \leq V_{ref,1}, v_3 > V_{ref,2}, + + :math:`V_s` supplies energy if: + + .. math:: + v_1 \leq V_{ref,0}, v_2 \leq V_{ref,1}, v_3 \leq V_{ref,2}. + + :math:`max_{index}` is initialized to :math:`-1`. List "switch" contains a True if :math:`u_k \leq V_{ref,k-1}` is satisfied. + - Is no True there (i.e., :math:`max_{index}=-1`), we are in the first case. + - :math:`max_{index}=k\geq 0` means we are in the :math:`(k+1)`-th case. + So, the actual RHS has key :math:`max_{index}`-1 in the dictionary self.switch_f. + In case of using the switch estimator, we count the number of switches which illustrates in which case of voltage source we are. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. 
+ + Returns + ------- + f : dtype_f + The right-hand side of the problem. """ f = self.dtype_f(self.init, val=0.0) @@ -72,17 +140,24 @@ class battery_n_capacitors(ptype): return f def solve_system(self, rhs, factor, u0, t): - """ - Simple linear solver for (I-factor*A)u = rhs - - Args: - rhs (dtype_f): right-hand side for the linear system - factor (float): abbrev. for the local stepsize (or any other factor required) - u0 (dtype_u): initial guess for the iterative solver - t (float): current time (e.g. for time-dependent BCs) - - Returns: - dtype_u: solution as mesh + r""" + Simple linear solver for :math:`(I-factor\cdot A)\vec{u}=\vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. """ if self.t_switch is not None: @@ -104,13 +179,17 @@ class battery_n_capacitors(ptype): def u_exact(self, t): """ - Routine to compute the exact solution at time t + Routine to compute the exact solution at time t. - Args: - t (float): current time + Parameters + ---------- + t : float + Time of the exact solution. - Returns: - dtype_u: exact solution + Returns + ------- + me : dtype_u + The exact solution. """ assert t == 0, 'ERROR: u_exact only valid for t=0' @@ -124,14 +203,21 @@ class battery_n_capacitors(ptype): """ Provides information about a discrete event for one subinterval. - Args: - u (dtype_u): current values - t (float): current time - - Returns: - switch_detected (bool): Indicates if a switch is found or not - m_guess (np.int): Index of collocation node inside one subinterval of where the discrete event was found - vC_switch (list): Contains function values of switching condition (for interpolation) + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. + + Returns + ------- + switch_detected : bool + Indicates if a switch is found or not. + m_guess : int + Index of collocation node inside one subinterval of where the discrete event was found. + vC_switch : list + Contains function values of switching condition (for interpolation). """ switch_detected = False @@ -157,7 +243,7 @@ class battery_n_capacitors(ptype): def count_switches(self): """ Counts the number of switches. This function is called when a switch is found inside the range of tolerance - (in switch_estimator.py) + (in pySDC/projects/PinTSimE/switch_estimator.py) """ self.nswitches += 1 @@ -180,22 +266,47 @@ class battery_n_capacitors(ptype): class battery(battery_n_capacitors): - """ - Example implementing the battery drain model with one capacitor, inherits from battery_n_capacitors. + r""" + Example implementing the battery drain model with :math:`N=1` capacitor, inherits from battery_n_capacitors. The ODE system + of this model is given by the following equations. If :math:`v_1 > V_{ref, 0}:` + + .. math:: + \frac{d i_L (t)}{dt} = 0, + + .. math:: + \frac{d v_1 (t)}{dt} = -\frac{1}{CR}v_1 (t), + + where :math:`i_L` denotes the function of the current over time :math:`t`. + If :math:`v_1 \leq V_{ref, 0}:` + + .. math:: + \frac{d i_L(t)}{dt} = -\frac{R_s + R}{L}i_L (t) + \frac{1}{L} V_s, + + .. math:: + \frac{d v_1(t)}{dt} = 0. + + Note + ---- + This class has the same attributes as the class it inherits from. 
""" dtype_f = imex_mesh def eval_f(self, u, t): """ - Routine to evaluate the RHS - - Args: - u (dtype_u): current values - t (float): current time - - Returns: - dtype_f: the RHS + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side of the problem. """ f = self.dtype_f(self.init, val=0.0) @@ -212,17 +323,24 @@ class battery(battery_n_capacitors): return f def solve_system(self, rhs, factor, u0, t): - """ - Simple linear solver for (I-factor*A)u = rhs - - Args: - rhs (dtype_f): right-hand side for the linear system - factor (float): abbrev. for the local stepsize (or any other factor required) - u0 (dtype_u): initial guess for the iterative solver - t (float): current time (e.g. for time-dependent BCs) - - Returns: - dtype_u: solution as mesh + r""" + Simple linear solver for :math:`(I-factor\cdot A)\vec{u}=\vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. """ self.A = np.zeros((2, 2)) @@ -240,13 +358,17 @@ class battery(battery_n_capacitors): def u_exact(self, t): """ - Routine to compute the exact solution at time t + Routine to compute the exact solution at time t. - Args: - t (float): current time + Parameters + ---------- + t : float + Time of the exact solution. - Returns: - dtype_u: exact solution + Returns + ------- + me : dtype_u + The exact solution. """ assert t == 0, 'ERROR: u_exact only valid for t=0' @@ -259,6 +381,39 @@ class battery(battery_n_capacitors): class battery_implicit(battery): + r""" + Example implementing the battery drain model as above. The method solve_system uses a fully-implicit computation. + + Parameters + ---------- + ncapacitors : int + Number of capacitors in the circuit. + Vs : float + Voltage at the voltage source :math:`V_s`. + Rs : float + Resistance of the resistor :math:`R_s` at the voltage source. + C : np.ndarray + Capacitances of the capacitors. Length of array must equal to number of capacitors. + R : float + Resistance for the load. + L : float + Inductance of inductor. + alpha : float + Factor greater than zero to describe the storage of the capacitor(s). + V_ref : float + Reference value greater than zero for the battery to switch to the voltage source. + newton_maxiter : int + Number of maximum iterations for the Newton solver. + newton_tol : float + Tolerance for determination of the Newton solver. + + Attributes + ---------- + newton_itercount: int + Counts the number of Newton iterations. + newton_ncalls: int + Counts the number of how often Newton is called in the simulation of the problem. 
+ """ dtype_f = mesh def __init__(self, ncapacitors, Vs, Rs, C, R, L, alpha, V_ref, newton_maxiter, newton_tol): @@ -266,20 +421,23 @@ class battery_implicit(battery): self._makeAttributeAndRegister('newton_maxiter', 'newton_tol', localVars=locals(), readOnly=True) self.newton_itercount = 0 - self.lin_itercount = 0 self.newton_ncalls = 0 - self.lin_ncalls = 0 def eval_f(self, u, t): """ - Routine to evaluate the RHS - - Args: - u (dtype_u): current values - t (float): current time - - Returns: - dtype_f: the RHS + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side of the problem. """ f = self.dtype_f(self.init, val=0.0) @@ -300,16 +458,23 @@ class battery_implicit(battery): def solve_system(self, rhs, factor, u0, t): """ - Simple Newton solver - - Args: - rhs (dtype_f): right-hand side for the linear system - factor (float): abbrev. for the local stepsize (or any other factor required) - u0 (dtype_u): initial guess for the iterative solver - t (float): current time (e.g. for time-dependent BCs) - - Returns: - dtype_u: solution as mesh + Simple Newton solver. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. """ u = self.dtype_u(u0) diff --git a/pySDC/implementations/problem_classes/BuckConverter.py b/pySDC/implementations/problem_classes/BuckConverter.py index 8aa9387a8c9fdab79fb3c54ce10530c874bd6fbe..e377f6e7f9b9dc98c549521cea136d16f6af77a7 100644 --- a/pySDC/implementations/problem_classes/BuckConverter.py +++ b/pySDC/implementations/problem_classes/BuckConverter.py @@ -5,13 +5,75 @@ from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh class buck_converter(ptype): - """ - Example implementing the buck converter model as in the description in the PinTSimE project + r""" + Example implementing the model of a buck converter, which is also called a step-down converter. The converter has two different + states and each of this state can be expressed as a nonhomogeneous linear system of ordinary differential equations (ODEs) + + .. math:: + \frac{d u(t)}{dt} = A_k u(t) + f_k (t) + + for :math:`k=1,2`. The two states are the following. Define :math:`T_{sw}:=\frac{1}{f_{sw}}` as the switching period with + switching frequency :math:`f_{sw}`. The duty cycle :math:`duty`defines the period of how long the switches are in one state + until they switch to the other state. Roughly saying, the duty cycle can be seen as a percentage. A duty cycle of one means + that the switches are always in only one state. If :math:`0 \leq \frac{t}{T_{sw}} mod 1 \leq duty`: + + .. math:: + \frac{d v_{C_1} (t)}{dt} = -\frac{1}{R_s C_1}v_{C_1} (t) - \frac{1}{C_1} i_{L_1} (t) + \frac{V_s}{R_s C_1}, + + .. math:: + \frac{d v_{C_2} (t)}{dt} = -\frac{1}{R_\ell C_2}v_{C_2} (t) + \frac{1}{C_2} i_{L_1} (t), + + .. math:: + \frac{d i_{L_1} (t)}{dt} = \frac{1}{L_1} v_{C_1} (t) - \frac{1}{L_1} v_{C_2} (t) - \frac{R_\pi}{L_1} i_{L_1} (t), + + Otherwise, the equations are + + .. math:: + \frac{d v_{C_1} (t)}{dt} = -\frac{1}{R_s C_1}v_{C_1} (t) + \frac{V_s}{R_s C_1}, + + .. 
math:: + \frac{d v_{C_2} (t)}{dt} = -\frac{1}{R_\ell C_2}v_{C_2} (t) + \frac{1}{C_2} i_{L_1} (t), + + .. math:: + \frac{d i_{L_1} (t)}{dt} = \frac{R_\pi}{R_s L_1} v_{C_1} (t) - \frac{1}{L_1} v_{C_2} (t) - \frac{R_\pi V_s}{L_1 R_s}. + + using an initial condition. + + Parameters + ---------- + duty : float + Cycle between zero and one indicates the time period how long the converter stays on one switching state + until it switches to the other state. + fsw : int + Switching frequency, it is used to determine the number of time steps after the switching state is changed. + Vs : float + Voltage at the voltage source :math:`V_s`. + Rs : float + Resistance of the resistor :math:`R_s` at the voltage source. + C1 : float + Capacitance of the capacitor :math:`C_1`. + Rp : float + Resistance of the resistor in front of the inductor. + L1 : float + Inductance of the inductor :math:`L_1`. + C2 : float + Capacitance of the capacitor :math:`C_2`. + Rl : float + Resistance of the resistor :math:`R_\pi` + + Attributes + ---------- + A: system matrix, representing the 3 ODEs - TODO : doku + Note + ---- + The duty cycle needs to be a value in :math:`[0,1]`. - Attributes: - A: system matrix, representing the 3 ODEs + References + ---------- + .. [1] J. Sun. Pulse-Width Modulation. 25-61. Springer, (2012). + .. [2] J. Gyselinck, C. Martis, R. V. Sabariego. Using dedicated time-domain basis functions for the simulation of + pulse-width-modulation controlled devices - application to the steady-state regime of a buck converter. Electromotion (2013). """ dtype_u = mesh @@ -31,12 +93,19 @@ class buck_converter(ptype): def eval_f(self, u, t): """ - Routine to evaluate the RHS - Args: - u (dtype_u): current values - t (float): current time - Returns: - dtype_f: the RHS + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side of the problem. """ Tsw = 1 / self.fsw @@ -54,15 +123,24 @@ class buck_converter(ptype): return f def solve_system(self, rhs, factor, u0, t): - """ - Simple linear solver for (I-factor*A)u = rhs - Args: - rhs (dtype_f): right-hand side for the linear system - factor (float): abbrev. for the local stepsize (or any other factor required) - u0 (dtype_u): initial guess for the iterative solver - t (float): current time (e.g. for time-dependent BCs) - Returns: - dtype_u: solution as mesh + r""" + Simple linear solver for :math:`(I-factor\cdot A)\vec{u}=\vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. + t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. """ Tsw = 1 / self.fsw self.A = np.zeros((3, 3)) @@ -93,11 +171,17 @@ class buck_converter(ptype): def u_exact(self, t): """ - Routine to compute the exact solution at time t - Args: - t (float): current time - Returns: - dtype_u: exact solution + Routine to compute the exact solution at time t. + + Parameters + ---------- + t : float + Time of the exact solution. + + Returns + ------- + me : dtype_u + The exact solution. 
""" assert t == 0, 'ERROR: u_exact only valid for t=0' diff --git a/pySDC/implementations/problem_classes/Lorenz.py b/pySDC/implementations/problem_classes/Lorenz.py index 1e6643d82ebf39be2baf7226f9720552dbeebba5..43fd8c5d6b14402056dc322b882239fd9cadcdbc 100644 --- a/pySDC/implementations/problem_classes/Lorenz.py +++ b/pySDC/implementations/problem_classes/Lorenz.py @@ -22,7 +22,7 @@ class LorenzAttractor(ptype): dtype_u = mesh dtype_f = mesh - def __init__(self, sigma=10.0, rho=28.0, beta=8 / 3, newton_tol=1e-9, newton_maxiter=99): + def __init__(self, sigma=10.0, rho=28.0, beta=8.0 / 3.0, newton_tol=1e-9, newton_maxiter=99): """ Initialization routine @@ -37,6 +37,7 @@ class LorenzAttractor(ptype): 'sigma', 'rho', 'beta', 'newton_tol', 'newton_maxiter', localVars=locals(), readOnly=True ) self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() def eval_f(self, u, t): """ @@ -59,14 +60,13 @@ class LorenzAttractor(ptype): f[0] = sigma * (u[1] - u[0]) f[1] = rho * u[0] - u[1] - u[0] * u[2] f[2] = u[0] * u[1] - beta * u[2] + + self.work_counters['rhs']() return f def solve_system(self, rhs, dt, u0, t): """ Simple Newton solver for the nonlinear system. - Notice that I did not go through the trouble of inverting the Jacobian beforehand. If you have some time on your - hands feel free to do that! In the current implementation it is inverted using `numpy.linalg.solve`, which is a - bit more expensive than simple matrix-vector multiplication. Args: rhs (dtype_f): right-hand side for the linear system @@ -101,17 +101,35 @@ class LorenzAttractor(ptype): if res <= self.newton_tol or np.isnan(res): break - # assemble Jacobian J of G - J = np.array( + # assemble inverse of Jacobian J of G + prefactor = 1.0 / ( + dt**3 * sigma * (u[0] ** 2 + u[0] * u[1] + beta * (-rho + u[2] + 1)) + + dt**2 * (beta * sigma + beta - rho * sigma + sigma + u[0] ** 2 + sigma * u[2]) + + dt * (beta + sigma + 1) + + 1 + ) + J_inv = prefactor * np.array( [ - [1.0 + dt * sigma, -dt * sigma, 0], - [-dt * (rho - u[2]), 1 + dt, dt * u[0]], - [-dt * u[1], -dt * u[0], 1.0 + dt * beta], + [ + beta * dt**2 + dt**2 * u[0] ** 2 + beta * dt + dt + 1, + beta * dt**2 * sigma + dt * sigma, + -(dt**2) * sigma * u[0], + ], + [ + beta * dt**2 * rho + dt**2 * (-u[0]) * u[1] - beta * dt**2 * u[2] + dt * rho - dt * u[2], + beta * dt**2 * sigma + beta * dt + dt * sigma + 1, + dt**2 * sigma * (-u[0]) - dt * u[0], + ], + [ + dt**2 * rho * u[0] - dt**2 * u[0] * u[2] + dt**2 * u[1] + dt * u[1], + dt**2 * sigma * u[0] + dt**2 * sigma * u[1] + dt * u[0], + -(dt**2) * rho * sigma + dt**2 * sigma + dt**2 * sigma * u[2] + dt * sigma + dt + 1, + ], ] ) # solve the linear system for the Newton correction J delta = G - delta = np.linalg.solve(J, G) + delta = J_inv @ G # update solution u = u - delta diff --git a/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py b/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py index 8a052c710b7d21d92c9542a08e0fe37a9f486906..8550fb764fd47396d26e358c65f273b4b28a697e 100644 --- a/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py +++ b/pySDC/implementations/problem_classes/NonlinearSchroedinger_MPIFFT.py @@ -3,7 +3,7 @@ from mpi4py import MPI from mpi4py_fft import PFFT from pySDC.core.Errors import ProblemError -from pySDC.core.Problem import ptype +from pySDC.core.Problem import ptype, WorkCounter from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh from mpi4py_fft import newDistArray @@ -81,6 +81,9 @@ class 
nonlinearschroedinger_imex(ptype): self.dx = self.L / nvars[0] self.dy = self.L / nvars[1] + # work counters + self.work_counters['rhs'] = WorkCounter() + def eval_f(self, u, t): """ Routine to evaluate the RHS @@ -107,6 +110,7 @@ class nonlinearschroedinger_imex(ptype): f.impl[:] = self.fft.backward(lap_u_hat, f.impl) f.expl = self.ndim * self.c * 2j * np.absolute(u) ** 2 * u + self.work_counters['rhs']() return f def solve_system(self, rhs, factor, u0, t): diff --git a/pySDC/implementations/problem_classes/Piline.py b/pySDC/implementations/problem_classes/Piline.py index bf48f2329f4fbb5613ec26309e569d56eab27008..bb0a57de8c36e57fd2b9e6e79fff4f9a89b587e6 100644 --- a/pySDC/implementations/problem_classes/Piline.py +++ b/pySDC/implementations/problem_classes/Piline.py @@ -7,8 +7,42 @@ from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh # noinspection PyUnusedLocal class piline(ptype): - """ - Example implementing the Piline model as in the description in the PinTSimE project + r""" + Example implementing the model of the piline. It serves as a transmission line in an energy grid. The problem of simulating the + piline consists of three ordinary differential equations (ODEs) with nonhomogeneous part: + + .. math:: + \frac{d v_{C_1} (t)}{dt} = -\frac{1}{R_s C_1}v_{C_1} (t) - \frac{1}{C_1} i_{L_\pi} (t) + \frac{V_s}{R_s C_1}, + + .. math:: + \frac{d v_{C_2} (t)}{dt} = -\frac{1}{R_\ell C_2}v_{C_2} (t) + \frac{1}{C_2} i_{L_\pi} (t), + + .. math:: + \frac{d i_{L_\pi} (t)}{dt} = \frac{1}{L_\pi} v_{C_1} (t) - \frac{1}{L_\pi} v_{C_2} (t) - \frac{R_\pi}{L_\pi} i_{L_\pi} (t), + + which can be expressed as a nonhomogeneous linear system of ODEs + + .. math:: + \frac{d u(t)}{dt} = A u(t) + f(t) + + using an initial condition. + + Parameters + ---------- + Vs : float + Voltage at the voltage source :math:`V_s`. + Rs : float + Resistance of the resistor :math:`R_s` at the voltage source. + C1 : float + Capacitance of the capacitor :math:`C_1`. + Rpi : float + Resistance of the resistor :math:`R_\pi`. + Lpi : float + Inductance of the inductor :math:`L_\pi`. + C2 : float + Capacitance of the capacitor :math:`C_2`. + Rl : float + Resistance of the resistive load :math:`R_\ell`. Attributes: A: system matrix, representing the 3 ODEs @@ -39,14 +73,19 @@ class piline(ptype): def eval_f(self, u, t): """ - Routine to evaluate the RHS - - Args: - u (dtype_u): current values - t (float): current time - - Returns: - dtype_f: the RHS + Routine to evaluate the right-hand side of the problem. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution. + t : float + Current time of the numerical solution is computed. + + Returns + ------- + f : dtype_f + The right-hand side of the problem. """ f = self.dtype_f(self.init, val=0.0) @@ -55,17 +94,24 @@ class piline(ptype): return f def solve_system(self, rhs, factor, u0, t): - """ - Simple linear solver for (I-factor*A)u = rhs - - Args: - rhs (dtype_f): right-hand side for the linear system - factor (float): abbrev. for the local stepsize (or any other factor required) - u0 (dtype_u): initial guess for the iterative solver - t (float): current time (e.g. for time-dependent BCs) - - Returns: - dtype_u: solution as mesh + r""" + Simple linear solver for :math:`(I-factor\cdot A)\vec{u}=\vec{rhs}`. + + Parameters + ---------- + rhs : dtype_f + Right-hand side for the linear system. + factor : float + Abbrev. for the local stepsize (or any other factor required). + u0 : dtype_u + Initial guess for the iterative solver. 
+ t : float + Current time (e.g. for time-dependent BCs). + + Returns + ------- + me : dtype_u + The solution as mesh. """ me = self.dtype_u(self.init) @@ -74,15 +120,21 @@ class piline(ptype): def u_exact(self, t, u_init=None, t_init=None): """ - Routine to approximate the exact solution at time t by scipy - - Args: - t (float): current time - u_init (pySDC.problem.Piline.dtype_u): initial conditions for getting the exact solution - t_init (float): the starting time - - Returns: - dtype_u: exact solution (kind of) + Routine to approximate the exact solution at time t by scipy as a reference. + + Parameters + ---------- + t : float + Time of the exact solution. + u_init : pySDC.problem.Piline.dtype_u + Initial conditions for getting the exact solution. + t_init : float + The starting time. + + Returns + ------- + me : dtype_u + The reference solution. """ me = self.dtype_u(self.init) diff --git a/pySDC/implementations/problem_classes/LeakySuperconductor.py b/pySDC/implementations/problem_classes/Quench.py similarity index 69% rename from pySDC/implementations/problem_classes/LeakySuperconductor.py rename to pySDC/implementations/problem_classes/Quench.py index b3442d8afaf3c905f9770780db2322174880526a..8f846127c4314405080ea50b8ec6742a7b3b8067 100644 --- a/pySDC/implementations/problem_classes/LeakySuperconductor.py +++ b/pySDC/implementations/problem_classes/Quench.py @@ -9,7 +9,7 @@ from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh # noinspection PyUnusedLocal -class LeakySuperconductor(ptype): +class Quench(ptype): """ This is a toy problem to emulate a magnet that has been cooled to temperatures where superconductivity is possible. However, there is a leak! Some point in the domain is constantly heated and when this has heated up its environment @@ -34,6 +34,7 @@ class LeakySuperconductor(ptype): Q_max=1.0, leak_range=(0.45, 0.55), leak_type='linear', + leak_transition='step', order=2, stencil_type='center', bc='neumann-zero', @@ -43,6 +44,7 @@ class LeakySuperconductor(ptype): lintol=1e-8, liniter=99, direct_solver=True, + reference_sol_type='scipy', ): """ Initialization routine @@ -62,6 +64,7 @@ class LeakySuperconductor(ptype): 'Q_max', 'leak_range', 'leak_type', + 'leak_transition', 'order', 'stencil_type', 'bc', @@ -71,6 +74,7 @@ class LeakySuperconductor(ptype): 'lintol', 'liniter', 'direct_solver', + 'reference_sol_type', localVars=locals(), readOnly=True, ) @@ -130,15 +134,17 @@ class LeakySuperconductor(ptype): elif self.leak_type == 'exponential': me[:] = Q_max * (np.exp(u) - np.exp(u_thresh)) / (np.exp(u_max) - np.exp(u_thresh)) else: - raise NotImplementedError(f'Leak type {self.leak_type} not implemented!') + raise NotImplementedError(f'Leak type \"{self.leak_type}\" not implemented!') me[u < u_thresh] = 0 - me[self.leak] = Q_max - me[u >= u_max] = Q_max + if self.leak_transition == 'step': + me[self.leak] = Q_max + elif self.leak_transition == 'Gaussian': + me[:] = np.max([me, Q_max * np.exp(-((self.xv - 0.5) ** 2) / 3e-2)], axis=0) + else: + raise NotImplementedError(f'Leak transition \"{self.leak_transition}\" not implemented!') - # boundary conditions - me[0] = 0.0 - me[-1] = 0.0 + me[u >= u_max] = Q_max me[:] /= self.Cv @@ -183,12 +189,14 @@ class LeakySuperconductor(ptype): raise NotImplementedError(f'Leak type {self.leak_type} not implemented!') me[u < u_thresh] = 0 + if self.leak_transition == 'step': + me[self.leak] = 0 + elif self.leak_transition == 'Gaussian': + me[self.leak] = 0 + me[self.leak][u[self.leak] > Q_max * 
np.exp(-((self.xv[self.leak] - 0.5) ** 2) / 3e-2)] = 1 + else: + raise NotImplementedError(f'Leak transition \"{self.leak_transition}\" not implemented!') me[u > u_max] = 0 - me[self.leak] = 0 - - # boundary conditions - me[0] = 0.0 - me[-1] = 0.0 me[:] /= self.Cv @@ -270,38 +278,92 @@ class LeakySuperconductor(ptype): me = self.dtype_u(self.init, val=0.0) if t > 0: + if self.reference_sol_type == 'scipy': + + def jac(t, u): + """ + Get the Jacobian for the implicit BDF method to use in `scipy.solve_ivp` + + Args: + t (float): The current time + u (dtype_u): Current solution + + Returns: + scipy.sparse.csc: The derivative of the non-linear part of the solution w.r.t. to the solution. + """ + return self.A + self.get_non_linear_Jacobian(u) + + def eval_rhs(t, u): + """ + Function to pass to `scipy.solve_ivp` to evaluate the full RHS + + Args: + t (float): Current time + u (numpy.1darray): Current solution + + Returns: + (numpy.1darray): RHS + """ + return self.eval_f(u.reshape(self.init[0]), t).flatten() + + me[:] = self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init, method='BDF', jac=jac) + + elif self.reference_sol_type in ['DIRK', 'SDC']: + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.helpers.stats_helper import get_sorted + + description = {} + description['problem_class'] = Quench + description['problem_params'] = { + 'newton_tol': 1e-10, + 'newton_iter': 99, + 'nvars': 2**10, + **self.params, + } + + if self.reference_sol_type == 'DIRK': + from pySDC.implementations.sweeper_classes.Runge_Kutta import DIRK34 + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK + + description['sweeper_class'] = DIRK34 + description['sweeper_params'] = {} + description['step_params'] = {'maxiter': 1} + description['level_params'] = {'dt': 1e-4} + description['convergence_controllers'] = {AdaptivityRK: {'e_tol': 1e-9, 'update_order': 4}} + elif self.reference_sol_type == 'SDC': + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + + description['sweeper_class'] = generic_implicit + description['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE', 'quad_type': 'RADAU-RIGHT'} + description['step_params'] = {'maxiter': 99} + description['level_params'] = {'dt': 0.5, 'restol': 1e-10} + + controller_params = {'hook_class': LogSolution, 'mssdc_jac': False, 'logger_level': 99} + + controller = controller_nonMPI( + description=description, controller_params=controller_params, num_procs=1 + ) - def jac(t, u): - """ - Get the Jacobian for the implicit BDF method to use in `scipy.odeint` - - Args: - t (float): The current time - u (dtype_u): Current solution - - Returns: - scipy.sparse.csc: The derivative of the non-linear part of the solution w.r.t. to the solution. - """ - return self.A + self.get_non_linear_Jacobian(u) + uend, stats = controller.run( + u0=u_init if u_init is not None else self.u_exact(t=0.0), + t0=t_init if t_init is not None else 0, + Tend=t, + ) - def eval_rhs(t, u): - """ - Function to pass to `scipy.solve_ivp` to evaluate the full RHS + u_last = get_sorted(stats, type='u', recomputed=False)[-1] - Args: - t (float): Current time - u (numpy.1darray): Current solution + if abs(u_last[0] - t) > 1e-2: + self.logger.warning( + f'Time difference between reference solution and requested time is {abs(u_last[0]-t):.2e}!' 
+ ) - Returns: - (numpy.1darray): RHS - """ - return self.eval_f(u.reshape(self.init[0]), t).flatten() + me[:] = u_last[1] - me[:] = self.generate_scipy_reference_solution(eval_rhs, t, u_init, t_init, method='BDF', jac=jac) return me -class LeakySuperconductorIMEX(LeakySuperconductor): +class QuenchIMEX(Quench): dtype_f = imex_mesh def eval_f(self, u, t): @@ -360,8 +422,7 @@ class LeakySuperconductorIMEX(LeakySuperconductor): def jac(t, u): """ - Get the Jacobian for the implicit BDF method to use in `scipy.odeint` - + Get the Jacobian for the implicit BDF method to use in `scipy.solve_ivp` Args: t (float): The current time u (dtype_u): Current solution diff --git a/pySDC/implementations/problem_classes/TestEquation_0D.py b/pySDC/implementations/problem_classes/TestEquation_0D.py index 24f6112226d5523b0eff96b1c9c785c723ce84ea..ad1f00f633f6f3bd71a42fbd35a7387ce85b47bd 100644 --- a/pySDC/implementations/problem_classes/TestEquation_0D.py +++ b/pySDC/implementations/problem_classes/TestEquation_0D.py @@ -89,17 +89,21 @@ class testequation0d(ptype): me[:] = L.solve(rhs) return me - def u_exact(self, t): + def u_exact(self, t, u_init=None, t_init=None): """ Routine to compute the exact solution at time t Args: t (float): current time + u_init : pySDC.problem.testequation0d.dtype_u + t_init : float Returns: dtype_u: exact solution """ + u_init = (self.u0 if u_init is None else u_init) * 1.0 + t_init = 0.0 if t_init is None else t_init * 1.0 me = self.dtype_u(self.init) - me[:] = self.u0 * np.exp(t * np.array(self.lambdas)) + me[:] = u_init * np.exp((t - t_init) * np.array(self.lambdas)) return me diff --git a/pySDC/implementations/problem_classes/Van_der_Pol_implicit.py b/pySDC/implementations/problem_classes/Van_der_Pol_implicit.py index 1c58167d05435adcfe923b84de2a151362bcd7e5..665bf0ff172a3d3acea54f33c09b848041a2bbb2 100755 --- a/pySDC/implementations/problem_classes/Van_der_Pol_implicit.py +++ b/pySDC/implementations/problem_classes/Van_der_Pol_implicit.py @@ -27,6 +27,7 @@ class vanderpol(ptype): 'mu', 'newton_maxiter', 'newton_tol', 'stop_at_nan', 'crash_at_maxiter', localVars=locals() ) self.work_counters['newton'] = WorkCounter() + self.work_counters['rhs'] = WorkCounter() def u_exact(self, t, u_init=None, t_init=None): """ @@ -69,6 +70,7 @@ class vanderpol(ptype): f = self.f_init f[0] = x2 f[1] = self.mu * (1 - x1**2) * x2 - x1 + self.work_counters['rhs']() return f def solve_system(self, rhs, dt, u0, t): diff --git a/pySDC/implementations/sweeper_classes/Runge_Kutta.py b/pySDC/implementations/sweeper_classes/Runge_Kutta.py index d4cafed9b4a4f5094f574951de409be175ec0589..509c16a65befc99f71733bfbbcabe6acf7c2c31d 100644 --- a/pySDC/implementations/sweeper_classes/Runge_Kutta.py +++ b/pySDC/implementations/sweeper_classes/Runge_Kutta.py @@ -1,9 +1,9 @@ import numpy as np import logging -from pySDC.core.Sweeper import _Pars +from pySDC.core.Sweeper import sweeper, _Pars from pySDC.core.Errors import ParameterError -from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit +from pySDC.implementations.datatype_classes.mesh import imex_mesh, mesh class ButcherTableau(object): @@ -58,7 +58,7 @@ class ButcherTableau(object): self.delta_m[0] = self.nodes[0] - self.tleft # check if the RK scheme is implicit - self.implicit = any([matrix[i, i] != 0 for i in range(self.num_nodes - 1)]) + self.implicit = any(matrix[i, i] != 0 for i in range(self.num_nodes - 1)) class ButcherTableauEmbedded(object): @@ -116,10 +116,10 @@ class ButcherTableauEmbedded(object): self.delta_m[0] = 
self.nodes[0] - self.tleft # check if the RK scheme is implicit - self.implicit = any([matrix[i, i] != 0 for i in range(self.num_nodes - 2)]) + self.implicit = any(matrix[i, i] != 0 for i in range(self.num_nodes - 2)) -class RungeKutta(generic_implicit): +class RungeKutta(sweeper): """ Runge-Kutta scheme that fits the interface of a sweeper. Actually, the sweeper idea fits the Runge-Kutta idea when using only lower triangular rules, where solutions @@ -174,6 +174,11 @@ class RungeKutta(generic_implicit): params['collocation_class'] = type(params['butcher_tableau']) params['num_nodes'] = params['butcher_tableau'].num_nodes + # disable residual computation by default + params['skip_residual_computation'] = params.get( + 'skip_residual_computation', ('IT_CHECK', 'IT_FINE', 'IT_COARSE', 'IT_UP', 'IT_DOWN') + ) + self.params = _Pars(params) self.coll = params['butcher_tableau'] @@ -184,6 +189,55 @@ class RungeKutta(generic_implicit): self.parallelizable = False self.QI = self.coll.Qmat + @classmethod + def get_update_order(cls): + """ + Get the order of the lower order method for doing adaptivity. Only applies to embedded methods. + """ + raise NotImplementedError( + f"There is not an update order for RK scheme \"{cls.__name__}\" implemented. Maybe it is not an embedded scheme?" + ) + + def get_full_f(self, f): + """ + Get the full right hand side as a `mesh` from the right hand side + + Args: + f (dtype_f): Right hand side at a single node + + Returns: + mesh: Full right hand side as a mesh + """ + if type(f) == mesh: + return f + elif type(f) == imex_mesh: + return f.impl + f.expl + else: + raise NotImplementedError(f'Type \"{type(f)}\" not implemented in Runge-Kutta sweeper') + + def integrate(self): + """ + Integrates the right-hand side + + Returns: + list of dtype_u: containing the integral as values + """ + + # get current level and problem description + L = self.level + P = L.prob + + me = [] + + # integrate RHS over all collocation nodes + for m in range(1, self.coll.num_nodes + 1): + # new instance of dtype_u, initialize values with 0 + me.append(P.dtype_u(P.init, val=0.0)) + for j in range(1, self.coll.num_nodes + 1): + me[-1] += L.dt * self.coll.Qmat[m, j] * self.get_full_f(L.f[j]) + + return me + def update_nodes(self): """ Update the u- and f-values at the collocation nodes @@ -207,7 +261,7 @@ class RungeKutta(generic_implicit): # build rhs, consisting of the known values from above and new values from previous nodes (at k+1) rhs = L.u[0] for j in range(1, m + 1): - rhs += L.dt * self.QI[m + 1, j] * L.f[j] + rhs += L.dt * self.QI[m + 1, j] * self.get_full_f(L.f[j]) # implicit solve with prefactor stemming from the diagonal of Qd if self.coll.implicit: @@ -224,6 +278,12 @@ class RungeKutta(generic_implicit): return None + def compute_end_point(self): + """ + In this Runge-Kutta implementation, the solution to the step is always stored in the last node + """ + self.level.uend = self.level.u[-1] + class RK1(RungeKutta): def __init__(self, params): @@ -284,7 +344,7 @@ class MidpointMethod(RungeKutta): class RK4(RungeKutta): """ - Explicit Runge-Kutta of fourth order: Everybodies darling. + Explicit Runge-Kutta of fourth order: Everybody's darling. 
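+    The classical four-stage, fourth-order explicit scheme.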
""" def __init__(self, params): @@ -311,10 +371,14 @@ class Heun_Euler(RungeKutta): params['butcher_tableau'] = ButcherTableauEmbedded(weights, nodes, matrix) super(Heun_Euler, self).__init__(params) + @classmethod + def get_update_order(cls): + return 2 + class Cash_Karp(RungeKutta): """ - Fifth order explicit embedded Runge-Kutta + Fifth order explicit embedded Runge-Kutta. See [here](https://doi.org/10.1145/79505.79507). """ def __init__(self, params): @@ -334,6 +398,10 @@ class Cash_Karp(RungeKutta): params['butcher_tableau'] = ButcherTableauEmbedded(weights, nodes, matrix) super(Cash_Karp, self).__init__(params) + @classmethod + def get_update_order(cls): + return 5 + class DIRK34(RungeKutta): """ @@ -354,3 +422,7 @@ class DIRK34(RungeKutta): matrix[3, :] = [4007.0 / 6075.0, -31031.0 / 24300.0, -133.0 / 2700.0, 5.0 / 6.0] params['butcher_tableau'] = ButcherTableauEmbedded(weights, nodes, matrix) super().__init__(params) + + @classmethod + def get_update_order(cls): + return 4 diff --git a/pySDC/implementations/sweeper_classes/imex_1st_order_mass.py b/pySDC/implementations/sweeper_classes/imex_1st_order_mass.py index 44fe96b0b4a5eededc18ab721b3e951c5721c925..9006861096ec0fd0feaca75dc35f6a50c25826bd 100644 --- a/pySDC/implementations/sweeper_classes/imex_1st_order_mass.py +++ b/pySDC/implementations/sweeper_classes/imex_1st_order_mass.py @@ -90,15 +90,24 @@ class imex_1st_order_mass(imex_1st_order): return None - def compute_residual(self): + def compute_residual(self, stage=None): """ Computation of the residual using the collocation matrix Q + + Args: + stage (str): The current stage of the step the level belongs to """ # get current level and problem description L = self.level P = L.prob + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! + if stage in self.params.skip_residual_computation: + L.status.residual = 0.0 if L.status.residual is None else L.status.residual + return None + # check if there are new values (e.g. from a sweep) # assert L.status.updated diff --git a/pySDC/projects/DAE/problems/simple_DAE.py b/pySDC/projects/DAE/problems/simple_DAE.py index 90598333f7d3ef27f50880ec3c70985366627ab2..cf037a666d49a3279ca5cf049d431cfdb7069591 100644 --- a/pySDC/projects/DAE/problems/simple_DAE.py +++ b/pySDC/projects/DAE/problems/simple_DAE.py @@ -6,15 +6,41 @@ from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae class pendulum_2d(ptype_dae): - """ - Example implementing the well known 2D pendulum as a first order DAE of index-3 - The pendulum is used in most introductory literature on DAEs, for example on page 8 of "The numerical solution of differential-algebraic systems by Runge-Kutta methods" by Hairer et al. + r""" + Example implementing the well known 2D pendulum as a first order differential-algebraic equation (DAE) of index 3. + The DAE system is given by the equations + + .. math:: + x' = u, + + .. math:: + \frac{d}{dt} \frac{\partial}{\partial u} L = \frac{\partial L}{\partial x} + f + G^{T} \lambda, + + .. math:: + 0 = \phi. + + The pendulum is used in most introductory literature on DAEs, for example on page 8 of [1]_. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + Attributes + ---------- + t_end: float + The end time at which the reference solution is determined. + + References + ---------- + .. [1] E. Hairer, C. Lubich, M. Roche. 
The numerical solution of differential-algebraic systems by Runge-Kutta methods. + Lect. Notes Math. (1989). """ def __init__(self, nvars, newton_tol): - """ - Initialization routine for the problem class - """ + """Initialization routine""" super().__init__(nvars, newton_tol) # load reference solution # data file must be generated and stored under misc/data and self.t_end = t[-1] @@ -25,13 +51,22 @@ class pendulum_2d(ptype_dae): self.t_end = 0.0 def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. - t (float): current time (not used here) - Returns: - Current value of F(), 5 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes five components). """ g = 9.8 # The last element of u is a Lagrange multiplier. Not sure if this needs to be time dependent, but must model the @@ -43,10 +78,16 @@ class pendulum_2d(ptype_dae): def u_exact(self, t): """ Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution. - Args: - t (float): current time - Returns: - Mesh containing fixed initial value, 5 components + + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object. It contains fixed initial conditions at initial time. """ me = self.dtype_u(self.init) if t == 0: @@ -60,20 +101,60 @@ class pendulum_2d(ptype_dae): class simple_dae_1(ptype_dae): - """ - Example implementing a smooth linear index-2 DAE with known analytical solution - This example is commonly used to test that numerical implementations are functioning correctly - See, for example, page 267 of "computer methods for ODEs and DAEs" by Ascher and Petzold + r""" + Example implementing a smooth linear index-2 differential-algebraic equation (DAE) with known analytical solution. + The DAE system is given by + + .. math:: + \frac{d u_1 (t)}{dt} = (\alpha - \frac{1}{2 - t}) u_1 (t) + (2-t) \alpha z (t) + \frac{3 - t}{2 - t}, + + .. math:: + \frac{d u_2 (t)}{dt} = \frac{1 - \alpha}{t - 2} u_1 (t) - u_2 (t) + (\alpha - 1) z (t) + 2 e^{t}, + + .. math:: + 0 = (t + 2) u_1 (t) + (t^{2} - 4) u_2 (t) - (t^{2} + t - 2) e^{t}. + + The exact solution of this system is + + .. math:: + u_1 (t) = u_2 (t) = e^{t}, + + .. math:: + z (t) = -\frac{e^{t}}{2 - t}. + + This example is commonly used to test that numerical implementations are functioning correctly. See, for example, + page 267 of [1]_. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + References + ---------- + .. [1] U. Ascher, L. R. Petzold. Computer method for ordinary differential equations and differential-algebraic + equations. Society for Industrial and Applied Mathematics (1998). """ def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. 
This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 3 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes three components). """ # Smooth index-2 DAE pg. 267 Ascher and Petzold (also the first example in KDC Minion paper) a = 10.0 @@ -87,12 +168,17 @@ class simple_dae_1(ptype_dae): def u_exact(self, t): """ - Routine for the exact solution + Routine for the exact solution. - Args: - t (float): current time - Returns: - mesh type containing the exact solution, 3 components + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object containing three components. """ me = self.dtype_u(self.init) me[:] = (np.exp(t), np.exp(t), -np.exp(t) / (2 - t)) @@ -100,26 +186,58 @@ class simple_dae_1(ptype_dae): class problematic_f(ptype_dae): - """ - Standard example of a very simple fully implicit index-2 DAE that is not numerically solvable for certain choices of the parameter eta - See, for example, page 264 of "computer methods for ODEs and DAEs" by Ascher and Petzold + r""" + Standard example of a very simple fully implicit index-2 differential-algebraic equation (DAE) that is not + numerically solvable for certain choices of the parameter :math:`\eta`. The DAE system is given by + + .. math:: + y (t) + \eta t z (t) = f(t), + + .. math:: + \frac{d y(t)}{dt} + \eta t \frac{d z(t)}{dt} + (1 + \eta) z (t) = g (t). + + See, for example, page 264 of [1]_. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + Attributes + ---------- + eta: float + Specific parameter of the problem. + + References + ---------- + .. [1] U. Ascher, L. R. Petzold. Computer methods for ordinary differential equations and differential-algebraic + equations. Society for Industrial and Applied Mathematics (1998). """ def __init__(self, nvars, newton_tol, eta=1): - """ - Initialization routine for the problem class - """ + """Initialization routine""" super().__init__(nvars, newton_tol) self._makeAttributeAndRegister('eta', localVars=locals()) def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 2 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes two components).
""" f = self.dtype_f(self.init) f[:] = ( @@ -130,12 +248,17 @@ class problematic_f(ptype_dae): def u_exact(self, t): """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 2 components + Routine for the exact solution. + + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object containing two components. """ me = self.dtype_u(self.init) me[:] = (np.sin(t), 0) diff --git a/pySDC/projects/DAE/problems/synchronous_machine.py b/pySDC/projects/DAE/problems/synchronous_machine.py index de4fa79591a0e613157d4857914e715dd24ae69b..c0b021c362851d4cdcc35f5175d584ae21e1c9f5 100644 --- a/pySDC/projects/DAE/problems/synchronous_machine.py +++ b/pySDC/projects/DAE/problems/synchronous_machine.py @@ -7,9 +7,146 @@ from pySDC.implementations.datatype_classes.mesh import mesh class synchronous_machine_infinite_bus(ptype_dae): - """ - Synchronous machine model from Kundur (equiv. circuits fig. 3.18) - attached to infinite bus + r""" + Synchronous machine model from Kundur (equiv. circuits fig. 3.18 in [1]_) attached to infinite bus. The machine can be + represented as two different circuits at the direct-axis and the quadrature-axis. Detailed information can be found in + [1]_. The system of differential-algebraic equations (DAEs) consists of the equations for + + - the stator voltage equations + + .. math:: + \frac{d \Psi_d (t)}{dt} = \omega_b (v_d + R_a i_d (t) + \omega_r \Psi_q (t)), + + .. math:: + \frac{d \Psi_q (t)}{dt} = \omega_b (v_q + R_a i_q (t) - \omega_r \Psi_d (t)), + + .. math:: + \frac{d \Psi_0 (t)}{dt} = \omega_b (v_0 + R_a i_0 (t)), + + - the rotor voltage equations + + .. math:: + \frac{d \Psi_F (t)}{dt} = \omega_b (v_F - R_F i_F (t)), + + .. math:: + \frac{d \Psi_D (t)}{dt} = -\omega_b (R_D i_D (t)), + + .. math:: + \frac{d \Psi_{Q1} (t)}{dt} = -\omega_b (R_{Q1} i_{Q1} (t)), + + .. math:: + \frac{d \Psi_{Q2} (t)}{dt} = -\omega_b (R_{Q2} i_{Q2} (t)), + + - the stator flux linkage equations + + .. math:: + \Psi_d (t) = L_d i_d (t) + L_{md} i_F (t) + L_{md} i_D (t), + + .. math:: + \Psi_q (t) = L_q i_q (t) + L_{mq} i_{Q1} (t) + L_{mq} i_{Q2} (t), + + .. math:: + \Psi_0 (t) = L_0 i_0 (t) + + - the rotor flux linkage equations + + .. math:: + \Psi_F = L_F i_F (t) + L_D i_D + L_{md} i_d (t), + + .. math:: + \Psi_D = L_F i_F (t) + L_D i_D + L_{md} i_d (t), + + .. math:: + \Psi_{Q1} = L_{Q1} i_{Q1} (t) + L_{mq} i_{Q2} + L_{mq} i_q (t), + + .. math:: + \Psi_{Q2} = L_{mq} i_{Q1} (t) + L_{Q2} i_{Q2} + L_{mq} i_q (t), + + - the swing equations + + .. math:: + \frac{d \delta (t)}{dt} = \omega_b (\omega_r (t) - 1), + + .. math:: + \frac{d \omega_r (t)}{dt} = \frac{1}{2 H}(T_m - T_e - K_D \omega_b (\omega_r (t) - 1)). + + The voltages :math:`v_d`, :math:`v_q` can be updated via the following procedure. The stator's currents are mapped + to the comlex-valued external reference frame current :math:`I` with + + .. math:: + \Re(I) = i_d (t) \sin(\delta (t)) + i_q (t) \cos(\delta (t)), + + .. math:: + \Im(I) = -i_d (t) \cos(\delta (t)) + i_q (t) \sin(\delta (t)). + + The voltage V across the stator terminals can then be computed as complex-value via + + .. math:: + V_{comp} = E_B + Z_{line} (\Re(I) + i \Im(I)) + + with impedance :math:`Z_{line}\in\mathbb{C}`. 
Then, :math:`v_d`, :math:`v_q` can be computed via the network equations + + .. math:: + v_d = \Re(V_{comp}) \sin(\delta (t)) - \Im(V_{comp}) \cos(\delta (t)), + + .. math:: + v_q = \Re(V_{comp}) \cos(\delta (t)) + \Im(V_{comp}) \sin(\delta (t)), + + which describe the connection between the machine and the infinite bus. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + Attributes + ---------- + L_d: float + Inductance of inductor :math:`L_d`, see [1]_. + L_q: float + Inductance of inductor :math:`L_q`, see [1]_. + L_F: float + Inductance of inductor :math:`L_F`, see [1]_. + L_D: float + Inductance of inductor :math:`L_D`, see [1]_. + L_Q1: float + Inductance of inductor :math:`L_{Q1}`, see [1]_. + L_Q2: float + Inductance of inductor :math:`L_{Q2}`, see [1]_. + L_md: float + Inductance of inductor :math:`L_{md}`, see [1]_. + L_mq: float + Inductance of inductor :math:`L_{mq}`, see [1]_. + R_s: float + Resistance of resistor :math:`R_s`, see [1]_. + R_F: float + Resistance of resistor :math:`R_F`, see [1]_. + R_D: float + Resistance of resistor :math:`R_D`, see [1]_. + R_Q1: float + Resistance of resistor :math:`R_{Q1}`, see [1]_. + R_Q2: float + Resistance of resistor :math:`R_{Q2}`, see [1]_. + omega_b: float + Base frequency of the rotor in mechanical :math:`rad/s`. + H_: float + Defines the per unit inertia constant. + K_D: float + Factor that accounts for damping losses. + Z_line: complex + Impedance of the transmission line that connects the infinite bus to the generator. + E_B: float + Voltage of infinite bus. + v_F: float + Voltage at the field winding. + T_m: float + Defines the mechanical torque applied to the rotor shaft. + + References + ---------- + .. [1] P. Kundur, N. J. Balu, M. G. Lauby. Power system stability and control. The EPRI power system series (1994). """ def __init__(self, nvars, newton_tol): @@ -48,13 +185,22 @@ class synchronous_machine_infinite_bus(ptype_dae): self.T_m = 0.854 def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 14 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes 14 components). """ # simulate torque change at t = 0.05 @@ -116,10 +262,17 @@ class synchronous_machine_infinite_bus(ptype_dae): def u_exact(self, t): """ Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution. - Args: - t (float): current time - Returns: - Mesh containing fixed initial value, 5 components + + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object. It contains fixed initial conditions at initial time (which includes + 14 components).
""" me = self.dtype_u(self.init) diff --git a/pySDC/projects/DAE/problems/transistor_amplifier.py b/pySDC/projects/DAE/problems/transistor_amplifier.py index bef3f36ed33b6c45069637dcf049a2707feba8d1..a074f7ea746556b18bbd98dc454ee5d5c76b4ac5 100644 --- a/pySDC/projects/DAE/problems/transistor_amplifier.py +++ b/pySDC/projects/DAE/problems/transistor_amplifier.py @@ -11,9 +11,54 @@ def _transistor(u_in): class one_transistor_amplifier(ptype_dae): - """ - The one transistor amplifier example from pg. 404 Solving ODE II by Hairer and Wanner - The problem is an index-1 DAE + r""" + The one transistor amplifier example from pg. 404 in [1]_. The problem is an index-1 differential-algebraic equation + (DAE) having the equations + + .. math:: + \frac{U_e (t)}{R_0} - \frac{U_1 (t)}{R_0} + C_1 (\frac{d U_2 (t)}{dt} - \frac{d U_1 (t)}{dt}) = 0, + + .. math:: + \frac{U_b}{R_2} - U_2 (t) (\frac{1}{R_1} + \frac{1}{R_2}) + C_1 (\frac{d U_1 (t)}{dt} - \frac{d U_2 (t)}{dt}) - 0.01 f(U_2 (t) - U_3 (t)) = 0, + + .. math:: + f(U_2 (t) - U_3 (t)) - \frac{U_3 (t)}{R_3} - C_2 \frac{d U_3 (t)}{dt} = 0, + + .. math:: + \frac{U_b}{R_4} - \frac{U_4 (t)}{R_4} + C_3 (\frac{d U_5 (t)}{dt} - \frac{d U_4 (t)}{dt}) - 0.99 f(U_2 (t) - U_3 (t)) = 0, + + .. math:: + -\frac{U_5 (t)}{R_5} + C_3 (\frac{d U_4 (t)}{dt} - \frac{d U_5 (t)}{dt}) = 0, + + with + + .. math:: + f(U(t)) = 10^{-6} (exp(\frac{U (t)}{0.026}) - 1). + + The initial signal :math:`U_e (t)` is defined as + + .. math:: + U_e (t) = 0.4 \sin(200 \pi t). + + Constants are fixed as :math:`U_b = 6`, :math:`R_0 = 1000`, :math:`R_k = 9000` for :math:`k=1,..,5`, + `C_j = j \cdot 10^{-6}` for :math:`j=1,2,3`.They are also defined in the method `eval_f`. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + Attributes + ---------- + t_end: float + The end time at which the reference solution is determined. + + References + ---------- + .. [1] E. Hairer, G. Wanner. Solving ordinary differential equations II: Stiff and differential-algebraic problems. + Springer (2009). """ def __init__(self, nvars, newton_tol): @@ -28,13 +73,22 @@ class one_transistor_amplifier(ptype_dae): self.t_end = 0.0 def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 5 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes five components). """ u_b = 6.0 u_e = 0.4 * np.sin(200 * np.pi * t) @@ -54,11 +108,18 @@ class one_transistor_amplifier(ptype_dae): def u_exact(self, t): """ - Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution. - Args: - t (float): current time - Returns: - Mesh containing fixed initial value, 5 components + Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical + reference solution. 
+ + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object containing five components and fixed initial conditions. """ me = self.dtype_u(self.init) @@ -73,9 +134,64 @@ class one_transistor_amplifier(ptype_dae): class two_transistor_amplifier(ptype_dae): - """ - The two transistor amplifier example from page 108 "The numerical solution of differential-algebraic systems by Runge-Kutta methods" Hairer et al. - The problem is an index-1 DAE + r""" + The two transistor amplifier example from page 108 in [1]_. The problem is an index-1 differential-algebraic equation + (DAE) having the equations + + .. math:: + \frac{U_e (t)}{R_0} - \frac{U_1 (t)}{R_0} + C_1 (\frac{d U_2 (t)}{dt} - \frac{d U_1 (t)}{dt}) = 0, + + .. math:: + \frac{U_b}{R_2} - U_2 (t) (\frac{1}{R_1} + \frac{1}{R_2}) + C_1 (\frac{d U_1 (t)}{dt} - \frac{d U_2 (t)}{dt}) - (\alpha - 1) f(U_2 (t) - U_3 (t)) = 0, + + .. math:: + f(U_2 (t) - U_3 (t)) - \frac{U_3 (t)}{R_3} - C_2 \frac{d U_3 (t)}{dt} = 0, + + .. math:: + \frac{U_b}{R_4} - \frac{U_4 (t)}{R_4} + C_3 (\frac{d U_5 (t)}{dt} - \frac{d U_4 (t)}{dt}) - \alpha f(U_2 (t) - U_3 (t)) = 0, + + .. math:: + \frac{U_b}{R_6} - U_5 (t) (\frac{1}{R_5} + \frac{1}{R_6}) + C_3 (\frac{d U_4 (t)}{dt} - \frac{d U_5 (t)}{dt}) + (\alpha - 1) f(U_5 (t) - U_6 (t)) = 0, + + .. math:: + f(U_5 (t) - U_6 (t)) - \frac{U_6 (t)}{R_7} - C_4 \frac{d U_6 (t)}{dt} = 0, + + .. math:: + \frac{U_b}{R_8} - \frac{U_7 (t)}{R_8} - C_5 (\frac{d U_7 (t)}{dt} - \frac{d U_8 (t)}{dt}) - \alpha f(U_5 (t) - U_6 (t)) = 0, + + .. math:: + \frac{U_8 (t)}{R_9} - C_5 (\frac{d U_7 (t)}{dt} - \frac{d U_8 (t)}{dt}) = 0, + + with + + .. math:: + f(U_i (t) - U_j (t)) = \beta (\exp(\frac{U_i (t) - U_j (t)}{U_F}) - 1). + + The initial signal :math:`U_e (t)` is defined as + + .. math:: + U_e (t) = 0.1 \sin(200 \pi t). + + Constants are fixed as :math:`U_b = 6`, :math:`U_F = 0.026`, :math:`\alpha = 0.99`, :math:`\beta = 10^{-6}`, :math:`R_0 = 1000`, + :math:`R_k = 9000` for :math:`k=1,..,9`, :math:`C_j = j \cdot 10^{-6}` for :math:`j=1,..,5`. They are also defined in the + method `eval_f`. + + Parameters + ---------- + nvars : int + Number of unknowns of the system of DAEs. + newton_tol : float + Tolerance for Newton solver. + + Attributes + ---------- + t_end: float + The end time at which the reference solution is determined. + + References + ---------- + .. [1] E. Hairer, C. Lubich, M. Roche. The numerical solution of differential-algebraic systems by Runge-Kutta methods. + Lect. Notes Math. (1989). """ def __init__(self, nvars, newton_tol): @@ -90,13 +206,22 @@ class two_transistor_amplifier(ptype_dae): self.t_end = 0.0 def eval_f(self, u, du, t): - """ - Routine to evaluate the implicit representation of the problem i.e. F(u', u, t) - Args: - u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation - t (float): current time - Returns: - Current value of F(), 8 components + r""" + Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`. + + Parameters + ---------- + u : dtype_u + Current values of the numerical solution at time t. + du : dtype_u + Current values of the derivative of the numerical solution at time t. + t : float + Current time of the numerical solution. + + Returns + ------- + f : dtype_f + Current value of the right-hand side of f (which includes eight components).
""" u_b = 6.0 u_e = 0.1 * np.sin(200 * np.pi * t) @@ -119,13 +244,19 @@ class two_transistor_amplifier(ptype_dae): def u_exact(self, t): """ - Dummy exact solution that should only be used to get initial conditions for the problem - This makes initialisation compatible with problems that have a known analytical solution - Could be used to output a reference solution if generated/available - Args: - t (float): current time - Returns: - Mesh containing fixed initial value, 5 components + Dummy exact solution that should only be used to get initial conditions for the problem. This makes + initialisation compatible with problems that have a known analytical solution. Could be used to output a + reference solution if generated/available. + + Parameters + ---------- + t : float + The time of the reference solution. + + Returns + ------- + me : dtype_u + The reference solution as mesh object containing eight components and fixed initial conditions. """ me = self.dtype_u(self.init) diff --git a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py index 954d38af24ddf068909b79592245d1f899cac526..1061618a203889435cc6050a1758346ff67f320c 100644 --- a/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py +++ b/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py @@ -169,10 +169,14 @@ class fully_implicit_DAE(sweeper): L.status.unlocked = True L.status.updated = True - def compute_residual(self): + def compute_residual(self, stage=None): """ Overrides the base implementation Uses the absolute value of the implicit function ||F(u', u, t)|| as the residual + + Args: + stage (str): The current stage of the step the level belongs to + Returns: None """ @@ -181,6 +185,12 @@ class fully_implicit_DAE(sweeper): L = self.level P = L.prob + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! + if stage in self.params.skip_residual_computation: + L.status.residual = 0.0 if L.status.residual is None else L.status.residual + return None + # check if there are new values (e.g. 
from a sweep) # assert L.status.updated diff --git a/pySDC/projects/PinTSimE/battery_model.py b/pySDC/projects/PinTSimE/battery_model.py index 39656643c50694a69baa1b803e7837e7b7362962..2ee06ab970e1d99652ee297757fde2eb14f238fb 100644 --- a/pySDC/projects/PinTSimE/battery_model.py +++ b/pySDC/projects/PinTSimE/battery_model.py @@ -34,15 +34,6 @@ class log_data(hooks): type='u', value=L.uend, ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='restart', - value=int(step.status.get('restart')), - ) self.add_to_stats( process=step.status.slot, time=L.time + L.dt, @@ -112,7 +103,7 @@ def generate_description( if problem == battery_implicit: problem_params['newton_maxiter'] = 200 problem_params['newton_tol'] = 1e-08 - problem_params['ncapacitors'] = ncapacitors # number of condensators + problem_params['ncapacitors'] = ncapacitors # number of capacitors problem_params['Vs'] = 5.0 problem_params['Rs'] = 0.5 problem_params['C'] = C diff --git a/pySDC/projects/PinTSimE/switch_estimator.py b/pySDC/projects/PinTSimE/switch_estimator.py index d04038dbb29109baa3d76b1378699b4f07a56ef9..7cb4e4674d7bc61e8e6bf3abfff8f697c5feeded 100644 --- a/pySDC/projects/PinTSimE/switch_estimator.py +++ b/pySDC/projects/PinTSimE/switch_estimator.py @@ -59,7 +59,7 @@ class SwitchEstimator(ConvergenceController): self.setup_status_variables(controller, **kwargs) - def get_new_step_size(self, controller, S): + def get_new_step_size(self, controller, S, **kwargs): """ Determine a new step size when a switch is found such that the switch happens at the time step. @@ -124,7 +124,7 @@ class SwitchEstimator(ConvergenceController): else: self.status.switch_detected = False - def determine_restart(self, controller, S): + def determine_restart(self, controller, S, **kwargs): """ Check if the step needs to be restarted due to a predicting switch. @@ -140,9 +140,9 @@ class SwitchEstimator(ConvergenceController): S.status.restart = True S.status.force_done = True - super(SwitchEstimator, self).determine_restart(controller, S) + super().determine_restart(controller, S, **kwargs) - def post_step_processing(self, controller, S): + def post_step_processing(self, controller, S, **kwargs): """ After a step is done, some variables will be prepared for predicting a possibly new switch. If no Adaptivity is used, the next time step will be set as the default one from the front end. 
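A note on the `compute_residual(stage=None)` changes above (in `imex_1st_order_mass` and `fully_implicit_DAE`): the early return is only taken when the current stage appears in `self.params.skip_residual_computation`, so the shortcut is opt-in via the sweeper parameters. The minimal sketch below shows how it could be enabled; the stage labels used here are assumptions chosen for illustration and should be checked against the names the controllers actually pass.

```python
# Hypothetical sketch: opting in to skipped residual computations.
# 'IT_DOWN' and 'IT_UP' are assumed stage labels, not confirmed by this patch.
sweeper_params = {
    'num_nodes': 3,
    'quad_type': 'RADAU-RIGHT',
    # compute_residual(stage=...) returns early for these stages and keeps
    # the last known residual (or 0.0 if none was computed yet)
    'skip_residual_computation': ('IT_DOWN', 'IT_UP'),
}
```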
@@ -160,7 +160,7 @@ class SwitchEstimator(ConvergenceController): if self.status.t_switch is None: L.status.dt_new = L.status.dt_new if L.status.dt_new is not None else L.params.dt_initial - super(SwitchEstimator, self).post_step_processing(controller, S) + super().post_step_processing(controller, S, **kwargs) @staticmethod def get_switch(t_interp, vC_switch, m_guess): diff --git a/pySDC/projects/Resilience/Lorenz.py b/pySDC/projects/Resilience/Lorenz.py index a10b998655aca86d6e980c1a0e69bb7884a575e3..83c5e1e7564409d401206f5cd235ee1db572505d 100644 --- a/pySDC/projects/Resilience/Lorenz.py +++ b/pySDC/projects/Resilience/Lorenz.py @@ -9,6 +9,7 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controlle from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.core.Errors import ConvergenceError from pySDC.projects.Resilience.hook import LogData, hook_collection +from pySDC.projects.Resilience.strategies import merge_descriptions def run_Lorenz( @@ -18,7 +19,6 @@ def run_Lorenz( hook_class=LogData, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, use_MPI=False, **kwargs, ): @@ -32,7 +32,6 @@ def run_Lorenz( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets use_MPI (bool): Whether or not to use MPI Returns: @@ -56,9 +55,6 @@ def run_Lorenz( 'newton_maxiter': 99, } - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters step_params = dict() step_params['maxiter'] = 4 @@ -82,11 +78,7 @@ def run_Lorenz( description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 @@ -98,18 +90,13 @@ def run_Lorenz( comm = kwargs.get('comm', MPI.COMM_WORLD) controller = controller_MPI(controller_params=controller_params, description=description, comm=comm) - - # get initial values on finest level P = controller.S.levels[0].prob - uinit = P.u_exact(t0) else: controller = controller_nonMPI( num_procs=num_procs, controller_params=controller_params, description=description ) - - # get initial values on finest level P = controller.MS[0].levels[0].prob - uinit = P.u_exact(t0) + uinit = P.u_exact(t0) # insert faults if fault_stuff is not None: diff --git a/pySDC/projects/Resilience/Schroedinger.py b/pySDC/projects/Resilience/Schroedinger.py index 4a90674da4e47af23346637d223cd4ffcfb31c62..69bf81f8b0b1765bd21b8073a8d9969acb6701ef 100644 --- a/pySDC/projects/Resilience/Schroedinger.py +++ b/pySDC/projects/Resilience/Schroedinger.py @@ -1,6 +1,5 @@ -import numpy as np -from pathlib import Path from mpi4py import MPI +import numpy as np from pySDC.helpers.stats_helper import get_sorted @@ -9,6 +8,58 @@ from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order from pySDC.implementations.problem_classes.NonlinearSchroedinger_MPIFFT import nonlinearschroedinger_imex from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft from pySDC.projects.Resilience.hook import LogData, hook_collection +from pySDC.projects.Resilience.strategies import 
merge_descriptions + +from pySDC.core.Hooks import hooks + +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import make_axes_locatable + + +class live_plotting_with_error(hooks): # pragma: no cover + def __init__(self): + super().__init__() + self.fig, self.axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12, 7)) + + divider = make_axes_locatable(self.axs[1]) + self.cax_right = divider.append_axes('right', size='5%', pad=0.05) + divider = make_axes_locatable(self.axs[0]) + self.cax_left = divider.append_axes('right', size='5%', pad=0.05) + + def post_step(self, step, level_number): + lvl = step.levels[level_number] + lvl.sweep.compute_end_point() + + self.axs[0].cla() + im1 = self.axs[0].imshow(np.abs(lvl.uend), vmin=0, vmax=2.0) + self.fig.colorbar(im1, cax=self.cax_left) + + self.axs[1].cla() + im = self.axs[1].imshow(np.abs(lvl.prob.u_exact(lvl.time + lvl.dt) - lvl.uend)) + self.fig.colorbar(im, cax=self.cax_right) + + self.fig.suptitle(f't={lvl.time:.2f}') + self.axs[0].set_title('solution') + self.axs[1].set_title('error') + plt.pause(1e-9) + + +class live_plotting(hooks): # pragma: no cover + def __init__(self): + super().__init__() + self.fig, self.ax = plt.subplots() + divider = make_axes_locatable(self.ax) + self.cax = divider.append_axes('right', size='5%', pad=0.05) + + def post_step(self, step, level_number): + lvl = step.levels[level_number] + lvl.sweep.compute_end_point() + + self.ax.cla() + im = self.ax.imshow(np.abs(lvl.uend), vmin=0.2, vmax=1.8) + self.ax.set_title(f't={lvl.time + lvl.dt:.2f}') + self.fig.colorbar(im, cax=self.cax) + plt.pause(1e-9) def run_Schroedinger( @@ -18,7 +69,6 @@ def run_Schroedinger( hook_class=LogData, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, use_MPI=False, space_comm=None, **kwargs, @@ -33,7 +83,6 @@ def run_Schroedinger( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets use_MPI (bool): Whether or not to use MPI Returns: @@ -41,13 +90,14 @@ def run_Schroedinger( controller: The controller Tend: The time that was supposed to be integrated to """ + from mpi4py import MPI - space_comm = MPI.COMM_WORLD if space_comm is None else space_comm + space_comm = MPI.COMM_SELF if space_comm is None else space_comm rank = space_comm.Get_rank() # initialize level parameters level_params = dict() - level_params['restol'] = 1e-08 + level_params['restol'] = 1e-8 level_params['dt'] = 1e-01 / 2 level_params['nsweeps'] = 1 @@ -62,11 +112,9 @@ def run_Schroedinger( problem_params = dict() problem_params['nvars'] = (128, 128) problem_params['spectral'] = False + problem_params['c'] = 1.0 problem_params['comm'] = space_comm - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters step_params = dict() step_params['maxiter'] = 50 @@ -90,21 +138,26 @@ def run_Schroedinger( description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if type(custom_description[k]) == dict: - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} - else: - description[k] = custom_description[k] + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 # instantiate controller - assert use_MPI == False, "MPI version in time not implemented" - controller = 
controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description) + controller_args = { + 'controller_params': controller_params, + 'description': description, + } + if use_MPI: + from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + + comm = kwargs.get('comm', MPI.COMM_WORLD) + controller = controller_MPI(**controller_args, comm=comm) + P = controller.S.levels[0].prob + else: + controller = controller_nonMPI(**controller_args, num_procs=num_procs) + P = controller.MS[0].levels[0].prob - # get initial values on finest level - P = controller.MS[0].levels[0].prob uinit = P.u_exact(t0) # insert faults @@ -124,25 +177,9 @@ def run_Schroedinger( return stats, controller, Tend -def plot_solution(stats): # pragma: no cover - import matplotlib.pyplot as plt - - u = get_sorted(stats, type='u') - fig, axs = plt.subplots(1, 2, figsize=(12, 5)) - axs[0].imshow(np.abs(u[0][1])) - axs[0].set_title(f't={u[0][0]:.2f}') - for i in range(len(u)): - axs[1].cla() - axs[1].imshow(np.abs(u[i][1])) - axs[1].set_title(f't={u[i][0]:.2f}') - plt.pause(1e-1) - fig.tight_layout() - plt.show() - - def main(): - stats, _, _ = run_Schroedinger(space_comm=MPI.COMM_WORLD) - plot_solution(stats) + stats, _, _ = run_Schroedinger(space_comm=MPI.COMM_WORLD, hook_class=live_plotting) + plt.show() if __name__ == "__main__": diff --git a/pySDC/projects/Resilience/accuracy_check.py b/pySDC/projects/Resilience/accuracy_check.py index 68628a7686079c7472e087ee243a35eff5e6c921..d79d8c0085ffe9c18c0a9421fd290de39224455f 100644 --- a/pySDC/projects/Resilience/accuracy_check.py +++ b/pySDC/projects/Resilience/accuracy_check.py @@ -3,7 +3,7 @@ import matplotlib.pylab as plt import numpy as np from pySDC.helpers.stats_helper import get_sorted -from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedErrorNonMPI +from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import ( EstimateExtrapolationErrorNonMPI, ) @@ -118,12 +118,14 @@ def multiple_runs( num_procs = 1 if serial else 5 + embedded_error_flavor = 'standard' if avoid_restarts else 'linearized' + # perform rest of the tests for i in range(0, len(dt_list)): desc = { 'step_params': {'maxiter': k}, 'convergence_controllers': { - EstimateEmbeddedErrorNonMPI: {}, + EstimateEmbeddedError.get_implementation(flavor=embedded_error_flavor, useMPI=False): {}, EstimateExtrapolationErrorNonMPI: {'no_storage': not serial}, }, } @@ -134,7 +136,11 @@ def multiple_runs( elif var == 'e_tol': from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity - desc['convergence_controllers'][Adaptivity] = {'e_tol': dt_list[i], 'avoid_restarts': avoid_restarts} + desc['convergence_controllers'][Adaptivity] = { + 'e_tol': dt_list[i], + 'avoid_restarts': avoid_restarts, + 'embedded_error_flavor': embedded_error_flavor, + } if custom_description is not None: desc = {**desc, **custom_description} @@ -206,7 +212,7 @@ def plot(res, ax, k, var='dt'): color = plt.rcParams['axes.prop_cycle'].by_key()['color'][k - 2] for i in range(len(keys)): - if all([me == 0.0 for me in res[keys[i]]]): + if all(me == 0.0 for me in res[keys[i]]): continue order = get_accuracy_order(res, key=keys[i], var=var) if keys[i] == 'e_embedded': @@ -379,7 +385,16 @@ def check_order_with_adaptivity(): ks = [3, 2] for serial in [True, False]: fig, ax = 
plt.subplots(1, 1, figsize=(3.5, 3)) - plot_all_errors(ax, ks, serial, Tend_fixed=5e-1, var='e_tol', dt_list=[1e-5, 1e-6, 1e-7], avoid_restarts=True) + plot_all_errors( + ax, + ks, + serial, + Tend_fixed=5e-1, + var='e_tol', + dt_list=[1e-5, 5e-6], + avoid_restarts=False, + custom_controller_params={'logger_level': 30}, + ) if serial: fig.savefig('data/error_estimate_order_adaptivity.png', dpi=300, bbox_inches='tight') else: diff --git a/pySDC/projects/Resilience/advection.py b/pySDC/projects/Resilience/advection.py index b267dc4e7286eddf676c6310f89947e2a6a93601..13be254fd3d14c61a8485a64fe305e9b5df069d9 100644 --- a/pySDC/projects/Resilience/advection.py +++ b/pySDC/projects/Resilience/advection.py @@ -2,11 +2,10 @@ from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit -from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.helpers.stats_helper import get_sorted -import numpy as np from pySDC.projects.Resilience.hook import LogData, hook_collection from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults +from pySDC.projects.Resilience.strategies import merge_descriptions def plot_embedded(stats, ax): @@ -28,29 +27,44 @@ def run_advection( hook_class=LogData, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, + use_MPI=False, + **kwargs, ): + """ + Run an advection problem with default parameters. + + Args: + custom_description (dict): Overwrite presets + num_procs (int): Number of steps for MSSDC + Tend (float): Time to integrate to + hook_class (pySDC.Hook): A hook to store data + fault_stuff (dict): A dictionary with information on how to add faults + custom_controller_params (dict): Overwrite presets + use_MPI (bool): Whether or not to use MPI + + Returns: + dict: The stats object + controller: The controller + Tend: The time that was supposed to be integrated to + """ # initialize level parameters - level_params = dict() + level_params = {} level_params['dt'] = 0.05 # initialize sweeper parameters - sweeper_params = dict() + sweeper_params = {} sweeper_params['quad_type'] = 'RADAU-RIGHT' sweeper_params['num_nodes'] = 3 sweeper_params['QI'] = 'IE' - problem_params = {'freq': 2, 'nvars': 2**9, 'c': 1.0, 'order': 5, 'bc': 'periodic'} - - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} + problem_params = {'freq': 2, 'nvars': 2**9, 'c': 1.0, 'stencil_type': 'center', 'order': 4, 'bc': 'periodic'} # initialize step parameters - step_params = dict() + step_params = {} step_params['maxiter'] = 5 # initialize controller parameters - controller_params = dict() + controller_params = {} controller_params['logger_level'] = 30 controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class]) controller_params['mssdc_jac'] = False @@ -59,26 +73,41 @@ def run_advection( controller_params = {**controller_params, **custom_controller_params} # fill description dictionary for easy step instantiation - description = dict() - description['problem_class'] = advectionNd # pass problem class - description['problem_params'] = problem_params # pass problem parameters - description['sweeper_class'] = generic_implicit # pass sweeper - description['sweeper_params'] = sweeper_params # pass sweeper parameters - description['level_params'] = level_params # pass level parameters + description = {} + 
description['problem_class'] = advectionNd + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 # instantiate controller - controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description) + if use_MPI: + from mpi4py import MPI + from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + + comm = kwargs.get('comm', MPI.COMM_WORLD) + controller = controller_MPI(controller_params=controller_params, description=description, comm=comm) + + # get initial values on finest level + P = controller.S.levels[0].prob + uinit = P.u_exact(t0) + else: + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + + controller = controller_nonMPI( + num_procs=num_procs, controller_params=controller_params, description=description + ) + + # get initial values on finest level + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) # insert faults if fault_stuff is not None: @@ -91,10 +120,6 @@ def run_advection( } prepare_controller_for_faults(controller, fault_stuff, rnd_args, args) - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_exact(t0) - # call main function to get things done... uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) return stats, controller, Tend @@ -105,13 +130,13 @@ if __name__ == '__main__': from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.projects.Resilience.hook import LogUold - adaptivity_params = dict() + adaptivity_params = {} adaptivity_params['e_tol'] = 1e-8 - convergence_controllers = dict() + convergence_controllers = {} convergence_controllers[Adaptivity] = adaptivity_params - description = dict() + description = {} description['convergence_controllers'] = convergence_controllers fig, axs = plt.subplots(1, 2, figsize=(12, 4), sharex=True, sharey=True) diff --git a/pySDC/projects/Resilience/collocation_adaptivity.py b/pySDC/projects/Resilience/collocation_adaptivity.py index d57defa24be8e71aa477e3d82c96ffcf04fc5ebe..2ec723c29490e1a1e3e29adec954183d822f227b 100644 --- a/pySDC/projects/Resilience/collocation_adaptivity.py +++ b/pySDC/projects/Resilience/collocation_adaptivity.py @@ -244,7 +244,7 @@ def check_order(prob, coll_name, ax, k_ax): order = get_accuracy_order(me, key=i, thresh=1e-9) assert np.isclose( np.mean(order), expected_order, atol=0.3 - ), f"Expected order: {expected_order}, got {order:.2f}!" + ), f"Expected order: {expected_order}, got {np.mean(order):.2f}!" 
ax.loglog(result['dt'], result[i], label=f'{label} nodes: order: {np.mean(order):.1f}', color=CMAP[i]) if i > 0: diff --git a/pySDC/projects/Resilience/dahlquist.py b/pySDC/projects/Resilience/dahlquist.py index 8837d5ce87c5294ca819aa63e354b814733c1070..5b1ebc218940a1cf2860c9006f6f3bf5f807b516 100644 --- a/pySDC/projects/Resilience/dahlquist.py +++ b/pySDC/projects/Resilience/dahlquist.py @@ -11,6 +11,7 @@ from mpl_toolkits.axes_grid1 import make_axes_locatable from pySDC.implementations.hooks.log_solution import LogSolutionAfterIteration from pySDC.implementations.hooks.log_step_size import LogStepSize +from pySDC.projects.Resilience.strategies import merge_descriptions class LogLambdas(hooks): @@ -34,7 +35,6 @@ def run_dahlquist( hook_class=hooks, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, **kwargs, ): """ @@ -47,7 +47,6 @@ def run_dahlquist( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets Returns: dict: The stats object @@ -78,9 +77,6 @@ def run_dahlquist( 'u0': 1.0 + 0.0j, } - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters step_params = dict() step_params['maxiter'] = 5 @@ -104,11 +100,7 @@ def run_dahlquist( description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 diff --git a/pySDC/projects/Resilience/extrapolation_within_Q.py b/pySDC/projects/Resilience/extrapolation_within_Q.py new file mode 100644 index 0000000000000000000000000000000000000000..506d31e69af9a5438928054befc7456478bab024 --- /dev/null +++ b/pySDC/projects/Resilience/extrapolation_within_Q.py @@ -0,0 +1,117 @@ +import matplotlib.pyplot as plt +import numpy as np + +from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import ( + EstimateExtrapolationErrorWithinQ, +) +from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep +from pySDC.helpers.stats_helper import get_sorted + +from pySDC.projects.Resilience.piline import run_piline +from pySDC.projects.Resilience.advection import run_advection +from pySDC.projects.Resilience.vdp import run_vdp + + +def multiple_runs(prob, dts, num_nodes, quad_type='RADAU-RIGHT'): + """ + Make multiple runs of a specific problem and record vital error information + + Args: + prob (function): A problem from the resilience project to run + dts (list): The step sizes to run with + num_nodes (int): Number of nodes + quad_type (str): Type of nodes + + Returns: + dict: Errors for multiple runs + int: Order of the collocation problem + """ + description = {} + description['level_params'] = {'restol': 1e-10} + description['step_params'] = {'maxiter': 99} + description['sweeper_params'] = {'num_nodes': num_nodes, 'quad_type': quad_type} + description['convergence_controllers'] = {EstimateExtrapolationErrorWithinQ: {}} + + if prob.__name__ == 'run_advection': + description['problem_params'] = {'order': 6, 'stencil_type': 'center'} + + res = {} + + for dt in dts: + description['level_params']['dt'] = dt + + stats, controller, _ = 
prob(custom_description=description, Tend=5.0 * dt, hook_class=LogLocalErrorPostStep) + + res[dt] = {} + res[dt]['e_loc'] = max([me[1] for me in get_sorted(stats, type='e_local_post_step')]) + res[dt]['e_ex'] = max([me[1] for me in get_sorted(stats, type='error_extrapolation_estimate')]) + + coll_order = controller.MS[0].levels[0].sweep.coll.order + return res, coll_order + + +def plot_and_compute_order(ax, res, num_nodes, coll_order): + """ + Plot and compute the order from the multiple runs performed with `multiple_runs`. It is also tested whether the expected order + is reached for the respective errors. + + Args: + ax (Matplotlib.pyplot.axes): Somewhere to plot + res (dict): Result from `multiple_runs` + num_nodes (int): Number of nodes + coll_order (int): Order of the collocation problem + + Returns: + None + """ + dts = np.array(list(res.keys())) + keys = list(res[dts[0]].keys()) + + # local error is one order higher than global error + expected_order = { + 'e_loc': coll_order + 1, + 'e_ex': num_nodes + 1, + } + + for key in keys: + errors = np.array([res[dt][key] for dt in dts]) + + mask = np.logical_and(errors < 1e-3, errors > 1e-10) + order = np.log(errors[mask][1:] / errors[mask][:-1]) / np.log(dts[mask][1:] / dts[mask][:-1]) + + if ax is not None: + ax.loglog(dts, errors, label=f'{key}: order={np.mean(order):.2f}') + + assert np.isclose( + np.mean(order), expected_order[key], atol=0.5 + ), f'Expected order {expected_order[key]} for {key}, but got {np.mean(order):.2e}!' + + if ax is not None: + ax.legend(frameon=False) + + +def check_order(ax, prob, dts, num_nodes, quad_type): + """ + Check the order by calling `multiple_runs` and then `plot_and_compute_order`. + + Args: + ax (Matplotlib.pyplot.axes): Somewhere to plot + prob (function): A problem from the resilience project to run + dts (list): The step sizes to run with + num_nodes (int): Number of nodes + quad_type (str): Type of nodes + """ + res, coll_order = multiple_runs(prob, dts, num_nodes, quad_type) + plot_and_compute_order(ax, res, num_nodes, coll_order) + + +def main(): + fig, ax = plt.subplots() + num_nodes = 3 + quad_type = 'RADAU-RIGHT' + check_order(ax, run_advection, [5e-1, 1e-1, 5e-2, 1e-2], num_nodes, quad_type) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/pySDC/projects/Resilience/fault_injection.py b/pySDC/projects/Resilience/fault_injection.py index f70adacbe3aaaa9a831633e8714bd246f6e7f171..84e513827d6feb7cbb3c05fcc3ac917944f1f378 100644 --- a/pySDC/projects/Resilience/fault_injection.py +++ b/pySDC/projects/Resilience/fault_injection.py @@ -254,6 +254,8 @@ class FaultInjector(hooks): None ''' L = step.levels[f.level_number] + _abs_before = None + _abs_after = None # insert the fault in some target if f.target == 0: @@ -268,14 +270,18 @@ fault happens in the last iteration, it will not show up in the residual and the iteration is wrongly stopped. ''' + _abs_before = abs(L.u[f.node][tuple(f.problem_pos)]) L.u[f.node][tuple(f.problem_pos)] = self.flip_bit(L.u[f.node][tuple(f.problem_pos)], f.bit) L.f[f.node] = L.prob.eval_f(L.u[f.node], L.time + L.dt * L.sweep.coll.nodes[max([0, f.node - 1])]) L.sweep.compute_residual() + _abs_after = abs(L.u[f.node][tuple(f.problem_pos)]) else: raise NotImplementedError(f'Target {f.target} for faults not implemented!') # log what happened to stats and screen - self.logger.info(f'Flipping bit {f.bit} {f.when} iteration {f.iteration} in node {f.node}.
Target: {f.target}') + self.logger.info( + f'Flipping bit {f.bit} {f.when} iteration {f.iteration} in node {f.node}. Target: {f.target}. Abs: {_abs_before:.2e} -> {_abs_after:.2e}' + ) self.add_to_stats( process=step.status.slot, time=L.time, @@ -326,6 +332,7 @@ class FaultInjector(hooks): 'iteration': step.params.maxiter, 'problem_pos': step.levels[level_number].u[0].shape, 'bit': bit, # change manually if you ever have something else + **self.rnd_params, } # initialize the faults have been added before we knew the random parameters @@ -420,7 +427,8 @@ class FaultInjector(hooks): return None - def to_binary(self, f): + @classmethod + def to_binary(cls, f): ''' Converts a single float in a string containing its binary representation in memory following IEEE754 The struct.pack function returns the input with the applied conversion code in 8 bit blocks, which are then @@ -437,13 +445,14 @@ class FaultInjector(hooks): elif type(f) in [np.float32]: conversion_code = '>f' # big endian, float elif type(f) in [np.complex128]: - return f'{self.to_binary(f.real)}{self.to_binary(f.imag)}' + return f'{cls.to_binary(f.real)}{cls.to_binary(f.imag)}' else: raise NotImplementedError(f'Don\'t know how to convert number of type {type(f)} to binary') return ''.join('{:0>8b}'.format(c) for c in struct.pack(conversion_code, f)) - def to_float(self, s): + @classmethod + def to_float(cls, s): ''' Converts a string of a IEEE754 binary representation in a float. The string is converted to integer with base 2 and converted to bytes, which can be unpacked into a Python float by the struct module. @@ -463,14 +472,15 @@ class FaultInjector(hooks): elif len(s) == 128: # complex floats real = s[0:64] imag = s[64:128] - return self.to_float(real) + self.to_float(imag) * 1j + return cls.to_float(real) + cls.to_float(imag) * 1j else: raise NotImplementedError(f'Don\'t know how to convert string of length {len(s)} to float') return struct.unpack(conversion_code, int(s, 2).to_bytes(byte_count, 'big'))[0] - def flip_bit(self, target, bit): + @classmethod + def flip_bit(cls, target, bit): ''' Flips a bit at position bit in a target using the bitwise xor operator @@ -481,8 +491,8 @@ class FaultInjector(hooks): Returns: (float) The floating point number resulting from flipping the respective bit in target ''' - binary = self.to_binary(target) - return self.to_float(f'{binary[:bit]}{int(binary[bit]) ^ 1}{binary[bit+1:]}') + binary = cls.to_binary(target) + return cls.to_float(f'{binary[:bit]}{int(binary[bit]) ^ 1}{binary[bit+1:]}') def prepare_controller_for_faults(controller, fault_stuff, rnd_args, args): @@ -501,10 +511,19 @@ def prepare_controller_for_faults(controller, fault_stuff, rnd_args, args): """ faultHook = get_fault_injector_hook(controller) faultHook.random_generator = fault_stuff['rng'] - faultHook.add_fault( - rnd_args={**rnd_args, **fault_stuff.get('rnd_params', {})}, - args={**args, **fault_stuff.get('args', {})}, - ) + + for key in ['fault_frequency_iter']: + if key in fault_stuff.keys(): + faultHook.__dict__[key] = fault_stuff[key] + + for key, val in fault_stuff.get('rnd_params', {}).items(): + faultHook.rnd_params[key] = val + + if not len(faultHook.rnd_params.keys()) > 0: + faultHook.add_fault( + rnd_args={**rnd_args, **fault_stuff.get('rnd_params', {})}, + args={**args, **fault_stuff.get('args', {})}, + ) def get_fault_injector_hook(controller): diff --git a/pySDC/projects/Resilience/fault_stats.py b/pySDC/projects/Resilience/fault_stats.py index 
ab1c5d933a531a6e5e13d243c5a2bc90af66fcc6..917d61619189d652e8cfc18d3a9eed2a69a6ebcf 100644 --- a/pySDC/projects/Resilience/fault_stats.py +++ b/pySDC/projects/Resilience/fault_stats.py @@ -1,10 +1,7 @@ import numpy as np import pickle import matplotlib.pyplot as plt -from matplotlib.colors import TABLEAU_COLORS from mpi4py import MPI -import sys -import matplotlib as mpl import pySDC.helpers.plot_helper as plot_helper from pySDC.helpers.stats_helper import get_sorted @@ -13,7 +10,6 @@ from pySDC.projects.Resilience.hook import hook_collection, LogUAllIter, LogData from pySDC.projects.Resilience.fault_injection import get_fault_injector_hook from pySDC.implementations.convergence_controller_classes.hotrod import HotRod from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity -from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestartingNonMPI from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep from pySDC.implementations.hooks.log_work import LogWork @@ -23,381 +19,11 @@ from pySDC.projects.Resilience.vdp import run_vdp from pySDC.projects.Resilience.piline import run_piline from pySDC.projects.Resilience.Lorenz import run_Lorenz from pySDC.projects.Resilience.Schroedinger import run_Schroedinger -from pySDC.projects.Resilience.leaky_superconductor import run_leaky_superconductor +from pySDC.projects.Resilience.quench import run_quench -plot_helper.setup_mpl(reset=True) -cmap = TABLEAU_COLORS - - -class Strategy: - ''' - Abstract class for resilience strategies - ''' - - def __init__(self): - ''' - Initialization routine - ''' - - # set default values for plotting - self.linestyle = '-' - self.marker = '.' - self.name = '' - self.bar_plot_x_label = '' - self.color = list(cmap.values())[0] - - # setup custom descriptions - self.custom_description = {} - - # prepare parameters for masks to identify faults that cannot be fixed by this strategy - self.fixable = [] - self.fixable += [ - { - 'key': 'node', - 'op': 'gt', - 'val': 0, - } - ] - self.fixable += [ - { - 'key': 'error', - 'op': 'isfinite', - } - ] - - def get_fixable_params(self, **kwargs): - """ - Return a list containing dictionaries which can be passed to `FaultStats.get_mask` as keyword arguments to - obtain a mask of faults that can be fixed - - Returns: - list: Dictionary of parameters - """ - return self.fixable - - def get_custom_description(self, problem, num_procs): - ''' - Routine to get a custom description that realizes the resilience strategy and tailors it to the problem at hand - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - dict: The custom descriptions you can supply to the problem when running it - ''' - - return self.custom_description - - def get_fault_args(self, problem, num_procs): - ''' - Routine to get arguments for the faults that are exempt from randomization - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - dict: Arguments for the faults that are exempt from randomization - ''' - - return {} - - def get_random_params(self, problem, num_procs): - ''' - Routine to get parameters for the randomization of faults - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - dict: 
Randomization parameters - ''' - - return {} - - @property - def style(self): - """ - Get the plotting parameters for the strategy. - Supply them to a plotting function using `**` - - Returns: - (dict): The plotting parameters as a dictionary - """ - return { - 'marker': self.marker, - 'label': self.label, - 'color': self.color, - 'ls': self.linestyle, - } - - @property - def label(self): - """ - Get a label for plotting - """ - return self.name - - -class BaseStrategy(Strategy): - ''' - Do a fixed iteration count - ''' - - def __init__(self): - ''' - Initialization routine - ''' - super(BaseStrategy, self).__init__() - self.color = list(cmap.values())[0] - self.marker = 'o' - self.name = 'base' - self.bar_plot_x_label = 'base' - - -class AdaptivityStrategy(Strategy): - ''' - Adaptivity as a resilience strategy - ''' - - def __init__(self): - ''' - Initialization routine - ''' - super(AdaptivityStrategy, self).__init__() - self.color = list(cmap.values())[1] - self.marker = '*' - self.name = 'adaptivity' - self.bar_plot_x_label = 'adaptivity' - - def get_fixable_params(self, maxiter, **kwargs): - """ - Here faults occurring in the last iteration cannot be fixed. - - Args: - maxiter (int): Max. iterations until convergence is declared - - Returns: - (list): Contains dictionaries of keyword arguments for `FaultStats.get_mask` - """ - self.fixable += [ - { - 'key': 'iteration', - 'op': 'lt', - 'val': maxiter, - } - ] - return self.fixable - - def get_custom_description(self, problem, num_procs): - ''' - Routine to get a custom description that adds adaptivity - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - The custom descriptions you can supply to the problem when running it - ''' - custom_description = {} - - dt_max = np.inf - dt_min = 1e-5 - - if problem == run_piline: - e_tol = 1e-7 - dt_min = 1e-2 - elif problem == run_vdp: - e_tol = 2e-5 - dt_min = 1e-3 - elif problem == run_Lorenz: - e_tol = 2e-5 - dt_min = 1e-3 - elif problem == run_Schroedinger: - e_tol = 4e-6 - dt_min = 1e-3 - elif problem == run_leaky_superconductor: - e_tol = 1e-7 - dt_min = 1e-3 - dt_max = 1e2 - else: - raise NotImplementedError( - 'I don\'t have a tolerance for adaptivity for your problem. Please add one to the\ - strategy' - ) - - custom_description['convergence_controllers'] = { - Adaptivity: {'e_tol': e_tol, 'dt_min': dt_min, 'dt_max': dt_max} - } - return {**custom_description, **self.custom_description} - - -class AdaptiveHotRodStrategy(Strategy): - ''' - Adaptivity + Hot Rod as a resilience strategy - ''' - - def __init__(self): - ''' - Initialization routine - ''' - super(AdaptiveHotRodStrategy, self).__init__() - self.color = list(cmap.values())[4] - self.marker = '.' - self.name = 'adaptive Hot Rod' - self.bar_plot_x_label = 'adaptive\nHot Rod' - - def get_custom_description(self, problem, num_procs): - ''' - Routine to get a custom description that adds adaptivity and Hot Rod - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - The custom description you can supply to the problem when running it - ''' - if problem == run_vdp: - e_tol = 3e-7 - dt_min = 1e-3 - maxiter = 4 - HotRod_tol = 3e-7 - else: - raise NotImplementedError( - 'I don\'t have a tolerance for adaptive Hot Rod for your problem. 
Please add one \ -to the strategy' - ) - - no_storage = num_procs > 1 - - custom_description = { - 'convergence_controllers': { - Adaptivity: {'e_tol': e_tol, 'dt_min': dt_min}, - HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage}, - }, - 'step_params': {'maxiter': maxiter}, - } - - return {**custom_description, **self.custom_description} +from pySDC.projects.Resilience.strategies import BaseStrategy, AdaptivityStrategy, IterateStrategy, HotRodStrategy - -class IterateStrategy(Strategy): - ''' - Iterate for as much as you want - ''' - - def __init__(self): - ''' - Initialization routine - ''' - super(IterateStrategy, self).__init__() - self.color = list(cmap.values())[2] - self.marker = 'v' - self.name = 'iterate' - self.bar_plot_x_label = 'iterate' - - @property - def label(self): - return r'$k$ adaptivity' - - def get_custom_description(self, problem, num_procs): - ''' - Routine to get a custom description that allows for adaptive iteration counts - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - The custom description you can supply to the problem when running it - ''' - restol = -1 - e_tol = -1 - - if problem == run_piline: - restol = 2.3e-8 - elif problem == run_vdp: - restol = 9e-7 - elif problem == run_Lorenz: - restol = 16e-7 - elif problem == run_Schroedinger: - restol = 6.5e-7 - elif problem == run_leaky_superconductor: - # e_tol = 1e-6 - restol = 1e-11 - else: - raise NotImplementedError( - 'I don\'t have a residual tolerance for your problem. Please add one to the \ -strategy' - ) - - custom_description = { - 'step_params': {'maxiter': 99}, - 'level_params': {'restol': restol, 'e_tol': e_tol}, - } - - return {**custom_description, **self.custom_description} - - -class HotRodStrategy(Strategy): - ''' - Hot Rod as a resilience strategy - ''' - - def __init__(self): - ''' - Initialization routine - ''' - super(HotRodStrategy, self).__init__() - self.color = list(cmap.values())[3] - self.marker = '^' - self.name = 'Hot Rod' - self.bar_plot_x_label = 'Hot Rod' - - def get_custom_description(self, problem, num_procs): - ''' - Routine to get a custom description that adds Hot Rod - - Args: - problem: A function that runs a pySDC problem, see imports for available problems - num_procs (int): Number of processes you intend to run with - - Returns: - The custom description you can supply to the problem when running it - ''' - if problem == run_vdp: - HotRod_tol = 5e-7 - maxiter = 4 - elif problem == run_Lorenz: - HotRod_tol = 4e-7 - maxiter = 6 - elif problem == run_Schroedinger: - HotRod_tol = 3e-7 - maxiter = 6 - elif problem == run_leaky_superconductor: - HotRod_tol = 3e-5 - maxiter = 6 - else: - raise NotImplementedError( - 'I don\'t have a tolerance for Hot Rod for your problem. 
Please add one to the\
-                strategy'
-            )
-
-        no_storage = num_procs > 1
-
-        custom_description = {
-            'convergence_controllers': {
-                HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage},
-                BasicRestartingNonMPI: {'max_restarts': 2, 'crash_after_max_restarts': False},
-            },
-            'step_params': {'maxiter': maxiter},
-        }
-
-        return {**custom_description, **self.custom_description}
+plot_helper.setup_mpl(reset=True)


 class FaultStats:
@@ -412,10 +38,11 @@ class FaultStats:
         faults=None,
         reload=True,
         recovery_thresh=1 + 1e-3,
-        recovery_thresh_abs=1e9,
+        recovery_thresh_abs=0.0,
         num_procs=1,
         mode='combination',
         stats_path='data/stats',
+        **kwargs,
     ):
         '''
         Initialization routine
@@ -438,6 +65,10 @@ class FaultStats:
         self.num_procs = num_procs
         self.mode = mode
         self.stats_path = stats_path
+        self.kwargs = {
+            'fault_frequency_iter': 500,
+            **kwargs,
+        }

     def get_Tend(self):
         '''
@@ -446,56 +77,9 @@ class FaultStats:

         Returns:
             float: Tend to put into the run
         '''
-        if self.prob == run_vdp:
-            return 2.3752559741400825
-        elif self.prob == run_piline:
-            return 20.0
-        elif self.prob == run_Lorenz:
-            return 1.5
-        elif self.prob == run_Schroedinger:
-            return 1.0
-        elif self.prob == run_leaky_superconductor:
-            return 450
-        else:
-            raise NotImplementedError('I don\'t have a final time for your problem!')
-
-    def get_custom_description(self):
-        '''
-        Get a custom description based on the problem
-
-        Returns:
-            dict: Custom description
-        '''
-        custom_description = {}
-        if self.prob == run_vdp:
-            custom_description['step_params'] = {'maxiter': 3}
-        elif self.prob == run_Lorenz:
-            custom_description['step_params'] = {'maxiter': 5}
-        elif self.prob == run_Schroedinger:
-            custom_description['step_params'] = {'maxiter': 5}
-            custom_description['level_params'] = {'dt': 1e-2, 'restol': -1}
-        elif self.prob == run_leaky_superconductor:
-            custom_description['level_params'] = {'restol': -1, 'dt': 10.0}
-            custom_description['step_params'] = {'maxiter': 5}
-            custom_description['problem_params'] = {'newton_iter': 99, 'newton_tol': 1e-10}
-        return custom_description
-
-    def get_custom_problem_params(self):
-        '''
-        Get a custom problem parameters based on the problem
-
-        Returns:
-            dict: Custom problem params
-        '''
-        custom_params = {}
-        if self.prob == run_vdp:
-            custom_params = {
-                'u0': np.array([0.99995, -0.00999985], dtype=np.float64),
-                'crash_at_maxiter': False,
-            }
-        return custom_params
+        return self.strategies[0].get_Tend(self.prob, self.num_procs)

-    def run_stats_generation(self, runs=1000, step=None, comm=None, _reload=False, _runs_partial=0):
+    def run_stats_generation(self, runs=1000, step=None, comm=None, kwargs_range=None, _reload=False, _runs_partial=0):
         '''
         Run the generation of stats for all strategies in the `self.strategies` variable

@@ -503,11 +87,21 @@ class FaultStats:
             runs (int): Number of runs you want to do
             step (int): Number of runs you want to do between saving
             comm (MPI.Communicator): Communicator for distributing runs
+            kwargs_range (dict): Range for the parameters
             _reload, _runs_partial: Variables only used for recursion. Do not change!
        Returns:
            None
        '''
+        for key, val in (kwargs_range if kwargs_range is not None else {}).items():
+            if type(val) == int:
+                self.kwargs[key] = val
+            else:
+                for me in val:
+                    kwargs_range_me = {**kwargs_range, key: me}
+                    self.run_stats_generation(runs=runs, step=step, comm=comm, kwargs_range=kwargs_range_me)
+                return None
+
         comm = MPI.COMM_WORLD if comm is None else comm
         step = (runs if step is None else step) if comm.size == 1 else comm.size
         _runs_partial = step if _runs_partial == 0 else _runs_partial
@@ -519,9 +113,11 @@ class FaultStats:
         # sort the strategies to do some load balancing
         sorting_index = None
         if comm.rank == 0:
-            already_completed = np.array([self.load(strategy, True).get('runs', 0) for strategy in self.strategies])
+            already_completed = np.array(
+                [self.load(strategy=strategy, faults=True).get('runs', 0) for strategy in self.strategies]
+            )
             sorting_index_ = np.argsort(already_completed)
-            sorting_index = sorting_index_[already_completed[sorting_index_] < runs]
+            sorting_index = sorting_index_[already_completed[sorting_index_] < max_runs]

         # tell all ranks what strategies to use
         sorting_index = comm.bcast(sorting_index, root=0)
@@ -540,7 +136,7 @@ class FaultStats:
             else:
                 runs_partial = min([5, _runs_partial])
             self.generate_stats(
-                strategy=strategies[j + comm.rank % len(strategies)],
+                strategy=strategies[j + (comm.rank % len(strategies) % (len(strategies)) - j)],
                 runs=runs_partial,
                 faults=f,
                 reload=reload,
@@ -583,11 +179,17 @@ class FaultStats:
            'target': np.zeros(runs),
        }

+        # store arguments for storing and loading
+        identifier_args = {
+            'faults': faults,
+            'strategy': strategy,
+        }
+
         # reload previously recorded stats and write them to dat
         if reload:
             already_completed_ = None
             if comm.rank == 0:
-                already_completed_ = self.load(strategy, faults)
+                already_completed_ = self.load(**identifier_args)
             already_completed = comm.bcast(already_completed_, root=0)
             if already_completed['runs'] > 0 and already_completed['runs'] <= runs and comm.rank == 0:
                 for k in dat.keys():
@@ -625,13 +227,15 @@ class FaultStats:

             # record the new data point
             if faults:
-                assert len(faults_run) > 0, f'No faults where recorded in run {i} of strategy {strategy.name}!'
-                dat['level'][i] = faults_run[0][1][0]
-                dat['iteration'][i] = faults_run[0][1][1]
-                dat['node'][i] = faults_run[0][1][2]
-                dat['problem_pos'] += [faults_run[0][1][3]]
-                dat['bit'][i] = faults_run[0][1][4]
-                dat['target'][i] = faults_run[0][1][5]
+                if len(faults_run) > 0:
+                    dat['level'][i] = faults_run[0][1][0]
+                    dat['iteration'][i] = faults_run[0][1][1]
+                    dat['node'][i] = faults_run[0][1][2]
+                    dat['problem_pos'] += [faults_run[0][1][3]]
+                    dat['bit'][i] = faults_run[0][1][4]
+                    dat['target'][i] = faults_run[0][1][5]
+                else:
+                    assert self.mode == 'regular', f'No faults were recorded in run {i} of strategy {strategy.name}!'
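The recursion at the top of `run_stats_generation` fans a `kwargs_range` dict out into one call per concrete parameter combination. A standalone sketch of that fan-out (a hypothetical, simplified helper, not part of the patch):

```python
# Hypothetical, simplified fan-out: unroll every non-integer entry of
# `kwargs_range` until each key maps to a single integer, mirroring how
# run_stats_generation recurses before doing any actual runs.
def expand(kwargs_range):
    for key, val in kwargs_range.items():
        if not isinstance(val, int):
            for me in val:
                yield from expand({**kwargs_range, key: me})
            return
    yield kwargs_range  # fully resolved combination


print(list(expand({'fault_frequency_iter': (10, 100), 'runs_per_point': 5})))
# [{'fault_frequency_iter': 10, 'runs_per_point': 5},
#  {'fault_frequency_iter': 100, 'runs_per_point': 5}]
```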
dat['error'][i] = error dat['total_iteration'][i] = total_iteration dat['total_newton_iteration'][i] = total_newton_iteration @@ -646,10 +250,10 @@ class FaultStats: if already_completed['runs'] < runs: if comm.rank == 0: - self.store(strategy, faults, dat_full) + self.store(dat_full, **identifier_args) if self.faults: try: - self.get_recovered(strategy) + self.get_recovered(strategy=strategy) except KeyError: print('Warning: Can\'t compute recovery rate right now') @@ -668,18 +272,9 @@ class FaultStats: Returns: float: Error """ - if self.prob == run_leaky_superconductor: - ref = { - AdaptivityStrategy: 0.036832240840408426, - IterateStrategy: 0.0368214748207781, - HotRodStrategy: 0.03682153860683977, - BaseStrategy: 0.03682153860683977, - } - return abs(max(u) - ref[type(strategy)]) - else: - return abs(u - controller.MS[0].levels[0].prob.u_exact(t=t)) + return abs(u - controller.MS[0].levels[0].prob.u_exact(t=t)) - def single_run(self, strategy, run=0, faults=False, force_params=None, hook_class=None, space_comm=None): + def single_run(self, strategy, run=0, faults=False, force_params=None, hook_class=None, space_comm=None, Tend=None): ''' Run the problem once with the specified parameters @@ -699,34 +294,34 @@ class FaultStats: force_params = {} if force_params is None else force_params # build the custom description - custom_description_prob = self.get_custom_description() - custom_description_strategy = strategy.get_custom_description(self.prob, self.num_procs) - custom_description = {} - for key in list(custom_description_strategy.keys()) + list(custom_description_prob.keys()): - custom_description[key] = { - **custom_description_prob.get(key, {}), - **custom_description_strategy.get(key, {}), - } + custom_description = strategy.get_custom_description(self.prob, self.num_procs) for k in force_params.keys(): custom_description[k] = {**custom_description.get(k, {}), **force_params[k]} custom_controller_params = force_params.get('controller_params', {}) - custom_problem_params = self.get_custom_problem_params() if faults: + fault_stuff = { + 'rng': None, + 'args': strategy.get_fault_args(self.prob, self.num_procs), + 'rnd_params': strategy.get_fault_args(self.prob, self.num_procs), + } + # make parameters for faults: if self.mode == 'random': - rng = np.random.RandomState(run) + fault_stuff['rng'] = np.random.RandomState(run) elif self.mode == 'combination': - rng = run + fault_stuff['rng'] = run + elif self.mode == 'regular': + fault_stuff['rng'] = np.random.RandomState(run) + fault_stuff['fault_frequency_iter'] = self.kwargs['fault_frequency_iter'] + fault_stuff['rnd_params'] = { + 'bit': 12, + 'min_node': 1, + } else: raise NotImplementedError(f'Don\'t know how to add faults in mode {self.mode}') - fault_stuff = { - 'rng': rng, - 'args': strategy.get_fault_args(self.prob, self.num_procs), - 'rnd_params': strategy.get_fault_args(self.prob, self.num_procs), - } else: fault_stuff = None @@ -735,13 +330,12 @@ class FaultStats: num_procs=self.num_procs, hook_class=hook_class, fault_stuff=fault_stuff, - Tend=self.get_Tend(), + Tend=self.get_Tend() if Tend is None else Tend, custom_controller_params=custom_controller_params, - custom_problem_params=custom_problem_params, space_comm=space_comm, ) - def compare_strategies(self, run=0, faults=False, ax=None): + def compare_strategies(self, run=0, faults=False, ax=None): # pragma: no cover ''' Take a closer look at how the strategies compare for a specific run @@ -771,7 +365,9 @@ class FaultStats: fig.tight_layout() 
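The `mode='regular'` branch in `single_run` above seeds a fixed `rnd_params` set and pulls the injection interval from `self.kwargs['fault_frequency_iter']`. A hedged usage sketch (illustrative values; assumes the names imported at the top of this module):

```python
# Sketch: drive the new deterministic fault mode. `fault_frequency_iter` is
# forwarded through **kwargs into FaultStats.kwargs (default 500) and read
# back in single_run when fault_stuff is assembled.
stats_analyser = FaultStats(
    prob=run_vdp,
    strategies=[BaseStrategy()],
    faults=[True],
    mode='regular',
    fault_frequency_iter=250,
)
stats, controller, Tend = stats_analyser.single_run(strategy=BaseStrategy(), run=0, faults=True)
```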
plt.savefig(f'data/{self.get_name()}-comparison.pdf', transparent=True) - def scrutinize_visual(self, strategy, run, faults, ax=None, k_ax=None, ls='-', plot_restarts=False): + def scrutinize_visual( + self, strategy, run, faults, ax=None, k_ax=None, ls='-', plot_restarts=False + ): # pragma: no cover ''' Take a closer look at a specific run with a plot @@ -792,7 +388,7 @@ class FaultStats: else: store = False - force_params = dict() + force_params = {} stats, controller, Tend = self.single_run( strategy=strategy, @@ -843,7 +439,7 @@ class FaultStats: Returns: None ''' - force_params = dict() + force_params = {} force_params['controller_params'] = {'logger_level': 15} stats, controller, Tend = self.single_run(strategy=strategy, run=run, faults=faults, force_params=force_params) @@ -856,7 +452,7 @@ class FaultStats: print(f'\nOverview for {strategy.name} strategy') # see if we can determine if the faults where recovered - no_faults = self.load(strategy, False) + no_faults = self.load(strategy=strategy, faults=False) e_star = np.mean(no_faults.get('error', [0])) if t < Tend: error = np.inf @@ -889,10 +485,10 @@ class FaultStats: # print faults faults = get_sorted(stats, type='bitflip') print('\nfaults:') - print(' t | level | iter | node | bit | trgt | pos') - print('------+-------+------+------+-----+------+----') + print(' t | level | iter | node | bit | trgt | pos') + print('--------+-------+------+------+-----+------+----') for f in faults: - print(f' {f[0]:.2f} | {f[1][0]:5d} | {f[1][1]:4d} | {f[1][2]:4d} | {f[1][4]:3d} | {f[1][5]:4d} |', f[1][3]) + print(f' {f[0]:6.2f} | {f[1][0]:5d} | {f[1][1]:4d} | {f[1][2]:4d} | {f[1][4]:3d} | {f[1][5]:4d} |', f[1][3]) return None @@ -919,7 +515,7 @@ class FaultStats: bit = [faults[i][1][4] for i in range(len(faults))] return time, level, iteration, node, problem_pos, bit - def get_path(self, strategy, faults): + def get_path(self, **kwargs): ''' Get the path to where the stats are stored @@ -930,9 +526,9 @@ class FaultStats: Returns: str: The path to what you are looking for ''' - return f'{self.stats_path}/{self.get_name(strategy, faults)}.pickle' + return f'{self.stats_path}/{self.get_name(**kwargs)}.pickle' - def get_name(self, strategy=None, faults=False): + def get_name(self, strategy=None, faults=True, mode=None): ''' Function to get a unique name for a kind of statistics based on the problem and strategy that was used @@ -953,7 +549,7 @@ class FaultStats: prob_name = 'Lorenz' elif self.prob == run_Schroedinger: prob_name = 'Schroedinger' - elif self.prob == run_leaky_superconductor: + elif self.prob == run_quench: prob_name = 'Quench' else: raise NotImplementedError(f'Name not implemented for problem {self.prob}') @@ -968,42 +564,41 @@ class FaultStats: else: strategy_name = '' - return f'{prob_name}{strategy_name}{fault_name}-{self.num_procs}procs' + mode = self.mode if mode is None else mode + if mode == 'regular': + mode_thing = f'-regular{self.kwargs["fault_frequency_iter"] if faults else ""}' + else: + mode_thing = '' + + return f'{prob_name}{strategy_name}{fault_name}-{self.num_procs}procs{mode_thing}' - def store(self, strategy, faults, dat): + def store(self, dat, **kwargs): ''' Stores the data for a run at a predefined path Args: - strategy (Strategy): Resilience strategy - faults (bool): Whether or not faults where inserted dat (dict): The data of the recorded statistics Returns: None ''' - with open(self.get_path(strategy, faults), 'wb') as f: + with open(self.get_path(**kwargs), 'wb') as f: pickle.dump(dat, f) return None - def 
load(self, strategy=None, faults=True): + def load(self, **kwargs): ''' Loads the stats belonging to a specific strategy and whether or not faults where inserted. When no data has been generated yet, a dictionary is returned which only contains the number of completed runs, which is 0 of course. - Args: - strategy (Strategy): Resilience strategy - faults (bool): Whether or not faults where inserted - Returns: dict: Data from previous run or if it is not available a placeholder dictionary ''' - if strategy is None: - strategy = self.strategies[MPI.COMM_WORLD.rank % len(self.strategies)] + kwargs['strategy'] = kwargs.get('strategy', self.strategies[MPI.COMM_WORLD.rank % len(self.strategies)]) try: - with open(self.get_path(strategy, faults), 'rb') as f: + with open(self.get_path(**kwargs), 'rb') as f: dat = pickle.load(f) except FileNotFoundError: return {'runs': 0} @@ -1016,27 +611,26 @@ class FaultStats: Args: strategy (Strategy): The resilience strategy """ - fault_free = self.load(strategy, False) + fault_free = self.load(strategy=strategy, faults=False) assert fault_free['error'].std() / fault_free['error'].mean() < 1e-5 return self.recovery_thresh_abs + self.recovery_thresh * fault_free["error"].mean() - def get_recovered(self, strategy=None): + def get_recovered(self, **kwargs): ''' Determine the recovery rate for a specific strategy and store it to disk. - Args: - strategy (Strategy): The resilience strategy you want to get the recovery rate for. If left at None, it will - be computed for all available strategies - Returns: None ''' - if strategy is None: - [self.get_recovered(strat) for strat in self.strategies] - - with_faults = self.load(strategy, True) - with_faults['recovered'] = with_faults['error'] < self.get_thresh(strategy) - self.store(strategy, True, with_faults) + if 'strategy' not in kwargs.keys(): + [self.get_recovered(strategy=strat, **kwargs) for strat in self.strategies] + else: + try: + with_faults = self.load(faults=True, **kwargs) + with_faults['recovered'] = with_faults['error'] < self.get_thresh(kwargs['strategy']) + self.store(faults=True, dat=with_faults, **kwargs) + except KeyError: + print("Can\'t compute recovery rate right now") return None @@ -1112,7 +706,9 @@ class FaultStats: else: return None - def plot_thingA_per_thingB(self, strategy, thingA, thingB, ax=None, mask=None, recovered=False, op=None): + def plot_thingA_per_thingB( + self, strategy, thingA, thingB, ax=None, mask=None, recovered=False, op=None + ): # pragma: no cover ''' Plot thingA vs. 
thingB for a single strategy @@ -1129,8 +725,8 @@ class FaultStats: None ''' op = self.rec_rate if op is None else op - dat = self.load(strategy, True) - no_faults = self.load(strategy, False) + dat = self.load(strategy=strategy, faults=True) + no_faults = self.load(strategy=strategy, faults=False) if mask is None: mask = np.ones_like(dat[thingB], dtype=bool) @@ -1178,7 +774,7 @@ class FaultStats: store=True, ax=None, fig=None, - ): + ): # pragma: no cover ''' Plot thingA vs thingB for multiple strategies @@ -1221,7 +817,7 @@ class FaultStats: return None - def plot_recovery_thresholds(self, strategies=None, thresh_range=None, ax=None): + def plot_recovery_thresholds(self, strategies=None, thresh_range=None, ax=None): # pragma: no cover ''' Plot the recovery rate for a range of thresholds @@ -1243,8 +839,8 @@ class FaultStats: for strategy_idx in range(len(strategies)): strategy = strategies[strategy_idx] # load the stats - fault_free = self.load(strategy, False) - with_faults = self.load(strategy, True) + fault_free = self.load(strategy=strategy, faults=False) + with_faults = self.load(strategy=strategy, faults=True) for thresh_idx in range(len(thresh_range)): rec_mask = with_faults['error'] < thresh_range[thresh_idx] * fault_free['error'].mean() @@ -1257,7 +853,7 @@ class FaultStats: return None - def analyse_adaptivity(self, mask): + def analyse_adaptivity(self, mask): # pragma: no cover ''' Analyse a set of runs with adaptivity @@ -1286,7 +882,7 @@ class FaultStats: print(f'We only restart when e_em > e_tol = {e_tol:.2e}!') return None - def analyse_adaptivity_single(self, run): + def analyse_adaptivity_single(self, run): # pragma: no cover ''' Compute what the difference in embedded and global error are for a specific run with adaptivity @@ -1322,7 +918,7 @@ class FaultStats: return [e_em[i][-1] for i in [0, 1]], e_glob - def analyse_HotRod(self, mask): + def analyse_HotRod(self, mask): # pragma: no cover ''' Analyse a set of runs with Hot Rod @@ -1358,7 +954,7 @@ class FaultStats: print(f'We only restart when diff > tol = {tol:.2e}!') return None - def analyse_HotRod_single(self, run): + def analyse_HotRod_single(self, run): # pragma: no cover ''' Compute what the difference in embedded, extrapolated and global error are for a specific run with Hot Rod @@ -1400,7 +996,7 @@ class FaultStats: return [e_em[i][-1] for i in [0, 1]], [e_ex[i][-1] for i in [0, 1]], e_glob - def print_faults(self, mask=None): + def print_faults(self, mask=None): # pragma: no cover ''' Print all faults that happened within a certain mask @@ -1441,16 +1037,16 @@ class FaultStats: Numpy.ndarray with boolean entries that can be used as a mask ''' strategy = self.strategies[0] if strategy is None else strategy - dat = self.load(strategy, True) + dat = self.load(strategy=strategy, faults=True) if compare_faults: if val is not None: raise ValueError('Can\'t use val and compare_faults in get_mask at the same time!') else: - vals = self.load(strategy, False)[key] + vals = self.load(strategy=strategy, faults=False)[key] val = sum(vals) / len(vals) - if None in [key, val] and not op in ['isfinite']: + if None in [key, val] and op not in ['isfinite']: mask = dat['bit'] == dat['bit'] else: if op == 'uneq': @@ -1485,7 +1081,9 @@ class FaultStats: Returns: Numpy.ndarray with boolean entries that can be used as a mask """ - fixable = strategy.get_fixable_params(maxiter=self.get_custom_description()['step_params']['maxiter']) + fixable = strategy.get_fixable_params( + maxiter=strategy.get_custom_description(self.prob, 
self.num_procs)['step_params']['maxiter']
+        )
         mask = self.get_mask(strategy=strategy)

         for kwargs in fixable:
@@ -1509,7 +1107,7 @@ class FaultStats:
         else:
             return np.arange(len(mask))[mask]

-    def get_statistics_info(self, mask=None, strategy=None, print_all=False, ax=None):
+    def get_statistics_info(self, mask=None, strategy=None, print_all=False, ax=None):  # pragma: no cover
         '''
         Get information about how many data points for faults we have given a particular mask

@@ -1526,7 +1124,7 @@ class FaultStats:

         # load some data from which to infer the number occurrences of some event
         strategy = self.strategies[0] if strategy is None else strategy
-        dat = self.load(strategy, True)
+        dat = self.load(strategy=strategy, faults=True)

         # make a dummy mask in case none is supplied
         if mask is None:
@@ -1548,7 +1146,7 @@ class FaultStats:

         return None

-    def combinations_histogram(self, dat=None, keys=None, mask=None, ax=None):
+    def combinations_histogram(self, dat=None, keys=None, mask=None, ax=None):  # pragma: no cover
         '''
         Make a histogram ouf of the occurrences of combinations
@@ -1571,7 +1169,7 @@ class FaultStats:

         return ax

-    def get_combination_histogram(self, dat=None, keys=None, mask=None):
+    def get_combination_histogram(self, dat=None, keys=None, mask=None):  # pragma: no cover
         '''
         Check how many combinations of values we expect and how many we find to see if we need to do more experiments.
         It is assumed that each allowed value for each key appears at least once in dat after the mask was applied
@@ -1587,7 +1185,7 @@ class FaultStats:
         '''

         # load default values
-        dat = self.load(self.strategies[0], True) if dat is None else dat
+        dat = self.load(strategy=self.strategies[0], faults=True) if dat is None else dat
         keys = ['iteration', 'bit', 'node'] if keys is None else keys
         if mask is None:
             mask = np.ones_like(dat['error'], dtype=bool)
@@ -1673,7 +1271,7 @@ class FaultStats:

     def bar_plot_thing(
         self, x=None, thing=None, ax=None, mask=None, store=False, faults=False, name=None, op=None, args=None
-    ):
+    ):  # pragma: no cover
         '''
         Make a bar plot about something!
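With `get_path`, `get_name`, `store`, and `load` all sharing one keyword interface, an identifier dict can be threaded through the whole round trip unchanged. A small sketch (illustrative; `stats_analyser` and `dat` stand for any `FaultStats` instance and recorded data dict):

```python
# Sketch of the keyword plumbing: the same identifiers select the pickle path
# for storing and for loading; 'regular' mode additionally bakes the fault
# frequency into the file name via get_name.
identifier = {'strategy': BaseStrategy(), 'faults': True}
stats_analyser.store(dat, **identifier)        # pickles `dat` under get_path(**identifier)
dat_again = stats_analyser.load(**identifier)  # same keywords, same file
assert dat_again['runs'] == dat['runs']
```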
@@ -1701,8 +1299,8 @@ class FaultStats:
             strategy = self.strategies[strategy_idx]

             # load the values
-            dat = self.load(strategy, faults)
-            no_faults = self.load(strategy, False)
+            dat = self.load(strategy=strategy, faults=faults)
+            no_faults = self.load(strategy=strategy, faults=False)

             # check if we have a mask
             if mask is None:
@@ -1730,21 +1328,98 @@ class FaultStats:

         return None

+    def fault_frequency_plot(self, ax, iter_ax, kwargs_range, strategy=None):  # pragma: no cover
+        func_args = locals()
+        func_args.pop('self', None)
+        if strategy is None:
+            for strat in self.strategies:
+                args = {**func_args, 'strategy': strat}
+                self.fault_frequency_plot(**args)
+            return None
+
+        # load data
+        all_data = {}
+        for me in kwargs_range['fault_frequency_iter']:
+            self.kwargs['fault_frequency_iter'] = me
+            self.get_recovered()
+            all_data[me] = self.load(strategy=strategy, faults=True, mode='regular')
+
+        # get_recovery_rate
+        results = {}
+        results['frequencies'] = list(all_data.keys())
+        results['recovery_rate'] = [
+            len(all_data[key]['recovered'][all_data[key]['recovered']]) / len(all_data[key]['recovered'])
+            for key in all_data.keys()
+        ]
+        # results['iterations'] = [np.mean(all_data[key]['total_iteration']) for key in all_data.keys()]
+        results['iterations'] = [
+            np.mean(all_data[key]['total_iteration'][all_data[key]['error'] != np.inf]) for key in all_data.keys()
+        ]
+
+        ax.plot(results['frequencies'], results['recovery_rate'], **strategy.style)
+        iter_ax.plot(results['frequencies'], results['iterations'], **{**strategy.style, 'ls': '--'})
+
+        ax.set_xscale('log')
+        ax.set_xlabel('iterations between fault')
+        ax.set_ylabel('recovery rate')
+        iter_ax.set_ylabel('average total iterations if not crashed (dashed)')
+        ax.legend(frameon=False)
+
+        return None
+
+
+def check_local_error():  # pragma: no cover
+    """
+    Make a plot of the resolution over time for all problems
+    """
+    problems = [run_vdp, run_Lorenz, run_Schroedinger, run_quench]
+    problems = [run_quench]
+    strategies = [BaseStrategy(), AdaptivityStrategy(), IterateStrategy()]
+
+    for i in range(len(problems)):
+        stats_analyser = FaultStats(
+            prob=problems[i],
+            strategies=strategies,
+            faults=[False],
+            reload=True,
+            recovery_thresh=1.1,
+            num_procs=1,
+            mode='random',
+        )
+        stats_analyser.compare_strategies()
+    plt.show()
+

 def main():
     stats_analyser = FaultStats(
-        prob=run_leaky_superconductor,
+        prob=run_vdp,
         strategies=[BaseStrategy(), AdaptivityStrategy(), IterateStrategy(), HotRodStrategy()],
         faults=[False, True],
         reload=True,
         recovery_thresh=1.1,
-        recovery_thresh_abs=5e-5,
+        # recovery_thresh_abs=1e-5,
         num_procs=1,
         mode='random',
         stats_path='data/stats-jusuf',
     )
+    ########################
+    # msk = stats_analyser.get_mask(AdaptivityStrategy(), val=False, key='recovered')
+    # stats_analyser.print_faults(msk)
+    fig, ax = plt.subplots()
+    iter_ax = ax.twinx()
+    kwargs_range = {'fault_frequency_iter': (10, 100, 1000, 10000)}
+    stats_analyser.run_stats_generation(runs=10, kwargs_range=kwargs_range)
+    stats_analyser.fault_frequency_plot(ax=ax, iter_ax=iter_ax, kwargs_range=kwargs_range)
+    # stats_analyser.scrutinize(AdaptivityStrategy(), 4, True)
+    plt.show()
+    return None
+    ########################
+
+    stats_analyser.run_stats_generation(runs=5000)
+
+    if MPI.COMM_WORLD.rank > 0:  # make sure only one rank accesses the data
+        return None

-    stats_analyser.run_stats_generation(runs=1000)
     stats_analyser.get_recovered()
     mask = None

@@ -1811,4 +1486,5 @@ def main():


 if __name__ == "__main__":
+    # check_local_error()
     main()
diff
--git a/pySDC/projects/Resilience/heat.py b/pySDC/projects/Resilience/heat.py index 8d31cb2d6dae044ec57ed69b136cf7c3194e0b22..02af22c41884a48f9bb6d7e40464b0577a64d28a 100644 --- a/pySDC/projects/Resilience/heat.py +++ b/pySDC/projects/Resilience/heat.py @@ -7,6 +7,7 @@ from pySDC.core.Hooks import hooks from pySDC.helpers.stats_helper import get_sorted from pySDC.projects.Resilience.hook import hook_collection, LogData import numpy as np +from pySDC.projects.Resilience.strategies import merge_descriptions def run_heat( @@ -16,7 +17,6 @@ def run_heat( hook_class=LogData, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, ): """ Run a heat problem with default parameters. @@ -28,7 +28,6 @@ def run_heat( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets Returns: dict: The stats object @@ -58,9 +57,6 @@ def run_heat( 'liniter': None, } - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters step_params = dict() step_params['maxiter'] = 5 @@ -84,11 +80,7 @@ def run_heat( description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 diff --git a/pySDC/projects/Resilience/jobscript_jusuf.sh b/pySDC/projects/Resilience/jobscript_jusuf.sh new file mode 100755 index 0000000000000000000000000000000000000000..639a564beb32495330b0765d5d05ce24518ada62 --- /dev/null +++ b/pySDC/projects/Resilience/jobscript_jusuf.sh @@ -0,0 +1,22 @@ +#!/bin/bash +#SBATCH --nodes=1 +#SBATCH --ntasks=64 +#SBATCH --time=24:00:00 +#SBATCH --output=out/out%j.txt +#SBATCH --error=out/err%j.txt +#SBATCH -A cstma +#SBATCH --mail-type=END,FAIL +#SBATCH --mail-user=t.baumann@fz-juelich.de +#SBATCH -p batch +#SBATCH -J red_wedding + +module --force purge +module load Stages/2023 +module load Intel/2022.1.0 +module load ParaStationMPI/5.8.0-1 + +cd /p/project/ccstma/baumann7/pySDC/pySDC/projects/Resilience + +source /p/project/ccstma/baumann7/miniconda/bin/activate pySDC + +srun -n 64 python ${1} diff --git a/pySDC/projects/Resilience/leaky_superconductor.py b/pySDC/projects/Resilience/leaky_superconductor.py deleted file mode 100644 index 4f4177c3d7fd63cc89edf766e313f0b1a5e55ff5..0000000000000000000000000000000000000000 --- a/pySDC/projects/Resilience/leaky_superconductor.py +++ /dev/null @@ -1,273 +0,0 @@ -# script to run a quench problem -from pySDC.implementations.problem_classes.LeakySuperconductor import LeakySuperconductor, LeakySuperconductorIMEX -from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit -from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order -from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI -from pySDC.core.Hooks import hooks -from pySDC.helpers.stats_helper import get_sorted -from pySDC.projects.Resilience.hook import hook_collection, LogData -import numpy as np - -import matplotlib.pyplot as plt -from pySDC.core.Errors import ConvergenceError - - -class live_plot(hooks): # pragma: no cover - """ - This hook plots the solution and the non-linear 
part of the right hand side after every step. Keep in mind that using adaptivity will result in restarts, which is not marked in these plots. Prepare to see the temperature profile jumping back again after a restart. - """ - - def _plot_state(self, step, level_number): # pragma: no cover - """ - Plot the solution at all collocation nodes and the non-linear part of the right hand side - - Args: - step (pySDC.Step.step): The current step - level_number (int): Number of current level - - Returns: - None - """ - L = step.levels[level_number] - for ax in self.axs: - ax.cla() - [self.axs[0].plot(L.prob.xv, L.u[i], legend=f"node {i}") for i in range(len(L.u))] - self.axs[0].axhline(L.prob.u_thresh, color='black') - self.axs[1].plot(L.prob.xv, L.prob.eval_f_non_linear(L.u[-1], L.time)) - self.axs[0].set_ylim(0, 0.025) - self.fig.suptitle(f"t={L.time:.2e}, k={step.status.iter}") - plt.pause(1e-9) - - def pre_run(self, step, level_number): # pragma: no cover - """ - Setup a figure to plot into - - Args: - step (pySDC.Step.step): The current step - level_number (int): Number of current level - - Returns: - None - """ - self.fig, self.axs = plt.subplots(1, 2, figsize=(10, 4)) - - def post_step(self, step, level_number): # pragma: no cover - """ - Call the plotting function after the step - - Args: - step (pySDC.Step.step): The current step - level_number (int): Number of current level - - Returns: - None - """ - self._plot_state(step, level_number) - - -def run_leaky_superconductor( - custom_description=None, - num_procs=1, - Tend=6e2, - hook_class=LogData, - fault_stuff=None, - custom_controller_params=None, - custom_problem_params=None, - imex=False, - u0=None, - t0=None, - **kwargs, -): - """ - Run a toy problem of a superconducting magnet with a temperature leak with default parameters. 
- - Args: - custom_description (dict): Overwrite presets - num_procs (int): Number of steps for MSSDC - Tend (float): Time to integrate to - hook_class (pySDC.Hook): A hook to store data - fault_stuff (dict): A dictionary with information on how to add faults - custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets - imex (bool): Solve the problem IMEX or fully implicit - u0 (dtype_u): Initial value - t0 (float): Starting time - - Returns: - dict: The stats object - controller: The controller - Tend: The time that was supposed to be integrated to - """ - - # initialize level parameters - level_params = dict() - level_params['dt'] = 10.0 - - # initialize sweeper parameters - sweeper_params = dict() - sweeper_params['quad_type'] = 'RADAU-RIGHT' - sweeper_params['num_nodes'] = 3 - sweeper_params['QI'] = 'IE' - sweeper_params['QE'] = 'PIC' - - problem_params = {} - - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - - # initialize step parameters - step_params = dict() - step_params['maxiter'] = 5 - - # initialize controller parameters - controller_params = dict() - controller_params['logger_level'] = 30 - controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class]) - controller_params['mssdc_jac'] = False - - if custom_controller_params is not None: - controller_params = {**controller_params, **custom_controller_params} - - # fill description dictionary for easy step instantiation - description = dict() - description['problem_class'] = LeakySuperconductorIMEX if imex else LeakySuperconductor - description['problem_params'] = problem_params - description['sweeper_class'] = imex_1st_order if imex else generic_implicit - description['sweeper_params'] = sweeper_params - description['level_params'] = level_params - description['step_params'] = step_params - - if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} - - # set time parameters - t0 = 0.0 if t0 is None else t0 - - # instantiate controller - controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description) - - # insert faults - if fault_stuff is not None: - from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults - - rnd_args = {'iteration': 5, 'min_node': 1} - args = {'time': 21.0, 'target': 0} - prepare_controller_for_faults(controller, fault_stuff, rnd_args, args) - - # get initial values on finest level - P = controller.MS[0].levels[0].prob - uinit = P.u_exact(t0) if u0 is None else u0 - - # call main function to get things done... 
- try: - uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) - except ConvergenceError: - stats = controller.return_stats() - return stats, controller, Tend - - -def plot_solution(stats, controller): # pragma: no cover - import matplotlib.pyplot as plt - - fig, ax = plt.subplots(1, 1) - u_ax = ax - dt_ax = u_ax.twinx() - - u = get_sorted(stats, type='u', recomputed=False) - u_ax.plot([me[0] for me in u], [max(me[1]) for me in u], label=r'$T$') - - dt = get_sorted(stats, type='dt', recomputed=False) - dt_ax.plot([me[0] for me in dt], [me[1] for me in dt], color='black', ls='--') - u_ax.plot([None], [None], color='black', ls='--', label=r'$\Delta t$') - - P = controller.MS[0].levels[0].prob - u_ax.axhline(P.params.u_thresh, color='grey', ls='-.', label=r'$T_\mathrm{thresh}$') - u_ax.axhline(P.params.u_max, color='grey', ls=':', label=r'$T_\mathrm{max}$') - - u_ax.legend() - u_ax.set_xlabel(r'$t$') - u_ax.set_ylabel(r'$T$') - dt_ax.set_ylabel(r'$\Delta t$') - - -def compare_imex_full(plotting=False, leak_type='linear'): - """ - Compare the results of IMEX and fully implicit runs. For IMEX we need to limit the step size in order to achieve convergence, but for fully implicit, adaptivity can handle itself better. - - Args: - plotting (bool): Plot the solution or not - """ - from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity - from pySDC.implementations.hooks.log_work import LogWork - from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun - - maxiter = 5 - num_nodes = 3 - newton_iter_max = 20 - - res = {} - rhs = {} - error = {} - - custom_description = {} - custom_description['problem_params'] = { - 'newton_tol': 1e-10, - 'newton_iter': newton_iter_max, - 'nvars': 2**9, - 'leak_type': leak_type, - } - custom_description['step_params'] = {'maxiter': maxiter} - custom_description['sweeper_params'] = {'num_nodes': num_nodes} - - custom_controller_params = {'logger_level': 30} - for imex in [False, True]: - custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-6, 'dt_max': 1e2}} - stats, controller, _ = run_leaky_superconductor( - custom_description=custom_description, - custom_controller_params=custom_controller_params, - imex=imex, - Tend=4.3e2, - hook_class=[LogWork, LogGlobalErrorPostRun], - ) - - res[imex] = get_sorted(stats, type='u')[-1][1] - newton_iter = [me[1] for me in get_sorted(stats, type='work_newton')] - rhs[imex] = np.mean([me[1] for me in get_sorted(stats, type='work_rhs')]) // 1 - error[imex] = get_sorted(stats, type='e_global_post_run')[-1][1] - - if imex: - assert all([me == 0 for me in newton_iter]), "IMEX is not supposed to do Newton iterations!" - else: - assert ( - max(newton_iter) / num_nodes / maxiter <= newton_iter_max - ), "Took more Newton iterations than allowed!" - if plotting: # pragma: no cover - plot_solution(stats, controller) - - diff = abs(res[True] - res[False]) - thresh = 3e-3 - assert ( - diff < thresh - ), f"Difference between IMEX and fully-implicit too large! Got {diff:.2e}, allowed is only {thresh:.2e}!" - prob = controller.MS[0].levels[0].prob - assert ( - max(res[True]) > prob.u_max - ), f"Expected runaway to happen, but maximum temperature is {max(res[True]):.2e} < u_max={prob.u_max:.2e}!" - - assert ( - rhs[True] == rhs[False] - ), f"Expected IMEX and fully implicit schemes to take the same number of right hand side evaluations per step, but got {rhs[True]} and {rhs[False]}!" 
- - assert ( - error[True] > error[False] - ), f"Expected IMEX to be less accurate at the same precision settings than unsplit version, got for IMEX: e={error[True]:.2e} and fully implicit: e={error[False]:.2e}" - assert error[True] < 1.1e-4, f'Expected error of IMEX version to be less than 1.1e-4, but got e={error[True]:.2e}!' - - -if __name__ == '__main__': - compare_imex_full(plotting=True) - plt.show() diff --git a/pySDC/projects/Resilience/notes/Lorenz/compare_strategies.png b/pySDC/projects/Resilience/notes/Lorenz/compare_strategies.png index 1aa165f58f76d99c40f0f45d7c6aa040f12c9f4b..7921043db41b265b43b9f2e9423999e96c45f33e 100644 Binary files a/pySDC/projects/Resilience/notes/Lorenz/compare_strategies.png and b/pySDC/projects/Resilience/notes/Lorenz/compare_strategies.png differ diff --git a/pySDC/projects/Resilience/notes/Lorenz/recovery_rate_compared.png b/pySDC/projects/Resilience/notes/Lorenz/recovery_rate_compared.png index 9b24184fcaf9d89bb247ae0aaa92b4158d6e17ca..893ad19bd4acb79425b7334378f7e002af5d28ca 100644 Binary files a/pySDC/projects/Resilience/notes/Lorenz/recovery_rate_compared.png and b/pySDC/projects/Resilience/notes/Lorenz/recovery_rate_compared.png differ diff --git a/pySDC/projects/Resilience/paper_plots.py b/pySDC/projects/Resilience/paper_plots.py index c1949560b8e853f297f20b12e7498d49759fffd1..7f95c70009b702c7d02ba6b3458f593c2334de6c 100644 --- a/pySDC/projects/Resilience/paper_plots.py +++ b/pySDC/projects/Resilience/paper_plots.py @@ -11,7 +11,7 @@ from pySDC.projects.Resilience.fault_stats import ( run_Lorenz, run_Schroedinger, run_vdp, - run_leaky_superconductor, + run_quench, ) from pySDC.helpers.plot_helper import setup_mpl, figsize_by_journal from pySDC.helpers.stats_helper import get_sorted @@ -23,7 +23,7 @@ JOURNAL = 'Springer_Numerical_Algorithms' BASE_PATH = 'data/paper' -def get_stats(problem, path='data/stats'): +def get_stats(problem, path='data/stats-jusuf'): """ Create a FaultStats object for a given problem to use for the plots. Note that the statistics need to be already generated somewhere else, this function will only load them. @@ -41,14 +41,14 @@ def get_stats(problem, path='data/stats'): mode = 'random' recovery_thresh_abs = { - run_leaky_superconductor: 5e-5, + run_quench: 5e-3, } strategies = [BaseStrategy(), AdaptivityStrategy(), IterateStrategy()] if JOURNAL not in ['JSC_beamer']: strategies += [HotRodStrategy()] - return FaultStats( + stats_analyser = FaultStats( prob=problem, strategies=strategies, faults=[False, True], @@ -59,6 +59,8 @@ def get_stats(problem, path='data/stats'): mode=mode, stats_path=path, ) + stats_analyser.get_recovered() + return stats_analyser def my_setup_mpl(**kwargs): @@ -66,17 +68,19 @@ def my_setup_mpl(**kwargs): mpl.rcParams.update({'lines.markersize': 6}) -def savefig(fig, name, format='pdf'): # pragma: no cover +def savefig(fig, name, format='pdf', tight_layout=True): # pragma: no cover """ Save a figure to some predefined location. 
Args: fig (Matplotlib.Figure): The figure of the plot name (str): The name of the plot + tight_layout (bool): Apply tight layout or leave as is Returns: None """ - fig.tight_layout() + if tight_layout: + fig.tight_layout() path = f'{BASE_PATH}/{name}.{format}' fig.savefig(path, bbox_inches='tight', transparent=True, dpi=200) print(f'saved "{path}"') @@ -95,6 +99,18 @@ def analyse_resilience(problem, path='data/stats', **kwargs): # pragma: no cove """ stats_analyser = get_stats(problem, path) + stats_analyser.get_recovered() + + strategy = IterateStrategy() + not_fixed = stats_analyser.get_mask(strategy=strategy, key='recovered', val=False) + not_overflow = stats_analyser.get_mask(strategy=strategy, key='bit', val=1, op='uneq', old_mask=not_fixed) + stats_analyser.print_faults(not_overflow) + + # special = stats_analyser.get_mask(strategy=strategy, key='bit', val=10, op='eq') + # stats_analyser.print_faults(special) + + # Adaptivity: 19, ... + # stats_analyser.scrutinize(strategy, run=19, faults=True) compare_strategies(stats_analyser, **kwargs) plot_recovery_rate(stats_analyser, **kwargs) @@ -131,7 +147,7 @@ def plot_recovery_rate(stats_analyser, **kwargs): # pragma: no cover stats_analyser.plot_things_per_things( 'recovered', 'bit', False, op=stats_analyser.rec_rate, args={'ylabel': 'recovery rate'}, ax=axs[0] ) - plot_recovery_rate_recoverable_only(stats_analyser, fig, axs[1], ylabel='', xlabel='') + plot_recovery_rate_recoverable_only(stats_analyser, fig, axs[1], ylabel='') axs[1].get_legend().remove() axs[0].set_title('All faults') axs[1].set_title('Only recoverable faults') @@ -177,15 +193,15 @@ def compare_recovery_rate_problems(): # pragma: no cover stats = [ get_stats(run_vdp), get_stats(run_Lorenz), - get_stats(run_Schroedinger, 'data/stats-jusuf'), - get_stats(run_leaky_superconductor, 'data/stats-jusuf'), + get_stats(run_Schroedinger), + get_stats(run_quench), ] titles = ['Van der Pol', 'Lorenz attractor', r'Schr\"odinger', 'Quench'] my_setup_mpl() - fig, axs = plt.subplots(2, 2, figsize=figsize_by_journal(JOURNAL, 1, 0.7), sharey=True) + fig, axs = plt.subplots(2, 2, figsize=figsize_by_journal(JOURNAL, 1, 0.8), sharey=True) [ - plot_recovery_rate_recoverable_only(stats[i], fig, axs.flatten()[i], ylabel='', xlabel='', title=titles[i]) + plot_recovery_rate_recoverable_only(stats[i], fig, axs.flatten()[i], ylabel='', title=titles[i]) for i in range(len(stats)) ] @@ -193,13 +209,53 @@ def compare_recovery_rate_problems(): # pragma: no cover ax.get_legend().remove() axs[1, 1].legend(frameon=False) - axs[1, 0].set_xlabel('bit') axs[1, 0].set_ylabel('recovery rate') + axs[0, 0].set_ylabel('recovery rate') savefig(fig, 'compare_equations') -def plot_efficiency_polar(problem, path='data/stats'): # pragma: no cover +def plot_efficiency_polar_vdp(problem, path='data/stats'): # pragma: no cover + stats_analyser = get_stats(problem, path) + fig, ax = plt.subplots( + subplot_kw={'projection': 'polar'}, figsize=figsize_by_journal(JOURNAL, 0.7, 0.5), layout='constrained' + ) + theta, norms = plot_efficiency_polar_single(stats_analyser, ax) + + labels = ['fail rate', 'extra iterations\nfor recovery', 'iterations for solution'] + ax.set_xticks(theta[:-1], [f'{labels[i]}\nmax={norms[i]:.2f}' for i in range(len(labels))]) + ax.set_rlabel_position(90) + + fig.legend(frameon=False, loc='outside right', ncols=1) + savefig(fig, 'efficiency', tight_layout=False) + + +def plot_efficiency_polar_other(): # pragma: no cover + problems = [run_Lorenz, run_Schroedinger, run_quench] + paths = ['./data/stats/', 
'./data/stats-jusuf', './data/stats-jusuf'] + titles = ['Lorenz attractor', r'Schr\"odinger', 'Quench'] + + fig, axs = plt.subplots( + 1, 3, subplot_kw={'projection': 'polar'}, figsize=figsize_by_journal(JOURNAL, 0.7, 0.5), layout='constrained' + ) + + for i in range(len(problems)): + stats_analyser = get_stats(problems[i], paths[i]) + ax = axs[i] + theta, norms = plot_efficiency_polar_single(stats_analyser, ax) + + labels = ['fail rate', 'extra iterations\nfor recovery', 'iterations for solution'] + ax.set_rlabel_position(90) + # ax.set_xticks(theta[:-1], [f'max={norms[i]:.2f}' for i in range(len(labels))]) + ax.set_xticks(theta[:-1], ['' for i in range(len(labels))]) + ax.set_title(titles[i]) + + handles, labels = fig.get_axes()[0].get_legend_handles_labels() + fig.legend(handles=handles, labels=labels, frameon=False, loc='outside lower center', ncols=4) + savefig(fig, 'efficiency_other', tight_layout=False) + + +def plot_efficiency_polar_single(stats_analyser, ax): # pragma: no cover """ Plot the recovery rate and the computational cost in a polar plot. @@ -218,12 +274,10 @@ def plot_efficiency_polar(problem, path='data/stats'): # pragma: no cover Returns: None """ - - stats_analyser = get_stats(problem, path) + # TODO: fix docs mask = stats_analyser.get_mask() # get empty mask, potentially put in some other mask later my_setup_mpl() - fig, ax = plt.subplots(subplot_kw={'projection': 'polar'}, figsize=(7 * cm, 7 * cm)) res = {} for strategy in stats_analyser.strategies: @@ -255,13 +309,7 @@ def plot_efficiency_polar(problem, path='data/stats'): # pragma: no cover theta = np.array([30, 150, 270, 30]) * 2 * np.pi / 360 for s in stats_analyser.strategies: ax.plot(theta, res_norm[s.name] + [res_norm[s.name][0]], label=s.label, color=s.color, marker=s.marker) - - labels = ['fail rate', 'extra iterations\nfor recovery', 'iterations for solution'] - ax.set_xticks(theta[:-1], [f'{labels[i]}\nmax={norms[i]:.2f}' for i in range(len(labels))]) - ax.set_rlabel_position(90) - - ax.legend(frameon=True, loc='lower right') - savefig(fig, 'efficiency') + return theta, norms def plot_adaptivity_stuff(): # pragma: no cover @@ -272,7 +320,9 @@ def plot_adaptivity_stuff(): # pragma: no cover Returns: None """ - from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedErrorNonMPI + from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError + from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep + from pySDC.projects.Resilience.hook import LogData stats_analyser = get_stats(run_vdp, 'data/stats') @@ -293,26 +343,31 @@ def plot_adaptivity_stuff(): # pragma: no cover Returns: None """ - e = get_sorted(stats, type='error_embedded_estimate', recomputed=False) - ax.plot([me[0] for me in e], [me[1] for me in e], markevery=15, **strategy.style, **kwargs) + markevery = 40 + e = get_sorted(stats, type='e_local_post_step', recomputed=False) + ax.plot([me[0] for me in e], [me[1] for me in e], markevery=markevery, **strategy.style, **kwargs) k = get_sorted(stats, type='k') - iter_ax.plot([me[0] for me in k], np.cumsum([me[1] for me in k]), **strategy.style, markevery=15, **kwargs) + iter_ax.plot( + [me[0] for me in k], np.cumsum([me[1] for me in k]), **strategy.style, markevery=markevery, **kwargs + ) ax.set_yscale('log') ax.set_ylabel('local error') - iter_ax.set_ylabel(r'iterations') + iter_ax.set_ylabel(r'SDC iterations') - force_params = {'convergence_controllers': {EstimateEmbeddedErrorNonMPI: {}}} 
+    force_params = {'convergence_controllers': {EstimateEmbeddedError: {}}}
+    # force_params = {'convergence_controllers': {EstimateEmbeddedError: {}}, 'step_params': {'maxiter': 5}, 'level_params': {'dt': 4e-2}}
     for strategy in [BaseStrategy, AdaptivityStrategy, IterateStrategy]:
-        stats, _, _ = stats_analyser.single_run(strategy=strategy(), force_params=force_params)
+        stats, _, _ = stats_analyser.single_run(
+            strategy=strategy(), force_params=force_params, hook_class=[LogLocalErrorPostStep, LogData]
+        )
         plot_error(stats, axs[1], axs[2], strategy())

-        if strategy == AdaptivityStrategy:
-            u = get_sorted(stats, type='u')
+        if strategy == BaseStrategy:
+            u = get_sorted(stats, type='u', recomputed=False)
             axs[0].plot([me[0] for me in u], [me[1][0] for me in u], color='black', label=r'$u$')
-            axs[0].plot([me[0] for me in u], [me[1][1] for me in u], color='black', ls='--', label=r'$u_t$')
-            axs[0].legend(frameon=False)
+            # axs[0].plot([me[0] for me in u], [me[1][1] for me in u], color='black', ls='--', label=r'$u_t$')
+            # axs[0].legend(frameon=False)

-    axs[1].set_ylim(bottom=1e-9)
     axs[2].set_xlabel(r'$t$')
     axs[0].set_ylabel('solution')
     axs[2].legend(frameon=JOURNAL == 'JSC_beamer')
@@ -347,15 +402,16 @@ def plot_fault_vdp(bit=0):  # pragma: no cover
     )

     my_setup_mpl()
-    fig, ax = plt.subplots(1, 1, figsize=(TEXTWIDTH * 3.0 / 4.0, 5 * cm))
+    fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.8, 0.5))
     colors = ['blue', 'red', 'magenta']
     ls = ['--', '-']
-    markers = ['*', '.', 'y']
+    markers = ['*', '^']
     do_faults = [False, True]
     superscripts = ['*', '']
     subscripts = ['', 't', '']

-    run = 779 + 12 * bit
+    run = 779 + 12 * bit  # for faults in u_t
+    # run = 11 + 12 * bit  # for faults in u

     for i in range(len(do_faults)):
         stats, controller, Tend = stats_analyser.single_run(
@@ -366,15 +422,15 @@ def plot_fault_vdp(bit=0):  # pragma: no cover
         )
         u = get_sorted(stats, type='u')
         faults = get_sorted(stats, type='bitflip')
-        for j in range(len(u[0][1])):
+        for j in [0, 1]:
             ax.plot(
                 [me[0] for me in u],
                 [me[1][j] for me in u],
                 ls=ls[i],
                 color=colors[j],
                 label=rf'$u^{{{superscripts[i]}}}_{{{subscripts[j]}}}$',
-                marker=markers[0],
-                markevery=15,
+                marker=markers[j],
+                markevery=60,
             )
         for idx in range(len(faults)):
             ax.axvline(faults[idx][0], color='black', label='Fault', ls=':')
@@ -383,11 +439,104 @@ def plot_fault_vdp(bit=0):  # pragma: no cover
     )
     ax.set_title(f'Fault in bit {faults[idx][1][4]}')

-    ax.legend(frameon=False)
+    ax.legend(frameon=True, loc='lower left')
     ax.set_xlabel(r'$t$')
     savefig(fig, f'fault_bit_{bit}')


+def plot_quench_solution():  # pragma: no cover
+    """
+    Plot the solution of the Quench problem over time
+
+    Returns:
+        None
+    """
+    my_setup_mpl()
+    if JOURNAL == 'JSC_beamer':
+        fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9))
+    else:
+        fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.45))
+
+    strategy = BaseStrategy()
+
+    custom_description = strategy.get_custom_description(run_quench)
+
+    stats, controller, _ = run_quench(custom_description=custom_description, Tend=strategy.get_Tend(run_quench))
+
+    prob = controller.MS[0].levels[0].prob
+
+    u = get_sorted(stats, type='u')
+
+    ax.plot([me[0] for me in u], [max(me[1]) for me in u], color='black', label='$T$')
+    ax.axhline(prob.u_thresh, label=r'$T_\mathrm{thresh}$', ls='--', color='grey', zorder=-1)
+    ax.axhline(prob.u_max, label=r'$T_\mathrm{max}$', ls=':', color='grey', zorder=-1)

+    ax.set_xlabel(r'$t$')
+    ax.legend(frameon=False)
+    savefig(fig, 'quench_sol')
+
+
+def plot_Lorenz_solution():  #
pragma: no cover + """ + Plot the solution of Lorenz attractor problem over time + + Returns: + None + """ + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + from pySDC.projects.Resilience.strategies import AdaptivityStrategy + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun + + strategy = AdaptivityStrategy() + + my_setup_mpl() + + fig = plt.figure() + ax = fig.add_subplot(projection='3d') + + # if JOURNAL == 'JSC_beamer': + # fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9)) + # else: + # fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.33)) + + custom_description = strategy.get_custom_description(run_Lorenz, 1) + custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-10}} + + stats, _, _ = run_Lorenz( + custom_description=custom_description, + Tend=strategy.get_Tend(run_Lorenz, 1) * 20, + hook_class=LogGlobalErrorPostRun, + ) + + u = get_sorted(stats, type='u') + e = get_sorted(stats, type='e_global_post_run')[-1] + print(u[-1], e) + ax.plot([me[1][0] for me in u], [me[1][1] for me in u], [me[1][2] for me in u]) + + ################## + from pySDC.projects.Resilience.strategies import DIRKStrategy, ERKStrategy, IterateStrategy + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK + + strategy = ERKStrategy() + custom_description = strategy.get_custom_description(run_Lorenz, 1) + custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-10}} + stats, _, _ = run_Lorenz( + custom_description=custom_description, + Tend=strategy.get_Tend(run_Lorenz, 1) * 20, + hook_class=LogGlobalErrorPostRun, + ) + + u = get_sorted(stats, type='u') + e = get_sorted(stats, type='e_global_post_run')[-1] + print(u[-1], e) + ax.plot([me[1][0] for me in u], [me[1][1] for me in u], [me[1][2] for me in u], ls='--') + ################ + ax.set_xlabel('x') + ax.set_ylabel('y') + ax.set_zlabel('z') + savefig(fig, 'lorenz_sol') + + def plot_vdp_solution(): # pragma: no cover """ Plot the solution of van der Pol problem over time to illustrate the varying time scales. 
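`plot_Lorenz_solution` above illustrates the pattern these helpers now share: take a strategy's default description, then override individual convergence controllers before handing it to a run function. Condensed to its core (same calls as in the function above, with an illustrative tolerance; the `run_Lorenz` import path is an assumption):

```python
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.projects.Resilience.strategies import AdaptivityStrategy
from pySDC.projects.Resilience.Lorenz import run_Lorenz  # assumed module path

strategy = AdaptivityStrategy()
custom_description = strategy.get_custom_description(run_Lorenz, 1)
custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-10}}  # override the preset
stats, controller, Tend = run_Lorenz(
    custom_description=custom_description,
    Tend=strategy.get_Tend(run_Lorenz, 1),
)
```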
@@ -398,12 +547,14 @@ def plot_vdp_solution(): # pragma: no cover from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity my_setup_mpl() - fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9)) + if JOURNAL == 'JSC_beamer': + fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9)) + else: + fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.33)) custom_description = {'convergence_controllers': {Adaptivity: {'e_tol': 1e-7}}} - problem_params = {} - stats, _, _ = run_vdp(custom_description=custom_description, custom_problem_params=problem_params, Tend=28.6) + stats, _, _ = run_vdp(custom_description=custom_description, Tend=28.6) u = get_sorted(stats, type='u') ax.plot([me[0] for me in u], [me[1][0] for me in u], color='black') @@ -412,6 +563,80 @@ def plot_vdp_solution(): # pragma: no cover savefig(fig, 'vdp_sol') +def work_precision(): # pragma: no cover + from pySDC.projects.Resilience.work_precision import ( + all_problems, + single_problem, + ODEs, + get_fig, + execute_configurations, + save_fig, + get_configs, + MPI, + vdp_stiffness_plot, + ) + + all_params = { + 'record': False, + 'work_key': 't', + 'precision_key': 'e_global_rel', + 'plotting': True, + 'base_path': 'data/paper', + } + + for mode in ['compare_strategies', 'parallel_efficiency']: + all_problems(**all_params, mode=mode) + + # Quench stuff + fig, axs = get_fig(x=3, y=1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1, 0.47)) + quench_params = { + **all_params, + 'problem': run_quench, + 'decorate': True, + 'configurations': get_configs('step_size_limiting', run_quench), + 'num_procs': 1, + 'runs': 1, + 'comm_world': MPI.COMM_WORLD, + } + quench_params.pop('base_path', None) + execute_configurations(**{**quench_params, 'work_key': 'k_SDC', 'precision_key': 'k_Newton'}, ax=axs[2]) + execute_configurations(**{**quench_params, 'work_key': 'param', 'precision_key': 'restart'}, ax=axs[1]) + execute_configurations(**{**quench_params, 'work_key': 't', 'precision_key': 'e_global_rel'}, ax=axs[0]) + axs[1].set_yscale('linear') + axs[2].set_yscale('linear') + axs[2].set_xscale('linear') + axs[1].set_xlabel(r'$e_\mathrm{tol}$') + + for ax in axs: + ax.set_title(ax.get_ylabel()) + ax.set_ylabel('') + fig.suptitle('Quench') + save_fig( + fig=fig, + name=f'{run_quench.__name__}', + work_key='step-size', + precision_key='limiting', + legend=True, + base_path=all_params["base_path"], + ) + + vdp_stiffness_plot(base_path='data/paper') + + +def make_plots_for_TIME_X_website(): # pragma: no cover + global JOURNAL, BASE_PATH + JOURNAL = 'JSC_beamer' + BASE_PATH = 'data/paper/time-x_website' + + fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 2.0 / 3.0)) + plot_recovery_rate_recoverable_only(get_stats(run_vdp), fig, ax) + savefig(fig, 'recovery_rate', format='png') + + from pySDC.projects.Resilience.work_precision import vdp_stiffness_plot + + vdp_stiffness_plot(base_path=BASE_PATH, format='png') + + def make_plots_for_SIAM_CSE23(): # pragma: no cover """ Make plots for the SIAM talk @@ -437,12 +662,14 @@ def make_plots_for_paper(): # pragma: no cover JOURNAL = 'Springer_Numerical_Algorithms' BASE_PATH = 'data/paper' + plot_vdp_solution() + plot_quench_solution() plot_recovery_rate(get_stats(run_vdp)) plot_fault_vdp(0) plot_fault_vdp(13) plot_adaptivity_stuff() - plot_efficiency_polar(run_vdp) compare_recovery_rate_problems() + work_precision() def make_plots_for_notes(): # pragma: no cover @@ -454,9 +681,11 @@ def make_plots_for_notes(): 
# pragma: no cover BASE_PATH = 'notes/Lorenz' analyse_resilience(run_Lorenz, format='png') + analyse_resilience(run_quench, format='png') if __name__ == "__main__": - make_plots_for_notes() - make_plots_for_SIAM_CSE23() + # make_plots_for_notes() + # make_plots_for_SIAM_CSE23() + # make_plots_for_TIME_X_website() make_plots_for_paper() diff --git a/pySDC/projects/Resilience/piline.py b/pySDC/projects/Resilience/piline.py index 8a5ea40f8f6e951fa75fb5eaf90acefd9c60867f..9c231cb01fbd7893fe3eb55d613888203bd78dd6 100644 --- a/pySDC/projects/Resilience/piline.py +++ b/pySDC/projects/Resilience/piline.py @@ -8,6 +8,7 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controlle from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.implementations.convergence_controller_classes.hotrod import HotRod from pySDC.projects.Resilience.hook import LogData, hook_collection +from pySDC.projects.Resilience.strategies import merge_descriptions def run_piline( @@ -17,7 +18,6 @@ def run_piline( hook_class=LogData, fault_stuff=None, custom_controller_params=None, - custom_problem_params=None, ): """ Run a Piline problem with default parameters. @@ -29,7 +29,6 @@ def run_piline( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets Returns: dict: The stats object @@ -58,9 +57,6 @@ def run_piline( 'Rl': 5.0, } - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters step_params = dict() step_params['maxiter'] = 4 @@ -84,11 +80,7 @@ def run_piline( description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 @@ -352,6 +344,7 @@ def main(): if use_adaptivity: custom_description['convergence_controllers'][Adaptivity] = { 'e_tol': 1e-7, + 'embedded_error_flavor': 'linearized', } for num_procs in [1, 4]: diff --git a/pySDC/projects/Resilience/quench.py b/pySDC/projects/Resilience/quench.py new file mode 100644 index 0000000000000000000000000000000000000000..d9447428f885364fc297c0b0733e23eb2419d784 --- /dev/null +++ b/pySDC/projects/Resilience/quench.py @@ -0,0 +1,557 @@ +# script to run a quench problem +from pySDC.implementations.problem_classes.Quench import Quench, QuenchIMEX +from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit +from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order +from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +from pySDC.core.Hooks import hooks +from pySDC.helpers.stats_helper import get_sorted +from pySDC.projects.Resilience.hook import hook_collection, LogData +from pySDC.projects.Resilience.strategies import merge_descriptions +import numpy as np + +import matplotlib.pyplot as plt +from pySDC.core.Errors import ConvergenceError + + +class live_plot(hooks): # pragma: no cover + """ + This hook plots the solution and the non-linear part of the right hand side after every step. Keep in mind that using adaptivity will result in restarts, which is not marked in these plots. 
Prepare to see the temperature profile jumping back again after a restart. + """ + + def _plot_state(self, step, level_number): # pragma: no cover + """ + Plot the solution at all collocation nodes and the non-linear part of the right hand side + + Args: + step (pySDC.Step.step): The current step + level_number (int): Number of current level + + Returns: + None + """ + L = step.levels[level_number] + for ax in self.axs: + ax.cla() + # [self.axs[0].plot(L.prob.xv, L.u[i], label=f"node {i}") for i in range(len(L.u))] + self.axs[0].plot(L.prob.xv, L.u[-1]) + self.axs[0].axhline(L.prob.u_thresh, color='black') + self.axs[1].plot(L.prob.xv, L.prob.eval_f_non_linear(L.u[-1], L.time)) + self.axs[0].set_ylim(0, 0.025) + self.fig.suptitle(f"t={L.time:.2e}, k={step.status.iter}") + plt.pause(1e-1) + + def pre_run(self, step, level_number): # pragma: no cover + """ + Setup a figure to plot into + + Args: + step (pySDC.Step.step): The current step + level_number (int): Number of current level + + Returns: + None + """ + self.fig, self.axs = plt.subplots(1, 2, figsize=(10, 4)) + + def post_step(self, step, level_number): # pragma: no cover + """ + Call the plotting function after the step + + Args: + step (pySDC.Step.step): The current step + level_number (int): Number of current level + + Returns: + None + """ + self._plot_state(step, level_number) + + +def run_quench( + custom_description=None, + num_procs=1, + Tend=6e2, + hook_class=LogData, + fault_stuff=None, + custom_controller_params=None, + imex=False, + u0=None, + t0=None, + use_MPI=False, + **kwargs, +): + """ + Run a toy problem of a superconducting magnet with a temperature leak with default parameters. + + Args: + custom_description (dict): Overwrite presets + num_procs (int): Number of steps for MSSDC + Tend (float): Time to integrate to + hook_class (pySDC.Hook): A hook to store data + fault_stuff (dict): A dictionary with information on how to add faults + custom_controller_params (dict): Overwrite presets + imex (bool): Solve the problem IMEX or fully implicit + u0 (dtype_u): Initial value + t0 (float): Starting time + use_MPI (bool): Whether or not to use MPI + + Returns: + dict: The stats object + controller: The controller + Tend: The time that was supposed to be integrated to + """ + + # initialize level parameters + level_params = {} + level_params['dt'] = 10.0 + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'IE' + sweeper_params['QE'] = 'PIC' + + problem_params = { + 'newton_tol': 1e-9, + } + + # initialize step parameters + step_params = {} + step_params['maxiter'] = 5 + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 30 + controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class]) + controller_params['mssdc_jac'] = False + + if custom_controller_params is not None: + controller_params = {**controller_params, **custom_controller_params} + + # fill description dictionary for easy step instantiation + description = {} + description['problem_class'] = QuenchIMEX if imex else Quench + description['problem_params'] = problem_params + description['sweeper_class'] = imex_1st_order if imex else generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + + if custom_description is not None: + description = 
merge_descriptions(description, custom_description) + + # set time parameters + t0 = 0.0 if t0 is None else t0 + + # instantiate controller + controller_args = { + 'controller_params': controller_params, + 'description': description, + } + if use_MPI: + from mpi4py import MPI + from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + + comm = kwargs.get('comm', MPI.COMM_WORLD) + controller = controller_MPI(**controller_args, comm=comm) + P = controller.S.levels[0].prob + else: + controller = controller_nonMPI(**controller_args, num_procs=num_procs) + P = controller.MS[0].levels[0].prob + + uinit = P.u_exact(t0) if u0 is None else u0 + + # insert faults + if fault_stuff is not None: + from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults + + rnd_args = {'iteration': 1, 'min_node': 1} + args = {'time': 31.0, 'target': 0} + prepare_controller_for_faults(controller, fault_stuff, rnd_args, args) + + # call main function to get things done... + try: + uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend) + except ConvergenceError: + print('Warning: Premature termination!') + stats = controller.return_stats() + return stats, controller, Tend + + +def faults(seed=0): # pragma: no cover + import matplotlib.pyplot as plt + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + fig, ax = plt.subplots(1, 1) + + rng = np.random.RandomState(seed) + fault_stuff = {'rng': rng, 'args': {}, 'rnd_args': {}} + + controller_params = {'logger_level': 30} + description = {'level_params': {'dt': 1e1}, 'step_params': {'maxiter': 5}} + stats, controller, _ = run_quench(custom_controller_params=controller_params, custom_description=description) + plot_solution_faults(stats, controller, ax, plot_lines=True, label='ref') + + stats, controller, _ = run_quench( + fault_stuff=fault_stuff, + custom_controller_params=controller_params, + ) + plot_solution_faults(stats, controller, ax, label='fixed') + + description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-7, 'dt_max': 1e2, 'dt_min': 1e-3}} + stats, controller, _ = run_quench( + fault_stuff=fault_stuff, custom_controller_params=controller_params, custom_description=description + ) + + plot_solution_faults(stats, controller, ax, label='adaptivity', ls='--') + plt.show() + + +def plot_solution_faults(stats, controller, ax, plot_lines=False, **kwargs): # pragma: no cover + u_ax = ax + + u = get_sorted(stats, type='u', recomputed=False) + u_ax.plot([me[0] for me in u], [np.mean(me[1]) for me in u], **kwargs) + + if plot_lines: + P = controller.MS[0].levels[0].prob + u_ax.axhline(P.u_thresh, color='grey', ls='-.', label=r'$T_\mathrm{thresh}$') + u_ax.axhline(P.u_max, color='grey', ls=':', label=r'$T_\mathrm{max}$') + + [ax.axvline(me[0], color='grey', label=f'fault at t={me[0]:.2f}') for me in get_sorted(stats, type='bitflip')] + + u_ax.legend() + u_ax.set_xlabel(r'$t$') + u_ax.set_ylabel(r'$T$') + + +def get_crossing_time(stats, controller, num_points=5, inter_points=50, temperature_error_thresh=1e-5): + """ + Compute the time when the temperature threshold is crossed based on interpolation. 
+ + Args: + stats (dict): The stats from a pySDC run + controller (pySDC.Controller.controller): The controller + num_points (int): The number of points in the solution you want to use for interpolation + inter_points (int): The resolution of the interpolation + temperature_error_thresh (float): The temperature error compared to the actual threshold you want to allow + + Returns: + float: The time when the temperature threshold is crossed + """ + from pySDC.core.Lagrange import LagrangeApproximation + from pySDC.core.Collocation import CollBase + + P = controller.MS[0].levels[0].prob + u_thresh = P.u_thresh + + u = get_sorted(stats, type='u', recomputed=False) + temp = np.array([np.mean(me[1]) for me in u]) + t = np.array([me[0] for me in u]) + + crossing_index = np.arange(len(temp))[temp > u_thresh][0] + + # interpolation stuff + num_points = min([num_points, crossing_index * 2, len(temp) - crossing_index]) + idx = np.arange(num_points) - num_points // 2 + crossing_index + t_grid = t[idx] + u_grid = temp[idx] + t_inter = np.linspace(t_grid[0], t_grid[-1], inter_points) + interpolator = LagrangeApproximation(points=t_grid) + u_inter = interpolator.getInterpolationMatrix(t_inter) @ u_grid + + crossing_inter = np.arange(len(u_inter))[u_inter > u_thresh][0] + + temperature_error = abs(u_inter[crossing_inter] - u_thresh) + + assert temperature_error < temp[crossing_index], "Temperature error is rising due to interpolation!" + + if temperature_error > temperature_error_thresh and inter_points < 300: + return get_crossing_time(stats, controller, num_points + 4, inter_points + 15, temperature_error_thresh) + + return t_inter[crossing_inter] + + +def plot_solution(stats, controller): # pragma: no cover + import matplotlib.pyplot as plt + + fig, ax = plt.subplots(1, 1) + u_ax = ax + dt_ax = u_ax.twinx() + + u = get_sorted(stats, type='u', recomputed=False) + u_ax.plot([me[0] for me in u], [np.mean(me[1]) for me in u], label=r'$T$') + + dt = get_sorted(stats, type='dt', recomputed=False) + dt_ax.plot([me[0] for me in dt], [me[1] for me in dt], color='black', ls='--') + u_ax.plot([None], [None], color='black', ls='--', label=r'$\Delta t$') + + if controller.useMPI: + P = controller.S.levels[0].prob + else: + P = controller.MS[0].levels[0].prob + u_ax.axhline(P.u_thresh, color='grey', ls='-.', label=r'$T_\mathrm{thresh}$') + u_ax.axhline(P.u_max, color='grey', ls=':', label=r'$T_\mathrm{max}$') + + [ax.axvline(me[0], color='grey', label=f'fault at t={me[0]:.2f}') for me in get_sorted(stats, type='bitflip')] + + u_ax.legend() + u_ax.set_xlabel(r'$t$') + u_ax.set_ylabel(r'$T$') + dt_ax.set_ylabel(r'$\Delta t$') + + +def compare_imex_full(plotting=False, leak_type='linear'): + """ + Compare the results of IMEX and fully implicit runs. For IMEX we need to limit the step size in order to achieve convergence, but for fully implicit, adaptivity can handle itself better. 
+ + Args: + plotting (bool): Plot the solution or not + """ + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + from pySDC.implementations.hooks.log_work import LogWork + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun + + maxiter = 5 + num_nodes = 3 + newton_iter_max = 99 + + res = {} + rhs = {} + error = {} + + custom_description = {} + custom_description['problem_params'] = { + 'newton_tol': 1e-10, + 'newton_iter': newton_iter_max, + 'nvars': 2**9, + 'leak_type': leak_type, + } + custom_description['step_params'] = {'maxiter': maxiter} + custom_description['sweeper_params'] = {'num_nodes': num_nodes} + custom_description['convergence_controllers'] = { + Adaptivity: {'e_tol': 1e-6, 'dt_max': 50}, + } + + custom_controller_params = {'logger_level': 30} + for imex in [False, True]: + stats, controller, _ = run_quench( + custom_description=custom_description, + custom_controller_params=custom_controller_params, + imex=imex, + Tend=4.3e2, + use_MPI=False, + hook_class=[LogWork, LogGlobalErrorPostRun], + ) + + res[imex] = get_sorted(stats, type='u')[-1][1] + newton_iter = [me[1] for me in get_sorted(stats, type='work_newton')] + rhs[imex] = np.mean([me[1] for me in get_sorted(stats, type='work_rhs')]) // 1 + error[imex] = get_sorted(stats, type='e_global_post_run')[-1][1] + + if imex: + assert all(me == 0 for me in newton_iter), "IMEX is not supposed to do Newton iterations!" + else: + assert ( + max(newton_iter) / num_nodes / maxiter <= newton_iter_max + ), "Took more Newton iterations than allowed!" + if plotting: # pragma: no cover + plot_solution(stats, controller) + + diff = abs(res[True] - res[False]) + thresh = 4e-3 + assert ( + diff < thresh + ), f"Difference between IMEX and fully-implicit too large! Got {diff:.2e}, allowed is only {thresh:.2e}!" + prob = controller.MS[0].levels[0].prob + assert ( + max(res[True]) > prob.u_max + ), f"Expected runaway to happen, but maximum temperature is {max(res[True]):.2e} < u_max={prob.u_max:.2e}!" + + assert ( + rhs[True] == rhs[False] + ), f"Expected IMEX and fully implicit schemes to take the same number of right hand side evaluations per step, but got {rhs[True]} and {rhs[False]}!" + + assert error[True] < 1e-4, f'Expected error of IMEX version to be less than 1e-4, but got e={error[True]:.2e}!' + assert ( + error[False] < 7.7e-5 + ), f'Expected error of fully implicit version to be less than 7.7e-5, but got e={error[False]:.2e}!' 
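
For reviewers who want to exercise the new `run_quench` entry point the way `compare_imex_full` does, here is a minimal sketch of a direct call. It is not part of the patch: the tolerances and step sizes are illustrative only, and it assumes the file is importable as `pySDC.projects.Resilience.quench`, the path this patch adds it under.

```python
# Minimal sketch (not part of the patch) of driving `run_quench` directly.
# Parameter values are illustrative, not the ones used by the tests above.
import numpy as np

from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.projects.Resilience.quench import run_quench

# Only the keys given here override the defaults set inside `run_quench`;
# `merge_descriptions` merges nested dictionaries recursively.
custom_description = {
    'level_params': {'dt': 25.0},
    'convergence_controllers': {Adaptivity: {'e_tol': 1e-6, 'dt_max': 50}},
}

stats, controller, Tend = run_quench(
    custom_description=custom_description,
    custom_controller_params={'logger_level': 30},
    imex=False,  # fully implicit; adaptivity controls the step size
    Tend=4.3e2,
)

# Mean temperature at the end of the run, excluding restarted steps
u = get_sorted(stats, type='u', recomputed=False)
print(f't = {u[-1][0]:.1f}, mean T = {np.mean(u[-1][1]):.3e}')
```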
+ + +def compare_reference_solutions_single(): + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostStep, LogLocalErrorPostStep + from pySDC.implementations.hooks.log_solution import LogSolution + + types = ['DIRK', 'SDC', 'scipy'] + types = ['scipy'] + fig, ax = plt.subplots() + error_ax = ax.twinx() + Tend = 500 + + colors = ['black', 'teal', 'magenta'] + + from pySDC.projects.Resilience.strategies import AdaptivityStrategy, merge_descriptions, DoubleAdaptivityStrategy + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + strategy = DoubleAdaptivityStrategy() + + controller_params = {'logger_level': 15} + + for j in range(len(types)): + description = {} + description['level_params'] = {'dt': 5.0, 'restol': 1e-10} + description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3} + description['problem_params'] = { + 'leak_type': 'linear', + 'leak_transition': 'step', + 'nvars': 2**10, + 'reference_sol_type': types[j], + 'newton_tol': 1e-12, + } + + description['level_params'] = {'dt': 5.0, 'restol': -1} + description = merge_descriptions(description, strategy.get_custom_description(run_quench, 1)) + description['step_params'] = {'maxiter': 5} + description['convergence_controllers'][Adaptivity]['e_tol'] = 1e-4 + + stats, controller, _ = run_quench( + custom_description=description, + hook_class=[LogGlobalErrorPostStep, LogLocalErrorPostStep, LogSolution], + Tend=Tend, + imex=False, + custom_controller_params=controller_params, + ) + e_glob = get_sorted(stats, type='e_global_post_step', recomputed=False) + e_loc = get_sorted(stats, type='e_local_post_step', recomputed=False) + u = get_sorted(stats, type='u', recomputed=False) + + ax.plot([me[0] for me in u], [max(me[1]) for me in u], color=colors[j], label=f'{types[j]} reference') + + error_ax.plot([me[0] for me in e_glob], [me[1] for me in e_glob], color=colors[j], ls='--') + error_ax.plot([me[0] for me in e_loc], [me[1] for me in e_loc], color=colors[j], ls=':') + + prob = controller.MS[0].levels[0].prob + ax.axhline(prob.u_thresh, ls='-.', color='grey') + ax.axhline(prob.u_max, ls='-.', color='grey') + ax.plot([None], [None], ls='--', label=r'$e_\mathrm{global}$', color='grey') + ax.plot([None], [None], ls=':', label=r'$e_\mathrm{local}$', color='grey') + error_ax.set_yscale('log') + ax.legend(frameon=False) + ax.set_xlabel(r'$t$') + ax.set_ylabel('solution') + error_ax.set_ylabel('error') + ax.set_title('Fully implicit quench problem') + fig.tight_layout() + fig.savefig('data/quench_refs_single.pdf', bbox_inches='tight') + + +def compare_reference_solutions(): + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun, LogLocalErrorPostStep + + types = ['DIRK', 'SDC', 'scipy'] + fig, ax = plt.subplots() + Tend = 500 + dt_list = [Tend / 2.0**me for me in [2, 3, 4, 5, 6, 7, 8, 9, 10]] + # dt_list = [Tend / 2.**me for me in [2, 3, 4, 5, 6, 7]] + + for j in range(len(types)): + errors = [None] * len(dt_list) + for i in range(len(dt_list)): + description = {} + description['level_params'] = {'dt': dt_list[i], 'restol': 1e-10} + description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3} + description['problem_params'] = { + 'leak_type': 'linear', + 'leak_transition': 'step', + 'nvars': 2**10, + 'reference_sol_type': types[j], + } + + stats, controller, _ = run_quench( + custom_description=description, + hook_class=[LogGlobalErrorPostRun, LogLocalErrorPostStep], + Tend=Tend, + imex=False, + ) + # errors[i] = get_sorted(stats, type='e_global_post_run')[-1][1] + errors[i] = 
max([me[1] for me in get_sorted(stats, type='e_local_post_step', recomputed=False)]) + print(errors) + ax.loglog(dt_list, errors, label=f'{types[j]} reference') + + ax.legend(frameon=False) + ax.set_xlabel(r'$\Delta t$') + ax.set_ylabel('global error') + ax.set_title('Fully implicit quench problem') + fig.tight_layout() + fig.savefig('data/quench_refs.pdf', bbox_inches='tight') + + +def check_order(reference_sol_type='scipy'): + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun + from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError + + Tend = 500 + maxiter_list = [1, 2, 3, 4, 5] + dt_list = [Tend / 2.0**me for me in [4, 5, 6, 7, 8, 9]] + # dt_list = [Tend / 2.**me for me in [6, 7, 8]] + + fig, ax = plt.subplots() + + from pySDC.implementations.sweeper_classes.Runge_Kutta import DIRK34 + + colors = ['black', 'teal', 'magenta', 'orange', 'red'] + for j in range(len(maxiter_list)): + errors = [None] * len(dt_list) + + for i in range(len(dt_list)): + description = {} + description['level_params'] = {'dt': dt_list[i]} + description['step_params'] = {'maxiter': maxiter_list[j]} + description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3} + description['problem_params'] = { + 'leak_type': 'linear', + 'leak_transition': 'step', + 'nvars': 2**10, + 'reference_sol_type': reference_sol_type, + } + description['convergence_controllers'] = {EstimateEmbeddedError: {}} + + # if maxiter_list[j] == 5: + # description['sweeper_class'] = DIRK34 + # description['sweeper_params'] = {'maxiter': 1} + + stats, controller, _ = run_quench( + custom_description=description, hook_class=[LogGlobalErrorPostRun], Tend=Tend, imex=False + ) + # errors[i] = max([me[1] for me in get_sorted(stats, type='error_embedded_estimate')]) + errors[i] = get_sorted(stats, type='e_global_post_run')[-1][1] + print(errors) + ax.loglog(dt_list, errors, color=colors[j], label=f'{maxiter_list[j]} iterations') + ax.loglog( + dt_list, [errors[0] * (me / dt_list[0]) ** maxiter_list[j] for me in dt_list], color=colors[j], ls='--' + ) + + dt_list = np.array(dt_list) + errors = np.array(errors) + orders = np.log(errors[1:] / errors[:-1]) / np.log(dt_list[1:] / dt_list[:-1]) + print(orders, np.mean(orders)) + + # ax.loglog(dt_list, local_errors) + ax.legend(frameon=False) + ax.set_xlabel(r'$\Delta t$') + ax.set_ylabel('global error') + # ax.set_ylabel('max. local error') + ax.set_title('Fully implicit quench problem') + fig.tight_layout() + fig.savefig(f'data/order_quench_{reference_sol_type}.pdf', bbox_inches='tight') + + +if __name__ == '__main__': + compare_reference_solutions_single() + # for reference_sol_type in ['DIRK', 'SDC', 'scipy']: + # check_order(reference_sol_type=reference_sol_type) + ## faults(19) + ## # get_crossing_time() + # compare_imex_full(plotting=True) + plt.show() diff --git a/pySDC/projects/Resilience/strategies.py b/pySDC/projects/Resilience/strategies.py new file mode 100644 index 0000000000000000000000000000000000000000..428393e9f3a020035091871a62cf1cd3f694a5b5 --- /dev/null +++ b/pySDC/projects/Resilience/strategies.py @@ -0,0 +1,1209 @@ +import numpy as np +from matplotlib.colors import TABLEAU_COLORS + +cmap = TABLEAU_COLORS + + +def merge_descriptions(descA, descB): + """ + Merge two dictionaries that may contain dictionaries, which happens when merging descriptions, for instance. + + Keys that occur in both dictionaries will be overwritten by the ones from `descB` and `descA` will be modified, not + copied! 
+ + Args: + descA (dict): Dictionary that you want to merge into + descB (dict): Dictionary you want to merge from + + Returns: + dict: decsA with updated parameters + """ + for key in descB.keys(): + if type(descB[key]) == dict: + descA[key] = merge_descriptions(descA.get(key, {}), descB[key]) + else: + descA[key] = descB[key] + return descA + + +class Strategy: + ''' + Abstract class for resilience strategies + ''' + + def __init__(self, useMPI=False, skip_residual_computation='none'): + ''' + Initialization routine + ''' + self.useMPI = useMPI + + # set default values for plotting + self.linestyle = '-' + self.marker = '.' + self.name = '' + self.bar_plot_x_label = '' + self.color = list(cmap.values())[0] + + # parameters for computational efficiency + if skip_residual_computation == 'all': + self.skip_residual_computation = ('IT_CHECK', 'IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE') + elif skip_residual_computation == 'most': + self.skip_residual_computation = ('IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE') + else: + self.skip_residual_computation = () + + # setup custom descriptions + self.custom_description = {} + self.custom_description['sweeper_params'] = {'skip_residual_computation': self.skip_residual_computation} + + # prepare parameters for masks to identify faults that cannot be fixed by this strategy + self.fixable = [] + self.fixable += [ + { + 'key': 'node', + 'op': 'gt', + 'val': 0, + } + ] + self.fixable += [ + { + 'key': 'error', + 'op': 'isfinite', + } + ] + + # stuff for work-precision diagrams + self.precision_parameter = None + self.precision_parameter_loc = [] + + def get_fixable_params(self, **kwargs): + """ + Return a list containing dictionaries which can be passed to `FaultStats.get_mask` as keyword arguments to + obtain a mask of faults that can be fixed + + Returns: + list: Dictionary of parameters + """ + return self.fixable + + def get_fault_args(self, problem, num_procs): + ''' + Routine to get arguments for the faults that are exempt from randomization + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + dict: Arguments for the faults that are exempt from randomization + ''' + + return {} + + def get_random_params(self, problem, num_procs): + ''' + Routine to get parameters for the randomization of faults + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + dict: Randomization parameters + ''' + + return {} + + @property + def style(self): + """ + Get the plotting parameters for the strategy. 
+ Supply them to a plotting function using `**` + + Returns: + (dict): The plotting parameters as a dictionary + """ + return { + 'marker': self.marker, + 'label': self.label, + 'color': self.color, + 'ls': self.linestyle, + } + + @property + def label(self): + """ + Get a label for plotting + """ + return self.name + + def get_Tend(self, problem, num_procs=1): + ''' + Get the final time of runs for fault stats based on the problem + + Args: + problem (function): A problem to run + num_procs (int): Number of processes + + Returns: + float: Tend to put into the run + ''' + if problem.__name__ == "run_vdp": + return 11.5 + # return 2.3752559741400825 # old stuff + elif problem.__name__ == "run_piline": + return 20.0 + elif problem.__name__ == "run_Lorenz": + return 1.5 + elif problem.__name__ == "run_Schroedinger": + return 1.0 + elif problem.__name__ == "run_quench": + return 500.0 + else: + raise NotImplementedError('I don\'t have a final time for your problem!') + + def get_custom_description(self, problem, num_procs=1): + ''' + Get a custom description based on the problem + + Args: + problem (function): A problem to run + num_procs (int): Number of processes + + Returns: + dict: Custom description + ''' + custom_description = {} + if problem.__name__ == "run_vdp": + custom_description['step_params'] = {'maxiter': 3} + custom_description['problem_params'] = { + 'u0': np.array([2, 0], dtype=np.float64), + # 'u0': np.array([0.99995, -0.00999985], dtype=np.float64), # old stuff + 'crash_at_maxiter': False, + 'newton_tol': 1e-11, + } + custom_description['level_params'] = {'dt': 1e-2} + + elif problem.__name__ == "run_Lorenz": + custom_description['step_params'] = {'maxiter': 5} + custom_description['level_params'] = {'dt': 1e-2} + elif problem.__name__ == "run_Schroedinger": + custom_description['step_params'] = {'maxiter': 5} + custom_description['level_params'] = {'dt': 1e-2, 'restol': -1} + elif problem.__name__ == "run_quench": + custom_description['level_params'] = {'restol': -1, 'dt': 8.0} + custom_description['step_params'] = {'maxiter': 5} + custom_description['problem_params'] = {'newton_iter': 99, 'newton_tol': 1e-11} + return merge_descriptions(custom_description, self.custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class BaseStrategy(Strategy): + ''' + Do a fixed iteration count + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[0] + self.marker = 'o' + self.name = 'base' + self.bar_plot_x_label = 'base' + self.precision_parameter = 'dt' + self.precision_parameter_loc = ['level_params', 'dt'] + + @property + def label(self): + return r'fixed' + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 12453 + elif key == 'e_global_post_run' and op == max: + return 4.3956128381594795e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityStrategy(Strategy): + ''' + Adaptivity as a resilience strategy + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[1] + self.marker = '*' + self.name = 'adaptivity' + self.bar_plot_x_label = 'adaptivity' + self.precision_parameter = 'e_tol' + self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol'] + + def get_fixable_params(self, maxiter, **kwargs): + """ + Here faults occurring in the last iteration cannot be fixed. + + Args: + maxiter (int): Max. iterations until convergence is declared + + Returns: + (list): Contains dictionaries of keyword arguments for `FaultStats.get_mask` + """ + self.fixable += [ + { + 'key': 'iteration', + 'op': 'lt', + 'val': maxiter, + } + ] + return self.fixable + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + custom_description = {} + custom_description['convergence_controllers'] = {} + + dt_max = np.inf + dt_min = 1e-5 + dt_slope_max = np.inf + + if problem.__name__ == "run_piline": + e_tol = 1e-7 + dt_min = 1e-2 + elif problem.__name__ == "run_vdp": + e_tol = 2e-5 + dt_min = 1e-3 + elif problem.__name__ == "run_Lorenz": + e_tol = 2e-5 + dt_min = 1e-3 + elif problem.__name__ == "run_Schroedinger": + e_tol = 4e-6 + dt_min = 1e-3 + elif problem.__name__ == "run_quench": + e_tol = 1e-5 + dt_min = 1e-3 + # dt_max = 25 + # dt_slope_max = 4. + + from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting + + flavor = 'MPI' if self.useMPI else 'nonMPI' + custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = { + 'max_restarts': 15 + } + else: + raise NotImplementedError( + 'I don\'t have a tolerance for adaptivity for your problem. Please add one to the\ + strategy' + ) + + custom_description['convergence_controllers'][Adaptivity] = { + 'e_tol': e_tol, + 'dt_min': dt_min, + 'dt_max': dt_max, + 'dt_slope_max': dt_slope_max, + } + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 3825 + elif key == 'e_global_post_run' and op == max: + return 1.3370376368393444e-05 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptiveHotRodStrategy(Strategy): + ''' + Adaptivity + Hot Rod as a resilience strategy + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[4] + self.marker = '.' + self.name = 'adaptive Hot Rod' + self.bar_plot_x_label = 'adaptive\nHot Rod' + self.precision_parameter = 'e_tol' + self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol'] + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity and Hot Rod + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom description you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.hotrod import HotRod + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + if problem.__name__ == "run_vdp": + e_tol = 3e-7 + dt_min = 1e-3 + maxiter = 4 + HotRod_tol = 2e-6 + else: + raise NotImplementedError( + 'I don\'t have a tolerance for adaptive Hot Rod for your problem. Please add one \ +to the strategy' + ) + + no_storage = num_procs > 1 + + custom_description = { + 'convergence_controllers': { + HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage}, + Adaptivity: {'e_tol': e_tol, 'dt_min': dt_min, 'embedded_error_flavor': 'linearized'}, + }, + 'step_params': {'maxiter': maxiter}, + } + + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 4466 + elif key == 'e_global_post_run' and op == max: + return 2.1455229857747504e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class IterateStrategy(Strategy): + ''' + Iterate for as much as you want + ''' + + def __init__(self, useMPI=False, skip_residual_computation='most'): + ''' + Initialization routine + ''' + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[2] + self.marker = 'v' + self.name = 'iterate' + self.bar_plot_x_label = 'iterate' + self.precision_parameter = 'restol' + self.precision_parameter_loc = ['level_params', 'restol'] + + @property + def label(self): + return r'$k$ adaptivity' + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that allows for adaptive iteration counts + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom description you can supply to the problem when running it + ''' + restol = -1 + e_tol = -1 + + if problem.__name__ == "run_piline": + restol = 2.3e-8 + elif problem.__name__ == "run_vdp": + restol = 9e-7 + elif problem.__name__ == "run_Lorenz": + restol = 16e-7 + elif problem.__name__ == "run_Schroedinger": + restol = 6.5e-7 + elif problem.__name__ == "run_quench": + restol = 1e-7 + else: + raise NotImplementedError( + 'I don\'t have a residual tolerance for your problem. Please add one to the \ +strategy' + ) + + custom_description = { + 'step_params': {'maxiter': 99}, + 'level_params': {'restol': restol, 'e_tol': e_tol}, + } + + if problem.__name__ == "run_quench": + custom_description['level_params']['dt'] = 1 + + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 8534 + elif key == 'e_global_post_run' and op == max: + return 0.0005961192269257065 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class HotRodStrategy(Strategy): + ''' + Hot Rod as a resilience strategy + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[3] + self.marker = '^' + self.name = 'Hot Rod' + self.bar_plot_x_label = 'Hot Rod' + self.precision_parameter = 'dt' + self.precision_parameter_loc = ['level_params', 'dt'] + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds Hot Rod + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom description you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.hotrod import HotRod + from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestartingNonMPI + + if problem.__name__ == "run_vdp": + HotRod_tol = 5e-7 + maxiter = 4 + elif problem.__name__ == "run_Lorenz": + HotRod_tol = 4e-7 + maxiter = 6 + elif problem.__name__ == "run_Schroedinger": + HotRod_tol = 3e-7 + maxiter = 6 + elif problem.__name__ == "run_quench": + HotRod_tol = 3e-5 + maxiter = 6 + else: + raise NotImplementedError( + 'I don\'t have a tolerance for Hot Rod for your problem. Please add one to the\ + strategy' + ) + + no_storage = num_procs > 1 + + custom_description = { + 'convergence_controllers': { + HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage}, + BasicRestartingNonMPI: {'max_restarts': 2, 'crash_after_max_restarts': False}, + }, + 'step_params': {'maxiter': maxiter}, + } + + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 15230 + elif key == 'e_global_post_run' and op == max: + return 4.3956128381594795e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityCollocationStrategy(Strategy): + ''' + Adaptivity based on collocation as a resilience strategy + ''' + + def __init__(self, useMPI=False, skip_residual_computation='most'): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityCollocation + + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[1] + self.marker = '*' + self.name = 'adaptivity_coll' + self.bar_plot_x_label = 'adaptivity collocation' + self.precision_parameter = 'e_tol' + self.adaptive_coll_params = {} + self.precision_parameter_loc = ['convergence_controllers', AdaptivityCollocation, 'e_tol'] + self.restol = None + self.maxiter = 99 + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityCollocation + + custom_description = {} + custom_description['step_params'] = {'maxiter': self.maxiter} + + dt_max = np.inf + dt_min = 1e-5 + + if problem.__name__ == "run_piline": + e_tol = 1e-7 + dt_min = 1e-2 + elif problem.__name__ == "run_vdp": + e_tol = 2e-5 + dt_min = 1e-3 + elif problem.__name__ == "run_Lorenz": + e_tol = 2e-5 + dt_min = 1e-3 + elif problem.__name__ == "run_Schroedinger": + e_tol = 4e-6 + dt_min = 1e-3 + elif problem.__name__ == "run_quench": + e_tol = 1e-5 + dt_min = 1e-3 + dt_max = 1e2 + else: + raise NotImplementedError( + 'I don\'t have a tolerance for adaptivity for your problem. Please add one to the\ + strategy' + ) + + custom_description['level_params'] = {'restol': e_tol / 10 if self.restol is None else self.restol} + custom_description['convergence_controllers'] = { + AdaptivityCollocation: { + 'e_tol': e_tol, + 'dt_min': dt_min, + 'dt_max': dt_max, + 'adaptive_coll_params': self.adaptive_coll_params, + } + } + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + +class AdaptivityCollocationTypeStrategy(AdaptivityCollocationStrategy): + def __init__(self, useMPI=False, skip_residual_computation='most'): + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[4] + self.marker = '.' + self.adaptive_coll_params = { + 'quad_type': ['RADAU-RIGHT', 'GAUSS'], + 'do_coll_update': [False, True], + } + + @property + def label(self): + return 'adaptivity type' + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 2694 + elif key == 'e_global_post_run' and op == max: + return 2.1707816100224875e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityCollocationRefinementStrategy(AdaptivityCollocationStrategy): + def __init__(self, useMPI=False, skip_residual_computation='most'): + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[5] + self.marker = '^' + self.adaptive_coll_params = { + 'num_nodes': [2, 3], + 'quad_type': ['GAUSS', 'RADAU-RIGHT'], + 'do_coll_update': [True, False], + } + + @property + def label(self): + return 'adaptivity refinement' + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 1881 + elif key == 'e_global_post_run' and op == max: + return 3.3428689244496823e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityCollocationDerefinementStrategy(AdaptivityCollocationStrategy): + def __init__(self, useMPI=False, skip_residual_computation='most'): + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[6] + self.marker = '^' + self.adaptive_coll_params = {'num_nodes': [4, 3]} + + @property + def label(self): + return 'adaptivity de-refinement' + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 3421 + elif key == 'e_global_post_run' and op == max: + return 2.1130961994131336e-05 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class DIRKStrategy(AdaptivityStrategy): + ''' + DIRK4(3) + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK + + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[7] + self.marker = '^' + self.name = 'DIRK' + self.bar_plot_x_label = 'DIRK4(3)' + self.precision_parameter = 'e_tol' + self.precision_parameter_loc = ['convergence_controllers', AdaptivityRK, 'e_tol'] + + @property + def label(self): + return 'DIRK4(3)' + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK, Adaptivity + from pySDC.implementations.sweeper_classes.Runge_Kutta import DIRK34 + + adaptivity_description = super().get_custom_description(problem, num_procs) + + e_tol = adaptivity_description['convergence_controllers'][Adaptivity]['e_tol'] + adaptivity_description['convergence_controllers'].pop(Adaptivity, None) + adaptivity_description.pop('sweeper_params', None) + + rk_params = { + 'step_params': {'maxiter': 1}, + 'sweeper_class': DIRK34, + 'convergence_controllers': {AdaptivityRK: {'e_tol': e_tol}}, + } + + custom_description = merge_descriptions(adaptivity_description, rk_params) + + return custom_description + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 3522 + elif key == 'e_global_post_run' and op == max: + return 0.00020173129027772907 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class ERKStrategy(DIRKStrategy): + """ + Explicit embedded RK using Cash-Karp's method + """ + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[9] + self.marker = 'x' + self.name = 'ERK' + self.bar_plot_x_label = 'ERK5(4)' + + @property + def label(self): + return 'CP5(4)' + + """ + Explicit Cash-Karp's method + """ + + def get_custom_description(self, problem, num_procs=1): + from pySDC.implementations.sweeper_classes.Runge_Kutta import Cash_Karp + + desc = super().get_custom_description(problem, num_procs) + desc['sweeper_class'] = Cash_Karp + return desc + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 0 + elif key == 'e_global_post_run' and op == max: + return 2.0606132165701396e-05 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class DoubleAdaptivityStrategy(AdaptivityStrategy): + ''' + Adaptivity based both on embedded estimate and on residual + ''' + + def __init__(self, useMPI=False, skip_residual_computation='all'): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + + super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation) + self.color = list(cmap.values())[7] + self.marker = '^' + self.name = 'double_adaptivity' + self.bar_plot_x_label = 'double adaptivity' + self.precision_parameter = 'e_tol' + self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol'] + self.residual_e_tol_ratio = 1.0 + self.residual_e_tol_abs = None + + @property + def label(self): + return 'double adaptivity' + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityResidual, Adaptivity + from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting + + custom_description = super().get_custom_description(problem, num_procs) + + if self.residual_e_tol_abs: + e_tol = self.residual_e_tol_abs 
+ else: + e_tol = custom_description['convergence_controllers'][Adaptivity]['e_tol'] * self.residual_e_tol_ratio + custom_description['convergence_controllers'][AdaptivityResidual] = { + 'e_tol': e_tol, + 'allowed_modifications': ['decrease'], + } + + flavor = 'MPI' if self.useMPI else 'nonMPI' + custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15} + + return custom_description + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 3825 + elif key == 'e_global_post_run' and op == max: + return 1.3370376368393444e-05 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityAvoidRestartsStrategy(AdaptivityStrategy): + """ + Adaptivity with the avoid restarts option + """ + + @property + def label(self): + return 'adaptivity (avoid restarts)' + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting + + custom_description = super().get_custom_description(problem, num_procs) + + custom_description['convergence_controllers'][Adaptivity]['avoid_restarts'] = True + + flavor = 'MPI' if self.useMPI else 'nonMPI' + custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15} + + return custom_description + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 2955 + elif key == 'e_global_post_run' and op == max: + return 5.274015506540053e-07 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityInterpolationStrategy(AdaptivityStrategy): + """ + Adaptivity with interpolation between restarts + """ + + @property + def label(self): + return 'adaptivity+interpolation' + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity + from pySDC.implementations.convergence_controller_classes.interpolate_between_restarts import ( + InterpolateBetweenRestarts, + ) + from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting + + custom_description = super().get_custom_description(problem, num_procs) + + custom_description['convergence_controllers'][Adaptivity]['avoid_restarts'] = False + custom_description['convergence_controllers'][InterpolateBetweenRestarts] = {} + + flavor = 'MPI' if self.useMPI else 'nonMPI' + custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15} + + return custom_description + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 6659 + elif key == 'e_global_post_run' and op == max: + return 2.9780002756552015e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') + + +class AdaptivityExtrapolationWithinQStrategy(Strategy): + ''' + Adaptivity based on extrapolation between collocation nodes as a resilience strategy + ''' + + def __init__(self, useMPI=False): + ''' + Initialization routine + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityExtrapolationWithinQ + + super().__init__(useMPI=useMPI) + self.color = list(cmap.values())[8] + self.marker = '*' + self.name = 'adaptivity_extraQ' + self.bar_plot_x_label = 'adaptivity Q' + self.precision_parameter = 'e_tol' + self.adaptive_coll_params = {} + self.precision_parameter_loc = ['convergence_controllers', AdaptivityExtrapolationWithinQ, 'e_tol'] + self.restol = None + self.maxiter = 99 + + def get_custom_description(self, problem, num_procs): + ''' + Routine to get a custom description that adds adaptivity + + Args: + problem: A function that runs a pySDC problem, see imports for available problems + num_procs (int): Number of processes you intend to run with + + Returns: + The custom descriptions you can supply to the problem when running it + ''' + from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityExtrapolationWithinQ + + custom_description = {} + custom_description['step_params'] = {'maxiter': self.maxiter} + + dt_max = np.inf + dt_min = 1e-5 + + if problem.__name__ == "run_vdp": + e_tol = 2e-5 + dt_min = 1e-3 + # elif problem.__name__ == "run_piline": + # e_tol = 1e-7 + # dt_min = 1e-2 + # elif problem.__name__ == "run_Lorenz": + # e_tol = 2e-5 + # dt_min = 1e-3 + # elif problem.__name__ == "run_Schroedinger": + # e_tol = 4e-6 + # dt_min = 1e-3 + # elif problem.__name__ == "run_quench": + # e_tol = 1e-5 + # dt_min = 1e-3 + # dt_max = 1e2 + else: + raise NotImplementedError( + 'I don\'t have a tolerance for adaptivity for your problem. Please add one to the\ + strategy' + ) + + custom_description['level_params'] = {'restol': e_tol / 10 if self.restol is None else self.restol} + custom_description['convergence_controllers'] = { + AdaptivityExtrapolationWithinQ: { + 'e_tol': e_tol, + 'dt_min': dt_min, + 'dt_max': dt_max, + } + } + return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description) + + def get_reference_value(self, problem, key, op, num_procs=1): + """ + Get a reference value for a given problem for testing in CI. 
+ + Args: + problem: A function that runs a pySDC problem, see imports for available problems + key (str): The name of the variable you want to compare + op (function): The operation you want to apply to the data + num_procs (int): Number of processes + + Returns: + The reference value + """ + if problem.__name__ == "run_vdp": + if key == 'work_newton' and op == sum: + return 2259 + elif key == 'e_global_post_run' and op == max: + return 9.319882663172407e-06 + + raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!') diff --git a/pySDC/projects/Resilience/sweepers.py b/pySDC/projects/Resilience/sweepers.py new file mode 100644 index 0000000000000000000000000000000000000000..f03a55314ead62eff447c1cbb4d7c94931f4ae9d --- /dev/null +++ b/pySDC/projects/Resilience/sweepers.py @@ -0,0 +1,175 @@ +import numpy as np +from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit +from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order + + +class generic_implicit_efficient(generic_implicit): + """ + This sweeper has the same functionality of the `generic_implicit` sweeper, but saves a few operations at the expense + of readability. + """ + + def integrate(self, Q=None): + """ + Integrates the right-hand side. Depending on `Q`, this may or may not be consistent with an integral + approximation. + + Args: + Q (numpy.ndarray): Some sort of quadrature rule + + Returns: + list of dtype_u: containing the integral as values + """ + + # get current level and problem description + L = self.level + P = L.prob + + Q = self.coll.Qmat if Q is None else Q + + me = [] + + # integrate RHS over all collocation nodes + for m in range(1, self.coll.num_nodes + 1): + # new instance of dtype_u, initialize values with 0 + me.append(P.dtype_u(P.init, val=0.0)) + for j in range(1, self.coll.num_nodes + 1): + me[-1] += L.dt * Q[m, j] * L.f[j] + + return me + + def update_nodes(self): + """ + Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes + + Returns: + None + """ + + # get current level and problem description + L = self.level + P = L.prob + + # only if the level has been touched before + assert L.status.unlocked + + # get number of collocation nodes for easier access + M = self.coll.num_nodes + + # gather all terms which are known already (e.g. from the previous iteration) + # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau + + # get QF(u^k) + integral = self.integrate(Q=self.coll.Qmat - self.QI) + for m in range(M): + # add initial value + integral[m] += L.u[0] + # add tau if associated + if L.tau[m] is not None: + integral[m] += L.tau[m] + + # do the sweep + for m in range(0, M): + # build rhs, consisting of the known values from above and new values from previous nodes (at k+1) + rhs = P.dtype_u(integral[m]) + for j in range(1, m + 1): + rhs += L.dt * self.QI[m + 1, j] * L.f[j] + + # implicit solve with prefactor stemming from the diagonal of Qd + L.u[m + 1] = P.solve_system( + rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m] + ) + # update function values + L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m]) + + # indicate presence of new values at this level + L.status.updated = True + + return None + + +class imex_1st_order_efficient(imex_1st_order): + """ + Duplicate of `imex_1st_order` sweeper which is slightly more efficient at the cost of code readability. 
+    """
+
+    def integrate(self, Q=None, QI=None, QE=None):
+        """
+        Integrates the right-hand side (here impl + expl)
+
+        Args:
+            Q (numpy.ndarray): Full quadrature rule
+            QI (numpy.ndarray): Implicit preconditioner
+            QE (numpy.ndarray): Explicit preconditioner
+
+        Returns:
+            list of dtype_u: containing the integral as values
+        """
+
+        Q = self.coll.Qmat if Q is None else Q
+        QI = np.zeros_like(Q) if QI is None else QI
+        QE = np.zeros_like(Q) if QE is None else QE
+
+        # get current level and problem description
+        L = self.level
+
+        me = []
+
+        # integrate RHS over all collocation nodes
+        for m in range(1, self.coll.num_nodes + 1):
+            me.append(L.dt * ((Q - QI)[m, 1] * L.f[1].impl + (Q - QE)[m, 1] * L.f[1].expl))
+            for j in range(2, self.coll.num_nodes + 1):
+                me[m - 1] += L.dt * ((Q - QI)[m, j] * L.f[j].impl + (Q - QE)[m, j] * L.f[j].expl)
+
+        return me
+
+    def update_nodes(self):
+        """
+        Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
+
+        Returns:
+            None
+        """
+
+        # get current level and problem description
+        L = self.level
+        P = L.prob
+
+        # only if the level has been touched before
+        assert L.status.unlocked
+
+        # get number of collocation nodes for easier access
+        M = self.coll.num_nodes
+
+        # gather all terms which are known already (e.g. from the previous iteration)
+        # this corresponds to u0 + QF(u^k) - QIFI(u^k) - QEFE(u^k) + tau
+
+        # get (Q - QI)F_I(u^k) + (Q - QE)F_E(u^k) in a single call
+        integral = self.integrate(Q=self.coll.Qmat, QI=self.QI, QE=self.QE)
+        for m in range(M):
+            # add initial value
+            integral[m] += L.u[0]
+            # add tau if associated
+            if L.tau[m] is not None:
+                integral[m] += L.tau[m]
+
+        # do the sweep
+        for m in range(0, M):
+            # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
+            rhs = P.dtype_u(integral[m])
+            for j in range(1, m + 1):
+                rhs += L.dt * (self.QI[m + 1, j] * L.f[j].impl + self.QE[m + 1, j] * L.f[j].expl)
+
+            # implicit solve with prefactor stemming from QI
+            L.u[m + 1] = P.solve_system(
+                rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
+            )
+
+            # update function values
+            L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
+
+        # indicate presence of new values at this level
+        L.status.updated = True
+
+        return None
diff --git a/pySDC/projects/Resilience/vdp.py b/pySDC/projects/Resilience/vdp.py
index 244c86dd849f5941449c1429ca1d29362b6c62b0..ffa412751e84c2c78edd5795e4c4979ea6804b8b 100644
--- a/pySDC/projects/Resilience/vdp.py
+++ b/pySDC/projects/Resilience/vdp.py
@@ -4,11 +4,12 @@ import matplotlib.pyplot as plt
 
 from pySDC.helpers.stats_helper import get_sorted, get_list_of_types
 from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
-from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
 from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
 from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
 from pySDC.core.Errors import ProblemError, ConvergenceError
 from pySDC.projects.Resilience.hook import LogData, hook_collection
+from pySDC.projects.Resilience.strategies import merge_descriptions
+from pySDC.projects.Resilience.sweepers import generic_implicit_efficient
 
 
 def plot_step_sizes(stats, ax, e_em_key='error_embedded_estimate'):
@@ -88,7 +89,6 @@ def run_vdp(
     hook_class=LogData,
     fault_stuff=None,
     custom_controller_params=None,
-    custom_problem_params=None,
     use_MPI=False,
**kwargs, ): @@ -102,7 +102,6 @@ def run_vdp( hook_class (pySDC.Hook): A hook to store data fault_stuff (dict): A dictionary with information on how to add faults custom_controller_params (dict): Overwrite presets - custom_problem_params (dict): Overwrite presets use_MPI (bool): Whether or not to use MPI Returns: @@ -112,11 +111,11 @@ def run_vdp( """ # initialize level parameters - level_params = dict() + level_params = {} level_params['dt'] = 1e-2 # initialize sweeper parameters - sweeper_params = dict() + sweeper_params = {} sweeper_params['quad_type'] = 'RADAU-RIGHT' sweeper_params['num_nodes'] = 3 sweeper_params['QI'] = 'LU' @@ -128,15 +127,12 @@ def run_vdp( 'u0': np.array([2.0, 0.0]), } - if custom_problem_params is not None: - problem_params = {**problem_params, **custom_problem_params} - # initialize step parameters - step_params = dict() + step_params = {} step_params['maxiter'] = 4 # initialize controller parameters - controller_params = dict() + controller_params = {} controller_params['logger_level'] = 30 controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class]) controller_params['mssdc_jac'] = False @@ -145,20 +141,16 @@ def run_vdp( controller_params = {**controller_params, **custom_controller_params} # fill description dictionary for easy step instantiation - description = dict() - description['problem_class'] = vanderpol # pass problem class - description['problem_params'] = problem_params # pass problem parameters - description['sweeper_class'] = generic_implicit # pass sweeper - description['sweeper_params'] = sweeper_params # pass sweeper parameters - description['level_params'] = level_params # pass level parameters + description = {} + description['problem_class'] = vanderpol + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit_efficient + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params description['step_params'] = step_params if custom_description is not None: - for k in custom_description.keys(): - if k == 'sweeper_class': - description[k] = custom_description[k] - continue - description[k] = {**description.get(k, {}), **custom_description.get(k, {})} + description = merge_descriptions(description, custom_description) # set time parameters t0 = 0.0 @@ -188,7 +180,8 @@ def run_vdp( from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults rnd_args = {'iteration': 3} - args = {'time': 1.0, 'target': 0} + # args = {'time': 0.9, 'target': 0} + args = {'time': 5.25, 'target': 0} prepare_controller_for_faults(controller, fault_stuff, rnd_args, args) # call main function to get things done... 
@@ -219,7 +212,7 @@ def fetch_test_data(stats, comm=None, use_MPI=False): if type not in get_list_of_types(stats): raise ValueError(f"Can't read type \"{type}\" from stats, only got", get_list_of_types(stats)) - if comm is None or use_MPI == False: + if comm is None or use_MPI is False: data[type] = [me[1] for me in get_sorted(stats, type=type, recomputed=None, sortby='time')] else: data[type] = [me[1] for me in get_sorted(stats, type=type, recomputed=None, sortby='time', comm=comm)] @@ -275,8 +268,6 @@ def mpi_vs_nonMPI(MPI_ready, comm): custom_description = {'convergence_controllers': {}} custom_description['convergence_controllers'][Adaptivity] = {'e_tol': 1e-7, 'avoid_restarts': False} - custom_controller_params = {'logger_level': 30} - data = [{}, {}] for i in range(2): @@ -285,7 +276,6 @@ def mpi_vs_nonMPI(MPI_ready, comm): custom_description=custom_description, num_procs=size, use_MPI=use_MPI[i], - custom_controller_params=custom_controller_params, Tend=1.0, comm=comm, ) @@ -316,7 +306,7 @@ def check_adaptivity_with_avoid_restarts(comm=None, size=1): """ fig, ax = plt.subplots() custom_description = {'convergence_controllers': {}, 'level_params': {'dt': 1.0e-2}} - custom_controller_params = {'logger_level': 30, 'all_to_done': False} + custom_controller_params = {'all_to_done': False} results = {'e': {}, 'sweeps': {}, 'restarts': {}} size = comm.size if comm is not None else size @@ -391,7 +381,6 @@ def check_step_size_limiter(size=4, comm=None): from pySDC.implementations.convergence_controller_classes.step_size_limiter import StepSizeLimiter custom_description = {'convergence_controllers': {}, 'level_params': {'dt': 1.0e-2}} - custom_controller_params = {'logger_level': 30} expect = {} params = {'e_tol': 1e-6} @@ -399,9 +388,11 @@ def check_step_size_limiter(size=4, comm=None): if limit_step_sizes: params['dt_max'] = expect['dt_max'] * 0.9 params['dt_min'] = np.inf + params['dt_slope_max'] = expect['dt_slope_max'] * 0.9 + params['dt_slope_min'] = expect['dt_slope_min'] * 1.1 custom_description['convergence_controllers'][StepSizeLimiter] = {'dt_min': expect['dt_min'] * 1.1} else: - for k in ['dt_max', 'dt_min']: + for k in ['dt_max', 'dt_min', 'dt_slope_max', 'dt_slope_min']: params.pop(k, None) custom_description['convergence_controllers'].pop(StepSizeLimiter, None) @@ -410,33 +401,50 @@ def check_step_size_limiter(size=4, comm=None): custom_description=custom_description, num_procs=size, use_MPI=comm is not None, - custom_controller_params=custom_controller_params, Tend=5.0e0, comm=comm, ) # plot the step sizes - dt = get_sorted(stats, type='dt', recomputed=False, comm=comm) + dt = get_sorted(stats, type='dt', recomputed=None, comm=comm) # make sure that the convergence controllers are only added once convergence_controller_classes = [type(me) for me in controller.convergence_controllers] for c in convergence_controller_classes: assert convergence_controller_classes.count(c) == 1, f'Convergence controller {c} added multiple times' + dt_numpy = np.array([me[1] for me in dt]) if not limit_step_sizes: - expect['dt_max'] = max([me[1] for me in dt]) - expect['dt_min'] = min([me[1] for me in dt]) + expect['dt_max'] = max(dt_numpy) + expect['dt_min'] = min(dt_numpy) + expect['dt_slope_max'] = max(dt_numpy[:-2] / dt_numpy[1:-1]) + expect['dt_slope_min'] = min(dt_numpy[:-2] / dt_numpy[1:-1]) else: - dt_max = max([me[1] for me in dt]) - dt_min = min([me[1] for me in dt[size:-size]]) # The first and last step might fall below the limits + dt_max = max(dt_numpy) + dt_min = 
min(dt_numpy[size:-size])  # The first and last step might fall below the limits
+        dt_slope_max = max(dt_numpy[:-2] / dt_numpy[1:-1])
+        dt_slope_min = min(dt_numpy[:-2] / dt_numpy[1:-1])
         assert (
             dt_max <= expect['dt_max']
         ), f"Exceeded maximum allowed step size! Got {dt_max:.4e}, allowed {params['dt_max']:.4e}."
         assert (
             dt_min >= expect['dt_min']
         ), f"Exceeded minimum allowed step size! Got {dt_min:.4e}, allowed {params['dt_min']:.4e}."
+        assert (
+            dt_slope_max <= expect['dt_slope_max']
+        ), f"Exceeded maximum allowed step size slope! Got {dt_slope_max:.4e}, allowed {params['dt_slope_max']:.4e}."
+        assert (
+            dt_slope_min >= expect['dt_slope_min']
+        ), f"Exceeded minimum allowed step size slope! Got {dt_slope_min:.4e}, allowed {params['dt_slope_min']:.4e}."
 
-    if comm == None:
+    if comm is None:
         print(f'Passed step size limiter test with {size} ranks in nonMPI implementation')
     else:
         if comm.rank == 0:
@@ -483,10 +491,8 @@ def interpolation_stuff():  # pragma: no cover
         'sweeper_params': sweeper_params,
     }
 
-    custom_controller_params = {'logger_level': 30}
     stats, controller, _ = run_vdp(
         custom_description=custom_description,
-        custom_controller_params=custom_controller_params,
         hook_class=[LogLocalErrorPostStep, LogData, LogWork] + hook_collection,
     )
 
@@ -516,6 +522,8 @@ def interpolation_stuff():  # pragma: no cover
 
 if __name__ == "__main__":
+    import sys
+
     try:
         from mpi4py import MPI
 
@@ -527,8 +535,18 @@ if __name__ == "__main__":
         comm = None
         size = 1
 
-    mpi_vs_nonMPI(MPI_ready, comm)
-    check_step_size_limiter(size, comm)
+    if len(sys.argv) == 1:
+        mpi_vs_nonMPI(MPI_ready, comm)
+        check_step_size_limiter(size, comm)
 
-    if size == 1:
+        if size == 1:
+            check_adaptivity_with_avoid_restarts(comm=None, size=1)
+
+    elif 'mpi_vs_nonMPI' in sys.argv:
+        mpi_vs_nonMPI(MPI_ready, comm)
+    elif 'check_step_size_limiter' in sys.argv:
+        check_step_size_limiter(size, comm)
+    elif 'check_adaptivity_with_avoid_restarts' in sys.argv and size == 1:
         check_adaptivity_with_avoid_restarts(comm=None, size=1)
+    else:
+        raise NotImplementedError('Your test is not implemented!')
diff --git a/pySDC/projects/Resilience/work_precision.py b/pySDC/projects/Resilience/work_precision.py
new file mode 100644
index 0000000000000000000000000000000000000000..417f95e758be0989bcdf75a1a847e34a4d0387d8
--- /dev/null
+++ b/pySDC/projects/Resilience/work_precision.py
@@ -0,0 +1,998 @@
+from mpi4py import MPI
+import numpy as np
+import matplotlib.pyplot as plt
+import pickle
+
+from pySDC.projects.Resilience.strategies import merge_descriptions
+from pySDC.projects.Resilience.Lorenz import run_Lorenz
+from pySDC.projects.Resilience.vdp import run_vdp
+from pySDC.projects.Resilience.Schroedinger import run_Schroedinger
+from pySDC.projects.Resilience.quench import run_quench
+
+from pySDC.helpers.stats_helper import get_sorted
+from pySDC.helpers.plot_helper import setup_mpl, figsize_by_journal
+
+setup_mpl(reset=True)
+LOGGER_LEVEL = 30
+VERBOSE = True
+
+MAPPINGS = {
+    'e_global': ('e_global_post_run', max, False),
+    'e_global_rel': ('e_global_rel_post_run', max, False),
+    't': ('timing_run', max, False),
+    # 'e_local_max': ('e_local_post_step', max, False),
+    'k_SDC': ('k', sum, None),
+    'k_SDC_no_restart': ('k', sum,
False),
+    'k_Newton': ('work_newton', sum, None),
+    'k_Newton_no_restart': ('work_newton', sum, False),
+    'k_rhs': ('work_rhs', sum, None),
+    'restart': ('restart', sum, None),
+    'dt_mean': ('dt', np.mean, False),
+    'dt_max': ('dt', max, False),
+    'e_embedded_max': ('error_embedded_estimate', max, False),
+}
+
+
+def single_run(problem, strategy, data, custom_description, num_procs=1, comm_world=None, problem_args=None):
+    """
+    Make a single run of a particular problem with a certain strategy.
+
+    Args:
+        problem (function): A problem to run
+        strategy (Strategy): SDC strategy
+        data (dict): Put the results in here
+        custom_description (dict): Overwrite presets
+        num_procs (int): Number of processes for the time communicator
+        comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
+        problem_args (dict): Extra arguments passed on to the problem
+
+    Returns:
+        None
+    """
+    from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI
+    from pySDC.implementations.hooks.log_work import LogWork
+    from pySDC.projects.Resilience.hook import LogData
+
+    comm = comm_world.Split(comm_world.rank < num_procs)
+    if comm_world.rank >= num_procs:
+        return None
+
+    strategy_description = strategy.get_custom_description(problem, num_procs)
+    description = merge_descriptions(strategy_description, custom_description)
+
+    controller_params = {'logger_level': LOGGER_LEVEL}
+    problem_args = {} if problem_args is None else problem_args
+
+    stats, controller, _ = problem(
+        custom_description=description,
+        Tend=strategy.get_Tend(problem, num_procs),
+        hook_class=[LogData, LogWork, LogGlobalErrorPostRunMPI],
+        custom_controller_params=controller_params,
+        use_MPI=True,
+        comm=comm,
+        **problem_args,
+    )
+
+    # record all the metrics
+    for key, mapping in MAPPINGS.items():
+        me = get_sorted(stats, type=mapping[0], recomputed=mapping[2], comm=comm)
+        if len(me) == 0:
+            data[key] += [np.nan]
+        else:
+            data[key] += [mapping[1]([you[1] for you in me])]
+    return None
+
+
+def get_parameter(dictionary, where):
+    """
+    Get a parameter at a certain position in a dictionary of dictionaries.
+
+    Args:
+        dictionary (dict): The dictionary
+        where (list): The list of keys leading to the value you want
+
+    Returns:
+        The value of the dictionary
+    """
+    if len(where) == 1:
+        return dictionary[where[0]]
+    else:
+        return get_parameter(dictionary[where[0]], where[1:])
+
+
+def set_parameter(dictionary, where, parameter):
+    """
+    Set a parameter at a certain position in a dictionary of dictionaries
+
+    Args:
+        dictionary (dict): The dictionary
+        where (list): The list of keys leading to the value you want to set
+        parameter: Whatever you want to set the parameter to
+
+    Returns:
+        None
+    """
+    if len(where) == 1:
+        dictionary[where[0]] = parameter
+    else:
+        set_parameter(dictionary[where[0]], where[1:], parameter)
+
+
+def get_path(problem, strategy, num_procs, handle='', base_path='data/work_precision'):
+    """
+    Get the path to a specific data file.
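+    For example, with default arguments and an ``AdaptivityStrategy`` instance, the path for
+    the Van der Pol problem resolves to
+    ``data/work_precision/run_vdp-AdaptivityStrategy-wp-1procs.pickle``.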
+
+    Args:
+        problem (function): A problem to run
+        strategy (Strategy): SDC strategy
+        num_procs (int): Number of processes for the time communicator
+        handle (str): The name of the configuration
+        base_path (str): Some path where all the files are stored
+
+    Returns:
+        str: The path to the data you are looking for
+    """
+    return f'{base_path}/{problem.__name__}-{strategy.__class__.__name__}-{handle}{"-wp" if handle else "wp"}-{num_procs}procs.pickle'
+
+
+def record_work_precision(
+    problem,
+    strategy,
+    num_procs=1,
+    custom_description=None,
+    handle='',
+    runs=1,
+    comm_world=None,
+    problem_args=None,
+    param_range=None,
+):
+    """
+    Run problem with strategy and record the cost parameters.
+
+    Args:
+        problem (function): A problem to run
+        strategy (Strategy): SDC strategy
+        num_procs (int): Number of processes for the time communicator
+        custom_description (dict): Overwrite presets
+        handle (str): The name of the configuration
+        runs (int): Number of runs you want to do
+        comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
+        problem_args (dict): Extra arguments passed on to the problem
+        param_range (list): Values of the precision parameter to run with, overriding the defaults
+
+    Returns:
+        None
+    """
+    data = {}
+
+    # prepare precision parameters
+    param = strategy.precision_parameter
+    description = merge_descriptions(
+        strategy.get_custom_description(problem, num_procs),
+        {} if custom_description is None else custom_description,
+    )
+    if param == 'e_tol':
+        power = 10.0
+        set_parameter(description, strategy.precision_parameter_loc[:-1] + ['dt_min'], 0)
+        exponents = [-3, -2, -1, 0, 1, 2, 3]
+        if problem.__name__ == 'run_vdp':
+            exponents = [-4, -3, -2, -1, 0, 1, 2]
+    elif param == 'dt':
+        power = 2.0
+        exponents = [-1, 0, 1, 2, 3]
+    elif param == 'restol':
+        power = 10.0
+        exponents = [-2, -1, 0, 1, 2, 3]
+        if problem.__name__ == 'run_vdp':
+            exponents = [-4, -3, -2, -1, 0, 1]
+    else:
+        raise NotImplementedError(f"I don't know how to get default value for parameter \"{param}\"")
+
+    where = strategy.precision_parameter_loc
+    default = get_parameter(description, where)
+    param_range = [default * power**i for i in exponents] if param_range is None else param_range
+
+    if problem.__name__ == 'run_quench':
+        if param == 'restol':
+            param_range = [1e-5, 1e-6, 1e-7, 1e-8, 1e-9]
+        elif param == 'e_tol':
+            param_range = [1e-2 / 2.0**me for me in [4, 5, 6, 7, 8, 9, 10]]
+        elif param == 'dt':
+            param_range = [500 / 2.0**me for me in [5, 6, 7, 8]]
+
+    # run multiple times with different parameters
+    for i in range(len(param_range)):
+        set_parameter(description, where, param_range[i])
+
+        if strategy.name == 'adaptivity_coll':
+            # set_parameter(description, ['level_params', 'restol'], 1e-9)
+            set_parameter(description, ['level_params', 'restol'], param_range[i] / 10.0)
+
+        data[param_range[i]] = {key: [] for key in MAPPINGS.keys()}
+        data[param_range[i]]['param'] = [param_range[i]]
+        data[param_range[i]][param] = [param_range[i]]
+        for _j in range(runs):
+            single_run(
+                problem,
+                strategy,
+                data[param_range[i]],
+                custom_description=description,
+                comm_world=comm_world,
+                problem_args=problem_args,
+                num_procs=num_procs,
+            )
+
+            comm_world.Barrier()
+
+            if VERBOSE and comm_world.rank == 0:
+                print(
+                    f'{problem.__name__} {handle} {num_procs} procs, {param}={param_range[i]:.2e}: e={data[param_range[i]]["e_global"][-1]}, t={data[param_range[i]]["t"][-1]}, k={data[param_range[i]]["k_SDC"][-1]}'
+                )
+
+    if comm_world.rank == 0:
+        import socket
+        import time
+
+        data['meta'] = {
+            'hostname': socket.gethostname(),
+            'time': time.time(),
+            'runs': runs,
+        }
+        with open(get_path(problem, strategy, num_procs,
handle), 'wb') as f: + pickle.dump(data, f) + + +def plot_work_precision( + problem, + strategy, + num_procs, + ax, + work_key='k_SDC', + precision_key='e_global', + handle='', + plotting_params=None, + comm_world=None, +): # pragma: no cover + """ + Plot data from running a problem with a strategy. + + Args: + problem (function): A problem to run + strategy (Strategy): SDC strategy + num_procs (int): Number of processes for the time communicator + ax (matplotlib.pyplot.axes): Somewhere to plot + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + handle (str): The name of the configuration + plotting_params (dict): Will be passed when plotting + comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script + + Returns: + None + """ + if comm_world.rank > 0: + return None + + with open(get_path(problem, strategy, num_procs, handle=handle), 'rb') as f: + data = pickle.load(f) + + keys = [key for key in data.keys() if key not in ['meta']] + work = [np.nanmean(data[key][work_key]) for key in keys] + precision = [np.nanmean(data[key][precision_key]) for key in keys] + + for key in [work_key, precision_key]: + rel_variance = [np.std(data[me][key]) / max([np.nanmean(data[me][key]), 1.0]) for me in keys] + if not all(me < 1e-1 or not np.isfinite(me) for me in rel_variance): + print( + f"WARNING: Variance in \"{key}\" for {get_path(problem, strategy, num_procs, handle)} too large! Got {rel_variance}" + ) + + style = merge_descriptions( + {**strategy.style, 'label': f'{strategy.style["label"]}{f" {handle}" if handle else ""}'}, + plotting_params if plotting_params else {}, + ) + + ax.loglog(work, precision, **style) + + if 't' in [work_key, precision_key]: + meta = data.get('meta', {}) + + if meta.get('hostname', None) in ['thomas-work']: + ax.text(0.1, 0.1, "Laptop timings!", transform=ax.transAxes) + if meta.get('runs', None) == 1: + ax.text(0.1, 0.2, "No sampling!", transform=ax.transAxes) + + +def decorate_panel(ax, problem, work_key, precision_key, num_procs=1, title_only=False): # pragma: no cover + """ + Decorate a plot + + Args: + ax (matplotlib.pyplot.axes): Somewhere to plot + problem (function): A problem to run + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + num_procs (int): Number of processes for the time communicator + title_only (bool): Put only the title on top, or do the whole shebang + + Returns: + None + """ + labels = { + 'k_SDC': 'SDC iterations', + 'k_SDC_no_restart': 'SDC iterations (restarts excluded)', + 'k_Newton': 'Newton iterations', + 'k_Newton_no_restart': 'Newton iterations (restarts excluded)', + 'k_rhs': 'right hand side evaluations', + 't': 'wall clock time / s', + 'e_global': 'global error', + 'e_global_rel': 'relative global error', + 'e_local_max': 'max. 
local error', + 'restart': 'restarts', + 'dt_max': r'$\Delta t_\mathrm{max}$', + 'dt_mean': r'$\bar{\Delta t}$', + 'param': 'parameter', + } + + if not title_only: + ax.set_xlabel(labels.get(work_key, 'work')) + ax.set_ylabel(labels.get(precision_key, 'precision')) + # ax.legend(frameon=False) + + titles = { + 'run_vdp': 'Van der Pol', + 'run_Lorenz': 'Lorenz attractor', + 'run_Schroedinger': r'Schr\"odinger', + 'run_quench': 'Quench', + } + ax.set_title(titles.get(problem.__name__, '')) + + +def execute_configurations( + problem, + configurations, + work_key, + precision_key, + num_procs, + ax, + decorate, + record, + runs, + comm_world, + plotting, +): + """ + Run for multiple configurations. + + Args: + problem (function): A problem to run + configurations (dict): The configurations you want to run with + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + num_procs (int): Number of processes for the time communicator + ax (matplotlib.pyplot.axes): Somewhere to plot + decorate (bool): Whether to decorate fully or only put the title + record (bool): Whether to only plot or also record the data first + runs (int): Number of runs you want to do + comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script + plotting (bool): Whether to plot something + + Returns: + None + """ + for _, config in configurations.items(): + for strategy in config['strategies']: + shared_args = { + 'problem': problem, + 'strategy': strategy, + 'handle': config.get('handle', ''), + 'num_procs': config.get('num_procs', num_procs), + } + if record: + record_work_precision( + **shared_args, + custom_description=config.get('custom_description', {}), + runs=runs, + comm_world=comm_world, + problem_args=config.get('problem_args', {}), + param_range=config.get('param_range', None), + ) + if plotting and comm_world.rank == 0: + plot_work_precision( + **shared_args, + work_key=work_key, + precision_key=precision_key, + ax=ax, + plotting_params=config.get('plotting_params', {}), + comm_world=comm_world, + ) + + decorate_panel( + ax=ax, + problem=problem, + work_key=work_key, + precision_key=precision_key, + num_procs=num_procs, + title_only=not decorate, + ) + + +def get_configs(mode, problem): + """ + Get configurations for work-precision plots. These are dictionaries containing strategies and handles and so on. 
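+    Each configuration is itself a dictionary with the keys that `execute_configurations` reads:
+    'strategies' (mandatory) and, optionally, 'custom_description', 'handle', 'num_procs',
+    'plotting_params', 'problem_args' and 'param_range'.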
+
+    Args:
+        mode (str): The name of the set of configurations you want to retrieve
+        problem (function): A problem to run
+
+    Returns:
+        dict: Configurations
+    """
+    configurations = {}
+    if mode == 'regular':
+        from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy
+
+        handle = 'regular'
+        configurations[0] = {
+            'handle': handle,
+            'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True), IterateStrategy(useMPI=True)],
+        }
+    elif mode == 'step_size_limiting':
+        from pySDC.implementations.convergence_controller_classes.step_size_limiter import StepSizeLimiter
+        from pySDC.projects.Resilience.strategies import AdaptivityStrategy
+
+        configurations[0] = {
+            'custom_description': {'convergence_controllers': {StepSizeLimiter: {'dt_max': 25}}},
+            'handle': 'step limiter',
+            'strategies': [AdaptivityStrategy(useMPI=True)],
+            'plotting_params': {'color': 'teal', 'marker': 'v'},
+        }
+        configurations[1] = {
+            'custom_description': {'convergence_controllers': {StepSizeLimiter: {'dt_slope_max': 2}}},
+            'handle': 'slope limiter',
+            'strategies': [AdaptivityStrategy(useMPI=True)],
+            'plotting_params': {'color': 'magenta', 'marker': 'x'},
+        }
+        configurations[2] = {
+            'custom_description': {},
+            'handle': 'no limits',
+            'plotting_params': {'label': 'adaptivity'},
+            'strategies': [AdaptivityStrategy(useMPI=True)],
+        }
+    elif mode == 'compare_strategies':
+        from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy
+
+        description_high_order = {'step_params': {'maxiter': 5}}
+        description_low_order = {'step_params': {'maxiter': 3}}
+        dashed = {'ls': '--'}
+
+        configurations[0] = {
+            'custom_description': description_high_order,
+            'handle': r'high order',
+            'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)],
+        }
+        configurations[1] = {
+            'custom_description': description_low_order,
+            'handle': r'low order',
+            'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)],
+            'plotting_params': dashed,
+        }
+
+        description_large_step = {'level_params': {'dt': 5.0 if problem.__name__ == 'run_quench' else 3e-2}}
+        description_small_step = {'level_params': {'dt': 1.0 if problem.__name__ == 'run_quench' else 1e-2}}
+
+        configurations[2] = {
+            'custom_description': description_large_step,
+            'handle': r'large step',
+            'strategies': [IterateStrategy(useMPI=True)],
+            'plotting_params': dashed,
+        }
+        configurations[3] = {
+            'custom_description': description_small_step,
+            'handle': r'small step',
+            'strategies': [IterateStrategy(useMPI=True)],
+        }
+    elif mode == 'RK':
+        from pySDC.projects.Resilience.strategies import AdaptivityStrategy, DIRKStrategy, ERKStrategy
+
+        # from pySDC.implementations.sweeper_classes.explicit import explicit
+        # configurations[3] = {
+        #     'custom_description': {
+        #         'step_params': {'maxiter': 5},
+        #         'sweeper_params': {'QE': 'EE'},
+        #         'sweeper_class': explicit,
+        #     },
+        #     'handle': 'explicit order 4',
+        #     'strategies': [AdaptivityStrategy(useMPI=True)],
+        #     'plotting_params': {'ls': ':', 'label': 'explicit SDC5(4)'},
+        # }
+        configurations[0] = {
+            'strategies': [ERKStrategy(useMPI=True), DIRKStrategy(useMPI=True)],
+        }
+        configurations[1] = {
+            'custom_description': {'step_params': {'maxiter': 5}},
+            'handle': 'order 5',
+            'strategies': [AdaptivityStrategy(useMPI=True)],
+            'plotting_params': {'label': 'SDC5(4)'},
+        }
+        configurations[2] = {
+            'custom_description': {'step_params': {'maxiter': 4}},
+            'handle': 'order 4',
+            'strategies':
[AdaptivityStrategy(useMPI=True)], + 'plotting_params': {'ls': '--', 'label': 'SDC4(3)'}, + } + elif mode == 'parallel_efficiency': + from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy, ERKStrategy + + desc = {} + desc['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'} + desc['step_params'] = {'maxiter': 5} + + descIterate = {} + descIterate['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'} + + ls = { + 1: '-', + 2: '--', + 3: '-.', + 4: ':', + 5: 'loosely dashdotted', + } + + # configurations[-1] = { + # 'strategies': [ERKStrategy(useMPI=False)], 'num_procs':1, + # } + + for num_procs in [4, 2, 1]: + plotting_params = {'ls': ls[num_procs], 'label': f'adaptivity {num_procs} procs'} + configurations[num_procs] = { + 'strategies': [AdaptivityStrategy(True)], + 'custom_description': desc, + 'num_procs': num_procs, + 'plotting_params': plotting_params, + } + plotting_params = {'ls': ls[num_procs], 'label': fr'$k$ adaptivity {num_procs} procs'} + configurations[num_procs + 100] = { + 'strategies': [IterateStrategy(True)], + 'custom_description': descIterate, + 'num_procs': num_procs, + 'plotting_params': plotting_params, + } + + elif mode[:13] == 'vdp_stiffness': + from pySDC.projects.Resilience.strategies import AdaptivityStrategy, ERKStrategy, DIRKStrategy + + mu = float(mode[14:]) + + problem_desc = {'problem_params': {'mu': mu}} + + desc = {} + desc['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'} + desc['step_params'] = {'maxiter': 5} + desc['problem_params'] = problem_desc['problem_params'] + + ls = { + 1: '-', + 2: '--', + 3: '-.', + 4: ':', + 5: 'loosely dashdotted', + } + + for num_procs in [4, 1]: + plotting_params = {'ls': ls[num_procs], 'label': f'SDC {num_procs} procs'} + configurations[num_procs] = { + 'strategies': [AdaptivityStrategy(True)], + 'custom_description': desc, + 'num_procs': num_procs, + 'plotting_params': plotting_params, + 'handle': mode, + } + + configurations[2] = { + 'strategies': [ERKStrategy(useMPI=True)], + 'num_procs': 1, + 'handle': mode, + 'plotting_params': {'label': 'CP5(4)'}, + 'custom_description': problem_desc, + #'param_range': [1e-2], + } + configurations[3] = { + 'strategies': [DIRKStrategy(useMPI=True)], + 'num_procs': 1, + 'handle': mode, + 'plotting_params': {'label': 'DIRK4(3)'}, + 'custom_description': problem_desc, + } + + elif mode == 'compare_adaptivity': + # TODO: configurations not final! 
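+        # Compared below: adaptivity based on extrapolation within Q, the collocation-based
+        # adaptivity variants and classical adaptivity with a fixed number of iterations.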
+ from pySDC.projects.Resilience.strategies import ( + AdaptivityCollocationTypeStrategy, + AdaptivityCollocationRefinementStrategy, + AdaptivityStrategy, + AdaptivityExtrapolationWithinQStrategy, + ) + + strategies = [ + AdaptivityCollocationTypeStrategy(useMPI=True), + AdaptivityCollocationRefinementStrategy(useMPI=True), + ] + + restol = None + for strategy in strategies: + strategy.restol = restol + + configurations[1] = { + 'custom_description': {'step_params': {'maxiter': 99}, 'level_params': {'restol': 1e-11}}, + 'strategies': [AdaptivityExtrapolationWithinQStrategy(useMPI=True)], + } + configurations[2] = {'strategies': strategies} + configurations[3] = { + 'custom_description': {'step_params': {'maxiter': 5}}, + 'strategies': [AdaptivityStrategy(useMPI=True)], + } + + # strategies2 = [AdaptivityCollocationTypeStrategy(useMPI=True), AdaptivityCollocationRefinementStrategy(useMPI=True)] + # restol = 1e-6 + # for strategy in strategies2: + # strategy.restol = restol + # configurations[3] = {'strategies':strategies2, 'handle': 'low restol', 'plotting_params': {'ls': '--'}} + + elif mode == 'quench': + from pySDC.projects.Resilience.strategies import ( + AdaptivityStrategy, + DoubleAdaptivityStrategy, + IterateStrategy, + BaseStrategy, + ) + + dumbledoresarmy = DoubleAdaptivityStrategy(useMPI=True) + # dumbledoresarmy.residual_e_tol_ratio = 1e2 + dumbledoresarmy.residual_e_tol_abs = 1e-3 + + strategies = [ + AdaptivityStrategy(useMPI=True), + IterateStrategy(useMPI=True), + BaseStrategy(useMPI=True), + dumbledoresarmy, + ] + configurations[1] = {'strategies': strategies} + configurations[2] = { + 'strategies': strategies, + 'problem_args': {'imex': True}, + 'handle': 'IMEX', + 'plotting_params': {'ls': '--'}, + } + inexact = {'problem_params': {'newton_iter': 30}} + configurations[3] = { + 'strategies': strategies, + 'custom_description': inexact, + 'handle': 'inexact', + 'plotting_params': {'ls': ':'}, + } + LU = {'sweeper_params': {'QI': 'LU'}} + configurations[4] = { + 'strategies': strategies, + 'custom_description': LU, + 'handle': 'LU', + 'plotting_params': {'ls': '-.'}, + } + elif mode == 'preconditioners': + from pySDC.projects.Resilience.strategies import AdaptivityStrategy, IterateStrategy, BaseStrategy + + strategies = [AdaptivityStrategy(useMPI=True), IterateStrategy(useMPI=True), BaseStrategy(useMPI=True)] + + precons = ['IE', 'LU', 'MIN'] + ls = ['-', '--', '-.', ':'] + for i in range(len(precons)): + configurations[i] = { + 'strategies': strategies, + 'custom_description': {'sweeper_params': {'QI': precons[i]}}, + 'handle': precons[i], + 'plotting_params': {'ls': ls[i]}, + } + + elif mode == 'newton_tol': + from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy + + tol_range = [1e-7, 1e-9, 1e-11] + ls = ['-', '--', '-.', ':'] + for i in range(len(tol_range)): + configurations[i] = { + 'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)], + 'custom_description': { + 'problem_params': {'newton_tol': tol_range[i]}, + 'step_params': {'maxiter': 5}, + }, + 'handle': f"Newton tol={tol_range[i]:.1e}", + 'plotting_params': {'ls': ls[i]}, + } + configurations[i + len(tol_range)] = { + 'strategies': [IterateStrategy(useMPI=True)], + 'custom_description': { + 'problem_params': {'newton_tol': tol_range[i]}, + }, + 'handle': f"Newton tol={tol_range[i]:.1e}", + 'plotting_params': {'ls': ls[i]}, + } + elif mode == 'avoid_restarts': + from pySDC.projects.Resilience.strategies import ( + AdaptivityStrategy, + 
AdaptivityAvoidRestartsStrategy, + AdaptivityInterpolationStrategy, + ) + + desc = {'sweeper_params': {'QI': 'IE'}, 'step_params': {'maxiter': 3}} + param_range = [1e-3, 1e-5] + configurations[0] = { + 'strategies': [AdaptivityInterpolationStrategy(useMPI=True)], + 'plotting_params': {'ls': '--'}, + 'custom_description': desc, + 'param_range': param_range, + } + configurations[1] = { + 'strategies': [AdaptivityAvoidRestartsStrategy(useMPI=True)], + 'plotting_params': {'ls': '-.'}, + 'custom_description': desc, + 'param_range': param_range, + } + configurations[2] = { + 'strategies': [AdaptivityStrategy(useMPI=True)], + 'custom_description': desc, + 'param_range': param_range, + } + else: + raise NotImplementedError(f'Don\'t know the mode "{mode}"!') + + return configurations + + +def get_fig(x=1, y=1, **kwargs): # pragma: no cover + """ + Get a figure to plot in. + + Args: + x (int): How many panels in horizontal direction you want + y (int): How many panels in vertical direction you want + + Returns: + matplotlib.pyplot.Figure + """ + width = 1.0 + ratio = 1.0 if y == 2 else 0.5 + keyword_arguments = { + 'figsize': figsize_by_journal('Springer_Numerical_Algorithms', width, ratio), + 'layout': 'constrained', + **kwargs, + } + return plt.subplots(y, x, **keyword_arguments) + + +def save_fig( + fig, name, work_key, precision_key, legend=True, format='pdf', base_path='data', **kwargs +): # pragma: no cover + """ + Save a figure with a legend on the bottom. + + Args: + fig (matplotlib.pyplot.Figure): Figure you want to save + name (str): Name of the plot to put in the path + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + legend (bool): Put a legend or not + format (str): Format to store the figure with + + Returns: + None + """ + handles, labels = fig.get_axes()[0].get_legend_handles_labels() + order = np.argsort([me[0] for me in labels]) + fig.legend( + [handles[i] for i in order], + [labels[i] for i in order], + loc='outside lower center', + ncols=3 if len(handles) % 3 == 0 else 4, + frameon=False, + fancybox=True, + ) + + path = f'{base_path}/wp-{name}-{work_key}-{precision_key}.{format}' + fig.savefig(path, bbox_inches='tight', **kwargs) + print(f'Stored figure \"{path}\"') + + +def all_problems(mode='compare_strategies', plotting=True, base_path='data', **kwargs): # pragma: no cover + """ + Make a plot comparing various strategies for all problems. + + Args: + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + + Returns: + None + """ + + fig, axs = get_fig(2, 2) + + shared_params = { + 'work_key': 'k_SDC', + 'precision_key': 'e_global', + 'num_procs': 1, + 'runs': 1, + 'comm_world': MPI.COMM_WORLD, + 'record': False, + 'plotting': plotting, + **kwargs, + } + + problems = [run_vdp, run_Lorenz, run_Schroedinger, run_quench] + + for i in range(len(problems)): + execute_configurations( + **shared_params, + problem=problems[i], + ax=axs.flatten()[i], + decorate=True, + configurations=get_configs(mode, problems[i]), + ) + + if plotting and shared_params['comm_world'].rank == 0: + save_fig( + fig=fig, + name=mode, + work_key=shared_params['work_key'], + precision_key=shared_params['precision_key'], + legend=True, + base_path=base_path, + ) + + +def ODEs(mode='compare_strategies', plotting=True, base_path='data', **kwargs): # pragma: no cover + """ + Make a plot comparing various strategies for the two ODEs. 
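+    The two ODEs are the Van der Pol oscillator and the Lorenz attractor.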
+ + Args: + work_key (str): The key in the recorded data you want on the x-axis + precision_key (str): The key in the recorded data you want on the y-axis + + Returns: + None + """ + + fig, axs = get_fig(x=2, y=1) + + shared_params = { + 'work_key': 'k_SDC', + 'precision_key': 'e_global', + 'num_procs': 1, + 'runs': 1, + 'comm_world': MPI.COMM_WORLD, + 'record': False, + 'plotting': plotting, + **kwargs, + } + + problems = [run_vdp, run_Lorenz] + + for i in range(len(problems)): + execute_configurations( + **shared_params, + problem=problems[i], + ax=axs.flatten()[i], + decorate=i == 0, + configurations=get_configs(mode, problems[i]), + ) + + if plotting and shared_params['comm_world'].rank == 0: + save_fig( + fig=fig, + name=f'ODEs-{mode}', + work_key=shared_params['work_key'], + precision_key=shared_params['precision_key'], + legend=True, + base_path=base_path, + ) + + +def single_problem(mode, problem, plotting=True, base_path='data', **kwargs): # pragma: no cover + """ + Make a plot for a single problem + + Args: + mode (str): What you want to look at + problem (function): A problem to run + """ + fig, ax = get_fig(1, 1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1, 0.5)) + + params = { + 'work_key': 'k_SDC', + 'precision_key': 'e_global', + 'num_procs': 1, + 'runs': 1, + 'comm_world': MPI.COMM_WORLD, + 'record': False, + 'plotting': plotting, + **kwargs, + } + + execute_configurations(**params, problem=problem, ax=ax, decorate=True, configurations=get_configs(mode, problem)) + + if plotting: + save_fig( + fig=fig, + name=f'{problem.__name__}-{mode}', + work_key=params['work_key'], + precision_key=params['precision_key'], + legend=False, + base_path=base_path, + ) + + +def vdp_stiffness_plot(base_path='data', format='pdf', **kwargs): # pragma: no cover + fig, axs = get_fig(2, 2, sharex=True) + + mus = [0, 5, 10, 15] + + for i in range(len(mus)): + params = { + 'runs': 1, + 'problem': run_vdp, + 'record': False, + 'work_key': 't', + 'precision_key': 'e_global_rel', + 'comm_world': MPI.COMM_WORLD, + **kwargs, + } + params['num_procs'] = min(params['comm_world'].size, 5) + params['plotting'] = params['comm_world'].rank == 0 + + configurations = get_configs(mode=f'vdp_stiffness-{mus[i]}', problem=run_vdp) + execute_configurations(**params, ax=axs.flatten()[i], decorate=True, configurations=configurations) + axs.flatten()[i].set_title(rf'$\mu={{{mus[i]}}}$') + + fig.suptitle('Van der Pol') + if params['comm_world'].rank == 0: + save_fig( + fig=fig, + name='vdp-stiffness', + work_key=params['work_key'], + precision_key=params['precision_key'], + legend=False, + base_path=base_path, + format=format, + ) + + +if __name__ == "__main__": + comm_world = MPI.COMM_WORLD + + params = { + 'mode': 'compare_adaptivity', + 'runs': 1, + 'num_procs': min(comm_world.size, 5), + 'plotting': comm_world.rank == 0, + } + params_single = { + **params, + 'problem': run_vdp, + } + record = True + single_problem(**params_single, work_key='t', precision_key='e_global_rel', record=record) + # single_problem(**params_single, work_key='k_Newton_no_restart', precision_key='e_global_rel', record=False) + # single_problem(**params_single, work_key='param', precision_key='e_global_rel', record=False) + # ODEs(**params, work_key='t', precision_key='e_global_rel', record=record) + + all_params = { + 'record': False, + 'runs': 1, + 'work_key': 't', + 'precision_key': 'e_global_rel', + 'plotting': comm_world.rank == 0, + } + + for _mode in ['parallel_efficiency']: # , 'preconditioners', 'compare_adaptivity']: 
+        # all_problems(**all_params, mode=_mode)
+        comm_world.Barrier()
+
+    if comm_world.rank == 0:
+        # parallel_efficiency(**params_single, work_key='k_SDC', precision_key='e_global_rel')
+        plt.show()
diff --git a/pySDC/projects/compression/Docker/Dockerfile b/pySDC/projects/compression/Docker/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5500bd6b72218133a6f47a62b9857dc17cfa7c54
--- /dev/null
+++ b/pySDC/projects/compression/Docker/Dockerfile
@@ -0,0 +1,14 @@
+FROM spack/ubuntu-jammy
+
+# Disable Prompt During Packages Installation
+ARG DEBIAN_FRONTEND=noninteractive
+
+# Update Ubuntu Software repository
+RUN apt update -y && apt install -y cmake ccache pkg-config python3 pip mpich
+
+# Install libpressio with Python and SZ3 support
+RUN spack install libpressio+mgard+python+sz+sz3 +szauto+zfp +bitgrooming+digitrounding+fpzip+openmp+qoz+blosc ^sz@master+stats+random_access+time_compression
+
+# Prepare an entrypoint that installs pySDC
+COPY docker_entrypoint.sh /bin/
+ENTRYPOINT ["docker_entrypoint.sh"]
diff --git a/pySDC/projects/compression/Docker/Readme.rst b/pySDC/projects/compression/Docker/Readme.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5e6a0ee34898bfd8057981e626972a2e1368aa09
--- /dev/null
+++ b/pySDC/projects/compression/Docker/Readme.rst
@@ -0,0 +1,43 @@
+Instructions for using libpressio in the Docker container
+---------------------------------------------------------
+
+As Docker Desktop is no longer free for commercial use, you may need to install an alternative, such as `colima <https://github.com/abiosoft/colima>`_, first.
+
+If you haven't done this already, build the container using
+
+.. code-block:: bash
+
+    cd <local_path_to_pySDC>/pySDC/projects/compression/Docker
+    docker build -t libpressio .
+
+This creates an image with the name 'libpressio'.
+Please pay attention to the platform you are building on and the one you intend to run on. If you use this command on an ARM machine and try to use the image in a GitHub action, it will not run because it requires the amd64 architecture. You can build a platform-specific version for GitHub using
+
+.. code-block:: bash
+
+    docker buildx build --platform linux/amd64 -t libpressio:amd64 .
+
+
+If you are on an ARM machine like me, replace ``amd64`` by ``arm64`` to build an image for your local machine. Remember to replace the tag with something useful, such as ``-t libpressio:arm64``.
+
+Start the image using
+
+.. code-block:: bash
+
+    docker run -v <local_absolute_path_to_pySDC_installation>:/pySDC -ti --rm libpressio
+
+
+You may have to change the tag to the platform specific version.
+The ``-v`` flag does a `"bind mount" <https://docs.docker.com/storage/bind-mounts/>`_ of pySDC on your local machine.
+We want that because it lets us access the same version of pySDC that we have locally inside the container, in particular with all modifications that we make while the container is running.
+The ``-ti`` flag opens the image in an interactive manner, which allows us to run things inside the container, and the ``--rm`` flag removes the container once we are done with it.
+
+We have specified an entry point in the Docker file which will install the local version of pySDC using ``pip``.
+If you run into trouble, you may consult the file ``Docker/docker_entrypoint.sh`` in the compression project folder for what is required to install pySDC.
+Keep in mind that spack wants its own python, which means we are not working with Conda here. Just use ``pip`` to install more dependencies.
You can also add ``pip`` commands to the entry point file in order to make persistent changes to the container, or you can create a new Dockerfile based on the current one and replace the entry point with whatever you want if you're doing something non-generic.
+
+Have fun!
+
+TODOs
+_____
+ - Streamline the multiplatform business. See, for instance `here <https://docs.docker.com/build/building/multi-platform/>`_.
diff --git a/pySDC/projects/compression/Docker/docker_entrypoint.sh b/pySDC/projects/compression/Docker/docker_entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..bf3dc07e3575d79232c42b0bf029c6cf180b40f3
--- /dev/null
+++ b/pySDC/projects/compression/Docker/docker_entrypoint.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+# load spack environment variables
+source /opt/spack/share/spack/setup-env.sh
+
+allparams=("$@")
+
+echo "Install pySDC"
+source /pySDC/pySDC/projects/compression/Docker/install_pySDC.sh
+
+echo "Done"
+
+# open a new shell to keep the container running
+/bin/bash "${allparams[@]}"
diff --git a/pySDC/projects/compression/Docker/install_pySDC.sh b/pySDC/projects/compression/Docker/install_pySDC.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5dfce6b4b0efbdcede9b19c5c96d7dfe5140d3c2
--- /dev/null
+++ b/pySDC/projects/compression/Docker/install_pySDC.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# store current working directory to return here later
+current_dir=$(pwd)
+
+# load the spack environment variables
+source /opt/spack/share/spack/setup-env.sh
+
+# load libpressio in spack to make sure we are using the correct Python
+spack load libpressio
+
+# install local version of pySDC and other dependencies
+python -m pip install --upgrade pip
+cd /pySDC
+pip install -e .
+python -m pip install pytest
+python -m pip install coverage
+python -m pip install mpi4py
+
+# go back to original working directory
+cd $current_dir
diff --git a/pySDC/projects/compression/README.rst b/pySDC/projects/compression/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..393d594c63734545e7dcee2a379ad5b33318c4be
--- /dev/null
+++ b/pySDC/projects/compression/README.rst
@@ -0,0 +1,70 @@
+Compression in pySDC
+--------------------
+
+This project aims to implement compression in pySDC in whatever way proves to be useful.
+It is a collaboration between Clemson University, Argonne National Laboratory and Forschungszentrum Juelich under the umbrella of `JLESC <https://jlesc.github.io>`_.
+See the project web page `here <https://jlesc.github.io/projects/lossy-compress-linear-algebra/>`_.
+
+Introduction
+____________
+PDE solvers provide ample opportunity for compression to improve certain aspects of the code.
+See for instance `Sebastian Goetschel's and Martin Weiser's nice review <https://doi.org/10.3390/a12090197>`_ on the topic.
+
+Due to current hardware trends, codes are often memory bound rather than compute bound, so computational resources may be more wisely spent on compression, reducing memory traffic and leveraging more performance.
+This applies, in particular, to distributed systems where interconnect speeds are yet slower than memory access speeds.
+PinT algorithms target large-scale distributed systems, since concurrency in the time direction usually comes with lower parallel efficiency than in the space direction and is hence best implemented on top of spatial parallelism; this makes them ideal candidates to benefit from compression.
+
+SDC, in particular, is a collocation method, which approximates the integral in time by a weighted sum of right hand side evaluations at intermediate time points (collocation nodes).
+All of these need to be available in memory during the sweeps, where the solutions at the collocation nodes are updated.
+For PDEs, this can be substantial in size and the algorithm could benefit greatly from compressing these data.
+As right hand side evaluations at individual collocation nodes are required during the sweeps, they either need to be compressed separately, or random access needs to be maintained.
+
+In parallel computations based on decomposition of the domain, the interface needs to be communicated between processes.
+For PinT, this interface corresponds to the solution of the time step allotted to a process, which becomes the initial conditions for the next process.
+As this is a large object, communication can become expensive, and compressed communication can speed this up.
+
+We are also interested in compression with respect to resilience.
+Lossy compression introduces inexactness in exchange for significantly greater compression factors than lossless compression, and we want to answer the question of how much inexactness we can afford while maintaining the accuracy we desire from the final outcome.
+This is interesting for algorithms detecting soft faults.
+Picture SDC in an early iteration, where the solution is not yet converged and a soft fault occurs.
+A resilient algorithm might trigger a costly restart which is unnecessary, as the impact of the soft fault may not be noticeable in the converged solution.
+
+Opportunities for compression to be useful in PDE solvers are endless.
+We will see how the project progresses and update this page accordingly.
+
+
+Methods
+_______
+Since pySDC is a prototyping library, it provides a good playground to easily implement compression.
+However, we may not be able to measure a reduction in memory footprint due to Python's memory management.
+
+For compression, we use the `libpressio <https://github.com/robertu94/libpressio>`_ library maintained by Robert Underwood at Argonne National Laboratory.
+As a starting point we use the `SZ3 <https://github.com/szcompressor/SZ3>`_ compressor.
+We use a Docker container with an installation of libpressio and pySDC working together.
+See the `guide <https://github.com/Parallel-in-Time/pySDC/tree/master/pySDC/projects/compression/Docker>`_ on how to use the container.
+
+
+Proof of Concept
+________________
+For a proof of concept, we take the solution and right hand sides and compress and immediately decompress them every time they get updated during the sweeps.
+While this provides no benefit, it should capture the downsides of compression.
+We measure the local order of accuracy in time and verify that it increases by one with each sweep for an advection problem.
+While the order is typically only maintained up to machine precision or the discretization error, we now find that the accuracy stalls at the error bound that we set for the compressor.
+See below for corresponding figures, where the difference between the colored lines is the number of SDC iterations and the dashed line marks the error bound for SZ3.
+
+.. image:: ../../../data/compression_order_time_advection_d=1.00e-06_n=1_MPI=False.png
+   :width: 45%
+
+.. 
image:: ../../../data/compression_order_time_advection_d=1.00e-06_n=1_MPI=True.png + :width: 45% + +It has recently been `demonstrated <https://tore.tuhh.de/handle/11420/12370>`_ that the small scale PinT algorithm Block Gauss Seidel SDC maintains the order of single step SDC, so we can repeat the same test but with multiple processors, each with their own time step to solve: + +.. image:: ../../../data/compression_order_time_advection_d=1.00e-06_n=4_MPI=False.png + :width: 45% + +.. image:: ../../../data/compression_order_time_advection_d=1.00e-06_n=4_MPI=True.png + :width: 45% + +The above plots showcase that both time-serial SDC as well as time-parallel Block Gauss-Seidel SDC do not suffer from compression when the compression error bound is below other numerical errors and that both the MPI and simulated parallelism versions work. +After establishing that the downsides of compression can be controlled, it remains to apply compression in a manner that is beneficial to the algorithm. diff --git a/pySDC/projects/compression/compression_convergence_controller.py b/pySDC/projects/compression/compression_convergence_controller.py new file mode 100644 index 0000000000000000000000000000000000000000..10e431942b37db364b80957e2eacb665ed83c6d4 --- /dev/null +++ b/pySDC/projects/compression/compression_convergence_controller.py @@ -0,0 +1,53 @@ +from pySDC.core.ConvergenceController import ConvergenceController +import numpy as np + +np.bool = np.bool_ +import libpressio + + +class Compression(ConvergenceController): + def setup(self, controller, params, description, **kwargs): + default_compressor_args = { + # configure which compressor to use + "compressor_id": "sz3", + # configure the set of metrics to be gathered + "early_config": {"pressio:metric": "composite", "composite:plugins": ["time", "size", "error_stat"]}, + # configure SZ + "compressor_config": { + "pressio:abs": 1e-10, + }, + } + + defaults = { + 'control_order': 0, + **super().setup(controller, params, description, **kwargs), + 'compressor_args': {**default_compressor_args, **params.get('compressor_args', {})}, + 'min_buffer_length': 12, + } + + self.compressor = libpressio.PressioCompressor.from_config(defaults['compressor_args']) + + return defaults + + def post_iteration_processing(self, controller, S, **kwargs): + """ + Replace the solution by the compressed value + """ + assert len(S.levels) == 1 + lvl = S.levels[0] + prob = lvl.prob + nodes = np.append(0, lvl.sweep.coll.nodes) + + encode_buffer = np.zeros(max([len(lvl.u[0]), self.params.min_buffer_length])) + decode_buffer = np.zeros_like(encode_buffer) + + for i in range(len(lvl.u)): + encode_buffer[: len(lvl.u[i])] = lvl.u[i][:] + comp_data = self.compressor.encode(encode_buffer) + decode_buffer = self.compressor.decode(comp_data, decode_buffer) + + lvl.u[i][:] = decode_buffer[: len(lvl.u[i])] + lvl.f[i] = prob.eval_f(lvl.u[i], lvl.time + lvl.dt * nodes[i]) + + # metrics = self.compressor.get_metrics() + # print(metrics) diff --git a/pySDC/projects/compression/order.py b/pySDC/projects/compression/order.py new file mode 100644 index 0000000000000000000000000000000000000000..1f10d8a37b61eba3f4a2bb1ff12d77efffac10fe --- /dev/null +++ b/pySDC/projects/compression/order.py @@ -0,0 +1,166 @@ +import numpy as np +import matplotlib.pyplot as plt + +from pySDC.projects.Resilience.advection import run_advection + +from pySDC.helpers.stats_helper import get_sorted +from pySDC.helpers.plot_helper import figsize_by_journal +import pySDC.implementations.hooks.log_errors as error_hooks + 
+from pySDC.projects.compression.compression_convergence_controller import Compression
+
+MACHINEPRECISION = (
+    1e-8  # generous tolerance below which we ascribe errors to floating point rounding errors rather than compression
+)
+LOGGER_LEVEL = 30
+
+
+def single_run(problem, description=None, thresh=1e-10, Tend=2e-1, useMPI=False, num_procs=1):
+    description = {} if description is None else description
+
+    compressor_args = {}
+    compressor_args['compressor_config'] = {'pressio:abs': thresh}
+
+    if thresh > 0:
+        description['convergence_controllers'] = {Compression: {'compressor_args': compressor_args}}
+
+    controller_params = {
+        'mssdc_jac': False,
+        'logger_level': LOGGER_LEVEL,
+    }
+
+    error_hook = error_hooks.LogGlobalErrorPostRunMPI if useMPI else error_hooks.LogGlobalErrorPostRun
+
+    stats, _, _ = problem(
+        custom_description=description,
+        hook_class=error_hook,
+        Tend=Tend,
+        use_MPI=useMPI,
+        num_procs=num_procs,
+        custom_controller_params=controller_params,
+    )
+
+    if useMPI:
+        from mpi4py import MPI
+
+        comm = MPI.COMM_WORLD
+    else:
+        comm = None
+    e = min([me[1] for me in get_sorted(stats, type='e_global_post_run', comm=comm)])
+    return e
+
+
+def multiple_runs(problem, values, expected_order, mode='dt', thresh=1e-10, useMPI=False, num_procs=1, **kwargs):
+    errors = np.zeros_like(values)
+
+    description = {
+        'level_params': {},
+        'problem_params': {},
+        'step_params': {},
+    }
+    if mode == 'dt':
+        description['step_params'] = {'maxiter': expected_order}
+    elif mode == 'nvars':
+        description['problem_params'] = {'order': expected_order}
+
+    for i in range(len(values)):
+        if mode == 'dt':
+            description['level_params']['dt'] = values[i]
+            Tend = values[i] * (5 if num_procs == 1 else 2 * num_procs)
+        elif mode == 'nvars':
+            description['problem_params']['nvars'] = values[i]
+            Tend = 2e-1
+
+        errors[i] = single_run(problem, description, thresh=thresh, Tend=Tend, useMPI=useMPI, num_procs=num_procs)
+    return values, errors
+
+
+def get_order(values, errors, thresh=1e-16, expected_order=None):
+    values = np.array(values)
+    idx = np.argsort(values)
+    local_orders = np.log(errors[idx][1:] / errors[idx][:-1]) / np.log(values[idx][1:] / values[idx][:-1])
+    order = np.mean(local_orders[errors[idx][1:] > max([thresh, MACHINEPRECISION])])
+    if expected_order is not None:
+        assert np.isclose(order, expected_order, atol=0.5), f"Expected order {expected_order}, but got {order:.2f}!"
+ return order + + +def plot_order(values, errors, ax, thresh=1e-16, color='black', expected_order=None, **kwargs): + values = np.array(values) + order = get_order(values, errors, thresh=thresh, expected_order=expected_order) + ax.scatter(values, errors, color=color, **kwargs) + ax.loglog(values, errors[0] * (values / values[0]) ** order, color=color, label=f'p={order:.2f}', **kwargs) + + +def plot_order_in_time(ax, thresh, useMPI=False, num_procs=1): + problem = run_advection + + base_configs_dt = { + 'values': np.array([2.0 ** (-i) for i in [2, 3, 4, 5, 6, 7, 8, 9]]), + 'mode': 'dt', + 'ax': ax, + 'thresh': thresh, + } + + configs_dt = {} + configs_dt[2] = {**base_configs_dt, 'color': 'black'} + configs_dt[3] = {**base_configs_dt, 'color': 'magenta'} + configs_dt[4] = {**base_configs_dt, 'color': 'teal'} + configs_dt[5] = {**base_configs_dt, 'color': 'orange'} + # configs_dt[6] = {**base_configs_dt, 'color': 'blue'} + + for key in configs_dt.keys(): + values, errors = multiple_runs( + problem, expected_order=key, useMPI=useMPI, **configs_dt[key], num_procs=num_procs + ) + plot_order( + values, + errors, + ax=configs_dt[key]['ax'], + thresh=configs_dt[key]['thresh'] * 1e2, + color=configs_dt[key]['color'], + expected_order=key + 1, + ) + base_configs_dt['ax'].set_xlabel(r'$\Delta t$') + base_configs_dt['ax'].set_ylabel('local error') + base_configs_dt['ax'].axhline( + base_configs_dt['thresh'], color='grey', ls='--', label=rf'$\|\delta\|={{{thresh:.0e}}}$' + ) + base_configs_dt['ax'].legend(frameon=False) + + +def order_in_time_different_error_bounds(): + fig, axs = plt.subplots( + 2, 2, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1.0, 1.0), sharex=True, sharey=True + ) + threshs = [1e-6, 1e-8, 1e-10, 1e-12] + + for i in range(len(threshs)): + ax = axs.flatten()[i] + plot_order_in_time(ax, threshs[i]) + if i != 2: + ax.set_ylabel('') + ax.set_xlabel('') + fig.suptitle('Order in time for advection problem') + fig.tight_layout() + fig.savefig('compression-order-time.pdf') + + +if __name__ == '__main__': + order_in_time_different_error_bounds() + + # base_configs_nvars = { + # 'values': [128, 256, 512, 1024], + # # 'values': np.array([2**(i) for i in [4, 5, 6, 7, 8, 9]]), + # 'mode': 'nvars', + # } + + # configs_nvars = {} + # configs_nvars[2] = {**base_configs_nvars, 'color': 'black'} + # configs_nvars[4] = {**base_configs_nvars, 'color': 'magenta'} + + # for key in configs_nvars.keys(): + # values, errors = multiple_runs(problem, expected_order=key, **configs_nvars[key]) + # plot_order(values, errors, axs[1], color=configs_nvars[key]['color']) + + plt.show() diff --git a/pySDC/projects/parallelSDC/generic_implicit_MPI.py b/pySDC/projects/parallelSDC/generic_implicit_MPI.py index 24f061667b8ee3054619b70d3b5200c7727c9a7e..ce43e76f6889a593f7cb025fc9234c226dbc1417 100644 --- a/pySDC/projects/parallelSDC/generic_implicit_MPI.py +++ b/pySDC/projects/parallelSDC/generic_implicit_MPI.py @@ -125,14 +125,23 @@ class generic_implicit_MPI(sweeper): return None - def compute_residual(self): + def compute_residual(self, stage=None): """ Computation of the residual using the collocation matrix Q + + Args: + stage (str): The current stage of the step the level belongs to """ # get current level and problem description L = self.level + # Check if we want to skip the residual computation to gain performance + # Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual! 
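+        # `skip_residual_computation` contains stage names, e.g. the tuple
+        # ('IT_CHECK', 'IT_FINE', 'IT_COARSE', 'IT_DOWN', 'IT_UP') passed via
+        # the sweeper parameters in the efficient sweeper tests of this PR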
+        if stage in self.params.skip_residual_computation:
+            L.status.residual = 0.0 if L.status.residual is None else L.status.residual
+            return None
+
         # check if there are new values (e.g. from a sweep)
         # assert L.status.updated
diff --git a/pySDC/tests/test_Runge_Kutta_sweeper.py b/pySDC/tests/test_Runge_Kutta_sweeper.py
index 2ffbcac5c50e22338fb037afbd71f29f5798b6b3..ed61d8ad70335434a546cbbd9124120de42cf9e5 100644
--- a/pySDC/tests/test_Runge_Kutta_sweeper.py
+++ b/pySDC/tests/test_Runge_Kutta_sweeper.py
@@ -1,36 +1,35 @@
-import matplotlib.pyplot as plt
-import numpy as np
 import pytest
 
-from pySDC.implementations.sweeper_classes.Runge_Kutta import (
-    RK1,
-    RK4,
-    MidpointMethod,
-    CrankNicholson,
-    Cash_Karp,
-    Heun_Euler,
-    DIRK34,
-)
-from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK
-from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedErrorNonMPI
-from pySDC.helpers.stats_helper import get_sorted
-
-
-colors = {
-    RK1: 'blue',
-    MidpointMethod: 'red',
-    RK4: 'orange',
-    CrankNicholson: 'purple',
-    Cash_Karp: 'teal',
+COLORS = {
+    'RK1': 'blue',
+    'MidpointMethod': 'red',
+    'RK4': 'orange',
+    'CrankNicholson': 'purple',
+    'Cash_Karp': 'teal',
 }
 
 
-def plot_order(sweeper, prob, dt_list, description=None, ax=None, Tend_fixed=None, implicit=True):
+def get_sweeper(sweeper_name):
+    """
+    Retrieve a sweeper from a name
+
+    Args:
+        sweeper_name (str): Name of the sweeper class to retrieve
+
+    Returns:
+        pySDC.Sweeper.RungeKutta: The sweeper
+    """
+    import pySDC.implementations.sweeper_classes.Runge_Kutta as RK
+
+    return getattr(RK, sweeper_name)
+
+
+def plot_order(sweeper_name, prob, dt_list, description=None, ax=None, Tend_fixed=None, implicit=True):
     """
     Make a plot of the order of the scheme and test if it has the correct order
 
     Args:
-        sweeper (pySDC.Sweeper.RungeKutta): The RK rule to try
+        sweeper_name (str): Name of the RK rule you want
         prob (function): Some function that runs a pySDC problem and accepts suitable parameters, see resilience project
         dt_list (list): List of step sizes to try
        description (dict): A description to use for running the problem
@@ -41,13 +40,15 @@ def plot_order(sweeper, prob, dt_list, description=None, ax=None, Tend_fixed=Non
     Returns:
         None
     """
+    import numpy as np
+    import matplotlib.pyplot as plt
     from pySDC.projects.Resilience.accuracy_check import plot_orders
 
     if ax is None:
         fig, ax = plt.subplots(1, 1)
 
-    description = dict() if description is None else description
-    description['sweeper_class'] = sweeper
+    description = {} if description is None else description
+    description['sweeper_class'] = get_sweeper(sweeper_name)
     description['sweeper_params'] = {'implicit': implicit}
     description['step_params'] = {'maxiter': 1}
 
@@ -67,32 +68,32 @@ def plot_order(sweeper, prob, dt_list, description=None, ax=None, Tend_fixed=Non
 
     # check if we got the expected order for the local error
     orders = {
-        RK1: 2,
-        MidpointMethod: 3,
-        RK4: 5,
-        CrankNicholson: 3,
-        Cash_Karp: 6,
+        'RK1': 2,
+        'MidpointMethod': 3,
+        'RK4': 5,
+        'CrankNicholson': 3,
+        'Cash_Karp': 6,
     }
     numerical_order = float(ax.get_lines()[-1].get_label()[7:])
-    expected_order = orders.get(sweeper, numerical_order)
+    expected_order = orders.get(sweeper_name, numerical_order)
     assert np.isclose(
         numerical_order, expected_order, atol=2.6e-1
     ), f"Expected order {expected_order}, got {numerical_order}!"
 
     # decorate
-    ax.get_lines()[-1].set_color(colors.get(sweeper, 'black'))
+    ax.get_lines()[-1].set_color(COLORS.get(sweeper_name, 'black'))
 
-    label = f'{sweeper.__name__} - {ax.get_lines()[-1].get_label()[5:]}'
+    label = f'{sweeper_name} - {ax.get_lines()[-1].get_label()[5:]}'
     ax.get_lines()[-1].set_label(label)
     ax.legend(frameon=False)
 
 
-def plot_stability_single(sweeper, ax=None, description=None, implicit=True, re=None, im=None, crosshair=True):
+def plot_stability_single(sweeper_name, ax=None, description=None, implicit=True, re=None, im=None, crosshair=True):
     """
     Plot the domain of stability for a single RK rule.
 
     Args:
-        sweeper (pySDC.Sweeper.RungeKutta)
+        sweeper_name (str): Name of the RK rule you want
         ax: Somewhere to plot
         description (dict): A description to use for running the problem
         implicit (bool): Whether to use implicit or explicit versions of RK rules
@@ -103,13 +104,15 @@ def plot_stability_single(sweeper, ax=None, description=None, implicit=True, re=
     Returns:
         None
     """
+    import numpy as np
+    import matplotlib.pyplot as plt
     from pySDC.projects.Resilience.dahlquist import run_dahlquist, plot_stability
 
     if ax is None:
         fig, ax = plt.subplots(1, 1)
 
-    description = dict() if description is None else description
-    description['sweeper_class'] = sweeper
+    description = {} if description is None else description
+    description['sweeper_class'] = get_sweeper(sweeper_name)
     description['sweeper_params'] = {'implicit': implicit}
     description['step_params'] = {'maxiter': 1}
 
@@ -128,16 +131,16 @@ def plot_stability_single(sweeper, ax=None, description=None, implicit=True, re=
         custom_controller_params=custom_controller_params,
     )
     Astable = plot_stability(
-        stats, ax=ax, iter=[1], colors=[colors.get(sweeper, 'black')], crosshair=crosshair, fill=True
+        stats, ax=ax, iter=[1], colors=[COLORS.get(sweeper_name, 'black')], crosshair=crosshair, fill=True
     )
 
     # check if we think the method should be A-stable
-    Astable_methods = [RK1, CrankNicholson, MidpointMethod, DIRK34]  # only the implicit versions are A-stable
+    Astable_methods = ['RK1', 'CrankNicholson', 'MidpointMethod', 'DIRK34']  # only the implicit versions are A-stable
     assert (
-        implicit and sweeper in Astable_methods
-    ) == Astable, f"Unexpected region of stability for {sweeper.__name__} sweeper!"
+        implicit and sweeper_name in Astable_methods
+    ) == Astable, f"Unexpected region of stability for {sweeper_name} sweeper!"
- ax.get_lines()[-1].set_label(sweeper.__name__) + ax.get_lines()[-1].set_label(sweeper_name) ax.legend(frameon=False) @@ -149,10 +152,13 @@ def test_all_stability(): Returns: None """ + import numpy as np + import matplotlib.pyplot as plt + fig, axs = plt.subplots(1, 2, figsize=(11, 5)) impl = [True, False] - sweepers = [[RK1, MidpointMethod, CrankNicholson], [RK1, MidpointMethod, RK4, Cash_Karp]] + sweepers = [['RK1', 'MidpointMethod', 'CrankNicholson'], ['RK1', 'MidpointMethod', 'RK4', 'Cash_Karp']] titles = ['implicit', 'explicit'] re = np.linspace(-4, 4, 400) im = np.linspace(-4, 4, 400) @@ -163,7 +169,7 @@ def test_all_stability(): plot_stability_single(sweepers[j][i], implicit=impl[j], ax=axs[j], re=re, im=im, crosshair=crosshair[i]) axs[j].set_title(titles[j]) - plot_stability_single(DIRK34, re=re, im=im) + plot_stability_single('DIRK34', re=re, im=im) fig.tight_layout() @@ -182,6 +188,8 @@ def plot_all_orders(prob, dt_list, Tend, sweepers, implicit=True): Returns: None """ + import matplotlib.pyplot as plt + fig, ax = plt.subplots(1, 1) for i in range(len(sweepers)): plot_order(sweepers[i], prob, dt_list, Tend_fixed=Tend, ax=ax, implicit=implicit) @@ -196,10 +204,13 @@ def test_vdp(): Returns: None """ + import numpy as np from pySDC.projects.Resilience.vdp import run_vdp Tend = 7e-2 - plot_all_orders(run_vdp, Tend * 2.0 ** (-np.arange(8)), Tend, [RK1, MidpointMethod, CrankNicholson, RK4, Cash_Karp]) + plot_all_orders( + run_vdp, Tend * 2.0 ** (-np.arange(8)), Tend, ['RK1', 'MidpointMethod', 'CrankNicholson', 'RK4', 'Cash_Karp'] + ) @pytest.mark.base @@ -211,54 +222,52 @@ def test_advection(): None """ from pySDC.projects.Resilience.advection import run_advection + import numpy as np plot_all_orders( - run_advection, 1.0e-3 * 2.0 ** (-np.arange(8)), None, [RK1, MidpointMethod, CrankNicholson], implicit=True + run_advection, 1.0e-3 * 2.0 ** (-np.arange(8)), None, ['RK1', 'MidpointMethod', 'CrankNicholson'], implicit=True ) - plot_all_orders(run_advection, 1.0e-3 * 2.0 ** (-np.arange(8)), None, [RK1, MidpointMethod], implicit=False) + plot_all_orders(run_advection, 1.0e-3 * 2.0 ** (-np.arange(8)), None, ['RK1', 'MidpointMethod'], implicit=False) @pytest.mark.base -@pytest.mark.parametrize("sweeper", [Cash_Karp, Heun_Euler, DIRK34]) -def test_embedded_estimate_order(sweeper): +@pytest.mark.parametrize('sweeper_name', ['Cash_Karp', 'Heun_Euler', 'DIRK34']) +def test_embedded_estimate_order(sweeper_name): """ Test the order of embedded Runge-Kutta schemes. They are not run with adaptivity here, so we can simply vary the step size and check the embedded error estimate. 
     Args:
-        sweeper (pySDC.Sweeper.RungeKutta)
+        sweeper_name (str): Name of the RK rule you want
 
     Returns:
         None
     """
+    import numpy as np
+    import matplotlib.pyplot as plt
     from pySDC.projects.Resilience.vdp import run_vdp
     from pySDC.projects.Resilience.accuracy_check import plot_all_errors
+    from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
 
     fig, ax = plt.subplots(1, 1)
 
     # change only the things in the description that we need for adaptivity
-    convergence_controllers = dict()
-    convergence_controllers[EstimateEmbeddedErrorNonMPI] = {}
+    convergence_controllers = {}
+    convergence_controllers[EstimateEmbeddedError] = {}
 
-    description = dict()
+    description = {}
     description['convergence_controllers'] = convergence_controllers
-    description['sweeper_class'] = sweeper
+    description['sweeper_class'] = get_sweeper(sweeper_name)
     description['step_params'] = {'maxiter': 1}
 
     custom_controller_params = {'logger_level': 40}
 
-    expected_order = {
-        Cash_Karp: [5],
-        Heun_Euler: [2],
-        DIRK34: [4],
-    }
-
     Tend = 7e-2
     dt_list = Tend * 2.0 ** (-np.arange(8))
 
     prob = run_vdp
     plot_all_errors(
         ax,
-        expected_order.get(sweeper, None),
+        [get_sweeper(sweeper_name).get_update_order()],
         True,
         Tend_fixed=Tend,
         custom_description=description,
@@ -276,22 +285,26 @@ def test_embedded_method():
     Returns:
         None
     """
+    import numpy as np
+    import matplotlib.pyplot as plt
     from pySDC.projects.Resilience.vdp import run_vdp, plot_step_sizes
+    from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK
+    from pySDC.helpers.stats_helper import get_sorted
 
-    sweeper = Cash_Karp
+    sweeper_name = 'Cash_Karp'
     fig, ax = plt.subplots(1, 1)
 
     # change only the things in the description that we need for adaptivity
-    adaptivity_params = dict()
+    adaptivity_params = {}
     adaptivity_params['e_tol'] = 1e-7
     adaptivity_params['update_order'] = 5
 
-    convergence_controllers = dict()
+    convergence_controllers = {}
     convergence_controllers[AdaptivityRK] = adaptivity_params
 
-    description = dict()
+    description = {}
     description['convergence_controllers'] = convergence_controllers
-    description['sweeper_class'] = sweeper
+    description['sweeper_class'] = get_sweeper(sweeper_name)
     description['step_params'] = {'maxiter': 1}
 
     custom_controller_params = {'logger_level': 40}
 
@@ -309,8 +322,8 @@
 
 if __name__ == '__main__':
     test_embedded_method()
-    test_embedded_estimate_order(Cash_Karp)
+    for sweep in ['Cash_Karp', 'Heun_Euler', 'DIRK34']:
+        test_embedded_estimate_order(sweep)
     test_vdp()
     test_advection()
     test_all_stability()
-
     plt.show()
diff --git a/pySDC/tests/test_convergence_controllers/test_Newton_inexactness.py b/pySDC/tests/test_convergence_controllers/test_Newton_inexactness.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea24c613140aaf4a7617695144517c50a720a7ef
--- /dev/null
+++ b/pySDC/tests/test_convergence_controllers/test_Newton_inexactness.py
@@ -0,0 +1,109 @@
+import pytest
+
+
+@pytest.mark.base
+def test_Newton_inexactness(ratio=1e-2, min_tol=1e-11, max_tol=1e-6):
+    import numpy as np
+    from pySDC.implementations.convergence_controller_classes.inexactness import NewtonInexactness
+    from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
+    from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
+    from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
+    from pySDC.helpers.stats_helper import get_sorted, filter_stats
+    from pySDC.core.Hooks 
import hooks + + class log_newton_tol(hooks): + def pre_iteration(self, step, level_number): + lvl = step.levels[level_number] + self.add_to_stats( + process=step.status.slot, + time=step.time, + level=level_number, + iter=step.status.iter, + sweep=lvl.status.sweep, + type='newton_tol_post_spread', + value=lvl.prob.newton_tol, + ) + + def post_iteration(self, step, level_number): + lvl = step.levels[level_number] + self.add_to_stats( + process=step.status.slot, + time=step.time, + level=level_number, + iter=step.status.iter, + sweep=lvl.status.sweep, + type='newton_tol', + value=lvl.prob.newton_tol, + ) + + # initialize level parameters + level_params = {} + level_params['dt'] = 1e-2 + level_params['restol'] = 1e-10 + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'LU' + + problem_params = { + 'mu': 5.0, + 'newton_tol': 1e-9, + 'newton_maxiter': 99, + 'u0': np.array([2.0, 0.0]), + } + + # initialize step parameters + step_params = {} + step_params['maxiter'] = 99 + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 30 + controller_params['hook_class'] = log_newton_tol + controller_params['mssdc_jac'] = False + + convergence_controllers = {} + convergence_controllers[NewtonInexactness] = {'ratio': ratio, 'min_tol': min_tol, 'max_tol': max_tol} + + # fill description dictionary for easy step instantiation + description = {} + description['problem_class'] = vanderpol + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + description['convergence_controllers'] = convergence_controllers + + # set time parameters + t0 = 0.0 + + # instantiate controller + controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description) + + # get initial values on finest level + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + # call main function to get things done... + uend, stats = controller.run(u0=uinit, t0=0, Tend=2 * level_params['dt']) + + for me in get_sorted(stats, type='newton_tol'): + stats_now = filter_stats(stats, time=me[0]) + tols = get_sorted(stats_now, type='newton_tol', sortby='iter') + res = get_sorted(stats_now, type='residual_post_iteration', sortby='iter') + + for i in range(len(tols) - 1): + expect = res[i][1] * ratio + assert ( + tols[i + 1][1] <= expect or expect < min_tol + ), f'Expected Newton tolerance smaller {expect:.2e}, but got {tols[i+1][1]:.2e} in iteration {i+1}!' + assert ( + tols[i + 1][1] <= max_tol + ), f'Exceeded maximal allowed Newton tolerance {max_tol:.2e} in iteration {i+1} with {tols[i+1][1]:.2e}!' 
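+
+# summary of the behaviour verified above: `NewtonInexactness` should set the
+# Newton tolerance of each iteration to at most `ratio` times the residual of
+# the previous iteration (unless that target falls below `min_tol`) and never
+# above `max_tol`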
+ + +if __name__ == "__main__": + test_Newton_inexactness() diff --git a/pySDC/tests/test_convergence_controllers/test_error_convergence_controllers.py b/pySDC/tests/test_convergence_controllers/test_error_convergence_controllers.py new file mode 100644 index 0000000000000000000000000000000000000000..d75932a09248f56613e73ba2e73b4bb5703c8716 --- /dev/null +++ b/pySDC/tests/test_convergence_controllers/test_error_convergence_controllers.py @@ -0,0 +1,231 @@ +import pytest + + +def run_problem(maxiter=1, num_procs=1, n_steps=1, error_estimator=None, params=None, restol=-1): + import numpy as np + from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + from pySDC.implementations.hooks.log_errors import ( + LogLocalErrorPostIter, + LogGlobalErrorPostIter, + LogLocalErrorPostStep, + ) + + # initialize level parameters + level_params = {} + level_params['dt'] = 6e-3 + level_params['restol'] = restol + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'IE' + # sweeper_params['initial_guess'] = 'random' + + # build lambdas + re = np.linspace(-30, -1, 10) + im = np.linspace(-50, 50, 11) + lambdas = np.array([[complex(re[i], im[j]) for i in range(len(re))] for j in range(len(im))]).reshape( + (len(re) * len(im)) + ) + + problem_params = { + 'lambdas': lambdas, + 'u0': 1.0 + 0.0j, + } + + # initialize step parameters + step_params = dict() + step_params['maxiter'] = maxiter + + # convergence controllers + convergence_controllers = {error_estimator: params} + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 15 + controller_params['hook_class'] = [LogLocalErrorPostIter, LogGlobalErrorPostIter, LogLocalErrorPostStep] + controller_params['mssdc_jac'] = False + + # fill description dictionary for easy step instantiation + description = {} + description['problem_class'] = testequation0d + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + description['convergence_controllers'] = convergence_controllers + + # set time parameters + t0 = 0.0 + + # instantiate controller + controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description) + + # get initial values on finest level + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + # call main function to get things done... 
+    uend, stats = controller.run(u0=uinit, t0=t0, Tend=n_steps * level_params['dt'])
+    return stats
+
+
+@pytest.mark.base
+def test_EstimateExtrapolationErrorNonMPI_serial(order_time_marching=2, n_steps=3, thresh=0.15):
+    from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
+        EstimateExtrapolationErrorNonMPI,
+    )
+    from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
+
+    params = {
+        'no_storage': False,
+    }
+    preparatory_steps = (order_time_marching + 3) // 2
+
+    stats = run_problem(
+        maxiter=order_time_marching,
+        n_steps=n_steps + preparatory_steps,
+        error_estimator=EstimateExtrapolationErrorNonMPI,
+        params=params,
+        num_procs=1,
+    )
+
+    e_local = sort_stats(filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching), sortby='time')
+    e_estimated = get_sorted(stats, type='error_extrapolation_estimate')
+
+    rel_diff = [
+        abs(e_local[i][1] - e_estimated[i][1]) / e_estimated[i][1]
+        for i in range(len(e_estimated))
+        if e_estimated[i][1] is not None
+    ]
+    assert all(
+        me < thresh for me in rel_diff
+    ), f'Extrapolated error estimate failed! Relative difference to true error: {rel_diff}'
+
+
+@pytest.mark.base
+@pytest.mark.parametrize('no_storage', [True, False])
+def test_EstimateExtrapolationErrorNonMPI_parallel(
+    no_storage, order_time_marching=4, n_steps=3, num_procs=3, thresh=0.50
+):
+    from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
+        EstimateExtrapolationErrorNonMPI,
+    )
+    from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
+
+    params = {
+        'no_storage': no_storage,
+    }
+    preparatory_steps = (order_time_marching + 3) // 2
+
+    if no_storage:
+        num_procs = max(num_procs, preparatory_steps + 1)
+
+    stats = run_problem(
+        maxiter=order_time_marching,
+        n_steps=n_steps + preparatory_steps,
+        error_estimator=EstimateExtrapolationErrorNonMPI,
+        params=params,
+        num_procs=num_procs,
+    )
+
+    e_local = sort_stats(filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching), sortby='time')
+    e_estimated = get_sorted(stats, type='error_extrapolation_estimate')
+
+    rel_diff = [
+        abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1]
+        for i in range(len(e_estimated))
+        if e_estimated[i][1] is not None
+    ]
+    assert all(
+        me < thresh for me in rel_diff
+    ), f'Extrapolated error estimate failed! Relative difference to true error: {rel_diff}'
+
+
+@pytest.mark.base
+def test_EstimateEmbeddedErrorSerial(order_time_marching=3, n_steps=6, thresh=0.05):
+    from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
+    from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
+
+    params = {}
+
+    stats = run_problem(
+        maxiter=order_time_marching, n_steps=n_steps, error_estimator=EstimateEmbeddedError, params=params, num_procs=1
+    )
+
+    e_local = sort_stats(
+        filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching - 1), sortby='time'
+    )
+    e_estimated = get_sorted(stats, type='error_embedded_estimate')
+
+    rel_diff = [abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1] for i in range(len(e_estimated))]
+
+    assert all(
+        me < thresh for me in rel_diff
+    ), f'Embedded error estimate failed! 
Relative difference to true error: {rel_diff}' + + +@pytest.mark.base +def test_EstimateEmbeddedErrorParallel(order_time_marching=3, num_procs=3, thresh=0.10): + from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError + from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats + + params = {} + + stats = run_problem( + maxiter=order_time_marching, + n_steps=num_procs, + error_estimator=EstimateEmbeddedError, + params=params, + num_procs=num_procs, + ) + + e_global = sort_stats( + filter_stats(stats, type='e_global_post_iteration', iter=order_time_marching - 1), sortby='time' + ) + e_estimated = get_sorted(stats, type='error_embedded_estimate') + + rel_diff = [abs(e_global[i][1] - e_estimated[i][1]) / e_global[i][1] for i in range(len(e_estimated))] + + assert all( + me < thresh for me in rel_diff + ), f'Embedded error estimate failed! Relative difference to true error: {rel_diff}' + + +@pytest.mark.base +def test_EstimateEmbeddedErrorCollocation(n_steps=6, thresh=0.01): + from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import ( + EstimateEmbeddedErrorCollocation, + ) + from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats + + adaptive_coll_params = { + 'num_nodes': [3, 2], + } + params = {'adaptive_coll_params': adaptive_coll_params} + + stats = run_problem( + maxiter=99, + n_steps=n_steps, + error_estimator=EstimateEmbeddedErrorCollocation, + params=params, + num_procs=1, + restol=1e-13, + ) + + e_estimated = get_sorted(stats, type='error_embedded_estimate_collocation') + e_local = sort_stats(filter_stats(stats, type='e_local_post_step'), sortby='time') + + rel_diff = [abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1] for i in range(len(e_estimated))] + + assert all( + me < thresh for me in rel_diff + ), f'Embedded error estimate failed! 
Relative difference to true error: {rel_diff}' + + +if __name__ == '__main__': + test_EstimateEmbeddedErrorCollocation() diff --git a/pySDC/tests/test_hooks/test_log_work.py b/pySDC/tests/test_hooks/test_log_work.py new file mode 100644 index 0000000000000000000000000000000000000000..8d65d5675fbca4bb46808f329793e0068444ee5b --- /dev/null +++ b/pySDC/tests/test_hooks/test_log_work.py @@ -0,0 +1,145 @@ +import pytest + + +def run_Lorenz(useMPI, maxiter=4, newton_maxiter=5, num_procs=1): + from pySDC.implementations.hooks.log_work import LogWork + from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit + from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor + from pySDC.helpers.stats_helper import get_sorted + + num_steps = 2 + + # initialize level parameters + level_params = {} + level_params['dt'] = 1e-2 + level_params['restol'] = -1 + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 1 + sweeper_params['QI'] = 'IE' + + problem_params = { + 'newton_tol': -1, # force to iterate to `newton_maxiter` + 'newton_maxiter': newton_maxiter, + } + + # initialize step parameters + step_params = {} + step_params['maxiter'] = maxiter + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 30 + controller_params['hook_class'] = LogWork + controller_params['mssdc_jac'] = False + + # fill description dictionary for easy step instantiation + description = {} + description['problem_class'] = LorenzAttractor + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + + # set time parameters + t0 = 0.0 + + # instantiate controller + if useMPI: + from mpi4py import MPI + from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + + comm = MPI.COMM_WORLD + num_procs = comm.size + + controller = controller_MPI(controller_params=controller_params, description=description, comm=comm) + P = controller.S.levels[0].prob + else: + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + + comm = None + controller = controller_nonMPI( + num_procs=num_procs, controller_params=controller_params, description=description + ) + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=num_steps * num_procs * level_params['dt']) + + for i in range(num_procs): + res = { + key: [me[1] for me in get_sorted(stats, type=key, comm=comm, process=i)] + for key in ['work_newton', 'work_rhs'] + } + + expected = {} + if i == 0: + # we evaluate all nodes when beginning the step and then every node except the initial conditions in every iteration + expected['work_rhs'] = maxiter * sweeper_params['num_nodes'] + sweeper_params['num_nodes'] + 1 + else: + # Additionally, we reevaluate what we received. Once before we start iterating and then whenever we start a new iteration and in `it_check` + expected['work_rhs'] = maxiter * (sweeper_params['num_nodes'] + 2) + sweeper_params['num_nodes'] + 2 + + expected['work_newton'] = newton_maxiter * sweeper_params['num_nodes'] * maxiter + + for key, val in res.items(): + assert all( + me == expected[key] for me in val + ), f'Error in LogWork hook when recording \"{key}\" for process {i}! Got {val}, expected {expected[key]}!' 
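+    # worked example with the defaults maxiter=4, newton_maxiter=5 and
+    # num_nodes=1: process 0 expects work_rhs = 4 * 1 + 1 + 1 = 6 right-hand
+    # side evaluations and work_newton = 5 * 1 * 4 = 20 Newton iterations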
+ + return None + + +@pytest.mark.mpi4py +@pytest.mark.parametrize("num_procs", [1, 3]) +@pytest.mark.parametrize("maxiter", [0, 3]) +@pytest.mark.parametrize("newton_maxiter", [1, 3]) +def test_LogWork_MPI(num_procs, newton_maxiter, maxiter): + import os + import subprocess + + kwargs = {} + kwargs['useMPI'] = 1 + kwargs['num_procs'] = num_procs + kwargs['newton_maxiter'] = newton_maxiter + kwargs['maxiter'] = maxiter + + # Set python path once + my_env = os.environ.copy() + my_env['PYTHONPATH'] = '../../..:.' + my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml' + + # run code with different number of MPI processes + kwargs_str = "".join([f"{key}:{item} " for key, item in kwargs.items()]) + cmd = f"mpirun -np {num_procs} python {__file__} {kwargs_str}".split() + + p = subprocess.Popen(cmd, env=my_env, cwd=".") + + p.wait() + assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % ( + p.returncode, + num_procs, + ) + + +@pytest.mark.base +@pytest.mark.parametrize("num_procs", [1, 3]) +@pytest.mark.parametrize("maxiter", [0, 3]) +@pytest.mark.parametrize("newton_maxiter", [1, 3]) +def test_LogWork_nonMPI(num_procs, newton_maxiter, maxiter): + kwargs = {} + kwargs['useMPI'] = 0 + kwargs['num_procs'] = num_procs + kwargs['newton_maxiter'] = newton_maxiter + kwargs['maxiter'] = maxiter + run_Lorenz(**kwargs) + + +if __name__ == "__main__": + import sys + + kwargs = {me.split(':')[0]: int(me.split(':')[1]) for me in sys.argv[1:]} + run_Lorenz(**kwargs) diff --git a/pySDC/tests/test_projects/test_compression/__init__.py b/pySDC/tests/test_projects/test_compression/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pySDC/tests/test_projects/test_compression/test_proof_of_concept.py b/pySDC/tests/test_projects/test_compression/test_proof_of_concept.py new file mode 100644 index 0000000000000000000000000000000000000000..29296d17a249e7b8148ea8ca22ae202a257d2cd8 --- /dev/null +++ b/pySDC/tests/test_projects/test_compression/test_proof_of_concept.py @@ -0,0 +1,63 @@ +import pytest + + +@pytest.mark.libpressio +@pytest.mark.parametrize("thresh", [1e-6, 1e-8]) +@pytest.mark.parametrize("useMPI", [True, False]) +@pytest.mark.parametrize("num_procs", [1, 4]) +def test_compression_proof_of_concept(thresh, useMPI, num_procs): + if useMPI: + import subprocess + import os + + # Setup environment + my_env = os.environ.copy() + + cmd = f"mpirun -np {num_procs} python {__file__} -t {thresh} -M {useMPI} -n {num_procs}".split() + + p = subprocess.Popen(cmd, env=my_env, cwd=".") + + p.wait() + assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % ( + p.returncode, + num_procs, + ) + else: + run_single_test(thresh=thresh, useMPI=useMPI, num_procs=num_procs) + + +def run_single_test(thresh, useMPI, num_procs): + print(f'Running with error bound {thresh} and {num_procs}. 
MPI: {useMPI}') + import matplotlib.pyplot as plt + import os + from pySDC.projects.compression.order import plot_order_in_time + + fig, ax = plt.subplots(figsize=(3, 2)) + plot_order_in_time(ax=ax, thresh=thresh, useMPI=useMPI, num_procs=num_procs) + if os.path.exists('data'): + ax.set_title(f'{num_procs} procs, {"MPI" if useMPI else "non MPI"}') + fig.savefig(f'data/compression_order_time_advection_d={thresh:.2e}_n={num_procs}_MPI={useMPI}.png', dpi=200) + + +if __name__ == '__main__': + import sys + + # defaults for arguments + num_procs = 1 + useMPI = False + thresh = -1 + + # parse command line arguments + for i in range(len(sys.argv)): + if sys.argv[i] == '-n': + num_procs = int(sys.argv[i + 1]) + elif sys.argv[i] == '-M': + useMPI = True if sys.argv[i + 1] == 'True' else False + elif sys.argv[i] == '-t': + thresh = float(sys.argv[i + 1]) + + # execute test + if '--use-subprocess' in sys.argv: + test_compression_proof_of_concept(thresh=thresh, useMPI=useMPI, num_procs=num_procs) + else: + run_single_test(thresh=thresh, useMPI=useMPI, num_procs=num_procs) diff --git a/pySDC/tests/test_projects/test_resilience/__init__.py b/pySDC/tests/test_projects/test_resilience/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pySDC/tests/test_projects/test_resilience/test_efficient_sweepers.py b/pySDC/tests/test_projects/test_resilience/test_efficient_sweepers.py new file mode 100644 index 0000000000000000000000000000000000000000..c388d4799c9435b65c5a9bb0d4c55eeea24d230c --- /dev/null +++ b/pySDC/tests/test_projects/test_resilience/test_efficient_sweepers.py @@ -0,0 +1,173 @@ +import pytest + + +def run_Lorenz(efficient, skip_residual_computation, num_procs=1): + from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.implementations.hooks.log_work import LogWork + from pySDC.projects.Resilience.sweepers import generic_implicit_efficient, generic_implicit + from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI + + # initialize level parameters + level_params = {} + level_params['dt'] = 1e-2 + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'IE' + sweeper_params['skip_residual_computation'] = ( + ('IT_CHECK', 'IT_FINE', 'IT_COARSE', 'IT_DOWN', 'IT_UP') if skip_residual_computation else () + ) + + problem_params = { + 'newton_tol': 1e-9, + 'newton_maxiter': 99, + } + + # initialize step parameters + step_params = {} + step_params['maxiter'] = 4 + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 30 + controller_params['hook_class'] = [LogSolution, LogWork, LogGlobalErrorPostRun] + controller_params['mssdc_jac'] = False + + # fill description dictionary for easy step instantiation + description = {} + description['problem_class'] = LorenzAttractor + description['problem_params'] = problem_params + description['sweeper_class'] = generic_implicit_efficient if efficient else generic_implicit + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + + # set time parameters + t0 = 0.0 + + # instantiate controller + controller = controller_nonMPI(num_procs=num_procs, 
controller_params=controller_params, description=description) + P = controller.MS[0].levels[0].prob + uinit = P.u_exact(t0) + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=1.0) + + return stats + + +def run_Schroedinger(efficient=False, num_procs=1, skip_residual_computation=False): + from pySDC.implementations.problem_classes.NonlinearSchroedinger_MPIFFT import nonlinearschroedinger_imex + from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order + from pySDC.projects.Resilience.sweepers import imex_1st_order_efficient + from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI + from pySDC.implementations.hooks.log_solution import LogSolution + from pySDC.implementations.hooks.log_work import LogWork + from pySDC.implementations.controller_classes.controller_MPI import controller_MPI + from mpi4py import MPI + + space_comm = MPI.COMM_SELF + rank = space_comm.Get_rank() + + # initialize level parameters + level_params = {} + level_params['restol'] = 1e-8 + level_params['dt'] = 1e-01 / 2 + level_params['nsweeps'] = 1 + + # initialize sweeper parameters + sweeper_params = {} + sweeper_params['quad_type'] = 'RADAU-RIGHT' + sweeper_params['num_nodes'] = 3 + sweeper_params['QI'] = 'IE' + sweeper_params['initial_guess'] = 'spread' + sweeper_params['skip_residual_computation'] = ( + ('IT_FINE', 'IT_COARSE', 'IT_DOWN', 'IT_UP') if skip_residual_computation else () + ) + + # initialize problem parameters + problem_params = {} + problem_params['nvars'] = (128, 128) + problem_params['spectral'] = False + problem_params['c'] = 1.0 + problem_params['comm'] = space_comm + + # initialize step parameters + step_params = {} + step_params['maxiter'] = 50 + + # initialize controller parameters + controller_params = {} + controller_params['logger_level'] = 30 if rank == 0 else 99 + controller_params['hook_class'] = [LogSolution, LogWork, LogGlobalErrorPostRunMPI] + controller_params['mssdc_jac'] = False + + # fill description dictionary for easy step instantiation + description = {} + description['problem_params'] = problem_params + description['problem_class'] = nonlinearschroedinger_imex + description['sweeper_class'] = imex_1st_order_efficient if efficient else imex_1st_order + description['sweeper_params'] = sweeper_params + description['level_params'] = level_params + description['step_params'] = step_params + + # set time parameters + t0 = 0.0 + + # instantiate controller + controller_args = { + 'controller_params': controller_params, + 'description': description, + } + + comm = MPI.COMM_SELF + controller = controller_MPI(**controller_args, comm=comm) + P = controller.S.levels[0].prob + uinit = P.u_exact(t0) + + uend, stats = controller.run(u0=uinit, t0=t0, Tend=1.0) + return stats + + +@pytest.mark.base +def test_generic_implicit_efficient(skip_residual_computation=True): + stats_normal = run_Lorenz(efficient=False, skip_residual_computation=skip_residual_computation) + stats_efficient = run_Lorenz(efficient=True, skip_residual_computation=skip_residual_computation) + assert_sameness(stats_normal, stats_efficient, 'generic_implicit') + + +@pytest.mark.base +def test_residual_skipping(): + stats_normal = run_Lorenz(efficient=True, skip_residual_computation=False) + stats_efficient = run_Lorenz(efficient=True, skip_residual_computation=True) + assert_sameness(stats_normal, stats_efficient, 'generic_implicit', check_residual=False) + + +@pytest.mark.mpi4py +def test_residual_skipping_with_residual_tolerance(): + stats_normal = 
run_Schroedinger(efficient=True, skip_residual_computation=False) + stats_efficient = run_Schroedinger(efficient=True, skip_residual_computation=True) + assert_sameness(stats_normal, stats_efficient, 'imex_first_order', check_residual=False) + + +@pytest.mark.mpi4py +def test_imex_first_order_efficient(): + stats_normal = run_Schroedinger(efficient=False) + stats_efficient = run_Schroedinger(efficient=True) + assert_sameness(stats_normal, stats_efficient, 'imex_first_order') + + +def assert_sameness(stats_normal, stats_efficient, sweeper_name, check_residual=True): + from pySDC.helpers.stats_helper import get_sorted, get_list_of_types + import numpy as np + + for me in get_list_of_types(stats_normal): + normal = [you[1] for you in get_sorted(stats_normal, type=me)] + if 'timing' in me or all(you is None for you in normal) or (not check_residual and 'residual' in me): + continue + assert np.allclose( + normal, [you[1] for you in get_sorted(stats_efficient, type=me)] + ), f'Stats don\'t match in type \"{me}\" for efficient and regular implementations of {sweeper_name} sweeper!' diff --git a/pySDC/tests/test_projects/test_resilience/test_extrapolated_error_within_Q.py b/pySDC/tests/test_projects/test_resilience/test_extrapolated_error_within_Q.py new file mode 100644 index 0000000000000000000000000000000000000000..f090379c6047eed0eda375cbc32adb72af7cb1d7 --- /dev/null +++ b/pySDC/tests/test_projects/test_resilience/test_extrapolated_error_within_Q.py @@ -0,0 +1,23 @@ +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize("prob_name", ['advection', 'piline']) +@pytest.mark.parametrize('num_nodes', [2, 3]) +@pytest.mark.parametrize('quad_type', ['RADAU-RIGHT', 'GAUSS']) +def test_order_extrapolation_estimate_within_Q(prob_name, num_nodes, quad_type): + from pySDC.projects.Resilience.extrapolation_within_Q import check_order + + if prob_name == 'advection': + from pySDC.projects.Resilience.advection import run_advection + + prob = run_advection + elif prob_name == 'piline': + from pySDC.projects.Resilience.piline import run_piline + + prob = run_piline + + else: + raise NotImplementedError(f'Problem \"{prob_name}\" not implemented in this test!') + + check_order(None, prob=prob, dts=[5e-1, 1e-1, 5e-2, 1e-2], num_nodes=num_nodes, quad_type=quad_type) diff --git a/pySDC/tests/test_projects/test_resilience/test_fault_injection.py b/pySDC/tests/test_projects/test_resilience/test_fault_injection.py index 94e252c4cf2f71ec529664945befad0e85172028..796176ceb65c1e48d947d8580ece2c6cdcddb998 100644 --- a/pySDC/tests/test_projects/test_resilience/test_fault_injection.py +++ b/pySDC/tests/test_projects/test_resilience/test_fault_injection.py @@ -60,7 +60,7 @@ def test_complex_conversion(): injector = FaultInjector() num_tests = int(1e3) - for i in range(num_tests): + for _i in range(num_tests): rand_complex = get_random_float() + get_random_float() * 1j # convert to bytes and back @@ -144,20 +144,13 @@ def test_fault_injection(): @pytest.mark.mpi4py +@pytest.mark.slow @pytest.mark.parametrize("numprocs", [5]) def test_fault_stats(numprocs): """ Test generation of fault statistics and their recovery rates """ import numpy as np - from pySDC.projects.Resilience.fault_stats import ( - FaultStats, - BaseStrategy, - AdaptivityStrategy, - IterateStrategy, - HotRodStrategy, - run_vdp, - ) # Set python path once my_env = os.environ.copy() @@ -174,27 +167,35 @@ def test_fault_stats(numprocs): numprocs, ) - vdp_stats = generate_stats(True) + stats = generate_stats(True) # test number of possible combinations 
for faults
+    expected_max_combinations = 3840
     assert (
-        vdp_stats.get_max_combinations() == 1536
-    ), f"Expected 1536 possible combinations for faults in van der Pol problem, but got {vdp_stats.get_max_combinations()}!"
+        stats.get_max_combinations() == expected_max_combinations
+    ), f"Expected {expected_max_combinations} possible combinations for faults in the Lorenz attractor problem, but got {stats.get_max_combinations()}!"
 
     recovered_reference = {
         'base': 1,
         'adaptivity': 2,
         'iterate': 1,
         'Hot Rod': 2,
+        'adaptivity_coll': 0,
+        'double_adaptivity': 0,
     }
-    vdp_stats.get_recovered()
+    stats.get_recovered()
 
-    for strategy in vdp_stats.strategies:
-        dat = vdp_stats.load(strategy, True)
-        fixable_mask = vdp_stats.get_fixable_faults_only(strategy)
-        recovered_mask = vdp_stats.get_mask(strategy=strategy, key='recovered', op='eq', val=True)
+    for strategy in stats.strategies:
+        dat = stats.load(strategy=strategy, faults=True)
+        fixable_mask = stats.get_fixable_faults_only(strategy)
+        recovered_mask = stats.get_mask(strategy=strategy, key='recovered', op='eq', val=True)
+        index = stats.get_index(mask=fixable_mask)
 
         assert all(fixable_mask[:-1] == [False, True, False]), "Error in generating mask of fixable faults"
+        assert all(index == [1, 3]), "Error when converting to index"
+
+        combinations = np.array(stats.get_combination_counts(dat, keys=['bit'], mask=fixable_mask))
+        assert all(combinations == [1.0, 1.0]), "Error when counting combinations"
 
         recovered = len(dat['recovered'][recovered_mask])
         crashed = len(dat['error'][dat['error'] == np.inf])  # on some systems the last run crashes...
@@ -213,30 +214,36 @@ def generate_stats(load=False):
     Returns:
         Object containing the stats
     """
-    from pySDC.projects.Resilience.fault_stats import (
-        FaultStats,
+    from pySDC.projects.Resilience.strategies import (
         BaseStrategy,
         AdaptivityStrategy,
         IterateStrategy,
         HotRodStrategy,
-        run_vdp,
     )
-    import matplotlib.pyplot as plt
+    from pySDC.projects.Resilience.fault_stats import (
+        FaultStats,
+    )
+    from pySDC.projects.Resilience.Lorenz import run_Lorenz
 
     np.seterr(all='warn')  # get consistent behaviour across platforms
 
-    vdp_stats = FaultStats(
-        prob=run_vdp,
+    stats = FaultStats(
+        prob=run_Lorenz,
         faults=[False, True],
         reload=load,
         recovery_thresh=1.1,
         num_procs=1,
         mode='random',
-        strategies=[BaseStrategy(), AdaptivityStrategy(), IterateStrategy(), HotRodStrategy()],
+        strategies=[
+            BaseStrategy(),
+            AdaptivityStrategy(),
+            IterateStrategy(),
+            HotRodStrategy(),
+        ],
        stats_path='data',
     )
-    vdp_stats.run_stats_generation(runs=4, step=2)
-    return vdp_stats
+    stats.run_stats_generation(runs=4, step=2)
+    return stats
 
 
 if __name__ == "__main__":
diff --git a/pySDC/tests/test_projects/test_resilience/test_leaky_superconductor.py b/pySDC/tests/test_projects/test_resilience/test_leaky_superconductor.py
deleted file mode 100644
index 4b41d366c4c37f3761aa4597d2e87736937f06e9..0000000000000000000000000000000000000000
--- a/pySDC/tests/test_projects/test_resilience/test_leaky_superconductor.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-
-
-@pytest.mark.base
-@pytest.mark.parametrize('leak_type', ['linear', 'exponential'])
-def test_imex_vs_fully_implicit_leaky_superconductor(leak_type):
-    """
-    Test if the IMEX and fully implicit schemes get the same solution and that the runaway process has started.
- """ - from pySDC.projects.Resilience.leaky_superconductor import compare_imex_full - - compare_imex_full(plotting=False, leak_type=leak_type) diff --git a/pySDC/tests/test_projects/test_resilience/test_order.py b/pySDC/tests/test_projects/test_resilience/test_order.py index 60138ee16a3b5de610fcbe47ba8918bea1d159a6..52d5f4b5d487c4d091f6e33a89777bb4fa654ed9 100644 --- a/pySDC/tests/test_projects/test_resilience/test_order.py +++ b/pySDC/tests/test_projects/test_resilience/test_order.py @@ -2,7 +2,21 @@ import pytest @pytest.mark.base -def test_main(): - from pySDC.projects.Resilience.accuracy_check import main +@pytest.mark.parametrize("ks", [[2], [3], [4]]) +@pytest.mark.parametrize("serial", [True, False]) +def test_order_fixed_step_size(ks, serial): + from pySDC.projects.Resilience.accuracy_check import plot_all_errors, plt - main() + fig, ax = plt.subplots() + plot_all_errors(ax, ks, serial, Tend_fixed=1.0) + + +@pytest.mark.base +@pytest.mark.parametrize("ks", [[2], [3]]) +@pytest.mark.parametrize("serial", [True, False]) +def test_order_adaptive_step_size(ks, serial): + print(locals()) + from pySDC.projects.Resilience.accuracy_check import plot_all_errors, plt + + fig, ax = plt.subplots() + plot_all_errors(ax, ks, serial, Tend_fixed=5e-1, var='e_tol', dt_list=[1e-5, 5e-6], avoid_restarts=False) diff --git a/pySDC/tests/test_projects/test_resilience/test_quench.py b/pySDC/tests/test_projects/test_resilience/test_quench.py new file mode 100644 index 0000000000000000000000000000000000000000..d020e2c2402925d06a78b0a5e90ae1ca5f411c81 --- /dev/null +++ b/pySDC/tests/test_projects/test_resilience/test_quench.py @@ -0,0 +1,26 @@ +import pytest + + +@pytest.mark.base +@pytest.mark.parametrize('leak_type', ['linear', 'exponential']) +def test_imex_vs_fully_implicit_quench(leak_type): + """ + Test if the IMEX and fully implicit schemes get the same solution and that the runaway process has started. 
+    """
+    from pySDC.projects.Resilience.quench import compare_imex_full
+
+    compare_imex_full(plotting=False, leak_type=leak_type)
+
+
+@pytest.mark.base
+def test_crossing_time_computation():
+    from pySDC.projects.Resilience.quench import run_quench, get_crossing_time
+
+    controller_params = {'logger_level': 30}
+    description = {'level_params': {'dt': 2.5e1}, 'step_params': {'maxiter': 5}}
+    stats, controller, _ = run_quench(
+        custom_controller_params=controller_params,
+        custom_description=description,
+        Tend=400,
+    )
+    _ = get_crossing_time(stats, controller, num_points=5, inter_points=155)
diff --git a/pySDC/tests/test_projects/test_resilience/test_strategies.py b/pySDC/tests/test_projects/test_resilience/test_strategies.py
new file mode 100644
index 0000000000000000000000000000000000000000..f994c362e3882a30fa1c9748cdf0b1fbb7a8d1cc
--- /dev/null
+++ b/pySDC/tests/test_projects/test_resilience/test_strategies.py
@@ -0,0 +1,97 @@
+import pytest
+
+STRATEGY_NAMES = [
+    'adaptivity',
+    'DIRK',
+    'iterate',
+    'explicitRK',
+    'doubleAdaptivity',
+    'collocationType',
+    'collocationRefinement',
+    'collocationDerefinement',
+    'adaptivityAvoidRestarts',
+    # 'adaptivityInterpolation',
+    'adaptivityQExtrapolation',
+    'base',
+]
+STRATEGY_NAMES_NONMPIONLY = ['adaptiveHR', 'HotRod']
+LOGGER_LEVEL = 30
+
+
+def single_test_vdp(strategy_name, useMPI=False, num_procs=1):
+    import numpy as np
+    from pySDC.helpers.stats_helper import get_sorted
+    from pySDC.projects.Resilience.vdp import run_vdp
+    import pySDC.projects.Resilience.strategies as strategies
+    from pySDC.implementations.hooks.log_work import LogWork
+
+    if useMPI:
+        from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI as errorhook
+    else:
+        from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun as errorhook
+
+    # load the strategy
+    avail_strategies = {
+        'adaptivity': strategies.AdaptivityStrategy(useMPI=useMPI),
+        'DIRK': strategies.DIRKStrategy(useMPI=useMPI),
+        'adaptiveHR': strategies.AdaptiveHotRodStrategy(useMPI=useMPI),
+        'iterate': strategies.IterateStrategy(useMPI=useMPI),
+        'HotRod': strategies.HotRodStrategy(useMPI=useMPI),
+        'explicitRK': strategies.ERKStrategy(useMPI=useMPI),
+        'doubleAdaptivity': strategies.DoubleAdaptivityStrategy(useMPI=useMPI),
+        'collocationRefinement': strategies.AdaptivityCollocationRefinementStrategy(useMPI=useMPI),
+        'collocationDerefinement': strategies.AdaptivityCollocationDerefinementStrategy(useMPI=useMPI),
+        'collocationType': strategies.AdaptivityCollocationTypeStrategy(useMPI=useMPI),
+        'adaptivityAvoidRestarts': strategies.AdaptivityAvoidRestartsStrategy(useMPI=useMPI),
+        'adaptivityInterpolation': strategies.AdaptivityInterpolationStrategy(useMPI=useMPI),
+        'adaptivityQExtrapolation': strategies.AdaptivityExtrapolationWithinQStrategy(useMPI=useMPI),
+        'base': strategies.BaseStrategy(useMPI=useMPI),
+    }
+
+    # look up the requested strategy by name
+    if strategy_name in avail_strategies:
+        strategy = avail_strategies[strategy_name]
+    else:
+        raise NotImplementedError(f'Strategy \"{strategy_name}\" not implemented for this test!')
+
+    prob = run_vdp
+    controller_params = {'logger_level': LOGGER_LEVEL}
+    stats, _, Tend = prob(
+        custom_description=strategy.get_custom_description(problem=prob, num_procs=num_procs),
+        hook_class=[errorhook, LogWork],
+        use_MPI=useMPI,
+        custom_controller_params=controller_params,
+    )
+
+    # things we want to test
+    tests = {
+        'e': ('e_global_post_run', max),
+        'k_newton': ('work_newton', sum),
+    }
+
+    for key, val in 
tests.items():
+        act = val[1]([me[1] for me in get_sorted(stats, type=val[0])])
+        ref = strategy.get_reference_value(prob, val[0], val[1], num_procs)
+
+        assert np.isclose(
+            ref, act, rtol=1e-2
+        ), f'Error in \"{strategy.name}\" strategy ({strategy_name})! Expected {key}={ref} but got {act}!'
+
+
+@pytest.mark.mpi4py
+@pytest.mark.parametrize('strategy_name', STRATEGY_NAMES)
+def test_strategy_with_vdp_MPI(strategy_name, num_procs=1):
+    single_test_vdp(strategy_name=strategy_name, useMPI=True, num_procs=num_procs)
+
+
+@pytest.mark.base
+@pytest.mark.parametrize('strategy_name', STRATEGY_NAMES + STRATEGY_NAMES_NONMPIONLY)
+def test_strategy_with_vdp_nonMPI(strategy_name, num_procs=1):
+    single_test_vdp(strategy_name=strategy_name, useMPI=False, num_procs=num_procs)
+
+
+if __name__ == '__main__':
+    for name in STRATEGY_NAMES + STRATEGY_NAMES_NONMPIONLY:
+        test_strategy_with_vdp_nonMPI(name)
+    for name in STRATEGY_NAMES:
+        test_strategy_with_vdp_MPI(name)
diff --git a/pySDC/tests/test_projects/test_resilience/test_vdp.py b/pySDC/tests/test_projects/test_resilience/test_vdp.py
index 4915b86586a98d1561d130542195f69c1502fd29..246bdb4db603aa870d16485eb93b5f1fba41f3f1 100644
--- a/pySDC/tests/test_projects/test_resilience/test_vdp.py
+++ b/pySDC/tests/test_projects/test_resilience/test_vdp.py
@@ -1,32 +1,35 @@
 import pytest
-import os
-import subprocess


 @pytest.mark.mpi4py
-def test_main():
+@pytest.mark.parametrize('num_procs', [1, 2, 5, 8])
+@pytest.mark.parametrize('test_name', ['mpi_vs_nonMPI', 'check_step_size_limiter'])
+def test_stuff(num_procs, test_name):
     import pySDC.projects.Resilience.vdp as vdp
+    import os
+    import subprocess

     # Set python path once
     my_env = os.environ.copy()
     my_env['PYTHONPATH'] = '../../..:.'
     my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'

-    # set list of number of parallel steps
-    num_procs_list = [1, 2, 5, 8]
-
-    # run code with different number of MPI processes
-    for num_procs in num_procs_list:
-        cmd = f"mpirun -np {num_procs} python {vdp.__file__}".split()
+    cmd = f"mpirun -np {num_procs} python {vdp.__file__} {test_name}".split()
+
+    p = subprocess.Popen(cmd, env=my_env, cwd=".")

-        p = subprocess.Popen(cmd, env=my_env, cwd=".")
+    p.wait()
+    assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (
+        p.returncode,
+        num_procs,
+    )

-        p.wait()
-        assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (
-            p.returncode,
-            num_procs,
-        )
+
+@pytest.mark.mpi4py
+def test_adaptivity_with_avoid_restarts():
+    test_stuff(1, 'adaptivity_with_avoid_restarts')


 if __name__ == "__main__":
-    test_main()
+    test_stuff(8, '')
diff --git a/pySDC/tutorial/step_3/A_getting_statistics.py b/pySDC/tutorial/step_3/A_getting_statistics.py
index deffb269b1780c0f364f80c7378a15bf5213e4c3..43c5f92219d14c52d6aaa9628cd0fff42270eb61 100644
--- a/pySDC/tutorial/step_3/A_getting_statistics.py
+++ b/pySDC/tutorial/step_3/A_getting_statistics.py
@@ -40,7 +40,7 @@ def main():

     f.close()

-    assert all([item[1] == 12 for item in iter_counts]), (
+    assert all(item[1] == 12 for item in iter_counts), (
         'ERROR: number of iterations are not as expected, got %s' % iter_counts
     )

@@ -50,32 +50,32 @@ def run_simulation():
     """
     A simple test program to run IMEX SDC for a single time step
     """
     # initialize level parameters
-    level_params = dict()
+    level_params = {}
     level_params['restol'] = 1e-10
     level_params['dt'] = 0.1

     # initialize sweeper parameters
-    sweeper_params = dict()
+    sweeper_params = {}
     sweeper_params['quad_type'] = 'RADAU-RIGHT'
     sweeper_params['num_nodes'] = 3

     # initialize problem parameters
-    problem_params = dict()
+    problem_params = {}
     problem_params['nu'] = 0.1  # diffusion coefficient
     problem_params['freq'] = 4  # frequency for the test value
     problem_params['nvars'] = 1023  # number of degrees of freedom
     problem_params['bc'] = 'dirichlet-zero'  # boundary conditions

     # initialize step parameters
-    step_params = dict()
+    step_params = {}
     step_params['maxiter'] = 20

     # initialize controller parameters (<-- this is new!)
-    controller_params = dict()
+    controller_params = {}
     controller_params['logger_level'] = 30  # reduce verbosity of each run

     # Fill description dictionary for easy hierarchy creation
-    description = dict()
+    description = {}
     description['problem_class'] = heatNd_forced
     description['problem_params'] = problem_params
     description['sweeper_class'] = imex_1st_order
diff --git a/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py b/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py
index 0c51932aed3c14bde9ed8a2cf4343d261b66ce05..44e3d935b5f370e3e02b1cfef040599c338e5cf7 100644
--- a/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py
+++ b/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py
@@ -13,33 +13,33 @@ def main():
     """
     # initialize level parameters
-    level_params = dict()
+    level_params = {}
     level_params['restol'] = 1e-10
     level_params['dt'] = 0.5

     # initialize sweeper parameters
-    sweeper_params = dict()
+    sweeper_params = {}
     sweeper_params['quad_type'] = 'RADAU-RIGHT'
     sweeper_params['num_nodes'] = [3]

     # initialize problem parameters
-    problem_params = dict()
+    problem_params = {}
     problem_params['nu'] = 0.1  # diffusion coefficient
     problem_params['freq'] = 4  # frequency for the test value
     problem_params['nvars'] = [31, 15, 7]  # number of degrees of freedom for each level
     problem_params['bc'] = 'dirichlet-zero'  # boundary conditions

     # initialize step parameters
-    step_params = dict()
+    step_params = {}
     step_params['maxiter'] = 20

     # initialize space transfer parameters
-    space_transfer_params = dict()
+    space_transfer_params = {}
     space_transfer_params['rorder'] = 2
     space_transfer_params['iorder'] = 6

     # fill description dictionary for easy step instantiation
-    description = dict()
+    description = {}
     description['problem_class'] = heatNd_forced  # pass problem class
     description['problem_params'] = problem_params  # pass problem parameters
     description['sweeper_class'] = imex_1st_order  # pass sweeper (see part B)
@@ -61,7 +61,7 @@ def main():
     print(out)
     f.close()

-    assert all([len(S.levels) == 3 for S in controller.MS]), "ERROR: not all steps have the same number of levels"
+    assert all(len(S.levels) == 3 for S in controller.MS), "ERROR: not all steps have the same number of levels"


 if __name__ == "__main__":
diff --git a/pySDC/tutorial/step_6/A_run_non_MPI_controller.py b/pySDC/tutorial/step_6/A_run_non_MPI_controller.py
index 83a62618921b7b646eab6b7eb867830b2a83ca91..e85d627a3248e1496d4c04edccfaac3266447dd4 100644
--- a/pySDC/tutorial/step_6/A_run_non_MPI_controller.py
+++ b/pySDC/tutorial/step_6/A_run_non_MPI_controller.py
@@ -64,7 +64,7 @@ def main(num_proc_list=None, fname=None, multi_level=True):
         f.write('\n')
         print()

-    assert all([item[1] <= 8 for item in iter_counts]), "ERROR: weird iteration counts, got %s" % iter_counts
+    assert all(item[1] <= 8 for item in iter_counts), "ERROR: weird iteration counts, got %s" % iter_counts

     f.close()

@@ -80,40 +80,40 @@ def set_parameters_ml():
         float: end time
     """
     # initialize level parameters
-    level_params = dict()
+    level_params = {}
     level_params['restol'] = 5e-10
     level_params['dt'] = 0.125

     # initialize sweeper parameters
-    sweeper_params = dict()
+    sweeper_params = {}
     sweeper_params['quad_type'] = 'RADAU-RIGHT'
     sweeper_params['num_nodes'] = [3]

     # initialize problem parameters
-    problem_params = dict()
+    problem_params = {}
     problem_params['nu'] = 0.1  # diffusion coefficient
     problem_params['freq'] = 2  # frequency for the test value
     problem_params['nvars'] = [63, 31]  # number of degrees of freedom for each level
     problem_params['bc'] = 'dirichlet-zero'  # boundary conditions

     # initialize step parameters
-    step_params = dict()
+    step_params = {}
     step_params['maxiter'] = 50
     step_params['errtol'] = 1e-05

     # initialize space transfer parameters
-    space_transfer_params = dict()
+    space_transfer_params = {}
     space_transfer_params['rorder'] = 2
     space_transfer_params['iorder'] = 6

     # initialize controller parameters
-    controller_params = dict()
+    controller_params = {}
     controller_params['logger_level'] = 30
     controller_params['all_to_done'] = True  # can ask the controller to keep iterating all steps until the end
     controller_params['predict_type'] = 'pfasst_burnin'  # activate iteration estimator

     # fill description dictionary for easy step instantiation
-    description = dict()
+    description = {}
     description['problem_class'] = heatNd_unforced  # pass problem class
     description['problem_params'] = problem_params  # pass problem parameters
     description['sweeper_class'] = generic_LU  # pass sweeper
@@ -141,32 +141,32 @@ def set_parameters_sl():
         float: end time
     """
     # initialize level parameters
-    level_params = dict()
+    level_params = {}
     level_params['restol'] = 5e-10
     level_params['dt'] = 0.125

     # initialize sweeper parameters
-    sweeper_params = dict()
+    sweeper_params = {}
     sweeper_params['quad_type'] = 'RADAU-RIGHT'
     sweeper_params['num_nodes'] = 3

     # initialize problem parameters
-    problem_params = dict()
+    problem_params = {}
     problem_params['nu'] = 0.1  # diffusion coefficient
     problem_params['freq'] = 2  # frequency for the test value
     problem_params['nvars'] = 63  # number of degrees of freedom for each level
     problem_params['bc'] = 'dirichlet-zero'  # boundary conditions

     # initialize step parameters
-    step_params = dict()
+    step_params = {}
     step_params['maxiter'] = 50

     # initialize controller parameters
-    controller_params = dict()
+    controller_params = {}
     controller_params['logger_level'] = 30

     # fill description dictionary for easy step instantiation
-    description = dict()
+    description = {}
     description['problem_class'] = heatNd_unforced  # pass problem class
     description['problem_params'] = problem_params  # pass problem parameters
     description['sweeper_class'] = generic_LU  # pass sweeper
diff --git a/pySDC/tutorial/step_6/playground_parallelization.py b/pySDC/tutorial/step_6/playground_parallelization.py
index a04390950786c9ccbfd39eb9ee6cafca60e8fbaa..6fa88195271b21475417736b3b7fda3fb8515dd8 100644
--- a/pySDC/tutorial/step_6/playground_parallelization.py
+++ b/pySDC/tutorial/step_6/playground_parallelization.py
@@ -70,4 +70,4 @@ if __name__ == "__main__":
         f.write('\n')
         print()

-    assert all([item[1] <= 8 for item in iter_counts]), "ERROR: weird iteration counts, got %s" % iter_counts
+    assert all(item[1] <= 8 for item in iter_counts), "ERROR: weird iteration counts, got %s" % iter_counts
diff --git a/pyproject.toml b/pyproject.toml
index 61abd39fc9280d61d3fc1cf1fb2d637ed2b4f1a0..864e75dce9450d99c21927af388f8c62a4750760 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -56,6 +56,7 @@ markers = [
     'petsc: tests relying on PETSc/petsc4py',
     'benchmark: tests for benchmarking',
     'cupy: tests for cupy on GPUs',
+    'libpressio: tests using the libpressio library',
 ]

 [tool.flakeheaven]