diff --git a/pySDC/helpers/pySDC_as_gusto_time_discretization.py b/pySDC/helpers/pySDC_as_gusto_time_discretization.py
index 33bc7228784e519b84276e36ce1aaa43386c2b89..80c686e69602bcb1f8c3e101562e479a20d2f78a 100644
--- a/pySDC/helpers/pySDC_as_gusto_time_discretization.py
+++ b/pySDC/helpers/pySDC_as_gusto_time_discretization.py
@@ -38,7 +38,6 @@ class pySDC_integrator(TimeDiscretisation):
 
     def __init__(
         self,
-        equation,
         description,
         controller_params,
         domain,
@@ -52,7 +51,6 @@ class pySDC_integrator(TimeDiscretisation):
         Initialization
 
         Args:
-            equation (:class:`PrognosticEquation`): the prognostic equation.
             description (dict): pySDC description
             controller_params (dict): pySDC controller params
             domain (:class:`Domain`): the model's domain object, containing the
@@ -96,6 +94,7 @@ class pySDC_integrator(TimeDiscretisation):
             'equation': equation,
             'solver_parameters': self.solver_parameters,
             'residual': self._residual,
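+            # user-supplied problem parameters are splatted last so that, e.g., the
+            # per-level `equation` and `residual` lists set up for multi-level runs
+            # override the defaults above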
+            **self.description['problem_params'],
         }
         self.description['level_params']['dt'] = float(self.domain.dt)
 
diff --git a/pySDC/implementations/problem_classes/HeatFiredrake.py b/pySDC/implementations/problem_classes/HeatFiredrake.py
index a2cdfdaf8af0a20c82f516837fb39ad9d9d8d361..434f53e7718fdad167fcebb147deba320b791ccc 100644
--- a/pySDC/implementations/problem_classes/HeatFiredrake.py
+++ b/pySDC/implementations/problem_classes/HeatFiredrake.py
@@ -53,7 +53,7 @@ class Heat1DForcedFiredrake(Problem):
     dtype_u = firedrake_mesh
     dtype_f = IMEX_firedrake_mesh
 
-    def __init__(self, n=30, nu=0.1, c=0.0, LHS_cache_size=12, comm=None):
+    def __init__(self, n=30, nu=0.1, c=0.0, order=4, LHS_cache_size=12, mesh=None, comm=None):
         """
         Initialization
 
@@ -61,18 +61,22 @@ class Heat1DForcedFiredrake(Problem):
             n (int): Number of degrees of freedom
             nu (float): Diffusion parameter
             c (float): Boundary condition constant
+            order (int): Order of finite elements
             LHS_cache_size (int): Size of the cache for solvers
-            comm (mpi4pi.Intracomm): MPI communicator for spatial parallelism
+            mesh (Firedrake mesh, optional): Custom mesh to use, for instance from a mesh hierarchy
+            comm (mpi4py.Intracomm, optional): MPI communicator for spatial parallelism
         """
         comm = MPI.COMM_WORLD if comm is None else comm
 
         # prepare Firedrake mesh and function space
-        self.mesh = fd.UnitIntervalMesh(n, comm=comm)
-        self.V = fd.FunctionSpace(self.mesh, "CG", 4)
+        self.mesh = fd.UnitIntervalMesh(n, comm=comm) if mesh is None else mesh
+        self.V = fd.FunctionSpace(self.mesh, "CG", order)
 
         # prepare pySDC problem class infrastructure by passing the function space to super init
         super().__init__(self.V)
-        self._makeAttributeAndRegister('n', 'nu', 'c', 'LHS_cache_size', 'comm', localVars=locals(), readOnly=True)
+        self._makeAttributeAndRegister(
+            'n', 'nu', 'c', 'order', 'LHS_cache_size', 'comm', localVars=locals(), readOnly=True
+        )
 
         # prepare caches and IO variables for solvers
         self.solvers = {}
@@ -191,6 +195,10 @@ class Heat1DForcedFiredrake(Problem):
         self.work_counters['solves']()
         return me
 
+    @fd.utils.cached_property
+    def x(self):
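+        """Cached spatial coordinate of the mesh, reused across calls to `u_exact`"""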
+        return fd.SpatialCoordinate(self.mesh)
+
     def u_exact(self, t):
         r"""
         Routine to compute the exact solution at time :math:`t`.
@@ -206,6 +214,5 @@ class Heat1DForcedFiredrake(Problem):
             Exact solution.
         """
         me = self.u_init
-        x = fd.SpatialCoordinate(self.mesh)
-        me.interpolate(np.cos(t) * fd.sin(np.pi * x[0]) + self.c)
+        me.interpolate(np.cos(t) * fd.sin(np.pi * self.x[0]) + self.c)
         return me
diff --git a/pySDC/implementations/transfer_classes/TransferFiredrakeMesh.py b/pySDC/implementations/transfer_classes/TransferFiredrakeMesh.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ef1fd17c2786785acd0a35e53c9c7c686280506
--- /dev/null
+++ b/pySDC/implementations/transfer_classes/TransferFiredrakeMesh.py
@@ -0,0 +1,124 @@
+from firedrake import assemble, prolong, inject
+from firedrake.__future__ import interpolate
+
+from pySDC.core.errors import TransferError
+from pySDC.core.space_transfer import SpaceTransfer
+from pySDC.implementations.datatype_classes.firedrake_mesh import firedrake_mesh, IMEX_firedrake_mesh
+
+
+class MeshToMeshFiredrake(SpaceTransfer):
+    """
+    This implementation can restrict and prolong between Firedrake meshes via interpolation.
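+
+    Example (a minimal sketch; following `SpaceTransfer`, the constructor takes the fine
+    problem, the coarse problem and a params dictionary):
+
+    ```
+    from pySDC.implementations.problem_classes.HeatFiredrake import Heat1DForcedFiredrake
+
+    fine = Heat1DForcedFiredrake(n=4)
+    coarse = Heat1DForcedFiredrake(n=2)
+    transfer = MeshToMeshFiredrake(fine, coarse, {})
+
+    u_coarse = transfer.restrict(fine.u_exact(0))
+    u_fine = transfer.prolong(u_coarse)
+    ```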
+    """
+
+    def restrict(self, F):
+        """
+        Restrict from fine to coarse grid
+
+        Args:
+            F: the fine level data
+
+        Returns:
+            Coarse level data
+        """
+        if isinstance(F, firedrake_mesh):
+            u_coarse = self.coarse_prob.dtype_u(assemble(interpolate(F.functionspace, self.coarse_prob.init)))
+        elif isinstance(F, IMEX_firedrake_mesh):
+            u_coarse = IMEX_firedrake_mesh(self.coarse_prob.init)
+            u_coarse.impl.functionspace.assign(assemble(interpolate(F.impl.functionspace, self.coarse_prob.init)))
+            u_coarse.expl.functionspace.assign(assemble(interpolate(F.expl.functionspace, self.coarse_prob.init)))
+        else:
+            raise TransferError('Unknown type of fine data, got %s' % type(F))
+
+        return u_coarse
+
+    def prolong(self, G):
+        """
+        Prolongate from coarse to fine grid
+
+        Args:
+            G: the coarse level data
+
+        Returns:
+            fine level data
+        """
+        if isinstance(G, firedrake_mesh):
+            u_fine = self.fine_prob.dtype_u(assemble(interpolate(G.functionspace, self.fine_prob.init)))
+        elif isinstance(G, IMEX_firedrake_mesh):
+            u_fine = IMEX_firedrake_mesh(self.fine_prob.init)
+            u_fine.impl.functionspace.assign(assemble(interpolate(G.impl.functionspace, self.fine_prob.init)))
+            u_fine.expl.functionspace.assign(assemble(interpolate(G.expl.functionspace, self.fine_prob.init)))
+        else:
+            raise TransferError('Unknown type of coarse data, got %s' % type(G))
+
+        return u_fine
+
+
+class MeshToMeshFiredrakeHierarchy(SpaceTransfer):
+    """
+    This implementation can restrict and prolong between Firedrake meshes that are generated from a hierarchy.
+    Example:
+
+    ```
+    from firedrake import *
+
+    mesh = UnitSquareMesh(8, 8)
+    hierarchy = MeshHierarchy(mesh, 4)
+
+    mesh = hierarchy[-1]
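+
+    # a sketch of the intended use with pySDC problems that accept a custom mesh,
+    # e.g. `Heat1DForcedFiredrake`: one problem per level of the hierarchy,
+    # transferring between adjacent levels
+    from pySDC.implementations.problem_classes.HeatFiredrake import Heat1DForcedFiredrake
+
+    probs = [Heat1DForcedFiredrake(mesh=mesh) for mesh in hierarchy]
+    transfer = MeshToMeshFiredrakeHierarchy(probs[-1], probs[-2], {})
+    u_coarse = transfer.restrict(probs[-1].u_exact(0))
+    u_fine = transfer.prolong(u_coarse)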
+    ```
+    """
+
+    @staticmethod
+    def _restrict(u_fine, u_coarse):
+        """Perform restriction in Firedrake"""
+        inject(u_fine.functionspace, u_coarse.functionspace)
+
+    @staticmethod
+    def _prolong(u_coarse, u_fine):
+        """Perform prolongation in Firedrake"""
+        prolong(u_coarse.functionspace, u_fine.functionspace)
+
+    def restrict(self, F):
+        """
+        Restrict from fine to coarse grid
+
+        Args:
+            F: the fine level data
+
+        Returns:
+            Coarse level data
+        """
+        if isinstance(F, firedrake_mesh):
+            G = self.coarse_prob.u_init
+            self._restrict(u_fine=F, u_coarse=G)
+        elif isinstance(F, IMEX_firedrake_mesh):
+            G = IMEX_firedrake_mesh(self.coarse_prob.init)
+            self._restrict(u_fine=F.impl, u_coarse=G.impl)
+            self._restrict(u_fine=F.expl, u_coarse=G.expl)
+        else:
+            raise TransferError('Unknown type of fine data, got %s' % type(F))
+
+        return G
+
+    def prolong(self, G):
+        """
+        Prolongate from coarse to fine grid
+
+        Args:
+            G: the coarse level data
+
+        Returns:
+            fine level data
+        """
+        if isinstance(G, firedrake_mesh):
+            F = self.fine_prob.u_init
+            self._prolong(u_coarse=G, u_fine=F)
+        elif isinstance(G, IMEX_firedrake_mesh):
+            F = self.fine_prob.f_init
+            self._prolong(u_coarse=G.impl, u_fine=F.impl)
+            self._prolong(u_coarse=G.expl, u_fine=F.expl)
+        else:
+            raise TransferError('Unknown type of coarse data, got %s' % type(G))
+
+        return F
diff --git a/pySDC/tests/test_helpers/test_gusto_coupling.py b/pySDC/tests/test_helpers/test_gusto_coupling.py
index 69f8611819189b94e1e223b65612ff7a8f5f69ef..4563d7a9c14a57b5380ff86fc623764bcd3ca585 100644
--- a/pySDC/tests/test_helpers/test_gusto_coupling.py
+++ b/pySDC/tests/test_helpers/test_gusto_coupling.py
@@ -291,7 +291,6 @@ def test_pySDC_integrator_RK(use_transport_scheme, method, setup):
     stepper_pySDC = get_gusto_stepper(
         eqns,
         pySDC_integrator(
-            eqns,
             description,
             controller_params,
             domain,
@@ -415,7 +414,6 @@ def test_pySDC_integrator(use_transport_scheme, imex, setup):
     stepper_pySDC = get_gusto_stepper(
         eqns,
         pySDC_integrator(
-            eqns,
             description,
             controller_params,
             domain,
@@ -550,7 +548,6 @@ def test_pySDC_integrator_with_adaptivity(dt_initial, setup):
     stepper_pySDC = get_gusto_stepper(
         eqns,
         pySDC_integrator(
-            eqns,
             description,
             controller_params,
             domain,
diff --git a/pySDC/tests/test_transfer_classes/test_firedrake_transfer.py b/pySDC/tests/test_transfer_classes/test_firedrake_transfer.py
new file mode 100644
index 0000000000000000000000000000000000000000..55bf1650f7dc6cd9e347fadec2a41a9d902cd318
--- /dev/null
+++ b/pySDC/tests/test_transfer_classes/test_firedrake_transfer.py
@@ -0,0 +1,90 @@
+import pytest
+
+
+@pytest.mark.firedrake
+def test_Firedrake_transfer():
+    import firedrake as fd
+    from pySDC.implementations.problem_classes.HeatFiredrake import Heat1DForcedFiredrake
+    from pySDC.implementations.transfer_classes.TransferFiredrakeMesh import MeshToMeshFiredrake
+    import numpy as np
+
+    # prepare fine and coarse problems
+    resolutions = [2, 4]
+    probs = [Heat1DForcedFiredrake(n=n) for n in resolutions]
+
+    # prepare data that we can interpolate exactly in this discretization
+    functions = [fd.Function(me.V).interpolate(me.x**2) for me in probs]
+    data = [me.u_init for me in probs]
+    rhs = [me.f_init for me in probs]
+    for i in range(len(functions)):
+        data[i].assign(functions[i])
+        rhs[i].impl.assign(functions[i])
+        rhs[i].expl.assign(functions[i])
+
+    # setup transfer class
+    transfer = MeshToMeshFiredrake(*probs[::-1], {})
+
+    # test that restriction and interpolation were indeed exact on the mesh
+    restriction = transfer.restrict(data[1])
+    error = abs(restriction - data[0])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during restriction!'
+
+    interpolation = transfer.prolong(data[0])
+    error = abs(interpolation - data[1])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during interpolation!'
+
+    # test that restriction and interpolation were indeed exact on the IMEX mesh
+    restriction = transfer.restrict(rhs[1])
+    error = max([abs(restriction.impl - rhs[0].impl), abs(restriction.expl - rhs[0].expl)])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during restriction!'
+
+    interpolation = transfer.prolong(rhs[0])
+    error = max([abs(interpolation.impl - rhs[1].impl), abs(interpolation.expl - rhs[1].expl)])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during interpolation!'
+
+
+@pytest.mark.firedrake
+def test_Firedrake_transfer_hierarchy():
+    import firedrake as fd
+    from pySDC.implementations.problem_classes.HeatFiredrake import Heat1DForcedFiredrake
+    from pySDC.implementations.transfer_classes.TransferFiredrakeMesh import MeshToMeshFiredrakeHierarchy
+    import numpy as np
+
+    # prepare fine and coarse problems with the hierarchy
+    _prob = Heat1DForcedFiredrake(n=2)
+    hierarchy = fd.MeshHierarchy(_prob.mesh, 1)
+    probs = [Heat1DForcedFiredrake(mesh=mesh) for mesh in hierarchy]
+
+    # prepare data that we can interpolate exactly in this discretization
+    functions = [fd.Function(me.V).interpolate(me.x**2) for me in probs]
+    data = [me.u_init for me in probs]
+    rhs = [me.f_init for me in probs]
+    for i in range(len(functions)):
+        data[i].assign(functions[i])
+        rhs[i].impl.assign(functions[i])
+        rhs[i].expl.assign(functions[i])
+
+    # setup transfer class
+    transfer = MeshToMeshFiredrakeHierarchy(*probs[::-1], {})
+
+    # test that restriction and interpolation were indeed exact on the mesh
+    restriction = transfer.restrict(data[1])
+    error = abs(restriction - data[0])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during restriction!'
+
+    interpolation = transfer.prolong(data[0])
+    error = abs(interpolation - data[1])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during interpolation!'
+
+    # test that restriction and interpolation were indeed exact on the IMEX mesh
+    restriction = transfer.restrict(rhs[1])
+    error = max([abs(restriction.impl - rhs[0].impl), abs(restriction.expl - rhs[0].expl)])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during restriction!'
+
+    interpolation = transfer.prolong(rhs[0])
+    error = max([abs(interpolation.impl - rhs[1].impl), abs(interpolation.expl - rhs[1].expl)])
+    assert np.isclose(error, 0), f'Got unexpectedly large {error=} during interpolation!'
+
+
+if __name__ == '__main__':
+    test_Firedrake_transfer()
+    test_Firedrake_transfer_hierarchy()
diff --git a/pySDC/tests/test_tutorials/test_step_7.py b/pySDC/tests/test_tutorials/test_step_7.py
index 6820a17d15a8b94eec7e42d45d8dfafff72b552c..3445aea4b07b40047c0e6d10a885db9c12a4fe07 100644
--- a/pySDC/tests/test_tutorials/test_step_7.py
+++ b/pySDC/tests/test_tutorials/test_step_7.py
@@ -130,10 +130,11 @@ def test_D():
 
 
 @pytest.mark.firedrake
-def test_E():
+@pytest.mark.parametrize('ML', [True, False])
+def test_E(ML):
     from pySDC.tutorial.step_7.E_pySDC_with_Firedrake import runHeatFiredrake
 
-    runHeatFiredrake(useMPIsweeper=False)
+    runHeatFiredrake(useMPIsweeper=False, ML=ML)
 
 
 @pytest.mark.firedrake
@@ -142,7 +143,7 @@ def test_E_MPI():
     my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
     cwd = '.'
     num_procs = 3
-    cmd = f'mpiexec -np {num_procs} python pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py'.split()
+    cmd = f'mpiexec -np {num_procs} python pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py --useMPIsweeper'.split()
 
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
     p.wait()
@@ -179,3 +180,32 @@ def test_F():
     assert (
         error < 1e-8
     ), f'Unexpectedly large difference of {error} between pySDC and Gusto SDC implementations in Williamson 5 test case'
+
+
+@pytest.mark.firedrake
+def test_F_ML():
+    """
+    Test that the Gusto coupling with multiple levels in space converges
+    """
+    from pySDC.tutorial.step_7.F_pySDC_with_Gusto import williamson_5
+    from pySDC.helpers.stats_helper import get_sorted, filter_stats
+    import sys
+
+    if '--running-tests' not in sys.argv:
+        sys.argv += ['--running-tests']
+
+    params = {'use_pySDC': True, 'dt': 1000, 'tmax': 1000, 'use_adaptivity': False, 'M': 2, 'kmax': 4, 'QI': 'LU'}
+    stepper_ML, _ = williamson_5(Nlevels=2, **params)
+    stepper_SL, _ = williamson_5(Nlevels=1, **params)
+
+    stats = stepper_ML.scheme.stats
+    residual_fine = get_sorted(stats, type='residual_post_sweep', level=0, sortby='iter')
+    residual_coarse = get_sorted(stats, type='residual_post_sweep', level=1, sortby='iter')
+    assert residual_fine[0][1] / residual_fine[-1][1] > 3e2, 'Fine residual did not converge as expected'
+    assert residual_coarse[0][1] / residual_coarse[-1][1] > 3e2, 'Coarse residual did not converge as expected'
+
+    stats_SL = stepper_SL.scheme.stats
+    residual_SL = get_sorted(stats_SL, type='residual_post_sweep', sortby='iter')
+    assert all(
+        res_SL > res_ML for res_SL, res_ML in zip(residual_SL, residual_fine)
+    ), 'Single level SDC converged faster than multi-level!'
diff --git a/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py b/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
index d85d9a536b9151694df0fed52d62770811a65354..0307d53592745aef3db3a0fa6c2f2395e8b30ea2 100644
--- a/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
+++ b/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
@@ -5,7 +5,15 @@ The function `setup` generates the description and controller_params dictionarie
 This proceeds very similar to earlier tutorials. The interesting part of this tutorial is rather in the problem class.
 See `pySDC/implementations/problem_classes/HeatFiredrake` for an easy example of how to use Firedrake within pySDC.
 
-Run in serial using simply `python E_pySDC_with_Firedrake.py` or with parallel diagonal SDC with `mpiexec -np 3 python E_pySDC_with_Firedrake.py`.
+The script can be run in three different ways. Use
+ - `python E_pySDC_with_Firedrake.py` for single-level serial SDC
+ - `mpiexec -np 3 python E_pySDC_with_Firedrake.py --useMPIsweeper` for single-level MPI-parallel diagonal SDC
+ - `python E_pySDC_with_Firedrake.py --ML` for three-level serial SDC
+
+You should notice that the speedup from MPI parallelisation is quite good and that, while multi-level SDC substantially
+reduces the number of SDC iterations, it does not reduce time to solution in this case. This is partly because more
+solvers have to be constructed when coarse levels are used. Note also that we do not claim to have found the best
+parameters; this is just an example that demonstrates how to use multi-level SDC.
 """
 
 import numpy as np
@@ -65,7 +73,60 @@ def setup(useMPIsweeper):
     return description, controller_params
 
 
-def runHeatFiredrake(useMPIsweeper):
+def setup_ML():
+    """
+    Helper routine to set up parameters for the multi-level run
+
+    Returns:
+        description and controller_params parameter dictionaries
+    """
+    from pySDC.implementations.problem_classes.HeatFiredrake import Heat1DForcedFiredrake
+    from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
+    from pySDC.implementations.transfer_classes.TransferFiredrakeMesh import MeshToMeshFiredrake
+    from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
+    from pySDC.implementations.hooks.log_work import LogWork
+
+    level_params = dict()
+    level_params['restol'] = 5e-10
+    level_params['dt'] = 0.2
+
+    step_params = dict()
+    step_params['maxiter'] = 20
+
+    sweeper_params = dict()
+    sweeper_params['quad_type'] = 'RADAU-RIGHT'
+    sweeper_params['num_nodes'] = 3
+    sweeper_params['QI'] = 'MIN-SR-S'
+    sweeper_params['QE'] = 'PIC'
+
+    problem_params = dict()
+    problem_params['nu'] = 0.1
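+    # list-valued problem parameters are distributed across the levels, finest first:
+    # 128 degrees of freedom on the finest level and 4 on the coarsest of three levels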
+    problem_params['n'] = [128, 32, 4]
+    problem_params['c'] = 1.0
+
+    base_transfer_params = dict()
+    base_transfer_params['finter'] = True
+
+    controller_params = dict()
+    controller_params['logger_level'] = 15 if MPI.COMM_WORLD.rank == 0 else 30
+    controller_params['hook_class'] = [LogGlobalErrorPostRun, LogWork]
+
+    description = dict()
+    description['problem_class'] = Heat1DForcedFiredrake
+    description['problem_params'] = problem_params
+    description['sweeper_class'] = imex_1st_order
+    description['sweeper_params'] = sweeper_params
+    description['level_params'] = level_params
+    description['step_params'] = step_params
+    description['space_transfer_class'] = MeshToMeshFiredrake
+    description['base_transfer_params'] = base_transfer_params
+
+    return description, controller_params
+
+
+def runHeatFiredrake(useMPIsweeper=False, ML=False):
     """
     Run the example defined by the above parameters
     """
@@ -75,7 +136,11 @@ def runHeatFiredrake(useMPIsweeper):
     Tend = 1.0
     t0 = 0.0
 
-    description, controller_params = setup(useMPIsweeper)
+    if ML:
+        assert not useMPIsweeper, 'MPI parallel diagonal SDC and ML SDC are not compatible at the moment'
+        description, controller_params = setup_ML()
+    else:
+        description, controller_params = setup(useMPIsweeper)
 
     controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
 
@@ -98,18 +163,41 @@ def runHeatFiredrake(useMPIsweeper):
     tot_solves = np.sum([me[1] for me in work_solves])
     tot_rhs = np.sum([me[1] for me in work_rhs])
 
+    time_rank = description["sweeper_params"]["comm"].rank if useMPIsweeper else 0
     print(
-        f'Finished with error {error[0][1]:.2e}. Used {tot_iter} SDC iterations, with {tot_solver_setup} solver setups, {tot_solves} solves and {tot_rhs} right hand side evaluations on time task {description["sweeper_params"]["comm"].rank}.'
+        f'Finished with error {error[0][1]:.2e}. Used {tot_iter} SDC iterations, with {tot_solver_setup} solver setups, {tot_solves} solves and {tot_rhs} right hand side evaluations on the finest level of time task {time_rank}.'
     )
 
     # do tests that we got the same as last time
     n_nodes = 1 if useMPIsweeper else description['sweeper_params']['num_nodes']
     assert error[0][1] < 2e-8
-    assert tot_iter == 29
+    assert tot_iter == (10 if ML else 29)
     assert tot_solver_setup == n_nodes
     assert tot_solves == n_nodes * tot_iter
     assert tot_rhs == n_nodes * tot_iter + (n_nodes + 1) * len(niter)
 
 
 if __name__ == "__main__":
-    runHeatFiredrake(useMPIsweeper=MPI.COMM_WORLD.size > 1)
+    from argparse import ArgumentParser
+
+    parser = ArgumentParser()
+    parser.add_argument(
+        '--ML',
+        help='Whether you want to run multi-level SDC',
+        default=False,
+        required=False,
+        action='store_true',
+    )
+    parser.add_argument(
+        '--useMPIsweeper',
+        help='Whether you want to use MPI-parallel diagonal SDC',
+        default=False,
+        required=False,
+        action='store_true',
+    )
+
+    args = parser.parse_args()
+
+    runHeatFiredrake(**vars(args))
diff --git a/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py b/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
index 2053f79ce0ca83f51883887631a253526d66ff16..cc885430515ac13edfeaf1279c4c753b7fb8a3c5 100644
--- a/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
+++ b/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
@@ -61,6 +61,8 @@ williamson_5_defaults = {
     'kmax': '5',  # use fixed number of iteration up to this value
     'use_pySDC': True,  # whether to use pySDC for time integration
     'use_adaptivity': True,  # whether to use adaptive step size selection
+    'Nlevels': 1,  # number of levels in SDC
+    'logger_level': 15,  # pySDC logger level
 }
 
 
@@ -76,7 +78,10 @@ def williamson_5(
     kmax=williamson_5_defaults['kmax'],
     use_pySDC=williamson_5_defaults['use_pySDC'],
     use_adaptivity=williamson_5_defaults['use_adaptivity'],
+    Nlevels=williamson_5_defaults['Nlevels'],
+    logger_level=williamson_5_defaults['logger_level'],
     mesh=None,
+    _ML_is_setup=True,
 ):
     """
     Run the Williamson 5 test case.
@@ -91,9 +96,15 @@ def williamson_5(
         M (int): Number of collocation nodes
         kmax (int): Max number of SDC iterations
         use_pySDC (bool): Use pySDC as Gusto time integrator or Gusto SDC implementation
+        Nlevels (int): Number of SDC levels
+        logger_level (int): Logger level
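+        mesh (Firedrake mesh, optional): Mesh to run on; defaults to an icosahedral sphere mesh, or the finest mesh of the hierarchy for multi-level runs
+        _ML_is_setup (bool): Internal flag; when False, the stepper is returned before the multi-level setup is completed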
     """
     if not use_pySDC and use_adaptivity:
         raise NotImplementedError('Adaptive step size selection not yet implemented in Gusto')
+    if not use_pySDC and Nlevels > 1:
+        raise NotImplementedError('Multi-level SDC not yet implemented in Gusto')
+    if time_parallelism and Nlevels > 1:
+        raise NotImplementedError('Multi-level SDC does not work with MPI parallel sweeper yet')
 
     # ------------------------------------------------------------------------ #
     # Parameters for test case
@@ -133,6 +144,9 @@ def williamson_5(
 
     # Domain
     mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2, comm=space_comm) if mesh is None else mesh
+    if Nlevels > 1:
+        hierarchy = fd.MeshHierarchy(mesh, Nlevels - 1)
+        mesh = hierarchy[-1]
     domain = Domain(mesh, dt, 'BDM', element_order)
     x, y, z = SpatialCoordinate(mesh)
     lamda, phi, _ = lonlatr_from_xyz(x, y, z)
@@ -200,7 +214,7 @@ def williamson_5(
         convergence_controllers[SpreadStepSizesBlockwiseNonMPI] = {'overwrite_to_reach_Tend': False}
 
     controller_params = dict()
-    controller_params['logger_level'] = 15 if fd.COMM_WORLD.rank == 0 else 30
+    controller_params['logger_level'] = logger_level if fd.COMM_WORLD.rank == 0 else 30
     controller_params['mssdc_jac'] = False
 
     description = dict()
@@ -211,6 +225,10 @@ def williamson_5(
     description['step_params'] = step_params
     description['convergence_controllers'] = convergence_controllers
 
+    from pySDC.implementations.transfer_classes.TransferFiredrakeMesh import MeshToMeshFiredrakeHierarchy
+
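+    # the space transfer class is only exercised when more than one level is configured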
+    description['space_transfer_class'] = MeshToMeshFiredrakeHierarchy
+
     # ------------------------------------------------------------------------ #
     # petsc solver parameters
     # ------------------------------------------------------------------------ #
@@ -254,17 +272,51 @@ def williamson_5(
     # ------------------------------------------------------------------------ #
 
     if use_pySDC:
-        method = pySDC_integrator(
-            eqns, description, controller_params, domain=domain, solver_parameters=solver_parameters
-        )
+        method = pySDC_integrator(description, controller_params, domain=domain, solver_parameters=solver_parameters)
     else:
         method = SDC(**SDC_params, domain=domain)
 
     stepper = Timestepper(eqns, method, io, spatial_methods=transport_methods)
 
-    if use_pySDC and use_adaptivity:
-        # we have to do this for adaptive time stepping, because it is a bit of a mess
-        method.timestepper = stepper
+    # ------------------------------------------------------------------------ #
+    # Setup multi-level SDC
+    # ------------------------------------------------------------------------ #
+
+    if not _ML_is_setup:
+        return stepper
+
+    if Nlevels > 1:
+        steppers = [None] * Nlevels
+        steppers[0] = stepper
+
+        # get different steppers on the different levels
+        # recall that the setup of the problems is only finished when the stepper is setup
+        for i in range(1, Nlevels):
+            steppers[i] = williamson_5(
+                ncells_per_edge=ncells_per_edge,
+                dt=dt,
+                tmax=tmax,
+                dumpfreq=dumpfreq,
+                dirname=f'{dirname}_unused_{i}',
+                time_parallelism=time_parallelism,
+                QI=QI,
+                M=M,
+                kmax=kmax,
+                use_pySDC=use_pySDC,
+                use_adaptivity=use_adaptivity,
+                Nlevels=1,
+                mesh=hierarchy[-i - 1],  # note that the finest level is 0 in pySDC, but -1 in the hierarchy
+                logger_level=50,
+                _ML_is_setup=False,
+            )
+
+        # update description and setup pySDC again with the discretizations from different steppers
+        description['problem_params']['residual'] = [me.scheme.residual for me in steppers]
+        description['problem_params']['equation'] = [me.scheme.equation for me in steppers]
+        method = pySDC_integrator(description, controller_params, domain=domain, solver_parameters=solver_parameters)
+        stepper = Timestepper(eqns, method, io, spatial_methods=transport_methods)
 
     # ------------------------------------------------------------------------ #
     # Initial conditions
@@ -285,6 +337,10 @@ def williamson_5(
     # Run
     # ------------------------------------------------------------------------ #
 
+    if use_pySDC and use_adaptivity:
+        # we have to do this for adaptive time stepping, because it is a bit of a mess
+        method.timestepper = stepper
+
     stepper.run(t=0, tmax=tmax)
     return stepper, mesh
 
@@ -337,6 +393,12 @@ if __name__ == "__main__":
         default=williamson_5_defaults['use_adaptivity'],
     )
     parser.add_argument('--QI', help='Implicit preconditioner', type=str, default=williamson_5_defaults['QI'])
+    parser.add_argument(
+        '--Nlevels',
+        help="Number of SDC levels.",
+        type=int,
+        default=williamson_5_defaults['Nlevels'],
+    )
     args, unknown = parser.parse_known_args()
 
     options = vars(args)