diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml
index 7650b8638b1af264a78f65c255a9a613fa42702f..586ef8f9c004449883350416ccb3b27be86cd777 100644
--- a/.github/workflows/ci_pipeline.yml
+++ b/.github/workflows/ci_pipeline.yml
@@ -173,7 +173,7 @@ jobs:
   user_firedrake_tests:
     runs-on: ubuntu-latest
     container:
-      image: firedrakeproject/firedrake-vanilla:latest
+      image: firedrakeproject/firedrake-vanilla-default:latest
       options: --user root
       volumes:
         - ${{ github.workspace }}:/repositories
@@ -181,6 +181,11 @@ jobs:
       run:
         shell: bash -l {0}
     steps:
+      - name: Fix HOME
+        # GitHub Actions overrides HOME to /github/home inside job containers,
+        # which breaks the Firedrake setup unless it is reset
+        # (https://github.com/actions/runner/issues/863)
+        run: echo "HOME=/home/firedrake" >> "$GITHUB_ENV"
       - name: Checkout pySDC
         uses: actions/checkout@v4
         with: 
@@ -190,25 +195,32 @@ jobs:
         with:
           repository: firedrakeproject/gusto
           path: ./gusto_repo
+      - name: Create virtual environment
+        # pass '--system-site-packages' so Firedrake can be found
+        run: python3 -m venv --system-site-packages venv-pySDC
+
       - name: Install pySDC
         run: |
-          . /home/firedrake/firedrake/bin/activate
-          python -m pip install --no-deps -e /repositories/pySDC
-          python -m pip install qmat
+          . venv-pySDC/bin/activate
+          pip install -e /repositories/pySDC
+          pip install qmat
+          # test installation
+          python -c "import pySDC; print(f'pySDC module: {pySDC}')"
       - name: Install gusto
         run: |
-          . /home/firedrake/firedrake/bin/activate
-          python -m pip install -e /repositories/gusto_repo
+          . venv-pySDC/bin/activate
+          pip install -e /repositories/gusto_repo
+          # test installation
+          python -c "import gusto; print(f'gusto module: {gusto}')"
       - name: run pytest
         run: |
-          . /home/firedrake/firedrake/bin/activate
+          . venv-pySDC/bin/activate
           firedrake-clean
           cd ./pySDC
-          coverage run -m pytest --continue-on-collection-errors -v --durations=0 /repositories/pySDC/pySDC/tests -m firedrake
+          python -m coverage run -m pytest --continue-on-collection-errors -v --durations=0 /repositories/pySDC/pySDC/tests -m firedrake
         timeout-minutes: 45
       - name: Make coverage report
         run: |
-          . /home/firedrake/firedrake/bin/activate
 
           cd ./pySDC
           mv data ../data_firedrake
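
The venv created above with `--system-site-packages` layers the editable pySDC install on top of the Firedrake that ships with the container. A minimal sketch to sanity-check that setup from inside the activated `venv-pySDC` (module names taken from the steps above, nothing else assumed):

```python
# Sanity check: a --system-site-packages venv should see both the
# container-provided firedrake and the locally installed packages.
import importlib.util

for mod in ("firedrake", "pySDC", "gusto", "qmat"):
    spec = importlib.util.find_spec(mod)
    print(f"{mod}: {spec.origin if spec else 'NOT FOUND'}")
```
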
diff --git a/pySDC/helpers/fieldsIO.py b/pySDC/helpers/fieldsIO.py
index 578d125733135ae45a5b6bc40cb0e748b1ea498f..3c1588af74a014b5aeb5022f775e1b5ef4b36939 100644
--- a/pySDC/helpers/fieldsIO.py
+++ b/pySDC/helpers/fieldsIO.py
@@ -45,9 +45,7 @@ See :class:`pySDC.helpers.fieldsIO.writeFields_MPI` for an illustrative example.
 Warning
 -------
 To use MPI collective writing, you first need to call the class method :class:`Rectilinear.setupMPI` (cf. its docstring).
-Also, `Rectilinear.setHeader` **must be given the global grids coordinates**, wether the code is run in parallel or not.
-
-> ⚠️ Also : this module can only be imported with **Python 3.11 or higher** !
+Also, `Rectilinear.setHeader` **must be given the global grid coordinates**, whether the code is run in parallel or not.
 """
 import os
 import numpy as np
@@ -207,7 +205,7 @@ class FieldsIO:
         if not self.ALLOW_OVERWRITE:
             assert not os.path.isfile(
                 self.fileName
-            ), "file already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"
+            ), f"file {self.fileName!r} already exists, use FieldsIO.ALLOW_OVERWRITE = True to allow overwriting"
 
         with open(self.fileName, "w+b") as f:
             self.hBase.tofile(f)
@@ -480,7 +478,7 @@ class Rectilinear(Scalar):
 
         Example
         -------
-        >>> # Suppose the FieldsIO object is already writen into outputs.pysdc
+        >>> # Suppose the FieldsIO object is already written into outputs.pysdc
         >>> import os
 >>> from pySDC.helpers.fieldsIO import Rectilinear
         >>> os.makedirs("vtrFiles")  # to store all VTR files into a subfolder
@@ -499,12 +497,13 @@ class Rectilinear(Scalar):
     # MPI-parallel implementation
     # -------------------------------------------------------------------------
     comm: MPI.Intracomm = None
+    _nCollectiveIO = None
 
     @classmethod
     def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
         """
         Setup the MPI mode for the files IO, considering a decomposition
-        of the 1D grid into contiuous subintervals.
+        of the 1D grid into contiguous subintervals.
 
         Parameters
         ----------
@@ -519,6 +518,20 @@ class Rectilinear(Scalar):
         cls.iLoc = iLoc
         cls.nLoc = nLoc
         cls.mpiFile: MPI.File = None
+        cls._nCollectiveIO = None
+
+    @property
+    def nCollectiveIO(self):
+        """
+        Number of collective IO accesses each process performs when
+        reading or writing a field (the maximum over all processes).
+
+        Returns
+        -------
+        int
+            Number of collective IO accesses.
+        """
+        if self._nCollectiveIO is None:
+            self._nCollectiveIO = self.comm.allreduce(self.nVar * np.prod(self.nLoc[:-1]), op=MPI.MAX)
+        return self._nCollectiveIO
 
     @property
     def MPI_ON(self):
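
Why the MAX-allreduce: `Write_at_all` and `Read_at_all` are collective operations, so every rank in the communicator has to issue the same number of calls, even when the domain decomposition gives the ranks different chunk counts. A hedged sketch of what the property computes, with illustrative rank-dependent sizes (mpi4py assumed, as elsewhere in this module):

```python
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
# one collective access per contiguous last-axis row: nVar * prod(nLoc[:-1])
nVar, nLoc = 2, (3 + comm.Get_rank(), 8)   # illustrative local sizes
nAccesses = nVar * np.prod(nLoc[:-1])
nCollectiveIO = comm.allreduce(nAccesses, op=MPI.MAX)
print(comm.Get_rank(), nAccesses, nCollectiveIO)
```
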
@@ -546,7 +559,7 @@ class Rectilinear(Scalar):
         """Write data (np.ndarray) in the binary file in MPI mode, at the current file cursor position."""
         self.mpiFile.Write(data)
 
-    def MPI_WRITE_AT(self, offset, data: np.ndarray):
+    def MPI_WRITE_AT_ALL(self, offset, data: np.ndarray):
         """
         Write data in the binary file in MPI mode, with a given offset
         **relative to the beginning of the file**.
@@ -560,7 +573,7 @@ class Rectilinear(Scalar):
         """
         self.mpiFile.Write_at_all(offset, data)
 
-    def MPI_READ_AT(self, offset, data):
+    def MPI_READ_AT_ALL(self, offset, data: np.ndarray):
         """
         Read data from the binary file in MPI mode, with a given offset
         **relative to the beginning of the file**.
@@ -625,13 +638,22 @@ class Rectilinear(Scalar):
 
         offset0 = self.fileSize
         self.MPI_FILE_OPEN(mode="a")
+        nWrites = 0
+        nCollectiveIO = self.nCollectiveIO
+
         if self.MPI_ROOT:
             self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
         offset0 += self.tSize
 
         for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
             offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_WRITE_AT(offset, field[iVar, *iBeg])
+            self.MPI_WRITE_AT_ALL(offset, field[(iVar, *iBeg)])
+            nWrites += 1
+
+        for _ in range(nCollectiveIO - nWrites):
+            # Additional collective write to catch up with other processes
+            self.MPI_WRITE_AT_ALL(offset0, field[:0])
+
         self.MPI_FILE_CLOSE()
 
     def iPos(self, iVar, iX):
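
The catch-up loop above (and its twin in `readField` below) is the other half of that contract: a rank that runs out of local chunks pads with zero-length collective accesses so its peers are never left blocked inside `Write_at_all`. A self-contained sketch of the pattern, runnable with e.g. `mpiexec -np 3` (file name and sizes are illustrative, not taken from this module):

```python
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

chunk = np.full(4, rank, dtype=np.float64)
nLocal = rank + 1                            # uneven per-rank workload
nTotal = comm.allreduce(nLocal, op=MPI.MAX)  # agreed collective call count

fh = MPI.File.Open(comm, "demo.bin", MPI.MODE_CREATE | MPI.MODE_WRONLY)
for i in range(nLocal):
    # rank-unique, non-overlapping offsets: rank r starts at chunk r*(r+1)/2
    offset = (sum(range(rank + 1)) + i) * chunk.nbytes
    fh.Write_at_all(offset, chunk)
for _ in range(nTotal - nLocal):
    fh.Write_at_all(0, chunk[:0])            # empty write, keeps ranks in sync
fh.Close()
```
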
@@ -674,9 +696,18 @@ class Rectilinear(Scalar):
         field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)
 
         self.MPI_FILE_OPEN(mode="r")
+        nReads = 0
+        nCollectiveIO = self.nCollectiveIO
+
         for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
             offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_READ_AT(offset, field[iVar, *iBeg])
+            self.MPI_READ_AT_ALL(offset, field[(iVar, *iBeg)])
+            nReads += 1
+
+        for _ in range(nCollectiveIO - nReads):
+            # Additional collective read to catch up with other processes
+            self.MPI_READ_AT_ALL(offset0, field[:0])
+
         self.MPI_FILE_CLOSE()
 
         return t, field
@@ -689,7 +720,7 @@ def initGrid(nVar, gridSizes):
     dim = len(gridSizes)
     coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
     s = [None] * dim
-    u0 = np.array(np.arange(nVar) + 1)[:, *s]
+    u0 = np.array(np.arange(nVar) + 1)[(slice(None), *s)]
     for x in np.meshgrid(*coords, indexing="ij"):
         u0 = u0 * x
     return coords, u0
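
These index rewrites are what lift the Python 3.11 floor noted (and removed) in the module docstring above: unpacking inside a subscript, `u0[:, *s]`, is syntax that PEP 646 only enabled in 3.11, while spelling out the index tuple is equivalent on every supported interpreter. A small sketch of the equivalence:

```python
import numpy as np

u0 = np.arange(24).reshape(2, 3, 4)
s = [slice(1, 3), slice(0, 2)]

a = u0[(slice(None), *s)]   # works on any supported Python version
# u0[:, *s]                 # SyntaxError before 3.11; same result on 3.11+
assert a.shape == (2, 2, 2)
```
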
@@ -711,8 +742,7 @@ def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
     iLoc, nLoc = blocks.localBounds
     Rectilinear.setupMPI(comm, iLoc, nLoc)
     s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
-    u0 = u0[:, *s]
-    print(MPI_RANK, u0.shape)
+    u0 = u0[(slice(None), *s)]
 
     f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
     f1.setHeader(nVar=nVar, coords=coords)
@@ -731,6 +761,11 @@ def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
 def compareFields_MPI(fileName, u0, nSteps):
     from pySDC.helpers.fieldsIO import FieldsIO
 
+    comm = MPI.COMM_WORLD
+    MPI_RANK = comm.Get_rank()
+    if MPI_RANK == 0:
+        print("Comparing fields with MPI")
+
     f2 = FieldsIO.fromFile(fileName)
 
     times = np.arange(nSteps) / nSteps
diff --git a/pySDC/tests/test_helpers/test_fieldsIO.py b/pySDC/tests/test_helpers/test_fieldsIO.py
index 1e75505d0faab3f8c062a251466fe1a2b378c4b4..2878be5fe71f6994c45c30ed9e1598e170232a40 100644
--- a/pySDC/tests/test_helpers/test_fieldsIO.py
+++ b/pySDC/tests/test_helpers/test_fieldsIO.py
@@ -3,9 +3,6 @@ import sys
 import glob
 import pytest
 
-if sys.version_info < (3, 11):
-    pytest.skip("skipping fieldsIO tests on python lower than 3.11", allow_module_level=True)
-
 import itertools
 import numpy as np
 
@@ -14,6 +11,7 @@ from pySDC.helpers.fieldsIO import DTYPES, FieldsIO
 FieldsIO.ALLOW_OVERWRITE = True
 
 
+@pytest.mark.base
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
 @pytest.mark.parametrize("dim", range(4))
 def testHeader(dim, dtypeIdx):
@@ -65,6 +63,7 @@ def testHeader(dim, dtypeIdx):
         assert np.allclose(val, f2.header[key]), f"header's discrepancy for {key} in written {f2}"
 
 
+@pytest.mark.base
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
 @pytest.mark.parametrize("nSteps", [1, 2, 10, 100])
 @pytest.mark.parametrize("nVar", [1, 2, 5])
@@ -106,6 +105,7 @@ def testScalar(nVar, nSteps, dtypeIdx):
         assert np.allclose(u2, u1), f"{idx}'s fields in {f1} have incorrect values"
 
 
+@pytest.mark.base
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
 @pytest.mark.parametrize("nSteps", [1, 2, 5, 10])
 @pytest.mark.parametrize("nVar", [1, 2, 5])
@@ -155,6 +155,7 @@ def testRectilinear(dim, nVar, nSteps, dtypeIdx):
             assert np.allclose(u2, u1), f"{idx}'s fields in {f1} have incorrect values"
 
 
+@pytest.mark.base
 @pytest.mark.parametrize("nSteps", [1, 10])
 @pytest.mark.parametrize("nZ", [1, 5, 16])
 @pytest.mark.parametrize("nY", [1, 5, 16])
@@ -249,8 +250,7 @@ if __name__ == "__main__":
     parser.add_argument('--gridSizes', type=int, nargs='+', help="number of grid points in each dimension")
     args = parser.parse_args()
 
-    if sys.version_info >= (3, 11):
-        from pySDC.helpers.fieldsIO import writeFields_MPI, compareFields_MPI
+    from pySDC.helpers.fieldsIO import writeFields_MPI, compareFields_MPI
 
-        u0 = writeFields_MPI(**args.__dict__)
-        compareFields_MPI(args.fileName, u0, args.nSteps)
+    u0 = writeFields_MPI(**args.__dict__)
+    compareFields_MPI(args.fileName, u0, args.nSteps)
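
With the interpreter guard gone, test selection now runs through markers instead of version checks: the serial suites get the new `@pytest.mark.base` marker (selected with `pytest -m base`), while the Firedrake-dependent tests stay behind `-m firedrake` as invoked by the workflow above. This assumes both markers are registered in the project's pytest configuration; unregistered markers would only raise warnings unless pytest runs with `--strict-markers`.
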
diff --git a/pySDC/tests/test_helpers/test_gusto_coupling.py b/pySDC/tests/test_helpers/test_gusto_coupling.py
index 1eeeb9fa0f806aae315e42a80e8ad516eb004ea9..119442a223bd4ff088d424e116b8b136785df966 100644
--- a/pySDC/tests/test_helpers/test_gusto_coupling.py
+++ b/pySDC/tests/test_helpers/test_gusto_coupling.py
@@ -165,7 +165,7 @@ def test_generic_gusto_problem(setup):
     error = abs(un_forward - un_ref) / abs(un_ref)
 
     assert (
-        error < np.finfo(float).eps * 1e2
+        error < np.finfo(float).eps * 1e4
     ), f'Forward Euler does not match reference implementation! Got relative difference of {error}'
 
     # test backward Euler step
@@ -326,7 +326,7 @@ def test_pySDC_integrator_RK(use_transport_scheme, method, setup):
     print(error)
 
     assert (
-        error < solver_parameters['snes_rtol'] * 1e3
+        error < solver_parameters['snes_rtol'] * 1e4
     ), f'pySDC and Gusto differ in method {method}! Got relative difference of {error}'
 
 
@@ -449,7 +449,7 @@ def test_pySDC_integrator(use_transport_scheme, imex, setup):
     print(error)
 
     assert (
-        error < solver_parameters['snes_rtol'] * 1e3
+        error < solver_parameters['snes_rtol'] * 1e4
     ), f'pySDC and Gusto differ in SDC! Got relative difference of {error}'
 
 
@@ -633,7 +633,7 @@ def test_pySDC_integrator_MSSDC(n_steps, useMPIController, setup, submit=True, n
         my_env = os.environ.copy()
         my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
         cwd = '.'
-        cmd = f'mpiexec -np {n_tasks} python {__file__} --test=MSSDC --n_steps={n_steps}'.split()
+        cmd = f'mpiexec -np {n_tasks} --oversubscribe python {__file__} --test=MSSDC --n_steps={n_steps}'.split()
 
         p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
         p.wait()
@@ -762,7 +762,7 @@ def test_pySDC_integrator_MSSDC(n_steps, useMPIController, setup, submit=True, n
     print(error)
 
     assert (
-        error < solver_parameters['snes_rtol'] * 1e3
+        error < solver_parameters['snes_rtol'] * 1e4
     ), f'pySDC and Gusto differ in method {method}! Got relative difference of {error}'
 
 
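
`--oversubscribe` is Open MPI's flag for launching more ranks than the detected slots; presumably the hosted CI runners expose fewer cores than the task counts requested here, and without the flag `mpiexec` refuses to start.
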
diff --git a/pySDC/tests/test_tutorials/test_step_7.py b/pySDC/tests/test_tutorials/test_step_7.py
index 3445aea4b07b40047c0e6d10a885db9c12a4fe07..72f40a889cfe6cd96cbed88158bb7ac0db04914a 100644
--- a/pySDC/tests/test_tutorials/test_step_7.py
+++ b/pySDC/tests/test_tutorials/test_step_7.py
@@ -143,7 +143,7 @@ def test_E_MPI():
     my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
     cwd = '.'
     num_procs = 3
-    cmd = f'mpiexec -np {num_procs} python pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py --useMPIsweeper'.split()
+    cmd = f'mpiexec -np {num_procs} --oversubscribe python pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py --useMPIsweeper'.split()
 
     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
     p.wait()
diff --git a/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py b/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
index 0307d53592745aef3db3a0fa6c2f2395e8b30ea2..a1539a693e6dc5d6f164e738f51a4467333f69c5 100644
--- a/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
+++ b/pySDC/tutorial/step_7/E_pySDC_with_Firedrake.py
@@ -170,7 +170,7 @@ def runHeatFiredrake(useMPIsweeper=False, ML=False):
 
     # do tests that we got the same as last time
     n_nodes = 1 if useMPIsweeper else description['sweeper_params']['num_nodes']
-    assert error[0][1] < 2e-8
+    assert error[0][1] < 2e-7
     assert tot_iter == 10 if ML else 29
     assert tot_solver_setup == n_nodes
     assert tot_solves == n_nodes * tot_iter
diff --git a/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py b/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
index fdd9b0eac857c9df776fa28734fd3605823c6b5f..13e91946090e410cf1dc418f671c47453e06b841 100644
--- a/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
+++ b/pySDC/tutorial/step_7/F_pySDC_with_Gusto.py
@@ -155,7 +155,7 @@ def williamson_5(
     lamda, phi, _ = lonlatr_from_xyz(x, y, z)
 
     # Equation: coriolis
-    parameters = ShallowWaterParameters(H=mean_depth, g=g)
+    parameters = ShallowWaterParameters(mesh, H=mean_depth, g=g)
     Omega = parameters.Omega
     fexpr = 2 * Omega * z / radius
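
The added `mesh` argument appears to track an upstream Gusto API change in which the parameter classes now take the mesh as their first positional argument; the keyword arguments keep their previous meaning.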