diff --git a/.gitignore b/.gitignore
index 845782e782aa642c793b5dd212e503eda6c7142e..d72c27e16e8b919b4d0d08f99f42ad23105cb399 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@ step_*.png
 *_data.json
 !_dataRef.json
 *.pysdc
+*.vtr
 
 # Created by https://www.gitignore.io
 
diff --git a/etc/environment-base.yml b/etc/environment-base.yml
index 94b6af8ca001bb36ac75f034424c54c5b65e5acc..e5d3fb0aac2600835c8296b1c7cefd8b4ecc9381 100644
--- a/etc/environment-base.yml
+++ b/etc/environment-base.yml
@@ -10,6 +10,7 @@ dependencies:
   - sympy>=1.0
   - numba>=0.35
   - dill>=0.2.6
+  - vtk
   - pip
   - pip:
     - qmat>=0.1.8
diff --git a/pySDC/helpers/blocks.py b/pySDC/helpers/blocks.py
index 80482e712e402fd99c2f3a769e63e1178bb20014..d6c846aa9b54f0a180a57c704120d5a7d4ca6a90 100644
--- a/pySDC/helpers/blocks.py
+++ b/pySDC/helpers/blocks.py
@@ -1,3 +1,6 @@
+import numpy as np
+
+
 class BlockDecomposition(object):
     """
     Class decomposing a cartesian space domain (1D to 3D) into a given number of processors.
@@ -92,16 +95,9 @@ class BlockDecomposition(object):
 
     @property
     def ranks(self):
-        gRank, order = self.gRank, self.order
-        assert gRank is not None, "gRank attribute need to be set"
-        dim, nBlocks = self.dim, self.nBlocks
-        if dim == 1:
-            return (gRank,)
-        elif dim == 2:
-            div = nBlocks[-1] if order == "C" else nBlocks[0]
-            return (gRank // div, gRank % div)
-        else:
-            raise NotImplementedError(f"dim={dim}")
+        assert self.gRank is not None, "gRank attribute needs to be set"
+        cart = np.arange(np.prod(self.nBlocks)).reshape(self.nBlocks, order=self.order)
+        return list(np.argwhere(cart == self.gRank)[0])
 
     @property
     def localBounds(self):
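# Editor's note: a standalone sketch (not part of the patch) of what the new `ranks`
# property computes, namely the cartesian block coordinates of a global rank, now valid
# for any dimension and either memory ordering. The decomposition below is illustrative.
import numpy as np

nBlocks, order, gRank = [2, 3], "C", 4
cart = np.arange(np.prod(nBlocks)).reshape(nBlocks, order=order)
print(list(np.argwhere(cart == gRank)[0]))   # -> [1, 1] (second row, second column)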
diff --git a/pySDC/helpers/fieldsIO.py b/pySDC/helpers/fieldsIO.py
index 82675ac6525aa45ae64fbc4e02ee0e6e8c518cad..3b124dd109aae19a109f88cbccddcafaa04d5d16 100644
--- a/pySDC/helpers/fieldsIO.py
+++ b/pySDC/helpers/fieldsIO.py
@@ -4,9 +4,8 @@
 Generic utility class to write and read cartesian grid field solutions into binary files.
 It implements the base file handler class :class:`FieldsIO`, that is specialized into :
 
-- :class:`Scal0D` : for 0D fields (scalar) with a given number of variables
-- :class:`Cart1D` : for 1D fields with a given number of variables
-- :class:`Cart2D` : for 2D fields on a cartesian grid with a given number of variables
+- :class:`Scalar` : for 0D fields (scalar) with a given number of variables
+- :class:`Rectilinear` : for fields on N-dimensional rectilinear grids
 
 While each file handler need to be setup with specific parameters (grid, ...),
 each written file can be read using the same interface implemented in the
@@ -15,19 +14,23 @@ base abstract class.
 Example
 -------
 >>> import numpy as np
->>> from pySDC.helpers.fieldsIO import Cart1D, FieldsIO
+>>> from pySDC.helpers.fieldsIO import Rectilinear
 >>>
 >>> # Write some fields in files
->>> x = np.linspace(0, 1, 101)
->>> fOut = Cart2D(np.float64, "file.pysdc")
->>> fOut.setHeader(nVar=2, coordX=x)
+>>> x = np.linspace(0, 1, 128)
+>>> y = np.linspace(0, 1, 64)
+>>> fOut = Rectilinear(np.float64, "file.pysdc")
+>>> fOut.setHeader(nVar=2, coords=[x, y])
 >>> fOut.initialize()
 >>> times = [0, 1, 2]
->>> u0 = np.array([-1, 1])[:, None]*x[None, :]
+>>> xGrid, yGrid = np.meshgrid(x, y, indexing="ij")
+>>> u0 = np.array([-1, 1]).reshape((-1, 1, 1))*xGrid*yGrid
+>>> # u0 has shape [2, nX, nY]
 >>> for t in times:
 >>>    fOut.addField(t, t*u0)
 >>>
->>> # Read the file using a the generic interface
+>>> # Read the file using the generic interface
+>>> from pySDC.helpers.fieldsIO import FieldsIO
 >>> fIn = FieldsIO.fromFile("file.pysdc")
 >>> times = fIn.times
 >>> assert len(times) == fIn.nFields
@@ -36,20 +39,21 @@ Example
 
 Notes
 -----
-🚀 :class:`Cart1D` and :class:`Cart2D` are compatible with a MPI-based cartesian decomposition.
-See :class:`pySDC.tests.test_helpers.test_fieldsIO.writeFields_MPI` for an illustrative example.
+🚀 :class:`Rectilinear` is compatible with an MPI-based cartesian decomposition.
+See :func:`pySDC.helpers.fieldsIO.writeFields_MPI` for an illustrative example.
 
 Warning
 -------
-To use MPI collective writing, you need to call first the class methods :class:`Cart1D.initMPI`
-or :class:`Cart2D.initMPI` from the associated class (cf their docstring).
-Also, their associated `setHeader` methods **must be given the global grids coordinates**,
-wether code is run in parallel or not.
+To use MPI collective writing, you first need to call the class method :class:`Rectilinear.setupMPI` (cf. its docstring).
+Also, `Rectilinear.setHeader` **must be given the global grid coordinates**, whether the code is run in parallel or not.
+
+> ⚠️ Also: this module can only be imported with **Python 3.11 or higher**!
 """
 import os
 import numpy as np
 from typing import Type, TypeVar
 import logging
+import itertools
 
 T = TypeVar("T")
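# Editor's note: a hedged sketch of the MPI pattern described in the Warning of the
# module docstring above, modelled on the writeFields_MPI utility added further down.
# Communicator handling, grid sizes and the "Hybrid" decomposition algo are illustrative.
from mpi4py import MPI
import numpy as np
from pySDC.helpers.blocks import BlockDecomposition
from pySDC.helpers.fieldsIO import Rectilinear

comm = MPI.COMM_WORLD
x = np.linspace(0, 1, 128)   # global coordinates, identical on every rank
y = np.linspace(0, 1, 64)

# local sub-domain owned by this rank (start indices and number of points per dimension)
blocks = BlockDecomposition(comm.Get_size(), [x.size, y.size], "Hybrid", comm.Get_rank())
iLoc, nLoc = blocks.localBounds

Rectilinear.setupMPI(comm, iLoc, nLoc)             # must be called before any IO
fOut = Rectilinear(np.float64, "parallel.pysdc")
fOut.setHeader(nVar=1, coords=[x, y])              # global grids, even in parallel
fOut.initialize()
fOut.addField(0.0, np.zeros((1, *nLoc)))           # local field values only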
 
@@ -330,11 +334,11 @@ class FieldsIO:
 
 
 @FieldsIO.register(sID=0)
-class Scal0D(FieldsIO):
+class Scalar(FieldsIO):
     """FieldsIO handler storing a given number of scalar"""
 
     # -------------------------------------------------------------------------
-    # Overriden methods
+    # Overridden methods
     # -------------------------------------------------------------------------
     def setHeader(self, nVar):
         """
@@ -375,13 +379,21 @@ class Scal0D(FieldsIO):
 
 
 @FieldsIO.register(sID=1)
-class Cart1D(Scal0D):
-    """FieldsIO handler storing a given number of 2D scalar variables"""
+class Rectilinear(Scalar):
+    """FieldsIO handler storing a given number of scalar variables on an N-dimensional rectilinear grid"""
+
+    @staticmethod
+    def setupCoords(*coords):
+        """Utility function to set up the coordinate grids, given one 1D coordinate array per dimension"""
+        coords = [np.asarray(coord, dtype=np.float64) for coord in coords]
+        for axis, coord in enumerate(coords):
+            assert coord.ndim == 1, f"coord for {axis=} must be one dimensional"
+        return coords
 
     # -------------------------------------------------------------------------
-    # Overriden methods
+    # Overridden methods
     # -------------------------------------------------------------------------
-    def setHeader(self, nVar, coordX):
+    def setHeader(self, nVar, coords):
         """
         Set the descriptive grid structure to be stored in the file header.
 
@@ -389,21 +401,25 @@ class Cart1D(Scal0D):
         ----------
         nVar : int
             Number of 1D variables stored.
-        coordX : np.1darray
-            The grid coordinates in X direction.
+        coords : np.1darray or list[np.1darray]
+            The grid coordinates in each dimension.
 
         Note
         ----
-        When used in MPI decomposition mode, `coordX` **must** be the global grid.
+        When used in MPI decomposition mode, all coordinates **must** be the global grids.
         """
-        coords = self.setupCoords(coordX=coordX)
-        self.header = {"nVar": int(nVar), **coords}
-        self.nItems = nVar * self.nX
+        if not isinstance(coords, (tuple, list)):
+            coords = [coords]
+        coords = self.setupCoords(*coords)
+        self.header = {"nVar": int(nVar), "coords": coords}
+        self.nItems = nVar * self.nDoF
 
     @property
     def hInfos(self):
         """Array representing the grid structure to be written in the binary file."""
-        return [np.array([self.nVar, self.nX], dtype=np.int64), np.array(self.header["coordX"], dtype=np.float64)]
+        return [np.array([self.nVar, self.dim, *self.gridSizes], dtype=np.int32)] + [
+            np.array(coord, dtype=np.float64) for coord in self.header["coords"]
+        ]
 
     def readHeader(self, f):
         """
@@ -414,28 +430,65 @@ class Cart1D(Scal0D):
         f : `_io.TextIOWrapper`
             File to read the header from.
         """
-        nVar, nX = np.fromfile(f, dtype=np.int64, count=2)
-        coordX = np.fromfile(f, dtype=np.float64, count=nX)
-        self.setHeader(nVar, coordX)
+        nVar, dim = np.fromfile(f, dtype=np.int32, count=2)
+        gridSizes = np.fromfile(f, dtype=np.int32, count=dim)
+        coords = [np.fromfile(f, dtype=np.float64, count=n) for n in gridSizes]
+        self.setHeader(nVar, coords)
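# Editor's note: a standalone sketch (not part of the patch) of the header payload that
# `hInfos` produces and `readHeader` parses above: one int32 record [nVar, dim, *gridSizes],
# followed by one float64 array per coordinate axis. The sizes below are illustrative.
import numpy as np

nVar, coords = 2, [np.linspace(0, 1, 16), np.linspace(0, 1, 8)]
hInfos = [np.array([nVar, len(coords), *[c.size for c in coords]], dtype=np.int32)]
hInfos += [np.asarray(c, dtype=np.float64) for c in coords]
payload = b"".join(arr.tobytes() for arr in hInfos)

# parsing it back, mirroring readHeader (which uses np.fromfile on the open file)
nVarRead, dim = np.frombuffer(payload, dtype=np.int32, count=2)
gridSizes = np.frombuffer(payload, dtype=np.int32, count=dim, offset=8)
assert (nVarRead, list(gridSizes)) == (2, [16, 8])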
 
     def reshape(self, fields: np.ndarray):
-        fields.shape = (self.nVar, self.nX)
+        """Reshape the fields into an N-dimensional array (in-place operation)"""
+        fields.shape = (self.nVar, *self.gridSizes)
 
     # -------------------------------------------------------------------------
     # Class specifics
     # -------------------------------------------------------------------------
     @property
-    def nX(self):
-        """Number of points in x direction"""
-        return self.header["coordX"].size
+    def gridSizes(self):
+        """Number of grid points in each dimension"""
+        return [coord.size for coord in self.header["coords"]]
 
-    @staticmethod
-    def setupCoords(**coords):
-        """Utility function to setup grids in multuple dimensions, given the keyword arguments"""
-        coords = {name: np.asarray(coord, dtype=np.float64) for name, coord in coords.items()}
-        for name, coord in coords.items():
-            assert coord.ndim == 1, f"{name} must be one dimensional"
-        return coords
+    @property
+    def dim(self):
+        """Number of grid dimensions"""
+        return len(self.gridSizes)
+
+    @property
+    def nDoF(self):
+        """Number of degrees of freedom for one variable"""
+        return np.prod(self.gridSizes)
+
+    def toVTR(self, baseName, varNames, idxFormat="{:06d}"):
+        """
+        Convert all 3D fields stored in binary format (FieldsIO) into a list
+        of VTR files, which can later be read with Paraview or an equivalent
+        tool, e.g. to make videos.
+
+        Parameters
+        ----------
+        baseName : str
+            Base name of the VTR file.
+        varNames : list[str]
+            Variable names of the fields.
+        idxFormat : str, optional
+            Formatting string for the index of the VTR file.
+            The default is "{:06d}".
+
+        Example
+        -------
+        >>> # Suppose the FieldsIO object is already written into outputs.pysdc
+        >>> import os
+        >>> from pySDC.helpers.fieldsIO import Rectilinear
+        >>> os.makedirs("vtrFiles")  # to store all VTR files into a subfolder
+        >>> Rectilinear.fromFile("outputs.pysdc").toVTR(
+        >>>    baseName="vtrFiles/field", varNames=["u", "v", "w", "T", "p"])
+        """
+        assert self.dim == 3, "can only be used with 3D fields"
+        from pySDC.helpers.vtkIO import writeToVTR
+
+        template = f"{baseName}_{idxFormat}"
+        for i in range(self.nFields):
+            _, u = self.readField(i)
+            writeToVTR(template.format(i), u, self.header["coords"], varNames)
 
     # -------------------------------------------------------------------------
     # MPI-parallel implementation
@@ -443,7 +496,7 @@ class Cart1D(Scal0D):
     comm: MPI.Intracomm = None
 
     @classmethod
-    def setupMPI(cls, comm: MPI.Intracomm, iLocX, nLocX):
+    def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
         """
-        Setup the MPI mode for the files IO, considering a decomposition
-        of the 1D grid into contiuous subintervals.
+        Set up the MPI mode for the file IO, considering a decomposition
+        of the N-dimensional grid into contiguous subdomains.
@@ -452,14 +505,14 @@ class Cart1D(Scal0D):
         ----------
         comm : MPI.Intracomm
             The space decomposition communicator.
-        iLocX : int
-            Starting index of the local sub-domain in the global `coordX`.
-        nLocX : int
+        iLoc : list[int]
+            Starting index of the local sub-domain in the global coordinates.
+        nLoc : list[int]
             Number of points in the local sub-domain.
         """
         cls.comm = comm
-        cls.iLocX = iLocX
-        cls.nLocX = nLocX
+        cls.iLoc = iLoc
+        cls.nLoc = nLoc
         cls.mpiFile = None
 
     @property
@@ -560,7 +613,10 @@ class Cart1D(Scal0D):
 
         field = np.asarray(field)
         assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
-        assert field.shape == (self.nVar, self.nLocX), f"expected {(self.nVar, self.nLocX)} shape, got {field.shape}"
+        assert field.shape == (
+            self.nVar,
+            *self.nLoc,
+        ), f"expected {(self.nVar, *self.nLoc)} shape, got {field.shape}"
 
         offset0 = self.fileSize
         self.MPI_FILE_OPEN(mode="a")
@@ -568,15 +624,22 @@ class Cart1D(Scal0D):
             self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
         offset0 += self.tSize
 
-        for iVar in range(self.nVar):
-            offset = offset0 + (iVar * self.nX + self.iLocX) * self.itemSize
-            self.MPI_WRITE_AT(offset, field[iVar])
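+        # each MPI write covers one contiguous line of nLoc[-1] items along the last axis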
+        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
+            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
+            self.MPI_WRITE_AT(offset, field[iVar, *iBeg])
         self.MPI_FILE_CLOSE()
 
+    def iPos(self, iVar, iX):
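+        # C-ordered flat index, in the global [nVar, *gridSizes] array, of the element
+        # (iVar, iLoc[0] + iX[0], ..., iLoc[dim-2] + iX[dim-2], iLoc[dim-1])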
+        iPos = iVar * self.nDoF
+        for axis in range(self.dim - 1):
+            iPos += (self.iLoc[axis] + iX[axis]) * np.prod(self.gridSizes[axis + 1 :])
+        iPos += self.iLoc[-1]
+        return iPos
+
     def readField(self, idx):
         """
         Read one field stored in the binary file, corresponding to the given
-        time index, possibly in MPI mode.
+        time index, using MPI when a space-parallel decomposition is used.
 
         Parameters
         ----------
@@ -596,174 +659,79 @@ class Cart1D(Scal0D):
         """
         if not self.MPI_ON:
             return super().readField(idx)
-        idx = self.formatIndex(idx)
 
-        offset0 = self.hSize + idx * (self.fSize + self.tSize)
+        idx = self.formatIndex(idx)
+        offset0 = self.hSize + idx * (self.tSize + self.fSize)
         with open(self.fileName, "rb") as f:
             t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset0)[0])
         offset0 += self.tSize
 
-        field = np.empty((self.nVar, self.nLocX), dtype=self.dtype)
+        field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)
 
         self.MPI_FILE_OPEN(mode="r")
-        for iVar in range(self.nVar):
-            offset = offset0 + (iVar * self.nX + self.iLocX) * self.itemSize
-            self.MPI_READ_AT(offset, field[iVar])
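+        # mirror of addField: each MPI read fills one contiguous line of nLoc[-1] items along the last axis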
+        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
+            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
+            self.MPI_READ_AT(offset, field[iVar, *iBeg])
         self.MPI_FILE_CLOSE()
 
         return t, field
 
 
-@FieldsIO.register(sID=2)
-class Cart2D(Cart1D):
-    """FieldsIO handler storing a given number of 2D scalar variables"""
+# -----------------------------------------------------------------------------------------------
+# Utility functions used for testing
+# -----------------------------------------------------------------------------------------------
+def initGrid(nVar, gridSizes):
+    dim = len(gridSizes)
+    coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
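+    # broadcast (1, ..., nVar) against each coordinate axis: u0[i, j1, ..., jd] = (i+1) * x1[j1] * ... * xd[jd]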
+    s = [None] * dim
+    u0 = np.array(np.arange(nVar) + 1)[:, *s]
+    for x in np.meshgrid(*coords, indexing="ij"):
+        u0 = u0 * x
+    return coords, u0
 
-    # -------------------------------------------------------------------------
-    # Overriden methods
-    # -------------------------------------------------------------------------
-    def setHeader(self, nVar, coordX, coordY):
-        """
-        Set the descriptive grid structure to be stored in the file header.
 
-        Parameters
-        ----------
-        nVar : int
-            Number of 1D variables stored.
-        coordX : np.1darray
-            The grid coordinates in x direction.
-        coordY : np.1darray
-            The grid coordinates in y direction.
-
-        Note
-        ----
-        When used in MPI decomposition mode, `coordX` and `coordX` **must** be the global grid.
-        """
-        coords = self.setupCoords(coordX=coordX, coordY=coordY)
-        self.header = {"nVar": int(nVar), **coords}
-        self.nItems = nVar * self.nX * self.nY
-
-    @property
-    def hInfos(self):
-        """Array representing the grid structure to be written in the binary file."""
-        return [
-            np.array([self.nVar, self.nX, self.nY], dtype=np.int64),
-            np.array(self.header["coordX"], dtype=np.float64),
-            np.array(self.header["coordY"], dtype=np.float64),
-        ]
-
-    def readHeader(self, f):
-        """
-        Read the header from the binary file storing the fields.
+def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):
+    coords, u0 = initGrid(nVar, gridSizes)
 
-        Parameters
-        ----------
-        f : `_io.TextIOWrapper`
-            File to read the header from.
-        """
-        nVar, nX, nY = np.fromfile(f, dtype=np.int64, count=3)
-        coordX = np.fromfile(f, dtype=np.float64, count=nX)
-        coordY = np.fromfile(f, dtype=np.float64, count=nY)
-        self.setHeader(nVar, coordX, coordY)
-
-    def reshape(self, fields: np.ndarray):
-        """Reshape the fields to a [nVar, nX, nY] array (inplace operation)"""
-        fields.shape = (self.nVar, self.nX, self.nY)
-
-    # -------------------------------------------------------------------------
-    # Class specifics
-    # -------------------------------------------------------------------------
-    @property
-    def nY(self):
-        """Number of points in y direction"""
-        return self.header["coordY"].size
-
-    # -------------------------------------------------------------------------
-    # MPI-parallel implementation
-    # -------------------------------------------------------------------------
-    @classmethod
-    def setupMPI(cls, comm: MPI.Intracomm, iLocX, nLocX, iLocY, nLocY):
-        super().setupMPI(comm, iLocX, nLocX)
-        cls.iLocY = iLocY
-        cls.nLocY = nLocY
-
-    def addField(self, time, field):
-        """
-        Append one field solution at the end of the file with one given time,
-        possibly using MPI.
-
-        Parameters
-        ----------
-        time : float-like
-            The associated time of the field solution.
-        field : np.ndarray
-            The (local) field values.
-
-        Note
-        ----
-        If a MPI decomposition is used, field **must be** the local field values.
-        """
-        if not self.MPI_ON:
-            return super().addField(time, field)
-
-        assert self.initialized, "cannot add field to a non initialized FieldsIO"
+    from mpi4py import MPI
+    from pySDC.helpers.blocks import BlockDecomposition
+    from pySDC.helpers.fieldsIO import Rectilinear
 
-        field = np.asarray(field)
-        assert field.dtype == self.dtype, f"expected {self.dtype} dtype, got {field.dtype}"
-        assert field.shape == (
-            self.nVar,
-            self.nLocX,
-            self.nLocY,
-        ), f"expected {(self.nVar, self.nLocX, self.nLocY)} shape, got {field.shape}"
+    comm = MPI.COMM_WORLD
+    MPI_SIZE = comm.Get_size()
+    MPI_RANK = comm.Get_rank()
 
-        offset0 = self.fileSize
-        self.MPI_FILE_OPEN(mode="a")
-        if self.MPI_ROOT:
-            self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
-        offset0 += self.tSize
+    blocks = BlockDecomposition(MPI_SIZE, gridSizes, algo, MPI_RANK)
 
-        for iVar in range(self.nVar):
-            for iX in range(self.nLocX):
-                offset = offset0 + (iVar * self.nX * self.nY + (self.iLocX + iX) * self.nY + self.iLocY) * self.itemSize
-                self.MPI_WRITE_AT(offset, field[iVar, iX])
-        self.MPI_FILE_CLOSE()
+    iLoc, nLoc = blocks.localBounds
+    Rectilinear.setupMPI(comm, iLoc, nLoc)
+    s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
+    u0 = u0[:, *s]
+    print(MPI_RANK, u0.shape)
 
-    def readField(self, idx):
-        """
-        Read one field stored in the binary file, corresponding to the given
-        time index, eventually in MPI mode.
+    f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
+    f1.setHeader(nVar=nVar, coords=coords)
 
-        Parameters
-        ----------
-        idx : int
-            Positional index of the field.
+    u0 = np.asarray(u0, dtype=f1.dtype)
+    f1.initialize()
 
-        Returns
-        -------
-        t : float
-            Stored time for this field.
-        field : np.ndarray
-            Read (local) fields in a numpy array.
+    times = np.arange(nSteps) / nSteps
+    for t in times:
+        ut = (u0 * t).astype(f1.dtype)
+        f1.addField(t, ut)
 
-        Note
-        ----
-        If a MPI decomposition is used, it reads and returns the local fields values only.
-        """
-        if not self.MPI_ON:
-            return super().readField(idx)
-        idx = self.formatIndex(idx)
+    return u0
 
-        offset0 = self.hSize + idx * (self.tSize + self.fSize)
-        with open(self.fileName, "rb") as f:
-            t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset0)[0])
-        offset0 += self.tSize
 
-        field = np.empty((self.nVar, self.nLocX, self.nLocY), dtype=self.dtype)
+def compareFields_MPI(fileName, u0, nSteps):
+    from pySDC.helpers.fieldsIO import FieldsIO
 
-        self.MPI_FILE_OPEN(mode="r")
-        for iVar in range(self.nVar):
-            for iX in range(self.nLocX):
-                offset = offset0 + (iVar * self.nX * self.nY + (self.iLocX + iX) * self.nY + self.iLocY) * self.itemSize
-                self.MPI_READ_AT(offset, field[iVar, iX])
-        self.MPI_FILE_CLOSE()
+    f2 = FieldsIO.fromFile(fileName)
 
-        return t, field
+    times = np.arange(nSteps) / nSteps
+    for idx, t in enumerate(times):
+        u1 = u0 * t
+        t2, u2 = f2.readField(idx)
+        assert t2 == t, f"fields[{idx}] in {f2} has incorrect time ({t2} instead of {t})"
+        assert u2.shape == u1.shape, f"fields[{idx}] in {f2} has incorrect shape"
+        assert np.allclose(u2, u1), f"fields[{idx}] in {f2} has incorrect values"
diff --git a/pySDC/helpers/vtkIO.py b/pySDC/helpers/vtkIO.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cf0c7f994128d1c88ff62d780dfbc7fb7ddf66d
--- /dev/null
+++ b/pySDC/helpers/vtkIO.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Helper functions for VTK file IO (to be used with Paraview or PyVista)
+"""
+import os
+import vtk
+from vtkmodules.util import numpy_support
+import numpy as np
+
+
+def writeToVTR(fileName: str, data, coords, varNames):
+    """
+    Write a data array containing variables from a 3D rectilinear grid into a VTR file.
+
+    Parameters
+    ----------
+    fileName : str
+        Name of the VTR file, can be with or without the .vtr extension.
+    data : np.4darray
+        Array containing all the variables with [nVar, nX, nY, nZ] shape.
+    coords : list[np.1darray]
+        Coordinates in each direction.
+    varNames : list[str]
+        Variable names.
+
+    Returns
+    -------
+    fileName : str
+        Name of the VTR file.
+    """
+    data = np.asarray(data)
+    nVar, *gridSizes = data.shape
+
+    assert len(gridSizes) == 3, "function can be used only for 3D grid data"
+    assert nVar == len(varNames), "varNames must have as many variables as data"
+    assert [len(np.ravel(coord)) for coord in coords] == gridSizes, "coordinate size incompatible with data shape"
+
+    nX, nY, nZ = gridSizes
+    vtr = vtk.vtkRectilinearGrid()
+    vtr.SetDimensions(nX, nY, nZ)
+
+    def vect(x):
+        return numpy_support.numpy_to_vtk(num_array=x, deep=True, array_type=vtk.VTK_FLOAT)
+
+    x, y, z = coords
+    vtr.SetXCoordinates(vect(x))
+    vtr.SetYCoordinates(vect(y))
+    vtr.SetZCoordinates(vect(z))
+
+    def field(u):
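+        # VTK point data expects x varying fastest, which is Fortran ordering of the [nX, nY, nZ] array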
+        return numpy_support.numpy_to_vtk(num_array=u.ravel(order='F'), deep=True, array_type=vtk.VTK_FLOAT)
+
+    pointData = vtr.GetPointData()
+    for name, u in zip(varNames, data):
+        uVTK = field(u)
+        uVTK.SetName(name)
+        pointData.AddArray(uVTK)
+
+    writer = vtk.vtkXMLRectilinearGridWriter()
+    if not fileName.endswith(".vtr"):
+        fileName += ".vtr"
+    writer.SetFileName(fileName)
+    writer.SetInputData(vtr)
+    writer.Write()
+
+    return fileName
+
+
+def readFromVTR(fileName: str):
+    """
+    Read a VTR file into a numpy 4darray
+
+    Parameters
+    ----------
+    fileName : str
+        Name of the VTR file, can be with or without the .vtr extension.
+
+    Returns
+    -------
+    data : np.4darray
+        Array containing all the variables with [nVar, nX, nY, nZ] shape.
+    coords : list[np.1darray]
+        Coordinates in each direction.
+    varNames : list[str]
+        Variable names.
+    """
+    if not fileName.endswith(".vtr"):
+        fileName += ".vtr"
+    assert os.path.isfile(fileName), f"{fileName} does not exist"
+
+    reader = vtk.vtkXMLRectilinearGridReader()
+    reader.SetFileName(fileName)
+    reader.Update()
+
+    vtr = reader.GetOutput()
+    gridSizes = vtr.GetDimensions()
+    assert len(gridSizes) == 3, "can only read 3D data"
+
+    def vect(x):
+        return numpy_support.vtk_to_numpy(x)
+
+    coords = [vect(vtr.GetXCoordinates()), vect(vtr.GetYCoordinates()), vect(vtr.GetZCoordinates())]
+    pointData = vtr.GetPointData()
+    varNames = [pointData.GetArrayName(i) for i in range(pointData.GetNumberOfArrays())]
+    data = [numpy_support.vtk_to_numpy(pointData.GetArray(name)).reshape(gridSizes, order="F") for name in varNames]
+    data = np.array(data)
+
+    return data, coords, varNames
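# Editor's note: a hedged usage sketch of the two helpers above; file name, shapes and
# variable names are illustrative. Requires the `vtk` package added to the environment
# files in this PR. Data is stored as VTK_FLOAT, hence the tolerance when comparing.
import numpy as np
from pySDC.helpers.vtkIO import writeToVTR, readFromVTR

data = np.random.rand(2, 16, 8, 4)                    # [nVar, nX, nY, nZ]
coords = [np.linspace(0, 1, n) for n in (16, 8, 4)]
fileName = writeToVTR("demo", data, coords, varNames=["u", "v"])

data2, coords2, varNames = readFromVTR(fileName)
assert data2.shape == data.shape and varNames == ["u", "v"]
assert np.allclose(data2, data, atol=1e-6)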
diff --git a/pySDC/tests/test_helpers/test_fieldsIO.py b/pySDC/tests/test_helpers/test_fieldsIO.py
index 5d5be0151c01f30651471da29dadc960be320d41..1e75505d0faab3f8c062a251466fe1a2b378c4b4 100644
--- a/pySDC/tests/test_helpers/test_fieldsIO.py
+++ b/pySDC/tests/test_helpers/test_fieldsIO.py
@@ -1,4 +1,12 @@
+import os
+import sys
+import glob
 import pytest
+
+if sys.version_info < (3, 11):
+    pytest.skip("skipping fieldsIO tests on python lower than 3.11", allow_module_level=True)
+
+import itertools
 import numpy as np
 
 from pySDC.helpers.fieldsIO import DTYPES, FieldsIO
@@ -7,25 +15,21 @@ FieldsIO.ALLOW_OVERWRITE = True
 
 
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
-@pytest.mark.parametrize("nDim", range(3))
-def testHeader(nDim, dtypeIdx):
-    from pySDC.helpers.fieldsIO import FieldsIO, Scal0D, Cart1D, Cart2D
+@pytest.mark.parametrize("dim", range(4))
+def testHeader(dim, dtypeIdx):
+    from pySDC.helpers.fieldsIO import FieldsIO, Scalar, Rectilinear
 
     fileName = "testHeader.pysdc"
     dtype = DTYPES[dtypeIdx]
 
-    coordX = np.linspace(0, 1, num=256, endpoint=False)
-    coordY = np.linspace(0, 1, num=64, endpoint=False)
+    coords = [np.linspace(0, 1, num=n, endpoint=False) for n in [256, 64, 32]]
 
-    if nDim == 0:
-        Class = Scal0D
+    if dim == 0:
+        Class = Scalar
         args = {"nVar": 20}
-    elif nDim == 1:
-        Class = Cart1D
-        args = {"nVar": 10, "coordX": coordX}
-    elif nDim == 2:
-        Class = Cart2D
-        args = {"nVar": 10, "coordX": coordX, "coordY": coordY}
+    else:
+        Class = Rectilinear
+        args = {"nVar": 10, "coords": coords[:dim]}
 
     f1 = Class(dtype, fileName)
     assert f1.__str__() == f1.__repr__(), "__repr__ and __str__ do not return the same result"
@@ -64,13 +68,13 @@ def testHeader(nDim, dtypeIdx):
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
 @pytest.mark.parametrize("nSteps", [1, 2, 10, 100])
 @pytest.mark.parametrize("nVar", [1, 2, 5])
-def testScal0D(nVar, nSteps, dtypeIdx):
-    from pySDC.helpers.fieldsIO import FieldsIO, Scal0D
+def testScalar(nVar, nSteps, dtypeIdx):
+    from pySDC.helpers.fieldsIO import FieldsIO, Scalar
 
-    fileName = "testScal0D.pysdc"
+    fileName = "testScalar.pysdc"
     dtype = DTYPES[dtypeIdx]
 
-    f1 = Scal0D(dtype, fileName)
+    f1 = Scalar(dtype, fileName)
     f1.setHeader(nVar=nVar)
 
     assert f1.nItems == nVar, f"{f1} do not have nItems == nVar"
@@ -104,254 +108,133 @@ def testScal0D(nVar, nSteps, dtypeIdx):
 
 @pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
 @pytest.mark.parametrize("nSteps", [1, 2, 5, 10])
-@pytest.mark.parametrize("nX", [5, 10, 16, 32, 64])
 @pytest.mark.parametrize("nVar", [1, 2, 5])
-def testCart1D(nVar, nX, nSteps, dtypeIdx):
-    from pySDC.helpers.fieldsIO import FieldsIO, Cart1D, DTYPES
+@pytest.mark.parametrize("dim", [1, 2, 3])
+def testRectilinear(dim, nVar, nSteps, dtypeIdx):
+    from pySDC.helpers.fieldsIO import FieldsIO, Rectilinear, DTYPES
 
-    fileName = "testCart1D.pysdc"
+    fileName = f"testRectilinear{dim}D.pysdc"
     dtype = DTYPES[dtypeIdx]
 
-    coordX = np.linspace(0, 1, num=nX, endpoint=False)
-    nX = coordX.size
-
-    f1 = Cart1D(dtype, fileName)
-    f1.setHeader(nVar=nVar, coordX=coordX)
-
-    assert f1.nItems == nVar * nX, f"{f1} do not have nItems == nVar*nX"
-    assert f1.nX == nX, f"{f1} has incorrect nX"
-    f1.initialize()
-
-    u0 = np.random.rand(nVar, nX).astype(f1.dtype)
-    times = np.arange(nSteps) / nSteps
+    for gridSizes in itertools.product(*[[5, 10, 16]] * dim):
 
-    for t in times:
-        ut = (u0 * t).astype(f1.dtype)
-        f1.addField(t, ut)
+        coords = [np.linspace(0, 1, num=n, endpoint=False) for n in gridSizes]
 
-    assert f1.nFields == nSteps, f"{f1} do not have nFields == nSteps"
-    assert np.allclose(f1.times, times), f"{f1} has wrong times stored in file"
+        f1 = Rectilinear(dtype, fileName)
+        f1.setHeader(nVar=nVar, coords=coords)
 
-    f2 = FieldsIO.fromFile(fileName)
-
-    assert f1.nFields == f2.nFields, f"f2 ({f2}) has different nFields than f1 ({f1})"
-    assert f1.times == f2.times, f"f2 ({f2}) has different times than f1 ({f1})"
-    assert (f1.time(-1) == f2.times[-1]) and (
-        f1.times[-1] == f2.time(-1)
-    ), f"f2 ({f2}) has different last time than f1 ({f1})"
-
-    for idx, t in enumerate(times):
-        u1 = u0 * t
-        t2, u2 = f2.readField(idx)
-        assert t2 == t, f"{idx}'s fields in {f1} has incorrect time"
-        assert u2.shape == u1.shape, f"{idx}'s fields in {f1} has incorrect shape"
-        assert np.allclose(u2, u1), f"{idx}'s fields in {f1} has incorrect values"
+        assert f1.dim == dim, f"{f1} has incorrect dimension"
+        assert f1.gridSizes == list(gridSizes), f"{f1} has incorrect gridSizes"
+        assert f1.nDoF == np.prod(gridSizes), f"{f1} has incorrect nDOF"
+        assert f1.nItems == nVar * np.prod(gridSizes), f"{f1} does not have nItems == nVar*product(gridSizes)"
 
+        f1.initialize()
+        u0 = np.random.rand(nVar, *gridSizes).astype(f1.dtype)
+        times = np.arange(nSteps) / nSteps
 
-@pytest.mark.parametrize("dtypeIdx", DTYPES.keys())
-@pytest.mark.parametrize("nSteps", [1, 2, 5, 10])
-@pytest.mark.parametrize("nY", [5, 10, 16])
-@pytest.mark.parametrize("nX", [5, 10, 16])
-@pytest.mark.parametrize("nVar", [1, 2, 5])
-def testCart2D(nVar, nX, nY, nSteps, dtypeIdx):
-    from pySDC.helpers.fieldsIO import FieldsIO, Cart2D, DTYPES
-
-    fileName = "testCart2D.pysdc"
-    dtype = DTYPES[dtypeIdx]
-
-    coordX = np.linspace(0, 1, num=nX, endpoint=False)
-    coordY = np.linspace(0, 1, num=nY, endpoint=False)
-
-    f1 = Cart2D(dtype, fileName)
-    f1.setHeader(nVar=nVar, coordX=coordX, coordY=coordY)
-
-    assert f1.nItems == nVar * nX * nY, f"{f1} do not have nItems == nVar*nX"
-    assert f1.nX == nX, f"{f1} has incorrect nX"
-    assert f1.nY == nY, f"{f1} has incorrect nY"
-    f1.initialize()
-
-    u0 = np.random.rand(nVar, nX, nY).astype(f1.dtype)
-    times = np.arange(nSteps) / nSteps
-
-    for t in times:
-        ut = (u0 * t).astype(f1.dtype)
-        f1.addField(t, ut)
-
-    assert f1.nFields == nSteps, f"{f1} do not have nFields == nSteps"
-    assert np.allclose(f1.times, times), f"{f1} has wrong times stored in file"
-
-    f2 = FieldsIO.fromFile(fileName)
-
-    assert f1.nFields == f2.nFields, f"f2 ({f2}) has different nFields than f1 ({f1})"
-    assert f1.times == f2.times, f"f2 ({f2}) has different times than f1 ({f1})"
-    assert (f1.time(-1) == f2.times[-1]) and (
-        f1.times[-1] == f2.time(-1)
-    ), f"f2 ({f2}) has different last time than f1 ({f1})"
-
-    for idx, t in enumerate(times):
-        u1 = u0 * t
-        t2, u2 = f2.readField(idx)
-        assert t2 == t, f"{idx}'s fields in {f1} has incorrect time"
-        assert u2.shape == u1.shape, f"{idx}'s fields in {f1} has incorrect shape"
-        assert np.allclose(u2, u1), f"{idx}'s fields in {f1} has incorrect values"
-
-
-def initGrid(nVar, nX, nY=None):
-    nDim = 1
-    if nY is not None:
-        nDim += 1
-    x = np.linspace(0, 1, num=nX, endpoint=False)
-    coords = (x,)
-    gridSizes = (nX,)
-    u0 = np.array(np.arange(nVar) + 1)[:, None] * x[None, :]
-
-    if nDim > 1:
-        y = np.linspace(0, 1, num=nY, endpoint=False)
-        coords += (y,)
-        gridSizes += (nY,)
-        u0 = u0[:, :, None] * y[None, None, :]
-
-    return coords, gridSizes, u0
-
-
-def writeFields_MPI(fileName, nDim, dtypeIdx, algo, nSteps, nVar, nX, nY=None):
-    coords, gridSizes, u0 = initGrid(nVar, nX, nY)
-
-    from mpi4py import MPI
-    from pySDC.helpers.blocks import BlockDecomposition
-    from pySDC.helpers.fieldsIO import Cart1D, Cart2D
-
-    comm = MPI.COMM_WORLD
-    MPI_SIZE = comm.Get_size()
-    MPI_RANK = comm.Get_rank()
+        for t in times:
+            ut = (u0 * t).astype(f1.dtype)
+            f1.addField(t, ut)
 
-    blocks = BlockDecomposition(MPI_SIZE, gridSizes, algo, MPI_RANK)
+        assert f1.nFields == nSteps, f"{f1} does not have nFields == nSteps"
+        assert np.allclose(f1.times, times), f"{f1} has wrong times stored in file"
 
-    if nDim == 1:
-        (iLocX,), (nLocX,) = blocks.localBounds
-        (pRankX,) = blocks.ranks
-        Cart1D.setupMPI(comm, iLocX, nLocX)
-        u0 = u0[:, iLocX : iLocX + nLocX]
+        f2 = FieldsIO.fromFile(fileName)
 
-        f1 = Cart1D(DTYPES[dtypeIdx], fileName)
-        f1.setHeader(nVar=nVar, coordX=coords[0])
+        assert f1.nFields == f2.nFields, f"f2 ({f2}) has different nFields than f1 ({f1})"
+        assert f1.times == f2.times, f"f2 ({f2}) has different times than f1 ({f1})"
+        assert (f1.time(-1) == f2.times[-1]) and (
+            f1.times[-1] == f2.time(-1)
+        ), f"f2 ({f2}) has different last time than f1 ({f1})"
 
-    if nDim == 2:
-        (iLocX, iLocY), (nLocX, nLocY) = blocks.localBounds
-        Cart2D.setupMPI(comm, iLocX, nLocX, iLocY, nLocY)
-        u0 = u0[:, iLocX : iLocX + nLocX, iLocY : iLocY + nLocY]
+        for idx, t in enumerate(times):
+            u1 = u0 * t
+            t2, u2 = f2.readField(idx)
+            assert t2 == t, f"fields[{idx}] in {f1} has incorrect time"
+            assert u2.shape == u1.shape, f"fields[{idx}] in {f1} has incorrect shape"
+            assert np.allclose(u2, u1), f"fields[{idx}] in {f1} has incorrect values"
 
-        f1 = Cart2D(DTYPES[dtypeIdx], fileName)
-        f1.setHeader(nVar=nVar, coordX=coords[0], coordY=coords[1])
-
-    u0 = np.asarray(u0, dtype=f1.dtype)
-    f1.initialize()
 
+@pytest.mark.parametrize("nSteps", [1, 10])
+@pytest.mark.parametrize("nZ", [1, 5, 16])
+@pytest.mark.parametrize("nY", [1, 5, 16])
+@pytest.mark.parametrize("nX", [1, 5, 16])
+@pytest.mark.parametrize("nVar", [1, 2, 3])
+def testToVTR(nVar, nX, nY, nZ, nSteps):
+
+    from pySDC.helpers.fieldsIO import Rectilinear
+    from pySDC.helpers.vtkIO import readFromVTR
+
+    coords = [np.linspace(0, 1, num=n, endpoint=False) for n in [nX, nY, nZ]]
+    file = Rectilinear(np.float64, "testToVTR.pysdc")
+    file.setHeader(nVar=nVar, coords=coords)
+    file.initialize()
+    u0 = np.random.rand(nVar, nX, nY, nZ).astype(file.dtype)
     times = np.arange(nSteps) / nSteps
     for t in times:
-        ut = (u0 * t).astype(f1.dtype)
-        f1.addField(t, ut)
-
-    return u0
+        ut = (u0 * t).astype(file.dtype)
+        file.addField(t, ut)
 
+    # Clean up any VTR files possibly left over by previous tests ...
+    for f in glob.glob("testToVTR*.vtr"):
+        os.remove(f)
 
-def compareFields_MPI(fileName, u0, nSteps):
-    from pySDC.helpers.fieldsIO import FieldsIO
+    file.toVTR("testToVTR", varNames=[f"var{i}" for i in range(nVar)])
 
-    f2 = FieldsIO.fromFile(fileName)
+    vtrFiles = glob.glob("testToVTR*.vtr")
+    assert len(vtrFiles) == file.nFields
 
-    times = np.arange(nSteps) / nSteps
-    for idx, t in enumerate(times):
-        u1 = u0 * t
-        t2, u2 = f2.readField(idx)
-        assert t2 == t, f"{idx}'s fields in {f2} has incorrect time"
-        assert u2.shape == u1.shape, f"{idx}'s fields in {f2} has incorrect shape"
-        assert np.allclose(u2, u1), f"{idx}'s fields in {f2} has incorrect values"
+    vtrFiles.sort(key=lambda name: int(name.split("_")[-1][:-4]))
+    for i, vFile in enumerate(vtrFiles):
+        uVTR, coords, _ = readFromVTR(vFile)
+        _, uFile = file.readField(i)
+        assert np.allclose(uFile, uVTR), "mismatch between data"
+    for i, (xVTR, xFile) in enumerate(zip(coords, file.header["coords"])):
+        assert np.allclose(xVTR, xFile), f"coordinate mismatch in dir. {i}"
 
 
 @pytest.mark.mpi4py
-@pytest.mark.parametrize("nX", [61, 16, 32])
 @pytest.mark.parametrize("nVar", [1, 4])
 @pytest.mark.parametrize("nSteps", [1, 10])
 @pytest.mark.parametrize("algo", ["ChatGPT", "Hybrid"])
 @pytest.mark.parametrize("dtypeIdx", [0, 1])
-@pytest.mark.parametrize("nProcs", [1, 2, 4])
-def testCart1D_MPI(nProcs, dtypeIdx, algo, nSteps, nVar, nX):
+@pytest.mark.parametrize("nProcs", [2, 4])
+@pytest.mark.parametrize("dim", [2, 3])
+def testRectilinear_MPI(dim, nProcs, dtypeIdx, algo, nSteps, nVar):
 
     import subprocess
 
-    fileName = "testCart1D_MPI.pysdc"
-
-    cmd = f"mpirun -np {nProcs} python {__file__} --fileName {fileName} --nDim 1 "
-    cmd += f"--dtypeIdx {dtypeIdx} --algo {algo} --nSteps {nSteps} --nVar {nVar} --nX {nX}"
-
-    p = subprocess.Popen(cmd.split(), cwd=".")
-    p.wait()
-    assert p.returncode == 0, f"MPI write with {nProcs} did not return code 0, but {p.returncode}"
-
-    from pySDC.helpers.fieldsIO import FieldsIO, Cart1D
-
-    f2: Cart1D = FieldsIO.fromFile(fileName)
-
-    assert type(f2) == Cart1D, f"incorrect type in MPI written fields {f2}"
-    assert f2.nFields == nSteps, f"incorrect nFields in MPI written fields {f2}"
-    assert f2.nVar == nVar, f"incorrect nVar in MPI written fields {f2}"
-    assert f2.nX == nX, f"incorrect nX in MPI written fields {f2}"
-
-    coords, _, u0 = initGrid(nVar, nX)
-    assert np.allclose(f2.header['coordX'], coords[0]), f"incorrect coordX in MPI written fields {f2}"
-
-    times = np.arange(nSteps) / nSteps
-    for idx, t in enumerate(times):
-        u1 = u0 * t
-        t2, u2 = f2.readField(idx)
-        assert t2 == t, f"{idx}'s fields in {f2} has incorrect time"
-        assert u2.shape == u1.shape, f"{idx}'s fields in {f2} has incorrect shape"
-        assert np.allclose(u2, u1), f"{idx}'s fields in {f2} has incorrect values"
-
+    fileName = f"testRectilinear{dim}D_MPI.pysdc"
 
-@pytest.mark.mpi4py
-@pytest.mark.parametrize("nY", [61, 16, 32])
-@pytest.mark.parametrize("nX", [61, 16, 32])
-@pytest.mark.parametrize("nVar", [1, 4])
-@pytest.mark.parametrize("nSteps", [1, 10])
-@pytest.mark.parametrize("algo", ["ChatGPT", "Hybrid"])
-@pytest.mark.parametrize("dtypeIdx", [0, 1])
-@pytest.mark.parametrize("nProcs", [1, 2, 4])
-def testCart2D_MPI(nProcs, dtypeIdx, algo, nSteps, nVar, nX, nY):
+    for gridSizes in itertools.product(*[[61, 16]] * dim):
 
-    import subprocess
+        cmd = f"mpirun -np {nProcs} python {__file__} --fileName {fileName}"
+        cmd += f" --dtypeIdx {dtypeIdx} --algo {algo} --nSteps {nSteps} --nVar {nVar} --gridSizes {' '.join([str(n) for n in gridSizes])}"
 
-    fileName = "testCart2D_MPI.pysdc"
+        p = subprocess.Popen(cmd.split(), cwd=".")
+        p.wait()
+        assert p.returncode == 0, f"MPI write with {nProcs} proc(s) did not return code 0, but {p.returncode}"
 
-    cmd = f"mpirun -np {nProcs} python {__file__} --fileName {fileName} --nDim 2 "
-    cmd += f"--dtypeIdx {dtypeIdx} --algo {algo} --nSteps {nSteps} --nVar {nVar} --nX {nX} --nY {nY}"
+        from pySDC.helpers.fieldsIO import Rectilinear, initGrid
 
-    p = subprocess.Popen(cmd.split(), cwd=".")
-    p.wait()
-    assert p.returncode == 0, f"MPI write with {nProcs} did not return code 0, but {p.returncode}"
+        f2: Rectilinear = FieldsIO.fromFile(fileName)
 
-    from pySDC.helpers.fieldsIO import FieldsIO, Cart2D
+        assert type(f2) == Rectilinear, f"incorrect type in MPI written fields {f2}"
+        assert f2.nFields == nSteps, f"incorrect nFields in MPI written fields {f2} ({f2.nFields} instead of {nSteps})"
+        assert f2.nVar == nVar, f"incorrect nVar in MPI written fields {f2}"
+        assert f2.gridSizes == list(gridSizes), f"incorrect gridSizes in MPI written fields {f2}"
 
-    f2: Cart2D = FieldsIO.fromFile(fileName)
+        coords, u0 = initGrid(nVar, gridSizes)
+        for i, (cFile, cRef) in enumerate(zip(f2.header['coords'], coords)):
+            assert np.allclose(cFile, cRef), f"incorrect coords[{i}] in MPI written fields {f2}"
 
-    assert type(f2) == Cart2D, f"incorrect type in MPI written fields {f2}"
-    assert f2.nFields == nSteps, f"incorrect nFields in MPI written fields {f2}"
-    assert f2.nVar == nVar, f"incorrect nVar in MPI written fields {f2}"
-    assert f2.nX == nX, f"incorrect nX in MPI written fields {f2}"
-    assert f2.nY == nY, f"incorrect nY in MPI written fields {f2}"
-
-    grids, _, u0 = initGrid(nVar, nX, nY)
-    assert np.allclose(f2.header['coordX'], grids[0]), f"incorrect coordX in MPI written fields {f2}"
-    assert np.allclose(f2.header['coordY'], grids[1]), f"incorrect coordY in MPI written fields {f2}"
-
-    times = np.arange(nSteps) / nSteps
-    for idx, t in enumerate(times):
-        u1 = u0 * t
-        t2, u2 = f2.readField(idx)
-        assert t2 == t, f"{idx}'s fields in {f2} has incorrect time"
-        assert u2.shape == u1.shape, f"{idx}'s fields in {f2} has incorrect shape"
-        assert np.allclose(u2, u1), f"{idx}'s fields in {f2} has incorrect values"
+        times = np.arange(nSteps) / nSteps
+        for idx, t in enumerate(times):
+            u1 = u0 * t
+            t2, u2 = f2.readField(idx)
+            assert t2 == t, f"fields[{idx}] in {f2} has incorrect time ({t2} instead of {t})"
+            assert u2.shape == u1.shape, f"fields[{idx}] in {f2} has incorrect shape"
+            assert np.allclose(u2, u1), f"fields[{idx}] in {f2} has incorrect values"
 
 
 if __name__ == "__main__":
@@ -359,16 +242,15 @@ if __name__ == "__main__":
 
     parser = argparse.ArgumentParser()
     parser.add_argument('--fileName', type=str, help='fileName of the file')
-    parser.add_argument('--nDim', type=int, help='space dimension', choices=[1, 2])
     parser.add_argument('--dtypeIdx', type=int, help="dtype index", choices=DTYPES.keys())
-    parser.add_argument(
-        '--algo', type=str, help="algorithm used for block decomposition", choices=["ChatGPT", "Hybrid"]
-    )
-    parser.add_argument('--nSteps', type=int, help="number of field variables")
+    parser.add_argument('--algo', type=str, help="algorithm used for block decomposition")
+    parser.add_argument('--nSteps', type=int, help="number of time-steps")
     parser.add_argument('--nVar', type=int, help="number of field variables")
-    parser.add_argument('--nX', type=int, help="number of grid points in x dimension")
-    parser.add_argument('--nY', type=int, help="number of grid points in y dimension")
+    parser.add_argument('--gridSizes', type=int, nargs='+', help="number of grid points in each dimension")
     args = parser.parse_args()
 
-    u0 = writeFields_MPI(**args.__dict__)
-    compareFields_MPI(args.fileName, u0, args.nSteps)
+    if sys.version_info >= (3, 11):
+        from pySDC.helpers.fieldsIO import writeFields_MPI, compareFields_MPI
+
+        u0 = writeFields_MPI(**args.__dict__)
+        compareFields_MPI(args.fileName, u0, args.nSteps)
diff --git a/pySDC/tests/test_helpers/test_vtk.py b/pySDC/tests/test_helpers/test_vtk.py
new file mode 100644
index 0000000000000000000000000000000000000000..94613ac6fe99e406c676d12d6df15481bb12121c
--- /dev/null
+++ b/pySDC/tests/test_helpers/test_vtk.py
@@ -0,0 +1,23 @@
+import pytest
+import numpy as np
+
+
+@pytest.mark.parametrize("nZ", [1, 5, 16])
+@pytest.mark.parametrize("nY", [1, 5, 16])
+@pytest.mark.parametrize("nX", [1, 5, 16])
+@pytest.mark.parametrize("nVar", [1, 2, 3])
+def testVTR(nVar, nX, nY, nZ):
+    from pySDC.helpers.vtkIO import writeToVTR, readFromVTR
+
+    data1 = np.random.rand(nVar, nX, nY, nZ)
+    coords1 = [np.sort(np.random.rand(n)) for n in [nX, nY, nZ]]
+    varNames1 = [f"var{i}" for i in range(nVar)]
+
+    data2, coords2, varNames2 = readFromVTR(writeToVTR("testVTR", data1, coords1, varNames1))
+
+    for i, (x1, x2) in enumerate(zip(coords1, coords2)):
+        print(x1, x2)
+        assert np.allclose(x1, x2), f"coordinate mismatch in dir. {i}"
+    assert varNames1 == varNames2, f"varNames mismatch"
+    assert data1.shape == data2.shape, f"data shape mismatch"
+    assert np.allclose(data1, data2), f"data values mismatch"