diff --git a/README.md b/README.md
index 7dc67baa4f6ef0465d6dbb5a8900535b7b548050..8695cdcdc6b965ef05f810e239726fc22e82162e 100644
--- a/README.md
+++ b/README.md
@@ -112,7 +112,7 @@ Any contribution is dearly welcome! If you want to contribute, please take the t
 This project has received funding from the [European High-Performance
 Computing Joint Undertaking](https://eurohpc-ju.europa.eu/) (JU) under
 grant agreement No 955701 ([TIME-X](https://www.time-x-eurohpc.eu/))
-and grant agreement No 101118139. 
+and grant agreement No 101118139.
 The JU receives support from the European Union's Horizon 2020 research
 and innovation programme and Belgium, France, Germany, and Switzerland.
 This project also received funding from the [German Federal Ministry of
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 53e913c2bcc87f4d24672296d4594b2ae3fc7dea..ce7e098c4f45c5e24a683abbe35e6f17bc69ad73 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -72,7 +72,7 @@ author = 'Robert Speck, Thibaut Lunet, Thomas Baumann, Lisa Wimmer, Ikrom Akramo
 # The short X.Y version.
 version = '5.6'
 # The full version, including alpha/beta/rc tags.
-release = '5.6
+release = '5.6'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pySDC/helpers/fieldsIO.py b/pySDC/helpers/fieldsIO.py
index 2155896773faa393855995f8d9105ca188a537ed..bf1738acdd0f01e3077f73c130af10ed5d200be9 100644
--- a/pySDC/helpers/fieldsIO.py
+++ b/pySDC/helpers/fieldsIO.py
@@ -44,14 +44,13 @@ See :class:`pySDC.helpers.fieldsIO.writeFields_MPI` for an illustrative example.
 
 Warning
 -------
-To use MPI collective writing, you need to call first the class methods :class:`Rectilinear.initMPI` (cf their docstring).
+To use MPI collective writing, you first need to call the class method :class:`Rectilinear.setupMPI` (cf. its docstring).
 Also, `Rectilinear.setHeader` **must be given the global grids coordinates**, whether the code is run in parallel or not.
 """
 import os
 import numpy as np
 from typing import Type, TypeVar
 import logging
-import itertools
 
 T = TypeVar("T")
 
@@ -61,11 +60,17 @@ try:
     except ImportError:
         pass
     from mpi4py import MPI
+    from mpi4py.util.dtlib import from_numpy_dtype as MPI_DTYPE
 except ImportError:
 
     class MPI:
         COMM_WORLD = None
         Intracomm = T
+        File = T
+        Datatype = T
+
+    def MPI_DTYPE():
+        pass
 
 
 # Supported data types
@@ -417,6 +422,8 @@ class Rectilinear(Scalar):
         coords = self.setupCoords(*coords)
         self.header = {"nVar": int(nVar), "coords": coords}
         self.nItems = nVar * self.nDoF
+        if self.MPI_ON:
+            self.MPI_SETUP()
 
     @property
     def hInfos(self):
@@ -438,6 +445,8 @@ class Rectilinear(Scalar):
         gridSizes = np.fromfile(f, dtype=np.int32, count=dim)
         coords = [np.fromfile(f, dtype=np.float64, count=n) for n in gridSizes]
         self.setHeader(nVar, coords)
+        if self.MPI_ON:
+            self.MPI_SETUP()
 
     def reshape(self, fields: np.ndarray):
         """Reshape the fields to a N-d array (inplace operation)"""
@@ -498,7 +507,6 @@ class Rectilinear(Scalar):
     # MPI-parallel implementation
     # -------------------------------------------------------------------------
     comm: MPI.Intracomm = None
-    _nCollectiveIO = None
 
     @classmethod
     def setupMPI(cls, comm: MPI.Intracomm, iLoc, nLoc):
@@ -519,20 +527,8 @@ class Rectilinear(Scalar):
         cls.iLoc = iLoc
         cls.nLoc = nLoc
         cls.mpiFile: MPI.File = None
-        cls._nCollectiveIO = None
-
-    @property
-    def nCollectiveIO(self):
-        """
-        Number of collective IO operations over all processes, when reading or writing a field.
-
-        Returns:
-        --------
-        int: Number of collective IO accesses
-        """
-        if self._nCollectiveIO is None:
-            self._nCollectiveIO = self.comm.allreduce(self.nVar * np.prod(self.nLoc[:-1]), op=MPI.MAX)
-        return self._nCollectiveIO
+        cls.mpiType: MPI.Datatype = None
+        cls.mpiFileType: MPI.Datatype = None
 
     @property
     def MPI_ON(self):
@@ -548,6 +544,16 @@ class Rectilinear(Scalar):
             return True
         return self.comm.Get_rank() == 0
 
+    def MPI_SETUP(self):
+        """Setup subarray masks for each processes"""
+        self.mpiType = MPI_DTYPE(self.dtype)
+        self.mpiFileType = self.mpiType.Create_subarray(
+            [self.nVar, *self.gridSizes],  # Global array sizes
+            [self.nVar, *self.nLoc],  # Local array sizes
+            [0, *self.iLoc],  # Global starting indices of local blocks
+        )
+        self.mpiFileType.Commit()
+
     def MPI_FILE_OPEN(self, mode):
         """Open the binary file in MPI mode"""
         amode = {
@@ -572,7 +578,8 @@ class Rectilinear(Scalar):
         data : np.ndarray
             Data to be written in the binary file.
         """
-        self.mpiFile.Write_at_all(offset, data)
+        self.mpiFile.Set_view(disp=offset, etype=self.mpiType, filetype=self.mpiFileType)
+        self.mpiFile.Write_all(data)
 
     def MPI_READ_AT_ALL(self, offset, data: np.ndarray):
         """
@@ -586,7 +593,8 @@ class Rectilinear(Scalar):
         data : np.ndarray
             Array on which to read the data from the binary file.
         """
-        self.mpiFile.Read_at_all(offset, data)
+        self.mpiFile.Set_view(disp=offset, etype=self.mpiType, filetype=self.mpiFileType)
+        self.mpiFile.Read_all(data)
 
     def MPI_FILE_CLOSE(self):
         """Close the binary file in MPI mode"""
@@ -637,33 +645,15 @@ class Rectilinear(Scalar):
             *self.nLoc,
         ), f"expected {(self.nVar, *self.nLoc)} shape, got {field.shape}"
 
-        offset0 = self.fileSize
+        offset = self.fileSize
         self.MPI_FILE_OPEN(mode="a")
-        nWrites = 0
-        nCollectiveIO = self.nCollectiveIO
 
         if self.MPI_ROOT:
             self.MPI_WRITE(np.array(time, dtype=T_DTYPE))
-        offset0 += self.tSize
-
-        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
-            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_WRITE_AT_ALL(offset, field[(iVar, *iBeg)])
-            nWrites += 1
-
-        for _ in range(nCollectiveIO - nWrites):
-            # Additional collective write to catch up with other processes
-            self.MPI_WRITE_AT_ALL(offset0, field[:0])
-
+        offset += self.tSize
+        self.MPI_WRITE_AT_ALL(offset, field)
         self.MPI_FILE_CLOSE()
 
-    def iPos(self, iVar, iX):
-        iPos = iVar * self.nDoF
-        for axis in range(self.dim - 1):
-            iPos += (self.iLoc[axis] + iX[axis]) * np.prod(self.gridSizes[axis + 1 :])
-        iPos += self.iLoc[-1]
-        return iPos
-
     def readField(self, idx):
         """
         Read one field stored in the binary file, corresponding to the given
@@ -689,26 +679,15 @@ class Rectilinear(Scalar):
             return super().readField(idx)
 
         idx = self.formatIndex(idx)
-        offset0 = self.hSize + idx * (self.tSize + self.fSize)
+        offset = self.hSize + idx * (self.tSize + self.fSize)
         with open(self.fileName, "rb") as f:
-            t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset0)[0])
-        offset0 += self.tSize
+            t = float(np.fromfile(f, dtype=T_DTYPE, count=1, offset=offset)[0])
+        offset += self.tSize
 
         field = np.empty((self.nVar, *self.nLoc), dtype=self.dtype)
 
         self.MPI_FILE_OPEN(mode="r")
-        nReads = 0
-        nCollectiveIO = self.nCollectiveIO
-
-        for (iVar, *iBeg) in itertools.product(range(self.nVar), *[range(n) for n in self.nLoc[:-1]]):
-            offset = offset0 + self.iPos(iVar, iBeg) * self.itemSize
-            self.MPI_READ_AT_ALL(offset, field[(iVar, *iBeg)])
-            nReads += 1
-
-        for _ in range(nCollectiveIO - nReads):
-            # Additional collective read to catch up with other processes
-            self.MPI_READ_AT_ALL(offset0, field[:0])
-
+        self.MPI_READ_AT_ALL(offset, field)
         self.MPI_FILE_CLOSE()
 
         return t, field
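
Note on the fieldsIO change above: the previous implementation looped over every contiguous 1D slice of the local block (via `iPos` and `itertools.product`), issued one `Write_at_all`/`Read_at_all` per slice, and padded with empty collective calls (`nCollectiveIO`) so all ranks performed the same number of operations. The new code instead builds an MPI subarray datatype once in `MPI_SETUP`, installs it as the file view, and moves each rank's whole block with a single `Write_all`/`Read_all`. A minimal, self-contained sketch of that pattern (not the pySDC class itself; the 4x6 grid, the row decomposition, and the file name are made up for illustration) could look like this:

```python
# Collective write of a distributed 2D block through an MPI subarray file view.
# Assumes the number of ranks divides the first grid dimension (e.g. run with 2 ranks).
import numpy as np
from mpi4py import MPI
from mpi4py.util.dtlib import from_numpy_dtype

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()

gridSizes = [4, 6]                             # global array shape
nLoc = [gridSizes[0] // size, gridSizes[1]]    # local block shape (split along first axis)
iLoc = [rank * nLoc[0], 0]                     # global start indices of the local block

dtype = np.dtype(np.float64)
mpiType = from_numpy_dtype(dtype)              # element datatype
fileType = mpiType.Create_subarray(gridSizes, nLoc, iLoc)  # mask selecting this rank's block
fileType.Commit()

field = np.full(nLoc, rank, dtype=dtype)       # local data to write

fh = MPI.File.Open(comm, "subarray_demo.bin", MPI.MODE_WRONLY | MPI.MODE_CREATE)
fh.Set_view(disp=0, etype=mpiType, filetype=fileType)  # in pySDC, disp would skip header and time
fh.Write_all(field)                            # one collective call for the whole block
fh.Close()
fileType.Free()
```

Letting MPI-IO scatter the non-contiguous file layout from the datatype is what makes the removed `iPos` offset arithmetic and the `nCollectiveIO` balancing unnecessary.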
diff --git a/pySDC/projects/GPU/analysis_scripts/parallel_scaling.py b/pySDC/projects/GPU/analysis_scripts/parallel_scaling.py
index fe190bd6f489ef7c00d72a7a211915a4366910ac..94d3b37d8beb37bbad9e7a1b018daef2cf7a61d4 100644
--- a/pySDC/projects/GPU/analysis_scripts/parallel_scaling.py
+++ b/pySDC/projects/GPU/analysis_scripts/parallel_scaling.py
@@ -81,13 +81,17 @@ class ScalingConfig(object):
                     **kwargs,
                 )
 
-    def plot_scaling_test(self, ax, quantity='time', **plotting_params):  # pragma: no cover
+    def plot_scaling_test(self, ax, quantity='time', space_time=None, **plotting_params):  # pragma: no cover
         from matplotlib.colors import TABLEAU_COLORS
 
         cmap = TABLEAU_COLORS
         colors = list(cmap.values())
 
         for experiment in self.experiments:
+            if space_time is not None:
+                if not experiment.PinT == space_time:
+                    continue
+
             tasks_time = self.tasks_time if experiment.PinT else 1
             timings = {}
 
@@ -141,8 +145,12 @@ class ScalingConfig(object):
                     elif quantity == 'throughput_per_task':
                         timings[np.prod(procs)] = experiment.res**self.ndim / t_mean
                     elif quantity == 'efficiency':
+                        if type(config).__name__ == 'GrayScottScaling3D':
+                            norm = 13216322.909
+                        else:
+                            norm = 1
                         timings[np.prod(procs) / self.tasks_per_node] = (
-                            experiment.res**self.ndim / t_mean / np.prod(procs)
+                            experiment.res**self.ndim / t_mean / np.prod(procs) / norm
                         )
                     elif quantity == 'time':
                         timings[np.prod(procs) / self.tasks_per_node] = t_mean
@@ -150,11 +158,17 @@ class ScalingConfig(object):
                         timings[np.prod(procs)] = t_mean
                     elif quantity == 'min_time_per_task':
                         timings[np.prod(procs)] = t_min
+                    elif quantity == 'min_time':
+                        timings[np.prod(procs) / self.tasks_per_node] = t_min
                     else:
                         raise NotImplementedError
                 except (FileNotFoundError, ValueError):
                     pass
 
+            if quantity == 'efficiency' and type(config).__name__ == 'RayleighBenard_scaling':
+                norm = max(timings.values())
+                timings = {key: value / norm for key, value in timings.items()}
+
             ax.loglog(
                 timings.keys(),
                 timings.values(),
@@ -171,7 +185,8 @@ class ScalingConfig(object):
             'time': r'$t_\mathrm{step}$ / s',
             'time_per_task': r'$t_\mathrm{step}$ / s',
             'min_time_per_task': r'minimal $t_\mathrm{step}$ / s',
-            'efficiency': 'efficiency / DoF/s/task',
+            'min_time': r'minimal $t_\mathrm{step}$ / s',
+            'efficiency': r'parallel efficiency / \%',
         }
         ax.set_ylabel(labels[quantity])
 
@@ -331,17 +346,28 @@ class RayleighBenardDedalusComparisonGPU(GPUConfig, ScalingConfig):
     ]
 
 
-def plot_scalings(problem, **kwargs):  # pragma: no cover
+def plot_scalings(problem, XPU=None, space_time=None, **kwargs):  # pragma: no cover
     if problem == 'GS3D':
-        configs = [
-            GrayScottSpaceScalingCPU3D(),
-            GrayScottSpaceScalingGPU3D(),
-        ]
+        if XPU == 'CPU':
+            configs = [GrayScottSpaceScalingCPU3D()]
+        elif XPU == 'GPU':
+            configs = [GrayScottSpaceScalingGPU3D()]
+        else:
+            configs = [GrayScottSpaceScalingCPU3D(), GrayScottSpaceScalingGPU3D()]
     elif problem == 'RBC':
-        configs = [
-            RayleighBenardSpaceScalingGPU(),
-            RayleighBenardSpaceScalingCPU(),
-        ]
+        if XPU == 'CPU':
+            configs = [
+                RayleighBenardSpaceScalingCPU(),
+            ]
+        elif XPU == 'GPU':
+            configs = [
+                RayleighBenardSpaceScalingGPU(),
+            ]
+        else:
+            configs = [
+                RayleighBenardSpaceScalingGPU(),
+                RayleighBenardSpaceScalingCPU(),
+            ]
     elif problem == 'RBC_dedalus':
         configs = [
             RayleighBenardDedalusComparison(),
@@ -358,31 +384,26 @@ def plot_scalings(problem, **kwargs):  # pragma: no cover
         ('RBC', 'time'): {'x': [1 / 10, 64], 'y': [60, 60 / 640]},
         ('RBC', 'time_per_task'): {'x': [1, 640], 'y': [60, 60 / 640]},
         ('RBC', 'min_time_per_task'): {'x': [1, 640], 'y': [60, 60 / 640]},
+        ('RBC', 'min_time'): {'x': [1, 640], 'y': [60, 60 / 640]},
         ('RBC', 'throughput_per_task'): {'x': [1 / 1, 640], 'y': [2e4, 2e4 * 640]},
     }
 
-    fig, ax = plt.subplots(figsize=figsize_by_journal('TUHH_thesis', 1, 0.6))
-    configs[1].plot_scaling_test(ax=ax, quantity='efficiency')
-    # ax.legend(frameon=False)
-    box = ax.get_position()
-    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
-    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
-
-    ax.set_yscale('linear')
-    path = f'{PROJECT_PATH}/plots/scaling_{problem}_efficiency.pdf'
-    fig.savefig(path, bbox_inches='tight')
-    print(f'Saved {path!r}', flush=True)
-
-    for quantity in ['time', 'throughput', 'time_per_task', 'throughput_per_task', 'min_time_per_task'][::-1]:
+    for quantity in ['time', 'throughput', 'time_per_task', 'throughput_per_task', 'min_time_per_task', 'efficiency'][
+        ::-1
+    ]:
         fig, ax = plt.subplots(figsize=figsize_by_journal('TUHH_thesis', 1, 0.6))
         for config in configs:
-            config.plot_scaling_test(ax=ax, quantity=quantity)
+            config.plot_scaling_test(ax=ax, quantity=quantity, space_time=space_time)
         if (problem, quantity) in ideal_lines.keys():
             ax.loglog(*ideal_lines[(problem, quantity)].values(), color='black', ls=':', label='ideal')
+        elif quantity == 'efficiency':
+            ax.axhline(1, color='black', ls=':', label='ideal')
+            ax.set_yscale('linear')
+            ax.set_ylim(0, 1.1)
         box = ax.get_position()
         ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
         ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
-        path = f'{PROJECT_PATH}/plots/scaling_{problem}_{quantity}.pdf'
+        path = f'{PROJECT_PATH}/plots/scaling_{problem}_{quantity}_{XPU}_{space_time}.pdf'
         fig.savefig(path, bbox_inches='tight')
         print(f'Saved {path!r}', flush=True)
 
@@ -393,8 +414,8 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser()
     parser.add_argument('--mode', type=str, choices=['run', 'plot'], default='run')
     parser.add_argument('--problem', type=str, default='GS')
-    parser.add_argument('--XPU', type=str, choices=['CPU', 'GPU'], default='CPU')
-    parser.add_argument('--space_time', type=str, choices=['True', 'False'], default='False')
+    parser.add_argument('--XPU', type=str, choices=['CPU', 'GPU', 'both'], default='CPU')
+    parser.add_argument('--space_time', type=str, choices=['True', 'False', 'None'], default='False')
     parser.add_argument('--submit', type=str, choices=['True', 'False'], default='True')
     parser.add_argument('--nsys_profiling', type=str, choices=['True', 'False'], default='False')
 
@@ -403,6 +424,13 @@ if __name__ == '__main__':
     submit = args.submit == 'True'
     nsys_profiling = args.nsys_profiling == 'True'
 
+    if args.space_time == 'True':
+        space_time = True
+    elif args.space_time == 'False':
+        space_time = False
+    else:
+        space_time = None
+
     config_classes = []
 
     if args.problem == 'GS3D':
@@ -429,6 +457,6 @@ if __name__ == '__main__':
         if args.mode == 'run':
             config.run_scaling_test(submit=submit, nsys_profiling=nsys_profiling)
         elif args.mode == 'plot':
-            plot_scalings(problem=args.problem)
+            plot_scalings(problem=args.problem, XPU=args.XPU, space_time=space_time)
         else:
             raise NotImplementedError(f'Don\'t know mode {args.mode!r}')
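
For the scaling plots, the script now accepts `--XPU both` and a tri-state `--space_time` (mapped to `True`/`False`/`None`); `plot_scalings` forwards both to `plot_scaling_test`, which skips experiments whose `PinT` flag does not match unless `space_time is None`. A short sketch of the resulting plotting calls (assuming the scaling data was already recorded with `--mode run`; roughly what `--mode plot` does after argument parsing):

```python
# Illustrative plotting entry points enabled by the new options, e.g. mirroring
#   python parallel_scaling.py --mode plot --problem RBC --XPU both --space_time False
from pySDC.projects.GPU.analysis_scripts.parallel_scaling import plot_scalings

plot_scalings(problem='RBC', XPU='both', space_time=False)  # space-parallel runs only, CPU and GPU configs
plot_scalings(problem='RBC', XPU='GPU', space_time=None)    # no PinT filter: all GPU experiments
```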
diff --git a/pySDC/projects/GPU/paper_plots.py b/pySDC/projects/GPU/paper_plots.py
index 607bd8d2d557f3357c895641f1f84f36633cf6f0..2efd6ab7a864b113665058856b3d8e89e955a94a 100644
--- a/pySDC/projects/GPU/paper_plots.py
+++ b/pySDC/projects/GPU/paper_plots.py
@@ -54,6 +54,7 @@ def plot_scalings_separately(problem, journal='TUHH_thesis', **kwargs):  # pragm
 
 def make_plots_for_thesis():  # pragma: no cover
     from pySDC.projects.GPU.analysis_scripts.plot_RBC_matrix import plot_DCT, plot_preconditioners, plot_ultraspherical
+    from pySDC.projects.GPU.analysis_scripts.parallel_scaling import plot_scalings
 
     # small plots with no simulations
     plot_DCT()
@@ -61,8 +62,11 @@ def make_plots_for_thesis():  # pragma: no cover
     plot_ultraspherical()
 
     # plot space-time parallel scaling
-    for problem in ['GS3D', 'RBC']:
-        plot_scalings_separately(problem=problem)
+    plot_scalings(problem='GS3D', XPU='both', space_time=False)
+    plot_scalings(problem='GS3D', XPU='GPU', space_time=None)
+    plot_scalings(problem='RBC', XPU='both', space_time=False)
+    plot_scalings(problem='RBC', XPU='GPU', space_time=None)
+    plot_scalings(problem='RBC', XPU='CPU', space_time=None)
 
 
 if __name__ == '__main__':
diff --git a/pySDC/projects/Resilience/paper_plots.py b/pySDC/projects/Resilience/paper_plots.py
index 69d176236567ba41acc860f3ea8a7805b4f38964..2095c9e5ed7fb05468b03c7b0ad002afb6564e68 100644
--- a/pySDC/projects/Resilience/paper_plots.py
+++ b/pySDC/projects/Resilience/paper_plots.py
@@ -813,6 +813,7 @@ def plot_recovery_rate_per_acceptance_threshold(problem, target='resilience'):
     else:
         fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.8, 0.5))
 
+    ax.axvline(1.1, color='grey', ls=':', label='1.1')
     stats_analyser.plot_recovery_thresholds(thresh_range=np.logspace(-1, 4, 500), recoverable_only=False, ax=ax)
     ax.set_xscale('log')
     ax.set_ylim((-0.05, 1.05))
diff --git a/pySDC/projects/Resilience/work_precision.py b/pySDC/projects/Resilience/work_precision.py
index 2a1c448aa304669cc008ec14951ea7fc6e24cbab..f4bc016bcb62c26d30824c39a96da8d500f78600 100644
--- a/pySDC/projects/Resilience/work_precision.py
+++ b/pySDC/projects/Resilience/work_precision.py
@@ -301,7 +301,7 @@ def record_work_precision(
             exponents = [-2, -1, 0, 1, 2, 3][::-1]
         if problem.__name__ == 'run_Lorenz':
             exponents = [0, 1, 2, 3][::-1]
-            if type(strategy).__name__ in ["AdaptivityStrategy"]:
+            if type(strategy).__name__ in ["AdaptivityStrategy", "ESDIRKStrategy", "ERKStrategy"]:
                 exponents = [0, 1, 2, 3, 4, 5][::-1]
     elif param == 'dt':
         power = 2.0
@@ -579,6 +579,20 @@ def plot_work_precision(
                 * 7,
                 minor=True,
             )
+    elif problem.__name__ == 'run_Lorenz':
+        if mode == 'parallel_efficiency_dt_k':
+            ax.set_xticks(
+                ticks=[
+                    0.1,
+                    0.2,
+                    0.3,
+                    0.4,
+                    5e-1,
+                    6e-1,
+                ],
+                labels=['', r'$2\times 10^{-1}$', '', r'$4\times 10^{-1}$', '', ''],
+                minor=True,
+            )
 
 
 def plot_parallel_efficiency_diagonalSDC(
@@ -945,28 +959,16 @@ def get_configs(mode, problem):
             'strategies': RK_strategies,
             'num_procs': 1,
         }
-        if problem.__name__ == 'run_Lorenz':
-            configurations[3] = {
-                'custom_description': desc_poly,
-                'strategies': [AdaptivityPolynomialError(useMPI=True, newton_inexactness=newton_inexactness)],
-                'num_procs': 4,
-                'num_procs_sweeper': num_procs_dt_k,
-                'plotting_params': {
-                    'label': rf'$\Delta t$-$k$-adaptivity $N$=4x{num_procs_dt_k}',
-                    'ls': ls[num_procs_dt_k * 4],
-                },
-            }
-        else:
-            configurations[3] = {
-                'custom_description': desc_poly,
-                'strategies': [AdaptivityPolynomialError(useMPI=True, newton_inexactness=newton_inexactness)],
-                'num_procs': 1,
-                'num_procs_sweeper': num_procs_dt_k,
-                'plotting_params': {
-                    'label': rf'$\Delta t$-$k$-adaptivity $N$=1x{num_procs_dt_k}',
-                    'ls': ls[num_procs_dt_k],
-                },
-            }
+        configurations[3] = {
+            'custom_description': desc_poly,
+            'strategies': [AdaptivityPolynomialError(useMPI=True, newton_inexactness=newton_inexactness)],
+            'num_procs': 1,
+            'num_procs_sweeper': num_procs_dt_k,
+            'plotting_params': {
+                'label': rf'$\Delta t$-$k$-adaptivity $N$=1x{num_procs_dt_k}',
+                'ls': ls[num_procs_dt_k],
+            },
+        }
         if problem.__name__ in ['run_Lorenz']:
             configurations[2] = {
                 'strategies': [AdaptivityStrategy(useMPI=True)],
@@ -1201,7 +1203,7 @@ def get_configs(mode, problem):
         QI = {
             (1, 3, 'run_Lorenz'): 'MIN-SR-NS',
             (1, 1, 'run_Lorenz'): 'MIN-SR-NS',
-            (4, 1, 'run_Lorenz'): 'IE',
+            (4, 1, 'run_Lorenz'): 'MIN-SR-NS',
         }
 
         newton_inexactness = False if problem.__name__ in ['run_vdp'] else True
diff --git a/pySDC/tests/test_benchmarks/test_collocation.py b/pySDC/tests/test_benchmarks/test_collocation.py
index 14cd361d296e728a5e23bb4404b0feadb7552035..2d6963e374978ac0480d4f801063cf845989fff8 100644
--- a/pySDC/tests/test_benchmarks/test_collocation.py
+++ b/pySDC/tests/test_benchmarks/test_collocation.py
@@ -3,8 +3,8 @@ import numpy as np
 
 from pySDC.core.collocation import CollBase
 
-t_start = float(np.random.rand(1) * 0.2)
-t_end = float(0.8 + np.random.rand(1) * 0.2)
+t_start = float(np.random.rand(1)[0] * 0.2)
+t_end = float(0.8 + np.random.rand(1)[0] * 0.2)
 
 tolQuad = 1e-13
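
The `[0]` indexing added in test_collocation.py avoids the implicit conversion of a one-element array to a Python scalar, which NumPy deprecated in version 1.25 and which is the likely motivation for the change. A small illustration:

```python
# float() on a size-1 ndarray emits a DeprecationWarning since NumPy 1.25
# ("Conversion of an array with ndim > 0 to a scalar is deprecated");
# extracting the element first keeps the test warning-free.
import numpy as np

sample = np.random.rand(1)         # shape (1,) array
t_start = float(sample[0] * 0.2)   # explicit element access, no warning
```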