diff --git a/example/ALL_Tensor.cpp b/example/ALL_Tensor.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0fe492f84101d6119e576a94d214ff83ec84d4c1
--- /dev/null
+++ b/example/ALL_Tensor.cpp
@@ -0,0 +1,260 @@
+/*
+   Copyright 2020-2020 Stephan Schulz, Forschungszentrum Juelich GmbH, Germany
+   Copyright 2018-2020 Rene Halver, Forschungszentrum Juelich GmbH, Germany
+   Copyright 2018-2020 Godehard Sutmann, Forschungszentrum Juelich GmbH, Germany
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+   3. Neither the name of the copyright holder nor the names of its contributors
+   may be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+   POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <iostream>
+//#include <time.h>
+#include <mpi.h>
+
+//#define ALL_VTK_OUTPUT_EXAMPLE
+
+#ifdef USE_AMALGAMATED
+#include "ALL_Amalgamated.hpp"
+#else
+#include <ALL.hpp>
+#endif
+
+// Run fun once per rank, in rank order
+// Todo(s.schulz): This only works approximately for the result width at 32 ranks;
+//                 with up to 16 ranks the output is ordered correctly. Adding
+//                 sleep(1) also orders everything correctly, so this is probably
+//                 a flushing problem. It also occurs for the cout stream with endl.
+#define MPI_RUN_ORDER(comm, rank, max_ranks, fun) {\
+	for(int MPI_RO_IT=0;MPI_RO_IT<max_ranks;MPI_RO_IT++)\
+	{\
+		if(MPI_RO_IT==rank)\
+		{\
+			fun;\
+		}\
+		MPI_Barrier(comm);\
+	}\
+}
+
+// Quick and dirty helper macro. Assumes MPI_COMM_WORLD, MyRank and MaximumRank are in scope.
+// CAVEAT: the call must be wrapped in () if it contains a comma
+#define MPI_RUN_ORDER_DEF(fun) MPI_RUN_ORDER(MPI_COMM_WORLD, MyRank, MaximumRank, fun)
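+// Example (note the extra parentheses around the comma-containing call):
+//   MPI_RUN_ORDER_DEF((print_work(MyRank, MyWork)));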
+
+//void millisleep(unsigned long ms)
+//{
+//	struct timespec SleepTime;
+//	SleepTime.tv_sec = ms/1000;
+//	SleepTime.tv_nsec = (ms%1000)*1000000L;
+//	struct timespec RemainingTime;
+//	while(nanosleep(&SleepTime, &RemainingTime))
+//	{
+//		SleepTime.tv_sec = RemainingTime.tv_sec;
+//		SleepTime.tv_nsec = RemainingTime.tv_nsec;
+//	}
+//}
+
+void print_width(int rank, double width, double bottom, double top)
+{
+	printf("[%03d] Result Width: %10.6f (%10.6f -- %10.6f)\n", rank, width, bottom, top);
+	fflush(stdout);
+}
+
+void print_testing_output(int rank, std::vector<ALL::Point<double>>& vertices, int timestep)
+{
+	// Print both corner vertices of the local domain, one line per vertex.
+	for(int VertexIndex=0; VertexIndex<2; VertexIndex++)
+	{
+		printf("[%4d,%03d,%02d] Result Vertex: %10.6f %10.6f %10.6f\n",
+				timestep,
+				rank,
+				VertexIndex,
+				vertices.at(VertexIndex)[0],
+				vertices.at(VertexIndex)[1],
+				vertices.at(VertexIndex)[2]);
+		fflush(stdout);
+	}
+}
+
+void print_loc(int rank, int* loc, int* size)
+{
+	printf("[%03d] Location: (%3d,%3d,%3d)/(%3d,%3d,%3d)\n", rank, loc[0], loc[1], loc[2], size[0], size[1], size[2]);
+	fflush(stdout);
+}
+
+void print_domain(int rank, double* verts)
+{
+	printf("[%03d] Lower: %g\t%g\t%g\n", rank, verts[0], verts[1], verts[2]);
+	printf("[%03d] Upper: %g\t%g\t%g\n", rank, verts[3], verts[4], verts[5]);
+	fflush(stdout);
+}
+
+void print_work(int rank, double work)
+{
+	printf("[%03d] Work: %g\n", rank, work);
+	fflush(stdout);
+}
+
+void convert_verts(std::vector<ALL::Point<double>>* vv, double* verts)
+{
+	verts[0] = vv->at(0)[0];
+	verts[1] = vv->at(0)[1];
+	verts[2] = vv->at(0)[2];
+	verts[3] = vv->at(1)[0];
+	verts[4] = vv->at(1)[1];
+	verts[5] = vv->at(1)[2];
+}
+
+int main(int argc, char** argv)
+{
+	MPI_Init(&argc,&argv);
+	int CurrentStep = 0;
+	const int NumberOfSteps = 50;
+
+	const int Dimensions = 3;
+	const double LoadbalancerGamma = 0.0; // ignored for the tensor method
+	// Todo(s.schulz): maybe change to automatic destruction on scope end
+	ALL::ALL<double, double> *jall = new ALL::ALL<double, double>(ALL::TENSOR, Dimensions, LoadbalancerGamma);
+	int MyLocation[3] = {0};
+	// All domains are placed along a line in z direction, even though they are three dimensional
+	MPI_Comm_rank(MPI_COMM_WORLD,&MyLocation[2]);
+	int MyRank = MyLocation[2];
+
+	int NumberOfProcesses[3] = {1,1,1};
+	MPI_Comm_size(MPI_COMM_WORLD, &NumberOfProcesses[2]);
+	int MaximumRank = NumberOfProcesses[2];
+
+	if(MyRank==0)
+	{
+		printf("Ranks: %d\nNumber of Steps: %d\n", MaximumRank, NumberOfSteps);
+		fflush(stdout);
+	}
+	MPI_Barrier(MPI_COMM_WORLD);
+	MPI_RUN_ORDER_DEF((print_loc(MyRank, MyLocation, NumberOfProcesses)));
+
+	// For a cartesian communicator this is not required, but we are using
+	// MPI_COMM_WORLD here.
+	std::vector<int> MyLocationVector(MyLocation, MyLocation+3);
+	std::vector<int> NumberOfProcessesVector(NumberOfProcesses, NumberOfProcesses+3);
+	jall->setProcGridParams(MyLocationVector, NumberOfProcessesVector);
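+	// With a cartesian communicator the location and grid extent could
+	// instead be queried from MPI directly (sketch, assuming a communicator
+	// CartComm created earlier with MPI_Cart_create):
+	//   int GridDims[3], Periods[3], Coords[3];
+	//   MPI_Cart_get(CartComm, 3, GridDims, Periods, Coords);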
+
+	// If we want to set a minimum domain size:
+	std::vector<double> MinimumDomainSize{0.1, 0.1, 0.1};
+	jall->setMinDomainSize(MinimumDomainSize);
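+	// The balancer will then avoid shrinking any domain below this extent.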
+
+	jall->setCommunicator(MPI_COMM_WORLD);
+
+	// We also set the optional process tag for the output.
+	// This can be useful if we want to know which of 'our'
+	// ranks is which in the output produced by the library.
+	// The ranks used inside the library do not necessarily
+	// match our own numbering.
+	jall->setProcTag(MyRank);
+
+	jall->setup();
+	// Todo(s.schulz): document: what exactly must be set before setup()?
+
+	// A first domain distribution must be given to the balancer.
+	// We use the provided ALL::Point class to define the vertices,
+	// but a simple double array can also be used. We need 2 vertices
+	// which correspond to the two opposing corners.
+	std::vector<ALL::Point<double>> DomainVertices(2, ALL::Point<double>(3));
+	const double DomainSize = 1.0; // edge length of each initial cubic domain
+	// We create a cubic domain initially
+	for(int VertexIndex=0; VertexIndex<2; VertexIndex++)
+	{
+		for(int DimensionIndex=0; DimensionIndex<Dimensions; DimensionIndex++)
+		{
+			DomainVertices.at(VertexIndex)[DimensionIndex] = (MyLocation[DimensionIndex]+VertexIndex) * DomainSize;
+		}
+	}
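+	// With MyLocation = (0,0,rank), rank r thus initially owns the unit
+	// cube [0,1] x [0,1] x [r,r+1].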
+	double VertexArray[6];
+	convert_verts(&DomainVertices, VertexArray);
+	MPI_RUN_ORDER_DEF((print_domain(MyRank, VertexArray)));
+	jall->setVertices(DomainVertices);
+
+	// Calculate the work of our domain. Here we simply use the rank plus
+	// one, which gives a static, inhomogeneous load distribution.
+	double MyWork = (double) MyRank + 1.;
+	jall->setWork(MyWork);
+	MPI_RUN_ORDER_DEF((print_work(MyRank,MyWork)));
+	for(CurrentStep=0; CurrentStep<NumberOfSteps; CurrentStep++)
+	{
+		// In a real code we need to set the updated work in each
+		// iteration before calling balance()
+		if(MyRank==0)
+		{
+			printf("Starting step: %d/%d\n", CurrentStep+1, NumberOfSteps);
+			fflush(stdout);
+		}
+#ifdef ALL_VTK_OUTPUT_EXAMPLE
+		try {
+			jall->printVTKoutlines(CurrentStep);
+		} catch (ALL::FilesystemErrorException &e) {
+			std::cout << e.what() << std::endl;
+			MPI_Abort(MPI_COMM_WORLD, 2);
+			// Here we simply abort; depending on the application, a failure
+			// to write output might instead be tolerated.
+		}
+#endif
+		jall->balance();
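+		// balance() computes new domain boundaries from the work values set
+		// via setWork(); the updated vertices are retrieved below.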
+
+		std::vector<ALL::Point<double>> NewVertices = jall->getVertices();
+		// Alternatively, print only the width in z direction:
+		//MPI_RUN_ORDER(MPI_COMM_WORLD, MyRank, MaximumRank, (print_width(MyRank, NewVertices.at(1)[2]-NewVertices.at(0)[2], NewVertices.at(0)[2], NewVertices.at(1)[2])));
+		MPI_RUN_ORDER_DEF((print_testing_output(MyRank, NewVertices, CurrentStep+1)));
+		// Optionally, print the new domain and its work:
+		//convert_verts(&NewVertices, VertexArray);
+		//MPI_RUN_ORDER_DEF((print_domain(MyRank, VertexArray)));
+		//jall->getWork(MyWork);
+		//MPI_RUN_ORDER_DEF((print_work(MyRank,MyWork)));
+		MPI_Barrier(MPI_COMM_WORLD);
+	}
+#ifdef ALL_VTK_OUTPUT_EXAMPLE
+	try {
+		jall->printVTKoutlines(CurrentStep);
+	} catch (ALL::FilesystemErrorException &e) {
+		std::cout << e.what() << std::endl;
+	}
+#endif
+
+	delete jall;
+
+	MPI_Finalize();
+	return EXIT_SUCCESS;
+}
diff --git a/example/ALL_Tensor_f.F90 b/example/ALL_Tensor_f.F90
new file mode 100644
index 0000000000000000000000000000000000000000..e69945c87b6f34dc3232336f4b4cf64308c09c1e
--- /dev/null
+++ b/example/ALL_Tensor_f.F90
@@ -0,0 +1,253 @@
+!Copyright 2018-2020 Rene Halver, Forschungszentrum Juelich GmbH, Germany
+!Copyright 2018-2020 Godehard Sutmann, Forschungszentrum Juelich GmbH, Germany
+!Copyright 2020-2020 Stephan Schulz, Forschungszentrum Juelich GmbH, Germany
+!
+!Redistribution and use in source and binary forms, with or without modification, are
+!permitted provided that the following conditions are met:
+!
+!1. Redistributions of source code must retain the above copyright notice, this list
+!   of conditions and the following disclaimer.
+!
+!2. Redistributions in binary form must reproduce the above copyright notice, this
+!   list of conditions and the following disclaimer in the documentation and/or
+!   other materials provided with the distribution.
+!
+!3. Neither the name of the copyright holder nor the names of its contributors
+!   may be used to endorse or promote products derived from this software without
+!   specific prior written permission.
+!
+!THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+!EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+!OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+!SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+!INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+!TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+!BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+!CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+!ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+!SUCH DAMAGE.
+
+program ALL_Tensor_f
+    use ALL_module
+    use iso_c_binding
+    use, intrinsic :: iso_fortran_env, only: stdin=>input_unit&
+        , stdout=>output_unit&
+        , stderr=>error_unit&
+        , file_storage_size
+#ifndef ALL_USE_F08
+    use mpi
+#else
+    use mpi_f08
+#endif
+    implicit none
+    integer, parameter :: real64 = selected_real_kind(15)
+
+    call main()
+contains
+    subroutine print_loc(my_rank, maximum_rank, my_location, number_of_processes)
+        integer, intent(in) :: my_rank, maximum_rank
+        integer, dimension(3), intent(in) :: my_location, number_of_processes
+        integer :: rank, error
+        do rank=0, maximum_rank-1
+            if(rank == my_rank) then
+                write(stdout,'(a,i3.3,a,i3,",",i3,",",i3,a,i3,",",i3,",",i3,a)')&
+                    "[", my_rank, "] Location: (", my_location, ")/(", number_of_processes, ")"
+                flush(stdout)
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            else
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            endif
+        enddo
+    end subroutine
+
+    subroutine print_domain(my_rank, maximum_rank, domain_vertices)
+        integer, intent(in) :: my_rank, maximum_rank
+        real(real64), dimension(3,2), intent(in) :: domain_vertices
+        integer :: rank, error
+        do rank=0, maximum_rank-1
+            if(rank == my_rank) then
+                write(stdout,'("[",i3.3,"]",a,es12.4,a,es12.4,a,es12.4)')&
+                    my_rank, " Lower: ",&
+                    domain_vertices(1,1), achar(9),&
+                    domain_vertices(2,1), achar(9),&
+                    domain_vertices(3,1)
+                write(stdout,'("[",i3.3,"]",a,es12.4,a,es12.4,a,es12.4)')&
+                    my_rank, " Upper: ",&
+                    domain_vertices(1,2), achar(9),&
+                    domain_vertices(2,2), achar(9),&
+                    domain_vertices(3,2)
+                flush(stdout)
+                call MPI_Barrier(MPI_COMM_WORLD,error)
+            else
+                call MPI_Barrier(MPI_COMM_WORLD,error)
+            endif
+        enddo
+    end subroutine
+
+    subroutine print_work(my_rank, maximum_rank, my_work)
+        integer, intent(in) :: my_rank, maximum_rank
+        real(real64), intent(in) :: my_work
+        integer :: rank, error
+        do rank=0, maximum_rank-1
+            if(rank == my_rank) then
+                write(stdout,'(a,i3.3,a,es12.4)')&
+                    "[", my_rank, "] Work: ", my_work
+                flush(stdout)
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            else
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            endif
+        enddo
+    end subroutine
+
+    subroutine print_testing_output(my_rank, maximum_rank, new_vertices, timestep)
+        integer, intent(in) :: my_rank, maximum_rank, timestep
+        real(real64), dimension(3,2), intent(in) :: new_vertices
+        integer :: rank, error
+        do rank=0, maximum_rank-1
+            if(rank == my_rank) then
+                !write(stdout,'(a,i4,",",i3.3,a,3f11.6)')&
+                !    "[", timestep, my_rank, "] Result Width: ",&
+                !    new_vertices(:,2)-new_vertices(:,1)
+                !flush(stdout)
+                write(stdout,'(a,i4,",",i3.3,",",i2.2,a,3f11.6)')&
+                    "[", timestep, my_rank, 0, "] Result Vertex: ",&
+                    new_vertices(:,1)
+                flush(stdout)
+                write(stdout,'(a,i4,",",i3.3,",",i2.2,a,3f11.6)')&
+                    "[", timestep, my_rank, 1, "] Result Vertex: ",&
+                    new_vertices(:,2)
+                flush(stdout)
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            else
+                call MPI_Barrier(MPI_COMM_WORLD, error)
+            endif
+        enddo
+    end subroutine
+
+    subroutine main()
+        integer :: error
+        integer :: current_step
+        integer, parameter :: number_of_steps = 50
+        integer, parameter :: dimensions = 3
+        real(real64), parameter :: loadbalancer_gamma = 0. ! ignored for tensor method
+        integer, dimension(dimensions) :: my_location, number_of_processes
+        real(real64), dimension(dimensions) :: minimum_domain_size
+        real(real64), dimension(dimensions,2) :: domain_vertices, new_vertices
+        real(real64), parameter :: domain_size = 1.0
+        real(real64) :: my_work
+        integer :: my_rank, maximum_rank
+        integer :: i,j
+        type(ALL_t) :: jall
+#ifdef ALL_VTK_OUTPUT_EXAMPLE
+        character (len=ALL_ERROR_LENGTH) :: error_msg
+#endif
+
+        call MPI_Init(error)
+
+        call jall%init(ALL_TENSOR, dimensions, loadbalancer_gamma)
+        my_location(:) = 0
+        ! All domains are placed along a line in z direction, even though they are three dimensional
+        call MPI_Comm_rank(MPI_COMM_WORLD, my_location(3), error)
+        my_rank = my_location(3)
+
+        number_of_processes(:) = 1
+        call MPI_Comm_size(MPI_COMM_WORLD, number_of_processes(3), error)
+        maximum_rank = number_of_processes(3)
+
+        if(my_rank == 0) then
+            write(stdout,'(a,i0)') "Ranks: ", maximum_rank
+            write(stdout,'(a,i0)') "Number of Steps: ", number_of_steps
+            flush(stdout)
+        endif
+
+        call MPI_Barrier(MPI_COMM_WORLD, error)
+        call print_loc(my_rank, maximum_rank, my_location, number_of_processes)
+
+        ! For a cartesian communicator this is not required, but we are using
+        ! MPI_COMM_WORLD here.
+        call jall%set_proc_grid_params(my_location, number_of_processes)
+
+        ! If we want to set a minimum domain size:
+        minimum_domain_size(:) = 0.1
+        call jall%set_min_domain_size(minimum_domain_size)
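+        ! The balancer will then avoid shrinking any domain below this extent.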
+
+        call jall%set_communicator(MPI_COMM_WORLD)
+
+        ! We also set the optional process tag for the output.
+        ! This can be useful if we want to know which of 'our'
+        ! ranks is which in the output produced by the library.
+        ! The ranks used inside the library do not necessarily
+        ! match our own numbering.
+        call jall%set_proc_tag(my_rank)
+
+        call jall%setup()
+
+        ! A first domain distribution must be given to the balancer.
+        ! In Fortran the vertices are passed as a real array of shape
+        ! (dimensions,2), whose two columns correspond to the two
+        ! opposing corners of the domain.
+        ! We create a cubic domain initially.
+        do i=1, 2
+            do j=1,dimensions
+                domain_vertices(j,i) = (my_location(j)+i-1) * domain_size
+            enddo
+        enddo
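+        ! With my_location = (0,0,rank), rank r thus initially owns the unit
+        ! cube [0,1] x [0,1] x [r,r+1].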
+        call print_domain(my_rank, maximum_rank, domain_vertices)
+        call jall%set_vertices(domain_vertices)
+
+        ! Calculate the work of our domain. Here we simply use the rank
+        ! plus one, which gives a static, inhomogeneous load distribution.
+        my_work = my_rank + 1.
+        call jall%set_work(my_work)
+        call print_work(my_rank, maximum_rank, my_work)
+        do current_step=1, number_of_steps
+            ! In a real code we need to set the updated work in each
+            ! iteration before calling balance()
+            if(my_rank == 0) then
+                write(stdout,'(a,i0,"/",i0)') "Starting step: ", current_step, number_of_steps
+                flush(stdout)
+            endif
+#ifdef ALL_VTK_OUTPUT_EXAMPLE
+            call ALL_reset_error()
+            call jall%print_vtk_outlines(current_step)
+            if(ALL_error() /= 0) then
+                error_msg = ALL_error_description()
+                write(stdout, '(a)') error_msg
+            ! Here we simply abort; alternatively the error could be handled
+            ! in some other way if the output is not essential.
+                call MPI_Abort(MPI_COMM_WORLD, 2, error)
+                stop
+            endif
+#endif
+            call jall%balance()
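+            ! balance() computes new domain boundaries from the work values
+            ! set via set_work(); the updated vertices are retrieved below.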
+
+            call jall%get_vertices(new_vertices)
+
+            call print_testing_output(my_rank, maximum_rank, new_vertices, current_step)
+
+            call MPI_Barrier(MPI_COMM_WORLD, error)
+        enddo
+#ifdef ALL_VTK_OUTPUT_EXAMPLE
+        call ALL_reset_error()
+        call jall%print_vtk_outlines(current_step)
+        if(ALL_error() /= 0) then
+            error_msg = ALL_error_description()
+            write(stdout, '(a)') error_msg
+            ! Here we simply abort; alternatively the error could be handled
+            ! in some other way if the output is not essential.
+            call MPI_Abort(MPI_COMM_WORLD, 2, error)
+            stop
+        endif
+#endif
+
+        call jall%finalize()
+
+        call MPI_Finalize(error)
+    end subroutine
+end program
diff --git a/example/CMakeLists.txt b/example/CMakeLists.txt
index 029269dab8cd1e9dbf16cd0074965e73a6b1db07..ac05ee34cab74000dc230dc46066c3d26468ebdc 100644
--- a/example/CMakeLists.txt
+++ b/example/CMakeLists.txt
@@ -54,6 +54,18 @@ install(TARGETS ALL_Staggered
 	LIBRARY DESTINATION lib
 	INCLUDES DESTINATION include)
 
+add_executable(ALL_Tensor ALL_Tensor.cpp)
+if(CM_ALL_VTK_OUTPUT)
+	target_compile_definitions(ALL_Tensor PRIVATE ALL_VTK_OUTPUT_EXAMPLE)
+	target_include_directories(ALL_Tensor PUBLIC ${VTK_INCLUDE_DIRS})
+	target_link_libraries(ALL_Tensor PUBLIC ${VTK_LIBRARIES})
+endif(CM_ALL_VTK_OUTPUT)
+target_link_libraries (ALL_Tensor LINK_PUBLIC ALL)
+install(TARGETS ALL_Tensor
+	RUNTIME DESTINATION bin
+	LIBRARY DESTINATION lib
+	INCLUDES DESTINATION include)
+
 if(CM_ALL_VORONOI)
 	add_executable(ALL_Voronoi ALL_Voronoi.cpp)
 	if(CM_ALL_VTK_OUTPUT)
@@ -125,4 +137,20 @@ if(CM_ALL_FORTRAN)
             RUNTIME DESTINATION bin
             LIBRARY DESTINATION lib
             INCLUDES DESTINATION include)
+
+    add_executable(ALL_Tensor_f ALL_Tensor_f.F90)
+    if(CM_ALL_VTK_OUTPUT)
+        target_compile_definitions(ALL_Tensor_f PRIVATE ALL_VTK_OUTPUT_EXAMPLE)
+        target_include_directories(ALL_Tensor_f PUBLIC ${VTK_INCLUDE_DIRS})
+        target_link_libraries(ALL_Tensor_f PUBLIC ${VTK_LIBRARIES})
+    endif(CM_ALL_VTK_OUTPUT)
+    target_link_libraries(ALL_Tensor_f LINK_PUBLIC ALL)
+    target_link_libraries(ALL_Tensor_f LINK_PUBLIC ALL_fortran)
+    target_include_directories(ALL_Tensor_f PUBLIC ${CMAKE_BINARY_DIR}/modules)
+    list(APPEND CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR}/modules)
+    set_property(TARGET ALL_Tensor_f PROPERTY LINKER_LANGUAGE Fortran)
+    install(TARGETS ALL_Tensor_f
+            RUNTIME DESTINATION bin
+            LIBRARY DESTINATION lib
+            INCLUDES DESTINATION include)
 endif(CM_ALL_FORTRAN)