From 014442b906e0ddbb38ea54a985136cf50b64f0e4 Mon Sep 17 00:00:00 2001
From: Kjartan Thor Wikfeldt <ktwikfeldt@gmail.com>
Date: Thu, 5 Jul 2018 14:57:48 +0200
Subject: [PATCH] rm solutions

---
 lab1/README.md                          |   2 +-
 lab1/sol/game_of_life_duplicate.c       | 135 ------------
 lab1/sol/game_of_life_duplicate.f90     |  98 ---------
 lab1/sol/game_of_life_p2p.c             | 229 --------------------
 lab1/sol/game_of_life_p2p.f90           | 226 --------------------
 lab1/sol/hello_world.c                  |  17 --
 lab1/sol/hello_world.f90                |  17 --
 lab1/sol/parallel_search-duplicate.c    |  52 -----
 lab1/sol/parallel_search-duplicate.f90  |  48 -----
 lab1/sol/parallel_search-p2p.c          |  75 -------
 lab1/sol/parallel_search-p2p.f90        |  70 -------
 lab1/sol/pi_mpi.c                       | 171 ---------------
 lab1/sol/pi_mpi.f90                     |  97 ---------
 lab1/sol/send_recv.c                    |  27 ---
 lab1/sol/send_recv.f90                  |  32 ---
 lab2/README.md                          |   2 +-
 lab2/sol/game_of_life-collective.c      | 257 -----------------------
 lab2/sol/game_of_life-collective.f90    | 207 ------------------
 lab2/sol/parallel_search-collective.c   | 109 ----------
 lab2/sol/parallel_search-collective.f90 |  98 ---------
 lab2/sol/pi_collectives.c               | 150 --------------
 lab2/sol/pi_collectives.f90             |  79 -------
 lab2/sol/pi_nonblocking.c               | 177 ----------------
 lab2/sol/pi_nonblocking.f90             | 101 ---------
 lab2/sol/send_recv-nonblocking.c        |  32 ---
 lab2/sol/send_recv-nonblocking.f90      |  34 ---
 lab2/sol/send_recv-race.c               |  31 ---
 lab2/sol/send_recv-race.f90             |  33 ---
 lab3/README.md                          |   2 +-
 lab3/sol/game_of_life-one_sided.c       | 264 -----------------------
 lab3/sol/game_of_life-one_sided.f90     | 225 --------------------
 lab3/sol/game_of_life-topology.c        | 265 ------------------------
 lab3/sol/game_of_life-topology.f90      | 226 --------------------
 lab3/sol/hello_world_mpiio.c            |  42 ----
 lab3/sol/hello_world_mpiio.f90          |  41 ----
 35 files changed, 3 insertions(+), 3668 deletions(-)
 delete mode 100644 lab1/sol/game_of_life_duplicate.c
 delete mode 100644 lab1/sol/game_of_life_duplicate.f90
 delete mode 100644 lab1/sol/game_of_life_p2p.c
 delete mode 100644 lab1/sol/game_of_life_p2p.f90
 delete mode 100644 lab1/sol/hello_world.c
 delete mode 100644 lab1/sol/hello_world.f90
 delete mode 100644 lab1/sol/parallel_search-duplicate.c
 delete mode 100644 lab1/sol/parallel_search-duplicate.f90
 delete mode 100644 lab1/sol/parallel_search-p2p.c
 delete mode 100644 lab1/sol/parallel_search-p2p.f90
 delete mode 100644 lab1/sol/pi_mpi.c
 delete mode 100644 lab1/sol/pi_mpi.f90
 delete mode 100644 lab1/sol/send_recv.c
 delete mode 100644 lab1/sol/send_recv.f90
 delete mode 100644 lab2/sol/game_of_life-collective.c
 delete mode 100644 lab2/sol/game_of_life-collective.f90
 delete mode 100644 lab2/sol/parallel_search-collective.c
 delete mode 100644 lab2/sol/parallel_search-collective.f90
 delete mode 100644 lab2/sol/pi_collectives.c
 delete mode 100644 lab2/sol/pi_collectives.f90
 delete mode 100644 lab2/sol/pi_nonblocking.c
 delete mode 100644 lab2/sol/pi_nonblocking.f90
 delete mode 100644 lab2/sol/send_recv-nonblocking.c
 delete mode 100644 lab2/sol/send_recv-nonblocking.f90
 delete mode 100644 lab2/sol/send_recv-race.c
 delete mode 100644 lab2/sol/send_recv-race.f90
 delete mode 100644 lab3/sol/game_of_life-one_sided.c
 delete mode 100644 lab3/sol/game_of_life-one_sided.f90
 delete mode 100644 lab3/sol/game_of_life-topology.c
 delete mode 100644 lab3/sol/game_of_life-topology.f90
 delete mode 100644 lab3/sol/hello_world_mpiio.c
 delete mode 100644 lab3/sol/hello_world_mpiio.f90

diff --git a/lab1/README.md b/lab1/README.md
index 951bad7..1d3e15c 100644
--- a/lab1/README.md
+++ b/lab1/README.md
@@ -107,7 +107,7 @@ open(unit=11,file=outfilename)
 
 # Solutions
 
-The solutions can be found in the [sol/ directory](sol/).
+The solutions will be made available at the end of the lab.
 
 # Acknowledgment
 
diff --git a/lab1/sol/game_of_life_duplicate.c b/lab1/sol/game_of_life_duplicate.c
deleted file mode 100644
index 005921a..0000000
--- a/lab1/sol/game_of_life_duplicate.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/***************************
-  Conway Game of Life
-
-  initialize and finalize MPI;
-  run duplicate calculation
-  on each processor
-
-****************************/
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define NI 200        /* array sizes */
-#define NJ 200
-#define NSTEPS 500    /* number of time steps */
-
-int main(int argc, char *argv[]){
-
-  int i, j, n, im, ip, jm, jp, ni, nj, nsum, isum, nprocs, myid;
-  int **old, **new;  
-  float x;
-
-  char outfilename[16];
-  FILE *outfile;
-
-  /* initialize MPI */
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
-  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
-
-  /* allocate arrays */
-
-  ni = NI + 2;  /* add 2 for left and right ghost cells */
-  nj = NJ + 2;
-  old = malloc(ni*sizeof(int*));
-  new = malloc(ni*sizeof(int*));
-  for(i=0; i<ni; i++){
-    old[i] = malloc(nj*sizeof(int));
-    new[i] = malloc(nj*sizeof(int));
-  }
-
-  /*  initialize elements of old to 0 or 1 */
-
-  for(i=1; i<=NI; i++){
-    for(j=1; j<=NJ; j++){
-      x = rand()/((float)RAND_MAX + 1);
-      if(x<0.5){
-        old[i][j] = 0;
-      }
-      else{
-        old[i][j] = 1;
-      }
-    }
-  }
-
-  /*  time steps */
-
-  for(n=0; n<NSTEPS; n++){
-
-    /* corner boundary conditions */
-    
-    old[0][0] = old[NI][NJ];
-    old[0][NJ+1] = old[NI][1];
-    old[NI+1][NJ+1] = old[1][1];
-    old[NI+1][0] = old[1][NJ];
-    
-    /* left-right boundary conditions */
-    
-    for(i=1; i<=NI; i++){
-      old[i][0] = old[i][NJ];
-      old[i][NJ+1] = old[i][1];
-    }
-
-    /* top-bottom boundary conditions */
-
-    for(j=1; j<=NJ; j++){
-      old[0][j] = old[NI][j];
-      old[NI+1][j] = old[1][j];
-    }
-
-    for(i=1; i<=NI; i++){
-      for(j=1; j<=NJ; j++){
-
-        im = i-1;
-        ip = i+1;
-        jm = j-1;
-        jp = j+1;
-        nsum =  old[im][jp] + old[i][jp] + old[ip][jp]
-          + old[im][j ]              + old[ip][j ] 
-          + old[im][jm] + old[i][jm] + old[ip][jm];
-
-        switch(nsum){
-        case 3:
-          new[i][j] = 1;
-          break;
-        case 2:
-          new[i][j] = old[i][j];
-          break;
-        default:
-          new[i][j] = 0;
-        }
-      }
-    }
-
-    /* copy new state into old state */
-
-    for(i=1; i<=NI; i++){
-      for(j=1; j<=NJ; j++){
-        old[i][j] = new[i][j];
-      }
-    }
-  }
-
-  /*  Iterations are done; sum the number of live cells */
-
-  isum = 0;
-  for(i=1; i<=NI; i++){
-    for(j=1; j<=NJ; j++){
-      isum = isum + new[i][j];
-    }
-  }
-
- 
-  /* print process identification and number of live cells */
-
-  sprintf(outfilename,"found.data_%d",myid);
-  outfile = fopen(outfilename,"w") ;
-  fprintf(outfile, "Process %d of %d:  Number of live cells = %d\n", myid, nprocs, isum);
-
-  MPI_Finalize();
-
-}
-
diff --git a/lab1/sol/game_of_life_duplicate.f90 b/lab1/sol/game_of_life_duplicate.f90
deleted file mode 100644
index 9ebb369..0000000
--- a/lab1/sol/game_of_life_duplicate.f90
+++ /dev/null
@@ -1,98 +0,0 @@
-!-------------------------
-!  Conway Game of Life
-!  duplicate calculations
-!  on each process
-!-------------------------
-
-program life
-  
-  implicit none
-
-  include 'mpif.h'
-  
-  integer, parameter :: ni=200, nj=200, nsteps = 500
-  integer :: i, j, n, im, ip, jm, jp, nsum, isum, ierr, myid, nprocs
-  integer, allocatable, dimension(:,:) :: old, new
-  real :: arand
-  
-  ! initialize MPI
-
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid,   ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! allocate arrays, including room for ghost cells
-
-  allocate(old(0:ni+1,0:nj+1), new(0:ni+1,0:nj+1))
-
-  ! initialize elements of old to 0 or 1
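-  ! (nint rounds the uniform random number, so each cell starts
-  ! alive with probability one half)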
-
-  do j = 1, nj
-     do i = 1, ni
-        call random_number(arand)
-        old(i,j) = nint(arand)
-     enddo
-  enddo
-
-  !  iterate
-
-  time_iteration: do n = 1, nsteps
-
-     ! corner boundary conditions
-
-     old(0,0) = old(ni,nj)
-     old(0,nj+1) = old(ni,1)
-     old(ni+1,nj+1) = old(1,1)
-     old(ni+1,0) = old(1,nj)
-
-     ! left-right boundary conditions
-
-     old(1:ni,0) = old(1:ni,nj)
-     old(1:ni,nj+1) = old(1:ni,1)
-
-     ! top-bottom boundary conditions
-
-     old(0,1:nj) = old(ni,1:nj)
-     old(ni+1,1:nj) = old(1,1:nj)
-
-     do j = 1, nj
-        do i = 1, ni
-
-           im = i - 1
-           ip = i + 1
-           jm = j - 1
-           jp = j + 1
-           nsum = old(im,jp) + old(i,jp) + old(ip,jp) &
-                + old(im,j )             + old(ip,j ) &
-                + old(im,jm) + old(i,jm) + old(ip,jm)
-
-           select case (nsum)
-           case (3)
-              new(i,j) = 1
-           case (2)
-              new(i,j) = old(i,j)
-           case default
-              new(i,j) = 0
-           end select
-
-        enddo
-     enddo
-
-     ! copy new state into old state
-
-     old = new
-
-  enddo time_iteration
-
-  ! Iterations are done; sum the number of live cells
-  
-  isum = sum(new(1:ni,1:nj))
-  
-  ! Print final number of live cells.
-
-  write(*,*) "my rank = ",myid," of ",nprocs," : Number of live cells =",isum
-
-  deallocate(old, new)
-  call mpi_finalize(ierr)
-
-end program life
diff --git a/lab1/sol/game_of_life_p2p.c b/lab1/sol/game_of_life_p2p.c
deleted file mode 100644
index b927ad3..0000000
--- a/lab1/sol/game_of_life_p2p.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/**************************************
-      Conway Game of Life
-
- 2-processor domain decomposition;
- domain decomposed with horizontal
- line, i.e., top half and bottom half
-***************************************/
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define NI 200
-#define NJ 200
-#define NSTEPS 500
-
-int main(int argc, char *argv[]){
-
-  int i, j, n, im, ip, jm, jp, nsum, isum, isum1, isumloc,
-    nprocs, myid;
-  int ig, jg, i1g, i2g, j1g, j2g, ninom, njnom, ninj, 
-    i1, i2, i2m, j1, j2, j2m, ni, nj;
-  int niproc, njproc;  /* no. procs in each direction */
-  int **old, **new, *old1d, *new1d;
-  MPI_Status status;
-  float x;
-
-
-  /* initialize MPI */
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
-  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
-
-  /* only 2 MPI tasks supported */
-  if(nprocs!=2) {
-    printf("Only 2 mpi tasks supported\n");
-    printf("Number of tasks = %d nAborting...\n",nprocs);
-    MPI_Abort(MPI_COMM_WORLD, 1);
-  }
-
-  /* domain decomposition */
-
-  /* nominal number of points per proc. in each direction
-     (without ghost cells; assume numbers divide evenly) */ 
-  niproc = nprocs;  /* divide domain in i direction only */
-  njproc = 1;
-  ninom = NI/niproc;
-  njnom = NJ/njproc;
-
-  /* global starting and ending indices (without ghost cells) */
-  i1g = (myid*ninom) + 1;
-  i2g = i1g + ninom - 1;
-  j1g = 1;
-  j2g = NJ;
-
-  /* local starting and ending indices, including ghost cells */
-  i1  = 0;
-  i2  = ninom + 1;
-  i2m = i2-1;
-  j1  = 0;
-  j2  = NJ+1;
-  j2m = j2-1;
-
-  /* allocate arrays; want elements to be contiguous,
-     so allocate 1-D arrays, then set pointer to each row
-     (old and new) to allow use of array notation for convenience */
-
-  ni = i2-i1+1;
-  nj = j2-j1+1;
-  ninj = ni*nj;
-
-  old1d = malloc(ninj*sizeof(int));
-  new1d = malloc(ninj*sizeof(int));
-  old   = malloc(ni*sizeof(int*));
-  new   = malloc(ni*sizeof(int*));
-
-  for(i=0; i<ni; i++){
-    old[i] = &old1d[i*nj];
-    new[i] = &new1d[i*nj];
-  }
-
-  /*  Initialize elements of old to 0 or 1.
-      We're doing some sleight of hand here to make sure we
-      initialize to the same values as in the serial code.
-      The rand() function is called for every i and j, even
-      if they are not on the current processor, to get the same
-      random distribution as the serial case, but they are
-      only used if this (i,j) resides on the current processor. */
-
-  for(ig=1; ig<=NI; ig++){
-    for(jg=1; jg<=NJ; jg++){
-      x = rand()/((float)RAND_MAX + 1);
-
-      /* if this i is on the current processor */
-      if( ig >= i1g && ig <= i2g ){
-
-        /* local i and j indices, accounting for lower ghost cell */
-        i = ig - i1g + 1;
-        j = jg;
-
-        if(x<0.5){
-          old[i][j] = 0;
-        }else{
-          old[i][j] = 1;
-        }
-      }
-
-    }
-  }
-
-  /*  Iterate */
-
-  for(n=0; n<NSTEPS; n++){
-
-    /* transfer data to ghost cells */
-
-    for(i=1; i<i2; i++){          /* left and right columns */
-      old[i][0]  = old[i][j2m];
-      old[i][j2] = old[i][1];
-    }
-
-    if(nprocs == 1){
-      for(j=1; j<j2; j++){          /* top and bottom rows */
-        old[0][j]  = old[i2m][j];
-        old[i2][j] = old[1][j];
-      }
-      old[0][0]   = old[i2m][j2m];  /* corners */
-      old[0][j2]  = old[i2m][1];
-      old[i2][0]  = old[1][j2m];
-      old[i2][j2] = old[1][1];
-    }else{
-
-
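-      /* Matched send/recv ordering avoids deadlock: rank 0 sends
-         first and then receives, while rank 1 receives first and
-         then sends. */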
-      if(myid == 0){
-
-        /* top and bottom rows */
-
-        MPI_Send(&old[i2-1][0], nj, MPI_INT, 1,  0, MPI_COMM_WORLD);
-        MPI_Recv(&old[i2][0],   nj, MPI_INT, 1,  1, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][0],    nj, MPI_INT, 1,  2, MPI_COMM_WORLD);
-        MPI_Recv(&old[0][0],    nj, MPI_INT, 1,  3, MPI_COMM_WORLD, &status);
-
-        /* corners */
-
-        MPI_Send(&old[1][1],     1, MPI_INT, 1, 10, MPI_COMM_WORLD);
-        MPI_Recv(&old[0][0],     1, MPI_INT, 1, 11, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][j2m],   1, MPI_INT, 1, 12, MPI_COMM_WORLD);
-        MPI_Recv(&old[0][j2],    1, MPI_INT, 1, 13, MPI_COMM_WORLD, &status);
-
-      }else{
-
-        /* top and bottom rows */
-
-        MPI_Recv(&old[0][0],    nj, MPI_INT, 0,  0, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][0],    nj, MPI_INT, 0,  1, MPI_COMM_WORLD);
-        MPI_Recv(&old[i2][0],   nj, MPI_INT, 0,  2, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[i2-1][0], nj, MPI_INT, 0,  3, MPI_COMM_WORLD);
-
-        /* corners */
-
-        MPI_Recv(&old[i2][j2],   1, MPI_INT, 0, 10, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[i2m][j2m], 1, MPI_INT, 0, 11, MPI_COMM_WORLD);
-        MPI_Recv(&old[i2][0],    1, MPI_INT, 0, 12, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[i2m][1],   1, MPI_INT, 0, 13, MPI_COMM_WORLD);
-
-      }
-
-
-    }
-
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-                
-        im = i-1;
-        ip = i+1;
-        jm = j-1;
-        jp = j+1;
-        nsum =  old[im][jp] + old[i][jp] + old[ip][jp]
-          + old[im][j ]              + old[ip][j ]
-          + old[im][jm] + old[i][jm] + old[ip][jm];
-
-        switch(nsum){
-        case 3:
-          new[i][j] = 1;
-          break;
-        case 2:
-          new[i][j] = old[i][j];
-          break;
-        default:
-          new[i][j] = 0;
-        }
-      }
-    }
-
-    /* copy new state into old state */
-    
-    for(i=1; i<ni; i++){
-      for(j=0; j<nj; j++){
-        old[i][j] = new[i][j];
-      }
-    }
-
-  }
-
-  /*  Iterations are done; sum the number of live cells */
-
-  isum = 0;
-  for(i=1; i<i2; i++){
-    for(j=1; j<j2; j++){
-      isum = isum + new[i][j];
-    }
-  }
-
-  /* Print final number of live cells. */
-  
-  if(nprocs > 1){
-    if(myid == 0){
-      MPI_Recv(&isum1, 1, MPI_INT, 1, 20, MPI_COMM_WORLD, &status);
-      isum = isum + isum1;
-    }else{
-      MPI_Send(&isum,  1, MPI_INT, 0, 20, MPI_COMM_WORLD);
-    }
-  }
-
-  if(myid == 0) printf("Number of live cells = %d\n", isum);
-
-  MPI_Finalize();
-}
diff --git a/lab1/sol/game_of_life_p2p.f90 b/lab1/sol/game_of_life_p2p.f90
deleted file mode 100644
index dd541a6..0000000
--- a/lab1/sol/game_of_life_p2p.f90
+++ /dev/null
@@ -1,226 +0,0 @@
-!------------------------------------
-!     Conway Game of Life
-
-! 2 processors, domain decomposition
-! in j direction only (divide domain
-! with a vertical line)
-
-!------------------------------------
-program life
-
-  implicit none
-  include 'mpif.h'
-
-  integer, parameter :: ni = 200, nj = 200, nsteps = 500
-  integer :: i, j, n, im, ip, jm, jp, nsum, isum, isum1, &
-       ierr, myid, nprocs, i1, i2, i1p, i2m, j1, j2, j1p, &
-       j2m, i1n, i2n, j1n, j2n, ninom, njnom, &
-       niproc, njproc, isumloc, istart, iend
-  integer :: status(mpi_status_size), row_type
-  integer, allocatable, dimension(:,:) :: old, new
-  real :: arand
-
-  ! initialize MPI
-
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid, ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! only 1 or 2 MPI tasks supported
-  if(nprocs.gt.2) then
-     write(*,*) "Only 1 or 2 MPI tasks supported"
-     write(*,*) "Number of Tasks = ",nprocs
-     write(*,*) "Aborting..."
-     call mpi_abort(mpi_comm_world,1,ierr)
-  endif
-
-
-  ! domain decomposition
-  !---------------------
-
-  ! nominal number of points per proc. in each direction
-  ! (without ghost cells, assume numbers divide evenly);
-  ! niproc and njproc are the numbers of procs in the i
-  ! and j directions.
-
-  niproc = 1
-  njproc = nprocs
-  ninom  = ni/niproc
-  njnom  = nj/njproc
-
-  ! nominal starting and ending indices
-  ! (nominal means without ghost cells)
-
-  i1n = 1
-  i2n = ni
-  j1n = mod(myid,2)*njnom + 1
-  j2n = j1n + njnom - 1
-
-  ! local starting and ending indices, including 2 ghost cells
-
-  i1  = i1n - 1
-  i2  = i2n + 1
-  i1p = i1  + 1
-  i2m = i2  - 1
-  j1  = j1n - 1
-  j2  = j2n + 1
-  j1p = j1  + 1
-  j2m = j2  - 1
-  
-  ! allocate arrays
-
-  allocate( old(i1:i2,j1:j2), new(i1:i2,j1:j2) )
-
-  ! Initialize elements of old to 0 or 1.  We're doing some
-  ! sleight of hand here to make sure we initialize to the
-  ! same values as in the serial case. The random_number
-  ! function is called for every i and j, even if they are
-  ! not on the current processor, to get the same random
-  ! distribution as the serial case, but they are only used
-  ! if this i and j reside on the current processor.
-
-  do j = 1, nj
-     do i = 1, ni
-        call random_number(arand)
-        if( j > j1 .and.j < j2 ) then
-           old(i,j) = nint(arand)
-        endif
-     enddo
-  enddo
-
-  !  iterate
-
-  time_iteration: do n = 1, nsteps
-
-     ! transfer data to ghost cells
-
-     if(nprocs == 1) then
-
-        ! left and right
-        old(i1p:i2m,j1) = old(i1p:i2m,j2m)
-        old(i1p:i2m,j2) = old(i1p:i2m,j1p)
-
-        ! top and bottom
-        old(i1,j1:j2) = old(i2m,j1:j2)
-        old(i2,j1:j2) = old(i1p,j1:j2)
-
-        ! corners
-        old(i1,j1) = old(i2m,j2m)
-        old(i1,j2) = old(i2m,j1p)
-        old(i2,j1) = old(i1p,j2m)
-        old(i2,j2) = old(i1p,j1p)
-
-     else
-
-        if(myid == 0) then
-
-           ! left, right
-           call mpi_send(old(i1p,j1p), ninom, mpi_integer,    &
-                1, 2, mpi_comm_world, ierr)
-           call mpi_recv(old(i1p,j2 ), ninom, mpi_integer,    &
-                1, 2, mpi_comm_world, status, ierr)
-           call mpi_send(old(i1p,j2m), ninom, mpi_integer,    &
-                1, 3, mpi_comm_world, ierr)
-           call mpi_recv(old(i1p,j1 ), ninom, mpi_integer,    &
-                1, 3, mpi_comm_world, status, ierr)
-
-           ! top and bottom
-           old(i1,j1:j2) = old(i2m,j1:j2)
-           old(i2,j1:j2) = old(i1p,j1:j2)
-
-           ! corners
-           call mpi_send(old(i1p,j1p), 1, mpi_integer, &
-                1, 4, mpi_comm_world, ierr)
-           call mpi_recv(old(i1, j1 ), 1, mpi_integer, &
-                1, 4, mpi_comm_world, status, ierr)
-           call mpi_send(old(i2m,j1p), 1, mpi_integer, &
-                1, 5, mpi_comm_world, ierr)
-           call mpi_recv(old(i2, j1 ), 1, mpi_integer, &
-                1, 5, mpi_comm_world, status, ierr)
-
-        else
-
-           ! left, right
-           call mpi_recv(old(i1p,j2 ), ninom, mpi_integer,    &
-                0, 2, mpi_comm_world, status, ierr)
-           call mpi_send(old(i1p,j1p), ninom, mpi_integer,    &
-                0, 2, mpi_comm_world, ierr)
-           call mpi_recv(old(i1p,j1 ), ninom, mpi_integer,    &
-                0, 3, mpi_comm_world, status, ierr)
-           call mpi_send(old(i1p,j2m), ninom, mpi_integer,    &
-                0, 3, mpi_comm_world, ierr)
-
-           ! top and bottom
-           old(i1,j1:j2) = old(i2m,j1:j2)
-           old(i2,j1:j2) = old(i1p,j1:j2)
-
-           ! corners
-           call mpi_recv(old(i2, j2 ), 1, mpi_integer, &
-                0, 4, mpi_comm_world, status, ierr)
-           call mpi_send(old(i2m,j2m), 1, mpi_integer, &
-                0, 4, mpi_comm_world, ierr)
-           call mpi_recv(old(i1, j2 ), 1, mpi_integer, &
-                0, 5, mpi_comm_world, status, ierr)
-           call mpi_send(old(i1p,j2m), 1, mpi_integer, &
-                0, 5, mpi_comm_world, ierr)
-
-        endif  !... myid
-
-     endif  !... nprocs
-
-     ! update states of cells
-
-     do j = j1p, j2m
-        do i = i1p, i2m
-
-           ip = i + 1
-           im = i - 1
-           jp = j + 1
-           jm = j - 1
-           nsum =  old(im,jp)  + old(i,jp)  + old(ip,jp) &
-                     + old(im,j )                   + old(ip,j ) &
-                     + old(im,jm) + old(i,jm) + old(ip,jm)
-
-           select case (nsum)
-           case (3)
-              new(i,j) = 1
-           case (2)
-              new(i,j) = old(i,j)
-           case default
-              new(i,j) = 0
-           end select
-
-        enddo
-     enddo
-
-     ! copy new state into old state
-     old(i1p:i2m,j1p:j2m) = new(i1p:i2m,j1p:j2m)
-
-  enddo time_iteration
-
-  ! Iterations are done; sum the number of live cells
-  
-  isum = sum(new(i1p:i2m,j1p:j2m))
-  
-  ! Print final number of live cells.
-
-  if(nprocs > 1) then
-     if(myid == 0) then
-        call mpi_recv(isum1, 1, mpi_integer, 1, 10, &
-             mpi_comm_world, status, ierr)
-        isum = isum + isum1
-     else
-        call mpi_send(isum,  1, mpi_integer, 0, 10, &
-             mpi_comm_world, ierr)
-     endif
-  endif
-
-  if(myid == 0) then
-     write(*,"(/'Number of live cells = ', i6/)") isum
-  endif
-
-  deallocate(old, new)
-  call mpi_finalize(ierr)
-
-end program life
-
diff --git a/lab1/sol/hello_world.c b/lab1/sol/hello_world.c
deleted file mode 100644
index 66a5af6..0000000
--- a/lab1/sol/hello_world.c
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <stdio.h>
-#include <mpi.h>
-
-int main (int argc, char *argv[]) {
-
-  int myrank, size;
-
-  MPI_Init(&argc, &argv);                 /* Initialize MPI       */
-  MPI_Comm_rank(MPI_COMM_WORLD, &myrank); /* Get my rank          */
-  MPI_Comm_size(MPI_COMM_WORLD, &size);   /* Get the total
-					     number of processors */
-  printf("Processor %d of %d: Hello World\n", myrank, size);
-
-  MPI_Finalize();                         /* Terminate MPI        */
-
-}
-
diff --git a/lab1/sol/hello_world.f90 b/lab1/sol/hello_world.f90
deleted file mode 100644
index 4ea9876..0000000
--- a/lab1/sol/hello_world.f90
+++ /dev/null
@@ -1,17 +0,0 @@
-program hello
-
-implicit none
-
-include "mpif.h"
-
-integer :: rank, comm_size, ierr
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-print *, "Hello from rank ", rank, " of ", comm_size
-
-call MPI_Finalize(ierr)
-
-end program
diff --git a/lab1/sol/parallel_search-duplicate.c b/lab1/sol/parallel_search-duplicate.c
deleted file mode 100644
index 88f4a65..0000000
--- a/lab1/sol/parallel_search-duplicate.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <mpi.h>
-
-int main (int argc, char *argv[]) {
-  const int N=300;
-  int i,target;
-  int b[N];
-  FILE *infile,*outfile;
-  int myrank, size;
-
-  char outfilename[16] ;
-
-
-  MPI_Init(&argc, &argv);                 /* Initialize MPI       */
-  MPI_Comm_rank(MPI_COMM_WORLD, &myrank); /* Get my rank          */
-  MPI_Comm_size(MPI_COMM_WORLD, &size);   /* Get the total
-					     number of processors */
-
-  /* generate the name of the output file
-     all mpi tasks must write to a different file */
-  sprintf(outfilename,"found.data_%d",myrank);
-
-  /* File b.data has the target value on the first line
-     The remaining 300 lines of b.data have the values for the b array */
-
-  infile = fopen("b.data","r" ) ;
-  outfile = fopen(outfilename,"w") ;
-    
-  /* read in target */
-  fscanf(infile,"%d", &target);
-
-  /* read in b array */
-  for(i=0;i<N;i++) {
-    fscanf(infile,"%d", &b[i]);
-  }
-  fclose(infile);
-
-  /* Search the b array and output the target locations */
-
-  for(i=0;i<N;i++) {
-    if( b[i] == target) {
-      fprintf(outfile,"%d\n",i+1);
-    }
-  }
-  fclose(outfile);
-
-  printf("Processor %d of %d: finished!\n", myrank, size);
-  MPI_Finalize();                         /* Terminate MPI        */
-
-  return 0;
-}
diff --git a/lab1/sol/parallel_search-duplicate.f90 b/lab1/sol/parallel_search-duplicate.f90
deleted file mode 100644
index 3ed1cdc..0000000
--- a/lab1/sol/parallel_search-duplicate.f90
+++ /dev/null
@@ -1,48 +0,0 @@
-PROGRAM search  
-  implicit none
-  include "mpif.h"
-  integer, parameter ::  N=300
-  integer i, target ! local variables
-  integer b(N)      ! the entire array of integers
-  integer myrank,size,ierr
-  character*15 outfilename
-  character*4 rankchar
-
-  call MPI_Init(ierr)
-  call MPI_Comm_rank(MPI_COMM_WORLD,myrank,ierr)
-  call MPI_Comm_size(MPI_COMM_WORLD,size,ierr)
-
-  ! generate the name of the output file
-  ! all mpi tasks must write to a different file
-  write(rankchar,'(i4.4)') myrank
-  outfilename="found.data_" // rankchar
- 
-  ! File b.data has the target value on the first line
-  ! The remaining 300 lines of b.data have the values for the b array
-  open(unit=10,file="b.data")     
-
-  ! File found.data will contain the indices of b where the target is
-  open(unit=11,file=outfilename)
-
-  ! Read in the target
-  read(10,*) target
-
-  ! Read in b array 
-
-  do i=1,N
-     read(10,*) b(i)
-  end do
-
-  ! Search the b array and output the target locations
-
-  do i=1,N
-     if (b(i) == target) then
-        write(11,*) i
-     end if
-  end do
-
-  write(*,*) "Processor ",myrank," of ",size,": Finished!"
-
-  call MPI_Finalize(ierr)
-
-END PROGRAM search
diff --git a/lab1/sol/parallel_search-p2p.c b/lab1/sol/parallel_search-p2p.c
deleted file mode 100644
index 34d450a..0000000
--- a/lab1/sol/parallel_search-p2p.c
+++ /dev/null
@@ -1,75 +0,0 @@
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define N 300
-
-int main(int argc, char *argv[]){
-  int i,target;
-  int b[N],a[N/3]; /* a is the name of the array each slave searches */
-  int rank,err,nproc;
-  MPI_Status status;
-  int end_cnt,x,gi;
-  FILE *infile,*outfile;
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nproc);
-  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-  
-  /* only 4 MPI tasks supported */
-  if(nproc!=4) {
-    printf("Only 4 mpi tasks supported\n");
-    printf("Number of tasks = %d Aborting...\n",nproc);
-    MPI_Abort(MPI_COMM_WORLD, 1);
-  }
-
-  if(rank==0) {
-    infile = fopen("b.data","r" ) ;
-    
-    fscanf(infile,"%d", &target);
-
-    for(i=1;i<=3;i++) {
-      /*  Notice how i is used as the destination process for each send */
-      MPI_Send(&target,1,MPI_INT,i,9,MPI_COMM_WORLD);
-    }
-
-    /* read in b array */
-    for(i=0;i<N;i++) {
-      fscanf(infile,"%d", &b[i]);
-    }
-    fclose(infile);
-
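-    /* distribute b: each worker gets a contiguous 100-element slice */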
-    MPI_Send(&b[0],100,MPI_INT,1,11,MPI_COMM_WORLD);
-    MPI_Send(&b[100],100,MPI_INT,2,11,MPI_COMM_WORLD);
-    MPI_Send(&b[200],100,MPI_INT,3,11,MPI_COMM_WORLD);
-
-    end_cnt=0;
-    outfile = fopen("found.data","w") ;
-    while (end_cnt != 3) {
-      MPI_Recv(&x,1,MPI_INTEGER,MPI_ANY_SOURCE,MPI_ANY_TAG,
-	       MPI_COMM_WORLD,&status);
-      if (status.MPI_TAG == 52 ) {
-	end_cnt+=1;  /* See Comment */  
-      } else {
-	fprintf(outfile,"P%d  %d\n",status.MPI_SOURCE,x+1);
-      }
-    }
-    fclose(outfile);
-  } else {
-    MPI_Recv(&target,1,MPI_INT,0,9,MPI_COMM_WORLD,&status);
-    MPI_Recv(&a,100,MPI_INT,0,11,MPI_COMM_WORLD,&status);
-
-    for(i=0;i<100;i++) {
-      if (a[i] == target) {
-        gi=(rank-1)*100+i; /* Equation to convert local index to global index*/ 
-	MPI_Send(&gi,1,MPI_INT,0,19,MPI_COMM_WORLD);
-      }
-    }
-
-    MPI_Send(&target,1,MPI_INT,0,52,MPI_COMM_WORLD); /* See Comment */
-   
-  }
-
-  MPI_Finalize();
-  return 0;
-}
diff --git a/lab1/sol/parallel_search-p2p.f90 b/lab1/sol/parallel_search-p2p.f90
deleted file mode 100644
index 8985da0..0000000
--- a/lab1/sol/parallel_search-p2p.f90
+++ /dev/null
@@ -1,70 +0,0 @@
-PROGRAM parallel_search   
-  implicit none
-  include 'mpif.h'
-  integer N
-  parameter (N=300)
-  integer i, target
-  integer b(N),a(N/3) ! a is the name of the array each slave searches
-  integer rank,err,nproc
-  integer status(MPI_STATUS_SIZE)
-  integer end_cnt,x,gi
-
-  CALL MPI_INIT(err)
-  CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, err)
-  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, err)
- 
-  !only 4 MPI tasks supported
-  if(nproc.ne.4) then
-     write(*,*) "Must be run with 4 MPI tasks"
-     write(*,*) "Number of Tasks = ",nproc
-     write(*,*) "Aborting..."
-     call mpi_abort(mpi_comm_world,1,err)
-  endif
-
-
-  if (rank == 0) then
-    open(unit=10,file="b.data")
-    read(10,*) target
- 
-    do i=1,3  !  Notice how i is used as the destination process for each send 
-      CALL MPI_SEND(target,1,MPI_INTEGER,i,9,MPI_COMM_WORLD,err)
-    end do
-
-    do i=1,300
-      read(10,*) b(i)
-    end do
-
-    CALL MPI_SEND(b(1),100,MPI_INTEGER,1,11,MPI_COMM_WORLD,err)
-    CALL MPI_SEND(b(101),100,MPI_INTEGER,2,11,MPI_COMM_WORLD,err)
-    CALL MPI_SEND(b(201),100,MPI_INTEGER,3,11,MPI_COMM_WORLD,err)
-
-    end_cnt=0
-    open(unit=11,file="found.data")
-    do while (end_cnt .ne. 3 )
-      CALL MPI_RECV(x,1,MPI_INTEGER,MPI_ANY_SOURCE,MPI_ANY_TAG, &
-                     MPI_COMM_WORLD,status,err)
-      if (status(MPI_TAG) == 52 ) then
-        end_cnt=end_cnt+1  ! See Comment  
-      else
-        write(11,*) "P",status(MPI_SOURCE),x
-      end if
-    end do 
-
-  else 
-    CALL MPI_RECV(target,1,MPI_INTEGER,0,9,MPI_COMM_WORLD,status,err) 
-    CALL MPI_RECV(a,100,MPI_INTEGER,0,11,MPI_COMM_WORLD,status,err) 
-
-    do i=1,100
-      if (a(i) == target) then
-        gi=(rank-1)*100+i !  Equation to convert local index to global index 
-        CALL MPI_SEND(gi,1,MPI_INTEGER,0,19,MPI_COMM_WORLD,err)
-      end if
-    end do  
-
-    CALL MPI_SEND(target,1,MPI_INTEGER,0,52,MPI_COMM_WORLD,err) ! See Comment
-     
-  end if 
-
-  CALL MPI_FINALIZE(err)
-
-END PROGRAM parallel_search
diff --git a/lab1/sol/pi_mpi.c b/lab1/sol/pi_mpi.c
deleted file mode 100644
index f8ea612..0000000
--- a/lab1/sol/pi_mpi.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/**********************************************************************
- * FILE: mpi_pi_send.c
- * OTHER FILES: dboard.c
- * DESCRIPTION:  
- *   MPI pi Calculation Example - C Version 
- *   Point-to-Point communications example
- *   This program calculates pi using a "dartboard" algorithm.  See
- *   Fox et al.(1988) Solving Problems on Concurrent Processors, vol.1
- *   page 207.  All processes contribute to the calculation, with the
- *   master averaging the values for pi. This version uses low level 
- *   sends and receives to collect results.
- * AUTHOR: Blaise Barney. Adapted from Ros Leibensperger, Cornell Theory
- *   Center. Converted to MPI: George L. Gusciora, MHPCC (1/95) 
- * LAST REVISED: 04/13/05
- **********************************************************************/
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-void srandom (unsigned seed);
-double dboard (int darts);
-#define DARTS 50000     /* number of throws at dartboard */
-#define ROUNDS 10       /* number of times "darts" is iterated */
-#define MASTER 0        /* task ID of master task */
-
-int main (int argc, char *argv[])
-{
-  double homepi,    /* value of pi calculated by current task */
-    pi,             /* average of pi after "darts" is thrown */
-    avepi,          /* average pi value for all iterations */
-    pirecv,         /* pi received from worker */
-    pisum;          /* sum of workers pi values */
-  int taskid,       /* task ID - also used as seed number */
-    numtasks,       /* number of tasks */
-    source,         /* source of incoming message */ 
-    mtype,          /* message type */
-    rc,             /* return code */
-    mydarts,remainder,
-    i, n;
-  MPI_Status status;
-
-  /* Obtain number of tasks and task ID */
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
-  MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
-  printf ("MPI task %d has started...\n", taskid);
-  if (taskid == MASTER)
-    printf ("Using %d tasks to compute pi (3.1415926535)\n",numtasks);
-
-  /* Split the darts between processes */
-  mydarts=DARTS/numtasks;
-  remainder=DARTS%numtasks;
-  if(taskid < remainder ) mydarts++ ;
-
-  /* Set seed for random number generator equal to task ID */
-  srandom (taskid);
-
-  avepi = 0;
-  for (i = 0; i < ROUNDS; i++) {
-    /* All tasks calculate pi using dartboard algorithm */
-    homepi = dboard(mydarts);
-
-    /* Workers send homepi to master */
-    /* - Message type will be set to the iteration count */
-    if (taskid != MASTER) {
-      mtype = i;
-      rc = MPI_Send(&homepi, 1, MPI_DOUBLE,
-                    MASTER, mtype, MPI_COMM_WORLD);
-      if (rc != MPI_SUCCESS)
-	printf("%d: Send failure on round %d\n", taskid, mtype);
-    } 
-    else
-      {
-	/* Master receives messages from all workers */
-	/* - Message type will be set to the iteration count */
-	/* - Message source will be set to the wildcard DONTCARE: */
-	/*   a message can be received from any task, as long as the */
-	/*   message types match */
-	/* - The return code will be checked, and a message displayed */
-	/*   if a problem occurred */
-	mtype = i;
-	pisum = 0;
-	for (n = 1; n < numtasks; n++) {
-	  rc = MPI_Recv(&pirecv, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
-                        mtype, MPI_COMM_WORLD, &status);
-	  if (rc != MPI_SUCCESS) 
-            printf("%d: Receive failure on round %d\n", taskid, mtype);
-	  /* keep running total of pi */
-	  pisum = pisum + pirecv;
-	}
-	/* Master calculates the average value of pi for this iteration */
-	pi = (pisum + homepi)/numtasks;
-	/* Master calculates the average value of pi over all iterations */
-	avepi = ((avepi * i) + pi)/(i + 1); 
-	printf("   After %8d throws, average value of pi = %10.8f\n",
-	       (DARTS * (i + 1)),avepi);
-      }    
-  } 
-
-  MPI_Finalize();
-  return 0;
-}
-
-/******************************************************************************
- * FILE: dboard.c
- * DESCRIPTION:
- *   Used in pi calculation example codes. 
- *   See mpi_pi_send.c and mpi_pi_reduce.c  
- *   Throw darts at board.  Done by generating random numbers 
- *   between 0 and 1 and converting them to values for x and y 
- *   coordinates and then testing to see if they "land" in 
- *   the circle."  If so, score is incremented.  After throwing the 
- *   specified number of darts, pi is calculated.  The computed value 
- *   of pi is returned as the value of this function, dboard. 
- *   Note:  the seed value for rand() is set in pi_send.f or pi_reduce.f. 
- * AUTHOR: unknown
- * LAST REVISED: 04/14/05 Blaise Barney
- ****************************************************************************/
-/*
-Explanation of constants and variables used in this function:
-  darts       = number of throws at dartboard
-  score       = number of darts that hit circle
-  n           = index variable
-  r           = random number between 0 and 1
-  x_coord     = x coordinate, between -1 and 1
-  x_sqr       = square of x coordinate
-  y_coord     = y coordinate, between -1 and 1
-  y_sqr       = square of y coordinate
-  pi          = computed value of pi
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#define sqr(x) ((x)*(x))
-long random(void);
-
-double dboard(int darts)
-{
-  double x_coord, y_coord, pi, r; 
-  int score, n;
-  unsigned int cconst;  /* must be 4-bytes in size */
-  /*************************************************************************
-   * The cconst variable must be 4 bytes. We check this and bail if it is
-   * not the right size
-   ************************************************************************/
-  if (sizeof(cconst) != 4) {
-    printf("Wrong data size for cconst variable in dboard routine!\n");
-    printf("See comments in source file. Quitting.\n");
-    exit(1);
-  }
-  cconst = 2 << (31 - 1);
-  score = 0;
-
-  /* "throw darts at board" */
-  for (n = 1; n <= darts; n++)  {
-    /* generate random numbers for x and y coordinates */
-    r = (double)random()/cconst;
-    x_coord = (2.0 * r) - 1.0;
-    r = (double)random()/cconst;
-    y_coord = (2.0 * r) - 1.0;
-
-    /* if dart lands in circle, increment score */
-    if ((sqr(x_coord) + sqr(y_coord)) <= 1.0)
-      score++;
-  }
-
-  /* calculate pi */
-  pi = 4.0 * (double)score/(double)darts;
-  return(pi);
-} 
-
diff --git a/lab1/sol/pi_mpi.f90 b/lab1/sol/pi_mpi.f90
deleted file mode 100644
index 8292576..0000000
--- a/lab1/sol/pi_mpi.f90
+++ /dev/null
@@ -1,97 +0,0 @@
-program pi
-
-implicit none
-
-include "mpif.h"
-
-integer, parameter :: DARTS = 50000, ROUNDS = 10, MASTER = 0
-
-real(8) :: pi_est
-real(8) :: homepi, avepi, pirecv, pisum
-integer :: rank, comm_size, mtype, ierr
-integer :: i, n
-integer, allocatable :: seed(:)
-integer :: istatus(MPI_STATUS_SIZE)
-integer :: mydarts,remainder
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-print *, "MPI task ", rank, " has started ..."
-
-if (rank == MASTER) then
-   print *, "Using ", comm_size, " tasks to compute pi (3.1415926535)"
-end if
-
-! initialize the random number generator
-! we make sure the seed is different for each task
-call random_seed()
-call random_seed(size = n)
-allocate(seed(n))
-seed = 12 + rank*11
-call random_seed(put=seed(1:n))
-deallocate(seed)
-
-avepi = 0
-! split the darts for each round between the processes
-mydarts=DARTS/comm_size
-remainder=mod(DARTS,comm_size)
-if(rank.lt.remainder) then
- mydarts=mydarts+1
-endif
-
-do i = 0, ROUNDS-1
-   homepi = dboard(mydarts)
-
-   if (rank /= MASTER) then
-      mtype = i
-      call MPI_Send(homepi,1,MPI_DOUBLE_PRECISION, MASTER, mtype, &
-                    MPI_COMM_WORLD, ierr)
-   else
-      mtype = i
-      pisum = 0
-      do n = 1, comm_size-1
-         call MPI_Recv(pirecv, 1, MPI_DOUBLE_PRECISION, MPI_ANY_SOURCE, &
-                       mtype, MPI_COMM_WORLD, istatus, ierr)
-
-         ! keep a running total of pi
-         pisum = pisum + pirecv
-      end do
-
-      ! calculate the average value of pi for this iteration
-      pi_est = (pisum + homepi)/comm_size
-
-      ! calculate the average value of pi over all iterations
-      avepi = ((avepi*i) + pi_est)/(i + 1)
-
-      print *, "After ", DARTS*(i+1), " throws, average value of pi =", avepi
-
-   end if
-end do
-
-call MPI_Finalize(ierr)
-
-contains
-
-   real(8) function dboard(darts)
-
-      integer, intent(in) :: darts
-
-      real(8) :: x_coord, y_coord
-      integer :: score, n
-
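-      ! x_coord and y_coord are uniform on [0,1), i.e. a quarter disc,
-      ! so the fraction of hits still estimates pi/4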
-      score = 0
-      do n = 1, darts
-         call random_number(x_coord)
-         call random_number(y_coord)
-
-         if ((x_coord**2 + y_coord**2) <= 1.0d0) then
-            score = score + 1
-         end if
-      end do
-      dboard = 4.0d0*score/darts
-
-   end function
-
-end program
diff --git a/lab1/sol/send_recv.c b/lab1/sol/send_recv.c
deleted file mode 100644
index 0ce4ae7..0000000
--- a/lab1/sol/send_recv.c
+++ /dev/null
@@ -1,27 +0,0 @@
-#include <stdio.h>
-#include "mpi.h"
-
-int main(int argc, char *argv[] )
-{
-  int rank, value, size;
-  MPI_Status status;
-
-  MPI_Init( &argc, &argv );
-
-  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
-  MPI_Comm_size( MPI_COMM_WORLD, &size );
-
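-  /* Pass the value along a chain: rank 0 starts it, each following
-     rank receives from rank-1 and, unless it is the last rank,
-     forwards it to rank+1. */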
-  if (rank == 0) {
-    value = 5;
-    printf( "Process %d sending %d\n", rank, value );
-    MPI_Send( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD );
-  } else {
-    MPI_Recv( &value, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, &status );
-    printf( "Process %d got %d\n", rank, value );
-    if (rank < size - 1)
-      MPI_Send( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD );
-  }
-
-  MPI_Finalize( );
-  return 0;
-}
diff --git a/lab1/sol/send_recv.f90 b/lab1/sol/send_recv.f90
deleted file mode 100644
index c22c75b..0000000
--- a/lab1/sol/send_recv.f90
+++ /dev/null
@@ -1,32 +0,0 @@
-program send_recv
-
-implicit none
-
-include "mpif.h"
-
-integer :: rank, value, comm_size, ierr
-integer :: istatus(MPI_STATUS_SIZE)
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-value = 0
-
-if (rank == 0) then
-   value = 5 ! we set this value directly for rank 0 process
-   call MPI_Send(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, ierr)
-   print *, "Process ", rank, " sent ", value
-else
-   call MPI_Recv(value, 1, MPI_INTEGER, rank-1, 0, MPI_COMM_WORLD, istatus, ierr)
-   print *, "Process ", rank, " got ", value
-   if (rank < comm_size-1) then
-      call MPI_Send(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, ierr)
-      print *, "Process ", rank, " sent ", value
-   end if
-end if
-
-call MPI_Barrier(MPI_COMM_WORLD, ierr)
-call MPI_Finalize(ierr)
-
-end program
diff --git a/lab2/README.md b/lab2/README.md
index e37ef58..2d9c2d7 100644
--- a/lab2/README.md
+++ b/lab2/README.md
@@ -64,7 +64,7 @@ When you use the standard MPI scatter routine you will see that the global array
 
 # Solutions
 
-The solutions can be found in the [sol/ directory](sol/).
+The solutions will be made available at the end of the lab.
 
 # Acknowledgment
 
diff --git a/lab2/sol/game_of_life-collective.c b/lab2/sol/game_of_life-collective.c
deleted file mode 100644
index b710ce3..0000000
--- a/lab2/sol/game_of_life-collective.c
+++ /dev/null
@@ -1,257 +0,0 @@
-/****************************
-    Conway Game of Life
-
-       2 processors
-  divide domain left-right
- (break with vertical line)
-*****************************/
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define NI 200
-#define NJ 200
-#define NSTEPS 500
-
-int main(int argc, char *argv[]){
-
-  int i, j, n, im, ip, jm, jp, nsum, isum, isum1, nprocs, myid;
-  int ig, jg, i1g, i2g, j1g, j2g, ninom, njnom, ninj, i1, i2, i2m,
-    j1, j2, j2m, ni, nj, isumloc;
-  int niproc, njproc;
-  int **old, **new, *old1d, *new1d;
-  MPI_Status status;
-  MPI_Datatype column_type;
-  float x;
-
-  /* initialize MPI */
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
-  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
-
-  /* only 1 or 2 MPI tasks supported */
-  if(nprocs>2) {
-    printf("Only 1 or 2 mpi tasks supported\n");
-    printf("Number of tasks = %d\nAborting...\n",nprocs);
-    MPI_Abort(MPI_COMM_WORLD, 1);
-  }
-
-  /* nominal number of points per proc. in each direction,
-     without ghost cells, assume numbers divide evenly */ 
-
-  niproc = 1;  
-  njproc = nprocs;    /* divide domain in j direction only */
-  ninom = NI/niproc;
-  njnom = NJ/njproc;
-
-  /* global starting and ending indices (without ghost cells) */
-
-  i1g = 1;
-  i2g = ninom;
-  j1g = (myid*njnom) + 1;
-  j2g = j1g + njnom - 1;
-
-  /* local starting and ending indices, including ghost cells */
-
-  i1  = 0;
-  i2  = ninom + 1;
-  i2m = i2 - 1;
-  j1  = 0;
-  j2  = njnom + 1;
-  j2m = j2 - 1;
-
-  /* allocate arrays; want elements to be contiguous, so
-     allocate 1-D arrays, then set pointer to each row (old
-     and new) to allow use of array notation for convenience */
-
-  ni = i2-i1+1;
-  nj = j2-j1+1;
-  ninj = ni*nj;
-
-  old1d = malloc(ninj*sizeof(int));
-  new1d = malloc(ninj*sizeof(int));
-  old   = malloc(ni*sizeof(int*));
-  new   = malloc(ni*sizeof(int*));
-
-  for(i=0; i<ni; i++){
-    old[i] = &old1d[i*nj];
-    new[i] = &new1d[i*nj];
-  }
-
-  /*  Initialize elements of old to 0 or 1.
-      We're doing some sleight of hand here to make sure we
-      initialize to the same values as in the serial case.
-      The rand() function is called for every i and j, even
-      if they are not on the current processor, to get the same
-      random distribution as the serial case, but they are
-      only used if i and j reside on the current processor. */
-
-  for(ig=1; ig<=NI; ig++){
-    for(jg=1; jg<=NJ; jg++){
-      x = rand()/((float)RAND_MAX + 1);
-
-      /* if this j is on the current processor */
-      if( jg >= j1g && jg <= j2g ){
-
-        /* local i and j indices, accounting for lower ghost cell */
-        i = ig;
-        j = jg - j1g + 1;
-
-        if(x<0.5){
-          old[i][j] = 0;
-        }else{
-          old[i][j] = 1;
-        }
-      }
-
-    }
-  }
-
-  /* Create derived type for single column of array.
-     There are NI "blocks," each containing 1 element,
-     with a stride of nj between the blocks */
-
-  MPI_Type_vector(NI, 1, nj, MPI_INT, &column_type);
-  MPI_Type_commit(&column_type);
-
-  /* iterate */
-
-  for(n=0; n<NSTEPS; n++){
-
-    /* transfer data to ghost cells */
-
-    if(nprocs == 1){
-
-      /* left and right columns */
-
-      for(i=1; i<i2; i++){
-        old[i][0]  = old[i][j2m];
-        old[i][j2] = old[i][1];
-      }
-
-      /* top and bottom */
-
-      for(j=1; j<j2; j++){
-        old[0][j]  = old[i2m][j];
-        old[i2][j] = old[1][j];
-      }
-
-      /* corners */
-
-      old[0][0]   = old[i2m][j2m];
-      old[0][j2]  = old[i2m][1];
-      old[i2][j2] = old[1][1];
-      old[i2][0]  = old[1][j2m];
-
-    }else{
-
-
-      if(myid == 0){
-
-        /* use derived type "column_type" to transfer columns */
-
-        MPI_Send(&old[1][j2-1], 1, column_type, 1, 0, MPI_COMM_WORLD);
-        MPI_Recv(&old[1][j2],   1, column_type, 1, 1, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][1],    1, column_type, 1, 2, MPI_COMM_WORLD);
-        MPI_Recv(&old[1][0],    1, column_type, 1, 3, MPI_COMM_WORLD, &status);
-
-        /* top and bottom */
-
-        for(j=0; j<nj; j++){
-          old[0][j]  = old[i2m][j];
-          old[i2][j] = old[1][j];
-        }
-
-        /* corners */
-
-        MPI_Send(&old[1][1],     1, MPI_INT, 1, 10, MPI_COMM_WORLD);
-        MPI_Recv(&old[0][0],     1, MPI_INT, 1, 11, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[i2m][1],   1, MPI_INT, 1, 12, MPI_COMM_WORLD);
-        MPI_Recv(&old[i2][0],    1, MPI_INT, 1, 13, MPI_COMM_WORLD, &status);
-
-      }else{
-
-        /* use derived type "column_type" to transfer columns */
-
-        MPI_Recv(&old[1][0],    1, column_type, 0, 0, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][1],    1, column_type, 0, 1, MPI_COMM_WORLD);
-        MPI_Recv(&old[1][j2],   1, column_type, 0, 2, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][j2-1], 1, column_type, 0, 3, MPI_COMM_WORLD);
-
-        /* top and bottom */
-
-        for(j=0; j<nj; j++){
-          old[0][j]  = old[i2m][j];
-          old[i2][j] = old[1][j];
-        }
-
-        /* corners */
-
-        MPI_Recv(&old[i2][j2],   1, MPI_INT, 0, 10, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[i2m][j2m], 1, MPI_INT, 0, 11, MPI_COMM_WORLD);
-        MPI_Recv(&old[0][j2],    1, MPI_INT, 0, 12, MPI_COMM_WORLD, &status);
-        MPI_Send(&old[1][j2m], 1, MPI_INT, 0, 13, MPI_COMM_WORLD);
-
-      }
-    }
-
-    /* update states of cells */
-
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-                
-        im = i-1;
-        ip = i+1;
-        jm = j-1;
-        jp = j+1;
-        nsum =  old[im][jp] + old[i][jp] + old[ip][jp]
-              + old[im][j ]              + old[ip][j ]
-              + old[im][jm] + old[i][jm] + old[ip][jm];
-
-        switch(nsum){
-        case 3:
-          new[i][j] = 1;
-          break;
-        case 2:
-          new[i][j] = old[i][j];
-          break;
-        default:
-          new[i][j] = 0;
-        }
-      }
-    }
-
-    /* copy new state into old state */
-    
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-        old[i][j] = new[i][j];
-      }
-    }
-
-  }
-
-  /*  Iterations are done; sum the number of live cells */
-
-  isum = 0;
-  for(i=1; i<i2; i++){
-    for(j=1; j<j2; j++){
-      isum = isum + new[i][j];
-    }
-  }
-
-  /* Print final number of live cells.  For multiple processors,
-     must reduce partial sums */
-  
-  if(nprocs > 1){
-    isumloc = isum;
-    MPI_Reduce(&isumloc, &isum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
-  }
-
-  if(myid == 0) printf("nNumber of live cells = %d\n", isum);
-  
-  MPI_Finalize();
-}
-
diff --git a/lab2/sol/game_of_life-collective.f90 b/lab2/sol/game_of_life-collective.f90
deleted file mode 100644
index 3f59389..0000000
--- a/lab2/sol/game_of_life-collective.f90
+++ /dev/null
@@ -1,207 +0,0 @@
-!------------------------------
-!     Conway Game of Life
-
-!     reduction operation
-!------------------------------
-
-program life
-
-  implicit none
-  include 'mpif.h'
-
-  integer, parameter :: ni=200, nj=200, nsteps = 500
-  integer :: i, j, n, im, ip, jm, jp, nsum, isum, isum1, &
-       ierr, myid, nprocs, i1, i2, j1, j2, i1p, i2m, j1p, j2m, &
-       i1n, i2n, ninom, njnom, niproc, njproc, nitot, isumloc
-  integer :: status(mpi_status_size), row_type
-  integer, allocatable, dimension(:,:) :: old, new
-  real :: arand
-
-  ! initialize MPI
-
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid, ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! only 1 or 2 MPI tasks supported
-  if(nprocs.gt.2) then
-     write(*,*) "Only 1 or 2 MPI tasks supported"
-     write(*,*) "Number of Tasks = ",nprocs
-     write(*,*) "Aborting..."
-     call mpi_abort(mpi_comm_world,1,ierr)
-  endif
-
-
-  ! domain decomposition
-
-  ! nominal number of points per proc., without ghost cells,
-  ! assume numbers divide evenly; niproc and njproc are the
-  ! numbers of procs in the i and j directions.
-  niproc = nprocs
-  njproc = 1
-  ninom  = ni/niproc
-  njnom  = nj/njproc
-
-  ! nominal starting and ending indices, without ghost cells
-  i1n = myid*ninom + 1
-  i2n = i1n + ninom - 1
-
-  ! local starting and ending index, including 2 ghost cells
-  ! in each direction (at beginning and end)
-  i1  = i1n - 1
-  i1p = i1 + 1
-  i2  = i2n + 1
-  i2m = i2 - 1
-  j1  = 0
-  j1p = j1 + 1
-  j2  = nj + 1
-  j2m = j2 - 1
-  nitot = i2 - i1 + 1
-
-  ! allocate arrays
-  allocate( old(i1:i2,j1:j2), new(i1:i2,j1:j2) )
-
-  ! Initialize elements of old to 0 or 1.  We're doing some
-  ! sleight of hand here to make sure we initialize to the
-  ! same values as in the serial case. The random_number
-  ! function is called for every i and j, even if they are
-  ! not on the current processor, to get the same random
-  ! distribution as the serial case, but they are only used
-  ! if this i and j reside on the current processor.
-
-  do j = 1, nj
-     do i = 1, ni
-        call random_number(arand)
-        if(i > i1 .and. i < i2) old(i,j) = nint(arand)
-     enddo
-  enddo
-
-  ! Create derived type for single row of array.
-  ! There are nj "blocks," each containing 1 element,
-  ! with a stride of nitot between the blocks
-
-  call mpi_type_vector(nj+2, 1, nitot, mpi_integer, row_type, ierr)
-  call mpi_type_commit(row_type, ierr)
-
-  !  iterate
-
-  time_iteration: do n = 1, nsteps
-
-     ! transfer data to ghost cells
-
-     ! left and right boundary conditions
-
-     old(i1p:i2m,0)  = old(i1p:i2m,j2m)
-     old(i1p:i2m,j2) = old(i1p:i2m,1)
-
-     if(nprocs == 1) then
-
-        ! top and bottom boundary conditions
-
-        old(i1,:) = old(i2m,:)
-        old(i2,:) = old(i1p,:)
-
-        ! corners
-
-        old(i1,j1) = old(i2m,j2m)
-        old(i1,j2) = old(i2m,j1p)
-        old(i2,j2) = old(i1p,j1p)
-        old(i2,j1) = old(i1p,j2m)
-
-     elseif(myid == 0) then
-
-        ! top and bottom boundary conditions
-
-        call mpi_send(old(i1p,j1), 1, row_type, &
-             1, 0, mpi_comm_world, ierr)
-        call mpi_recv(old(i1,j1),  1, row_type, &
-             1, 0, mpi_comm_world, status, ierr)
-        call mpi_send(old(i2m,j1), 1, row_type, &
-             1, 1, mpi_comm_world, ierr)
-        call mpi_recv(old(i2,j1),  1, row_type, &
-             1, 1, mpi_comm_world, status, ierr)
-
-        ! corners
-
-        call mpi_send(old(i1p,j1p), 1, mpi_integer, &
-             1, 2, mpi_comm_world, ierr)
-        call mpi_recv(old(i1, j1 ), 1, mpi_integer, &
-             1, 3, mpi_comm_world, status, ierr)
-        call mpi_send(old(i1p,j2m), 1, mpi_integer, &
-             1, 4, mpi_comm_world, ierr)
-        call mpi_recv(old(i1, j2 ), 1, mpi_integer, &
-             1, 5, mpi_comm_world, status, ierr)
-     else
-
-        ! top and bottom boundary conditions
-
-        call mpi_recv(old(i2,j1),  1, row_type, &
-             0, 0, mpi_comm_world, status, ierr)
-        call mpi_send(old(i2m,j1), 1, row_type, &
-             0, 0, mpi_comm_world, ierr)
-        call mpi_recv(old(i1,j1),  1, row_type, &
-             0, 1, mpi_comm_world, status, ierr)
-        call mpi_send(old(i1p,j1), 1, row_type, &
-             0, 1, mpi_comm_world, ierr)
-
-        ! corners
-
-        call mpi_recv(old(i2, j2 ), 1, mpi_integer, &
-             0, 2, mpi_comm_world, status, ierr)
-        call mpi_send(old(i2m,j2m), 1, mpi_integer, &
-             0, 3, mpi_comm_world, ierr)
-        call mpi_recv(old(i2, j1 ), 1, mpi_integer, &
-             0, 4, mpi_comm_world, status, ierr)
-        call mpi_send(old(i2m,j1p), 1, mpi_integer, &
-             0, 5, mpi_comm_world, ierr)
-     endif
-
-     do j = j1p, j2m
-        do i = i1p, i2m
-
-           im = i - 1
-           ip = i + 1
-           jm = j - 1
-           jp = j + 1
-           nsum =  old(im,jp) + old(i,jp) + old(ip,jp) &
-                + old(im,j )             + old(ip,j ) &
-                + old(im,jm) + old(i,jm) + old(ip,jm)
-
-           select case (nsum)
-           case (3)
-              new(i,j) = 1
-           case (2)
-              new(i,j) = old(i,j)
-           case default
-              new(i,j) = 0
-           end select
-
-        enddo
-     enddo
-
-     ! copy new state into old state
-     old(i1p:i2m,j1p:j2m) = new(i1p:i2m,j1p:j2m)
-
-  enddo time_iteration
-
-  ! Iterations are done; sum the number of live cells
-
-  isum = sum(new(i1p:i2m,j1p:j2m))
-
-  ! Print final number of live cells.  For multiple
-  ! processors, must reduce partial sums.
-
-  if(nprocs > 1) then
-     isumloc = isum
-     call mpi_reduce(isumloc, isum, 1, mpi_integer, &
-          mpi_sum, 0, mpi_comm_world, ierr)
-  endif
-
-  if(myid == 0) then
-     write(*,"(/'Number of live cells = ', i6/)") isum
-  endif
-
-  deallocate(old, new)
-  call mpi_finalize(ierr)
-
-end program life
diff --git a/lab2/sol/parallel_search-collective.c b/lab2/sol/parallel_search-collective.c
deleted file mode 100644
index c8301a1..0000000
--- a/lab2/sol/parallel_search-collective.c
+++ /dev/null
@@ -1,109 +0,0 @@
-#include <stdio.h>
-#include <mpi.h>
-
-int main (int argc, char *argv[]) {
-  const int N=300;
-  int N_loc;
-  int i,target;
-  int b[N];
-  int count,full_count;
-
-  int *b_loc, *res, *countA, *displacements;
-  int full_res[N];
-  int rank, nproc;
-
-  FILE *infile,*outfile;
-
-
-
-  MPI_Init(&argc, &argv);                 /* Initialize MPI       */
-  MPI_Comm_rank(MPI_COMM_WORLD, &rank); /* Get my rank          */
-  MPI_Comm_size(MPI_COMM_WORLD, &nproc);   /* Get the total
-                                              number of processors */
-
-  /* check that N/nproc divides evenly */
-
-  if( N % nproc != 0 ) {
-    if (rank == 0) {
-      printf ("number of points %d must divide evenly\n",N);
-      printf ("by number of processors %d\n",nproc);
-    }
-    MPI_Abort(MPI_COMM_WORLD,1);
-  }
-
-  N_loc=N/nproc;
-
-  b_loc = malloc( N_loc * sizeof(int) );
-  res = malloc( N_loc * sizeof(int) );
-  countA = malloc( nproc * sizeof(int) );
-  displacements = malloc( nproc * sizeof(int) );
-
-
-  if (rank == 0 ) {
-    /* File b.data has the target value on the first line
-       The remaining 300 lines of b.data have the values for the b array */
-    infile = fopen("b.data","r" ) ;
-    outfile = fopen("found.data","w") ;
-    
-    /* read in target */
-    fscanf(infile,"%d", &target);
-
-    /* read in b array */
-    for(i=0;i<N;i++) {
-      fscanf(infile,"%d", &b[i]);
-    }
-    fclose(infile);
-  }
- 
-  /* broadcast the target (called by all ranks); note that MPI_INT is
-     the C datatype (MPI_INTEGER is its Fortran counterpart) */
-  MPI_Bcast(&target,1,MPI_INT,0,MPI_COMM_WORLD);
-
-  /* scatter the data array */
-  MPI_Scatter(b,N_loc,MPI_INT,b_loc,N_loc,MPI_INT,
-              0,MPI_COMM_WORLD);
- 
-
-
-  /* Search the b array and save the target locations and number*/
-
-  count=0;
-  for(i=0;i<N_loc;i++) {
-    if( b_loc[i] == target) {
-      res[count]=i+1+rank*N_loc; /* correct for actual position in array*/
-      count++;
-    }
-  }
-
-  /* gather the partial count from each process */
-
-  /* First the number of data points */
-  MPI_Gather(&count,1,MPI_INT,countA,1,MPI_INT,
-             0,MPI_COMM_WORLD);
-
-  /* calculate the displacements */
-  if(rank == 0) {
-    full_count=0;
-    for(i=0;i<nproc;i++) {
-      displacements[i]=full_count;
-      full_count=full_count+countA[i];
-    }
-  }
-   
-  /* Now we know the number of data points, we can gather the actual data */
-  MPI_Gatherv(res,count,MPI_INT,full_res,countA,
-              displacements,MPI_INT,0,MPI_COMM_WORLD);
-
-  /* now output the results */
-  if (rank == 0) {
-    for(i=0;i<full_count;i++) {
-      fprintf(outfile,"%d\n",full_res[i]);
-    }
-    fclose(outfile);
-  }
-
-  MPI_Barrier(MPI_COMM_WORLD);
-  MPI_Finalize();
-
-  return 0;
-}
-
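
A hedged variant (not in the original sources) of the same result collection done with MPI_Allgatherv, so that every rank ends up with the full list of hit positions instead of only rank 0. Variable names mirror the solution above; the fixed-size arrays assume nproc <= 64 for brevity.

int counts[64], displs[64], total, p;

MPI_Allgather(&count, 1, MPI_INT, counts, 1, MPI_INT, MPI_COMM_WORLD);
total = 0;
for (p = 0; p < nproc; p++) {     /* every rank computes the displacements */
    displs[p] = total;
    total += counts[p];
}
MPI_Allgatherv(res, count, MPI_INT, full_res, counts, displs,
               MPI_INT, MPI_COMM_WORLD);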
diff --git a/lab2/sol/parallel_search-collective.f90 b/lab2/sol/parallel_search-collective.f90
deleted file mode 100644
index f212319..0000000
--- a/lab2/sol/parallel_search-collective.f90
+++ /dev/null
@@ -1,98 +0,0 @@
-PROGRAM search  
-  implicit none
-  include 'mpif.h'
-  integer, parameter ::  N=300
-  integer :: N_loc
-  integer i, target ! local variables
-  integer b(N)      ! the entire array of integers
-  integer :: count, full_count
-  integer, allocatable :: b_loc(:),res(:),countA(:),displacements(:)
-  integer :: full_res(N)
-  integer rank,err,nproc
- 
-  
-  CALL MPI_INIT(err)
-  CALL MPI_COMM_RANK(MPI_COMM_WORLD, rank, err)
-  CALL MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, err)
- 
-  ! check that N/nproc divides evenly
-
-  if( mod(N,nproc) .ne. 0) then
-     if (rank == 0) then
-        write(*,*) "Number of points ",N," must divide evenly by"
-        write(*,*) "number of processors ",nproc
-     endif
-     call mpi_abort(mpi_comm_world,1,err)
-  endif
-
-  N_loc=N/nproc
-  allocate(b_loc(N_loc))
-  allocate(res(N_loc))
-  allocate(countA(nproc))
-  allocate(displacements(nproc))
-
-
-  if (rank == 0) then
-     ! File b.data has the target value on the first line
-     ! The remaining 300 lines of b.data have the values for the b array
-     open(unit=10,file="b.data")     
-
-     ! File found.data will contain the indices of b where the target is
-     open(unit=11,file="found.data")
-
-     ! Read in the target
-     read(10,*) target
-
-     ! Read in b array 
-     
-     do i=1,N
-        read(10,*) b(i)
-     end do
-  endif
-  ! send the target (called by all ranks)
-  call MPI_BCAST(target,1,MPI_INTEGER,0,MPI_COMM_WORLD,err )
-
-  ! scatter the data array
-  call MPI_SCATTER(b,N_loc,MPI_INTEGER,b_loc,N_loc,MPI_INTEGER,  &
-       0,MPI_COMM_WORLD,err)
-
-
-  ! Search the b array and save the target locations, and number
-  count=0
-  do i=1,N_loc
-     if (b_loc(i) == target) then
-        count=count+1
-        res(count)=i+rank*N_loc ! correct for actual position in array
-     end if
-  end do
-
-  !gather the partial count from each process
-
-  ! First the number of data points
-  call MPI_GATHER(count,1,MPI_INTEGER,countA,1,MPI_INTEGER, &
-       0,MPI_COMM_WORLD,err)
-
-  ! calculate the displacements
-  if(rank == 0) then
-     full_count=0
-     do i=1,nproc
-        displacements(i)=full_count
-        full_count=full_count+countA(i)
-     enddo
-  endif
-  
-  ! Now we know the number of data points, we can gather the actual data
-  call MPI_GATHERV(res,count,MPI_INTEGER,full_res,countA, &
-       displacements,MPI_INTEGER,0,MPI_COMM_WORLD,err)
-
-  ! now output the results to found.data (unit 11, opened above)
-  if(rank == 0 ) then
-     do i=1,full_count
-        write(11,*) full_res(i)
-     enddo
-     close(10)
-     close(11)
-  endif
-
-  call MPI_BARRIER(MPI_COMM_WORLD,err)
-  call MPI_FINALIZE(err)
-    
-END PROGRAM search 
diff --git a/lab2/sol/pi_collectives.c b/lab2/sol/pi_collectives.c
deleted file mode 100644
index 12cb3ed..0000000
--- a/lab2/sol/pi_collectives.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/**********************************************************************
- * FILE: mpi_pi_reduce.c
- * OTHER FILES: dboard.c
- * DESCRIPTION:  
- *   MPI pi Calculation Example - C Version 
- *   Collective Communication example:  
- *   This program calculates pi using a "dartboard" algorithm.  See
- *   Fox et al.(1988) Solving Problems on Concurrent Processors, vol.1
- *   page 207.  All processes contribute to the calculation, with the
- *   master averaging the values for pi. This version uses MPI_Reduce to
- *   collect the results.
- * AUTHOR: Blaise Barney. Adapted from Ros Leibensperger, Cornell Theory
- *   Center. Converted to MPI: George L. Gusciora, MHPCC (1/95) 
- * LAST REVISED: 04/13/05 
- **********************************************************************/
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-void srandom (unsigned seed);
-double dboard (int darts);
-#define DARTS 50000     /* number of throws at dartboard */
-#define ROUNDS 10       /* number of times "darts" is iterated */
-#define MASTER 0        /* task ID of master task */
-
-int main (int argc, char *argv[])
-{
-  double homepi,         /* value of pi calculated by current task */
-    pisum,        /* sum of tasks' pi values */
-    pi,        /* average of pi after "darts" is thrown */
-    avepi;        /* average pi value for all iterations */
-  int taskid,        /* task ID - also used as seed number */
-    numtasks,       /* number of tasks */
-    rc,             /* return code */
-    i;
-  MPI_Status status;
-  
-  /* Obtain number of tasks and task ID */
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
-  MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
-  printf ("MPI task %d has started...\n", taskid);
-  if (taskid == MASTER) 
-    printf ("Using %d tasks to compute pi (3.1415926535)\n",numtasks);
-  
-  /* Set seed for random number generator equal to task ID */
-  srandom (taskid);
-  
-  avepi = 0;
-  for (i = 0; i < ROUNDS; i++) {
-    /* All tasks calculate pi using dartboard algorithm */
-    homepi = dboard(DARTS);
-    
-    /* Use MPI_Reduce to sum values of homepi across all tasks
-     * Master will store the accumulated value in pisum
-     * - homepi is the send buffer
-     * - pisum is the receive buffer (used by the receiving task only)
-     * - the message is one element of type MPI_DOUBLE
-     * - MASTER is the task that will receive the result of the reduction
-     *   operation
-     * - MPI_SUM is a pre-defined reduction operation (here, summing
-     *   double-precision values)
-     * - MPI_COMM_WORLD is the group of tasks that will participate.
-     */
-    
-    rc = MPI_Reduce(&homepi, &pisum, 1, MPI_DOUBLE, MPI_SUM,
-		    MASTER, MPI_COMM_WORLD);
-    if (rc != MPI_SUCCESS)
-      printf("%d: failure on mpi_reduce\n", taskid);
-    
-    /* Master computes average for this iteration and all iterations */
-    if (taskid == MASTER) {
-      pi = pisum/numtasks;
-      avepi = ((avepi * i) + pi)/(i + 1); 
-      printf("   After %8d throws, average value of pi = %10.8f\n",
-	     (DARTS * (i + 1)),avepi);
-    }    
-  } 
-  MPI_Finalize();
-  return 0;
-}
-
-
-/******************************************************************************
- * FILE: dboard.c
- * DESCRIPTION:
- *   Used in pi calculation example codes. 
- *   See mpi_pi_send.c and mpi_pi_reduce.c  
- *   Throw darts at board.  Done by generating random numbers
- *   between 0 and 1 and converting them to values for x and y
- *   coordinates, then testing whether they "land" in the circle.
- *   If so, score is incremented.  After throwing the
- *   specified number of darts, pi is calculated.  The computed value
- *   of pi is returned as the value of this function, dboard.
- *   Note:  the seed value for random() is set by srandom() in main.
- * AUTHOR: unknown
- * LAST REVISED: 04/14/05 Blaise Barney
- ****************************************************************************/
-/*
-Explanation of constants and variables used in this function:
-  darts       = number of throws at dartboard
-  score       = number of darts that hit circle
-  n           = index variable
-  r           = random number between 0 and 1
-  x_coord     = x coordinate, between -1 and 1
-  x_sqr       = square of x coordinate
-  y_coord     = y coordinate, between -1 and 1
-  y_sqr       = square of y coordinate
-  pi          = computed value of pi
-*/
-
-#include <stdio.h>
-#include <stdlib.h>
-#define sqr(x)((x)*(x))
-long random(void);
-
-double dboard(int darts)
-{
-  double x_coord, y_coord, pi, r; 
-  int score, n;
-  unsigned int cconst;  /* must be 4-bytes in size */
-  /*************************************************************************
-   * The cconst variable must be 4 bytes. We check this and bail if it is
-   * not the right size
-   ************************************************************************/
-  if (sizeof(cconst) != 4) {
-    printf("Wrong data size for cconst variable in dboard routine!\n");
-    printf("See comments in source file. Quitting.\n");
-    exit(1);
-  }
-  cconst = 2147483648U;  /* 2^31 as an unsigned literal; a signed shift
-                            like 2 << 30 would overflow int */
-  score = 0;
-
-  /* "throw darts at board" */
-  for (n = 1; n <= darts; n++)  {
-    /* generate random numbers for x and y coordinates */
-    r = (double)random()/cconst;
-    x_coord = (2.0 * r) - 1.0;
-    r = (double)random()/cconst;
-    y_coord = (2.0 * r) - 1.0;
-
-    /* if dart lands in circle, increment score */
-    if ((sqr(x_coord) + sqr(y_coord)) <= 1.0)
-      score++;
-  }
-
-  /* calculate pi */
-  pi = 4.0 * (double)score/(double)darts;
-  return(pi);
-}
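
A small sketch (an addition, not original lab code) of the same reduction done with MPI_Allreduce, which leaves the sum on every task, so each one could print or reuse the running estimate without a separate broadcast. Variable names follow the solution above.

double pisum_all, pi_all;

MPI_Allreduce(&homepi, &pisum_all, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
pi_all = pisum_all / numtasks;   /* now valid on every task, not just MASTER */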
diff --git a/lab2/sol/pi_collectives.f90 b/lab2/sol/pi_collectives.f90
deleted file mode 100644
index 0ed6eb3..0000000
--- a/lab2/sol/pi_collectives.f90
+++ /dev/null
@@ -1,79 +0,0 @@
-program pi
-
-implicit none
-
-include "mpif.h"
-
-integer, parameter :: DARTS = 50000, ROUNDS = 10, MASTER = 0
-
-real(8) :: pi_est
-real(8) :: homepi, avepi, pirecv, pisum
-integer :: rank, comm_size, mtype, ierr
-integer :: i, n
-integer, allocatable :: seed(:)
-integer :: istatus(MPI_STATUS_SIZE)
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-print *, "MPI task ", rank, " has started ..."
-
-if (rank == MASTER) then
-   print *, "Using ", comm_size, " tasks to compute pi (3.1415926535)"
-end if
-
-! initialize the random number generator
-! we make sure the seed is different for each task
-call random_seed()
-call random_seed(size = n)
-allocate(seed(n))
-seed = 12 + rank*11
-call random_seed(put=seed(1:n))
-deallocate(seed)
-
-avepi = 0
-do i = 0, ROUNDS-1
-   homepi = dboard(DARTS)
-
-   call MPI_Reduce(homepi, pisum, 1, MPI_DOUBLE_PRECISION, MPI_SUM, master, &
-                   MPI_COMM_WORLD, ierr)
-
-   if (rank == master) then
-
-      ! calculate the average value of pi for this iteration
-      pi_est = pisum/comm_size
-
-      ! calculate the average value of pi over all iterations
-      avepi = ((avepi*i) + pi_est)/(i + 1)
-
-      print *, "After ", DARTS*(i+1), " throws, average value of pi =", avepi
-
-   end if
-end do
-
-call MPI_Finalize(ierr)
-
-contains
-
-   real(8) function dboard(darts)
-
-      integer, intent(in) :: darts
-
-      real(8) :: x_coord, y_coord
-      integer :: score, n
-
-      score = 0
-      do n = 1, darts
-         call random_number(x_coord)
-         call random_number(y_coord)
-
-         if ((x_coord**2 + y_coord**2) <= 1.0d0) then
-            score = score + 1
-         end if
-      end do
-      dboard = 4.0d0*score/darts
-
-   end function
-
-end program
diff --git a/lab2/sol/pi_nonblocking.c b/lab2/sol/pi_nonblocking.c
deleted file mode 100644
index ec569ab..0000000
--- a/lab2/sol/pi_nonblocking.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/**********************************************************************
- * FILE: mpi_pi_send.c
- * OTHER FILES: dboard.c
- * DESCRIPTION:  
- *   MPI pi Calculation Example - C Version 
- *   Point-to-Point communications example
- *   This program calculates pi using a "dartboard" algorithm.  See
- *   Fox et al.(1988) Solving Problems on Concurrent Processors, vol.1
- *   page 207.  All processes contribute to the calculation, with the
- *   master averaging the values for pi. This version uses non-blocking
- *   sends and receives to collect results.
- * AUTHOR: Blaise Barney. Adapted from Ros Leibensperger, Cornell Theory
- *   Center. Converted to MPI: George L. Gusciora, MHPCC (1/95) 
- * LAST REVISED: 04/13/05
- **********************************************************************/
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-void srandom (unsigned seed);
-double dboard (int darts);
-#define DARTS 50000     /* number of throws at dartboard */
-#define ROUNDS 10       /* number of times "darts" is iterated */
-#define MASTER 0        /* task ID of master task */
-
-int main (int argc, char *argv[])
-{
-  double homepi,         /* value of pi calculated by current task */
-    sendpi,         /* copy of value of pi calculated by current task */
-    pi,             /* average of pi after "darts" is thrown */
-    avepi,          /* average pi value for all iterations */
-    pirecv,         /* pi received from worker */
-    pisum;          /* sum of workers pi values */
-  int taskid,         /* task ID - also used as seed number */
-    numtasks,       /* number of tasks */
-    source,         /* source of incoming message */ 
-    mtype,          /* message type */
-    rc,             /* return code */
-    i, n;
-  MPI_Status status;
-  MPI_Request request;
-
-  /* Obtain number of tasks and task ID */
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
-  MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
-  printf ("MPI task %d has started...\n", taskid);
-  if (taskid == MASTER)
-    printf ("Using %d tasks to compute pi (3.1415926535)\n",numtasks);
-
-  /* Set seed for random number generator equal to task ID */
-  srandom (taskid);
-
-  avepi = 0;
-  for (i = 0; i < ROUNDS; i++) {
-    /* All tasks calculate pi using dartboard algorithm */
-    homepi = dboard(DARTS);
-
-    /* Workers send homepi to master */
-    /* - Message type will be set to the iteration count */
-    if (taskid != MASTER) {
-      if(i!=0) {
-	/* wait for previous message to finish */
-	MPI_Wait(&request,&status);
-      }
-
-      mtype = i;
-      sendpi=homepi;
-      rc = MPI_Isend(&sendpi, 1, MPI_DOUBLE,
-		     MASTER, mtype, MPI_COMM_WORLD,&request);
-      if (rc != MPI_SUCCESS)
-	printf("%d: Send failure on round %d\n", taskid, mtype);
-     
-      if(i==ROUNDS-1) {
-	/* This is the very last message, so wait now */
-	MPI_Wait(&request,&status);
-      }
-
-    } else {
-      /* Master receives messages from all workers */
-      /* - Message type will be set to the iteration count */
-      /* - Message source will be set to the wildcard MPI_ANY_SOURCE: */
-      /*   a message can be received from any task, as long as the */
-      /*   message tags match */
-      /* - The return code will be checked, and a message displayed */
-      /*   if a problem occurred */
-      mtype = i;
-      pisum = 0;
-      for (n = 1; n < numtasks; n++) {
-	rc = MPI_Recv(&pirecv, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
-		      mtype, MPI_COMM_WORLD, &status);
-	if (rc != MPI_SUCCESS) 
-	  printf("%d: Receive failure on round %d\n", taskid, mtype);
-	/* keep running total of pi */
-	pisum = pisum + pirecv;
-      }
-      /* Master calculates the average value of pi for this iteration */
-      pi = (pisum + homepi)/numtasks;
-      /* Master calculates the average value of pi over all iterations */
-      avepi = ((avepi * i) + pi)/(i + 1); 
-      printf("   After %8d throws, average value of pi = %10.8f\n",
-	     (DARTS * (i + 1)),avepi);
-    }    
-  } 
-
-  MPI_Finalize();
-  return 0;
-}
-
-/******************************************************************************
- * FILE: dboard.c
- * DESCRIPTION:
- *   Used in pi calculation example codes. 
- *   See mpi_pi_send.c and mpi_pi_reduce.c  
- *   Throw darts at board.  Done by generating random numbers
- *   between 0 and 1 and converting them to values for x and y
- *   coordinates, then testing whether they "land" in the circle.
- *   If so, score is incremented.  After throwing the
- *   specified number of darts, pi is calculated.  The computed value
- *   of pi is returned as the value of this function, dboard.
- *   Note:  the seed value for random() is set by srandom() in main.
- * AUTHOR: unknown
- * LAST REVISED: 04/14/05 Blaise Barney
- ****************************************************************************/
-/*
-Explanation of constants and variables used in this function:
-  darts       = number of throws at dartboard
-  score       = number of darts that hit circle
-  n           = index variable
-  r           = random number between 0 and 1
-  x_coord     = x coordinate, between -1 and 1
-  x_sqr       = square of x coordinate
-  y_coord     = y coordinate, between -1 and 1
-  y_sqr       = square of y coordinate
-  pi          = computed value of pi
-*/
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#define sqr(x)((x)*(x))
-long random(void);
-
-double dboard(int darts)
-{
-  double x_coord, y_coord, pi, r; 
-  int score, n;
-  unsigned int cconst;  /* must be 4-bytes in size */
-  /*************************************************************************
-   * The cconst variable must be 4 bytes. We check this and bail if it is
-   * not the right size
-   ************************************************************************/
-  if (sizeof(cconst) != 4) {
-    printf("Wrong data size for cconst variable in dboard routine!\n");
-    printf("See comments in source file. Quitting.\n");
-    exit(1);
-  }
-  cconst = 2147483648U;  /* 2^31 as an unsigned literal; a signed shift
-                            like 2 << 30 would overflow int */
-  score = 0;
-
-  /* "throw darts at board" */
-  for (n = 1; n <= darts; n++)  {
-    /* generate random numbers for x and y coordinates */
-    r = (double)random()/cconst;
-    x_coord = (2.0 * r) - 1.0;
-    r = (double)random()/cconst;
-    y_coord = (2.0 * r) - 1.0;
-
-    /* if dart lands in circle, increment score */
-    if ((sqr(x_coord) + sqr(y_coord)) <= 1.0)
-      score++;
-  }
-
-  /* calculate pi */
-  pi = 4.0 * (double)score/(double)darts;
-  return(pi);
-}
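
An illustrative alternative (not from the lab) for the master side of the code above: post all worker receives up front and complete them together with MPI_Waitall, so the master can overlap its own computation with the incoming messages. The fixed-size arrays assume at most 16 workers; other names follow the solution above.

MPI_Request reqs[16];
double recvbuf[16];
int w, nworkers = numtasks - 1;

for (w = 0; w < nworkers; w++)
    MPI_Irecv(&recvbuf[w], 1, MPI_DOUBLE, MPI_ANY_SOURCE,
              mtype, MPI_COMM_WORLD, &reqs[w]);
/* ... the master can run its own dboard() call here, overlapping ... */
MPI_Waitall(nworkers, reqs, MPI_STATUSES_IGNORE);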
diff --git a/lab2/sol/pi_nonblocking.f90 b/lab2/sol/pi_nonblocking.f90
deleted file mode 100644
index f63b329..0000000
--- a/lab2/sol/pi_nonblocking.f90
+++ /dev/null
@@ -1,101 +0,0 @@
-program pi
-
-implicit none
-
-include "mpif.h"
-
-integer, parameter :: DARTS = 50000, ROUNDS = 10, MASTER = 0
-
-real(8) :: pi_est
-real(8) :: homepi, avepi, pirecv, pisum, sendpi
-integer :: rank, comm_size, mtype, ierr, request
-integer :: i, n
-integer, allocatable :: seed(:)
-integer :: istatus(MPI_STATUS_SIZE)
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-print *, "MPI task ", rank, " has started ..."
-
-if (rank == MASTER) then
-   print *, "Using ", comm_size, " tasks to compute pi (3.1415926535)"
-end if
-
-! initialize the random number generator
-! we make sure the seed is different for each task
-call random_seed()
-call random_seed(size = n)
-allocate(seed(n))
-seed = 12 + rank*11
-call random_seed(put=seed(1:n))
-deallocate(seed)
-
-avepi = 0
-do i = 0, ROUNDS-1
-   homepi = dboard(DARTS)
-
-   if (rank /= MASTER) then
-      if (i /= 0) then
-         ! wait for previous message to finish
-         call MPI_Wait(request, istatus, ierr)
-      end if
-
-      mtype = i
-      sendpi = homepi
-      call MPI_Isend(sendpi, 1, MPI_DOUBLE_PRECISION, MASTER, mtype, &
-                     MPI_COMM_WORLD, request, ierr)
-
-      if (i == ROUNDS-1) then
-         ! this is the very last message, so wait
-         call MPI_Wait(request, istatus, ierr)
-      end if
-
-   else
-      mtype = i
-      pisum = 0
-      do n = 1, comm_size-1
-         call MPI_Recv(pirecv, 1, MPI_DOUBLE_PRECISION, MPI_ANY_SOURCE, &
-                       mtype, MPI_COMM_WORLD, istatus, ierr)
-
-         ! keep a running total of pi
-         pisum = pisum + pirecv
-      end do
-
-      ! calculate the average value of pi for this iteration
-      pi_est = (pisum + homepi)/comm_size
-
-      ! calculate the average value of pi over all iterations
-      avepi = ((avepi*i) + pi_est)/(i + 1)
-
-      print *, "After ", DARTS*(i+1), " throws, average value of pi =", avepi
-
-   end if
-end do
-
-call MPI_Finalize(ierr)
-
-contains
-
-   real(8) function dboard(darts)
-
-      integer, intent(in) :: darts
-
-      real(8) :: x_coord, y_coord
-      integer :: score, n
-
-      score = 0
-      do n = 1, darts
-         call random_number(x_coord)
-         call random_number(y_coord)
-
-         if ((x_coord**2 + y_coord**2) <= 1.0d0) then
-            score = score + 1
-         end if
-      end do
-      dboard = 4.0d0*score/darts
-
-   end function
-
-end program
diff --git a/lab2/sol/send_recv-nonblocking.c b/lab2/sol/send_recv-nonblocking.c
deleted file mode 100644
index 0dba278..0000000
--- a/lab2/sol/send_recv-nonblocking.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <stdio.h>
-#include "mpi.h"
-
-int main( int argc, char **argv )
-{
-  int rank, value, size;
-  MPI_Status status;
-  MPI_Request request,request2;
-
-  MPI_Init( &argc, &argv );
-
-  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
-  MPI_Comm_size( MPI_COMM_WORLD, &size );
-
-  value=0;
-  if (rank == 0) {
-    MPI_Isend( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD,&request );
-    MPI_Wait(&request,&status);   /* complete the send before finalizing */
-  }
-  else {
-    MPI_Irecv( &value, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD,&request);
-    MPI_Wait(&request,&status);
-    value+=1;
-    if (rank < size - 1) {
-      MPI_Isend( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD,&request2);
-      MPI_Wait(&request2,&status);  /* all requests must complete before MPI_Finalize */
-    }
-  }
-  printf( "Process %d got %d\n", rank, value );
-   
-  MPI_Finalize( );
-  return 0;
-}
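
A hedged sketch (not part of the original solution) of the same receive written with MPI_Test, which polls for completion and lets a rank do useful work while the message is still in flight. Variable names follow the program above.

int done = 0;

MPI_Irecv(&value, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, &request);
while (!done) {
    MPI_Test(&request, &done, &status);   /* returns immediately */
    /* ... do other useful work here while the message is in flight ... */
}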
diff --git a/lab2/sol/send_recv-nonblocking.f90 b/lab2/sol/send_recv-nonblocking.f90
deleted file mode 100644
index 3fe76bd..0000000
--- a/lab2/sol/send_recv-nonblocking.f90
+++ /dev/null
@@ -1,34 +0,0 @@
-program send_recv
-
-implicit none
-
-include "mpif.h"
-
-integer :: rank, value, comm_size, ierr
-integer :: istatus(MPI_STATUS_SIZE)
-integer :: request
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-value = 0
-
-if (rank == 0) then
-   value = 5 ! we set this value directly for the rank 0 process
-   call MPI_ISend(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, request, ierr)
-   call MPI_Wait(request, istatus, ierr) ! complete the send before finalizing
-   print *, "Process ", rank, " sent ", value
-else
-   call MPI_IRecv(value, 1, MPI_INTEGER, rank-1, 0, MPI_COMM_WORLD, request, ierr)
-   call MPI_Wait(request, istatus, ierr)
-   print *, "Process ", rank, " got ", value
-   if (rank < comm_size-1) then
-      ! increment into the variable first: a non-blocking send must be
-      ! given a buffer that persists, not a temporary expression
-      value = value + 1
-      call MPI_ISend(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, request, ierr)
-      call MPI_Wait(request, istatus, ierr)
-      print *, "Process ", rank, " sent ", value
-   end if
-end if
-
-call MPI_Barrier(MPI_COMM_WORLD, ierr)
-call MPI_Finalize(ierr)
-
-end program
diff --git a/lab2/sol/send_recv-race.c b/lab2/sol/send_recv-race.c
deleted file mode 100644
index 9a0e2df..0000000
--- a/lab2/sol/send_recv-race.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <stdio.h>
-#include "mpi.h"
-
-int main( int argc, char **argv )
-{
-  int rank, value, size;
-  MPI_Status status;
-  MPI_Request request,request2;
-
-  MPI_Init( &argc, &argv );
-
-  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
-  MPI_Comm_size( MPI_COMM_WORLD, &size );
-
-  value=0;
-  if (rank == 0) {
-    MPI_Isend( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD,&request );
-  }
-  else {
-    MPI_Irecv( &value, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD,&request);
-    /* value is modified and forwarded without waiting for the receive to
-       complete -- this race condition is the deliberate point of this
-       example (compare send_recv-nonblocking.c, which calls MPI_Wait) */
-    value+=1;
-    if (rank < size - 1)
-      MPI_Isend( &value, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD,&request2);
-  }
-  printf( "Process %d got %d\n", rank, value );
-   
-  MPI_Finalize( );
-  return 0;
-}
diff --git a/lab2/sol/send_recv-race.f90 b/lab2/sol/send_recv-race.f90
deleted file mode 100644
index fd86f81..0000000
--- a/lab2/sol/send_recv-race.f90
+++ /dev/null
@@ -1,33 +0,0 @@
-program send_recv
-
-implicit none
-
-include "mpif.h"
-
-integer :: rank, value, comm_size, ierr
-integer :: istatus(MPI_STATUS_SIZE)
-integer :: request
-
-call MPI_Init(ierr)
-call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
-call MPI_Comm_size(MPI_COMM_WORLD, comm_size, ierr)
-
-value = 0
-
-if (rank == 0) then
-   value = 5 ! we set this value directly for rank 0 process
-   call MPI_ISend(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, request, ierr)
-   print *, "Process ", rank, " sent ", value
-else
-   call MPI_IRecv(value, 1, MPI_INTEGER, rank-1, 0, MPI_COMM_WORLD, request, ierr)
-   ! value is forwarded without waiting for the receive to complete --
-   ! this race condition is the deliberate point of this example
-   ! (compare send_recv-nonblocking.f90, which calls MPI_Wait)
-   if (rank < comm_size-1) then
-      call MPI_ISend(value, 1, MPI_INTEGER, rank+1, 0, MPI_COMM_WORLD, request, ierr)
-      print *, "Process ", rank, " sent ", value
-   end if
-   print *, "Process ", rank, " got ", value
-end if
-
-call MPI_Barrier(MPI_COMM_WORLD, ierr)
-call MPI_Finalize(ierr)
-
-end program
diff --git a/lab3/README.md b/lab3/README.md
index 130bf68..3b3685a 100644
--- a/lab3/README.md
+++ b/lab3/README.md
@@ -71,7 +71,7 @@ As you would expect the latency is much better on a single node than across node
 
 # Solutions
 
-Solutions are available in the [sol/ directory](sol/)
+The solutions can be found in the [sol/ directory](sol/).
 
 # Acknowledgment
 
diff --git a/lab3/sol/game_of_life-one_sided.c b/lab3/sol/game_of_life-one_sided.c
deleted file mode 100644
index c1e1469..0000000
--- a/lab3/sol/game_of_life-one_sided.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/****************************
-    Conway Game of Life
-
-  one-sided communication
-  domain divided top-bottom
-  (split by horizontal lines)
-*****************************/
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define NI 200
-#define NJ 200
-
-#define NSTEPS 500
-
-int main(int argc, char *argv[]){
-
-  int i, j, n, im, ip, jm, jp, nsum, isum, isum1, nprocs ,myid, ierr;
-  int ig, jg, i1g, i2g, j1g, j2g, ninom, njnom, ninj, i1, i2, i2m,
-    j1, j2, j2m, ni, nj, isumloc,igrid;
-  int niproc, njproc;
-  int **old, **new, *old1d, *new1d;
-  MPI_Status status;
-
-  MPI_Win top_win,bottom_win;
-  MPI_Aint size;
-  int disp_unit,above_rank,below_rank;
-
-  float x;
-
-  /* initialize MPI */
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
-  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
-
-  /* nominal number of points per proc. in each direction,
-     without ghost cells, assume numbers divide evenly */ 
-
-  niproc = nprocs;   /* divide domain in i direction only */ 
-  njproc = 1;   
-  ninom = NI/niproc;
-  njnom = NJ/njproc;
-
-  /* NI must be exactly divisible by nprocs */
-
-  if(ninom*niproc!=NI) {
-    igrid=NI;
-    printf("Need to be able to divide the i grid exactly between the processes\n");
-    printf("grid points in i=%d nprocs=%d\n",igrid,nprocs);
-    MPI_Abort(MPI_COMM_WORLD, 1);
-  }
-      
-  /* global starting and ending indices (without ghost cells) */
-
-  i1g = (myid*ninom) + 1;
-  i2g = i1g + ninom - 1;
-  j1g = 1;
-  j2g = njnom;
-
-  /* local starting and ending indices, including ghost cells */
-
-  i1  = 0;
-  i2  = ninom + 1;
-  i2m = i2 - 1;
-  j1  = 0;
-  j2  = njnom + 1;
-  j2m = j2 - 1;
-
-  /* allocate arrays; want elements to be contiguous, so
-     allocate 1-D arrays, then set pointer to each row (old
-     and new) to allow use of array notation for convenience */
-
-  ni = i2-i1+1;
-  nj = j2-j1+1;
-  ninj = ni*nj;
-
-  old1d = malloc(ninj*sizeof(int));
-  new1d = malloc(ninj*sizeof(int));
-  old   = malloc(ni*sizeof(int*));
-  new   = malloc(ni*sizeof(int*));
-
-  for(i=0; i<ni; i++){
-    old[i] = &old1d[i*nj];
-    new[i] = &new1d[i*nj];
-  }
-
-  /* create the memory window */
-
-  disp_unit=sizeof(int);
-  size=(njnom)*sizeof(int);
-
-  MPI_Win_create(&old[1][1],size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&top_win);
-  MPI_Win_create(&old[ninom][1],size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&bottom_win);
-
-  /*  Initialize elements of old to 0 or 1.
-      We're doing some sleight of hand here to make sure we
-      initialize to the same values as in the serial case.
-      The rand() function is called for every i and j, even
-      if they are not on the current processor, to get the same
-      random distribution as the serial case, but they are
-      only used if i and j reside on the current processor. */
-
-  
-  for(ig=1; ig<=NI; ig++){
-    for(jg=1; jg<=NJ; jg++){
-      x = rand()/((float)RAND_MAX + 1);
-      
-      /* if this i is on the current processor */
-      if( ig >= i1g && ig <= i2g ){
-
-        /* local i and j indices, accounting for lower ghost cell */
-        i = ig - i1g + 1;
-        j = jg ;
-	
-        if(x<0.5){
-          old[i][j] = 0;
-        }else{
-          old[i][j] = 1;
-        }
-      }
-      
-    }
-  }
-  
-   /* iterate */
-
-  for(n=0; n<NSTEPS; n++){
-
-    /* transfer data to ghost cells */
-
-    if(nprocs == 1){
-
-      /* left and right columns */
-
-      for(i=1; i<i2; i++){
-        old[i][0]  = old[i][j2m];
-        old[i][j2] = old[i][1];
-      }
-
-      /* top and bottom */
-
-      for(j=1; j<j2; j++){
-        old[0][j]  = old[i2m][j];
-        old[i2][j] = old[1][j];
-      }
-
-      /* corners */
-
-      old[0][0]   = old[i2m][j2m];
-      old[0][j2]  = old[i2m][1];
-      old[i2][j2] = old[1][1];
-      old[i2][0]  = old[1][j2m];
-
-    }else{
-
-      if(myid==0) {
-	above_rank=nprocs-1;
-      } else {
-	above_rank=myid-1;
-      } 
-
-      if(myid==nprocs-1) {
-	below_rank=0;
-      } else {
-	below_rank=myid+1;
-      }
-	
-      /* use one sided communication to move row from above and */
-      /* below into ghost cells */  
-
-      /* read row from bottom of above process into top row of ghost cells */
-      /* remember a fence call is needed before and after the get */
-            
-     
-      MPI_Win_fence(0 , bottom_win);
-      MPI_Get(&old[0][1],njnom,MPI_INT,above_rank,0,njnom,
-		MPI_INT,bottom_win);
-      MPI_Win_fence(0 , bottom_win); 
-     
-      /* read row from below into bottom row of ghost cells */
-      /* remember a fence call is needed before and after the get */
-      
-      MPI_Win_fence(0 , top_win);
-      MPI_Get(&old[i2][1],njnom,MPI_INT,below_rank,0,njnom,
-	      MPI_INT,top_win);
-      MPI_Win_fence(0 , top_win); 
-      
-      /* left and right including corners*/
-      /* needs something more complicated if problem split into 2d */
-
-      for(i=i1; i<=i2; i++){
-        old[i][0]  = old[i][j2m];
-        old[i][j2] = old[i][1];
-      }
-
-    }
-
-    /* update states of cells */
-
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-                
-        im = i-1;
-        ip = i+1;
-        jm = j-1;
-        jp = j+1;
-        nsum =  old[im][jp] + old[i][jp] + old[ip][jp]
-              + old[im][j ]              + old[ip][j ] 
-              + old[im][jm] + old[i][jm] + old[ip][jm];
-
-        switch(nsum){
-        case 3:
-          new[i][j] = 1;
-          break;
-        case 2:
-          new[i][j] = old[i][j];
-          break;
-        default:
-          new[i][j] = 0;
-        }
-      }
-    }
-
-    /* copy new state into old state */
-    
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-        old[i][j] = new[i][j];
-      }
-    }
-
-  }
-
-  /*  Iterations are done; sum the number of live cells */
-
-  isum = 0;
-  for(i=1; i<i2; i++){
-    for(j=1; j<j2; j++){
-      isum = isum + new[i][j];
-    }
-  }
-
-  /* Print final number of live cells.  For multiple processors,
-     must reduce partial sums */
-  
-  if(nprocs > 1){
-    isumloc = isum;
-    MPI_Reduce(&isumloc, &isum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
-  }
-
-  if(myid == 0) printf("Number of live cells = %d\n", isum);
-  
-   
-  /* free the windows */
-
-  MPI_Win_free(&top_win);
-  MPI_Win_free(&bottom_win);
-
-  MPI_Finalize();
-}
- 
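
A hedged sketch (an assumption, not the lab's method) of the same halo update done with MPI_Put instead of MPI_Get: each rank pushes its own boundary row into the neighbour's ghost row. It assumes a window ghost_win created over each rank's top ghost row; other names follow the solution above.

/* assumed: ghost_win was created over each rank's top ghost row, e.g.
   MPI_Win_create(&old[0][1], njnom*sizeof(int), sizeof(int),
                  MPI_INFO_NULL, MPI_COMM_WORLD, &ghost_win);            */
MPI_Win_fence(0, ghost_win);
/* push my bottom interior row into the ghost row of the rank below */
MPI_Put(&old[ninom][1], njnom, MPI_INT, below_rank,
        0, njnom, MPI_INT, ghost_win);
MPI_Win_fence(0, ghost_win);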
diff --git a/lab3/sol/game_of_life-one_sided.f90 b/lab3/sol/game_of_life-one_sided.f90
deleted file mode 100644
index bcf66c7..0000000
--- a/lab3/sol/game_of_life-one_sided.f90
+++ /dev/null
@@ -1,225 +0,0 @@
-!------------------------------
-!     Conway Game of Life
-!
-!     One sided communication
-!     
-!     Domain is split vertically (left/right) so that the data
-!     to be transferred is contiguous
-!
-!     leftmost rank is rank 0
-!------------------------------
-
-program life
-  implicit none
-  include 'mpif.h'
-  
-  integer, parameter :: ni=200, nj=200, nsteps = 500
-  integer :: i, j, n, im, ip, jm, jp, nsum, isum, isum1, &
-       ierr, myid, nprocs, i1, i2, j1, j2, i1p, i2m, j1p, j2m, &
-       i1n, i2n, ninom, njnom, niproc, njproc, nitot, isumloc, &
-       j1n,j2n
-  integer, allocatable, dimension(:,:) :: old, new
-  real :: arand
-
-  integer :: left_rank, right_rank
-  integer(kind=MPI_ADDRESS_KIND) :: size,target_disp
-  integer disp_unit,left_win,right_win
-  
-  ! initialize MPI
-
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid, ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! domain decomposition
-
-  ! nominal number of points per proc., without ghost cells,
-  ! assume numbers divide evenly; niproc and njproc are the
-  ! numbers of procs in the i and j directions.
-  niproc = 1
-  njproc = nprocs
-  ninom  = ni/niproc
-  njnom  = nj/njproc
-
-  if(njnom*njproc.ne.nj) then
-     if(myid.eq.0) then
-        write(*,*) "Need to be able to divide the j grid exactly between the processes"
-        write(*,*) "grid points in j",nj," nprocs=",nprocs
-        write(*,*) "grid points per cell",njnom
-        write(*,*) "cells that would be used",njnom*njproc
-     endif
-     call mpi_abort(mpi_comm_world,1,ierr)
-  endif
-
-
-  ! nominal starting and ending indices, without ghost cells
-  i1n = 1
-  i2n = ninom 
-  j1n = myid*njnom + 1
-  j2n = j1n + njnom - 1
-
-  ! local starting and ending index, including 2 ghost cells
-  ! in each direction (at beginning and end)
-  i1  = i1n - 1
-  i1p = i1 + 1
-  i2  = i2n + 1
-  i2m = i2 - 1
-  j1  = j1n-1
-  j1p = j1 + 1
-  j2  = j2n + 1
-  j2m = j2 - 1
-  nitot = i2 - i1 + 1
-
-  ! allocate arrays
-  allocate( old(i1:i2,j1:j2), new(i1:i2,j1:j2) )
-
-  ! Create the memory window
-
-  target_disp=0
-  call mpi_type_extent(MPI_INTEGER,disp_unit,ierr)
-  size=ninom*disp_unit
-
-  call mpi_win_create(old(i1n,j1n),size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,left_win,ierr)
-  call mpi_win_create(old(i1n,j2n),size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,right_win,ierr)
-
-  ! Initialize elements of old to 0 or 1.  We're doing some
-  ! sleight of hand here to make sure we initialize to the
-  ! same values as in the serial case. The random_number
-  ! function is called for every i and j, even if they are
-  ! not on the current processor, to get the same random
-  ! distribution as the serial case, but they are only used
-  ! if this i and j reside on the current processor.
-
-  do j = 1, nj
-     do i = 1, ni
-        call random_number(arand)
-        if(j > j1 .and. j < j2) old(i,j) = nint(arand)
-     enddo
-  enddo
-
-  !  iterate
-
-  
-
-  time_iteration: do n = 1, nsteps
-
-     ! transfer data to ghost cells
-
-     if(nprocs == 1) then
-
-        ! left and right boundary conditions
-
-        old(i1p:i2m,0)  = old(i1p:i2m,j2m)
-        old(i1p:i2m,j2) = old(i1p:i2m,1)
-
-        ! top and bottom boundary conditions
-
-        old(i1,:) = old(i2m,:)
-        old(i2,:) = old(i1p,:)
-
-        ! corners
-
-        old(i1,j1) = old(i2m,j2m)
-        old(i1,j2) = old(i2m,j1p)
-        old(i2,j2) = old(i1p,j1p)
-        old(i2,j1) = old(i1p,j2m)
-
-     else
-
-        if(myid.eq.0) then
-           left_rank=nprocs-1
-        else
-           left_rank=myid-1
-        endif
-
-         if(myid.eq.nprocs-1) then
-           right_rank=0
-        else
-           right_rank=myid+1
-        endif
-
-
-        ! use one sided communication to move row from left and 
-        ! right into ghost cells
-
-        ! read row from the left into the left row of ghost cells
-        ! remember a fence call is needed before and after the get
-        ! make sure you use a variable of type
-        ! integer(kind=MPI_ADDRESS_KIND) for the target displacement
-
-        call mpi_win_fence(0,right_win,ierr)
-        call mpi_get(old(i1n,j1),ninom,MPI_INTEGER,left_rank,target_disp, &
-             ninom, MPI_INTEGER,right_win,ierr)
-        call mpi_win_fence(0,right_win,ierr)
-        
-        ! read row from the right into the right row of ghost cells
-        ! remember a fence call is needed before and after the get
-
-        call mpi_win_fence(0,left_win,ierr)
-        call mpi_get(old(i1n,j2),ninom,MPI_INTEGER,right_rank,target_disp, &
-             ninom,MPI_INTEGER,left_win,ierr)
-        call mpi_win_fence(0,left_win,ierr)
-
-        ! top and bottom including corners
-        ! will not work with 2d distribution of cells
-
-        old(i1,:) = old(i2n,:)
-        old(i2,:) = old(i1n,:)
-
-     endif
-
-     do j = j1p, j2m
-        do i = i1p, i2m
-
-           im = i - 1
-           ip = i + 1
-           jm = j - 1
-           jp = j + 1
-           nsum =  old(im,jp) + old(i,jp) + old(ip,jp) &
-                + old(im,j )             + old(ip,j ) &
-                + old(im,jm) + old(i,jm) + old(ip,jm)
-
-           select case (nsum)
-           case (3)
-              new(i,j) = 1
-           case (2)
-              new(i,j) = old(i,j)
-           case default
-              new(i,j) = 0
-           end select
-
-        enddo
-     enddo
-
-     ! copy new state into old state
-     old(i1p:i2m,j1p:j2m) = new(i1p:i2m,j1p:j2m)
-
-  enddo time_iteration
-
- 
-  ! Iterations are done; sum the number of live cells
-
-  isum = sum(new(i1p:i2m,j1p:j2m))
-
-  ! Print final number of live cells.  For multiple
-  ! processors, must reduce partial sums.
-
-  if(nprocs > 1) then
-     isumloc = isum
-     call mpi_reduce(isumloc, isum, 1, mpi_integer, &
-          mpi_sum, 0, mpi_comm_world, ierr)
-  endif
-
-  if(myid == 0) then
-     write(*,"(/'Number of live cells = ', i6/)") isum
-  endif
-
-  ! free the windows before deallocating the memory that backs them
-  call mpi_win_free(left_win,ierr)
-  call mpi_win_free(right_win,ierr)
-
-  deallocate(old, new)
-  call mpi_finalize(ierr)
-
-end program life
-
-
diff --git a/lab3/sol/game_of_life-topology.c b/lab3/sol/game_of_life-topology.c
deleted file mode 100644
index 1f044db..0000000
--- a/lab3/sol/game_of_life-topology.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/****************************
-    Conway Game of Life
-
-  cartesian topology version
-  domain divided top-bottom
-  (split by horizontal lines)
-*****************************/
-
-#include "mpi.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-#define NI 200
-#define NJ 200
-
-#define NSTEPS 500
-
-int main(int argc, char *argv[]){
-
-  int i, j, n, im, ip, jm, jp, nsum, isum, isum1, nprocs ,myid, ierr;
-  int ig, jg, i1g, i2g, j1g, j2g, ninom, njnom, ninj, i1, i2, i2m,
-    j1, j2, j2m, ni, nj, isumloc,igrid;
-  int niproc, njproc;
-  int **old, **new, *old1d, *new1d;
-
-  MPI_Win top_win,bottom_win;
-  MPI_Aint size;
-  int disp_unit,above_rank,below_rank;
-
-  MPI_Comm cart_comm;
-  int period,plus_one,minus_one,cart_position,cart_id;
-
-  float x;
-
-  /* initialize MPI */
-
-  MPI_Init(&argc,&argv);
-  MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
-  MPI_Comm_rank(MPI_COMM_WORLD,&myid);
-
-  /* nominal number of points per proc. in each direction,
-     without ghost cells, assume numbers divide evenly */ 
-
-  niproc = nprocs;   /* divide domain in i direction only */ 
-  njproc = 1;   
-  ninom = NI/niproc;
-  njnom = NJ/njproc;
-
-  /* NI must be exactly divisible by nprocs */
-
-  if(ninom*niproc!=NI) {
-    igrid=NI;
-    printf("Need to be able to divide the i grid exactly between the processes\n");
-    printf("grid points in i=%d nprocs=%d\n",igrid,nprocs);
-    MPI_Abort(MPI_COMM_WORLD, 1);
-  }
-      
-  /* create a periodic 1d topology  */
-  /* set the ID to be the ID in this topology */
-
-  period=1;
-  MPI_Cart_create(MPI_COMM_WORLD,1,&nprocs,&period,1,&cart_comm);
-  MPI_Comm_rank(cart_comm,&cart_id);
-  MPI_Cart_coords(cart_comm,cart_id,1,&cart_position);
-  
-  /* global starting and ending indices (without ghost cells) */
-
-  i1g = (cart_position*ninom) + 1;
-  i2g = i1g + ninom - 1;
-  j1g = 1;
-  j2g = njnom;
-
-  /* local starting and ending indices, including ghost cells */
-
-  i1  = 0;
-  i2  = ninom + 1;
-  i2m = i2 - 1;
-  j1  = 0;
-  j2  = njnom + 1;
-  j2m = j2 - 1;
-
-  /* allocate arrays; want elements to be contiguous, so
-     allocate 1-D arrays, then set pointer to each row (old
-     and new) to allow use of array notation for convenience */
-
-  ni = i2-i1+1;
-  nj = j2-j1+1;
-  ninj = ni*nj;
-
-  old1d = malloc(ninj*sizeof(int));
-  new1d = malloc(ninj*sizeof(int));
-  old   = malloc(ni*sizeof(int*));
-  new   = malloc(ni*sizeof(int*));
-
-  for(i=0; i<ni; i++){
-    old[i] = &old1d[i*nj];
-    new[i] = &new1d[i*nj];
-  }
-
-  /* create the memory window */
-
-  disp_unit=sizeof(int);
-  size=(njnom)*sizeof(int);
-
-  MPI_Win_create(&old[1][1],size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&top_win);
-  MPI_Win_create(&old[ninom][1],size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,&bottom_win);
-
-  /*  Initialize elements of old to 0 or 1.
-      We're doing some sleight of hand here to make sure we
-      initialize to the same values as in the serial case.
-      The rand() function is called for every i and j, even
-      if they are not on the current processor, to get the same
-      random distribution as the serial case, but they are
-      only used if i and j reside on the current processor. */
-
-  
-  for(ig=1; ig<=NI; ig++){
-    for(jg=1; jg<=NJ; jg++){
-      x = rand()/((float)RAND_MAX + 1);
-      
-      /* if this i is on the current processor */
-      if( ig >= i1g && ig <= i2g ){
-
-        /* local i and j indices, accounting for lower ghost cell */
-        i = ig - i1g + 1;
-        j = jg ;
-	
-        if(x<0.5){
-          old[i][j] = 0;
-        }else{
-          old[i][j] = 1;
-        }
-      }
-      
-    }
-  }
-  
-   /* iterate */
-
-  for(n=0; n<NSTEPS; n++){
-
-    /* transfer data to ghost cells */
-
-    if(nprocs == 1){
-
-      /* left and right columns */
-
-      for(i=1; i<i2; i++){
-        old[i][0]  = old[i][j2m];
-        old[i][j2] = old[i][1];
-      }
-
-      /* top and bottom */
-
-      for(j=1; j<j2; j++){
-        old[0][j]  = old[i2m][j];
-        old[i2][j] = old[1][j];
-      }
-
-      /* corners */
-
-      old[0][0]   = old[i2m][j2m];
-      old[0][j2]  = old[i2m][1];
-      old[i2][j2] = old[1][1];
-      old[i2][0]  = old[1][j2m];
-
-    }else{
-
-      /* the fourth argument is an output (the source rank of the shift),
-         so use scratch variables rather than overwriting cart_id */
-      MPI_Cart_shift(cart_comm,0,-1,&minus_one,&above_rank);
-      MPI_Cart_shift(cart_comm,0,1,&plus_one,&below_rank);
-	
-      /* use one sided communication to move row from above and */
-      /* below into ghost cells */  
-
-      /* read row from bottom of above process into top row of ghost cells */
-      /* remember a fence call is needed before and after the get */
-            
-     
-      MPI_Win_fence(0 , bottom_win);
-      MPI_Get(&old[0][1],njnom,MPI_INT,above_rank,0,njnom,
-		MPI_INT,bottom_win);
-      MPI_Win_fence(0 , bottom_win); 
-     
-      /* read row from below into bottom row of ghost cells */
-      /* remember a fence call is needed before and after the get */
-      
-      MPI_Win_fence(0 , top_win);
-      MPI_Get(&old[i2][1],njnom,MPI_INT,below_rank,0,njnom,
-	      MPI_INT,top_win);
-      MPI_Win_fence(0 , top_win); 
-      
-      /* left and right including corners*/
-      /* needs something more complicated if problem split into 2d */
-
-      for(i=i1; i<=i2; i++){
-        old[i][0]  = old[i][j2m];
-        old[i][j2] = old[i][1];
-      }
-
-    }
-
-    /* update states of cells */
-
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-                
-        im = i-1;
-        ip = i+1;
-        jm = j-1;
-        jp = j+1;
-        nsum =  old[im][jp] + old[i][jp] + old[ip][jp]
-              + old[im][j ]              + old[ip][j ] 
-              + old[im][jm] + old[i][jm] + old[ip][jm];
-
-        switch(nsum){
-        case 3:
-          new[i][j] = 1;
-          break;
-        case 2:
-          new[i][j] = old[i][j];
-          break;
-        default:
-          new[i][j] = 0;
-        }
-      }
-    }
-
-    /* copy new state into old state */
-    
-    for(i=1; i<i2; i++){
-      for(j=1; j<j2; j++){
-        old[i][j] = new[i][j];
-      }
-    }
-
-  }
-
-  /*  Iterations are done; sum the number of live cells */
-
-  isum = 0;
-  for(i=1; i<i2; i++){
-    for(j=1; j<j2; j++){
-      isum = isum + new[i][j];
-    }
-  }
-
-  /* Print final number of live cells.  For multiple processors,
-     must reduce partial sums */
-  
-  if(nprocs > 1){
-    isumloc = isum;
-    MPI_Reduce(&isumloc, &isum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
-  }
-
-  if(myid == 0) printf("Number of live cells = %d\n", isum);
-  
-   
-  /* free the windows */
-
-  MPI_Win_free(&top_win);
-  MPI_Win_free(&bottom_win);
-
-  MPI_Finalize();
-}
- 
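
An illustrative extension (not in the lab solution) showing how the 1-D decomposition above generalises to a 2-D periodic grid of processes with MPI_Dims_create; the neighbour ranks then come from two MPI_Cart_shift calls, one per dimension. Only nprocs is taken from the code above.

int dims[2] = {0, 0}, periods[2] = {1, 1};
int up, down, left, right;
MPI_Comm grid_comm;

MPI_Dims_create(nprocs, 2, dims);     /* picks a near-square factorisation */
MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &grid_comm);
MPI_Cart_shift(grid_comm, 0, 1, &up, &down);     /* neighbours in dimension 0 */
MPI_Cart_shift(grid_comm, 1, 1, &left, &right);  /* neighbours in dimension 1 */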
diff --git a/lab3/sol/game_of_life-topology.f90 b/lab3/sol/game_of_life-topology.f90
deleted file mode 100644
index b6f29fe..0000000
--- a/lab3/sol/game_of_life-topology.f90
+++ /dev/null
@@ -1,226 +0,0 @@
-!------------------------------
-!     Conway Game of Life
-!
-!     One sided communication with a cartesian topology
-!
-!     Domain is split vertically (left/right) so that the data
-!     to be transferred is contiguous
-!
-!     leftmost rank is rank 0
-!------------------------------
-
-program life
-  implicit none
-  include 'mpif.h'
-  
-  integer, parameter :: ni=200, nj=200, nsteps = 500
-  integer :: i, j, n, im, ip, jm, jp, nsum, isum, isum1, &
-       ierr, myid, nprocs, i1, i2, j1, j2, i1p, i2m, j1p, j2m, &
-       i1n, i2n, ninom, njnom, niproc, njproc, nitot, isumloc, &
-       j1n,j2n
-  integer, allocatable, dimension(:,:) :: old, new
-  real :: arand
-
-  integer :: left_rank, right_rank
-  integer(kind=MPI_ADDRESS_KIND) :: size,target_disp
-  integer disp_unit,left_win,right_win
-  
-  integer cart_comm,cart_id,cart_position
-  logical period        ! the periods argument of mpi_cart_create is logical
-  integer plus_one,minus_one
-
-  ! initialize MPI
-
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid, ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! domain decomposition
-
-  ! nominal number of points per proc., without ghost cells,
-  ! assume numbers divide evenly; niproc and njproc are the
-  ! numbers of procs in the i and j directions.
-  niproc = 1
-  njproc = nprocs
-  ninom  = ni/niproc
-  njnom  = nj/njproc
-
-  if(njnom*njproc.ne.nj) then
-     if(myid.eq.0) then
-        write(*,*) "Need to be able to divide the j grid exactly between the processes"
-        write(*,*) "grid points in j",nj," nprocs=",nprocs
-        write(*,*) "grid points per cell",njnom
-        write(*,*) "cells that would be used",njnom*njproc
-     endif
-     call mpi_abort(mpi_comm_world,1,ierr)
-  endif
-
-  ! Create a periodic 1d topology
-  ! cart_position is the value used to split the grid
-
-  period = .true.   ! periodic boundary in the single dimension
-  call mpi_cart_create(MPI_COMM_WORLD,1,nprocs,period,.true.,cart_comm,ierr)
-  call mpi_comm_rank(cart_comm,cart_id,ierr)
-  call mpi_cart_coords(cart_comm,cart_id,1,cart_position,ierr)
-
-
-  ! nominal starting and ending indices, without ghost cells
-  i1n = 1
-  i2n = ninom 
-  j1n = cart_position*njnom + 1
-  j2n = j1n + njnom - 1
-
-  ! local starting and ending index, including 2 ghost cells
-  ! in each direction (at beginning and end)
-  i1  = i1n - 1
-  i1p = i1 + 1
-  i2  = i2n + 1
-  i2m = i2 - 1
-  j1  = j1n-1
-  j1p = j1 + 1
-  j2  = j2n + 1
-  j2m = j2 - 1
-  nitot = i2 - i1 + 1
-
-  ! allocate arrays
-  allocate( old(i1:i2,j1:j2), new(i1:i2,j1:j2) )
-
-  ! Create the memory window
-
-  target_disp=0
-  call mpi_type_extent(MPI_INTEGER,disp_unit,ierr)
-  size=ninom*disp_unit
-
-  call mpi_win_create(old(i1n,j1n),size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,left_win,ierr)
-  call mpi_win_create(old(i1n,j2n),size,disp_unit,MPI_INFO_NULL,MPI_COMM_WORLD,right_win,ierr)
-
-  ! Initialize elements of old to 0 or 1.  We're doing some
-  ! sleight of hand here to make sure we initialize to the
-  ! same values as in the serial case. The random_number
-  ! function is called for every i and j, even if they are
-  ! not on the current processor, to get the same random
-  ! distribution as the serial case, but they are only used
-  ! if this i and j reside on the current processor.
-
-  do j = 1, nj
-     do i = 1, ni
-        call random_number(arand)
-        if(j > j1 .and. j < j2) old(i,j) = nint(arand)
-     enddo
-  enddo
-
-  !  iterate
-
-   
-
-  time_iteration: do n = 1, nsteps
-
-     ! transfer data to ghost cells
-
-     if(nprocs == 1) then
-
-        ! left and right boundary conditions
-
-        old(i1p:i2m,0)  = old(i1p:i2m,j2m)
-        old(i1p:i2m,j2) = old(i1p:i2m,1)
-
-        ! top and bottom boundary conditions
-
-        old(i1,:) = old(i2m,:)
-        old(i2,:) = old(i1p,:)
-
-        ! corners
-
-        old(i1,j1) = old(i2m,j2m)
-        old(i1,j2) = old(i2m,j1p)
-        old(i2,j2) = old(i1p,j1p)
-        old(i2,j1) = old(i1p,j2m)
-
-     else
-
-        ! the fourth argument is an output (the source rank of the shift),
-        ! so use scratch variables rather than overwriting cart_id
-        call mpi_cart_shift(cart_comm,0,-1,minus_one,left_rank,ierr)
-        call mpi_cart_shift(cart_comm,0,1,plus_one,right_rank,ierr)
-
-
-        ! use one sided communication to move row from left and 
-        ! right into ghost cells
-
-        ! read row from the left into the left row of ghost cells
-        ! remember a fence call is needed before and after the get
-        ! make sure you use a variable of type
-        ! integer(kind=MPI_ADDRESS_KIND) for the target displacement
-
-        call mpi_win_fence(0,right_win,ierr)
-        call mpi_get(old(i1n,j1),ninom,MPI_INTEGER,left_rank,target_disp, &
-             ninom, MPI_INTEGER,right_win,ierr)
-        call mpi_win_fence(0,right_win,ierr)
-        
-        ! read row from the right into the right row of ghost cells
-        ! remember a fence call is needed before and after the get
-
-        call mpi_win_fence(0,left_win,ierr)
-        call mpi_get(old(i1n,j2),ninom,MPI_INTEGER,right_rank,target_disp, &
-             ninom,MPI_INTEGER,left_win,ierr)
-        call mpi_win_fence(0,left_win,ierr)
-
-        ! top and bottom including corners
-        ! will not work with 2d distribution of cells
-
-        old(i1,:) = old(i2n,:)
-        old(i2,:) = old(i1n,:)
-
-     endif
-
-     do j = j1p, j2m
-        do i = i1p, i2m
-
-           im = i - 1
-           ip = i + 1
-           jm = j - 1
-           jp = j + 1
-           nsum =  old(im,jp) + old(i,jp) + old(ip,jp) &
-                + old(im,j )             + old(ip,j ) &
-                + old(im,jm) + old(i,jm) + old(ip,jm)
-
-           select case (nsum)
-           case (3)
-              new(i,j) = 1
-           case (2)
-              new(i,j) = old(i,j)
-           case default
-              new(i,j) = 0
-           end select
-
-        enddo
-     enddo
-
-     ! copy new state into old state
-     old(i1p:i2m,j1p:j2m) = new(i1p:i2m,j1p:j2m)
-
-  enddo time_iteration
-
- 
-  ! Iterations are done; sum the number of live cells
-
-  isum = sum(new(i1p:i2m,j1p:j2m))
-
-  ! Print final number of live cells.  For multiple
-  ! processors, must reduce partial sums.
-
-  if(nprocs > 1) then
-     isumloc = isum
-     call mpi_reduce(isumloc, isum, 1, mpi_integer, &
-          mpi_sum, 0, mpi_comm_world, ierr)
-  endif
-
-  if(myid == 0) then
-     write(*,"(/'Number of live cells = ', i6/)") isum
-  endif
-
-  ! free the windows before deallocating the memory that backs them
-  call mpi_win_free(left_win,ierr)
-  call mpi_win_free(right_win,ierr)
-
-  deallocate(old, new)
-  call mpi_finalize(ierr)
-
-end program life
-
-
diff --git a/lab3/sol/hello_world_mpiio.c b/lab3/sol/hello_world_mpiio.c
deleted file mode 100644
index 3a77279..0000000
--- a/lab3/sol/hello_world_mpiio.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/***********************
-Hello World!
-
-MPI I/O version
-
-************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>   /* needed for strlen() */
-#include "mpi.h"
-
-#define BUF_LENGTH 50					// Max buffer
-char FILENAME[]="result.txt";		// Name of file
-
-int main(int argc, char *argv[]) {
-  char mytext[BUF_LENGTH];	
-  int rank,numtasks;
-  MPI_File fp; 
-  MPI_Status status;
-  MPI_Offset offset;
-  
-
-  // Initialize MPI
-  MPI_Init(&argc,&argv);
-  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-  MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
-  /*
-  Store the message in a buffer whose length is the same regardless of
-  the rank number, so that each rank writes to a separate file region
-  of exactly the same size.  The rank is therefore printed with a fixed
-  width of 3 characters (%3d).
-  */
-  sprintf(mytext,"Rank: %3d Hello World!\n",rank);
-  offset=rank*strlen(mytext);
-  // Open file and write text
-  MPI_File_open(MPI_COMM_WORLD,FILENAME,MPI_MODE_CREATE|MPI_MODE_WRONLY,MPI_INFO_NULL,&fp);
-  MPI_File_write_at(fp,offset,mytext,strlen(mytext),MPI_CHAR,&status);
-  MPI_File_close(&fp);
-  // Finalize MPI
-  MPI_Finalize();
-  return 0;
-}
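
A hedged sketch (not the original solution) of the same write done collectively with MPI_File_write_at_all, which lets the MPI library coordinate and merge the ranks' file accesses; since every rank writes, the collective form is a natural fit here. Names follow the program above.

MPI_File_open(MPI_COMM_WORLD, FILENAME, MPI_MODE_CREATE | MPI_MODE_WRONLY,
              MPI_INFO_NULL, &fp);
MPI_File_write_at_all(fp, offset, mytext, strlen(mytext), MPI_CHAR, &status);
MPI_File_close(&fp);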
diff --git a/lab3/sol/hello_world_mpiio.f90 b/lab3/sol/hello_world_mpiio.f90
deleted file mode 100644
index f071f12..0000000
--- a/lab3/sol/hello_world_mpiio.f90
+++ /dev/null
@@ -1,41 +0,0 @@
-
-!------------------------------
-!     Hello World
-
-!     MPI-I/O example
-!------------------------------
-
-program HelloWorld
-
-  implicit none
-  include 'mpif.h'
-
-  integer, parameter :: BUF_LENGTH=50
-  integer :: ierr, myid, nprocs, fp
-  character( len=BUF_LENGTH) ::  mytext
-  integer :: status(mpi_status_size)
-  integer(kind=MPI_OFFSET_KIND) offset
-
-  ! initialize MPI
-  call mpi_init(ierr)
-  call mpi_comm_rank(mpi_comm_world, myid, ierr)
-  call mpi_comm_size(mpi_comm_world, nprocs, ierr)
-
-  ! Store the message in a buffer whose length is the same regardless of
-  ! the rank number, so that each rank writes to a separate file region
-  ! of exactly the same size.  The rank is therefore printed with a
-  ! fixed width of 3 characters (i3).
-  write(mytext,"(A6,i3,A14)")"Rank: ",myid," Hello World!"
-  offset = myid * len(mytext)
-  
-  ! MPI IO Write to file
-  call MPI_FILE_OPEN(MPI_COMM_WORLD, 'result.txt', & 
-                       MPI_MODE_WRONLY + MPI_MODE_CREATE, & 
-                       MPI_INFO_NULL, fp, ierr)
-  call MPI_FILE_WRITE_AT(fp, offset, mytext, len(mytext), MPI_CHARACTER, & 
-                        MPI_STATUS_IGNORE, ierr)                       
-  call MPI_FILE_CLOSE(fp, ierr) 
-  ! Finalize MPI
-  call mpi_finalize(ierr)
-
-end program HelloWorld
-- 
GitLab