diff --git a/lab3/game_of_life-one_sided-prototype.c b/lab3/game_of_life-one_sided-prototype.c
index b15b0ee32512b906bd69a7b72385a20518bac7db..efaa10af99a3cc49444ad8ff58f81c43993ddf3e 100644
--- a/lab3/game_of_life-one_sided-prototype.c
+++ b/lab3/game_of_life-one_sided-prototype.c
@@ -15,11 +15,11 @@
 #define NSTEPS 500
 
-int main(int argc, char *argv[]){
-
+int main(int argc, char *argv[])
+{
   int i, j, n, im, ip, jm, jp, nsum, isum, isum1, nprocs ,myid, ierr;
   int ig, jg, i1g, i2g, j1g, j2g, ninom, njnom, ninj, i1, i2, i2m,
-      j1, j2, j2m, ni, nj, isumloc,igrid; 
+      j1, j2, j2m, ni, nj, isumloc,igrid;
   int niproc, njproc;
 
   int **old, **new, *old1d, *new1d;
@@ -109,7 +109,7 @@ int main(int argc, char *argv[]){
       /* local i and j indices, accounting for lower ghost cell */
       i = ig - i1g + 1;
       j = jg ;
-      
+
       if(x<0.5){
         old[i][j] = 0;
       }else{
@@ -152,19 +152,19 @@ int main(int argc, char *argv[]){
   }else{
     if(myid==0)
       {
-        above_rank=nprocs-1; 
+        above_rank=nprocs-1;
       }
     else
       {
-        above_rank=myid-1; 
+        above_rank=myid-1;
       }
     if(myid==nprocs-1)
       {
-        below_rank=0; 
+        below_rank=0;
       }
     else
      {
-        below_rank=myid+1; 
+        below_rank=myid+1;
      }
     /* FIXME */
-    
+
     /* use one sided communication to move row from above and */
     /* below into ghost cells */
 
@@ -181,7 +181,7 @@ int main(int argc, char *argv[]){
 
     for(i=i1; i<=i2; i++){
      old[i][0] = old[i][j2m];
-      old[i][j2] = old[i][1]; 
+      old[i][j2] = old[i][1];
    }
  }
 
diff --git a/lab3/hello_mpi.c b/lab3/hello_mpi.c
index 3d21a05082b28404999684c743fd4af75f0a2307..92dffc25cc782230c3e479da6a97906ef64b8bfa 100644
--- a/lab3/hello_mpi.c
+++ b/lab3/hello_mpi.c
@@ -1,14 +1,16 @@
 #include <stdio.h>
 #include <mpi.h>
 
-int main (int argc, char *argv[]) {
+int main (int argc, char *argv[])
+{
+  int myrank, size;
 
-  int myrank, size;
+  MPI_Init(&argc, &argv);
+  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+  MPI_Comm_size(MPI_COMM_WORLD, &size);
+  printf("Processor %d of %d: Hello World!\n", myrank, size);
 
-  MPI_Init(&argc, &argv);
-  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
-  MPI_Comm_size(MPI_COMM_WORLD, &size);
-  printf("Processor %d of %d: Hello World!\n", myrank, size);
+  MPI_Finalize();
 
-  MPI_Finalize();
+  return 0;
 }
diff --git a/lab3/mpi_bandwidth-nonblock.c b/lab3/mpi_bandwidth-nonblock.c
index 35471ab21d27acb3b97b623461a94c08719bf46b..e4381ab3459fd1dde0925cef25e304bd10afa03d 100644
--- a/lab3/mpi_bandwidth-nonblock.c
+++ b/lab3/mpi_bandwidth-nonblock.c
@@ -21,141 +21,142 @@
 int main (int argc, char *argv[])
 {
-int numtasks, rank, n, i, j, rndtrps, nbytes, start, end, incr,
-    src, dest, rc, tag=1, taskpairs[MAXTASKS], namelength;
-double thistime, bw, bestbw, worstbw, totalbw, avgbw,
-       bestall, avgall, worstall,
-       timings[MAXTASKS/2][3], tmptimes[3],
-       resolution, t1, t2;
-char msgbuf[ENDSIZE], host[MPI_MAX_PROCESSOR_NAME],
-     hostmap[MAXTASKS][MPI_MAX_PROCESSOR_NAME];
-struct timeval tv1, tv2;
-MPI_Status status, stats[2];
-MPI_Request reqs[2];
-
-/* Some initializations and error checking */
-MPI_Init(&argc,&argv);
-MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
-if (numtasks % 2 != 0) {
-   printf("ERROR: Must be an even number of tasks!  Quitting...\n");
-   MPI_Abort(MPI_COMM_WORLD, rc);
-   exit(0);
+   int numtasks, rank, n, i, j, rndtrps, nbytes, start, end, incr,
+       src, dest, rc, tag=1, taskpairs[MAXTASKS], namelength;
+   double thistime, bw, bestbw, worstbw, totalbw, avgbw,
+          bestall, avgall, worstall,
+          timings[MAXTASKS/2][3], tmptimes[3],
+          resolution, t1, t2;
+   char msgbuf[ENDSIZE], host[MPI_MAX_PROCESSOR_NAME],
+        hostmap[MAXTASKS][MPI_MAX_PROCESSOR_NAME];
+   struct timeval tv1, tv2;
+   MPI_Status status, stats[2];
+   MPI_Request reqs[2];
+
+   /* Some initializations and error checking */
+   MPI_Init(&argc,&argv);
+   MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+   if (numtasks % 2 != 0) {
+      printf("ERROR: Must be an even number of tasks!  Quitting...\n");
+      MPI_Abort(MPI_COMM_WORLD, rc);
+      exit(0);
    }
-MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-start = STARTSIZE;
-end = ENDSIZE;
-incr = INCREMENT;
-rndtrps = ROUNDTRIPS;
-for (i=0; i<end; i++)
-   msgbuf[i] = 'x';
-
-/* All tasks send their host name to task 0 */
-MPI_Get_processor_name(host, &namelength);
-MPI_Gather(&host, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, &hostmap,
-           MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);
-
-/* Determine who my send/receive partner is and tell task 0 */
-if (rank < numtasks/2)
-   dest = src = numtasks/2 + rank;
-if (rank >= numtasks/2)
-   dest = src = rank - numtasks/2;
-MPI_Gather(&dest, 1, MPI_INT, &taskpairs, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-if (rank == 0) {
-   resolution = MPI_Wtick();
-   printf("\n******************** MPI Bandwidth Test ********************\n");
-   printf("Message start size= %d bytes\n",start);
-   printf("Message finish size= %d bytes\n",end);
-   printf("Incremented by %d bytes per iteration\n",incr);
-   printf("Roundtrips per iteration= %d\n",rndtrps);
-   printf("MPI_Wtick resolution = %e\n",resolution);
-   printf("************************************************************\n");
-   for (i=0; i<numtasks; i++)
-      printf("task %4d is on %s partner=%4d\n",i,hostmap[i],taskpairs[i]);
-   printf("************************************************************\n");
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   start = STARTSIZE;
+   end = ENDSIZE;
+   incr = INCREMENT;
+   rndtrps = ROUNDTRIPS;
+   for (i=0; i<end; i++)
+      msgbuf[i] = 'x';
+
+   /* All tasks send their host name to task 0 */
+   MPI_Get_processor_name(host, &namelength);
+   MPI_Gather(&host, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, &hostmap,
+              MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);
+
+   /* Determine who my send/receive partner is and tell task 0 */
+   if (rank < numtasks/2)
+      dest = src = numtasks/2 + rank;
+   if (rank >= numtasks/2)
+      dest = src = rank - numtasks/2;
+   MPI_Gather(&dest, 1, MPI_INT, &taskpairs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+
+   if (rank == 0) {
+      resolution = MPI_Wtick();
+      printf("\n******************** MPI Bandwidth Test ********************\n");
+      printf("Message start size= %d bytes\n",start);
+      printf("Message finish size= %d bytes\n",end);
+      printf("Incremented by %d bytes per iteration\n",incr);
+      printf("Roundtrips per iteration= %d\n",rndtrps);
+      printf("MPI_Wtick resolution = %e\n",resolution);
+      printf("************************************************************\n");
+      for (i=0; i<numtasks; i++)
+         printf("task %4d is on %s partner=%4d\n",i,hostmap[i],taskpairs[i]);
+      printf("************************************************************\n");
    }
 
-/*************************** first half of tasks *****************************/
-/* These tasks send/receive messages with their partner task, and then do a  */
-/* few bandwidth calculations based upon message size and timings.           */
-
-if (rank < numtasks/2) {
-   for (n=start; n<=end; n=n+incr) {
-      bestbw = 0.0;
-      worstbw = .99E+99;
-      totalbw = 0.0;
-      nbytes = sizeof(char) * n;
-      for (i=1; i<=rndtrps; i++){
-         t1 = MPI_Wtime();
-         MPI_Isend(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &reqs[0]);
-         MPI_Irecv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &reqs[1]);
-         MPI_Waitall(2, reqs, stats);
-         t2 = MPI_Wtime();
-         thistime = t2 - t1;
-         bw = ((double)nbytes * 2) / thistime;
-         totalbw = totalbw + bw;
-         if (bw > bestbw ) bestbw = bw;
-         if (bw < worstbw ) worstbw = bw;
+   /*************************** first half of tasks *****************************/
+   /* These tasks send/receive messages with their partner task, and then do a  */
+   /* few bandwidth calculations based upon message size and timings.           */
+
+   if (rank < numtasks/2) {
+      for (n=start; n<=end; n=n+incr) {
+         bestbw = 0.0;
+         worstbw = .99E+99;
+         totalbw = 0.0;
+         nbytes = sizeof(char) * n;
+         for (i=1; i<=rndtrps; i++){
+            t1 = MPI_Wtime();
+            MPI_Isend(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &reqs[0]);
+            MPI_Irecv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &reqs[1]);
+            MPI_Waitall(2, reqs, stats);
+            t2 = MPI_Wtime();
+            thistime = t2 - t1;
+            bw = ((double)nbytes * 2) / thistime;
+            totalbw = totalbw + bw;
+            if (bw > bestbw ) bestbw = bw;
+            if (bw < worstbw ) worstbw = bw;
          }
-      /* Convert to megabytes per second */
-      bestbw = bestbw/1000000.0;
-      avgbw = (totalbw/1000000.0)/(double)rndtrps;
-      worstbw = worstbw/1000000.0;
-
-      /* Task 0 collects timings from all relevant tasks */
-      if (rank == 0) {
-         /* Keep track of my own timings first */
-         timings[0][0] = bestbw;
-         timings[0][1] = avgbw;
-         timings[0][2] = worstbw;
-         /* Initialize overall averages */
-         bestall = 0.0;
-         avgall = 0.0;
-         worstall = 0.0;
-         /* Now receive timings from other tasks and print results. Note   */
-         /* that this loop will be appropriately skipped if there are      */
-         /* only two tasks.                                                */
-         for (j=1; j<numtasks/2; j++)
-            MPI_Recv(&timings[j], 3, MPI_DOUBLE, j, tag, MPI_COMM_WORLD, &status);
-         printf("***Message size: %8d *** best  /  avg  / worst (MB/sec)\n",n);
-         for (j=0; j<numtasks/2; j++) {
-            printf("   task pair: %4d - %4d:    %4.2f / %4.2f / %4.2f \n",
-                   j, taskpairs[j], timings[j][0], timings[j][1], timings[j][2]);
-            bestall += timings[j][0];
-            avgall += timings[j][1];
-            worstall += timings[j][2];
+         /* Convert to megabytes per second */
+         bestbw = bestbw/1000000.0;
+         avgbw = (totalbw/1000000.0)/(double)rndtrps;
+         worstbw = worstbw/1000000.0;
+
+         /* Task 0 collects timings from all relevant tasks */
+         if (rank == 0) {
+            /* Keep track of my own timings first */
+            timings[0][0] = bestbw;
+            timings[0][1] = avgbw;
+            timings[0][2] = worstbw;
+            /* Initialize overall averages */
+            bestall = 0.0;
+            avgall = 0.0;
+            worstall = 0.0;
+            /* Now receive timings from other tasks and print results. Note   */
+            /* that this loop will be appropriately skipped if there are      */
+            /* only two tasks.                                                */
+            for (j=1; j<numtasks/2; j++)
+               MPI_Recv(&timings[j], 3, MPI_DOUBLE, j, tag, MPI_COMM_WORLD, &status);
+            printf("***Message size: %8d *** best  /  avg  / worst (MB/sec)\n",n);
+            for (j=0; j<numtasks/2; j++) {
+               printf("   task pair: %4d - %4d:    %4.2f / %4.2f / %4.2f \n",
+                      j, taskpairs[j], timings[j][0], timings[j][1], timings[j][2]);
+               bestall += timings[j][0];
+               avgall += timings[j][1];
+               worstall += timings[j][2];
             }
-         printf("   OVERALL AVERAGES:          %4.2f / %4.2f / %4.2f \n\n",
-                bestall/(numtasks/2), avgall/(numtasks/2), worstall/(numtasks/2));
+            printf("   OVERALL AVERAGES:          %4.2f / %4.2f / %4.2f \n\n",
+                   bestall/(numtasks/2), avgall/(numtasks/2), worstall/(numtasks/2));
          }
-      else {
-         /* Other tasks send their timings to task 0 */
-         tmptimes[0] = bestbw;
-         tmptimes[1] = avgbw;
-         tmptimes[2] = worstbw;
-         MPI_Send(tmptimes, 3, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
+         else {
+            /* Other tasks send their timings to task 0 */
+            tmptimes[0] = bestbw;
+            tmptimes[1] = avgbw;
+            tmptimes[2] = worstbw;
+            MPI_Send(tmptimes, 3, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
          }
       }
    }
 
-/**************************** second half of tasks ***************************/
-/* These tasks do nothing more than send and receive with their partner task */
+   /**************************** second half of tasks ***************************/
+   /* These tasks do nothing more than send and receive with their partner task */
 
-if (rank >= numtasks/2) {
-   for (n=start; n<=end; n=n+incr) {
-      for (i=1; i<=rndtrps; i++){
-         MPI_Irecv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &reqs[1]);
-         MPI_Isend(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &reqs[0]);
-         MPI_Waitall(2, reqs, stats);
+   if (rank >= numtasks/2) {
+      for (n=start; n<=end; n=n+incr) {
+         for (i=1; i<=rndtrps; i++) {
+            MPI_Irecv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &reqs[1]);
+            MPI_Isend(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD, &reqs[0]);
+            MPI_Waitall(2, reqs, stats);
          }
       }
    }
 
-MPI_Finalize();
+   MPI_Finalize();
+
+   return 0;
 }  /* end of main */
diff --git a/lab3/mpi_bandwidth.c b/lab3/mpi_bandwidth.c
index 247ca89698ee57f51d78bd779bb8d7e7380b15c1..940fa16ccd569e3ad8369f28d22622790f9d4840 100644
--- a/lab3/mpi_bandwidth.c
+++ b/lab3/mpi_bandwidth.c
@@ -21,137 +21,138 @@
 int main (int argc, char *argv[])
 {
-int numtasks, rank, n, i, j, rndtrps, nbytes, start, end, incr,
-    src, dest, rc, tag=1, taskpairs[MAXTASKS], namelength;
-double thistime, bw, bestbw, worstbw, totalbw, avgbw,
-       bestall, avgall, worstall,
-       timings[MAXTASKS/2][3], tmptimes[3],
-       resolution, t1, t2;
-char msgbuf[ENDSIZE], host[MPI_MAX_PROCESSOR_NAME],
-     hostmap[MAXTASKS][MPI_MAX_PROCESSOR_NAME];
-struct timeval tv1, tv2;
-MPI_Status status;
-
-/* Some initializations and error checking */
-MPI_Init(&argc,&argv);
-MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
-if (numtasks % 2 != 0) {
-   printf("ERROR: Must be an even number of tasks!  Quitting...\n");
-   MPI_Abort(MPI_COMM_WORLD, rc);
-   exit(0);
+   int numtasks, rank, n, i, j, rndtrps, nbytes, start, end, incr,
+       src, dest, rc, tag=1, taskpairs[MAXTASKS], namelength;
+   double thistime, bw, bestbw, worstbw, totalbw, avgbw,
+          bestall, avgall, worstall,
+          timings[MAXTASKS/2][3], tmptimes[3],
+          resolution, t1, t2;
+   char msgbuf[ENDSIZE], host[MPI_MAX_PROCESSOR_NAME],
+        hostmap[MAXTASKS][MPI_MAX_PROCESSOR_NAME];
+   struct timeval tv1, tv2;
+   MPI_Status status;
+
+   /* Some initializations and error checking */
+   MPI_Init(&argc,&argv);
+   MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
+   if (numtasks % 2 != 0) {
+      printf("ERROR: Must be an even number of tasks!  Quitting...\n");
+      MPI_Abort(MPI_COMM_WORLD, rc);
+      exit(0);
    }
-MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-start = STARTSIZE;
-end = ENDSIZE;
-incr = INCREMENT;
-rndtrps = ROUNDTRIPS;
-for (i=0; i<end; i++)
-   msgbuf[i] = 'x';
-
-/* All tasks send their host name to task 0 */
-MPI_Get_processor_name(host, &namelength);
-MPI_Gather(&host, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, &hostmap,
-           MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);
-
-/* Determine who my send/receive partner is and tell task 0 */
-if (rank < numtasks/2)
-   dest = src = numtasks/2 + rank;
-if (rank >= numtasks/2)
-   dest = src = rank - numtasks/2;
-MPI_Gather(&dest, 1, MPI_INT, &taskpairs, 1, MPI_INT, 0, MPI_COMM_WORLD);
-
-if (rank == 0) {
-   resolution = MPI_Wtick();
-   printf("\n******************** MPI Bandwidth Test ********************\n");
-   printf("Message start size= %d bytes\n",start);
-   printf("Message finish size= %d bytes\n",end);
-   printf("Incremented by %d bytes per iteration\n",incr);
-   printf("Roundtrips per iteration= %d\n",rndtrps);
-   printf("MPI_Wtick resolution = %e\n",resolution);
-   printf("************************************************************\n");
-   for (i=0; i<numtasks; i++)
-      printf("task %4d is on %s partner=%4d\n",i,hostmap[i],taskpairs[i]);
-   printf("************************************************************\n");
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+   start = STARTSIZE;
+   end = ENDSIZE;
+   incr = INCREMENT;
+   rndtrps = ROUNDTRIPS;
+   for (i=0; i<end; i++)
+      msgbuf[i] = 'x';
+
+   /* All tasks send their host name to task 0 */
+   MPI_Get_processor_name(host, &namelength);
+   MPI_Gather(&host, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, &hostmap,
+              MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD);
+
+   /* Determine who my send/receive partner is and tell task 0 */
+   if (rank < numtasks/2)
+      dest = src = numtasks/2 + rank;
+   if (rank >= numtasks/2)
+      dest = src = rank - numtasks/2;
+   MPI_Gather(&dest, 1, MPI_INT, &taskpairs, 1, MPI_INT, 0, MPI_COMM_WORLD);
+
+   if (rank == 0) {
+      resolution = MPI_Wtick();
+      printf("\n******************** MPI Bandwidth Test ********************\n");
+      printf("Message start size= %d bytes\n",start);
+      printf("Message finish size= %d bytes\n",end);
+      printf("Incremented by %d bytes per iteration\n",incr);
+      printf("Roundtrips per iteration= %d\n",rndtrps);
+      printf("MPI_Wtick resolution = %e\n",resolution);
+      printf("************************************************************\n");
+      for (i=0; i<numtasks; i++)
+         printf("task %4d is on %s partner=%4d\n",i,hostmap[i],taskpairs[i]);
+      printf("************************************************************\n");
    }
 
-/*************************** first half of tasks *****************************/
-/* These tasks send/receive messages with their partner task, and then do a  */
-/* few bandwidth calculations based upon message size and timings.           */
-
-if (rank < numtasks/2) {
-   for (n=start; n<=end; n=n+incr) {
-      bestbw = 0.0;
-      worstbw = .99E+99;
-      totalbw = 0.0;
-      nbytes = sizeof(char) * n;
-      for (i=1; i<=rndtrps; i++){
-         t1 = MPI_Wtime();
-         MPI_Send(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
-         MPI_Recv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
-         t2 = MPI_Wtime();
-         thistime = t2 - t1;
-         bw = ((double)nbytes * 2) / thistime;
-         totalbw = totalbw + bw;
-         if (bw > bestbw ) bestbw = bw;
-         if (bw < worstbw ) worstbw = bw;
+   /*************************** first half of tasks *****************************/
+   /* These tasks send/receive messages with their partner task, and then do a  */
+   /* few bandwidth calculations based upon message size and timings.           */
+
+   if (rank < numtasks/2) {
+      for (n=start; n<=end; n=n+incr) {
+         bestbw = 0.0;
+         worstbw = .99E+99;
+         totalbw = 0.0;
+         nbytes = sizeof(char) * n;
+         for (i=1; i<=rndtrps; i++){
+            t1 = MPI_Wtime();
+            MPI_Send(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
+            MPI_Recv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
+            t2 = MPI_Wtime();
+            thistime = t2 - t1;
+            bw = ((double)nbytes * 2) / thistime;
+            totalbw = totalbw + bw;
+            if (bw > bestbw ) bestbw = bw;
+            if (bw < worstbw ) worstbw = bw;
          }
-      /* Convert to megabytes per second */
-      bestbw = bestbw/1000000.0;
-      avgbw = (totalbw/1000000.0)/(double)rndtrps;
-      worstbw = worstbw/1000000.0;
-
-      /* Task 0 collects timings from all relevant tasks */
-      if (rank == 0) {
-         /* Keep track of my own timings first */
-         timings[0][0] = bestbw;
-         timings[0][1] = avgbw;
-         timings[0][2] = worstbw;
-         /* Initialize overall averages */
-         bestall = 0.0;
-         avgall = 0.0;
-         worstall = 0.0;
-         /* Now receive timings from other tasks and print results. Note   */
-         /* that this loop will be appropriately skipped if there are      */
-         /* only two tasks.                                                */
-         for (j=1; j<numtasks/2; j++)
-            MPI_Recv(&timings[j], 3, MPI_DOUBLE, j, tag, MPI_COMM_WORLD, &status);
-         printf("***Message size: %8d *** best  /  avg  / worst (MB/sec)\n",n);
-         for (j=0; j<numtasks/2; j++) {
-            printf("   task pair: %4d - %4d:    %4.2f / %4.2f / %4.2f \n",
-                   j, taskpairs[j], timings[j][0], timings[j][1], timings[j][2]);
-            bestall += timings[j][0];
-            avgall += timings[j][1];
-            worstall += timings[j][2];
+         /* Convert to megabytes per second */
+         bestbw = bestbw/1000000.0;
+         avgbw = (totalbw/1000000.0)/(double)rndtrps;
+         worstbw = worstbw/1000000.0;
+
+         /* Task 0 collects timings from all relevant tasks */
+         if (rank == 0) {
+            /* Keep track of my own timings first */
+            timings[0][0] = bestbw;
+            timings[0][1] = avgbw;
+            timings[0][2] = worstbw;
+            /* Initialize overall averages */
+            bestall = 0.0;
+            avgall = 0.0;
+            worstall = 0.0;
+            /* Now receive timings from other tasks and print results. Note   */
+            /* that this loop will be appropriately skipped if there are      */
+            /* only two tasks.                                                */
+            for (j=1; j<numtasks/2; j++)
+               MPI_Recv(&timings[j], 3, MPI_DOUBLE, j, tag, MPI_COMM_WORLD, &status);
+            printf("***Message size: %8d *** best  /  avg  / worst (MB/sec)\n",n);
+            for (j=0; j<numtasks/2; j++) {
+               printf("   task pair: %4d - %4d:    %4.2f / %4.2f / %4.2f \n",
+                      j, taskpairs[j], timings[j][0], timings[j][1], timings[j][2]);
+               bestall += timings[j][0];
+               avgall += timings[j][1];
+               worstall += timings[j][2];
            }
-         printf("   OVERALL AVERAGES:          %4.2f / %4.2f / %4.2f \n\n",
-                bestall/(numtasks/2), avgall/(numtasks/2), worstall/(numtasks/2));
+            printf("   OVERALL AVERAGES:          %4.2f / %4.2f / %4.2f \n\n",
+                   bestall/(numtasks/2), avgall/(numtasks/2), worstall/(numtasks/2));
          }
-      else {
-         /* Other tasks send their timings to task 0 */
-         tmptimes[0] = bestbw;
-         tmptimes[1] = avgbw;
-         tmptimes[2] = worstbw;
-         MPI_Send(tmptimes, 3, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
+         else {
+            /* Other tasks send their timings to task 0 */
+            tmptimes[0] = bestbw;
+            tmptimes[1] = avgbw;
+            tmptimes[2] = worstbw;
+            MPI_Send(tmptimes, 3, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
          }
       }
    }
 
-/**************************** second half of tasks ***************************/
-/* These tasks do nothing more than send and receive with their partner task */
+   /**************************** second half of tasks ***************************/
+   /* These tasks do nothing more than send and receive with their partner task */
 
-if (rank >= numtasks/2) {
-   for (n=start; n<=end; n=n+incr) {
-      for (i=1; i<=rndtrps; i++){
-         MPI_Recv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
-         MPI_Send(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
+   if (rank >= numtasks/2) {
+      for (n=start; n<=end; n=n+incr) {
+         for (i=1; i<=rndtrps; i++) {
+            MPI_Recv(&msgbuf, n, MPI_CHAR, src, tag, MPI_COMM_WORLD, &status);
+            MPI_Send(&msgbuf, n, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
         }
      }
   }
 
-MPI_Finalize();
+   MPI_Finalize();
+
+   return 0;
 }  /* end of main */
diff --git a/lab3/mpi_latency.c b/lab3/mpi_latency.c
index b1268e417bc4b3be8cbf085d7f8925a01c984a9a..d74b138692793512923883023aaa30cd96c24bfc 100644
--- a/lab3/mpi_latency.c
+++ b/lab3/mpi_latency.c
@@ -18,95 +18,96 @@
 int main (int argc, char *argv[])
 {
-int reps,                   /* number of samples per test */
-    tag,                    /* MPI message tag parameter */
-    numtasks,               /* number of MPI tasks */
-    rank,                   /* my MPI task number */
-    dest, source,           /* send/receive task designators */
-    rc,                     /* return code */
-    n;
-double T1, T2,              /* start/end times per rep */
-       sumT,                /* sum of all reps times */
-       avgT,                /* average time per rep in microseconds */
-       deltaT;              /* time for one rep */
-char msg;                   /* buffer containing 1 byte message */
-MPI_Status status;          /* MPI receive routine parameter */
+   int reps,                /* number of samples per test */
+       tag,                 /* MPI message tag parameter */
+       numtasks,            /* number of MPI tasks */
+       rank,                /* my MPI task number */
+       dest, source,        /* send/receive task designators */
+       rc,                  /* return code */
+       n;
+   double T1, T2,           /* start/end times per rep */
+          sumT,             /* sum of all reps times */
+          avgT,             /* average time per rep in microseconds */
+          deltaT;           /* time for one rep */
+   char msg;                /* buffer containing 1 byte message */
+   MPI_Status status;       /* MPI receive routine parameter */
 
-MPI_Init(&argc,&argv);
-MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
-MPI_Comm_rank(MPI_COMM_WORLD,&rank);
-if (rank == 0 && numtasks != 2) {
-   printf("Number of tasks = %d\n",numtasks);
-   printf("Only need 2 tasks - extra will be ignored...\n");
-   }
-MPI_Barrier(MPI_COMM_WORLD);
+   MPI_Init(&argc,&argv);
+   MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
+   MPI_Comm_rank(MPI_COMM_WORLD,&rank);
+   if (rank == 0 && numtasks != 2) {
+      printf("Number of tasks = %d\n",numtasks);
+      printf("Only need 2 tasks - extra will be ignored...\n");
+   }
+   MPI_Barrier(MPI_COMM_WORLD);
 
-sumT = 0;
-msg = 'x';
-tag = 1;
-reps = NUMBER_REPS;
+   sumT = 0;
+   msg = 'x';
+   tag = 1;
+   reps = NUMBER_REPS;
 
-if (rank == 0) {
-   /* round-trip latency timing test */
-   printf("task %d has started...\n", rank);
-   printf("Beginning latency timing test. Number of reps = %d.\n", reps);
-   printf("***************************************************\n");
-   printf("Rep#  T1  T2  deltaT\n");
-   dest = 1;
-   source = 1;
-   for (n = 1; n <= reps; n++) {
-      T1 = MPI_Wtime();     /* start time */
-      /* send message to worker - message tag set to 1. */
-      /* If return code indicates error quit */
-      rc = MPI_Send(&msg, 1, MPI_BYTE, dest, tag, MPI_COMM_WORLD);
-      if (rc != MPI_SUCCESS) {
-         printf("Send error in task 0!\n");
-         MPI_Abort(MPI_COMM_WORLD, rc);
-         exit(1);
-         }
-      /* Now wait to receive the echo reply from the worker */
-      /* If return code indicates error quit */
-      rc = MPI_Recv(&msg, 1, MPI_BYTE, source, tag, MPI_COMM_WORLD,
-                    &status);
-      if (rc != MPI_SUCCESS) {
-         printf("Receive error in task 0!\n");
-         MPI_Abort(MPI_COMM_WORLD, rc);
-         exit(1);
-         }
-      T2 = MPI_Wtime();     /* end time */
+   if (rank == 0) {
+      /* round-trip latency timing test */
+      printf("task %d has started...\n", rank);
+      printf("Beginning latency timing test. Number of reps = %d.\n", reps);
+      printf("***************************************************\n");
+      printf("Rep#  T1  T2  deltaT\n");
+      dest = 1;
+      source = 1;
+      for (n = 1; n <= reps; n++) {
+         T1 = MPI_Wtime();     /* start time */
+         /* send message to worker - message tag set to 1. */
+         /* If return code indicates error quit */
+         rc = MPI_Send(&msg, 1, MPI_BYTE, dest, tag, MPI_COMM_WORLD);
+         if (rc != MPI_SUCCESS) {
+            printf("Send error in task 0!\n");
+            MPI_Abort(MPI_COMM_WORLD, rc);
+            exit(1);
+         }
+         /* Now wait to receive the echo reply from the worker */
+         /* If return code indicates error quit */
+         rc = MPI_Recv(&msg, 1, MPI_BYTE, source, tag, MPI_COMM_WORLD,
+                       &status);
+         if (rc != MPI_SUCCESS) {
+            printf("Receive error in task 0!\n");
+            MPI_Abort(MPI_COMM_WORLD, rc);
+            exit(1);
+         }
+         T2 = MPI_Wtime();     /* end time */
 
-      /* calculate round trip time and print */
-      deltaT = T2 - T1;
-      printf("%4d  %8.8f  %8.8f  %2.8f\n", n, T1, T2, deltaT);
-      sumT += deltaT;
-      }
-   avgT = ((double)sumT*1000000)/(double)reps;
-   printf("***************************************************\n");
-   printf("\n*** Avg round trip time = %.3f microseconds\n", avgT);
-   printf("*** Avg one way latency = %.3f microseconds\n", avgT/2);
-   }
+         /* calculate round trip time and print */
+         deltaT = T2 - T1;
+         printf("%4d  %8.8f  %8.8f  %2.8f\n", n, T1, T2, deltaT);
+         sumT += deltaT;
+      }
+      avgT = ((double)sumT*1000000)/(double)reps;
+      printf("***************************************************\n");
+      printf("\n*** Avg round trip time = %.3f microseconds\n", avgT);
+      printf("*** Avg one way latency = %.3f microseconds\n", avgT/2);
+   }
 
-else if (rank == 1) {
-   printf("task %d has started...\n", rank);
-   dest = 0;
-   source = 0;
-   for (n = 1; n <= reps; n++) {
-      rc = MPI_Recv(&msg, 1, MPI_BYTE, source, tag, MPI_COMM_WORLD,
-                    &status);
-      if (rc != MPI_SUCCESS) {
-         printf("Receive error in task 1!\n");
-         MPI_Abort(MPI_COMM_WORLD, rc);
-         exit(1);
-         }
-      rc = MPI_Send(&msg, 1, MPI_BYTE, dest, tag, MPI_COMM_WORLD);
-      if (rc != MPI_SUCCESS) {
-         printf("Send error in task 1!\n");
-         MPI_Abort(MPI_COMM_WORLD, rc);
-         exit(1);
-         }
-      }
-   }
+   else if (rank == 1) {
+      printf("task %d has started...\n", rank);
+      dest = 0;
+      source = 0;
+      for (n = 1; n <= reps; n++) {
+         rc = MPI_Recv(&msg, 1, MPI_BYTE, source, tag, MPI_COMM_WORLD,
+                       &status);
+         if (rc != MPI_SUCCESS) {
+            printf("Receive error in task 1!\n");
+            MPI_Abort(MPI_COMM_WORLD, rc);
+            exit(1);
+         }
+         rc = MPI_Send(&msg, 1, MPI_BYTE, dest, tag, MPI_COMM_WORLD);
+         if (rc != MPI_SUCCESS) {
+            printf("Send error in task 1!\n");
+            MPI_Abort(MPI_COMM_WORLD, rc);
+            exit(1);
+         }
+      }
+   }
 
-MPI_Finalize();
-exit(0);
+   MPI_Finalize();
+
+   return 0;
 }
diff --git a/lab3/simple_1d_topology.c b/lab3/simple_1d_topology.c
index dd11942aa6caecab36dd754d6911698c512399f7..512a08879017c32ed4995b2f1498c3764680c265 100644
--- a/lab3/simple_1d_topology.c
+++ b/lab3/simple_1d_topology.c
@@ -1,14 +1,14 @@
 #include "mpi.h"
 #include <stdio.h>
 #include <stdlib.h>
 
-int main(int argc, char *argv[]){
-
+int main(int argc, char *argv[])
+{
   int nprocs,myid,period,cart_id;
   int plus_one,minus_one,cart_position;
-  MPI_Comm cart_comm ;
+  MPI_Comm cart_comm;
 
-  /* initialize MPI  */
+  /* initialize MPI */
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
 
@@ -26,5 +26,7 @@ int main(int argc, char *argv[]){
   printf("myid = %d cart_id=%d cart_position=%d plus_one=%d minus_one=%d\n",myid,cart_id,cart_position,plus_one,minus_one);
 
   MPI_Finalize();
+
+  return 0;
 }
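Note on the /* FIXME */ in game_of_life-one_sided-prototype.c: the patch deliberately leaves the one-sided ghost-row exchange to be filled in as the lab exercise. A minimal sketch of one possible solution follows. It assumes old1d is the contiguous (ni+2)*(nj+2) int buffer backing old, that local row r starts at offset r*(nj+2), and that every rank owns the same number ni of real rows; the names win and rowlen are invented for the sketch and do not appear in the file.

    /* Sketch only, not the lab's reference solution. In real code the
       window would be created once before the time-step loop and freed
       after it; both calls are shown inline to keep the sketch
       self-contained. */
    int rowlen = nj + 2;            /* one grid row, ghost columns included */
    MPI_Win win;

    /* expose the whole local grid, ghost rows included, to the other ranks */
    MPI_Win_create(old1d, (MPI_Aint)(ni + 2) * rowlen * sizeof(int),
                   sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_fence(0, win);
    /* my first real row (row 1) becomes the bottom ghost row (row ni+1)
       of above_rank */
    MPI_Put(&old1d[1 * rowlen], rowlen, MPI_INT, above_rank,
            (MPI_Aint)(ni + 1) * rowlen, rowlen, MPI_INT, win);
    /* my last real row (row ni) becomes the top ghost row (row 0)
       of below_rank */
    MPI_Put(&old1d[ni * rowlen], rowlen, MPI_INT, below_rank,
            (MPI_Aint)0, rowlen, MPI_INT, win);
    MPI_Win_fence(0, win);

    MPI_Win_free(&win);

The two fences bracket the access epoch: the first separates the puts from earlier local stores to the grid, and the second guarantees both ghost rows have arrived before the update step reads them.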
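The simple_1d_topology.c hunks above only touch braces and whitespace, so the Cartesian-topology calls themselves are not visible in this diff. For orientation only, a typical 1D periodic setup consistent with the variables the file prints (cart_comm, cart_id, cart_position, plus_one, minus_one) might look like the sketch below; the actual lines in the file may differ, and the dims/periods arrays are sketch variables (the file declares a scalar period, which could be passed as &period since ndims is 1).

    int dims[1]    = { nprocs };   /* one dimension spanning all ranks */
    int periods[1] = { 1 };        /* periodic: rank 0 and nprocs-1 are neighbours */

    /* build the ring and let MPI reorder ranks if it helps the hardware */
    MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &cart_comm);
    MPI_Comm_rank(cart_comm, &cart_id);
    MPI_Cart_coords(cart_comm, cart_id, 1, &cart_position);
    /* neighbours one step down (minus_one) and up (plus_one) the ring */
    MPI_Cart_shift(cart_comm, 0, 1, &minus_one, &plus_one);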