Loading jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_1.cc 0 → 100644 +88 −0 Original line number Diff line number Diff line #include <iostream> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #define NELEMENTS 7 using namespace std; int main(int argc, char ** argv) { int rank, size; int buf[NELEMENTS]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (rank == 0) { for(int i = 0; i < NELEMENTS; i++) buf[i] = 1 + i*i; } MPI_Bcast(buf, NELEMENTS, MPI_INT, 0, MPI_COMM_WORLD); // for(rank = 1; rank < size; rank++) // { // for(int i = 0; i < NELEMENTS; i++) // { // cout<<"buf["<<i<<"] = "<<buf[i]<<endl; // } // cout<<endl; // } if (rank == 0) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 1) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 2) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 3) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } // if (rank == 0) { // cout<<"I am processor "<<rank<<endl; // cout<<endl; // for(int i = 0; i < NELEMENTS; i++) // { // cout<<"buf["<<i<<"] = "<<buf[i]<<endl; // } // } MPI_Finalize(); return 0; } jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_2.cc 0 → 100644 +99 −0 Original line number Diff line number Diff line #include <iostream> #include <mpi.h> #include <stdio.h> #include <stdlib.h> //#define STR_LENGTH 32 #define STR_LENGTH 21 using namespace std; int main(int argc, char ** argv) { int rank, size; /* NONONONONO*/ // char string[STR_LENGTH]; // string[] = {'H','e','l','l','o',',',' ','I',' ','a','m',' ','p','r','o','c','e','s','s','o','r'}; // 
for(int i = 0; i < STR_LENGTH; i++) string[i] = "Hello, I'm processor"; // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]<<endl;; // char mystring[] = { 'H', 'e', 'l', 'l', 'o', '\0' }; // for(int i = 0; i < STR_LENGTH; i++) /* NONONONONO*/ char mystring[] = {'H','e','l','l','o',',',' ','I',' ','a','m',' ','p','r','o','c','e','s','s','o','r','\0'}; cout<<mystring<<endl; char string[STR_LENGTH]; // for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; // // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // for(int i = 0; i < STR_LENGTH; i++) cout<<mystring[i]; // cout<<endl; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // if (rank == 0) { // for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<endl; // } if (rank == 0) { for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; cout<<endl; } MPI_Bcast(string, STR_LENGTH, MPI_CHAR, 0, MPI_COMM_WORLD); // if (rank == 0) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 1) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 2) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 3) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // for(rank = 0; rank < size; rank++) for(int n = 0; n < size; n++) { for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; cout<<" "<<rank<<endl; } MPI_Finalize(); return 0; } jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Gather_Scatter.c 0 → 100644 +36 −0 Original line number Diff line number Diff line #include <mpi.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char ** argv) { int n, rank, size; double 
data; double *send_buf, *recv_buf; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); recv_buf = (double *) malloc(size*sizeof(double)); // allocate memory send_buf = (double *) malloc(size*sizeof(double)); data = rank*rank + 1.0; // generate data on different procs MPI_Gather(&data, 1, MPI_DOUBLE, recv_buf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); if (rank == 0){ printf ("[Gather()]:\n"); for (n = 0; n < size; n++) printf ("rnd[%d] = %f\n",n,recv_buf[n]); } if (rank == 0){ for (n = 0; n < size; n++) send_buf[n] = n*n - 1.0; // Generate “size” random numbers } MPI_Scatter(send_buf, 1, MPI_DOUBLE, &data, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); printf ("[Scatter, proc #%d] = %f\n",rank,data); MPI_Finalize(); return 0; } jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Heat_Equation_Gather.c 0 → 100644 +251 −0 Original line number Diff line number Diff line /* ///////////////////////////////////////////////////////////////////// */ /*! \file \brief Solve 1D heat equation. Solve the 1D heat equation using a 1st order explicit method on a parallel domain. \author A. Mignone (mignone@to.infn.it) \date March 12, 2020 */ /* ///////////////////////////////////////////////////////////////////// */ #include <stdio.h> #include <stdlib.h> #include <math.h> #define PARALLEL #ifdef PARALLEL #include <mpi.h> #endif #define NX_GLOB 64 /* Global number of interior points */ #define NGHOST 1 void Write (double *, double *, int, int); int main(int argc, char ** argv) { int i, k, beg, end; int nx_loc; /* Local grid size */ int dstL = -1, dstR=-1; /* Rank of left and right neighbour procs */ int rank=0, size=1; double t, tstop, dt, cfl = 0.5; double *u0; double *u1; double xbeg = 0.0; double xend = +1.0; double xglob[NX_GLOB + 2*NGHOST]; // Global grid array double *xloc; double dx; /* Mesh spacing */ #ifdef PARALLEL double *send_buf; double *recv_buf; #endif FILE *fp; /* -------------------------------------------------------- 0. 
Initialize parallel environment & get neighbour proc rank -------------------------------------------------------- */ #ifdef PARALLEL MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); dstL = rank - 1; dstR = rank + 1; if (dstL < 0) dstL = MPI_PROC_NULL; if (dstR >= size) dstR = MPI_PROC_NULL; #endif /* -------------------------------------------------------- 1. Generate global & local grids -------------------------------------------------------- */ #ifdef PARALLEL nx_loc = NX_GLOB/size; beg = NGHOST; end = beg + nx_loc - 1; dx = (xend - xbeg)/(NX_GLOB+1); for (i = 0; i < NX_GLOB + 2*NGHOST; i++){ xglob[i] = xbeg + (i-beg+1)*dx; } xloc = xglob + nx_loc*rank; /* Use pointer arithmetic */ #else nx_loc = NX_GLOB; beg = NGHOST; end = beg + nx_loc - 1; dx = (xend - xbeg)/(NX_GLOB+1); for (i = 0; i < NX_GLOB + 2*NGHOST; i++){ xglob[i] = xbeg + (i-beg+1)*dx; } xloc = xglob; /* Use pointer arithmetic */ #endif /* -------------------------------------------------------- 2. Allocate memory on local grids -------------------------------------------------------- */ u0 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double)); u1 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double)); #ifdef PARALLEL { int proc, go; for (proc = 0; proc < size; proc++){ go = proc; MPI_Bcast(&go, 1, MPI_INT, 0, MPI_COMM_WORLD); if (rank == go) { printf ("[Rank %d]\n",rank); printf (" dstL = %d, dstR = %d\n",dstL, dstR); printf (" beg, end = %d, %d; x = [%f, %f]\n", beg, end, xloc[beg],xloc[end]); } MPI_Barrier(MPI_COMM_WORLD); } } #endif /* -------------------------------------------------------- 3. Set initial condition -------------------------------------------------------- */ for (i = beg; i <= end; i++){ u0[i] = sin(M_PI*xloc[i]); } /* -------------------------------------------------------- 4. 
Advance solution -------------------------------------------------------- */ t = 0.0; tstop = 0.1; dt = cfl*dx*dx; k = 0; Write (xloc, u0, beg, end); while (t < tstop){ if (rank == 0){ printf ("step #%d; t = %8.3e\n",k,t); } /* -- 4a. Set physical boundary conditions -- */ if (dstL == MPI_PROC_NULL){ u0[beg-1] = 0.0; } if (dstR == MPI_PROC_NULL){ u0[end+1] = 0.0; } /* -- 4b. Set inter-process boundary conditions -- */ #ifdef PARALLEL send_buf = u0 + end - (NGHOST - 1); // Address of rightmost interior point recv_buf = u0 + 0; // Address of leftmost ghost zone MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstR, 0, recv_buf, NGHOST, MPI_DOUBLE, dstL, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); send_buf = u0 + beg; // Address of leftmost interior point recv_buf = u0 + end + 1; // Address of first ghost zone on the right MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstL, 0, recv_buf, NGHOST, MPI_DOUBLE, dstR, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); #endif /* -- 4c. Advance solution by one time step -- */ for (i = beg; i <= end; i++){ u1[i] = u0[i] + dt/(dx*dx)*(u0[i-1] - 2.0*u0[i] + u0[i+1]); } t += dt; k++; /* -- 4d. Copy arrays for next time level -- */ for (i = beg; i <= end; i++) u0[i] = u1[i]; } Write (xloc, u0, beg, end); #ifdef PARALLEL MPI_Finalize(); #endif return 0; } /* ********************************************************************* */ void Write (double *x, double *u, int beg, int end) /* *********************************************************************** */ { int i; int rank; static int n = 0; /* File number */ FILE *fp; char fname[32]; /* -------------------------------------------------------- 1. Serial output -------------------------------------------------------- */ #ifndef PARALLEL sprintf (fname,"heat_eq%02d.dat",n); fp = fopen (fname,"w"); for (i = beg; i <= end; i++) fprintf (fp, "%12.6e %12.6e\n", x[i], u[i]); fclose(fp); #endif /* -------------------------------------------------------- 2. 
Parallel output -------------------------------------------------------- */ #ifdef PARALLEL /* -- 2a. Process #0 gathers data and does the writing -- */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); int nx_loc = end - beg + 1; static double *recv_buf; if (recv_buf == NULL) { recv_buf = (double *) malloc((NX_GLOB + 2*NGHOST)*sizeof(double)); } MPI_Gather (u + beg, nx_loc, MPI_DOUBLE, recv_buf + beg, nx_loc, MPI_DOUBLE, 0, MPI_COMM_WORLD); if (rank == 0){ sprintf (fname,"heat_eq%02d.dat",n); fp = fopen (fname,"w"); for (i = beg; i < beg+NX_GLOB; i++) { fprintf (fp, "%f %f\n", x[i], recv_buf[i]); } fclose(fp); } /* -- 2b. Shared file pointer -- */ /* -- 2c. Individual file pointer -- */ #endif n++; } /* MAPLE Script: restart; u := A*exp(-D*mu^2*t)*sin(mu*x + B) + C; eq := diff(u,t) - D*diff(diff(u,x),x); simplify(eq); */ jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/lecture_Broadcast_Gather_Scatter.pdf 0 → 100644 +1.99 MiB File added.No diff preview for this file type. View file Loading
jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_1.cc 0 → 100644 +88 −0 Original line number Diff line number Diff line #include <iostream> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #define NELEMENTS 7 using namespace std; int main(int argc, char ** argv) { int rank, size; int buf[NELEMENTS]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if (rank == 0) { for(int i = 0; i < NELEMENTS; i++) buf[i] = 1 + i*i; } MPI_Bcast(buf, NELEMENTS, MPI_INT, 0, MPI_COMM_WORLD); // for(rank = 1; rank < size; rank++) // { // for(int i = 0; i < NELEMENTS; i++) // { // cout<<"buf["<<i<<"] = "<<buf[i]<<endl; // } // cout<<endl; // } if (rank == 0) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 1) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 2) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } cout<<endl; if (rank == 3) { cout<<"I am processor "<<rank<<endl; cout<<endl; for(int i = 0; i < NELEMENTS; i++) { cout<<"buf["<<i<<"] = "<<buf[i]<<endl; } } // if (rank == 0) { // cout<<"I am processor "<<rank<<endl; // cout<<endl; // for(int i = 0; i < NELEMENTS; i++) // { // cout<<"buf["<<i<<"] = "<<buf[i]<<endl; // } // } MPI_Finalize(); return 0; }
jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_2.cc 0 → 100644 +99 −0 Original line number Diff line number Diff line #include <iostream> #include <mpi.h> #include <stdio.h> #include <stdlib.h> //#define STR_LENGTH 32 #define STR_LENGTH 21 using namespace std; int main(int argc, char ** argv) { int rank, size; /* NONONONONO*/ // char string[STR_LENGTH]; // string[] = {'H','e','l','l','o',',',' ','I',' ','a','m',' ','p','r','o','c','e','s','s','o','r'}; // for(int i = 0; i < STR_LENGTH; i++) string[i] = "Hello, I'm processor"; // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]<<endl;; // char mystring[] = { 'H', 'e', 'l', 'l', 'o', '\0' }; // for(int i = 0; i < STR_LENGTH; i++) /* NONONONONO*/ char mystring[] = {'H','e','l','l','o',',',' ','I',' ','a','m',' ','p','r','o','c','e','s','s','o','r','\0'}; cout<<mystring<<endl; char string[STR_LENGTH]; // for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; // // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // for(int i = 0; i < STR_LENGTH; i++) cout<<mystring[i]; // cout<<endl; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); // if (rank == 0) { // for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<endl; // } if (rank == 0) { for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i]; for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; cout<<endl; } MPI_Bcast(string, STR_LENGTH, MPI_CHAR, 0, MPI_COMM_WORLD); // if (rank == 0) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 1) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 2) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // cout<<" "<<rank<<endl; // } // // cout<<endl; // // if (rank == 3) { // for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; // 
cout<<" "<<rank<<endl; // } // // cout<<endl; // for(rank = 0; rank < size; rank++) for(int n = 0; n < size; n++) { for(int i = 0; i < STR_LENGTH; i++) cout<<string[i]; cout<<" "<<rank<<endl; } MPI_Finalize(); return 0; }
jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Gather_Scatter.c 0 → 100644 +36 −0 Original line number Diff line number Diff line #include <mpi.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char ** argv) { int n, rank, size; double data; double *send_buf, *recv_buf; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); recv_buf = (double *) malloc(size*sizeof(double)); // allocate memory send_buf = (double *) malloc(size*sizeof(double)); data = rank*rank + 1.0; // generate data on different procs MPI_Gather(&data, 1, MPI_DOUBLE, recv_buf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); if (rank == 0){ printf ("[Gather()]:\n"); for (n = 0; n < size; n++) printf ("rnd[%d] = %f\n",n,recv_buf[n]); } if (rank == 0){ for (n = 0; n < size; n++) send_buf[n] = n*n - 1.0; // Generate “size” random numbers } MPI_Scatter(send_buf, 1, MPI_DOUBLE, &data, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); printf ("[Scatter, proc #%d] = %f\n",rank,data); MPI_Finalize(); return 0; }
jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Heat_Equation_Gather.c 0 → 100644 +251 −0 Original line number Diff line number Diff line /* ///////////////////////////////////////////////////////////////////// */ /*! \file \brief Solve 1D heat equation. Solve the 1D heat equation using a 1st order explicit method on a parallel domain. \author A. Mignone (mignone@to.infn.it) \date March 12, 2020 */ /* ///////////////////////////////////////////////////////////////////// */ #include <stdio.h> #include <stdlib.h> #include <math.h> #define PARALLEL #ifdef PARALLEL #include <mpi.h> #endif #define NX_GLOB 64 /* Global number of interior points */ #define NGHOST 1 void Write (double *, double *, int, int); int main(int argc, char ** argv) { int i, k, beg, end; int nx_loc; /* Local grid size */ int dstL = -1, dstR=-1; /* Rank of left and right neighbour procs */ int rank=0, size=1; double t, tstop, dt, cfl = 0.5; double *u0; double *u1; double xbeg = 0.0; double xend = +1.0; double xglob[NX_GLOB + 2*NGHOST]; // Global grid array double *xloc; double dx; /* Mesh spacing */ #ifdef PARALLEL double *send_buf; double *recv_buf; #endif FILE *fp; /* -------------------------------------------------------- 0. Initialize parallel environment & get neighbour proc rank -------------------------------------------------------- */ #ifdef PARALLEL MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); dstL = rank - 1; dstR = rank + 1; if (dstL < 0) dstL = MPI_PROC_NULL; if (dstR >= size) dstR = MPI_PROC_NULL; #endif /* -------------------------------------------------------- 1. 
Generate global & local grids -------------------------------------------------------- */ #ifdef PARALLEL nx_loc = NX_GLOB/size; beg = NGHOST; end = beg + nx_loc - 1; dx = (xend - xbeg)/(NX_GLOB+1); for (i = 0; i < NX_GLOB + 2*NGHOST; i++){ xglob[i] = xbeg + (i-beg+1)*dx; } xloc = xglob + nx_loc*rank; /* Use pointer arithmetic */ #else nx_loc = NX_GLOB; beg = NGHOST; end = beg + nx_loc - 1; dx = (xend - xbeg)/(NX_GLOB+1); for (i = 0; i < NX_GLOB + 2*NGHOST; i++){ xglob[i] = xbeg + (i-beg+1)*dx; } xloc = xglob; /* Use pointer arithmetic */ #endif /* -------------------------------------------------------- 2. Allocate memory on local grids -------------------------------------------------------- */ u0 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double)); u1 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double)); #ifdef PARALLEL { int proc, go; for (proc = 0; proc < size; proc++){ go = proc; MPI_Bcast(&go, 1, MPI_INT, 0, MPI_COMM_WORLD); if (rank == go) { printf ("[Rank %d]\n",rank); printf (" dstL = %d, dstR = %d\n",dstL, dstR); printf (" beg, end = %d, %d; x = [%f, %f]\n", beg, end, xloc[beg],xloc[end]); } MPI_Barrier(MPI_COMM_WORLD); } } #endif /* -------------------------------------------------------- 3. Set initial condition -------------------------------------------------------- */ for (i = beg; i <= end; i++){ u0[i] = sin(M_PI*xloc[i]); } /* -------------------------------------------------------- 4. Advance solution -------------------------------------------------------- */ t = 0.0; tstop = 0.1; dt = cfl*dx*dx; k = 0; Write (xloc, u0, beg, end); while (t < tstop){ if (rank == 0){ printf ("step #%d; t = %8.3e\n",k,t); } /* -- 4a. Set physical boundary conditions -- */ if (dstL == MPI_PROC_NULL){ u0[beg-1] = 0.0; } if (dstR == MPI_PROC_NULL){ u0[end+1] = 0.0; } /* -- 4b. 
Set inter-process boundary conditions -- */ #ifdef PARALLEL send_buf = u0 + end - (NGHOST - 1); // Address of rightmost interior point recv_buf = u0 + 0; // Address of leftmost ghost zone MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstR, 0, recv_buf, NGHOST, MPI_DOUBLE, dstL, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); send_buf = u0 + beg; // Address of leftmost interior point recv_buf = u0 + end + 1; // Address of first ghost zone on the right MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstL, 0, recv_buf, NGHOST, MPI_DOUBLE, dstR, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); #endif /* -- 4c. Advance solution by one time step -- */ for (i = beg; i <= end; i++){ u1[i] = u0[i] + dt/(dx*dx)*(u0[i-1] - 2.0*u0[i] + u0[i+1]); } t += dt; k++; /* -- 4d. Copy arrays for next time level -- */ for (i = beg; i <= end; i++) u0[i] = u1[i]; } Write (xloc, u0, beg, end); #ifdef PARALLEL MPI_Finalize(); #endif return 0; } /* ********************************************************************* */ void Write (double *x, double *u, int beg, int end) /* *********************************************************************** */ { int i; int rank; static int n = 0; /* File number */ FILE *fp; char fname[32]; /* -------------------------------------------------------- 1. Serial output -------------------------------------------------------- */ #ifndef PARALLEL sprintf (fname,"heat_eq%02d.dat",n); fp = fopen (fname,"w"); for (i = beg; i <= end; i++) fprintf (fp, "%12.6e %12.6e\n", x[i], u[i]); fclose(fp); #endif /* -------------------------------------------------------- 2. Parallel output -------------------------------------------------------- */ #ifdef PARALLEL /* -- 2a. 
Process #0 gathers data and does the writing -- */ MPI_Comm_rank(MPI_COMM_WORLD, &rank); int nx_loc = end - beg + 1; static double *recv_buf; if (recv_buf == NULL) { recv_buf = (double *) malloc((NX_GLOB + 2*NGHOST)*sizeof(double)); } MPI_Gather (u + beg, nx_loc, MPI_DOUBLE, recv_buf + beg, nx_loc, MPI_DOUBLE, 0, MPI_COMM_WORLD); if (rank == 0){ sprintf (fname,"heat_eq%02d.dat",n); fp = fopen (fname,"w"); for (i = beg; i < beg+NX_GLOB; i++) { fprintf (fp, "%f %f\n", x[i], recv_buf[i]); } fclose(fp); } /* -- 2b. Shared file pointer -- */ /* -- 2c. Individual file pointer -- */ #endif n++; } /* MAPLE Script: restart; u := A*exp(-D*mu^2*t)*sin(mu*x + B) + C; eq := diff(u,t) - D*diff(diff(u,x),x); simplify(eq); */
jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/lecture_Broadcast_Gather_Scatter.pdf 0 → 100644 +1.99 MiB File added. No diff preview is available for this file type. View file