// asynch version - illustrates various forms of message passing in MPI
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
// timings added.

#define SN 100019

int myid;
int sbuf[200000];             // for buffered send.

/* this program adds up an array of SN randomly generated integers */

void genarray(int A[], int iv, int procs);
void suminterval(int iv, int *sum);
void collect(int A[], int procs, int *sum);
int csum(int A[]);

int main(int argc, char* argv[])
{
    double t1, t2, t3, t4;    // for timing measurements
    int A[SN];                // array
    int sum = 0;              // the overall sum
    int rank, size, iv;
    int regsum;               // conventional result

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (argc > 1) srandom(atoi(argv[1]));
    myid = rank;
    iv = (SN/(size-1));       // size of interval sent to procs 1+
                              // remainder will be summed by rank 0 process.
    if (rank == 0) {
        genarray(A, iv, size);          // generate and broadcast array
        t1 = MPI_Wtime();
        regsum = csum(A);
        t2 = MPI_Wtime();
    }
    else
        suminterval(iv, &sum);
    if (rank == 0) {
        t3 = MPI_Wtime();
        collect(A, size, &sum);
        t4 = MPI_Wtime();
        printf("correct sum is %d\n", regsum);
        printf("mp sum is %d\n", sum);
        printf("time for conventional sum: %f\n", t2-t1);
        printf("time for parallel sum: %f\n", t4-t3);
    }
    MPI_Finalize();
}

void genarray(int A[], int iv, int procs)
{
    int i, x;
    MPI_Status stat;
    MPI_Request sreq[128];    // assume procs is less than 128
    for (i = 0; i
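    /* ------------------------------------------------------------------
     * The captured listing breaks off here, partway through genarray's
     * first loop; the bodies of genarray, suminterval, collect, and csum
     * were not preserved.  The code below is only a sketch of how those
     * helpers might be written, inferred from the comments in main: rank 0
     * fills A with random values and hands an interval of iv elements to
     * each of ranks 1..procs-1 with non-blocking sends (hence sreq), each
     * worker sums its interval and returns the partial sum (sbuf suggests
     * a buffered send), and collect adds the partial sums plus rank 0's
     * leftover elements.  The message tags, the random() modulus, and the
     * particular MPI calls chosen are assumptions, not the original code.
     * ------------------------------------------------------------------ */
                  < SN; i++) {          /* assumed: fill the whole array   */
        x = random() % 1000;            /* assumed value range             */
        A[i] = x;
    }
    for (i = 1; i < procs; i++)         /* assumed: one non-blocking send  */
        MPI_Isend(&A[(i-1)*iv], iv, MPI_INT, i, 0,  /* of an iv-long slice */
                  MPI_COMM_WORLD, &sreq[i]);
    for (i = 1; i < procs; i++)         /* wait for the sends to complete  */
        MPI_Wait(&sreq[i], &stat);
}

/* Sketch: a worker receives its interval, sums it, and sends the partial
   sum back to rank 0 with a buffered send through the global sbuf. */
void suminterval(int iv, int *sum)
{
    int i, buf[SN];                     /* iv <= SN, so this is big enough */
    int bsize;
    void *bptr;
    MPI_Status stat;

    *sum = 0;
    MPI_Recv(buf, iv, MPI_INT, 0, 0, MPI_COMM_WORLD, &stat);
    for (i = 0; i < iv; i++)
        *sum += buf[i];
    MPI_Buffer_attach(sbuf, sizeof(sbuf));
    MPI_Bsend(sum, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
    MPI_Buffer_detach(&bptr, &bsize);   /* blocks until the message is out */
}

/* Sketch: rank 0 gathers the workers' partial sums and adds the leftover
   elements that were never sent out. */
void collect(int A[], int procs, int *sum)
{
    int i, part;
    int iv = SN / (procs - 1);          /* same interval size as in main   */
    MPI_Status stat;

    for (i = 1; i < procs; i++) {
        MPI_Recv(&part, 1, MPI_INT, MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &stat);
        *sum += part;
    }
    for (i = (procs-1)*iv; i < SN; i++) /* remainder kept by rank 0        */
        *sum += A[i];
}

/* Sketch: conventional single-process sum, used as the reference result. */
int csum(int A[])
{
    int i, s = 0;

    for (i = 0; i < SN; i++)
        s += A[i];
    return s;
}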