| 1 | #include <stdio.h>
|
|---|
| 2 | #include <stdlib.h>
|
|---|
| 3 | #include <mpi.h>
|
|---|
| 4 |
|
|---|
/*
! This program shows how to use MPI_Alltoallv. Each processor
! sends/receives a different, random amount of data to/from the
! other processors.
! We use MPI_Alltoall to tell each processor how much data is
! going to be sent to it.
*/
|
|---|
| 11 | /* globals */
|
|---|
| 12 | int numnodes,myid,mpi_err;
|
|---|
| 13 | #define mpi_root 0
|
|---|
| 14 | /* end module */
|
|---|
| 15 |
|
|---|
| 16 | void init_it(int *argc, char ***argv);
|
|---|
| 17 | void seed_random(int id);
|
|---|
| 18 | void random_number(float *z);
|
|---|
| 19 |
|
|---|
/*
 * Initialize the MPI runtime and fill in the file-scope globals:
 * numnodes (communicator size) and myid (this process's rank in
 * MPI_COMM_WORLD). Must be called once, before any other MPI call.
 * Return codes are stored in the global mpi_err but are not checked.
 */
void init_it(int *argc, char ***argv) {
    mpi_err = MPI_Init(argc,argv);
    mpi_err = MPI_Comm_size( MPI_COMM_WORLD, &numnodes );
    mpi_err = MPI_Comm_rank(MPI_COMM_WORLD, &myid);
}
|
|---|
| 25 |
|
|---|
| 26 | int main(int argc,char *argv[]){
|
|---|
| 27 | int *sray,*rray;
|
|---|
| 28 | int *sdisp,*scounts,*rdisp,*rcounts;
|
|---|
| 29 | int ssize,rsize,i,k,j;
|
|---|
| 30 | float z;
|
|---|
| 31 |
|
|---|
| 32 | init_it(&argc,&argv);
|
|---|
| 33 | scounts=(int*)malloc(sizeof(int)*numnodes);
|
|---|
| 34 | rcounts=(int*)malloc(sizeof(int)*numnodes);
|
|---|
| 35 | sdisp=(int*)malloc(sizeof(int)*numnodes);
|
|---|
| 36 | rdisp=(int*)malloc(sizeof(int)*numnodes);
|
|---|
| 37 | /*
|
|---|
| 38 | ! seed the random number generator with a
|
|---|
| 39 | ! different number on each processor
|
|---|
| 40 | */
|
|---|
| 41 | seed_random(myid);
|
|---|
| 42 | /* find out how much data to send */
|
|---|
| 43 | for(i=0;i<numnodes;i++){
|
|---|
| 44 | random_number(&z);
|
|---|
| 45 | scounts[i]=(int)(5.0*z)+1;
|
|---|
| 46 | }
|
|---|
| 47 | printf("myid= %d scounts=%d %d %d %d\n",myid,scounts[0],scounts[1],scounts[2],scounts[3]);
|
|---|
| 48 | for(i=0;i<numnodes;i++)
|
|---|
| 49 | printf("%d ",scounts[i]);
|
|---|
| 50 | printf("\n");
|
|---|
| 51 | /* tell the other processors how much data is coming */
|
|---|
| 52 | mpi_err = MPI_Alltoall( scounts,1,MPI_INT,
|
|---|
| 53 | rcounts,1,MPI_INT,
|
|---|
| 54 | MPI_COMM_WORLD);
|
|---|
| 55 | /* write(*,*)"myid= ",myid," rcounts= ",rcounts */
|
|---|
| 56 | /* calculate displacements and the size of the arrays */
|
|---|
| 57 | sdisp[0]=0;
|
|---|
| 58 | for(i=1;i<numnodes;i++){
|
|---|
| 59 | sdisp[i]=scounts[i-1]+sdisp[i-1];
|
|---|
| 60 | }
|
|---|
| 61 | rdisp[0]=0;
|
|---|
| 62 | for(i=1;i<numnodes;i++){
|
|---|
| 63 | rdisp[i]=rcounts[i-1]+rdisp[i-1];
|
|---|
| 64 | }
|
|---|
| 65 | ssize=0;
|
|---|
| 66 | rsize=0;
|
|---|
| 67 | for(i=0;i<numnodes;i++){
|
|---|
| 68 | ssize=ssize+scounts[i];
|
|---|
| 69 | rsize=rsize+rcounts[i];
|
|---|
| 70 | }
|
|---|
| 71 |
|
|---|
| 72 | /* allocate send and rec arrays */
|
|---|
| 73 | sray=(int*)malloc(sizeof(int)*ssize);
|
|---|
| 74 | rray=(int*)malloc(sizeof(int)*rsize);
|
|---|
| 75 | for(i=0;i<ssize;i++)
|
|---|
| 76 | sray[i]=myid;
|
|---|
| 77 | /* send/rec different amounts of data to/from each processor */
|
|---|
| 78 | mpi_err = MPI_Alltoallv( sray,scounts,sdisp,MPI_INT,
|
|---|
| 79 | rray,rcounts,rdisp,MPI_INT,
|
|---|
| 80 | MPI_COMM_WORLD);
|
|---|
| 81 |
|
|---|
| 82 | printf("myid= %d rray=",myid);
|
|---|
| 83 | for(i=0;i<rsize;i++)
|
|---|
| 84 | printf("%d ",rray[i]);
|
|---|
| 85 | printf("\n");
|
|---|
| 86 | mpi_err = MPI_Finalize();
|
|---|
| 87 | }
|
|---|
/*
Sample output from a 3-process run. The first line for each rank shows
its random scounts. NOTE(review): the second line per rank appears to be
the received rray data (each value names the sending rank) despite being
labelled "scounts" — likely a transcript from an older print statement.
0:myid= 0 scounts=1 7 4
0:myid= 0 scounts=0 1 1 1 1 1 1 2
1:myid= 1 scounts=6 2 4
1:myid= 1 scounts=0 0 0 0 0 0 0 1 1 2 2 2 2 2 2 2
2:myid= 2 scounts=1 7 4
2:myid= 2 scounts=0 0 0 0 1 1 1 1 2 2 2 2
*/
|
|---|
| 96 |
|
|---|
/*
 * Seed the C library PRNG from the given id (here: the MPI rank),
 * so that each calling process draws its own random sequence.
 */
void seed_random(int id) {
    unsigned int seed;

    seed = (unsigned int)id;
    srand(seed);
}
|
|---|
/*
 * Store a pseudo-random float in [0.0, 1.0] through *z,
 * by scaling a rand() draw into the unit interval.
 */
void random_number(float *z) {
    *z = (float)rand() / RAND_MAX;
}
|
|---|