/**
 * This program illustrates MPI_Alltoallw.
 * online source: http://mpi.deino.net/mpi_functions/MPI_Alltoallw.html
 */
|
|---|
| 5 | #include "mpi.h"
|
|---|
| 6 | #include <stdlib.h>
|
|---|
| 7 | #include <stdio.h>
|
|---|
/*
   This program tests MPI_Alltoallw by having processor i send different
   amounts of data to each processor.
   The first test sends i items to processor i from all processors.
*/
|
|---|
| 13 | int main( int argc, char **argv )
|
|---|
| 14 | {
|
|---|
| 15 | MPI_Comm comm;
|
|---|
| 16 | int *sbuf, *rbuf;
|
|---|
| 17 | int rank, size;
|
|---|
| 18 | int *sendcounts, *recvcounts, *rdispls, *sdispls;
|
|---|
| 19 | int i, j, *p, err;
|
|---|
| 20 | MPI_Datatype *sendtypes, *recvtypes;
|
|---|
| 21 |
|
|---|
| 22 | MPI_Init( &argc, &argv );
|
|---|
| 23 | err = 0;
|
|---|
| 24 | comm = MPI_COMM_WORLD;
|
|---|
| 25 | /* Create the buffer */
|
|---|
| 26 | MPI_Comm_size( comm, &size );
|
|---|
| 27 | MPI_Comm_rank( comm, &rank );
|
|---|
| 28 | sbuf = (int *)malloc( size * size * sizeof(int) );
|
|---|
| 29 | rbuf = (int *)malloc( size * size * sizeof(int) );
|
|---|
| 30 | if (!sbuf || !rbuf) {
|
|---|
| 31 | fprintf( stderr, "Could not allocated buffers!\n" );
|
|---|
| 32 | fflush(stderr);
|
|---|
| 33 | MPI_Abort( comm, 1 );
|
|---|
| 34 | }
|
|---|
| 35 | /* Load up the buffers */
|
|---|
| 36 | for (i=0; i<size*size; i++) {
|
|---|
| 37 | sbuf[i] = i + 100*rank;
|
|---|
| 38 | rbuf[i] = -i;
|
|---|
| 39 | }
|
|---|
| 40 | /* Create and load the arguments to alltoallv */
|
|---|
| 41 | sendcounts = (int *)malloc( size * sizeof(int) );
|
|---|
| 42 | recvcounts = (int *)malloc( size * sizeof(int) );
|
|---|
| 43 | rdispls = (int *)malloc( size * sizeof(int) );
|
|---|
| 44 | sdispls = (int *)malloc( size * sizeof(int) );
|
|---|
| 45 | sendtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
|
|---|
| 46 | recvtypes = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
|
|---|
| 47 | if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
|
|---|
| 48 | fprintf( stderr, "Could not allocate arg items!\n" );
|
|---|
| 49 | fflush(stderr);
|
|---|
| 50 | MPI_Abort( comm, 1 );
|
|---|
| 51 | }
|
|---|
| 52 | /* Note that process 0 sends no data (sendcounts[0] = 0) */
|
|---|
| 53 | for (i=0; i<size; i++) {
|
|---|
| 54 | sendcounts[i] = i;
|
|---|
| 55 | recvcounts[i] = rank;
|
|---|
| 56 | rdispls[i] = i * rank * sizeof(int);
|
|---|
| 57 | sdispls[i] = (((i+1) * (i))/2) * sizeof(int);
|
|---|
| 58 | sendtypes[i] = recvtypes[i] = MPI_INT;
|
|---|
| 59 | }
|
|---|
| 60 | MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
|
|---|
| 61 | rbuf, recvcounts, rdispls, recvtypes, comm );
|
|---|
| 62 | /* Check rbuf */
|
|---|
| 63 | for (i=0; i<size; i++) {
|
|---|
| 64 | p = rbuf + rdispls[i]/sizeof(int);
|
|---|
| 65 | for (j=0; j<rank; j++) {
|
|---|
| 66 | if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
|
|---|
| [3273760] | 67 | printf("[%d] got %d expected %d for %dth\n",
|
|---|
| [6e48678] | 68 | rank, p[j],(i*(i+1))/2 + j, j );
|
|---|
| 69 | fflush(stderr);
|
|---|
| 70 | err++;
|
|---|
| 71 | }
|
|---|
| 72 | }
|
|---|
| 73 | }
|
|---|
| 74 | free( sendtypes );
|
|---|
| 75 | free( recvtypes );
|
|---|
| 76 | free( sdispls );
|
|---|
| 77 | free( rdispls );
|
|---|
| 78 | free( recvcounts );
|
|---|
| 79 | free( sendcounts );
|
|---|
| 80 | free( rbuf );
|
|---|
| 81 | free( sbuf );
|
|---|
| 82 | MPI_Finalize();
|
|---|
| 83 | return 0;
|
|---|
| 84 | }
|
|---|