/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.4 $
 ***********************************************************************EHEADER*/


#include "headers.h"

/*==========================================================================*/

hypre_ParCSRCommHandle *
hypre_ParCSRCommHandleCreate ( int                   job,
                               hypre_ParCSRCommPkg  *comm_pkg,
                               void                 *send_data,
                               void                 *recv_data )
{
   int                     num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int                     num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   MPI_Comm                comm      = hypre_ParCSRCommPkgComm(comm_pkg);

   hypre_ParCSRCommHandle *comm_handle;
   int                     num_requests;
   MPI_Request            *requests;

   int                     i, j;
   int                     my_id, num_procs;
   int                     ip, vec_start, vec_len;

   /*--------------------------------------------------------------------
    * hypre_ParCSRCommHandleCreate sets up a communication handle,
    * posts receives and initiates sends. It always requires num_sends,
    * num_recvs, recv_procs and send_procs to be set in comm_pkg.
    * There are different options for job:
    * job = 1 : is used to initialize communication exchange for the parts
    *           of vector needed to perform a Matvec, it requires send_data
    *           and recv_data to be doubles, recv_vec_starts and
    *           send_map_starts need to be set in comm_pkg.
    * job = 2 : is used to initialize communication exchange for the parts
    *           of vector needed to perform a MatvecT, it requires send_data
    *           and recv_data to be doubles, recv_vec_starts and
    *           send_map_starts need to be set in comm_pkg.
    * job = 11: similar to job = 1, but exchanges data of type int (not double),
    *           requires send_data and recv_data to be ints,
    *           recv_vec_starts and send_map_starts need to be set in comm_pkg.
    * job = 12: similar to job = 2, but exchanges data of type int (not double),
    *           requires send_data and recv_data to be ints,
    *           recv_vec_starts and send_map_starts need to be set in comm_pkg.
    * job = 21: similar to job = 1, but exchanges data of type HYPRE_BigInt
    *           (not double), requires send_data and recv_data to be HYPRE_BigInts,
    *           recv_vec_starts and send_map_starts need to be set in comm_pkg.
    * job = 22: similar to job = 2, but exchanges data of type HYPRE_BigInt
    *           (not double), requires send_data and recv_data to be HYPRE_BigInts,
    *           recv_vec_starts and send_map_starts need to be set in comm_pkg.
    * default : ignores send_data and recv_data, requires send_mpi_types
    *           and recv_mpi_types to be set in comm_pkg.
    *           datatypes need to point to absolute addresses,
    *           e.g. generated using MPI_Address.
    *--------------------------------------------------------------------*/

   num_requests = num_sends + num_recvs;
   requests = hypre_CTAlloc(MPI_Request, num_requests);

   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm, &my_id);

   j = 0;
   switch (job)
   {
      case 1:
      {
         double *d_send_data = (double *) send_data;
         double *d_recv_data = (double *) recv_data;
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Irecv(&d_recv_data[vec_start], vec_len, MPI_DOUBLE,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Isend(&d_send_data[vec_start], vec_len, MPI_DOUBLE,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      case 2:
      {
         double *d_send_data = (double *) send_data;
         double *d_recv_data = (double *) recv_data;
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Irecv(&d_recv_data[vec_start], vec_len, MPI_DOUBLE,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Isend(&d_send_data[vec_start], vec_len, MPI_DOUBLE,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      case 11:
      {
         int *i_send_data = (int *) send_data;
         int *i_recv_data = (int *) recv_data;
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Irecv(&i_recv_data[vec_start], vec_len, MPI_INT,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Isend(&i_send_data[vec_start], vec_len, MPI_INT,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      case 12:
      {
         int *i_send_data = (int *) send_data;
         int *i_recv_data = (int *) recv_data;
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Irecv(&i_recv_data[vec_start], vec_len, MPI_INT,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Isend(&i_send_data[vec_start], vec_len, MPI_INT,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      case 21:
      {
         HYPRE_BigInt *i_send_data = (HYPRE_BigInt *) send_data;
         HYPRE_BigInt *i_recv_data = (HYPRE_BigInt *) recv_data;
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Irecv(&i_recv_data[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Isend(&i_send_data[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      case 22:
      {
         HYPRE_BigInt *i_send_data = (HYPRE_BigInt *) send_data;
         HYPRE_BigInt *i_recv_data = (HYPRE_BigInt *) recv_data;
         for (i = 0; i < num_sends; i++)
         {
            vec_start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - vec_start;
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Irecv(&i_recv_data[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            vec_start = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
            vec_len = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i+1) - vec_start;
            MPI_Isend(&i_send_data[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                      ip, 0, comm, &requests[j++]);
         }
         break;
      }
      /* default :
      {
         for (i = 0; i < num_recvs; i++)
         {
            ip = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
            MPI_Irecv(MPI_BOTTOM, 1,
                      hypre_ParCSRCommPkgRecvMPIType(comm_pkg, i),
                      ip, 0, comm, &requests[j++]);
         }
         for (i = 0; i < num_sends; i++)
         {
            ip = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
            MPI_Isend(MPI_BOTTOM, 1,
                      hypre_ParCSRCommPkgSendMPIType(comm_pkg, i),
                      ip, 0, comm, &requests[j++]);
         }
         break;
      } */
   }
   /*--------------------------------------------------------------------
    * set up comm_handle and return
    *--------------------------------------------------------------------*/

   comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle, 1);

   hypre_ParCSRCommHandleCommPkg(comm_handle)     = comm_pkg;
   hypre_ParCSRCommHandleSendData(comm_handle)    = send_data;
   hypre_ParCSRCommHandleRecvData(comm_handle)    = recv_data;
   hypre_ParCSRCommHandleNumRequests(comm_handle) = num_requests;
   hypre_ParCSRCommHandleRequests(comm_handle)    = requests;

   return ( comm_handle );
}

int
hypre_ParCSRCommHandleDestroy( hypre_ParCSRCommHandle *comm_handle )
{
   MPI_Status *status0;
   int         ierr = 0;

   if ( comm_handle == NULL ) return ierr;
   if (hypre_ParCSRCommHandleNumRequests(comm_handle))
   {
      status0 = hypre_CTAlloc(MPI_Status,
                              hypre_ParCSRCommHandleNumRequests(comm_handle));
      MPI_Waitall(hypre_ParCSRCommHandleNumRequests(comm_handle),
                  hypre_ParCSRCommHandleRequests(comm_handle), status0);
      hypre_TFree(status0);
   }

   hypre_TFree(hypre_ParCSRCommHandleRequests(comm_handle));
   hypre_TFree(comm_handle);

   return ierr;
}
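
/* A minimal usage sketch (not part of the library): a job = 1 exchange as a
 * Matvec would use it, assuming comm_pkg has already been built (e.g. by
 * hypre_MatvecCommPkgCreate below). The names send_buf, recv_buf and
 * x_local_data, and the packing loop, are illustrative placeholders, not
 * hypre API; only the Create/Destroy pairing and the job = 1 data layout
 * are taken from the routines above.
 *
 *    int     num_sends  = hypre_ParCSRCommPkgNumSends(comm_pkg);
 *    int     num_recvs  = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
 *    int    *map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
 *    int    *map_elmts  = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
 *    double *send_buf   = hypre_CTAlloc(double, map_starts[num_sends]);
 *    double *recv_buf   = hypre_CTAlloc(double,
 *                   hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs));
 *    hypre_ParCSRCommHandle *handle;
 *    int k;
 *
 *    for (k = 0; k < map_starts[num_sends]; k++)
 *       send_buf[k] = x_local_data[map_elmts[k]];     pack boundary entries
 *
 *    handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_buf, recv_buf);
 *       ... overlap: do purely local work while messages are in flight ...
 *    hypre_ParCSRCommHandleDestroy(handle);           waits on all requests
 *       ... recv_buf now holds the needed off-processor vector entries ...
 */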


/* hypre_MatvecCommPkgCreate_core does all the communications and computations
   for hypre_MatvecCommPkgCreate ( hypre_ParCSRMatrix *A ).
   To support both matrix types (with and without floating-point data), it has
   hardly any data structures other than int*.
*/

void
hypre_MatvecCommPkgCreate_core (

   /* input args: */
   MPI_Comm comm, HYPRE_BigInt *col_map_offd, HYPRE_BigInt first_col_diag,
   HYPRE_BigInt *col_starts,
   int num_cols_diag, int num_cols_offd,
   HYPRE_BigInt firstColDiag, HYPRE_BigInt *colMapOffd,

   int data,   /* = 1 for a matrix with floating-point data, = 0 for Boolean matrix */

   /* pointers to output args: */
   int *p_num_recvs, int **p_recv_procs, int **p_recv_vec_starts,
   int *p_num_sends, int **p_send_procs, int **p_send_map_starts,
   int **p_send_map_elmts

)
{
   int i, j;
   int num_procs, my_id, proc_num, num_elmts;
   int local_info;
   HYPRE_BigInt offd_col;
   HYPRE_BigInt *big_buf_data = NULL;
   int *proc_mark, *proc_add, *tmp, *recv_buf, *displs, *info;
   /* outputs: */
   int num_recvs, *recv_procs, *recv_vec_starts;
   int num_sends, *send_procs, *send_map_starts, *send_map_elmts;
   int ip, vec_start, vec_len, num_requests;

   MPI_Request *requests;
   MPI_Status *status;

   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm, &my_id);

   proc_mark = hypre_CTAlloc(int, num_procs);
   proc_add  = hypre_CTAlloc(int, num_procs);
   info      = hypre_CTAlloc(int, num_procs);

   /* ----------------------------------------------------------------------
    * determine which processors to receive from (set proc_mark) and num_recvs;
    * at the end of the loop proc_mark[i] contains the i-th processor to
    * receive from and proc_add[i] the number of elements to be received
    * from it
    * ---------------------------------------------------------------------*/

   for (i = 0; i < num_procs; i++)
      proc_add[i] = 0;

   proc_num = 0;
   if (num_cols_offd) offd_col = col_map_offd[0];
   num_recvs = 0;
   j = 0;
   for (i = 0; i < num_cols_offd; i++)
   {
      if (num_cols_diag) proc_num = hypre_min(num_procs-1,
                                              offd_col / num_cols_diag);
      while (col_starts[proc_num] > offd_col)
         proc_num = proc_num - 1;
      while (col_starts[proc_num+1] - 1 < offd_col)
         proc_num = proc_num + 1;
      proc_mark[num_recvs] = proc_num;
      j = i;
      while (col_starts[proc_num+1] > offd_col)
      {
         proc_add[num_recvs]++;
         if (j < num_cols_offd-1)
         {
            j++;
            offd_col = col_map_offd[j];
         }
         else
         {
            j++;
            offd_col = col_starts[num_procs];
         }
      }
      num_recvs++;
      if (j < num_cols_offd) i = j - 1;
      else i = j;
   }

   local_info = 2*num_recvs;

   MPI_Allgather(&local_info, 1, MPI_INT, info, 1, MPI_INT, comm);

   /* ----------------------------------------------------------------------
    * generate information to be sent: tmp contains for each recv_proc:
    * id of the recv_proc and number of elements to be received from that
    * processor (in this order)
    * ---------------------------------------------------------------------*/

   displs = hypre_CTAlloc(int, num_procs+1);
   displs[0] = 0;
   for (i = 1; i < num_procs+1; i++)
      displs[i] = displs[i-1] + info[i-1];
   recv_buf = hypre_CTAlloc(int, displs[num_procs]);

   recv_procs = NULL;
   tmp = NULL;
   if (num_recvs)
   {
      recv_procs = hypre_CTAlloc(int, num_recvs);
      tmp = hypre_CTAlloc(int, local_info);
   }
   recv_vec_starts = hypre_CTAlloc(int, num_recvs+1);

   j = 0;
   if (num_recvs) recv_vec_starts[0] = 0;
   for (i = 0; i < num_recvs; i++)
   {
      num_elmts = proc_add[i];
      recv_procs[i] = proc_mark[i];
      recv_vec_starts[i+1] = recv_vec_starts[i] + num_elmts;
      tmp[j++] = proc_mark[i];
      tmp[j++] = num_elmts;
   }

   MPI_Allgatherv(tmp, local_info, MPI_INT, recv_buf, info, displs, MPI_INT, comm);

   /* ----------------------------------------------------------------------
    * determine num_sends and number of elements to be sent
    * ---------------------------------------------------------------------*/

   num_sends = 0;
   num_elmts = 0;
   proc_add[0] = 0;
   for (i = 0; i < num_procs; i++)
   {
      j = displs[i];
      while (j < displs[i+1])
      {
         if (recv_buf[j++] == my_id)
         {
            proc_mark[num_sends] = i;
            num_sends++;
            proc_add[num_sends] = proc_add[num_sends-1] + recv_buf[j];
            break;
         }
         j++;
      }
   }

   /* ----------------------------------------------------------------------
    * determine send_procs and the actual elements to be sent (in send_map_elmts)
    * and send_map_starts whose i-th entry points to the beginning of the
    * elements to be sent to proc. i
    * ---------------------------------------------------------------------*/

   send_procs = NULL;
   send_map_elmts = NULL;

   if (num_sends)
   {
      send_procs = hypre_CTAlloc(int, num_sends);
      send_map_elmts = hypre_CTAlloc(int, proc_add[num_sends]);
      big_buf_data = hypre_CTAlloc(HYPRE_BigInt, proc_add[num_sends]);
   }
   send_map_starts = hypre_CTAlloc(int, num_sends+1);
   num_requests = num_recvs + num_sends;
   if (num_requests)
   {
      requests = hypre_CTAlloc(MPI_Request, num_requests);
      status = hypre_CTAlloc(MPI_Status, num_requests);
   }

   if (num_sends) send_map_starts[0] = 0;
   for (i = 0; i < num_sends; i++)
   {
      send_map_starts[i+1] = proc_add[i+1];
      send_procs[i] = proc_mark[i];
   }

   j = 0;
   for (i = 0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      MPI_Irecv(&big_buf_data[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                ip, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      MPI_Isend(&col_map_offd[vec_start], vec_len, MPI_HYPRE_BIG_INT,
                ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      MPI_Waitall(num_requests, requests, status);
      hypre_TFree(requests);
      hypre_TFree(status);
   }

   if (num_sends)
   {
      for (i = 0; i < send_map_starts[num_sends]; i++)
         send_map_elmts[i] = (int)(big_buf_data[i] - first_col_diag);
   }

   hypre_TFree(proc_add);
   hypre_TFree(proc_mark);
   hypre_TFree(tmp);
   hypre_TFree(recv_buf);
   hypre_TFree(displs);
   hypre_TFree(info);
   hypre_TFree(big_buf_data);

   /* finish up with the hand-coded call-by-reference... */
   *p_num_recvs = num_recvs;
   *p_recv_procs = recv_procs;
   *p_recv_vec_starts = recv_vec_starts;
   *p_num_sends = num_sends;
   *p_send_procs = send_procs;
   *p_send_map_starts = send_map_starts;
   *p_send_map_elmts = send_map_elmts;
}

/* ----------------------------------------------------------------------
 * hypre_MatvecCommPkgCreate
 * generates the comm_pkg for A
 * if no row and/or column partitioning is given, the routine determines
 * them with MPE_Decomp1d
 * ---------------------------------------------------------------------*/

int
hypre_MatvecCommPkgCreate ( hypre_ParCSRMatrix *A )
{
   hypre_ParCSRCommPkg *comm_pkg;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* MPI_Datatype *recv_mpi_types;
   MPI_Datatype *send_mpi_types;
*/
   int num_sends;
   int *send_procs;
   int *send_map_starts;
   int *send_map_elmts;
   int num_recvs;
   int *recv_procs;
   int *recv_vec_starts;

   HYPRE_BigInt *col_map_offd   = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt  first_col_diag = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt *col_starts     = hypre_ParCSRMatrixColStarts(A);

   int ierr = 0;
   int num_cols_diag = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A));
   int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));

   hypre_MatvecCommPkgCreate_core
      (
         comm, col_map_offd, first_col_diag, col_starts,
         num_cols_diag, num_cols_offd,
         first_col_diag, col_map_offd,
         1,
         &num_recvs, &recv_procs, &recv_vec_starts,
         &num_sends, &send_procs, &send_map_starts,
         &send_map_elmts
      );

   comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1);

   hypre_ParCSRCommPkgComm(comm_pkg) = comm;

   hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs;
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts;
   /* hypre_ParCSRCommPkgRecvMPITypes(comm_pkg) = recv_mpi_types; */

   hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts;
   hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elmts;
   /* hypre_ParCSRCommPkgSendMPITypes(comm_pkg) = send_mpi_types; */

   hypre_ParCSRMatrixCommPkg(A) = comm_pkg;

   return ierr;
}
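
/* A typical calling pattern (sketch, not part of the library): the comm_pkg
 * is built lazily the first time a matrix needs to communicate and is then
 * reused for every subsequent Matvec on A. hypre_ParCSRMatrixCommPkg is the
 * accessor used above; everything else here is just the suggested call order.
 *
 *    if (!hypre_ParCSRMatrixCommPkg(A))
 *       hypre_MatvecCommPkgCreate(A);
 *    comm_pkg = hypre_ParCSRMatrixCommPkg(A);
 *       ... use comm_pkg with hypre_ParCSRCommHandleCreate(1, comm_pkg, ...) ...
 *
 * The package is owned by the matrix, so it is normally freed through
 * hypre_MatvecCommPkgDestroy (below) when A itself is destroyed.
 */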

int
hypre_MatvecCommPkgDestroy( hypre_ParCSRCommPkg *comm_pkg )
{
   int ierr = 0;

   if (hypre_ParCSRCommPkgNumSends(comm_pkg))
   {
      hypre_TFree(hypre_ParCSRCommPkgSendProcs(comm_pkg));
      hypre_TFree(hypre_ParCSRCommPkgSendMapElmts(comm_pkg));
   }
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg));
   /* if (hypre_ParCSRCommPkgSendMPITypes(comm_pkg))
      hypre_TFree(hypre_ParCSRCommPkgSendMPITypes(comm_pkg)); */
   if (hypre_ParCSRCommPkgNumRecvs(comm_pkg))
   {
      hypre_TFree(hypre_ParCSRCommPkgRecvProcs(comm_pkg));
   }
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg));
   /* if (hypre_ParCSRCommPkgRecvMPITypes(comm_pkg))
      hypre_TFree(hypre_ParCSRCommPkgRecvMPITypes(comm_pkg)); */
   hypre_TFree(comm_pkg);

   return ierr;
}

int
hypre_BuildCSRMatrixMPIDataType(int num_nonzeros, int num_rows,
                                double *a_data, int *a_i, int *a_j,
                                MPI_Datatype *csr_matrix_datatype)
{
   int          block_lens[3];
   MPI_Aint     displ[3];
   MPI_Datatype types[3];
   int          ierr = 0;

   block_lens[0] = num_nonzeros;
   block_lens[1] = num_rows+1;
   block_lens[2] = num_nonzeros;

   types[0] = MPI_DOUBLE;
   types[1] = MPI_INT;
   types[2] = MPI_INT;

   MPI_Address(a_data, &displ[0]);
   MPI_Address(a_i, &displ[1]);
   MPI_Address(a_j, &displ[2]);
   MPI_Type_struct(3, block_lens, displ, types, csr_matrix_datatype);
   MPI_Type_commit(csr_matrix_datatype);

   return ierr;
}
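
/* A usage sketch (assumption, not library code): because the displacements
 * above are absolute addresses obtained from MPI_Address, the committed type
 * is used with MPI_BOTTOM as the buffer argument, matching the commented-out
 * default branch of hypre_ParCSRCommHandleCreate. The peer rank `dest` and
 * tag 0 are placeholders.
 *
 *    MPI_Datatype csr_type;
 *    hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows,
 *                                    a_data, a_i, a_j, &csr_type);
 *    MPI_Send(MPI_BOTTOM, 1, csr_type, dest, 0, comm);
 *    MPI_Type_free(&csr_type);
 *
 * The receiver builds the analogous type over its own (already allocated)
 * a_data, a_i, a_j arrays and posts MPI_Recv(MPI_BOTTOM, 1, ...).
 */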

int
hypre_BuildCSRJDataType(int num_nonzeros,
                        double *a_data, int *a_j,
                        MPI_Datatype *csr_jdata_datatype)
{
   int          block_lens[2];
   MPI_Aint     displs[2];
   MPI_Datatype types[2];
   int          ierr = 0;

   block_lens[0] = num_nonzeros;
   block_lens[1] = num_nonzeros;

   types[0] = MPI_DOUBLE;
   types[1] = MPI_INT;

   MPI_Address(a_data, &displs[0]);
   MPI_Address(a_j, &displs[1]);

   MPI_Type_struct(2, block_lens, displs, types, csr_jdata_datatype);
   MPI_Type_commit(csr_jdata_datatype);

   return ierr;
}