source: CIVL/examples/mpi-omp/AMG2013/parcsr_mv/new_commpkg.c

main
Last change on this file was ea777aa, checked in by Alex Wilton <awilton@…>, 3 years ago

Moved examples, include, build_default.properties, common.xml, and README out from dev.civl.com into the root of the repo.

git-svn-id: svn://vsl.cis.udel.edu/civl/trunk@5704 fb995dde-84ed-4084-dfe6-e5aef3e2452c

  • Property mode set to 100644
File size: 25.7 KB
Line 
1/*BHEADER**********************************************************************
2 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
3 * Produced at the Lawrence Livermore National Laboratory.
4 * This file is part of HYPRE. See file COPYRIGHT for details.
5 *
6 * HYPRE is free software; you can redistribute it and/or modify it under the
7 * terms of the GNU Lesser General Public License (as published by the Free
8 * Software Foundation) version 2.1 dated February 1999.
9 *
10 * $Revision: 2.4 $
11 ***********************************************************************EHEADER*/
12
13
14
15/*----------------------------------------------------
16 * Communication package that uses an assumed partition
17 * AHB 6/04
18 *-----------------------------------------------------*/
19
20#include "headers.h"
21
22/* some debugging tools*/
23#define mydebug 0
24
25/*==========================================================================*/
26
27
28
29int PrintCommpkg(hypre_ParCSRMatrix *A, const char *file_name)
30{
31
32 int num_sends, num_recvs;
33
34 int *recv_vec_starts, *recv_procs;
35 int *send_map_starts, *send_map_elements, *send_procs;
36
37 int i;
38 int my_id;
39
40 MPI_Comm comm;
41
42 hypre_ParCSRCommPkg *comm_pkg;
43
44
45 char new_file[80];
46 FILE *fp;
47
48 comm_pkg = hypre_ParCSRMatrixCommPkg(A);
49
50
51 comm = hypre_ParCSRCommPkgComm(comm_pkg);
52
53 num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
54 recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
55 recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
56 num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
57 send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
58 send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
59 send_map_elements = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
60
61
62 MPI_Comm_rank(comm, &my_id);
63
64 sprintf(new_file,"%s.%d",file_name,my_id);
65
66 fp = fopen(new_file, "w");
67 fprintf(fp, "num_recvs = %d\n", num_recvs);
68 for (i=0; i < num_recvs; i++)
69 {
70 fprintf(fp, "recv_proc [start, end] = %d [%d, %d] \n", recv_procs[i], recv_vec_starts[i], recv_vec_starts[i+1]-1);
71 }
72
73 fprintf(fp, "num_sends = %d\n", num_sends);
74 for (i=0; i < num_sends; i++)
75 {
76 fprintf(fp, "send_proc [start, end] = %d [%d, %d] \n", send_procs[i], send_map_starts[i], send_map_starts[i+1]-1);
77 }
78
79 for (i = 0; i< send_map_starts[num_sends]; i++)
80 {
81 fprintf(fp, "send_map_elements (%d) = %d\n", i, send_map_elements[i]);
82 }
83
84 fclose(fp);
85
86 return hypre_error_flag;
87
88
89
90}
91
92
93/*------------------------------------------------------------------
94 * hypre_NewCommPkgCreate_core
95 *
96 * This does the work for hypre_NewCommPkgCreate - we have to split it
97 * off so that it can also be used for block matrices.
98 *--------------------------------------------------------------------------*/
99
100int
101hypre_NewCommPkgCreate_core(
102/* input args: */
103 MPI_Comm comm, HYPRE_BigInt *col_map_off_d, HYPRE_BigInt first_col_diag,
104 HYPRE_BigInt col_start, HYPRE_BigInt col_end,
105 int num_cols_off_d, HYPRE_BigInt global_num_cols,
106/* pointers to output args: */
107 int *p_num_recvs, int **p_recv_procs, int **p_recv_vec_starts,
108 int *p_num_sends, int **p_send_procs, int ** p_send_map_starts,
109 int **p_send_map_elements, hypre_IJAssumedPart *apart)
110
111{
112 int num_procs, myid;
113 int j, i;
114 HYPRE_BigInt range_start, range_end, big_size;
115
116 int size;
117 int count;
118
119 int num_recvs, *recv_procs = NULL, *recv_vec_starts=NULL;
120 int tmp_id, prev_id;
121
122 int num_sends;
123
124 int ex_num_contacts, *ex_contact_procs=NULL, *ex_contact_vec_starts=NULL;
125 HYPRE_BigInt *ex_contact_buf=NULL;
126 int *send_map_elmts=NULL;
127
128 int num_ranges;
129 HYPRE_BigInt upper_bound;
130
131
132 HYPRE_BigInt *response_buf = NULL;
133 int *response_buf_starts=NULL;
134
135 int max_response_size;
136
137 hypre_DataExchangeResponse response_obj1, response_obj2;
138 hypre_ProcListElements send_proc_obj;
139
140#if mydebug
141 int tmp_int, index;
142#endif
143
144 MPI_Comm_size(comm, &num_procs );
145 MPI_Comm_rank(comm, &myid );
146
147
148#if mydebug
149
150 printf("myid = %i, my assumed local range: [%i, %i]\n", myid,
151 apart->row_start, apart->row_end);
152
153 for (i=0; i<apart.length; i++)
154 {
155 printf("myid = %d, proc %d owns assumed partition range = [%d, %d]\n",
156 myid, apart->proc_list[i], apart->row_start_list[i],
157 apart->row_end_list[i]);
158 }
159
160 printf("myid = %d, length of apart = %d\n", myid, apart->length);
161
162#endif
163
164
165 /*-----------------------------------------------------------
166 * Everyone knows where their assumed range is located
167 * (because of the assumed partition object (apart).
168 * For the comm. package, each proc must know it's receive
169 * procs (who it will receive data from and how much data)
170 * and its send procs
171 * (who it will send data to) and the indices of the elements
172 * to be sent. This is based on the non-zero
173 * entries in its rows. Each proc should know this from the user.
174 *-----------------------------------------------------------*/
175
176
177 /*------------------------------------------------------------
178 * First, get the receive processors
179 * each par_csr matrix will have a certain number of columns
180 * (num_cols_off_d) given in col_map_offd[] for which it needs
181 * data from another processor.
182 *
183 *------------------------------------------------------------*/
184
185 /*calculate the assumed receive processors*/
186
187 /* need to populate num_recvs, *recv_procs, and *recv_vec_starts
188 (correlates to starts in col_map_off_d for recv_procs) for
189 the comm. package*/
190
191
192 /*create contact information*/
193
194 ex_num_contacts = 0;
195
196 /*estimate the storage needed*/
197 if (num_cols_off_d > 0 && (apart->row_end - apart->row_start) > 0 )
198 {
199 big_size = col_map_off_d[num_cols_off_d-1] - col_map_off_d[0];
200
201 size = (int)(big_size/(apart->row_end - apart->row_start)) + 2;
202 }
203 else
204 {
205 size = 0;
206 }
207
208
209 /*we will contact each with a range of cols that we need*/
210 /* it is ok to contact yourself - because then there doesn't
211 need to be separate code */
212
213 ex_contact_procs = hypre_CTAlloc(int, size);
214 ex_contact_vec_starts = hypre_CTAlloc(int, size+1);
215 ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, size*2);
216
217 range_end = -1;
218 for (i=0; i< num_cols_off_d; i++)
219 {
220 if (col_map_off_d[i] > range_end)
221 {
222
223
224 hypre_GetAssumedPartitionProcFromRow(comm, col_map_off_d[i],
225 global_num_cols, &tmp_id);
226
227 if (ex_num_contacts == size) /*need more space? */
228 {
229 size += 20;
230 ex_contact_procs = hypre_TReAlloc(ex_contact_procs, int, size);
231 ex_contact_vec_starts = hypre_TReAlloc(ex_contact_vec_starts, int, size+1);
232 ex_contact_buf = hypre_TReAlloc(ex_contact_buf, HYPRE_BigInt, size*2);
233 }
234
235 /* end of prev. range */
236 if (ex_num_contacts > 0) ex_contact_buf[ex_num_contacts*2 - 1] = col_map_off_d[i-1];
237
238 /*start new range*/
239 ex_contact_procs[ex_num_contacts] = tmp_id;
240 ex_contact_vec_starts[ex_num_contacts] = ex_num_contacts*2;
241 ex_contact_buf[ex_num_contacts*2] = col_map_off_d[i];
242
243
244 ex_num_contacts++;
245
246 hypre_GetAssumedPartitionRowRange(comm, tmp_id, global_num_cols,
247 &range_start, &range_end);
248
249 }
250 }
251
252 /*finish the starts*/
253 ex_contact_vec_starts[ex_num_contacts] = ex_num_contacts*2;
254 /*finish the last range*/
255 if (ex_num_contacts > 0) ex_contact_buf[ex_num_contacts*2 - 1] = col_map_off_d[num_cols_off_d-1];
256
257
258 /*don't allocate space for responses */
259
260
261 /*create response object*/
262 response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
263 response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
264 response_obj1.data2 = NULL;
265
266 max_response_size = 6; /* 6 means we can fit 3 ranges*/
267
268
269 hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
270 ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
271 sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1,
272 comm, (void**) &response_buf, &response_buf_starts);
273
274
275
276 /*now create recv_procs[] and recv_vec_starts[] and num_recvs
277 from the complete data in response_buf - this array contains
278 a proc_id followed by an upper bound for the range. */
279
280
281 /*initialize */
282 num_recvs = 0;
283 size = ex_num_contacts+20; /* num of recv procs should be roughly similar size
284 to number of contacts - add a buffer of 20*/
285
286
287 recv_procs = hypre_CTAlloc(int, size);
288 recv_vec_starts = hypre_CTAlloc(int, size+1);
289 recv_vec_starts[0] = 0;
290
291 /*how many ranges were returned?*/
292 num_ranges = response_buf_starts[ex_num_contacts];
293 num_ranges = num_ranges/2;
294
295 prev_id = -1;
296 j = 0;
297 count = 0;
298
299 /* loop through ranges */
300 for (i=0; i<num_ranges; i++)
301 {
302 upper_bound = response_buf[i*2+1];
303 count = 0;
304 /* loop through off_d entries - counting how many are in the range */
305 while (j < num_cols_off_d && col_map_off_d[j] <= upper_bound)
306 {
307 j++;
308 count++;
309 }
310 if (count > 0)
311 {
312 /*add the range if the proc id != myid*/
313 tmp_id = response_buf[i*2];
314 if (tmp_id != myid)
315 {
316 if (tmp_id != prev_id) /*increment the number of recvs */
317 {
318 /*check size of recv buffers*/
319 if (num_recvs == size)
320 {
321 size+=20;
322 recv_procs = hypre_TReAlloc(recv_procs,int, size);
323 recv_vec_starts = hypre_TReAlloc(recv_vec_starts,int, size+1);
324 }
325
326 recv_vec_starts[num_recvs+1] = j; /*the new start is at this element*/
327 recv_procs[num_recvs] = tmp_id; /*add the new processor*/
328 num_recvs++;
329
330 }
331 else
332 {
333 /*same processor - just change the vec starts*/
334 recv_vec_starts[num_recvs] = j; /*the new start is at this element*/
335 }
336 }
337 prev_id = tmp_id;
338
339 }
340
341 }
342
343
344
345
346#if mydebug
347 for (i=0; i < num_recvs; i++)
348 {
349 printf("myid = %d, recv proc = %d, vec_starts = [%d : %d]\n",
350 myid, recv_procs[i], recv_vec_starts[i],recv_vec_starts[i+1]-1);
351 }
352#endif
353
354
355 /*------------------------------------------------------------
356 * determine the send processors
357 * each processor contacts its recv procs to let them
358 * know they are a send processor
359 *
360 *-------------------------------------------------------------*/
361
362 /* the contact information is the recv_processor infomation - so
363 nothing more to do to generate contact info*/
364
365 /* the response we expect is just a confirmation*/
366 hypre_TFree(response_buf);
367 hypre_TFree(response_buf_starts);
368 response_buf = NULL;
369 response_buf_starts = NULL;
370
371 /*build the response object*/
372 /*estimate for inital storage allocation that we send to as many procs
373 as we recv from + pad by 5*/
374 send_proc_obj.length = 0;
375 send_proc_obj.storage_length = num_recvs + 5;
376 send_proc_obj.id = hypre_CTAlloc(int, send_proc_obj.storage_length);
377 send_proc_obj.vec_starts = hypre_CTAlloc(int, send_proc_obj.storage_length + 1);
378 send_proc_obj.vec_starts[0] = 0;
379 send_proc_obj.element_storage_length = num_cols_off_d;
380 send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length);
381
382 response_obj2.fill_response = hypre_FillResponseIJDetermineSendProcs;
383 response_obj2.data1 = NULL;
384 response_obj2.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
385
386 max_response_size = 0;
387
388
389
390 hypre_DataExchangeList(num_recvs, recv_procs,
391 col_map_off_d, recv_vec_starts, sizeof(HYPRE_BigInt),
392 sizeof(HYPRE_BigInt), &response_obj2, max_response_size, 2,
393 comm, (void **) &response_buf, &response_buf_starts);
394
395
396
397 num_sends = send_proc_obj.length;
398
399 /*send procs are in send_proc_object.id */
400 /*send proc starts are in send_proc_obj.vec_starts */
401
402#if mydebug
403 printf("myid = %d, num_sends = %d\n", myid, num_sends);
404 for (i=0; i < num_sends; i++)
405 {
406 tmp_int = send_proc_obj.vec_starts[i+1] - send_proc_obj.vec_starts[i];
407 index = send_proc_obj.vec_starts[i];
408 for (j=0; j< tmp_int; j++)
409 {
410 printf("myid = %d, send proc = %d, send element = %d\n",myid,
411 send_proc_obj.id[i],send_proc_obj.elements[index+j]);
412 }
413 }
414#endif
415
416 /*-----------------------------------------------------------
417 * We need to sort the send procs and send elements (to produce
418 * the same result as with the standard comm package)
419 * 11/07/05
420 *-----------------------------------------------------------*/
421
422 {
423
424 int *orig_order;
425 int *orig_send_map_starts;
426 HYPRE_BigInt *orig_send_elements;
427 int ct, sz, pos;
428
429 orig_order = hypre_CTAlloc(int, num_sends);
430 orig_send_map_starts = hypre_CTAlloc(int, num_sends+1);
431 orig_send_elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.vec_starts[num_sends]);
432
433 orig_send_map_starts[0] = 0;
434 /* copy send map starts and elements */
435 for (i=0; i< num_sends; i++)
436 {
437 orig_order[i] = i;
438 orig_send_map_starts[i+1] = send_proc_obj.vec_starts[i+1];
439 }
440 for (i=0; i< send_proc_obj.vec_starts[num_sends]; i++)
441 {
442 orig_send_elements[i] = send_proc_obj.elements[i];
443 }
444 /* sort processor ids - keep track of original order */
445 hypre_qsort2i( send_proc_obj.id, orig_order, 0, num_sends-1 );
446
447 /* now rearrange vec starts and send elements to correspond to proc ids */
448 ct = 0;
449 for (i=0; i< num_sends; i++)
450 {
451 pos = orig_order[i];
452 sz = orig_send_map_starts[pos + 1] - orig_send_map_starts[pos];
453 send_proc_obj.vec_starts[i+1] = ct + sz;
454 for (j = 0; j< sz; j++)
455 {
456 send_proc_obj.elements[ct +j] = orig_send_elements[orig_send_map_starts[pos]+j];
457 }
458 ct += sz;
459 }
460 /* clean up */
461 hypre_TFree(orig_order);
462 hypre_TFree(orig_send_elements);
463 hypre_TFree(orig_send_map_starts);
464 }
465
466
467 /*-----------------------------------------------------------
468 * Return output info for setting up the comm package
469 *-----------------------------------------------------------*/
470
471
472 *p_num_recvs = num_recvs;
473 *p_recv_procs = recv_procs;
474 *p_recv_vec_starts = recv_vec_starts;
475 *p_num_sends = num_sends;
476 *p_send_procs = send_proc_obj.id;
477 *p_send_map_starts = send_proc_obj.vec_starts;
478
479
480 /*send map elements have global index - need local instead*/
481
482 if (num_sends)
483 {
484 send_map_elmts = hypre_CTAlloc(int,send_proc_obj.vec_starts[num_sends]);
485 for (i=0; i<send_proc_obj.vec_starts[num_sends]; i++)
486 {
487 send_map_elmts[i] = (int)(send_proc_obj.elements[i]-first_col_diag);
488 /*send_proc_obj.elements[i] -= first_col_diag;*/
489 }
490 }
491
492 *p_send_map_elements = send_map_elmts;
493
494 /*-----------------------------------------------------------
495 * Clean up
496 *-----------------------------------------------------------*/
497
498
499 if(ex_contact_procs) hypre_TFree(ex_contact_procs);
500 if(ex_contact_vec_starts) hypre_TFree(ex_contact_vec_starts);
501 hypre_TFree(ex_contact_buf);
502
503
504 if(response_buf) hypre_TFree(response_buf);
505 if(response_buf_starts) hypre_TFree(response_buf_starts);
506
507
508 /* don't free send_proc_obj.id,send_proc_obj.vec_starts,send_proc_obj.elements;
509 recv_procs, recv_vec_starts. These are aliased to the comm package and
510 will be destroyed there */
511
512 return hypre_error_flag;
513
514}
515
516/*------------------------------------------------------------------
517 * hypre_NewCommPkgCreate
518 * this is an alternate way of constructing the comm package
519 * (compare to hypre_MatvecCommPkgCreate() in par_csr_communication.c
520 * that should be more scalable
521 *-------------------------------------------------------------------*/
522
523int
524hypre_NewCommPkgCreate( hypre_ParCSRMatrix *parcsr_A)
525{
526
527 HYPRE_BigInt row_start=0, row_end=0, col_start = 0, col_end = 0;
528 int num_recvs, *recv_procs, *recv_vec_starts;
529
530 int num_sends, *send_procs, *send_map_starts;
531 int *send_map_elements;
532
533 int num_cols_off_d;
534 HYPRE_BigInt *col_map_off_d;
535
536 HYPRE_BigInt first_col_diag;
537 HYPRE_BigInt global_num_cols;
538
539
540 MPI_Comm comm;
541
542 hypre_ParCSRCommPkg *comm_pkg;
543 hypre_IJAssumedPart *apart;
544
545 /*-----------------------------------------------------------
546 * get parcsr_A information
547 *----------------------------------------------------------*/
548
549 hypre_ParCSRMatrixGetLocalRange( parcsr_A,
550 &row_start, &row_end ,
551 &col_start, &col_end );
552
553 col_map_off_d = hypre_ParCSRMatrixColMapOffd(parcsr_A);
554 num_cols_off_d = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(parcsr_A));
555
556 global_num_cols = hypre_ParCSRMatrixGlobalNumCols(parcsr_A);
557
558 comm = hypre_ParCSRMatrixComm(parcsr_A);
559
560 first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A);
561
562
563 /* Create the assumed partition */
564 if (hypre_ParCSRMatrixAssumedPartition(parcsr_A) == NULL)
565 {
566 hypre_ParCSRMatrixCreateAssumedPartition(parcsr_A);
567 }
568
569 apart = hypre_ParCSRMatrixAssumedPartition(parcsr_A);
570
571 /*-----------------------------------------------------------
572 * get commpkg info information
573 *----------------------------------------------------------*/
574
575 hypre_NewCommPkgCreate_core( comm, col_map_off_d, first_col_diag,
576 col_start, col_end,
577 num_cols_off_d, global_num_cols,
578 &num_recvs, &recv_procs, &recv_vec_starts,
579 &num_sends, &send_procs, &send_map_starts,
580 &send_map_elements, apart);
581
582
583 if (!num_recvs)
584 {
585 hypre_TFree(recv_procs);
586 recv_procs = NULL;
587 }
588 if (!num_sends)
589 {
590 hypre_TFree(send_procs);
591 hypre_TFree(send_map_elements);
592 send_procs = NULL;
593 send_map_elements = NULL;
594 }
595
596
597 /*-----------------------------------------------------------
598 * setup commpkg
599 *----------------------------------------------------------*/
600
601 comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1);
602
603 hypre_ParCSRCommPkgComm(comm_pkg) = comm;
604 hypre_ParCSRCommPkgNumRecvs(comm_pkg) = num_recvs;
605 hypre_ParCSRCommPkgRecvProcs(comm_pkg) = recv_procs;
606 hypre_ParCSRCommPkgRecvVecStarts(comm_pkg) = recv_vec_starts;
607 hypre_ParCSRCommPkgNumSends(comm_pkg) = num_sends;
608 hypre_ParCSRCommPkgSendProcs(comm_pkg) = send_procs;
609 hypre_ParCSRCommPkgSendMapStarts(comm_pkg) = send_map_starts;
610 hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = send_map_elements;
611
612 hypre_ParCSRMatrixCommPkg(parcsr_A) = comm_pkg;
613
614 return hypre_error_flag;
615
616
617}
618
619/*------------------------------------------------------------------
620 * hypre_NewCommPkgDestroy
621 * Destroy the comm package
622 *------------------------------------------------------------------*/
623
624
625int
626hypre_NewCommPkgDestroy(hypre_ParCSRMatrix *parcsr_A)
627{
628
629
630 hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(parcsr_A);
631
632
633 /*even if num_sends and num_recvs = 0, storage may have been allocated */
634
635 if (hypre_ParCSRCommPkgSendProcs(comm_pkg))
636 {
637 hypre_TFree(hypre_ParCSRCommPkgSendProcs(comm_pkg));
638 }
639 if (hypre_ParCSRCommPkgSendMapElmts(comm_pkg))
640 {
641 hypre_TFree(hypre_ParCSRCommPkgSendMapElmts(comm_pkg));
642 }
643 if (hypre_ParCSRCommPkgSendMapStarts(comm_pkg))
644 {
645 hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg));
646 }
647 if (hypre_ParCSRCommPkgRecvProcs(comm_pkg))
648 {
649 hypre_TFree(hypre_ParCSRCommPkgRecvProcs(comm_pkg));
650 }
651 if (hypre_ParCSRCommPkgRecvVecStarts(comm_pkg))
652 {
653 hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg));
654 }
655
656 hypre_TFree(comm_pkg);
657 hypre_ParCSRMatrixCommPkg(parcsr_A) = NULL; /*this gets freed again in destroy
658 parscr since there are two comm
659 packages now*/
660
661 return hypre_error_flag;
662}
663
664
665
666
667/*--------------------------------------------------------------------
668 * hypre_RangeFillResponseIJDetermineRecvProcs
669 * Fill response function for determining the recv. processors
670 * data exchange
671 *--------------------------------------------------------------------*/
672
/* Callback invoked by hypre_DataExchangeList: the contacting processor
 * sends a column range [recv_contact_buf[0], recv_contact_buf[1]]; this
 * routine answers with the list of assumed-partition owners that cover
 * that range, formatted as pairs [proc_id, end_row, proc_id, end_row, ...].
 * The response buffer may be reallocated; the (possibly new) pointer and
 * its length are returned through p_send_response_buf and
 * response_message_size. */
int
hypre_RangeFillResponseIJDetermineRecvProcs(void *p_recv_contact_buf,
                                 int contact_size, int contact_proc, void *ro,
                                 MPI_Comm comm, void **p_send_response_buf,
                                 int *response_message_size)
{
   int    myid, tmp_id;
   HYPRE_BigInt row_end;
   int    j;
   int    index, size;
   HYPRE_BigInt row_val;

   HYPRE_BigInt   *send_response_buf = (HYPRE_BigInt *) *p_send_response_buf;
   HYPRE_BigInt   *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;


   hypre_DataExchangeResponse  *response_obj = ro;
   /* data1 was set to the assumed partition by the caller */
   hypre_IJAssumedPart               *part = response_obj->data1;

   int overhead = response_obj->send_response_overhead;

   /*-------------------------------------------------------------------
    * we are getting a range of off_d entries - need to see if we own them
    * or how many ranges to send back  - send back
    * with format [proc_id end_row  proc_id #end_row  proc_id #end_row etc...].
    *----------------------------------------------------------------------*/

   MPI_Comm_rank(comm, &myid );


   /* populate send_response_buf */

   index = 0; /*count entries in send_response_buf*/

   /* start at the first partition entry (sort_index orders the
      partition list by row range) */
   j = 0; /*marks which partition of the assumed partition we are in */
   row_val = recv_contact_buf[0]; /*beginning of range*/
   row_end = part->row_end_list[part->sort_index[j]];
   tmp_id = part->proc_list[part->sort_index[j]];

   /*check storage in send_buf for adding the ranges */
   /* worst case: every partition entry contributes a (proc, end) pair */
   size = 2*(part->length);

   if ( response_obj->send_response_storage  < size  )
   {

      response_obj->send_response_storage =  hypre_max(size, 20);
      send_response_buf = hypre_TReAlloc( send_response_buf, HYPRE_BigInt,
                                          response_obj->send_response_storage + overhead );
      *p_send_response_buf = send_response_buf;    /* needed when using ReAlloc */
   }


   /* NOTE(review): this loop has no j < part->length guard - it assumes
      the requested range start always lies within the partition list
      (i.e. the contact was routed to the right owner) - TODO confirm */
   while (row_val > row_end) /*which partition to start in */
   {
      j++;
      row_end = part->row_end_list[part->sort_index[j]];
      tmp_id = part->proc_list[part->sort_index[j]];
   }

   /*add this range*/
   send_response_buf[index++] = (HYPRE_BigInt)tmp_id;
   send_response_buf[index++] = row_end;

   j++; /*increase j to look in next partition */


   /*any more?  - now compare with end of range value*/
   /* keep appending (proc, end) pairs until the partition end passes
      the end of the requested range */
   row_val = recv_contact_buf[1]; /*end of range*/
   while ( j < part->length && row_val > row_end  )
   {
      row_end = part->row_end_list[part->sort_index[j]];
      tmp_id = part->proc_list[part->sort_index[j]];

      send_response_buf[index++] = (HYPRE_BigInt)tmp_id;
      send_response_buf[index++] = row_end;

      j++;

   }


   *response_message_size = index;
   *p_send_response_buf = send_response_buf;

   return hypre_error_flag;

}
760
761
762
763/*--------------------------------------------------------------------
764 * hypre_FillResponseIJDetermineSendProcs
765 * Fill response function for determining the send processors
766 * data exchange
767 *--------------------------------------------------------------------*/
768
769int
770hypre_FillResponseIJDetermineSendProcs(void *p_recv_contact_buf,
771 int contact_size, int contact_proc, void *ro,
772 MPI_Comm comm, void **p_send_response_buf,
773 int *response_message_size )
774{
775 int myid;
776 int i, index, count, elength;
777
778 HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;
779
780 hypre_DataExchangeResponse *response_obj = ro;
781
782 hypre_ProcListElements *send_proc_obj = response_obj->data2;
783
784
785 MPI_Comm_rank(comm, &myid );
786
787
788 /*check to see if we need to allocate more space in send_proc_obj for ids*/
789 if (send_proc_obj->length == send_proc_obj->storage_length)
790 {
791 send_proc_obj->storage_length +=20; /*add space for 20 more processors*/
792 send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id,int,
793 send_proc_obj->storage_length);
794 send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts,int,
795 send_proc_obj->storage_length + 1);
796 }
797
798 /*initialize*/
799 count = send_proc_obj->length;
800 index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/
801
802 /*send proc*/
803 send_proc_obj->id[count] = contact_proc;
804
805 /*do we need more storage for the elements?*/
806 if (send_proc_obj->element_storage_length < index + contact_size)
807 {
808 elength = hypre_max(contact_size, 50);
809 elength += index;
810 send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
811 HYPRE_BigInt, elength);
812 send_proc_obj->element_storage_length = elength;
813 }
814 /*populate send_proc_obj*/
815 for (i=0; i< contact_size; i++)
816 {
817 send_proc_obj->elements[index++] = recv_contact_buf[i];
818 }
819 send_proc_obj->vec_starts[count+1] = index;
820 send_proc_obj->length++;
821
822
823 /*output - no message to return (confirmation) */
824 *response_message_size = 0;
825
826 return hypre_error_flag;
827
828}
829
Note: See TracBrowser for help on using the repository browser.