source: CIVL/examples/mpi-omp/AMG2013/IJ_mv/IJVector_parcsr.c

/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.4 $
 ***********************************************************************EHEADER*/


/******************************************************************************
 *
 * IJVector_Par interface
 *
 *****************************************************************************/

#include "headers.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJVectorCreatePar
 *
 * creates ParVector if necessary, and leaves a pointer to it as the
 * hypre_IJVector object
 *
 *****************************************************************************/
int
hypre_IJVectorCreatePar(hypre_IJVector *vector, HYPRE_BigInt *IJpartitioning)
{
   MPI_Comm comm = hypre_IJVectorComm(vector);

   int num_procs, j;
   HYPRE_BigInt global_n, *partitioning, jmin;
   MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   jmin = hypre_IJVectorGlobalFirstRow(vector);
   global_n = hypre_IJVectorGlobalNumRows(vector);

   partitioning = hypre_CTAlloc(HYPRE_BigInt, 2);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < 2; j++)
      partitioning[j] = IJpartitioning[j] - jmin;

#else
   jmin = IJpartitioning[0];
   global_n = IJpartitioning[num_procs] - jmin;

   partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < num_procs+1; j++)
      partitioning[j] = IJpartitioning[j] - jmin;

#endif

   hypre_IJVectorObject(vector) = hypre_ParVectorCreate(comm,
                     global_n, (HYPRE_BigInt *) partitioning);

   return hypre_error_flag;
}
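
/* Illustrative sketch (not part of the original source): the zero-based
   shift above, for a hypothetical run with 3 ranks and a global IJ
   partitioning {5, 9, 14, 20}, so jmin = 5:

      IJpartitioning:  5   9  14  20
      partitioning:    0   4   9  15      (each entry minus jmin)

   Rank p owns global rows IJpartitioning[p] .. IJpartitioning[p+1]-1,
   which the ParVector object sees as zero-based rows
   partitioning[p] .. partitioning[p+1]-1. */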

/******************************************************************************
 *
 * hypre_IJVectorDestroyPar
 *
 * frees ParVector local storage of an IJVectorPar
 *
 *****************************************************************************/
int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
   return hypre_ParVectorDestroy(hypre_IJVectorObject(vector));
}

/******************************************************************************
 *
 * hypre_IJVectorInitializePar
 *
 * initializes ParVector of IJVectorPar
 *
 *****************************************************************************/
int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = hypre_IJVectorTranslator(vector);
   HYPRE_BigInt *partitioning = hypre_ParVectorPartitioning(par_vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
   int my_id;

   MPI_Comm comm = hypre_IJVectorComm(vector);

   MPI_Comm_rank(comm, &my_id);

   if (!partitioning)
   {
      printf("No ParVector partitioning for initialization -- ");
      printf("hypre_IJVectorInitializePar\n");
      hypre_error_in_arg(1);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_VectorSize(local_vector) = (int) (partitioning[1] -
                                           partitioning[0]);
#else
   hypre_VectorSize(local_vector) = (int) (partitioning[my_id+1] -
                                           partitioning[my_id]);
#endif

   hypre_ParVectorInitialize(par_vector);

   if (!aux_vector)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }
   hypre_AuxParVectorInitialize(aux_vector);

   return hypre_error_flag;
}
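
/* Illustrative usage sketch (not part of the original source): the typical
   life cycle of these internal routines, mirroring the public HYPRE_IJVector
   interface. Error handling is omitted and the arguments are hypothetical.

      hypre_IJVectorCreatePar(vector, IJpartitioning); // attach ParVector
      hypre_IJVectorInitializePar(vector);             // allocate local data
      hypre_IJVectorSetValuesPar(vector, n, indices, values);
      hypre_IJVectorAssemblePar(vector);               // collective
      hypre_IJVectorGetValuesPar(vector, n, indices, out);
      hypre_IJVectorDestroyPar(vector);
*/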

/******************************************************************************
 *
 * hypre_IJVectorSetMaxOffProcElmtsPar
 *
 *****************************************************************************/

int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
                                    int max_off_proc_elmts)
{
   hypre_AuxParVector *aux_vector;

   aux_vector = hypre_IJVectorTranslator(vector);
   if (!aux_vector)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }
   hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorDistributePar
 *
 * takes an IJVector generated for one processor and distributes it
 * across many processors according to vec_starts;
 * if vec_starts is NULL, it presumably distributes them evenly
 *
 *****************************************************************************/
/*int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
                            const int *vec_starts)
{
   hypre_ParVector *old_vector = hypre_IJVectorObject(vector);
   hypre_ParVector *par_vector;

   if (!old_vector)
   {
      printf("old_vector == NULL -- ");
      printf("hypre_IJVectorDistributePar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

   par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
                                        hypre_ParVectorLocalVector(old_vector),
                                        (int *)vec_starts);
   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorDistributePar\n");
      printf("**** Vector storage is unallocated ****\n");
      hypre_error_in_arg(1);
   }

   hypre_ParVectorDestroy(old_vector);

   hypre_IJVectorObject(vector) = par_vector;

   return hypre_error_flag;
}*/

/******************************************************************************
 *
 * hypre_IJVectorZeroValuesPar
 *
 * zeroes all local components of an IJVectorPar
 *
 *****************************************************************************/
int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
   int my_id;
   int i;
   HYPRE_BigInt vec_start, vec_stop;
   double *data;

   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   HYPRE_BigInt *partitioning = hypre_ParVectorPartitioning(par_vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);

   MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorZeroValuesPar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!partitioning)
   {
      printf("partitioning == NULL -- ");
      printf("hypre_IJVectorZeroValuesPar\n");
      printf("**** Vector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!local_vector)
   {
      printf("local_vector == NULL -- ");
      printf("hypre_IJVectorZeroValuesPar\n");
      printf("**** Vector local data is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = partitioning[0];
   vec_stop  = partitioning[1];
#else
   vec_start = partitioning[my_id];
   vec_stop  = partitioning[my_id+1];
#endif

   if (vec_start > vec_stop)
   {
      printf("vec_start > vec_stop -- ");
      printf("hypre_IJVectorZeroValuesPar\n");
      printf("**** This vector partitioning should not occur ****\n");
      hypre_error_in_arg(1);
   }

   data = hypre_VectorData( local_vector );
   for (i = 0; i < (int)(vec_stop - vec_start); i++)
      data[i] = 0.;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorSetValuesPar
 *
 * sets a potentially noncontiguous set of components of an IJVectorPar
 *
 *****************************************************************************/
int
hypre_IJVectorSetValuesPar(hypre_IJVector *vector,
                           int num_values,
                           const HYPRE_BigInt *indices,
                           const double *values )
{
   int my_id;
   int j, k;
   HYPRE_BigInt i, vec_start, vec_stop;
   double *data;

   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);

   /* If no components are to be set, perform no checking and return */
   if (num_values < 1) return 0;

   MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorSetValuesPar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!IJpartitioning)
   {
      printf("IJpartitioning == NULL -- ");
      printf("hypre_IJVectorSetValuesPar\n");
      printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!local_vector)
   {
      printf("local_vector == NULL -- ");
      printf("hypre_IJVectorSetValuesPar\n");
      printf("**** Vector local data is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop  = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[my_id];
   vec_stop  = IJpartitioning[my_id+1]-1;
#endif

   if (vec_start > vec_stop)
   {
      printf("vec_start > vec_stop -- ");
      printf("hypre_IJVectorSetValuesPar\n");
      printf("**** This vector partitioning should not occur ****\n");
      hypre_error_in_arg(1);
   }

   /* Determine whether indices points to local indices only,
      and if not, store indices and values into auxiliary vector structure.
      If indices == NULL, assume that num_values components are to be
      set in a block starting at vec_start.
      NOTE: If indices == NULL off processor values are ignored!!! */

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      int current_num_elmts
                = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      int max_off_proc_elmts
                = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_BigInt *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      double *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off
               processor stash */
            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
                  max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_BigInt,max_off_proc_elmts);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(double,max_off_proc_elmts);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_BigInt,
                                           max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,double,
                                              max_off_proc_elmts);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector)
                  = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            off_proc_i[current_num_elmts] = i;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentNumElmts(aux_vector) = current_num_elmts;
         }
         else /* local values are inserted into the vector */
         {
            k = (int) (i - vec_start);
            data[k] = values[j];
         }
      }
   }
   else
   {
      if (num_values > (int)(vec_stop - vec_start) + 1)
      {
         printf("Warning! Indices beyond local range not identified!\n");
         printf("Off processor values have been ignored!\n");
         num_values = (int)(vec_stop - vec_start) + 1;
      }

      for (j = 0; j < num_values; j++)
         data[j] = values[j];
   }

   return hypre_error_flag;
}
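
/* Illustrative sketch (not part of the original source): how the
   off-processor stash above grows. The first off-proc entry allocates room
   for 100 elements (unless hypre_IJVectorSetMaxOffProcElmtsPar chose another
   cap); each overflow then reallocates in steps of 10:

      entry   1: alloc  off_proc_i / off_proc_data, capacity 100
      entry 101: realloc to capacity 110
      entry 111: realloc to capacity 120, and so on.

   For workloads with many off-proc entries, setting the cap up front via
   hypre_IJVectorSetMaxOffProcElmtsPar presumably avoids the repeated
   hypre_TReAlloc calls. */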

/******************************************************************************
 *
 * hypre_IJVectorAddToValuesPar
 *
 * adds to a potentially noncontiguous set of IJVectorPar components
 *
 *****************************************************************************/
int
hypre_IJVectorAddToValuesPar(hypre_IJVector *vector,
                             int num_values,
                             const HYPRE_BigInt *indices,
                             const double *values )
{
   int my_id;
   int j, k;
   HYPRE_BigInt i, vec_start, vec_stop;
   double *data;

   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);

   /* If no components are to be added, perform no checking and return */
   if (num_values < 1) return 0;

   MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorAddToValuesPar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!IJpartitioning)
   {
      printf("IJpartitioning == NULL -- ");
      printf("hypre_IJVectorAddToValuesPar\n");
      printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!local_vector)
   {
      printf("local_vector == NULL -- ");
      printf("hypre_IJVectorAddToValuesPar\n");
      printf("**** Vector local data is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop  = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[my_id];
   vec_stop  = IJpartitioning[my_id+1]-1;
#endif

   if (vec_start > vec_stop)
   {
      printf("vec_start > vec_stop -- ");
      printf("hypre_IJVectorAddToValuesPar\n");
      printf("**** This vector partitioning should not occur ****\n");
      hypre_error_in_arg(1);
   }

   /* Determine whether indices points to local indices only,
      and if not, store indices and values into auxiliary vector structure.
      If indices == NULL, assume that num_values components are to be
      added in a block starting at vec_start.
      NOTE: If indices == NULL off processor values are ignored!!! */

   /* if (indices)
   {
      for (i = 0; i < num_values; i++)
      {
         ierr += (indices[i] < vec_start);
         ierr += (indices[i] >= vec_stop);
      }
   }

   if (ierr)
   {
      printf("indices beyond local range -- ");
      printf("hypre_IJVectorAddToValuesPar\n");
      printf("**** Indices specified are unusable ****\n");
      exit(1);
   } */

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      int current_num_elmts
                = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      int max_off_proc_elmts
                = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_BigInt *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      double *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off
               processor stash */
            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
                  max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_BigInt,max_off_proc_elmts);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(double,max_off_proc_elmts);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_BigInt,
                                           max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,double,
                                              max_off_proc_elmts);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector)
                  = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            off_proc_i[current_num_elmts] = -i-1;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentNumElmts(aux_vector) = current_num_elmts;
         }
         else /* local values are added to the vector */
         {
            k = (int) (i - vec_start);
            data[k] += values[j];
         }
      }
   }
   else
   {
      if (num_values > (int)(vec_stop - vec_start) + 1)
      {
         printf("Warning! Indices beyond local range not identified!\n");
         printf("Off processor values have been ignored!\n");
         num_values = (int)(vec_stop - vec_start) + 1;
      }

      for (j = 0; j < num_values; j++)
         data[j] += values[j];
   }

   return hypre_error_flag;
}
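
/* Illustrative sketch (not part of the original source): how the sign
   encoding above distinguishes "add" from "set" entries in the shared
   off-processor stash, for a hypothetical global row 17:

      set path stores   off_proc_i[k] =  17;
      add path stores   off_proc_i[k] = -17 - 1 = -18;

   During assembly the receiving side decodes:

      row = off_proc_i[k];
      if (row < 0) { row = -row - 1;  data[row - first_index] += value; }
      else         {                  data[row - first_index]  = value; }

   The -i-1 form (rather than plain -i) is needed so that row 0 can also
   be marked: -0 would be indistinguishable from 0. */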

/******************************************************************************
 *
 * hypre_IJVectorAssemblePar
 *
 * currently tests existence of ParVector object and its partitioning
 *
 *****************************************************************************/
int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = hypre_IJVectorTranslator(vector);
   HYPRE_BigInt *partitioning = hypre_ParVectorPartitioning(par_vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);

   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorAssemblePar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!IJpartitioning)
   {
      printf("IJpartitioning == NULL -- ");
      printf("hypre_IJVectorAssemblePar\n");
      printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!partitioning)
   {
      printf("partitioning == NULL -- ");
      printf("hypre_IJVectorAssemblePar\n");
      printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

   if (aux_vector)
   {
      int off_proc_elmts, current_num_elmts;
      int max_off_proc_elmts;
      HYPRE_BigInt *off_proc_i;
      double *off_proc_data;
      current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      MPI_Allreduce(&current_num_elmts, &off_proc_elmts, 1, MPI_INT,
                    MPI_SUM, comm);
      if (off_proc_elmts)
      {
         max_off_proc_elmts = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
         off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
         hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
                        current_num_elmts, off_proc_i, off_proc_data);
         hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector));
         hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector));
         hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
         hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;
      }
   }

   return hypre_error_flag;
}
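
/* Illustrative note (not part of the original source): because of the
   MPI_Allreduce above, assembly is collective - every rank in comm must
   call hypre_IJVectorAssemblePar, even ranks that staged no off-processor
   entries (their current_num_elmts is simply 0). A hypothetical caller:

      hypre_IJVectorSetValuesPar(vector, my_n, my_indices, my_values);
      hypre_IJVectorAssemblePar(vector);  // collective; do not guard
                                          // with "if (my_n > 0)"
*/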

/******************************************************************************
 *
 * hypre_IJVectorGetValuesPar
 *
 * gets a potentially noncontiguous set of IJVectorPar components
 *
 *****************************************************************************/
int
hypre_IJVectorGetValuesPar(hypre_IJVector *vector,
                           int num_values,
                           const HYPRE_BigInt *indices,
                           double *values )
{
   int my_id;
   int j, k;
   HYPRE_BigInt vec_start, vec_stop;
   double *data;
   int ierr = 0;

   HYPRE_BigInt *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      printf("par_vector == NULL -- ");
      printf("hypre_IJVectorGetValuesPar\n");
      printf("**** Vector storage is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!IJpartitioning)
   {
      printf("IJpartitioning == NULL -- ");
      printf("hypre_IJVectorGetValuesPar\n");
      printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }
   if (!local_vector)
   {
      printf("local_vector == NULL -- ");
      printf("hypre_IJVectorGetValuesPar\n");
      printf("**** Vector local data is either unallocated or orphaned ****\n");
      hypre_error_in_arg(1);
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop  = IJpartitioning[1];
#else
   vec_start = IJpartitioning[my_id];
   vec_stop  = IJpartitioning[my_id+1];
#endif

   if (vec_start > vec_stop)
   {
      printf("vec_start > vec_stop -- ");
      printf("hypre_IJVectorGetValuesPar\n");
      printf("**** This vector partitioning should not occur ****\n");
      hypre_error_in_arg(1);
   }

   /* Determine whether indices points to local indices only,
      and if not, let user know of catastrophe and exit.
      If indices == NULL, assume that num_values components are to be
      retrieved from block starting at vec_start */

   if (indices)
   {
      for (j = 0; j < num_values; j++)
      {
         ierr += (indices[j] < vec_start);
         ierr += (indices[j] >= vec_stop);
      }
   }

   if (ierr)
   {
      printf("indices beyond local range -- ");
      printf("hypre_IJVectorGetValuesPar\n");
      printf("**** Indices specified are unusable ****\n");
      hypre_error_in_arg(3);
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      for (j = 0; j < num_values; j++)
      {
         k = (int)(indices[j] - vec_start);
         values[j] = data[k];
      }
   }
   else
   {
      for (j = 0; j < num_values; j++)
         values[j] = data[j];
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorAssembleOffProcValsPar
 *
 * This is for handling set and get values calls to off-proc. entries -
 * it is called from assemble. There is an alternate version for
 * when the assumed partition is being used.
 *
 *****************************************************************************/

#ifndef HYPRE_NO_GLOBAL_PARTITION

int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      int max_off_proc_elmts,
                                      int current_num_elmts,
                                      HYPRE_BigInt *off_proc_i,
                                      double *off_proc_data)
{
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);
   MPI_Request *requests;
   MPI_Status *status;
   int i, j, j2;
   int iii, indx, ip;
   HYPRE_BigInt first_index, row;
   int proc_id, num_procs, my_id;
   int num_sends, num_sends2;
   int num_recvs;
   int num_requests;
   int vec_start, vec_len;
   int *send_procs;
   HYPRE_BigInt *send_i;
   int *send_map_starts;
   int *recv_procs;
   HYPRE_BigInt *recv_i;
   int *recv_vec_starts;
   int *info;
   int *int_buffer;
   int *proc_id_mem;
   HYPRE_BigInt *partitioning;
   int *displs;
   int *recv_buf;
   double *send_data;
   double *recv_data;
   double *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));

   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm, &my_id);
   partitioning = hypre_IJVectorPartitioning(vector);

   first_index = partitioning[my_id];

   info = hypre_CTAlloc(int, num_procs);
   proc_id_mem = hypre_CTAlloc(int, current_num_elmts);
   for (i = 0; i < current_num_elmts; i++)
   {
      row = off_proc_i[i];
      if (row < 0) row = -row-1;
      proc_id = hypre_FindProc(partitioning, row, num_procs);
      proc_id_mem[i] = proc_id;
      info[proc_id]++;
   }

   /* determine send_procs and amount of data to be sent */
   num_sends = 0;
   for (i = 0; i < num_procs; i++)
   {
      if (info[i])
      {
         num_sends++;
      }
   }
   num_sends2 = 2*num_sends;
   send_procs = hypre_CTAlloc(int, num_sends);
   send_map_starts = hypre_CTAlloc(int, num_sends+1);
   int_buffer = hypre_CTAlloc(int, num_sends2);
   j = 0;
   j2 = 0;
   send_map_starts[0] = 0;
   for (i = 0; i < num_procs; i++)
   {
      if (info[i])
      {
         send_procs[j++] = i;
         send_map_starts[j] = send_map_starts[j-1]+info[i];
         int_buffer[j2++] = i;
         int_buffer[j2++] = info[i];
      }
   }

   MPI_Allgather(&num_sends2, 1, MPI_INT, info, 1, MPI_INT, comm);

   displs = hypre_CTAlloc(int, num_procs+1);
   displs[0] = 0;
   for (i = 1; i < num_procs+1; i++)
      displs[i] = displs[i-1]+info[i-1];
   recv_buf = hypre_CTAlloc(int, displs[num_procs]);

   MPI_Allgatherv(int_buffer, num_sends2, MPI_INT, recv_buf, info, displs,
                  MPI_INT, comm);

   hypre_TFree(int_buffer);
   hypre_TFree(info);

   /* determine recv procs and amount of data to be received */
   num_recvs = 0;
   for (j = 0; j < displs[num_procs]; j += 2)
   {
      if (recv_buf[j] == my_id)
         num_recvs++;
   }

   recv_procs = hypre_CTAlloc(int, num_recvs);
   recv_vec_starts = hypre_CTAlloc(int, num_recvs+1);

   j2 = 0;
   recv_vec_starts[0] = 0;
   for (i = 0; i < num_procs; i++)
   {
      for (j = displs[i]; j < displs[i+1]; j += 2)
      {
         if (recv_buf[j] == my_id)
         {
            recv_procs[j2++] = i;
            recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];
         }
         if (j2 == num_recvs) break;
      }
   }
   hypre_TFree(recv_buf);
   hypre_TFree(displs);

   /* set up data to be sent to send procs */
   /* send_i contains for each send proc
      indices, send_data contains corresponding values */

   send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends]);
   send_data = hypre_CTAlloc(double, send_map_starts[num_sends]);
   recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs]);
   recv_data = hypre_CTAlloc(double, recv_vec_starts[num_recvs]);

   for (i = 0; i < current_num_elmts; i++)
   {
      proc_id = proc_id_mem[i];
      indx = hypre_BinarySearch(send_procs, proc_id, num_sends);
      iii = send_map_starts[indx];
      send_i[iii] = off_proc_i[i];
      send_data[iii] = off_proc_data[i];
      send_map_starts[indx]++;
   }

   hypre_TFree(proc_id_mem);

   for (i = num_sends; i > 0; i--)
   {
      send_map_starts[i] = send_map_starts[i-1];
   }
   send_map_starts[0] = 0;

   num_requests = num_recvs+num_sends;

   requests = hypre_CTAlloc(MPI_Request, num_requests);
   status = hypre_CTAlloc(MPI_Status, num_requests);

   j = 0;
   for (i = 0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      MPI_Irecv(&recv_i[vec_start], vec_len, MPI_HYPRE_BIG_INT, ip, 0, comm,
                &requests[j++]);
   }

   for (i = 0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      MPI_Isend(&send_i[vec_start], vec_len, MPI_HYPRE_BIG_INT, ip, 0, comm,
                &requests[j++]);
   }

   if (num_requests)
   {
      MPI_Waitall(num_requests, requests, status);
   }

   j = 0;
   for (i = 0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      MPI_Irecv(&recv_data[vec_start], vec_len, MPI_DOUBLE, ip, 0, comm,
                &requests[j++]);
   }

   for (i = 0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      MPI_Isend(&send_data[vec_start], vec_len, MPI_DOUBLE, ip, 0, comm,
                &requests[j++]);
   }

   if (num_requests)
   {
      MPI_Waitall(num_requests, requests, status);
      hypre_TFree(requests);
      hypre_TFree(status);
   }

   hypre_TFree(send_i);
   hypre_TFree(send_data);
   hypre_TFree(send_procs);
   hypre_TFree(send_map_starts);
   hypre_TFree(recv_procs);

   for (i = 0; i < recv_vec_starts[num_recvs]; i++)
   {
      row = recv_i[i];
      if (row < 0)
      {
         row = -row-1;
         j = (int)(row - first_index);
         data[j] += recv_data[i];
      }
      else
      {
         j = (int)(row - first_index);
         data[j] = recv_data[i];
      }
   }

   hypre_TFree(recv_vec_starts);
   hypre_TFree(recv_i);
   hypre_TFree(recv_data);

   return hypre_error_flag;
}
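
/* Illustrative summary (not part of the original source) of the exchange
   pattern implemented above, for the global-partition case:

      1. count stash entries per owner rank         (hypre_FindProc)
      2. MPI_Allgather/Allgatherv of (dest, count) pairs - every rank
         learns who will send to it and how much
      3. round 1: nonblocking exchange of indices   (MPI_Irecv/Isend + Waitall)
      4. round 2: nonblocking exchange of values    (MPI_Irecv/Isend + Waitall)
      5. decode signs: negative index means add, otherwise set

   The Allgather steps require O(num_procs) storage per rank, which is
   presumably why the assumed-partition variant below exists for runs at
   large process counts. */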

#else

/* assumed partition version */

int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      int max_off_proc_elmts,
                                      int current_num_elmts,
                                      HYPRE_BigInt *off_proc_i,
                                      double *off_proc_data)
{
   int myid;
   HYPRE_BigInt global_num_rows;
   int i, j, in, k;
   int proc_id, last_proc, prev_id, tmp_id;
   int max_response_size;
   int ex_num_contacts = 0;
   HYPRE_BigInt range_start, range_end;
   int storage;
   int indx;
   int num_ranges, row_count;
   HYPRE_BigInt row;
   int num_recvs;
   int counter;
   HYPRE_BigInt upper_bound;
   int num_real_procs;
   int first_index;

   HYPRE_BigInt *big_ex_contact_buf = NULL;
   HYPRE_BigInt *row_list = NULL;
   int *a_proc_id = NULL, *orig_order = NULL;
   int *real_proc_id = NULL, *us_real_proc_id = NULL;
   int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   int *recv_starts = NULL;
   int *response_buf_starts = NULL;
   HYPRE_BigInt *response_buf = NULL;
   int *num_rows_per_proc = NULL;
   int tmp_int;
   int obj_size_bytes, int_size, double_size, big_int_size;

   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;

   double tmp_double;
   double *vector_data;
   double value;

   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;

   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = hypre_IJVectorObject(vector);

   hypre_IJAssumedPart *apart;

   MPI_Comm_rank(comm, &myid);

   global_num_rows = hypre_ParVectorGlobalSize(par_vector);

   /* verify that we have created the assumed partition */

   if (hypre_ParVectorAssumedPartition(par_vector) == NULL)
   {
      hypre_ParVectorCreateAssumedPartition(par_vector);
   }
   apart = hypre_ParVectorAssumedPartition(par_vector);

   /* get the assumed processor id for each row */
   a_proc_id = hypre_CTAlloc(int, current_num_elmts);
   orig_order = hypre_CTAlloc(int, current_num_elmts);
   real_proc_id = hypre_CTAlloc(int, current_num_elmts);
   row_list = hypre_CTAlloc(HYPRE_BigInt, current_num_elmts);

   if (current_num_elmts > 0)
   {
      for (i = 0; i < current_num_elmts; i++)
      {
         row = off_proc_i[i];
         if (row < 0) row = -row-1;
         row_list[i] = row;
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_num_rows,
                                              &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }

      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also... */

      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0,
                        current_num_elmts - 1);

      /* calculate the number of contacts */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i = 1; i < current_num_elmts; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }

   /* now we will go through and create a contact list - we need to contact
      the assumed processors and find out who the actual row owner is - we
      will contact with a range (2 numbers) */

   ex_contact_procs = hypre_CTAlloc(int, ex_num_contacts);
   ex_contact_vec_starts = hypre_CTAlloc(int, ex_num_contacts+1);
   big_ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2);

   counter = 0;
   range_end = -1;
   for (i = 0; i < current_num_elmts; i++)
   {
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];

         /* end of prev. range */
         if (counter > 0) big_ex_contact_buf[counter*2 - 1] = row_list[i-1];

         /* start new range */
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter*2;
         big_ex_contact_buf[counter*2] = row_list[i];
         counter++;

         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_num_rows,
                                           &range_start, &range_end);
      }
   }

   /* finish the starts */
   ex_contact_vec_starts[counter] = counter*2;
   /* finish the last range */
   if (counter > 0)
      big_ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];

   /* create response object - can use same fill response as used in the
      commpkg routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses */
   response_obj1.data2 = NULL;

   max_response_size = 6; /* 6 means we can fit 3 ranges */

   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          big_ex_contact_buf, ex_contact_vec_starts,
                          sizeof(HYPRE_BigInt), sizeof(HYPRE_BigInt),
                          &response_obj1, max_response_size, 4,
                          comm, (void**) &response_buf,
                          &response_buf_starts);

   /* now response_buf contains
      a proc_id followed by an upper bound for the range. */

   hypre_TFree(ex_contact_procs);
   hypre_TFree(big_ex_contact_buf);
   hypre_TFree(ex_contact_vec_starts);

   hypre_TFree(a_proc_id);

   /* how many ranges were returned? */
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges/2;

   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;

   /* loop through ranges - create a list of actual processor ids */
   for (i = 0; i < num_ranges; i++)
   {
      upper_bound = response_buf[i*2+1];
      counter = 0;
      tmp_id = (int) response_buf[i*2];

      /* loop through row_list entries - counting how many are in the range */
      while (j < current_num_elmts && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }

   /* now we have the list of real processor ids (real_proc_id) -
      and the number of distinct ones -
      so now we can set up data to be sent - we have int and double data
      (row number and value) - we will send everything as a void since
      we may not know the relative sizes of ints and doubles */

   /* first find out how many elements to send per proc - so we can do
      storage */

   int_size = sizeof(int);
   double_size = sizeof(double);
   big_int_size = sizeof(HYPRE_BigInt);

   obj_size_bytes = hypre_max(big_int_size, double_size);

   ex_contact_procs = hypre_CTAlloc(int, num_real_procs);
   num_rows_per_proc = hypre_CTAlloc(int, num_real_procs);

   counter = 0;

   if (num_real_procs > 0)
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;

      /* loop through real procs - these are sorted
         (row_list is sorted also) */
      for (i = 1; i < current_num_elmts; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /* another row */
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
         }
      }
   }

   /* calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(int, num_real_procs + 1);
   ex_contact_vec_starts[0] = -1;

   for (i = 0; i < num_real_procs; i++)
   {
      storage += 1 + 2*num_rows_per_proc[i];
      ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
   }

   void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);
   index_ptr = void_contact_buf; /* step through with this index */

   /* set up data to be sent to send procs */
   /* for each proc, void_contact_buf contains #rows, row #, data, etc. */

   /* un-sort real_proc_id - we want to access data arrays in order */

   us_real_proc_id = hypre_CTAlloc(int, current_num_elmts);
   for (i = 0; i < current_num_elmts; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id);

   prev_id = -1;
   for (i = 0; i < current_num_elmts; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row_list[i] - you lose the negative signs that
         differentiate add/set values */
      row = off_proc_i[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in = ex_contact_vec_starts[indx];

      index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

      /* first time for this processor - add the number of rows to the
         buffer */
      if (in < 0)
      {
         in = -in - 1;
         /* re-calc. index_ptr since in was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

         tmp_int = num_rows_per_proc[indx];
         memcpy( index_ptr, &tmp_int, int_size);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);

         in++;
      }
      /* add row # */
      memcpy( index_ptr, &row, big_int_size);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* add value */
      tmp_double = off_proc_data[i];
      memcpy( index_ptr, &tmp_double, double_size);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* increment the indexes to keep track of where we are - fix later */
      ex_contact_vec_starts[indx] = in;
   }

   /* some clean up */

   hypre_TFree(response_buf);
   hypre_TFree(response_buf_starts);

   hypre_TFree(us_real_proc_id);
   hypre_TFree(orig_order);
   hypre_TFree(row_list);
   hypre_TFree(num_rows_per_proc);

   for (i = num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
   }

   ex_contact_vec_starts[0] = 0;

   /* now send the data */

   /***********************************/
   /* now get the info in send_proc_obj */

   /* the response we expect is just a confirmation */
   response_buf = NULL;
   response_buf_starts = NULL;

   /* build the response object */

   /* use the send_proc_obj for the info kept from contacts */
   /* estimate initial storage allocation */

   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id = NULL; /* don't care who sent it to us */
   send_proc_obj.vec_starts =
      hypre_CTAlloc(int, send_proc_obj.storage_length + 1);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_MAlloc(obj_size_bytes*send_proc_obj.element_storage_length);

   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;

   max_response_size = 0;

   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts,
                          obj_size_bytes, 0, &response_obj2,
                          max_response_size, 5,
                          comm, (void **) &response_buf,
                          &response_buf_starts);

   /***********************************/

   hypre_TFree(response_buf);
   hypre_TFree(response_buf_starts);

   hypre_TFree(ex_contact_procs);
   hypre_TFree(void_contact_buf);
   hypre_TFree(ex_contact_vec_starts);

   /* Now we can unpack the send_proc_objects and either set or add to
      the vector data */

   num_recvs = send_proc_obj.length;

   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;

   vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
   first_index = hypre_ParVectorFirstIndex(par_vector);

   for (i = 0; i < num_recvs; i++)
   {
      indx = recv_starts[i];

      /* get the number of rows for this recv */
      memcpy( &row_count, recv_data_ptr, int_size);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;

      for (j = 0; j < row_count; j++) /* for each row: unpack info */
      {
         /* row # */
         memcpy( &row, recv_data_ptr, big_int_size);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* value */
         memcpy( &value, recv_data_ptr, double_size);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         if (row < 0) /* add */
         {
            row = -row-1;
            k = row - first_index;
            vector_data[k] += value;
         }
         else /* set */
         {
            k = row - first_index;
            vector_data[k] = value;
         }
      }
   }

   hypre_TFree(send_proc_obj.v_elements);
   hypre_TFree(send_proc_obj.vec_starts);

   return hypre_error_flag;
}

#endif
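
/* Illustrative sketch (not part of the original source): the void-buffer
   layout built by the packing loop above, for a hypothetical destination
   proc owed two entries - a "set" of row 40 and an "add" to row 7 - with
   obj_size_bytes = max(sizeof(HYPRE_BigInt), sizeof(double)), e.g. 8:

      slot 0: (int)    2       number of rows for this proc
      slot 1: (BigInt) 40      row >= 0        => set
      slot 2: (double) 3.5     value
      slot 3: (BigInt) -8      row = -7-1 < 0  => add
      slot 4: (double) 1.25    value

   Every item is written at an obj_size_bytes stride, so the receiver can
   step through the buffer without knowing the relative sizes of the
   senders' int and double types. */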