source: CIVL/examples/mpi-omp/AMG2013/parcsr_mv/par_vector.c

/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.4 $
 ***********************************************************************EHEADER*/



/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "headers.h"
#include <assert.h>

#ifdef HYPRE_NO_GLOBAL_PARTITION
int hypre_FillResponseParToVectorAll(void*, int, int, void*, MPI_Comm, void**, int*);
#endif


/*--------------------------------------------------------------------------
 * hypre_ParVectorCreate
 *--------------------------------------------------------------------------*/

/* If create is called for HYPRE_NO_GLOBAL_PARTITION and partitioning is NOT null,
   then it is assumed that it is an array of length 2 containing the start row of
   the calling processor followed by the start row of the next processor - AHB 6/05 */


hypre_ParVector *
hypre_ParVectorCreate( MPI_Comm comm,
                       HYPRE_BigInt global_size,
                       HYPRE_BigInt *partitioning )
{
   hypre_ParVector *vector;
   int num_procs, my_id;

   if (global_size < 0)
   {
      hypre_error_in_arg(2);
      return NULL;
   }
   vector = hypre_CTAlloc(hypre_ParVector, 1);
   MPI_Comm_rank(comm,&my_id);

   if (!partitioning)
   {
      MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
      hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning);
#else
      hypre_GeneratePartitioning(global_size, num_procs, &partitioning);
#endif
   }

   hypre_ParVectorAssumedPartition(vector) = NULL;

   hypre_ParVectorComm(vector) = comm;
   hypre_ParVectorGlobalSize(vector) = global_size;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(vector) = partitioning[0];
   hypre_ParVectorLastIndex(vector) = partitioning[1]-1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[1]-partitioning[0]);
#else
   hypre_ParVectorFirstIndex(vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] -1;
   hypre_ParVectorPartitioning(vector) = partitioning;
   hypre_ParVectorLocalVector(vector) =
      hypre_SeqVectorCreate(partitioning[my_id+1]-partitioning[my_id]);
#endif

   /* set defaults */
   hypre_ParVectorOwnsData(vector) = 1;
   hypre_ParVectorOwnsPartitioning(vector) = 1;

   return vector;
}
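
/* Editorial usage sketch (not part of the original file): a typical create /
   initialize / destroy sequence when the caller passes NULL and lets hypre
   build the partitioning.  The global length n and the variable name x are
   illustrative assumptions, not taken from this source.

      hypre_ParVector *x;
      HYPRE_BigInt     n = 1000;

      x = hypre_ParVectorCreate(MPI_COMM_WORLD, n, NULL);
      hypre_ParVectorInitialize(x);             allocates the local data
      hypre_ParVectorSetConstantValues(x, 1.0);
      ...
      hypre_ParVectorDestroy(x);                frees data and partitioning, since both are owned
*/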

/*--------------------------------------------------------------------------
 * hypre_ParMultiVectorCreate
 *--------------------------------------------------------------------------*/

/*hypre_ParVector *
hypre_ParMultiVectorCreate( MPI_Comm comm,
                            int global_size,
                            int *partitioning,
                            int num_vectors )
{*/
   /* note that global_size is the global length of a single vector */
   /*hypre_ParVector * vector = hypre_ParVectorCreate( comm, global_size, partitioning );
   hypre_ParVectorNumVectors(vector) = num_vectors;
   return vector;
}*/


/*--------------------------------------------------------------------------
 * hypre_ParVectorDestroy
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorDestroy( hypre_ParVector *vector )
{
   if (vector)
   {
      if ( hypre_ParVectorOwnsData(vector) )
      {
         hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector));
      }
      if ( hypre_ParVectorOwnsPartitioning(vector) )
      {
         hypre_TFree(hypre_ParVectorPartitioning(vector));
      }

      if (hypre_ParVectorAssumedPartition(vector))
         hypre_ParVectorDestroyAssumedPartition(vector);

      hypre_TFree(vector);
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInitialize
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorInitialize( hypre_ParVector *vector )
{
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_SeqVectorInitialize(hypre_ParVectorLocalVector(vector));

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetDataOwner
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorSetDataOwner( hypre_ParVector *vector,
                             int owns_data )
{
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_ParVectorOwnsData(vector) = owns_data;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetPartitioningOwner
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector,
                                     int owns_partitioning )
{
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetNumVectors
 * call before calling hypre_ParVectorInitialize
 * probably this will do more harm than good, use hypre_ParMultiVectorCreate
 *--------------------------------------------------------------------------*/
#if 0
int
hypre_ParVectorSetNumVectors( hypre_ParVector *vector,
                              int num_vectors )
{
   int ierr=0;
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(vector);

   hypre_SeqVectorSetNumVectors( local_vector, num_vectors );

   return ierr;
}
#endif
/*--------------------------------------------------------------------------
 * hypre_ParVectorRead
 *--------------------------------------------------------------------------*/

hypre_ParVector
*hypre_ParVectorRead( MPI_Comm comm,
                      const char *file_name )
{
   char new_file_name[80];
   hypre_ParVector *par_vector;
   int my_id, num_procs;
   HYPRE_BigInt *partitioning;
   HYPRE_BigInt global_size;
   int i;
   FILE *fp;

   MPI_Comm_rank(comm,&my_id);
   MPI_Comm_size(comm,&num_procs);

   partitioning = hypre_CTAlloc(HYPRE_BigInt,num_procs+1);

   sprintf(new_file_name,"%s.INFO.%d",file_name,my_id);
   fp = fopen(new_file_name, "r");
#ifdef HYPRE_LONG_LONG
   fscanf(fp, "%lld\n", &global_size);
#else
   fscanf(fp, "%d\n", &global_size);
#endif
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i < 2; i++)
#ifdef HYPRE_LONG_LONG
      fscanf(fp, "%lld\n", &partitioning[i]);
#else
      fscanf(fp, "%d\n", &partitioning[i]);
#endif
   fclose (fp);
#else
   for (i=0; i < num_procs; i++)
#ifdef HYPRE_LONG_LONG
      fscanf(fp, "%lld\n", &partitioning[i]);
#else
      fscanf(fp, "%d\n", &partitioning[i]);
#endif
   fclose (fp);
   partitioning[num_procs] = global_size;
#endif
   par_vector = hypre_CTAlloc(hypre_ParVector, 1);

   hypre_ParVectorComm(par_vector) = comm;
   hypre_ParVectorGlobalSize(par_vector) = global_size;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParVectorFirstIndex(par_vector) = partitioning[0];
   hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1;
#else
   hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id];
   hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1;
#endif

   hypre_ParVectorPartitioning(par_vector) = partitioning;

   hypre_ParVectorOwnsData(par_vector) = 1;
   hypre_ParVectorOwnsPartitioning(par_vector) = 1;

   sprintf(new_file_name,"%s.%d",file_name,my_id);
   hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 );

   return par_vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorPrint
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorPrint( hypre_ParVector *vector,
                      const char *file_name )
{
   char new_file_name[80];
   hypre_Vector *local_vector;
   MPI_Comm comm;
   int my_id, num_procs, i;
   HYPRE_BigInt *partitioning;
   HYPRE_BigInt global_size;
   FILE *fp;
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(vector);
   comm = hypre_ParVectorComm(vector);
   partitioning = hypre_ParVectorPartitioning(vector);
   global_size = hypre_ParVectorGlobalSize(vector);

   MPI_Comm_rank(comm,&my_id);
   MPI_Comm_size(comm,&num_procs);
   sprintf(new_file_name,"%s.%d",file_name,my_id);
   hypre_SeqVectorPrint(local_vector,new_file_name);
   sprintf(new_file_name,"%s.INFO.%d",file_name,my_id);
   fp = fopen(new_file_name, "w");
#ifdef HYPRE_LONG_LONG
   fprintf(fp, "%lld\n", global_size);
#else
   fprintf(fp, "%d\n", global_size);
#endif
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i < 2; i++)
#ifdef HYPRE_LONG_LONG
      fprintf(fp, "%lld\n", partitioning[i]);
#else
      fprintf(fp, "%d\n", partitioning[i]);
#endif
#else
   for (i=0; i < num_procs; i++)
#ifdef HYPRE_LONG_LONG
      fprintf(fp, "%lld\n", partitioning[i]);
#else
      fprintf(fp, "%d\n", partitioning[i]);
#endif
#endif

   fclose (fp);
   return hypre_error_flag;
}
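
/* Editorial note (not part of the original file): hypre_ParVectorPrint and
   hypre_ParVectorRead form a per-rank round trip through two files.  The data
   itself goes to "<file_name>.<rank>" via hypre_SeqVectorPrint, while
   "<file_name>.INFO.<rank>" holds the global size followed by the rank's
   partitioning entries (2 entries with HYPRE_NO_GLOBAL_PARTITION, num_procs
   entries otherwise, with the global size appended again on read).  A minimal
   sketch of the round trip; the file prefix "xvec" is an illustrative
   assumption:

      hypre_ParVectorPrint(x, "xvec");
      ...
      hypre_ParVector *y = hypre_ParVectorRead(MPI_COMM_WORLD, "xvec");
*/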

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetConstantValues
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorSetConstantValues( hypre_ParVector *v,
                                  double value )
{
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   return hypre_SeqVectorSetConstantValues(v_local,value);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetRandomValues
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorSetRandomValues( hypre_ParVector *v,
                                int seed )
{
   int my_id;
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   MPI_Comm comm = hypre_ParVectorComm(v);
   MPI_Comm_rank(comm,&my_id);

   seed *= (my_id+1);

   return hypre_SeqVectorSetRandomValues(v_local,seed);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorCopy
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorCopy( hypre_ParVector *x,
                     hypre_ParVector *y )
{
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   return hypre_SeqVectorCopy(x_local, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorCloneShallow
 * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using
 * the partitioning and data arrays of x
 *--------------------------------------------------------------------------*/

hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
   hypre_ParVector * y = hypre_ParVectorCreate(
      hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x), hypre_ParVectorPartitioning(x) );

   hypre_ParVectorOwnsData(y) = 1;
   /* ...This vector owns its local vector, although the local vector doesn't own _its_ data */
   hypre_ParVectorOwnsPartitioning(y) = 0;
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(
      hypre_ParVectorLocalVector(x) );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);

   return y;
}
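
/* Editorial note (not part of the original file): because the shallow clone
   shares x's partitioning and data arrays, x must outlive any use of the
   clone.  A minimal sketch under that assumption, with the variable name
   alias being illustrative:

      hypre_ParVector *alias = hypre_ParVectorCloneShallow(x);
      hypre_ParVectorScale(2.0, alias);     also scales x, since the data is shared
      hypre_ParVectorDestroy(alias);        the shared data and partitioning arrays are not freed
*/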
406
407/*--------------------------------------------------------------------------
408 * hypre_ParVectorScale
409 *--------------------------------------------------------------------------*/
410
411int
412hypre_ParVectorScale( double alpha,
413 hypre_ParVector *y )
414{
415 hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
416
417 return hypre_SeqVectorScale( alpha, y_local);
418}
419
420/*--------------------------------------------------------------------------
421 * hypre_ParVectorAxpy
422 *--------------------------------------------------------------------------*/
423
424int
425hypre_ParVectorAxpy( double alpha,
426 hypre_ParVector *x,
427 hypre_ParVector *y )
428{
429 hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
430 hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
431
432 return hypre_SeqVectorAxpy( alpha, x_local, y_local);
433}
434
435/*--------------------------------------------------------------------------
436 * hypre_ParVectorInnerProd
437 *--------------------------------------------------------------------------*/
438
439double
440hypre_ParVectorInnerProd( hypre_ParVector *x,
441 hypre_ParVector *y )
442{
443 MPI_Comm comm = hypre_ParVectorComm(x);
444 hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
445 hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
446
447 double result = 0.0;
448 double local_result = hypre_SeqVectorInnerProd(x_local, y_local);
449
450 MPI_Allreduce(&local_result, &result, 1, MPI_DOUBLE, MPI_SUM, comm);
451
452 return result;
453}
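
/* Editorial usage sketch (not part of the original file): the inner product is
   collective (every rank participates in the MPI_Allreduce), so all ranks of
   the vector's communicator must call it.  A common use is the Euclidean norm
   of a residual vector r; sqrt() from <math.h> is an assumption of this sketch:

      double norm = sqrt( hypre_ParVectorInnerProd(r, r) );
*/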

/*--------------------------------------------------------------------------
 * hypre_ParVectorToVectorAll:
 * generates a Vector on every proc which has a piece of the data
 * from a ParVector on several procs in comm,
 * vec_starts needs to contain the partitioning across all procs in comm
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_ParVectorToVectorAll (hypre_ParVector *par_v)
{
   MPI_Comm comm = hypre_ParVectorComm(par_v);
   HYPRE_BigInt global_size = hypre_ParVectorGlobalSize(par_v);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt *vec_starts = hypre_ParVectorPartitioning(par_v);
#endif
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_v);
   int num_procs, my_id;
   int num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector *vector;
   double *vector_data;
   double *local_data;
   int local_size;
   MPI_Request *requests;
   MPI_Status *status;
   int i, j;
   int *used_procs;
   int num_types, num_requests;
   int vec_len, proc_id;

#ifdef HYPRE_NO_GLOBAL_PARTITION

   HYPRE_BigInt *new_vec_starts;

   int num_contacts;
   int contact_proc_list[1];
   HYPRE_BigInt contact_send_buf[1];
   int contact_send_buf_starts[2];
   int max_response_size;
   int *response_recv_buf=NULL;
   int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;

   HYPRE_BigInt *send_info = NULL;
   MPI_Status status1;
   int count, tag1 = 112, tag2 = 223;
   int start;

#endif


   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_size = (int)(hypre_ParVectorLastIndex(par_v) -
                      hypre_ParVectorFirstIndex(par_v)) + 1;


/* determine procs which hold data of par_v and store ids in used_procs */
/* we need to do an exchange data for this. If I own row then I will contact
   processor 0 with the endpoint of my local range */

   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] = hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /*build the response object*/
   /*send_proc_obj will be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(int, send_proc_obj.storage_length);
   send_proc_obj.vec_starts = hypre_CTAlloc(int, send_proc_obj.storage_length + 1);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/


   hypre_DataExchangeList(num_contacts,
                          contact_proc_list, contact_send_buf,
                          contact_send_buf_starts, sizeof(HYPRE_BigInt),
                          0, &response_obj,
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,
                          &response_recv_buf_starts);

   /* now processor 0 should have a list of ranges for processors that have rows -
      these are in send_proc_obj - it needs to create the new list of processors
      and also an array of vec starts - and send to those who own row*/
   if (my_id)
   {
      if (local_size)
      {
         /* look for a message from processor 0 */
         MPI_Probe(0, tag1, comm, &status1);
         MPI_Get_count(&status1, MPI_HYPRE_BIG_INT, &count);

         send_info = hypre_CTAlloc(HYPRE_BigInt, count);
         MPI_Recv(send_info, count, MPI_HYPRE_BIG_INT, 0, tag1, comm, &status1);

         /* now unpack */
         num_types = (int) send_info[0];
         used_procs = hypre_CTAlloc(int, num_types);
         new_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_types+1);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = (int) send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts);
         hypre_TFree(send_proc_obj.id);
         hypre_TFree(send_proc_obj.elements);
         if(response_recv_buf) hypre_TFree(response_recv_buf);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs = hypre_CTAlloc(int, num_types);
      new_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_types+1);

      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      qsort0(used_procs, 0, num_types-1);
      hypre_BigQsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count = 2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_BigInt, count);
      send_info[0] = (HYPRE_BigInt) num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = (HYPRE_BigInt) used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(MPI_Request, num_types);
      status = hypre_CTAlloc(MPI_Status, num_types);

      /* don't send to myself - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }

      for (i=start; i < num_types; i++)
      {
         MPI_Isend(send_info, count, MPI_HYPRE_BIG_INT, used_procs[i], tag1, comm, &requests[i-start]);
      }
      MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status);
      hypre_TFree(requests);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts);
   hypre_TFree(send_proc_obj.id);
   hypre_TFree(send_proc_obj.elements);
   hypre_TFree(send_info);
   if(response_recv_buf) hypre_TFree(response_recv_buf);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts);

   /* now proc 0 can exit if it has no rows */
   if (!local_size) {
      hypre_TFree(used_procs);
      hypre_TFree(new_vec_starts);
      return NULL;
   }

   /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */

   /* this vector should be rather small */

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   num_requests = 2*num_types;

   requests = hypre_CTAlloc(MPI_Request, num_requests);
   status = hypre_CTAlloc(MPI_Status, num_requests);

/* initialize data exchange among used_procs and generate vector - here we
   send to ourself also*/

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (int) (new_vec_starts[i+1] - new_vec_starts[i]);
      MPI_Irecv(&vector_data[(int) new_vec_starts[i]], num_vectors*vec_len, MPI_DOUBLE,
                proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      MPI_Isend(local_data, num_vectors*local_size, MPI_DOUBLE, used_procs[i],
                tag2, comm, &requests[j++]);
   }

   MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(requests);
      hypre_TFree(status);
      hypre_TFree(used_procs);
   }

   hypre_TFree(new_vec_starts);

#else
   local_size = (int) (vec_starts[my_id+1] - vec_starts[my_id]);

/* if my_id contains no data, return NULL */

   if (!local_size)
      return NULL;

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

/* determine procs which hold data of par_v and store ids in used_procs */

   num_types = -1;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i])
         num_types++;
   num_requests = 2*num_types;

   used_procs = hypre_CTAlloc(int, num_types);
   j = 0;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i] && i-my_id)
         used_procs[j++] = i;

   requests = hypre_CTAlloc(MPI_Request, num_requests);
   status = hypre_CTAlloc(MPI_Status, num_requests);

/* initialize data exchange among used_procs and generate vector */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (int) (vec_starts[proc_id+1] - vec_starts[proc_id]);
      MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len, MPI_DOUBLE,
                proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      MPI_Isend(local_data, num_vectors*local_size, MPI_DOUBLE, used_procs[i],
                0, comm, &requests[j++]);
   }

   for (i=0; i < num_vectors*local_size; i++)
      vector_data[vec_starts[my_id]+i] = local_data[i];

   MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(used_procs);
      hypre_TFree(requests);
      hypre_TFree(status);
   }

#endif

   return vector;
}
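
/* Editorial usage sketch (not part of the original file): the gather above is
   collective over the ranks that own part of par_v; each of those ranks gets
   back a full-length sequential copy, while ranks with no local rows get NULL.
   The variable names below are illustrative assumptions:

      hypre_Vector *x_all = hypre_ParVectorToVectorAll(par_x);
      if (x_all)
      {
         double *all_data = hypre_VectorData(x_all);
         ... use the gathered entries ...
         hypre_SeqVectorDestroy(x_all);
      }
*/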

/*--------------------------------------------------------------------------
 * hypre_ParVectorPrintIJ
 *--------------------------------------------------------------------------*/

int
hypre_ParVectorPrintIJ( hypre_ParVector *vector,
                        int base_j,
                        const char *filename )
{
   MPI_Comm comm;
   HYPRE_BigInt global_size, part0;
   HYPRE_BigInt *partitioning;
   double *local_data;
   int myid, num_procs, i, j;
   char new_filename[255];
   FILE *file;
   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParVectorComm(vector);
   global_size = hypre_ParVectorGlobalSize(vector);
   partitioning = hypre_ParVectorPartitioning(vector);

   /* multivector code not written yet >>> */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1);

   MPI_Comm_rank(comm, &myid);
   MPI_Comm_size(comm, &num_procs);

   sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      printf("Error: can't open output file %s\n", new_filename);
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector));

#ifdef HYPRE_LONG_LONG
   fprintf(file, "%lld \n", global_size);
#else
   fprintf(file, "%d \n", global_size);
#endif
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i <= 2; i++)
#else
   for (i=0; i <= num_procs; i++)
#endif
   {
#ifdef HYPRE_LONG_LONG
      fprintf(file, "%lld \n", partitioning[i] + base_j);
#else
      fprintf(file, "%d \n", partitioning[i] + base_j);
#endif
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   part0 = partitioning[0];
   for (j = 0; j < (int)(partitioning[1]-part0); j++)
#else
   part0 = partitioning[myid];
   for (j = 0; j < (int)(partitioning[myid+1]-part0); j++)
#endif
   {
#ifdef HYPRE_LONG_LONG
      fprintf(file, "%lld %.14e\n", part0 + j + base_j, local_data[j]);
#else
      fprintf(file, "%d %.14e\n", part0 + j + base_j, local_data[j]);
#endif
   }

   fclose(file);

   return hypre_error_flag;
}


/*--------------------------------------------------------------------
 * hypre_FillResponseParToVectorAll
 * Fill response function for determining the send processors
 * data exchange
 *--------------------------------------------------------------------*/


int
hypre_FillResponseParToVectorAll(void *p_recv_contact_buf,
                                 int contact_size, int contact_proc, void *ro,
                                 MPI_Comm comm, void **p_send_response_buf,
                                 int *response_message_size )
{
   int myid;
   int i, index, count, elength;

   HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;

   hypre_DataExchangeResponse *response_obj = ro;

   hypre_ProcListElements *send_proc_obj = response_obj->data2;


   MPI_Comm_rank(comm, &myid );


   /*check to see if we need to allocate more space in send_proc_obj for ids*/
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
      send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id,int,
                                         send_proc_obj->storage_length);
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts,int,
                                                 send_proc_obj->storage_length + 1);
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/

   /*send proc*/
   send_proc_obj->id[count] = contact_proc;

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 10);
      elength += index;
      send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
                                               HYPRE_BigInt, elength);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   for (i=0; i< contact_size; i++)
   {
      send_proc_obj->elements[index++] = recv_contact_buf[i];
   }
   send_proc_obj->vec_starts[count+1] = index;
   send_proc_obj->length++;


   /*output - no message to return (confirmation) */
   *response_message_size = 0;


   return hypre_error_flag;

}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the vector
 * ----------------------------------------------------------------------------- */

double hypre_ParVectorLocalSumElts( hypre_ParVector * vector )
{
   return hypre_VectorSumElts( hypre_ParVectorLocalVector(vector) );
}
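
/* Editorial usage sketch (not part of the original file): the routine above is
   purely local; a global sum would be obtained by reducing the local sums over
   the vector's communicator.  A minimal sketch, with the helper name
   par_vector_global_sum being an illustrative assumption:

      static double par_vector_global_sum( hypre_ParVector *v )
      {
         double local = hypre_ParVectorLocalSumElts(v);
         double global = 0.0;
         MPI_Allreduce(&local, &global, 1, MPI_DOUBLE, MPI_SUM,
                       hypre_ParVectorComm(v));
         return global;
      }
*/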