source: CIVL/examples/fortran/nek5000/core/mpi_dummy.f
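This file is a single-process stand-in for the MPI library, used when nek5000 is built without real MPI (this copy lives in the CIVL examples tree). The routines keep the standard MPI argument lists, but collectives reduce to buffer copies, rank and size queries report process 0 of 1, and point-to-point calls print an error. As a quick orientation, the sketch below shows how a serial driver would link against these stubs; it is illustrative only and assumes that mpi_dummy.h also defines MPI_COMM_WORLD, which is not shown in this listing.

      program hello_dummy_mpi
c     Hypothetical minimal serial driver against the dummy MPI stubs.
      implicit none
      include "mpi_dummy.h"
      integer myid, nprocs, ierr

      call mpi_init ( ierr )
      call mpi_comm_rank ( MPI_COMM_WORLD, myid, ierr )
      call mpi_comm_size ( MPI_COMM_WORLD, nprocs, ierr )
c     With this library the next line always prints rank 0 of 1.
      write ( *, * ) 'rank', myid, 'of', nprocs
      call mpi_finalize ( ierr )
      end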

c*********************************************************************72
      subroutine mpi_scan(data1, data2, n, datatype,
     &                    operation, comm, ierror )

c*********************************************************************72
c
cc MPI_SCAN carries out a scan (prefix reduction) operation.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer comm
      integer data1(n)
      integer data2(n)
      integer datatype
      integer ierror
      integer operation ! currently hardwired for sum only

      ierror = MPI_SUCCESS

      if ( datatype .eq. mpi_double_precision ) then

        call copy ( data2, data1, n )

      else if ( datatype .eq. mpi_integer ) then

        call icopy ( data2, data1, n )

      else if ( datatype .eq. mpi_integer8 ) then

        call i8copy ( data2, data1, n )

      else if ( datatype .eq. mpi_real ) then

        call rrcopy ( data2, data1, n )

      else

        ierror = MPI_FAILURE

      end if

      return
      end
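
c     Note: copy, icopy, i8copy and rrcopy are not defined in this file;
c     they appear to be the vector-copy utilities provided elsewhere in
c     the nek5000 sources, each copying n elements of the matching type
c     from its second argument into its first.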

c*********************************************************************72
      subroutine mpi_abort ( comm, errorcode, ierror )

c*********************************************************************72
c
cc MPI_ABORT shuts down the processes in a given communicator.
c
      implicit none

      integer comm
      integer errorcode
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_ABORT:'
      write ( *, '(a,i12)' )
     &  ' Shut down with error code = ', errorcode

      stop
      end
      subroutine mpi_allgather ( data1, nsend, sendtype, data2,
     &  nrecv, recvtype, comm, ierror )

c*********************************************************************72
c
cc MPI_ALLGATHER gathers data from all the processes in a communicator.
c
      implicit none

      include "mpi_dummy.h"

      integer nsend

      integer comm
      integer data1(nsend)
      integer data2(nsend)
      integer ierror
      integer nrecv
      integer recvtype
      integer sendtype

      ierror = MPI_SUCCESS

      if ( sendtype .eq. mpi_double_precision ) then
        call mpi_copy_double_precision ( data1, data2, nsend, ierror )
      else if ( sendtype .eq. mpi_integer ) then
        call mpi_copy_integer ( data1, data2, nsend, ierror )
      else if ( sendtype .eq. mpi_real ) then
        call mpi_copy_real ( data1, data2, nsend, ierror )
      else
        ierror = MPI_FAILURE
      end if

      return
      end
      subroutine mpi_allgatherv ( data1, nsend, sendtype,
     &  data2, nrecv, ndispls, recvtype, comm, ierror )

c*********************************************************************72
c
cc MPI_ALLGATHERV gathers data from all the processes in a communicator.
c
      implicit none

      include "mpi_dummy.h"

      integer nsend

      integer comm
      integer data1(nsend)
      integer data2(nsend)
      integer ierror
      integer ndispls
      integer nrecv
      integer recvtype
      integer sendtype

      ierror = MPI_SUCCESS

      if ( sendtype .eq. mpi_double_precision ) then
        call mpi_copy_double_precision ( data1, data2, nsend, ierror )
      else if ( sendtype .eq. mpi_integer ) then
        call mpi_copy_integer ( data1, data2, nsend, ierror )
      else if ( sendtype .eq. mpi_real ) then
        call mpi_copy_real ( data1, data2, nsend, ierror )
      else
        ierror = MPI_FAILURE
      end if

      return
      end
      subroutine mpi_allreduce ( data1, data2, n, datatype,
     &  operation, comm, ierror )

c*********************************************************************72
c
cc MPI_ALLREDUCE carries out a reduction operation.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer comm
      integer data1(n)
      integer data2(n)
      integer datatype
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      if ( datatype .eq. mpi_double_precision ) then

        call mpi_reduce_double_precision (
     &    data1, data2, n, operation, ierror )

      else if ( datatype .eq. mpi_integer ) then

        call mpi_reduce_integer (
     &    data1, data2, n, operation, ierror )

      else if ( datatype .eq. mpi_integer8 ) then

        call mpi_reduce_integer8(
     &    data1, data2, n, operation, ierror )

      else if ( datatype .eq. mpi_real ) then

        call mpi_reduce_real (
     &    data1, data2, n, operation, ierror )

      else

        ierror = MPI_FAILURE

      end if

      return
      end
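
c     The collectives above ignore the operation argument: with a single
c     process the reduction of one contribution is that contribution, so
c     the result is simply a copy of the send buffer.  An illustrative
c     call from a serial run (assuming mpi_dummy.h defines mpi_sum and
c     mpi_comm_world, which are not used in this file) would be:
c
c        real*8 x(2), xsum(2)
c        integer ierr
c        call mpi_allreduce ( x, xsum, 2, mpi_double_precision,
c       &                     mpi_sum, mpi_comm_world, ierr )
c        ! with this library, xsum equals x afterwards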

      subroutine mpi_barrier ( comm, ierror )

c*********************************************************************72
c
cc MPI_BARRIER forces processes within a communicator to wait together.
c
      implicit none

      integer comm
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

c     A single process has nothing to wait for, so the barrier succeeds.
      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_bcast ( data, n, datatype, node, comm, ierror )

c*********************************************************************72
c
cc MPI_BCAST broadcasts data from one process to all others.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )
      integer node

      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_bsend ( data, n, datatype, iproc, itag,
     &  comm, ierror )

c*********************************************************************72
c
cc MPI_BSEND sends data from one process to another, using buffering.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_BSEND - Error!'
      write ( *, '(a)' ) ' Should not send message to self.'

      return
      end
      subroutine mpi_cart_create ( comm, ldims, dims, periods,
     &  reorder, comm_cart, ierror )

c*********************************************************************72
c
cc MPI_CART_CREATE creates a communicator for a Cartesian topology.
c
      implicit none

      integer ldims

      integer comm
      integer comm_cart
      integer dims(*)
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )
      logical periods(*)
      logical reorder

      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_cart_get ( comm, ldims, dims, periods,
     &  coords, ierror )

c*********************************************************************72
c
cc MPI_CART_GET returns the "Cartesian coordinates" of the calling process.
c
      implicit none

      integer ldims

      integer comm
      integer coords(*)
      integer dims(*)
      integer i
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )
      logical periods(*)

      ierror = MPI_SUCCESS

      do i = 1, ldims
        coords(i) = 0
      end do

      return
      end
      subroutine mpi_cart_shift ( comm, idir, idisp, isource,
     &  idest, ierror )

c*********************************************************************72
c
cc MPI_CART_SHIFT finds the destination and source for Cartesian shifts.
c
      implicit none

      integer comm
      integer idest
      integer idir
      integer idisp
      integer ierror
      integer isource
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS
      isource = 0
      idest = 0

      return
      end
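
c     The Cartesian topology stubs above accept the standard arguments
c     but describe a degenerate topology: all coordinates are returned
c     as 0 and every shift reports process 0 as both source and
c     destination.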
      subroutine mpi_comm_dup ( comm, comm_out, ierror )

c*********************************************************************72
c
cc MPI_COMM_DUP duplicates a communicator.
c
      implicit none

      integer comm
      integer comm_out
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS
      comm_out = comm

      return
      end
      subroutine mpi_comm_free ( comm, ierror )

c*********************************************************************72
c
cc MPI_COMM_FREE "frees" a communicator.
c
      implicit none

      integer comm
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_comm_rank ( comm, me, ierror )

c*********************************************************************72
c
cc MPI_COMM_RANK reports the rank of the calling process.
c
      implicit none

      integer comm
      integer ierror
      integer me
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS
      me = 0

      return
      end
      subroutine mpi_comm_size ( comm, nprocs, ierror )

c*********************************************************************72
c
cc MPI_COMM_SIZE reports the number of processes in a communicator.
c
      implicit none

      integer comm
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )
      integer nprocs

      ierror = MPI_SUCCESS
      nprocs = 1

      return
      end
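
c     With the communicator routines above, every caller sees a machine
c     of exactly one process: mpi_comm_rank always returns 0 and
c     mpi_comm_size always returns 1, whatever communicator is passed.
c     For example,
c
c        call mpi_comm_rank ( comm, myid, ierr )   ! myid = 0
c        call mpi_comm_size ( comm, np,   ierr )   ! np   = 1
c
c     so serial code follows its usual single-process path unchanged.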
      subroutine mpi_comm_split ( comm, icolor, ikey, comm_new,
     &  ierror )

c*********************************************************************72
c
cc MPI_COMM_SPLIT splits up a communicator based on a key.
c
      implicit none

      integer comm
      integer comm_new
      integer icolor
      integer ierror
      integer ikey
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

c     With one process, every split returns the original communicator.
      comm_new = comm
      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_copy_double_precision ( data1, data2, n, ierror )

c*********************************************************************72
c
cc MPI_COPY_DOUBLE_PRECISION copies a real*8 vector.
c
      implicit none

      integer n

      real*8 data1(n)
      real*8 data2(n)
      integer i
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end
      subroutine mpi_copy_integer ( data1, data2, n, ierror )

c*********************************************************************72
c
cc MPI_COPY_INTEGER copies an integer vector.
c
      implicit none

      integer n

      integer data1(n)
      integer data2(n)
      integer i
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end
      subroutine mpi_copy_real ( data1, data2, n, ierror )

c*********************************************************************72
c
cc MPI_COPY_REAL copies a real vector.
c
      implicit none

      integer n

      real data1(n)
      real data2(n)
      integer i
      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end
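
c     The three mpi_copy_* routines above are the workhorses of this
c     dummy library: every "collective" ultimately reduces to one of
c     these element-wise copies from data1 into data2.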
      subroutine mpi_finalize ( ierror )

c*********************************************************************72
c
cc MPI_FINALIZE shuts down the MPI library.
c
      implicit none

      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_get_count ( istatus, datatype, icount, ierror )

c*********************************************************************72
c
cc MPI_GET_COUNT reports the actual number of items transmitted.
c
      implicit none

      integer datatype
      integer icount
      integer ierror
      integer istatus
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_GET_COUNT - Error!'
      write ( *, '(a)' ) ' Should not query message from self.'

      return
      end
      subroutine mpi_init ( ierror )

c*********************************************************************72
c
cc MPI_INIT initializes the MPI library.
c
      implicit none

      integer ierror
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      write(6,*) 'Initialize dummy MPI library'
      ierror = MPI_SUCCESS

      return
      end
      subroutine mpi_irecv ( data, n, datatype, iproc, itag,
     &  comm, irequest, ierror )

c*********************************************************************72
c
cc MPI_IRECV receives data from another process.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer irequest
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_IRECV - Error!'
      write ( *, '(a)' ) ' Should not recv message from self.'

      return
      end
      subroutine mpi_isend ( data, n, datatype, iproc, itag,
     &  comm, request, ierror )

c*********************************************************************72
c
cc MPI_ISEND sends data from one process to another using nonblocking transmission.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )
      integer request

      request = 0
      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_ISEND - Error!'
      write ( *, '(a)' ) ' Should not send message to self.'

      return
      end
      subroutine mpi_recv ( data, n, datatype, iproc, itag,
     &  comm, istatus, ierror )

c*********************************************************************72
c
cc MPI_RECV receives data from another process within a communicator.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer istatus
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_RECV - Error!'
      write ( *, '(a)' ) ' Should not recv message from self.'

      return
      end
      subroutine mpi_reduce ( data1, data2, n, datatype, operation,
     &  receiver, comm, ierror )

c*********************************************************************72
c
cc MPI_REDUCE carries out a reduction operation.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer comm
      integer data1(n)
      integer data2(n)
      integer datatype
      integer ierror
      integer operation
      integer receiver

      ierror = MPI_SUCCESS

      if ( datatype .eq. mpi_double_precision ) then

        call mpi_reduce_double_precision (
     &    data1, data2, n, operation, ierror )

      else if ( datatype .eq. mpi_integer ) then

        call mpi_reduce_integer (
     &    data1, data2, n, operation, ierror )

      else if ( datatype .eq. mpi_real ) then

        call mpi_reduce_real (
     &    data1, data2, n, operation, ierror )

      else

        ierror = MPI_FAILURE

      end if

      return
      end
      subroutine mpi_reduce_double_precision (
     &  data1, data2, n, operation, ierror )

c*********************************************************************72
c
cc MPI_REDUCE_DOUBLE_PRECISION carries out a reduction operation on real*8 values.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      real*8 data1(n)
      real*8 data2(n)
      integer i
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end

      subroutine mpi_reduce_integer8 (
     &  data1, data2, n, operation, ierror )

c*********************************************************************72
c
cc MPI_REDUCE_INTEGER8 carries out a reduction operation on integer*8 values.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer*8 data1(n)
      integer*8 data2(n)
      integer i
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end

      subroutine mpi_reduce_integer (
     &  data1, data2, n, operation, ierror )

c*********************************************************************72
c
cc MPI_REDUCE_INTEGER carries out a reduction operation on integers.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer data1(n)
      integer data2(n)
      integer i
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end

      subroutine mpi_reduce_real (
     &  data1, data2, n, operation, ierror )

c*********************************************************************72
c
cc MPI_REDUCE_REAL carries out a reduction operation on reals.
c
c  Discussion:
c
c    The dummy library runs on a single process, so the "reduction"
c    amounts to copying DATA1 into DATA2.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      real data1(n)
      real data2(n)
      integer i
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      do i = 1, n
        data2(i) = data1(i)
      end do

      return
      end
      subroutine mpi_reduce_scatter ( data1, data2, n, datatype,
     &  operation, comm, ierror )

c*********************************************************************72
c
cc MPI_REDUCE_SCATTER carries out a reduction and scatters the result.
c
      implicit none

      include "mpi_dummy.h"

      integer n

      integer comm
      integer data1(n)
      integer data2(n)
      integer datatype
      integer ierror
      integer operation

      ierror = MPI_SUCCESS

      if ( datatype .eq. mpi_double_precision ) then
        call mpi_copy_double_precision ( data1, data2, n, ierror )
      else if ( datatype .eq. mpi_integer ) then
        call mpi_copy_integer ( data1, data2, n, ierror )
      else if ( datatype .eq. mpi_real ) then
        call mpi_copy_real ( data1, data2, n, ierror )
      else
        ierror = MPI_FAILURE
      end if

      return
      end
      subroutine mpi_rsend ( data, n, datatype, iproc, itag,
     &  comm, ierror )

c*********************************************************************72
c
cc MPI_RSEND "ready sends" data from one process to another.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_RSEND - Error!'
      write ( *, '(a)' ) ' Should not send message to self.'

      return
      end
      subroutine mpi_send ( data, n, datatype, iproc, itag,
     &  comm, ierror )

c*********************************************************************72
c
cc MPI_SEND sends data from one process to another.
c
      implicit none

      integer n

      integer comm
      integer data(n)
      integer datatype
      integer ierror
      integer iproc
      integer itag
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_SEND - Error!'
      write ( *, '(a)' ) ' Should not send message to self.'

      return
      end
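
c     All of the point-to-point stubs (mpi_bsend, mpi_irecv, mpi_isend,
c     mpi_recv, mpi_rsend, mpi_send) deliberately return MPI_FAILURE and
c     print a message: with only one process the only possible partner
c     is the caller itself, which this dummy library does not support.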
      subroutine mpi_wait ( irequest, istatus, ierror )

c*********************************************************************72
c
cc MPI_WAIT waits for a communication request to complete.
c
      implicit none

      integer ierror
      integer irequest
      integer istatus
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_WAIT - Error!'
      write ( *, '(a)' ) ' Should not wait on message from self.'

      return
      end
      subroutine mpi_waitall ( icount, irequest, istatus, ierror )

c*********************************************************************72
c
cc MPI_WAITALL waits until all communication requests have completed.
c
      implicit none

      integer icount
      integer ierror
      integer irequest
      integer istatus
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_WAITALL - Error!'
      write ( *, '(a)' ) ' Should not wait on message from self.'

      return
      end
      subroutine mpi_waitany ( icount, array_of_requests, index,
     &  istatus, ierror )

c*********************************************************************72
c
cc MPI_WAITANY waits until one communication request has completed.
c
      implicit none

      integer array_of_requests(*)
      integer icount
      integer ierror
      integer index
      integer istatus
      integer MPI_FAILURE
      parameter ( MPI_FAILURE = 1 )
      integer MPI_SUCCESS
      parameter ( MPI_SUCCESS = 0 )

      ierror = MPI_FAILURE

      write ( *, '(a)' ) ' '
      write ( *, '(a)' ) 'MPI_WAITANY - Error!'
      write ( *, '(a)' ) ' Should not wait on message from self.'

      return
      end
      function mpi_wtick ( )

c*********************************************************************72
c
cc MPI_WTICK returns the time between clock ticks.
c
      implicit none

      real*8 mpi_wtick

      mpi_wtick = 1.0D+00

      return
      end
      function mpi_wtime ( )

c*********************************************************************72
c
cc MPI_WTIME returns the elapsed wall clock time.
c
      implicit none

      real*8 mpi_wtime
      real*4 a(2),etime

      a(1) = 0.0
      a(2) = 0.0
      mpi_wtime = etime(a)

      return
      end
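
c     The two timing routines above are placeholders: mpi_wtick reports
c     a fixed 1-second resolution, and mpi_wtime relies on the
c     nonstandard (but widely supported) etime extension, so it returns
c     elapsed CPU time rather than true wall-clock time.  The short
c     stubs that follow are nek5000-specific additions written without
c     implicit none or full declarations.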

      subroutine mpi_initialized(mpi_is_initialized, ierr)

      mpi_is_initialized = 0
      ierr = 0

      return
      end

      subroutine mpi_comm_create(icomm,igroup,icommd,ierr)

      icommd = 1
      ierr = 0

      return
      end

      subroutine mpi_intercomm_create(ilcomm,ill,ipcomm,irl,itag,
     $                                newcomm,ierr)

      call exitti('mpi_intercomm_create not supported!$',1)

      return
      end

      subroutine mpi_intercomm_merge(icomm,ihigh,icommd,ierr)

      call exitti('mpi_intercomm_merge not supported!$',1)

      return
      end

      subroutine mpi_comm_group(icomm,igroup,ierr)

      igroup = 1
      ierr = 0

      return
      end

      subroutine mpi_group_free

      return
      end

      subroutine mpi_comm_get_attr(icomm,ikey,ival,iflag,ierr)

      integer*8 ival
      logical iflag

      ival  = 9999999   ! dummy value for any attribute
      iflag = .true.    ! report the dummy value as present
      ierr  = 0

      return
      end
c

      subroutine mpi_attr_get(icomm,ikey,ival,iflag,ierr)

      logical iflag

      ival  = 9999999   ! dummy value for any attribute
      iflag = .true.    ! report the dummy value as present
      ierr  = 0

      return
      end
c-----------------------------------------------------------------------
      subroutine mpi_type_get_extent(ikey,ib,isize,ierr)

      include "mpi_dummy.h"
      integer*8 ib, isize

      ib = 0
      if (ikey.eq.MPI_DOUBLE_PRECISION) isize = 8
      if (ikey.eq.MPI_INTEGER)          isize = 4
      if (ikey.eq.MPI_INTEGER8)         isize = 8

      ierr = 0

      return
      end

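For reference, a minimal sketch of a serial driver that exercises the reduction and timing stubs is shown below. It is illustrative only, not part of the library, and it assumes mpi_dummy.h defines MPI_COMM_WORLD and mpi_sum in addition to the type constants used above.

      program reduce_dummy_mpi
c     Hypothetical serial driver: "reduces" three values with the stubs.
      implicit none
      include "mpi_dummy.h"
      real*8 x(3), xsum(3), t0, t1, mpi_wtime
      integer ierr, i

      call mpi_init ( ierr )
      do i = 1, 3
        x(i) = dble(i)
      end do
      t0 = mpi_wtime ( )
      call mpi_allreduce ( x, xsum, 3, mpi_double_precision,
     &                     mpi_sum, MPI_COMM_WORLD, ierr )
      t1 = mpi_wtime ( )
c     With one process the "sum" is just a copy of x.
      write ( *, * ) 'xsum =', xsum, '  elapsed =', t1 - t0
      call mpi_finalize ( ierr )
      end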