Actual source code: mpi.c
/*
  This provides a few of the MPI-uni functions that cannot be implemented
  with C macros
*/
#include <petscsys.h>
#ifndef MPIUNI_H
  #error "Wrong mpi.h included! require mpi.h from MPIUNI"
#endif

#include <petscdevice_cupm.h>
#include <petsc/private/petscimpl.h>

#define MPI_SUCCESS 0
#define MPI_FAILURE 1

void *MPIUNI_TMP = NULL;

/*
  With MPI Uni there are exactly four distinct communicators:
  MPI_COMM_SELF, MPI_COMM_WORLD, and an MPI_Comm_dup() of each of these (duplicates of duplicates return the same communicator)

  MPI_COMM_SELF and MPI_COMM_WORLD are freed with MPI_Comm_free() in MPI_Finalize(), but in general with PETSc,
  the other communicators are freed once the last PETSc object is freed (before MPI_Finalize()).
*/
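
/*
  Illustrative sketch (not part of this file): because a communicator here is just a small integer
  index (comm_null = 0, comm_self = 1, comm_world = 2), duplication simply hands out the next free slot,

    MPI_Comm dup;
    MPI_Comm_dup(MPI_COMM_WORLD, &dup); // with a fresh MPIUNI state, dup == 3
    MPI_Comm_free(&dup);                // slot 3 is marked inactive and dup becomes MPI_COMM_NULL

  and all of the bookkeeping lives in the small static tables declared below.
*/
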
#define MAX_ATTR 256
#define MAX_COMM 128

typedef struct {
  void *attribute_val;
  int active;
} MPI_Attr;

typedef struct {
  void *extra_state;
  MPI_Delete_function *del;
  int active; /* Is this keyval in use by some comm? */
} MPI_Attr_keyval;

static MPI_Attr_keyval attr_keyval[MAX_ATTR];
static MPI_Attr attr[MAX_COMM][MAX_ATTR];
static int comm_active[MAX_COMM]; /* Boolean array indicating which comms are in use */
static int mpi_tag_ub = 100000000;
static int num_attr = 1; /* Maximal number of keyvals/attributes ever created, including the predefined MPI_TAG_UB attribute. */
static int MaxComm = 2;  /* Maximal number of communicators ever created, including comm_self(1), comm_world(2), but not comm_null(0) */
static void *MPIUNIF_mpi_in_place = 0;

#define CommIdx(comm) ((comm)-1) /* the communicator's internal index used in attr[idx][] and comm_active[idx]. comm_null does not occupy slots in attr[][] */
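
/*
  For example (illustration only): CommIdx(MPI_COMM_SELF) == 0 and CommIdx(MPI_COMM_WORLD) == 1,
  so attr[0][] / attr[1][] and comm_active[0] / comm_active[1] always describe the two built-in
  communicators, while the first user-created communicator (3) maps to index 2.
*/
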
#if defined(__cplusplus)
extern "C" {
#endif

/*
  To avoid problems with the prototypes for the system memcpy() it is duplicated here
*/
int MPIUNI_Memcpy(void *dst, const void *src, int n)
{
  if (dst == MPI_IN_PLACE || dst == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (src == MPI_IN_PLACE || src == MPIUNIF_mpi_in_place) return MPI_SUCCESS;
  if (!n) return MPI_SUCCESS;

  /* GPU-aware MPIUNI. Use synchronous copy per MPI semantics */
#if defined(PETSC_HAVE_CUDA)
  if (PetscDeviceInitialized(PETSC_DEVICE_CUDA)) {
    cudaError_t cerr = cudaMemcpy(dst, src, n, cudaMemcpyDefault);
    if (cerr != cudaSuccess) return MPI_FAILURE;
  } else
#elif defined(PETSC_HAVE_HIP)
  if (PetscDeviceInitialized(PETSC_DEVICE_HIP)) {
    hipError_t cerr = hipMemcpy(dst, src, n, hipMemcpyDefault);
    if (cerr != hipSuccess) return MPI_FAILURE;
  } else
#endif
  {
    memcpy(dst, src, n);
  }
  return MPI_SUCCESS;
}
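
/*
  Illustrative sketch (not part of this file): most collective macros in MPIUNI's mpi.h reduce to
  a call of MPIUNI_Memcpy(), so in-place reductions degenerate to no-ops, e.g.

    int sum = 42;
    MPI_Allreduce(MPI_IN_PLACE, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD); // sum stays 42

  because the src == MPI_IN_PLACE test above returns MPI_SUCCESS before any copy is attempted,
  which is the correct single-process semantics.
*/
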
static int classcnt = 0;
static int codecnt = 0;

int MPI_Add_error_class(int *cl)
{
  *cl = classcnt++;
  return MPI_SUCCESS;
}

int MPI_Add_error_code(int cl, int *co)
{
  if (cl >= classcnt) return MPI_FAILURE;
  *co = codecnt++;
  return MPI_SUCCESS;
}
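
/*
  Illustrative usage (not part of this file): the two routines above are plain counters, so

    int newclass, newcode;
    MPI_Add_error_class(&newclass);         // first call returns class 0
    MPI_Add_error_code(newclass, &newcode); // first call returns code 0

  succeeds, while asking for a code of a class that was never created returns MPI_FAILURE.
*/
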
int MPI_Type_get_envelope(MPI_Datatype datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED:
    *num_integers = 0;
    *num_addresses = 0;
    *num_datatypes = 0;
    *combiner = comb;
    break;
  case MPI_COMBINER_DUP:
    *num_integers = 0;
    *num_addresses = 0;
    *num_datatypes = 1;
    *combiner = comb;
    break;
  case MPI_COMBINER_CONTIGUOUS:
    *num_integers = 1;
    *num_addresses = 0;
    *num_datatypes = 1;
    *combiner = comb;
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}

int MPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int *array_of_integers, MPI_Aint *array_of_addresses, MPI_Datatype *array_of_datatypes)
{
  int comb = datatype >> 28;
  switch (comb) {
  case MPI_COMBINER_NAMED:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  case MPI_COMBINER_DUP:
    if (max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_datatypes[0] = datatype & 0x0fffffff;
    break;
  case MPI_COMBINER_CONTIGUOUS:
    if (max_integers < 1 || max_datatypes < 1) return MPIUni_Abort(MPI_COMM_SELF, 1);
    array_of_integers[0] = (datatype >> 8) & 0xfff;          /* count */
    array_of_datatypes[0] = (datatype & 0x0ff000ff) | 0x100; /* basic named type (count=1) from which the contiguous type is derived */
    break;
  default:
    return MPIUni_Abort(MPI_COMM_SELF, 1);
  }
  return MPI_SUCCESS;
}
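
/*
  Worked example (illustrative; the exact bit layout comes from MPIUNI's mpi.h): a derived datatype
  packs its whole description into the integer handle itself. The combiner lives in the top four
  bits and, for MPI_COMBINER_CONTIGUOUS, the element count lives in bits 8..19, so for a hypothetical
  contiguous handle ctype a query such as

    int ni, na, nd, comb, count;
    MPI_Datatype base;
    MPI_Type_get_envelope(ctype, &ni, &na, &nd, &comb);          // comb == MPI_COMBINER_CONTIGUOUS, ni == nd == 1
    MPI_Type_get_contents(ctype, 1, 0, 1, &count, NULL, &base);  // count and the named base type are decoded from ctype's bits

  needs no auxiliary tables; everything is recovered by the shifts and masks above.
*/
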
/*
  Used to set the built-in MPI_TAG_UB attribute
*/
static int Keyval_setup(void)
{
  attr[CommIdx(MPI_COMM_WORLD)][0].active = 1;
  attr[CommIdx(MPI_COMM_WORLD)][0].attribute_val = &mpi_tag_ub;
  attr[CommIdx(MPI_COMM_SELF)][0].active = 1;
  attr[CommIdx(MPI_COMM_SELF)][0].attribute_val = &mpi_tag_ub;
  attr_keyval[0].active = 1;
  return MPI_SUCCESS;
}
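
/*
  Illustrative usage (not part of this file): the standard query for the tag upper bound,

    int *tag_ub, flag;
    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &tag_ub, &flag); // *tag_ub == 100000000, flag == 1

  works because MPI_Comm_get_attr() below lazily calls Keyval_setup() for keyval 0 and hands back
  a pointer to the static mpi_tag_ub above.
*/
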
int MPI_Comm_create_keyval(MPI_Copy_function *copy_fn, MPI_Delete_function *delete_fn, int *keyval, void *extra_state)
{
  int i, keyid;
  for (i = 1; i < num_attr; i++) { /* the first attribute is always in use */
    if (!attr_keyval[i].active) {
      keyid = i;
      goto found;
    }
  }
  if (num_attr >= MAX_ATTR) return MPIUni_Abort(MPI_COMM_WORLD, 1);
  keyid = num_attr++;

found:
  attr_keyval[keyid].extra_state = extra_state;
  attr_keyval[keyid].del = delete_fn;
  attr_keyval[keyid].active = 1;
  *keyval = keyid;
  return MPI_SUCCESS;
}

/*
  The reference counting business is here to guard against the following:

    MPI_Comm_set_attr(comm, keyval, some_attr);
    MPI_Comm_free_keyval(&keyval);
    MPI_Comm_free(&comm);

  Here MPI_Comm_free() will try to destroy all of the attributes of the comm, and hence we
  should not clear the deleter or extra_state until all communicators that have the attribute
  set are either freed or have given up their attribute.

  The attribute reference count is INCREASED in:
    - MPI_Comm_create_keyval()
    - MPI_Comm_set_attr()

  The attribute reference count is DECREASED in:
    - MPI_Comm_free_keyval()
    - MPI_Comm_delete_attr() (but only if the comm has the attribute)
*/
static int MPI_Attr_dereference_keyval(int keyval)
{
  if (--(attr_keyval[keyval].active) <= 0) {
    attr_keyval[keyval].extra_state = 0;
    attr_keyval[keyval].del = 0;
  }
  return MPI_SUCCESS;
}

static int MPI_Attr_reference_keyval(int keyval)
{
  ++(attr_keyval[keyval].active);
  return MPI_SUCCESS;
}

int MPI_Comm_free_keyval(int *keyval)
{
  int ret;

  if (*keyval < 0 || *keyval >= num_attr) return MPI_FAILURE;
  if ((ret = MPI_Attr_dereference_keyval(*keyval))) return ret;
  *keyval = 0;
  return MPI_SUCCESS;
}

int MPI_Comm_set_attr(MPI_Comm comm, int keyval, void *attribute_val)
{
  int idx = CommIdx(comm), ret;
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;

  if ((ret = MPI_Comm_delete_attr(comm, keyval))) return ret;
  if ((ret = MPI_Attr_reference_keyval(keyval))) return ret;
  attr[idx][keyval].active = 1;
  attr[idx][keyval].attribute_val = attribute_val;
  return MPI_SUCCESS;
}

int MPI_Comm_delete_attr(MPI_Comm comm, int keyval)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (keyval < 0 || keyval >= num_attr) return MPI_FAILURE;
  if (attr[idx][keyval].active) {
    int ret;
    void *save_attribute_val = attr[idx][keyval].attribute_val;

    attr[idx][keyval].active = 0;
    attr[idx][keyval].attribute_val = 0;
    if (attr_keyval[keyval].del) {
      if ((ret = (*(attr_keyval[keyval].del))(comm, keyval, save_attribute_val, attr_keyval[keyval].extra_state))) return ret;
    }
    if ((ret = MPI_Attr_dereference_keyval(keyval))) return ret;
  }
  return MPI_SUCCESS;
}
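
/*
  Illustrative sketch (not part of this file) of the scenario described above MPI_Attr_dereference_keyval():

    int keyval;
    MPI_Comm_create_keyval(NULL, my_delete_fn, &keyval, NULL); // keyval reference count -> 1
    MPI_Comm_set_attr(comm, keyval, some_attr);                // count -> 2
    MPI_Comm_free_keyval(&keyval);                             // count -> 1, my_delete_fn kept
    MPI_Comm_free(&comm);                                      // MPI_Comm_delete_attr() can still call my_delete_fn, count -> 0

  Here comm, some_attr, and my_delete_fn are hypothetical user objects; the point is that the
  deleter and extra_state are only cleared once the count reaches zero.
*/
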
int MPI_Comm_get_attr(MPI_Comm comm, int keyval, void *attribute_val, int *flag)
{
  int idx = CommIdx(comm);
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!keyval) Keyval_setup();
  *flag = attr[idx][keyval].active;
  *(void **)attribute_val = attr[idx][keyval].attribute_val;
  return MPI_SUCCESS;
}

static char all_comm_names[MAX_COMM][MPI_MAX_OBJECT_NAME] = {"MPI_COMM_SELF", "MPI_COMM_WORLD"};

int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!comm_name || !resultlen) return MPI_FAILURE;
  strncpy(comm_name, all_comm_names[CommIdx(comm)], MPI_MAX_OBJECT_NAME - 1);
  *resultlen = (int)strlen(comm_name);
  return MPI_SUCCESS;
}

int MPI_Comm_set_name(MPI_Comm comm, const char *comm_name)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  if (!comm_name) return MPI_FAILURE;
  if (strlen(comm_name) > MPI_MAX_OBJECT_NAME - 1) return MPI_FAILURE;
  strncpy(all_comm_names[CommIdx(comm)], comm_name, MPI_MAX_OBJECT_NAME - 1);
  return MPI_SUCCESS;
}

int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)
{
  int j;
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  for (j = 3; j <= MaxComm; j++) {
    if (!comm_active[CommIdx(j)]) {
      comm_active[CommIdx(j)] = 1;
      *newcomm = j;
      return MPI_SUCCESS;
    }
  }
  if (MaxComm >= MAX_COMM) return MPI_FAILURE;
  *newcomm = ++MaxComm;
  comm_active[CommIdx(*newcomm)] = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *out)
{
  int j;
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  for (j = 3; j <= MaxComm; j++) {
    if (!comm_active[CommIdx(j)]) {
      comm_active[CommIdx(j)] = 1;
      *out = j;
      return MPI_SUCCESS;
    }
  }
  if (MaxComm >= MAX_COMM) return MPI_FAILURE;
  *out = ++MaxComm;
  comm_active[CommIdx(*out)] = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_free(MPI_Comm *comm)
{
  int idx = CommIdx(*comm);

  if (*comm < 1 || *comm > MaxComm) return MPI_FAILURE;
  for (int i = 0; i < num_attr; i++) {
    int ret = MPI_Comm_delete_attr(*comm, i);

    if (ret) return ret;
  }
  if (*comm >= 3) comm_active[idx] = 0;
  *comm = 0;
  return MPI_SUCCESS;
}
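
/*
  Illustrative sketch (not part of this file): MPI_Comm_dup()/MPI_Comm_create() hand out the lowest
  inactive slot >= 3 before growing MaxComm, so freed communicators are recycled:

    MPI_Comm a, b, c;
    MPI_Comm_dup(MPI_COMM_WORLD, &a); // a == 3 (first user communicator)
    MPI_Comm_dup(MPI_COMM_WORLD, &b); // b == 4
    MPI_Comm_free(&a);                // slot 3 becomes inactive, a is set to 0 (MPI_COMM_NULL)
    MPI_Comm_dup(MPI_COMM_SELF, &c);  // c == 3 again, reusing the freed slot

  assuming a fresh MPIUNI state in which no other communicators have been created.
*/
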
int MPI_Comm_size(MPI_Comm comm, int *size)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *size = 1;
  return MPI_SUCCESS;
}

int MPI_Comm_rank(MPI_Comm comm, int *rank)
{
  if (comm < 1 || comm > MaxComm) return MPI_FAILURE;
  *rank = 0;
  return MPI_SUCCESS;
}

int MPIUni_Abort(MPI_Comm comm, int errorcode)
{
  printf("MPI operation not supported by PETSc's sequential MPI wrappers\n");
  return MPI_ERR_NOSUPPORT;
}

int MPI_Abort(MPI_Comm comm, int errorcode)
{
  abort();
  return MPI_SUCCESS;
}

/* --------------------------------------------------------------------------*/

static int MPI_was_initialized = 0;
static int MPI_was_finalized = 0;

int MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized) return MPI_FAILURE;
  /* The MPI standard says "once MPI_Finalize returns, no MPI routine (not even MPI_Init) may be called", so a strictly
     standard-compliant MPIUNI would need the check 'if (MPI_was_finalized) return MPI_FAILURE;' here. We relax it to make
     life easier for users of MPIUNI, so that they can do multiple PetscInitialize()/PetscFinalize().
  */
  /* if (MPI_was_finalized) return MPI_FAILURE; */
  MPI_was_initialized = 1;
  MPI_was_finalized = 0;
  return MPI_SUCCESS;
}
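
/*
  Illustrative consequence (not part of this file): because the check above is relaxed, a driver may do

    PetscInitialize(&argc, &argv, NULL, NULL); // calls MPI_Init() internally when MPIUNI is used
    PetscFinalize();                           // calls MPI_Finalize()
    PetscInitialize(&argc, &argv, NULL, NULL); // works again with MPIUNI, unlike with a strict MPI
    PetscFinalize();

  since MPI_Finalize() below resets the counters and MPI_Init() only rejects a call made while
  already initialized.
*/
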
int MPI_Init_thread(int *argc, char ***argv, int required, int *provided)
{
  MPI_Query_thread(provided);
  return MPI_Init(argc, argv);
}

int MPI_Query_thread(int *provided)
{
  *provided = MPI_THREAD_FUNNELED;
  return MPI_SUCCESS;
}

int MPI_Finalize(void)
{
  if (MPI_was_finalized || !MPI_was_initialized) return MPI_FAILURE;
  MPI_Comm comm = MPI_COMM_WORLD;
  int ret = MPI_Comm_free(&comm);

  if (ret) return ret;
  comm = MPI_COMM_SELF;
  ret = MPI_Comm_free(&comm);
  if (ret) return ret;
  if (PetscDefined(USE_DEBUG)) {
    for (int i = 3; i <= MaxComm; ++i) {
      if (comm_active[CommIdx(i)]) printf("MPIUni warning: MPI communicator %d is not freed before MPI_Finalize()\n", i);
    }

    for (int i = 1; i <= MaxComm; ++i) {
      for (int j = 0; j < num_attr; ++j) {
        if (attr[CommIdx(i)][j].active) printf("MPIUni warning: MPI communicator %d attribute %d was not freed before MPI_Finalize()\n", i, j);
      }
    }

    for (int i = 1; i < num_attr; ++i) {
      if (attr_keyval[i].active) printf("MPIUni warning: MPI attribute %d was not freed before MPI_Finalize()\n", i);
    }
  }

  /* reset counters */
  MaxComm = 2;
  num_attr = 1;
  MPI_was_finalized = 1;
  MPI_was_initialized = 0;
  PETSC_COMM_WORLD = MPI_COMM_NULL;
  return MPI_SUCCESS;
}

int MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return MPI_SUCCESS;
}

int MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return MPI_SUCCESS;
}

/* ------------------- Fortran versions of several routines ------------------ */

#if defined(PETSC_HAVE_FORTRAN_CAPS)
  #define mpiunisetmoduleblock_ MPIUNISETMODULEBLOCK
  #define mpiunisetfortranbasepointers_ MPIUNISETFORTRANBASEPOINTERS
  #define petsc_mpi_init_ PETSC_MPI_INIT
  #define petsc_mpi_finalize_ PETSC_MPI_FINALIZE
  #define petsc_mpi_comm_size_ PETSC_MPI_COMM_SIZE
  #define petsc_mpi_comm_rank_ PETSC_MPI_COMM_RANK
  #define petsc_mpi_abort_ PETSC_MPI_ABORT
  #define petsc_mpi_reduce_ PETSC_MPI_REDUCE
  #define petsc_mpi_allreduce_ PETSC_MPI_ALLREDUCE
  #define petsc_mpi_barrier_ PETSC_MPI_BARRIER
  #define petsc_mpi_bcast_ PETSC_MPI_BCAST
  #define petsc_mpi_gather_ PETSC_MPI_GATHER
  #define petsc_mpi_allgather_ PETSC_MPI_ALLGATHER
  #define petsc_mpi_comm_split_ PETSC_MPI_COMM_SPLIT
  #define petsc_mpi_scan_ PETSC_MPI_SCAN
  #define petsc_mpi_send_ PETSC_MPI_SEND
  #define petsc_mpi_recv_ PETSC_MPI_RECV
  #define petsc_mpi_reduce_scatter_ PETSC_MPI_REDUCE_SCATTER
  #define petsc_mpi_irecv_ PETSC_MPI_IRECV
  #define petsc_mpi_isend_ PETSC_MPI_ISEND
  #define petsc_mpi_sendrecv_ PETSC_MPI_SENDRECV
  #define petsc_mpi_test_ PETSC_MPI_TEST
  #define petsc_mpi_waitall_ PETSC_MPI_WAITALL
  #define petsc_mpi_waitany_ PETSC_MPI_WAITANY
  #define petsc_mpi_allgatherv_ PETSC_MPI_ALLGATHERV
  #define petsc_mpi_alltoallv_ PETSC_MPI_ALLTOALLV
  #define petsc_mpi_comm_create_ PETSC_MPI_COMM_CREATE
  #define petsc_mpi_address_ PETSC_MPI_ADDRESS
  #define petsc_mpi_pack_ PETSC_MPI_PACK
  #define petsc_mpi_unpack_ PETSC_MPI_UNPACK
  #define petsc_mpi_pack_size_ PETSC_MPI_PACK_SIZE
  #define petsc_mpi_type_struct_ PETSC_MPI_TYPE_STRUCT
  #define petsc_mpi_type_commit_ PETSC_MPI_TYPE_COMMIT
  #define petsc_mpi_wtime_ PETSC_MPI_WTIME
  #define petsc_mpi_cancel_ PETSC_MPI_CANCEL
  #define petsc_mpi_comm_dup_ PETSC_MPI_COMM_DUP
  #define petsc_mpi_comm_free_ PETSC_MPI_COMM_FREE
  #define petsc_mpi_get_count_ PETSC_MPI_GET_COUNT
  #define petsc_mpi_get_processor_name_ PETSC_MPI_GET_PROCESSOR_NAME
  #define petsc_mpi_initialized_ PETSC_MPI_INITIALIZED
  #define petsc_mpi_iprobe_ PETSC_MPI_IPROBE
  #define petsc_mpi_probe_ PETSC_MPI_PROBE
  #define petsc_mpi_request_free_ PETSC_MPI_REQUEST_FREE
  #define petsc_mpi_ssend_ PETSC_MPI_SSEND
  #define petsc_mpi_wait_ PETSC_MPI_WAIT
  #define petsc_mpi_comm_group_ PETSC_MPI_COMM_GROUP
  #define petsc_mpi_exscan_ PETSC_MPI_EXSCAN
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
  #define mpiunisetmoduleblock_ mpiunisetmoduleblock
  #define mpiunisetfortranbasepointers_ mpiunisetfortranbasepointers
  #define petsc_mpi_init_ petsc_mpi_init
  #define petsc_mpi_finalize_ petsc_mpi_finalize
  #define petsc_mpi_comm_size_ petsc_mpi_comm_size
  #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank
  #define petsc_mpi_abort_ petsc_mpi_abort
  #define petsc_mpi_reduce_ petsc_mpi_reduce
  #define petsc_mpi_allreduce_ petsc_mpi_allreduce
  #define petsc_mpi_barrier_ petsc_mpi_barrier
  #define petsc_mpi_bcast_ petsc_mpi_bcast
  #define petsc_mpi_gather_ petsc_mpi_gather
  #define petsc_mpi_allgather_ petsc_mpi_allgather
  #define petsc_mpi_comm_split_ petsc_mpi_comm_split
  #define petsc_mpi_scan_ petsc_mpi_scan
  #define petsc_mpi_send_ petsc_mpi_send
  #define petsc_mpi_recv_ petsc_mpi_recv
  #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter
  #define petsc_mpi_irecv_ petsc_mpi_irecv
  #define petsc_mpi_isend_ petsc_mpi_isend
  #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv
  #define petsc_mpi_test_ petsc_mpi_test
  #define petsc_mpi_waitall_ petsc_mpi_waitall
  #define petsc_mpi_waitany_ petsc_mpi_waitany
  #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv
  #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv
  #define petsc_mpi_comm_create_ petsc_mpi_comm_create
  #define petsc_mpi_address_ petsc_mpi_address
  #define petsc_mpi_pack_ petsc_mpi_pack
  #define petsc_mpi_unpack_ petsc_mpi_unpack
  #define petsc_mpi_pack_size_ petsc_mpi_pack_size
  #define petsc_mpi_type_struct_ petsc_mpi_type_struct
  #define petsc_mpi_type_commit_ petsc_mpi_type_commit
  #define petsc_mpi_wtime_ petsc_mpi_wtime
  #define petsc_mpi_cancel_ petsc_mpi_cancel
  #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup
  #define petsc_mpi_comm_free_ petsc_mpi_comm_free
  #define petsc_mpi_get_count_ petsc_mpi_get_count
  #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name
  #define petsc_mpi_initialized_ petsc_mpi_initialized
  #define petsc_mpi_iprobe_ petsc_mpi_iprobe
  #define petsc_mpi_probe_ petsc_mpi_probe
  #define petsc_mpi_request_free_ petsc_mpi_request_free
  #define petsc_mpi_ssend_ petsc_mpi_ssend
  #define petsc_mpi_wait_ petsc_mpi_wait
  #define petsc_mpi_comm_group_ petsc_mpi_comm_group
  #define petsc_mpi_exscan_ petsc_mpi_exscan
#endif

#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
  #define petsc_mpi_init_ petsc_mpi_init__
  #define petsc_mpi_finalize_ petsc_mpi_finalize__
  #define petsc_mpi_comm_size_ petsc_mpi_comm_size__
  #define petsc_mpi_comm_rank_ petsc_mpi_comm_rank__
  #define petsc_mpi_abort_ petsc_mpi_abort__
  #define petsc_mpi_reduce_ petsc_mpi_reduce__
  #define petsc_mpi_allreduce_ petsc_mpi_allreduce__
  #define petsc_mpi_barrier_ petsc_mpi_barrier__
  #define petsc_mpi_bcast_ petsc_mpi_bcast__
  #define petsc_mpi_gather_ petsc_mpi_gather__
  #define petsc_mpi_allgather_ petsc_mpi_allgather__
  #define petsc_mpi_comm_split_ petsc_mpi_comm_split__
  #define petsc_mpi_scan_ petsc_mpi_scan__
  #define petsc_mpi_send_ petsc_mpi_send__
  #define petsc_mpi_recv_ petsc_mpi_recv__
  #define petsc_mpi_reduce_scatter_ petsc_mpi_reduce_scatter__
  #define petsc_mpi_irecv_ petsc_mpi_irecv__
  #define petsc_mpi_isend_ petsc_mpi_isend__
  #define petsc_mpi_sendrecv_ petsc_mpi_sendrecv__
  #define petsc_mpi_test_ petsc_mpi_test__
  #define petsc_mpi_waitall_ petsc_mpi_waitall__
  #define petsc_mpi_waitany_ petsc_mpi_waitany__
  #define petsc_mpi_allgatherv_ petsc_mpi_allgatherv__
  #define petsc_mpi_alltoallv_ petsc_mpi_alltoallv__
  #define petsc_mpi_comm_create_ petsc_mpi_comm_create__
  #define petsc_mpi_address_ petsc_mpi_address__
  #define petsc_mpi_pack_ petsc_mpi_pack__
  #define petsc_mpi_unpack_ petsc_mpi_unpack__
  #define petsc_mpi_pack_size_ petsc_mpi_pack_size__
  #define petsc_mpi_type_struct_ petsc_mpi_type_struct__
  #define petsc_mpi_type_commit_ petsc_mpi_type_commit__
  #define petsc_mpi_wtime_ petsc_mpi_wtime__
  #define petsc_mpi_cancel_ petsc_mpi_cancel__
  #define petsc_mpi_comm_dup_ petsc_mpi_comm_dup__
  #define petsc_mpi_comm_free_ petsc_mpi_comm_free__
  #define petsc_mpi_get_count_ petsc_mpi_get_count__
  #define petsc_mpi_get_processor_name_ petsc_mpi_get_processor_name__
  #define petsc_mpi_initialized_ petsc_mpi_initialized__
  #define petsc_mpi_iprobe_ petsc_mpi_iprobe__
  #define petsc_mpi_probe_ petsc_mpi_probe__
  #define petsc_mpi_request_free_ petsc_mpi_request_free__
  #define petsc_mpi_ssend_ petsc_mpi_ssend__
  #define petsc_mpi_wait_ petsc_mpi_wait__
  #define petsc_mpi_comm_group_ petsc_mpi_comm_group__
  #define petsc_mpi_exscan_ petsc_mpi_exscan__
#endif
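
/*
  Illustration (not part of this file): the macros above adapt the wrapper names below to the Fortran
  compiler's symbol mangling. For example, the C function written here as petsc_mpi_init_() is compiled as

    PETSC_MPI_INIT    when PETSC_HAVE_FORTRAN_CAPS is defined,
    petsc_mpi_init    when the compiler appends no underscore,
    petsc_mpi_init__  when PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE is defined,
    petsc_mpi_init_   otherwise (single trailing underscore, the default spelling below).
*/
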
/* Do not build fortran interface if MPI namespace collision is to be avoided */
#if defined(PETSC_HAVE_FORTRAN)

PETSC_EXTERN void mpiunisetmoduleblock_(void);

PETSC_EXTERN void mpiunisetfortranbasepointers_(void *f_mpi_in_place)
{
  MPIUNIF_mpi_in_place = f_mpi_in_place;
}

PETSC_EXTERN void petsc_mpi_init_(int *ierr)
{
  mpiunisetmoduleblock_();
  *ierr = MPI_Init((int *)0, (char ***)0);
}

PETSC_EXTERN void petsc_mpi_finalize_(int *ierr)
{
  *ierr = MPI_Finalize();
}

PETSC_EXTERN void petsc_mpi_comm_size_(MPI_Comm *comm, int *size, int *ierr)
{
  *size = 1;
  *ierr = 0;
}

PETSC_EXTERN void petsc_mpi_comm_rank_(MPI_Comm *comm, int *rank, int *ierr)
{
  *rank = 0;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_split_(MPI_Comm *comm, int *color, int *key, MPI_Comm *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_abort_(MPI_Comm *comm, int *errorcode, int *ierr)
{
  abort();
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_reduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Reduce(sendbuf, recvbuf, *count, *datatype, *op, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allreduce_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, *datatype, *op, *comm);
}

PETSC_EXTERN void petsc_mpi_barrier_(MPI_Comm *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_bcast_(void *buf, int *count, int *datatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_gather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *root, int *comm, int *ierr)
{
  *ierr = MPI_Gather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *root, *comm);
}

PETSC_EXTERN void petsc_mpi_allgather_(void *sendbuf, int *scount, int *sdatatype, void *recvbuf, int *rcount, int *rdatatype, int *comm, int *ierr)
{
  *ierr = MPI_Allgather(sendbuf, *scount, *sdatatype, recvbuf, rcount, rdatatype, *comm);
}

PETSC_EXTERN void petsc_mpi_scan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*count) * MPI_sizeof(*datatype));
}

PETSC_EXTERN void petsc_mpi_send_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_recv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_reduce_scatter_(void *sendbuf, void *recvbuf, int *recvcounts, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_irecv_(void *buf, int *count, int *datatype, int *source, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_isend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *request, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_sendrecv_(void *sendbuf, int *sendcount, int *sendtype, int *dest, int *sendtag, void *recvbuf, int *recvcount, int *recvtype, int *source, int *recvtag, int *comm, int *status, int *ierr)
{
  *ierr = MPIUNI_Memcpy(recvbuf, sendbuf, (*sendcount) * MPI_sizeof(*sendtype));
}

PETSC_EXTERN void petsc_mpi_test_(int *request, int *flag, int *status, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_waitall_(int *count, int *array_of_requests, int *array_of_statuses, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_waitany_(int *count, int *array_of_requests, int *index, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_allgatherv_(void *sendbuf, int *sendcount, int *sendtype, void *recvbuf, int *recvcounts, int *displs, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Allgatherv(sendbuf, *sendcount, *sendtype, recvbuf, recvcounts, displs, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_alltoallv_(void *sendbuf, int *sendcounts, int *sdispls, int *sendtype, void *recvbuf, int *recvcounts, int *rdispls, int *recvtype, int *comm, int *ierr)
{
  *ierr = MPI_Alltoallv(sendbuf, sendcounts, sdispls, *sendtype, recvbuf, recvcounts, rdispls, *recvtype, *comm);
}

PETSC_EXTERN void petsc_mpi_comm_create_(int *comm, int *group, int *newcomm, int *ierr)
{
  *newcomm = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_address_(void *location, MPI_Aint *address, int *ierr)
{
  *address = (MPI_Aint)((char *)location);
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_pack_(void *inbuf, int *incount, int *datatype, void *outbuf, int *outsize, int *position, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_unpack_(void *inbuf, int *insize, int *position, void *outbuf, int *outcount, int *datatype, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_pack_size_(int *incount, int *datatype, int *comm, int *size, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_struct_(int *count, int *array_of_blocklengths, int *array_of_displacements, int *array_of_types, int *newtype, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_type_commit_(int *datatype, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

double petsc_mpi_wtime_(void)
{
  return 0.0;
}

PETSC_EXTERN void petsc_mpi_cancel_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_dup_(int *comm, int *out, int *ierr)
{
  *out = *comm;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_free_(int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_get_count_(int *status, int *datatype, int *count, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_get_processor_name_(char *name, int *result_len, int *ierr, PETSC_FORTRAN_CHARLEN_T len)
{
  MPIUNI_Memcpy(name, "localhost", 9 * sizeof(char));
  *result_len = 9;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_initialized_(int *flag, int *ierr)
{
  *flag = MPI_was_initialized;
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_iprobe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_probe_(int *source, int *tag, int *comm, int *flag, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_request_free_(int *request, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_ssend_(void *buf, int *count, int *datatype, int *dest, int *tag, int *comm, int *ierr)
{
  *ierr = MPIUni_Abort(MPI_COMM_WORLD, 0);
}

PETSC_EXTERN void petsc_mpi_wait_(int *request, int *status, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_comm_group_(int *comm, int *group, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

PETSC_EXTERN void petsc_mpi_exscan_(void *sendbuf, void *recvbuf, int *count, int *datatype, int *op, int *comm, int *ierr)
{
  *ierr = MPI_SUCCESS;
}

#endif /* PETSC_HAVE_FORTRAN */

#if defined(__cplusplus)
}
#endif