Actual source code: sfimpl.h
#ifndef SFIMPL_H
#define SFIMPL_H

#include <petscvec.h>
#include <petscsf.h>
#include <petsc/private/deviceimpl.h>
#include <petsc/private/mpiutils.h>
#include <petsc/private/petscimpl.h>

PETSC_EXTERN PetscLogEvent PETSCSF_SetGraph;
PETSC_EXTERN PetscLogEvent PETSCSF_SetUp;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_BcastEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_ReduceBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_ReduceEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_FetchAndOpBegin;
PETSC_EXTERN PetscLogEvent PETSCSF_FetchAndOpEnd;
PETSC_EXTERN PetscLogEvent PETSCSF_EmbedSF;
PETSC_EXTERN PetscLogEvent PETSCSF_DistSect;
PETSC_EXTERN PetscLogEvent PETSCSF_SectSF;
PETSC_EXTERN PetscLogEvent PETSCSF_RemoteOff;
PETSC_EXTERN PetscLogEvent PETSCSF_Pack;
PETSC_EXTERN PetscLogEvent PETSCSF_Unpack;
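
/* Illustrative sketch (not part of this header): these events are typically bracketed around the
   corresponding SF phases so the time shows up under -log_view, e.g.

     PetscCall(PetscLogEventBegin(PETSCSF_Pack, sf, 0, 0, 0));
     ... pack root/leaf data into communication buffers ...
     PetscCall(PetscLogEventEnd(PETSCSF_Pack, sf, 0, 0, 0));
*/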

typedef enum {
  PETSCSF_ROOT2LEAF = 0,
  PETSCSF_LEAF2ROOT
} PetscSFDirection;
typedef enum {
  PETSCSF_BCAST = 0,
  PETSCSF_REDUCE,
  PETSCSF_FETCH
} PetscSFOperation;
/* When doing device-aware MPI, a backend refers to the SF/device interface */
typedef enum {
  PETSCSF_BACKEND_INVALID = 0,
  PETSCSF_BACKEND_CUDA,
  PETSCSF_BACKEND_HIP,
  PETSCSF_BACKEND_KOKKOS
} PetscSFBackend;
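
/* Illustrative sketch (hypothetical, not part of this header): an implementation could map the
   device type it runs on to one of the backends above. The helper name and the exact mapping are
   assumptions for illustration only.

     static PetscSFBackend PetscSFBackendFromDeviceType_Example(PetscDeviceType dtype)
     {
       switch (dtype) {
       case PETSC_DEVICE_CUDA: return PETSCSF_BACKEND_CUDA;
       case PETSC_DEVICE_HIP: return PETSCSF_BACKEND_HIP;
       default: return PETSCSF_BACKEND_INVALID; // host-only, or a device type SF has no backend for
       }
     }
*/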

struct _PetscSFOps {
  PetscErrorCode (*Reset)(PetscSF);
  PetscErrorCode (*Destroy)(PetscSF);
  PetscErrorCode (*SetUp)(PetscSF);
  PetscErrorCode (*SetFromOptions)(PetscSF, PetscOptionItems *);
  PetscErrorCode (*View)(PetscSF, PetscViewer);
  PetscErrorCode (*Duplicate)(PetscSF, PetscSFDuplicateOption, PetscSF);
  PetscErrorCode (*BcastBegin)(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);
  PetscErrorCode (*BcastEnd)(PetscSF, MPI_Datatype, const void *, void *, MPI_Op);
  PetscErrorCode (*ReduceBegin)(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);
  PetscErrorCode (*ReduceEnd)(PetscSF, MPI_Datatype, const void *, void *, MPI_Op);
  PetscErrorCode (*FetchAndOpBegin)(PetscSF, MPI_Datatype, PetscMemType, void *, PetscMemType, const void *, void *, MPI_Op);
  PetscErrorCode (*FetchAndOpEnd)(PetscSF, MPI_Datatype, void *, const void *, void *, MPI_Op);
  PetscErrorCode (*BcastToZero)(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *); /* For internal use only */
  PetscErrorCode (*GetRootRanks)(PetscSF, PetscInt *, const PetscMPIInt **, const PetscInt **, const PetscInt **, const PetscInt **);
  PetscErrorCode (*GetLeafRanks)(PetscSF, PetscInt *, const PetscMPIInt **, const PetscInt **, const PetscInt **);
  PetscErrorCode (*CreateLocalSF)(PetscSF, PetscSF *);
  PetscErrorCode (*GetGraph)(PetscSF, PetscInt *, PetscInt *, const PetscInt **, const PetscSFNode **);
  PetscErrorCode (*CreateEmbeddedRootSF)(PetscSF, PetscInt, const PetscInt *, PetscSF *);
  PetscErrorCode (*CreateEmbeddedLeafSF)(PetscSF, PetscInt, const PetscInt *, PetscSF *);

  PetscErrorCode (*Malloc)(PetscMemType, size_t, void **);
  PetscErrorCode (*Free)(PetscMemType, void *);
};
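
/* Illustrative sketch (hypothetical names, not part of this header): a PetscSF implementation's
   constructor fills in this function table in the usual PETSc style, e.g.

     PetscErrorCode PetscSFCreate_MyImpl(PetscSF sf)
     {
       PetscFunctionBegin;
       sf->ops->SetUp      = PetscSFSetUp_MyImpl;
       sf->ops->Reset      = PetscSFReset_MyImpl;
       sf->ops->Destroy    = PetscSFDestroy_MyImpl;
       sf->ops->BcastBegin = PetscSFBcastBegin_MyImpl;
       sf->ops->BcastEnd   = PetscSFBcastEnd_MyImpl;
       PetscFunctionReturn(PETSC_SUCCESS);
     }
*/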

typedef struct _n_PetscSFPackOpt *PetscSFPackOpt;

struct _p_PetscSF {
  PETSCHEADER(struct _PetscSFOps);
  struct { /* Fields needed to implement VecScatter behavior */
    PetscInt from_n, to_n;         /* Recorded local sizes of the input from/to vectors in VecScatterCreate(). Used subsequently for error checking. */
    PetscBool beginandendtogether; /* Indicates that the scatter begin and end functions are called together; VecScatterEnd() is then treated as a no-op */
    const PetscScalar *xdata;      /* Vector data to read from */
    PetscScalar *ydata;            /* Vector data to write to. The two pointers are recorded in VecScatterBegin(). Memory is not managed by SF. */
    PetscSF lsf;                   /* The local part of the scatter, used in SCATTER_LOCAL. Built on demand. */
    PetscInt bs;                   /* Block size, determined by the IS passed to VecScatterCreate() */
    MPI_Datatype unit;             /* One unit = bs PetscScalars */
    PetscBool logging;             /* Indicates whether vscat log events are happening. If yes, avoid duplicated SF logging to keep -log_view clear */
  } vscat;

  /* Fields for generic PetscSF functionality */
  PetscInt nroots;            /* Number of root vertices on current process (candidates for incoming edges) */
  PetscInt nleaves;           /* Number of leaf vertices on current process (this process specifies a root for each leaf) */
  PetscInt *mine;             /* Location of leaves in leafdata arrays provided to the communication routines */
  PetscInt *mine_alloc;
  PetscInt minleaf, maxleaf;
  PetscSFNode *remote;        /* Remote references to roots for each local leaf */
  PetscSFNode *remote_alloc;
  PetscInt nranks;            /* Number of ranks owning roots connected to my leaves */
  PetscInt ndranks;           /* Number of ranks in the distinguished group holding roots connected to my leaves */
  PetscMPIInt *ranks;         /* List of ranks referenced by "remote" */
  PetscInt *roffset;          /* Array of length nranks+1, offset in rmine/rremote for each rank */
  PetscInt *rmine;            /* Concatenated array holding local indices referencing each remote rank */
  PetscInt *rmine_d[2];       /* A copy of rmine[local/remote] in device memory if needed */

  /* Some results useful in packing by analyzing rmine[] */
  PetscInt leafbuflen[2];          /* Length (in units) of leaf buffers, in layout of [PETSCSF_LOCAL/REMOTE] */
  PetscBool leafcontig[2];         /* True means indices in rmine[self part] or rmine[remote part] are contiguous, and they start from ... */
  PetscInt leafstart[2];           /* ... leafstart[0] and leafstart[1] respectively */
  PetscSFPackOpt leafpackopt[2];   /* Optimization plans to (un)pack leaves connected to remote roots, based on index patterns in rmine[]. NULL for no optimization */
  PetscSFPackOpt leafpackopt_d[2]; /* Copy of leafpackopt[] on device if needed */
  PetscBool leafdups[2];           /* Do indices in rmine[] for self(0)/remote(1) communication have duplicates? TRUE implies threads working on them in parallel may have data races. */

  PetscInt nleafreqs;             /* Number of MPI requests for leaves */
  PetscInt *rremote;              /* Concatenated array holding remote indices referenced for each remote rank */
  PetscBool degreeknown;          /* The degree is currently known, no need to recompute */
  PetscInt *degree;               /* Degree of each of my root vertices */
  PetscInt *degreetmp;            /* Temporary local array for computing degree */
  PetscBool rankorder;            /* Sort ranks for gather and scatter operations */
  MPI_Group ingroup;              /* Group of processes connected to my roots */
  MPI_Group outgroup;             /* Group of processes connected to my leaves */
  PetscSF multi;                  /* Internal graph used to implement gather and scatter operations */
  PetscBool graphset;             /* Flag indicating that the graph has been set, required before calling communication routines */
  PetscBool setupcalled;          /* Type and communication structures have been set up */
  PetscSFPattern pattern;         /* Pattern of the graph */
  PetscBool persistent;           /* Does this SF use MPI persistent requests for communication? */
  PetscLayout map;                /* Layout of leaves over all processes when building a patterned graph */
  PetscBool unknown_input_stream; /* If true, SF does not know which streams root/leafdata is on. Default is false, since we only use PETSc's default stream */
  PetscBool use_gpu_aware_mpi;    /* If true, SF assumes it can pass GPU pointers to MPI */
  PetscBool use_stream_aware_mpi; /* If true, SF assumes the underlying MPI is CUDA stream-aware and we won't sync streams for send/recv buffers passed to MPI */
  PetscInt maxResidentThreadsPerGPU;
  PetscBool allow_multi_leaves;
  PetscSFBackend backend;         /* The device backend (if any) SF will use */
  void *data;                     /* Pointer to implementation */

#if defined(PETSC_HAVE_NVSHMEM)
  PetscBool use_nvshmem;                 /* Try to use NVSHMEM on CUDA devices with this SF when possible */
  PetscBool use_nvshmem_get;             /* If true, use the nvshmem_get based protocol; otherwise use the nvshmem_put based protocol */
  PetscBool checked_nvshmem_eligibility; /* Have we checked eligibility of using NVSHMEM on this SF? */
  PetscBool setup_nvshmem;               /* Have we already set up the NVSHMEM related fields below? These fields are built on demand */
  PetscInt leafbuflen_rmax;              /* Max leafbuflen[REMOTE] over comm */
  PetscInt nRemoteRootRanks;             /* nranks - ndranks */
  PetscInt nRemoteRootRanksMax;          /* Max nranks - ndranks over comm */

  /* The following two fields look confusing but actually make sense: they are offsets of buffers at the remote side. We're doing one-sided communication! */
  PetscInt *rootsigdisp; /* [nRemoteRootRanks]. For my i-th remote root rank, I will access its rootsigdisp[i]-th root signal */
  PetscInt *rootbufdisp; /* [nRemoteRootRanks]. For my i-th remote root rank, I will access its root buf at offset rootbufdisp[i], in <unit> to be set */

  PetscInt *rootbufdisp_d;
  PetscInt *rootsigdisp_d; /* Copy of rootsigdisp[] on device */
  PetscMPIInt *ranks_d;    /* Copy of the remote part of (root) ranks[] on device */
  PetscInt *roffset_d;     /* Copy of the remote part of roffset[] on device */
#endif
#if defined(PETSC_HAVE_MPIX_STREAM)
  MPIX_Stream mpi_stream;
  MPI_Comm stream_comm; /* GPU stream-aware MPI communicator */
#endif
};
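
/* Illustrative usage sketch (not part of this header): the core graph fields above (nroots, nleaves,
   mine, remote) mirror the arguments of the public PetscSFSetGraph(). For example, two local leaves
   referencing root 0 on rank 0 and root 3 on rank 1 could be described as

     PetscSF     sf;
     PetscSFNode iremote[2];
     iremote[0].rank = 0; iremote[0].index = 0;
     iremote[1].rank = 1; iremote[1].index = 3;
     PetscCall(PetscSFCreate(PETSC_COMM_WORLD, &sf));
     PetscCall(PetscSFSetGraph(sf, nroots, 2, NULL, PETSC_COPY_VALUES, iremote, PETSC_COPY_VALUES));

   where nroots is the number of locally owned roots. With ilocal == NULL the leaves are contiguous
   starting at 0, so mine stays NULL and [minleaf, maxleaf] spans [0, nleaves - 1]. */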

PETSC_EXTERN PetscBool PetscSFRegisterAllCalled;
PETSC_EXTERN PetscErrorCode PetscSFRegisterAll(void);

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Private(PetscSF, PetscSF *);
PETSC_INTERN PetscErrorCode PetscSFBcastToZero_Private(PetscSF, MPI_Datatype, const void *, void *) PETSC_ATTRIBUTE_MPI_POINTER_WITH_TYPE(3, 2) PETSC_ATTRIBUTE_MPI_POINTER_WITH_TYPE(4, 2);

PETSC_EXTERN PetscErrorCode MPIPetsc_Type_unwrap(MPI_Datatype, MPI_Datatype *, PetscBool *);
PETSC_EXTERN PetscErrorCode MPIPetsc_Type_compare(MPI_Datatype, MPI_Datatype, PetscBool *);
PETSC_EXTERN PetscErrorCode MPIPetsc_Type_compare_contig(MPI_Datatype, MPI_Datatype, PetscInt *);

#if defined(PETSC_HAVE_MPI_NONBLOCKING_COLLECTIVES)
#define MPIU_Ibcast(a, b, c, d, e, req) MPI_Ibcast(a, b, c, d, e, req)
#define MPIU_Ireduce(a, b, c, d, e, f, g, req) MPI_Ireduce(a, b, c, d, e, f, g, req)
#define MPIU_Iscatter(a, b, c, d, e, f, g, h, req) MPI_Iscatter(a, b, c, d, e, f, g, h, req)
#define MPIU_Iscatterv(a, b, c, d, e, f, g, h, i, req) MPI_Iscatterv(a, b, c, d, e, f, g, h, i, req)
#define MPIU_Igather(a, b, c, d, e, f, g, h, req) MPI_Igather(a, b, c, d, e, f, g, h, req)
#define MPIU_Igatherv(a, b, c, d, e, f, g, h, i, req) MPI_Igatherv(a, b, c, d, e, f, g, h, i, req)
#define MPIU_Iallgather(a, b, c, d, e, f, g, req) MPI_Iallgather(a, b, c, d, e, f, g, req)
#define MPIU_Iallgatherv(a, b, c, d, e, f, g, h, req) MPI_Iallgatherv(a, b, c, d, e, f, g, h, req)
#define MPIU_Ialltoall(a, b, c, d, e, f, g, req) MPI_Ialltoall(a, b, c, d, e, f, g, req)
#else
/* Ignore req, the MPI_Request argument, and use blocking MPI collectives. One should initialize req
   to MPI_REQUEST_NULL so that MPI_Wait(req,status) can be called regardless of whether the underlying
   call was blocking or not.
*/
#define MPIU_Ibcast(a, b, c, d, e, req) MPI_Bcast(a, b, c, d, e)
#define MPIU_Ireduce(a, b, c, d, e, f, g, req) MPI_Reduce(a, b, c, d, e, f, g)
#define MPIU_Iscatter(a, b, c, d, e, f, g, h, req) MPI_Scatter(a, b, c, d, e, f, g, h)
#define MPIU_Iscatterv(a, b, c, d, e, f, g, h, i, req) MPI_Scatterv(a, b, c, d, e, f, g, h, i)
#define MPIU_Igather(a, b, c, d, e, f, g, h, req) MPI_Gather(a, b, c, d, e, f, g, h)
#define MPIU_Igatherv(a, b, c, d, e, f, g, h, i, req) MPI_Gatherv(a, b, c, d, e, f, g, h, i)
#define MPIU_Iallgather(a, b, c, d, e, f, g, req) MPI_Allgather(a, b, c, d, e, f, g)
#define MPIU_Iallgatherv(a, b, c, d, e, f, g, h, req) MPI_Allgatherv(a, b, c, d, e, f, g, h)
#define MPIU_Ialltoall(a, b, c, d, e, f, g, req) MPI_Alltoall(a, b, c, d, e, f, g)
#endif
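
/* Illustrative sketch (not part of this header): callers of the MPIU_I* wrappers initialize the
   request to MPI_REQUEST_NULL so the same code works whether the wrapper maps to a nonblocking or a
   blocking collective (MPI_Wait on MPI_REQUEST_NULL returns immediately). Here buf, count and comm
   are assumed to exist:

     MPI_Request req = MPI_REQUEST_NULL;
     PetscCallMPI(MPIU_Ibcast(buf, count, MPIU_INT, 0, comm, &req));
     ... do independent work ...
     PetscCallMPI(MPI_Wait(&req, MPI_STATUS_IGNORE));
*/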

PETSC_EXTERN PetscErrorCode VecScatterGetRemoteCount_Private(VecScatter, PetscBool, PetscInt *, PetscInt *);
PETSC_EXTERN PetscErrorCode VecScatterGetRemote_Private(VecScatter, PetscBool, PetscInt *, const PetscInt **, const PetscInt **, const PetscMPIInt **, PetscInt *);
PETSC_EXTERN PetscErrorCode VecScatterGetRemoteOrdered_Private(VecScatter, PetscBool, PetscInt *, const PetscInt **, const PetscInt **, const PetscMPIInt **, PetscInt *);
PETSC_EXTERN PetscErrorCode VecScatterRestoreRemote_Private(VecScatter, PetscBool, PetscInt *, const PetscInt **, const PetscInt **, const PetscMPIInt **, PetscInt *);
PETSC_EXTERN PetscErrorCode VecScatterRestoreRemoteOrdered_Private(VecScatter, PetscBool, PetscInt *, const PetscInt **, const PetscInt **, const PetscMPIInt **, PetscInt *);

#if defined(PETSC_HAVE_CUDA)
PETSC_EXTERN PetscErrorCode PetscSFMalloc_CUDA(PetscMemType, size_t, void **);
PETSC_EXTERN PetscErrorCode PetscSFFree_CUDA(PetscMemType, void *);
#endif
#if defined(PETSC_HAVE_HIP)
PETSC_EXTERN PetscErrorCode PetscSFMalloc_HIP(PetscMemType, size_t, void **);
PETSC_EXTERN PetscErrorCode PetscSFFree_HIP(PetscMemType, void *);
#endif
#if defined(PETSC_HAVE_KOKKOS)
PETSC_EXTERN PetscErrorCode PetscSFMalloc_Kokkos(PetscMemType, size_t, void **);
PETSC_EXTERN PetscErrorCode PetscSFFree_Kokkos(PetscMemType, void *);
#endif

/* SF only supports CUDA, HIP and Kokkos devices. Even though ViennaCL is a device type, its device
   pointers are invisible to SF: through VecGetArray(), we copy data of VECVIENNACL from device to
   host and pass host pointers to SF.
*/
#if defined(PETSC_HAVE_CUDA) || defined(PETSC_HAVE_KOKKOS) || defined(PETSC_HAVE_HIP)
#define PetscSFMalloc(sf, mtype, sz, ptr) ((*(sf)->ops->Malloc)(mtype, sz, ptr))
/* Free memory and set ptr to NULL on success */
#define PetscSFFree(sf, mtype, ptr) ((PetscErrorCode)((ptr) && ((*(sf)->ops->Free)(mtype, ptr) || ((ptr) = NULL, PETSC_SUCCESS))))
#else
/* For pure host code, use less indirection */
#define PetscSFMalloc(sf, mtype, sz, ptr) PetscMalloc(sz, ptr)
#define PetscSFFree(sf, mtype, ptr) PetscFree(ptr)
#endif
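
/* Illustrative usage sketch (not part of this header): allocating and freeing a buffer through the
   SF memory hooks so the allocation lands in host or device memory according to mtype. The variables
   sf, mtype and n are assumed to exist:

     void *leafbuf = NULL;
     PetscCall(PetscSFMalloc(sf, mtype, n * sizeof(PetscScalar), &leafbuf));
     ... pack/unpack through leafbuf ...
     PetscCall(PetscSFFree(sf, mtype, leafbuf));  // frees leafbuf and sets it to NULL on success
*/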

#endif