Actual source code: ex36.c
static char help[] = "Tests interpolation and output of hybrid meshes\n\n";

#include <petscdmplex.h>
#include <petscsf.h>

/* Much can be learned using
   -rd_dm_view -rd2_dm_view -rd_section_view -rd_vec_view -rd2_section_view -rd2_vec_view -rd2_sf_view */
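
/* Migrate the data in the local Vec *v from its original DM onto the
   redistributed DM dist_dm, following the migration PetscSF sf. On return,
   *v is replaced by the redistributed vector. */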
static PetscErrorCode redistribute_vec(DM dist_dm, PetscSF sf, Vec *v)
{
  DM           dm, dist_v_dm;
  PetscSection section, dist_section;
  Vec          dist_v;
  PetscMPIInt  rank, size, p;

  PetscFunctionBegin;
  PetscCall(VecGetDM(*v, &dm));
  PetscCall(DMGetLocalSection(dm, &section));
  PetscCall(DMViewFromOptions(dm, NULL, "-rd_dm_view"));
  PetscCall(DMViewFromOptions(dist_dm, NULL, "-rd2_dm_view"));

  /* Clone the DM and attach a fresh section and vector to receive the redistributed layout and data */
  PetscCall(DMClone(dm, &dist_v_dm));
  PetscCall(VecCreate(PetscObjectComm((PetscObject)*v), &dist_v));
  PetscCall(VecSetDM(dist_v, dist_v_dm));
  PetscCall(PetscSectionCreate(PetscObjectComm((PetscObject)*v), &dist_section));
  PetscCall(DMSetLocalSection(dist_v_dm, dist_section));

  PetscCall(PetscObjectViewFromOptions((PetscObject)section, NULL, "-rd_section_view"));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)dm), &rank));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dm), &size));
  /* View the original vector one rank at a time */
  for (p = 0; p < size; ++p) {
    if (p == rank) PetscCall(PetscObjectViewFromOptions((PetscObject)*v, NULL, "-rd_vec_view"));
    PetscCall(PetscBarrier((PetscObject)dm));
    PetscCall(PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD));
  }
  /* Migrate both the section layout and the vector data along the SF */
  PetscCall(DMPlexDistributeField(dm, sf, section, *v, dist_section, dist_v));
  /* View the redistributed section and vector one rank at a time */
  for (p = 0; p < size; ++p) {
    if (p == rank) {
      PetscCall(PetscObjectViewFromOptions((PetscObject)dist_section, NULL, "-rd2_section_view"));
      PetscCall(PetscObjectViewFromOptions((PetscObject)dist_v, NULL, "-rd2_vec_view"));
    }
    PetscCall(PetscBarrier((PetscObject)dm));
    PetscCall(PetscViewerFlush(PETSC_VIEWER_STDOUT_WORLD));
  }

  PetscCall(PetscSectionDestroy(&dist_section));
  PetscCall(DMDestroy(&dist_v_dm));

  PetscCall(VecDestroy(v));
  *v = dist_v;
  PetscFunctionReturn(PETSC_SUCCESS);
}
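
/* Print the FVM geometry: for every cell its centroid and volume, and for
   every interior face (support size > 1) its two support cells, normal, and
   centroid. */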
static PetscErrorCode dm_view_geometry(DM dm, Vec cell_geom, Vec face_geom)
{
  DM                 cell_dm, face_dm;
  PetscSection       cell_section, face_section;
  const PetscScalar *cell_array, *face_array;
  const PetscInt    *cells;
  PetscInt           c, start_cell, end_cell;
  PetscInt           f, start_face, end_face;
  PetscInt           supportSize, offset;
  PetscMPIInt        rank;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));

  /* cells */
  PetscCall(DMPlexGetHeightStratum(dm, 0, &start_cell, &end_cell));
  PetscCall(VecGetDM(cell_geom, &cell_dm));
  PetscCall(DMGetLocalSection(cell_dm, &cell_section));
  PetscCall(VecGetArrayRead(cell_geom, &cell_array));

  for (c = start_cell; c < end_cell; ++c) {
    const PetscFVCellGeom *geom;
    PetscCall(PetscSectionGetOffset(cell_section, c, &offset));
    geom = (PetscFVCellGeom *)&cell_array[offset];
    PetscCall(PetscSynchronizedPrintf(PETSC_COMM_WORLD, "rank %d c %" PetscInt_FMT " centroid %g,%g,%g vol %g\n", rank, c, (double)geom->centroid[0], (double)geom->centroid[1], (double)geom->centroid[2], (double)geom->volume));
  }
  PetscCall(PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL));
  PetscCall(VecRestoreArrayRead(cell_geom, &cell_array));

  /* faces */
  PetscCall(DMPlexGetHeightStratum(dm, 1, &start_face, &end_face));
  PetscCall(VecGetDM(face_geom, &face_dm));
  PetscCall(DMGetLocalSection(face_dm, &face_section));
  PetscCall(VecGetArrayRead(face_geom, &face_array));
  for (f = start_face; f < end_face; ++f) {
    PetscCall(DMPlexGetSupport(dm, f, &cells));
    PetscCall(DMPlexGetSupportSize(dm, f, &supportSize));
    if (supportSize > 1) {
      PetscCall(PetscSectionGetOffset(face_section, f, &offset));
      PetscCall(PetscSynchronizedPrintf(PETSC_COMM_WORLD, "rank %d f %" PetscInt_FMT " cells %" PetscInt_FMT ",%" PetscInt_FMT " normal %g,%g,%g centroid %g,%g,%g\n", rank, f, cells[0], cells[1], (double)PetscRealPart(face_array[offset + 0]), (double)PetscRealPart(face_array[offset + 1]), (double)PetscRealPart(face_array[offset + 2]), (double)PetscRealPart(face_array[offset + 3]), (double)PetscRealPart(face_array[offset + 4]), (double)PetscRealPart(face_array[offset + 5])));
    }
  }
  PetscCall(PetscSynchronizedFlush(PETSC_COMM_WORLD, NULL));
  PetscCall(VecRestoreArrayRead(face_geom, &face_array));
  PetscFunctionReturn(PETSC_SUCCESS);
}
int main(int argc, char **argv)
{
  DM               dm, redist_dm;
  PetscPartitioner part;
  PetscSF          redist_sf;
  Vec              cell_geom, face_geom;
  PetscInt         overlap2 = 1;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(DMCreate(PETSC_COMM_WORLD, &dm));
  PetscCall(DMSetType(dm, DMPLEX));
  PetscCall(DMSetFromOptions(dm));
  PetscCall(DMViewFromOptions(dm, NULL, "-dm_view"));

  PetscCall(DMPlexComputeGeometryFVM(dm, &cell_geom, &face_geom));
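  /* cell_geom holds one PetscFVCellGeom per cell; each face entry in face_geom
     begins with the normal followed by the centroid, as printed by
     dm_view_geometry() above */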
  PetscCall(dm_view_geometry(dm, cell_geom, face_geom));

  /* redistribute */
  PetscCall(DMPlexGetPartitioner(dm, &part));
  PetscCall(PetscPartitionerSetFromOptions(part));
  PetscCall(PetscOptionsGetInt(NULL, NULL, "-overlap2", &overlap2, NULL));
  PetscCall(DMPlexDistribute(dm, overlap2, &redist_sf, &redist_dm));
  if (redist_dm) { /* DMPlexDistribute() returns a NULL DM when no redistribution occurs, e.g. on a single rank */
    PetscCall(redistribute_vec(redist_dm, redist_sf, &cell_geom));
    PetscCall(redistribute_vec(redist_dm, redist_sf, &face_geom));
    PetscCall(PetscObjectViewFromOptions((PetscObject)redist_sf, NULL, "-rd2_sf_view"));
    PetscCall(PetscPrintf(PETSC_COMM_WORLD, "redistributed:\n"));
    PetscCall(dm_view_geometry(redist_dm, cell_geom, face_geom));
  }

  PetscCall(VecDestroy(&cell_geom));
  PetscCall(VecDestroy(&face_geom));
  PetscCall(PetscSFDestroy(&redist_sf));
  PetscCall(DMDestroy(&redist_dm));
  PetscCall(DMDestroy(&dm));
  PetscCall(PetscFinalize());
  return 0;
}
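
/* A typical invocation, mirroring the test below, with the per-rank vector
   views enabled (the executable name ex36 is whatever the local build
   produces):

     mpiexec -n 3 ./ex36 -dm_plex_dim 3 -dm_plex_box_faces 8,1,1 -dm_plex_simplex 0 \
       -dm_plex_adj_cone 1 -dm_plex_adj_closure 0 -petscpartitioner_type simple \
       -dm_distribute_overlap 1 -overlap2 1 -rd_vec_view -rd2_vec_view
*/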
/*TEST

  test:
    suffix: 0
    nsize: 3
    args: -dm_plex_dim 3 -dm_plex_box_faces 8,1,1 -dm_plex_simplex 0 -dm_plex_adj_cone 1 -dm_plex_adj_closure 0 -petscpartitioner_type simple -dm_distribute_overlap 1 -overlap2 1

TEST*/