Actual source code: ex8.c
static char help[] = "Demonstrates using a local ordering to set values into a parallel vector.\n\n";
/*
   Include "petscvec.h" so that we can use vectors. Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines
     petscis.h     - index sets
     petscviewer.h - viewers
*/
#include <petscvec.h>
int main(int argc, char **argv)
{
  PetscMPIInt rank;
  PetscInt    i, ng, *gindices, rstart, rend, M;
  PetscScalar one = 1.0;
  Vec         x;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
  /*
     Create a parallel vector.
      - In this case, we specify the size of each processor's local
        portion, and PETSc computes the global size. Alternatively,
        PETSc could determine the vector's distribution if we specify
        just the global size.
  */
  PetscCall(VecCreate(PETSC_COMM_WORLD, &x));
  PetscCall(VecSetSizes(x, rank + 1, PETSC_DECIDE));
  PetscCall(VecSetFromOptions(x));
  PetscCall(VecSet(x, one));
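  /*
     A minimal sketch (not part of this example) of the alternative mentioned
     above: fix only the global size and let PETSc split it across processors.
     The vector y and the global length 20 are hypothetical illustrative values.

       PetscCall(VecCreate(PETSC_COMM_WORLD, &y));
       PetscCall(VecSetSizes(y, PETSC_DECIDE, 20));
       PetscCall(VecSetFromOptions(y));
  */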
  /*
     Set the local-to-global ordering for the vector. Each processor
     generates a list of the global indices for each local index. Note that
     the local indices are just whatever is convenient for a particular
     application. In this case we treat the vector as lying on a
     one-dimensional grid and have one ghost point on each end of the
     blocks owned by each processor.
  */
  PetscCall(VecGetSize(x, &M));
  PetscCall(VecGetOwnershipRange(x, &rstart, &rend));
  ng = rend - rstart + 2;
  PetscCall(PetscMalloc1(ng, &gindices));
  gindices[0] = rstart - 1;
  for (i = 0; i < ng - 1; i++) gindices[i + 1] = gindices[i] + 1;
  /* map the first and last point as periodic */
  if (gindices[0] == -1) gindices[0] = M - 1;
  if (gindices[ng - 1] == M) gindices[ng - 1] = 0;
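  /*
     Concretely (assuming a run with 2 MPI ranks, so local sizes 1 and 2 and
     M = 3): rank 0 has rstart = 0, rend = 1, ng = 3 and builds {-1, 0, 1},
     which the periodic wrap turns into {2, 0, 1}; rank 1 has rstart = 1,
     rend = 3, ng = 4 and builds {0, 1, 2, 3}, which becomes {0, 1, 2, 0}.
  */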
  {
    ISLocalToGlobalMapping ltog;
    PetscCall(ISLocalToGlobalMappingCreate(PETSC_COMM_SELF, 1, ng, gindices, PETSC_COPY_VALUES, &ltog));
    PetscCall(VecSetLocalToGlobalMapping(x, ltog));
    PetscCall(ISLocalToGlobalMappingDestroy(&ltog));
  }
  PetscCall(PetscFree(gindices));
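  /*
     A minimal sketch (assumed usage, not part of this example): while the
     mapping created above is still alive, inside the block, local indices can
     be translated to global ones explicitly. locidx[] and globidx[] are
     hypothetical arrays.

       PetscInt locidx[2] = {0, 1}, globidx[2];
       PetscCall(ISLocalToGlobalMappingApply(ltog, 2, locidx, globidx));
  */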
  /*
     Set the vector elements.
      - In this case set the values using the local ordering.
      - Each processor can contribute any vector entries,
        regardless of which processor "owns" them; any nonlocal
        contributions will be transferred to the appropriate processor
        during the assembly process.
      - In this example, the flag ADD_VALUES indicates that all
        contributions will be added together.
  */
  for (i = 0; i < ng; i++) PetscCall(VecSetValuesLocal(x, 1, &i, &one, ADD_VALUES));
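  /*
     For comparison, a sketch of the same update expressed with global
     indices (assumed equivalent usage, not executed here); it would require
     keeping gindices allocated instead of freeing it above.

       for (i = 0; i < ng; i++) PetscCall(VecSetValues(x, 1, &gindices[i], &one, ADD_VALUES));
  */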
  /*
     Assemble vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  PetscCall(VecAssemblyBegin(x));
  PetscCall(VecAssemblyEnd(x));
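  /*
     A sketch of the overlap mentioned above (assumed pattern, not used here):
     work that does not touch x can be placed between the two assembly calls.

       PetscCall(VecAssemblyBegin(x));
       do_independent_local_work();   <- hypothetical helper
       PetscCall(VecAssemblyEnd(x));
  */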
  /*
     View the vector; then destroy it.
  */
  PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
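  /*
     Viewing could also be left to run-time options (assumed alternative, not
     required by this example), e.g. calling
     VecViewFromOptions(x, NULL, "-vec_view") and passing -vec_view on the
     command line instead of the explicit VecView() above.
  */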
  PetscCall(VecDestroy(&x));

  PetscCall(PetscFinalize());
  return 0;
}
/*TEST

   test:
      nsize: 4

TEST*/