!
!
!  Description: Builds a parallel vector with 1 component on the first
!  processor, 2 on the second, etc. Then each processor adds
!  one to all elements except the last 'rank' entries.
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscvec.h>
program main
  use petscvec
  implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!  Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

  Vec x
  PetscInt N, i, ione
  PetscErrorCode ierr
  PetscMPIInt rank
  PetscScalar one, value(1)

  PetscCallA(PetscInitialize(ierr))
  one = 1.0
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

! Create a parallel vector.
! - In this case, we specify the size of the local portion on
!   each processor, and PETSc computes the global size. Alternatively,
!   if we pass the global size and use PETSC_DECIDE for the
!   local size, PETSc will choose a reasonable partition, trying
!   to put nearly an equal number of elements on each processor
!   (see note 1 at the end of this file for a commented sketch).

  N = rank + 1
  ione = 1
  PetscCallA(VecCreateFromOptions(PETSC_COMM_WORLD, PETSC_NULL_CHARACTER, ione, N, PETSC_DECIDE, x, ierr))
  PetscCallA(VecGetSize(x, N, ierr))
  PetscCallA(VecSet(x, one, ierr))

! Set the vector elements.
! - Note that VecSetValues() uses 0-based indices
!   in Fortran as well as in C.
! - Always specify global locations of vector entries.
! - Each processor can contribute any vector entries,
!   regardless of which processor "owns" them; any nonlocal
!   contributions will be transferred to the appropriate processor
!   during the assembly process (see note 2 at the end of this file).
! - In this example, the flag ADD_VALUES indicates that all
!   contributions will be added together.

  ione = 1
  do i = 0, N - rank - 1
    PetscCallA(VecSetValues(x, ione, [i], [one], ADD_VALUES, ierr))
  end do

! Assemble the vector, using the 2-step process:
!   VecAssemblyBegin(), VecAssemblyEnd()
! Computations can be done while messages are in transit
! by placing code between these two statements (see note 3 at the
! end of this file).

  PetscCallA(VecAssemblyBegin(x, ierr))
  PetscCallA(VecAssemblyEnd(x, ierr))

! Test VecGetValues() with scalar entries. ione is 0 here, so this is a
! zero-length request: no value is read and the index argument is never
! dereferenced (see note 4 at the end of this file for reading an owned
! entry).
  if (rank == 0) then
    ione = 0
    PetscCallA(VecGetValues(x, ione, [i], value, ierr))
  end if

! View the vector; then destroy it.

  PetscCallA(VecView(x, PETSC_VIEWER_STDOUT_WORLD, ierr))
  PetscCallA(VecDestroy(x, ierr))

  PetscCallA(PetscFinalize(ierr))
end

!/*TEST
!
!   test:
!      nsize: 2
!      filter: grep -v " MPI process"
!
!TEST*/
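
! ----------------------------------------------------------------------
! Note 1 (a commented sketch, not executed): the alternative creation
! described above fixes the global size and lets PETSc choose the local
! split. nglobal is a hypothetical PetscInt holding the desired global
! size; the call is otherwise identical to the one in the program:
!
!   PetscCallA(VecCreateFromOptions(PETSC_COMM_WORLD, PETSC_NULL_CHARACTER, ione, PETSC_DECIDE, nglobal, x, ierr))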
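!
! Note 2 (a commented sketch, not executed): because any rank may
! contribute to any entry, every process could also add one to, say,
! global entry 0, and assembly would route the contribution to its
! owner. izero is a hypothetical PetscInt set to 0:
!
!   PetscCallA(VecSetValues(x, ione, [izero], [one], ADD_VALUES, ierr))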
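!
! Note 3: any work that does not touch x, such as filling local arrays
! or assembling a second vector, could be placed between
! VecAssemblyBegin() and VecAssemblyEnd() to overlap computation with
! the communication.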
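!
! Note 4 (a commented sketch, not executed): to read an entry that is
! actually stored locally, one could first query the locally owned
! range; VecGetValues() can only access locally owned entries. istart
! and iend are hypothetical PetscInt variables:
!
!   ione = 1
!   PetscCallA(VecGetOwnershipRange(x, istart, iend, ierr))
!   PetscCallA(VecGetValues(x, ione, [istart], value, ierr))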