xref: /petsc/src/sys/tutorials/ex4f.F90 (revision 4820e4ea99a084ae862a8c395f732bc7c0e1a6d0)
!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
! -----------------------------------------------------------------------

program main
#include <petsc/finclude/petscsys.h>
  use petscmpi  ! or mpi or mpi_f08
  use petscsys
  implicit none
  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible for MPI

  PetscCallMPIA(MPI_Init(ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif
!     We can now change the communicator universe for PETSc: split
!     MPI_COMM_WORLD by even/odd global rank and hand the resulting
!     subcommunicator to PETSc as PETSC_COMM_WORLD

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))

!     Every PETSc routine should begin with the PetscInitialize()
!     routine.
  PetscCallA(PetscInitializeNoArguments(ierr))

!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group (i.e. within the half of
!     MPI_COMM_WORLD that this process was assigned to by the split).

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

!     Here we would like to print only one message that represents all
!     the processes in the group. Sleep (for globalrank seconds) so that
!     IO from different ranks don't get mixed up. Note this is not an
!     ideal solution
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)

!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  Because this program initialized MPI itself,
!     PetscFinalize() does NOT finalize MPI here; we free the split
!     communicator and call MPI_Finalize() explicitly below.  See the
!     PetscFinalize() manpage for more information.

  PetscCallA(PetscFinalize(ierr))
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/