! xref: /petsc/src/sys/tutorials/ex4f.F90 (revision 749c190bad46ba447444c173d8c7a4080c70750e)
!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscsys
  implicit none

  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for MPI (and therefore also for calling MPI_Finalize() at the end)

  PetscCallMPIA(MPI_Init(ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif

!     We can now change the communicator universe for PETSc: split
!     MPI_COMM_WORLD by rank parity so PETSc runs on a subset of the
!     processes (even ranks in one communicator, odd ranks in another)

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))

!     Every PETSc program should begin with the PetscInitialize()
!     routine (here the no-argument variant, since MPI is already up)

  PetscCallA(PetscInitializeNoArguments(ierr))

!     The following MPI calls return the number of processes being used
!     and the rank of this process in the (split) PETSc communicator

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))

!     Here we would like to print only one message per group. Sleep
!     proportionally to the global rank so that IO from different ranks
!     doesn't get mixed up. Note this is not an ideal solution

  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)

!     Always call PetscFinalize() before exiting a program.  This
!     routine - finalizes the PETSc libraries - provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view).  See the PetscFinalize() manpage for more
!     information.

  PetscCallA(PetscFinalize(ierr))
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

!     Since we initialized MPI, we must call MPI_Finalize() ourselves

  PetscCallMPIA(MPI_Finalize(ierr))
end
62f8402805SBarry Smith
!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/
72