!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
! -----------------------------------------------------------------------
#include <petsc/finclude/petscsys.h>
program main
  use petscmpi  ! or mpi or mpi_f08
  use petscsys
  implicit none
  PetscErrorCode ierr
  PetscMPIInt rank, size, grank, zero, two
  PetscReal globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible for MPI

  PetscCallMPIA(MPI_Init(ierr))
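
!     The PetscCallMPIA()/PetscCallA() macros check the returned error
!     code and abort on failure; the "A" (abort) variants are intended
!     for the main program, which cannot propagate an error code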
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalInitializePackage(ierr))
#endif
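
!     If PETSc was configured with Elemental, that package is initialized
!     here, immediately after MPI_Init() (it is finalized again after
!     PetscFinalize() below)
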
!     We can now change the communicator universe for PETSc: split
!     MPI_COMM_WORLD and use the resulting subcommunicator as
!     PETSC_COMM_WORLD

  zero = 0
  two = 2
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr))
  PetscCallMPIA(MPI_Comm_split(MPI_COMM_WORLD, mod(rank, two), zero, PETSC_COMM_WORLD, ierr))
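
!     mod(rank, two) is the color: even global ranks form one
!     subcommunicator and odd ranks another; zero is the key, so ranks
!     keep their MPI_COMM_WORLD ordering within each subcommunicator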

!     Every PETSc program should begin with PetscInitialize(); here the
!     PetscInitializeNoArguments() variant initializes PETSc without
!     processing command line arguments.
  PetscCallA(PetscInitializeNoArguments(ierr))
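
!     PETSc now runs independently on each subcommunicator; the even and
!     odd halves of MPI_COMM_WORLD each have their own PETSC_COMM_WORLD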

!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group.

  PetscCallMPIA(MPI_Comm_size(PETSC_COMM_WORLD, size, ierr))
  PetscCallMPIA(MPI_Comm_rank(PETSC_COMM_WORLD, rank, ierr))
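
!     These values are relative to the subcommunicator, not to
!     MPI_COMM_WORLD; e.g., with the 5 global ranks used in the test
!     below, the two groups have sizes 3 and 2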

!     Here we would like to print only one message that represents all
!     the processes in the group. We sleep for a number of seconds equal
!     to the global rank so that IO from different ranks doesn't get
!     mixed up; note this is not an ideal solution
  PetscCallMPIA(MPI_Comm_rank(MPI_COMM_WORLD, grank, ierr))
  globalrank = grank
  PetscCallA(PetscSleep(globalrank, ierr))
  if (rank == 0) write (6, 100) size, rank
100 format('No of Procs = ', i4, ' rank = ', i4)
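
!     Each subcommunicator has its own rank 0, so two lines are printed
!     in total; since the relative ordering of output across ranks is
!     not guaranteed, the test below sorts the output before comparison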

!     Always call PetscFinalize() before exiting a program. This routine
!     finalizes the PETSc libraries and provides summary and diagnostic
!     information if certain runtime options are chosen (e.g., -log_view).
!     Because we called MPI_Init() ourselves, PetscFinalize() does not
!     finalize MPI. See the PetscFinalize() manual page for more
!     information.

  PetscCallA(PetscFinalize(ierr))
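
!     Free the subcommunicator created with MPI_Comm_split() above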
  PetscCallMPIA(MPI_Comm_free(PETSC_COMM_WORLD, ierr))
#if defined(PETSC_HAVE_ELEMENTAL)
  PetscCallA(PetscElementalFinalizePackage(ierr))
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

  PetscCallMPIA(MPI_Finalize(ierr))
end program main
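
!     Run with, e.g., mpiexec -n 5 ./ex4f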

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/