xref: /petsc/src/dm/impls/da/dasub.c (revision db05f41b88540d61f4bd88b8a8012f1ff01b70f8)

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/daimpl.h>    /*I   "petscdmda.h"   I*/

#undef __FUNCT__
#define __FUNCT__ "DMDAGetRay"
/*@C
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X or DMDA_Y
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.
   Only 2d DMDAs are supported; 1d and 3d DMDAs generate an error.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;

  PetscFunctionBegin;
  if (dd->dim == 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Cannot get slice from 1d DMDA");
  if (dd->dim == 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Cannot get slice from 3d DMDA");
  ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(((PetscObject)da)->comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    if (dir == DMDA_Y) {
      PetscInt *indices,i;
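      /* natural (application) ordering indices of the row y = gp: gp*M, gp*M+1, ..., gp*M+M-1; converted to PETSc ordering below */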
      ierr = PetscMalloc(dd->M*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = gp*dd->M;
      for (i=1; i<dd->M; i++) {indices[i] = indices[i-1] + 1;}
      ierr = AOApplicationToPetsc(ao,dd->M,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateBlock(PETSC_COMM_SELF,dd->w,dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else if (dir == DMDA_X) {
      PetscInt *indices,i;
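      /* natural (application) ordering indices of the column x = gp: gp, gp+M, gp+2*M, ..., gp+(N-1)*M */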
      ierr = PetscMalloc(dd->N*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = gp;
      for (i=1; i<dd->N; i++) {indices[i] = indices[i-1] + dd->M;}
      ierr = AOApplicationToPetsc(ao,dd->N,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateBlock(PETSC_COMM_SELF,dd->w,dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
  } else {
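    /* all other processes contribute an empty vector and an empty index set to the scatter */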
    ierr = VecCreateSeq(PETSC_COMM_SELF,0,newvec);CHKERRQ(ierr);
    ierr = ISCreateBlock(PETSC_COMM_SELF,dd->w,0,PETSC_NULL,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  }
  ierr = DMGetGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec,is,*newvec,PETSC_NULL,scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
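
/*
   Usage sketch (illustrative only, not compiled as part of this file): gather row y = 2 of a
   2d DMDA global vector onto process 0.  The DM "da" and its global Vec "gvec" are assumed to
   have been created elsewhere; all names here are hypothetical.

     Vec            ray;
     VecScatter     scatter;
     PetscErrorCode ierr;

     ierr = DMDAGetRay(da,DMDA_Y,2,&ray,&scatter);CHKERRQ(ierr);
     ierr = VecScatterBegin(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecScatterEnd(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ... process 0 now holds the row values in ray ...
     ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
     ierr = VecDestroy(&ray);CHKERRQ(ierr);
*/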

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubset"
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator (MPI_COMM_NULL on processes that do not own the grid point)

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   The caller is responsible for freeing the communicator with MPI_Comm_free() when it is
   no longer needed.

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = PETSC_NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da,DM_CLASSID,1);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(((PetscObject)da)->comm,&size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp >= dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp >= dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp >= dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

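  /* gather each process's ownership flag; the ranks with the flag set own the requested grid point and form the subgroup */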
  ierr = PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(((PetscObject)da)->comm,&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(((PetscObject)da)->comm,subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
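
/*
   Usage sketch (illustrative only, not compiled as part of this file): obtain the communicator
   of the processes that own the z = 0 plane of a 3d DMDA and sum a locally computed value over
   just those processes.  "da" and "mylocal" are assumed to exist; all names are hypothetical.

     MPI_Comm    zcomm;
     PetscScalar mylocal = ..., planesum;

     ierr = DMDAGetProcessorSubset(da,DMDA_Z,0,&zcomm);CHKERRQ(ierr);
     if (zcomm != MPI_COMM_NULL) {
       ierr = MPI_Allreduce(&mylocal,&planesum,1,MPIU_SCALAR,MPIU_SUM,zcomm);CHKERRQ(ierr);
       ierr = MPI_Comm_free(&zcomm);CHKERRQ(ierr);
     }
*/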

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubsets"
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA whose ownership ranges begin at the same grid index in a
   particular dimension, corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   The caller is responsible for freeing the communicator with MPI_Comm_free() when it is
   no longer needed.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt      *firstPoints;
  PetscMPIInt    size, *subgroupRanks = PETSC_NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da, DM_CLASSID, 1);
  comm = ((PetscObject) da)->comm;
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

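  /* gather every process's first owned grid index in this direction; processes with equal values form one subgroup */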
  ierr = PetscMalloc2(size, PetscInt, &firstPoints, size, PetscMPIInt, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
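
/*
   Usage sketch (illustrative only, not compiled as part of this file): build, on every process,
   the communicator of the processes that start at the same x index, e.g. to operate on
   one-dimensional pencils of a tensor product grid.  "da" is assumed to exist.

     MPI_Comm xcomm;

     ierr = DMDAGetProcessorSubsets(da,DMDA_X,&xcomm);CHKERRQ(ierr);
     ... collective operations over xcomm ...
     ierr = MPI_Comm_free(&xcomm);CHKERRQ(ierr);
*/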