1 2 /* 3 Code for interpolating between grids represented by DMDAs 4 */ 5 6 /* 7 For linear elements there are two branches of code to compute the interpolation. They should compute the same results but may not. The "new version" does 8 not work for periodic domains, the old does. Change NEWVERSION to 1 to compile in the new version. Eventually when we are sure the two produce identical results 9 we will remove/merge the new version. Based on current tests, these both produce the same results. We are leaving NEWVERSION for now in the code since some 10 consider it cleaner, but old version is turned on since it handles periodic case. 11 */ 12 #define NEWVERSION 0 13 14 #include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/ 15 16 /* 17 Since the interpolation uses MATMAIJ for dof > 0 we convert request for non-MATAIJ baseded matrices to MATAIJ. 18 This is a bit of a hack, the reason for it is partially because -dm_mat_type defines the 19 matrix type for both the operator matrices and the interpolation matrices so that users 20 can select matrix types of base MATAIJ for accelerators 21 */ 22 static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype) 23 { 24 PetscInt i; 25 char const *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ}; 26 PetscBool flg; 27 28 PetscFunctionBegin; 29 *outtype = MATAIJ; 30 for (i=0; i<3; i++) { 31 PetscCall(PetscStrbeginswith(intype,types[i],&flg)); 32 if (flg) { 33 *outtype = intype; 34 PetscFunctionReturn(0); 35 } 36 } 37 PetscFunctionReturn(0); 38 } 39 40 PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A) 41 { 42 PetscInt i,i_start,m_f,Mx; 43 const PetscInt *idx_f,*idx_c; 44 PetscInt m_ghost,m_ghost_c; 45 PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio; 46 PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof; 47 PetscScalar v[2],x; 48 Mat mat; 49 DMBoundaryType bx; 50 ISLocalToGlobalMapping ltog_f,ltog_c; 51 MatType mattype; 52 53 PetscFunctionBegin; 54 
PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL)); 55 PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 56 if (bx == DM_BOUNDARY_PERIODIC) { 57 ratio = mx/Mx; 58 PetscCheck(ratio*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 59 } else { 60 ratio = (mx-1)/(Mx-1); 61 PetscCheck(ratio*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 62 } 63 64 PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL)); 65 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL)); 66 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 67 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 68 69 PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL)); 70 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL)); 71 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 72 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 73 74 /* create interpolation matrix */ 75 PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat)); 76 #if defined(PETSC_HAVE_CUDA) 77 /* 78 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 79 we don't want the original unconverted matrix copied to the GPU 80 */ 81 if (dof > 1) { 82 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 83 } 84 #endif 85 PetscCall(MatSetSizes(mat,m_f,m_c,mx,Mx)); 86 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 87 PetscCall(MatSetType(mat,mattype)); 88 PetscCall(MatSeqAIJSetPreallocation(mat,2,NULL)); 89 PetscCall(MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL)); 90 91 /* loop over local fine grid nodes setting interpolation for those*/ 92 if (!NEWVERSION) { 93 94 for (i=i_start; i<i_start+m_f; i++) { 95 /* convert to local "natural" 
numbering and then to PETSc global numbering */ 96 row = idx_f[i-i_start_ghost]; 97 98 i_c = (i/ratio); /* coarse grid node to left of fine grid node */ 99 PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 100 i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c); 101 102 /* 103 Only include those interpolation points that are truly 104 nonzero. Note this is very important for final grid lines 105 in x direction; since they have no right neighbor 106 */ 107 x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio); 108 nc = 0; 109 /* one left and below; or we are right on it */ 110 col = (i_c-i_start_ghost_c); 111 cols[nc] = idx_c[col]; 112 v[nc++] = -x + 1.0; 113 /* one right? */ 114 if (i_c*ratio != i) { 115 cols[nc] = idx_c[col+1]; 116 v[nc++] = x; 117 } 118 PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES)); 119 } 120 121 } else { 122 PetscScalar *xi; 123 PetscInt li,nxi,n; 124 PetscScalar Ni[2]; 125 126 /* compute local coordinate arrays */ 127 nxi = ratio + 1; 128 PetscCall(PetscMalloc1(nxi,&xi)); 129 for (li=0; li<nxi; li++) { 130 xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1)); 131 } 132 133 for (i=i_start; i<i_start+m_f; i++) { 134 /* convert to local "natural" numbering and then to PETSc global numbering */ 135 row = idx_f[(i-i_start_ghost)]; 136 137 i_c = (i/ratio); /* coarse grid node to left of fine grid node */ 138 PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 139 i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c); 140 141 /* remainders */ 142 li = i - ratio * (i/ratio); 143 if (i==mx-1) li = nxi-1; 144 145 /* corners */ 146 col = (i_c-i_start_ghost_c); 147 cols[0] = idx_c[col]; 148 Ni[0] = 1.0; 149 if ((li==0) || (li==nxi-1)) { 150 
PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES)); 151 continue; 152 } 153 154 /* edges + interior */ 155 /* remainders */ 156 if (i==mx-1) i_c--; 157 158 col = (i_c-i_start_ghost_c); 159 cols[0] = idx_c[col]; /* one left and below; or we are right on it */ 160 cols[1] = idx_c[col+1]; 161 162 Ni[0] = 0.5*(1.0-xi[li]); 163 Ni[1] = 0.5*(1.0+xi[li]); 164 for (n=0; n<2; n++) { 165 if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1; 166 } 167 PetscCall(MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES)); 168 } 169 PetscCall(PetscFree(xi)); 170 } 171 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 172 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 173 PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY)); 174 PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY)); 175 PetscCall(MatCreateMAIJ(mat,dof,A)); 176 PetscCall(MatDestroy(&mat)); 177 PetscFunctionReturn(0); 178 } 179 180 PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A) 181 { 182 PetscInt i,i_start,m_f,Mx; 183 const PetscInt *idx_f,*idx_c; 184 ISLocalToGlobalMapping ltog_f,ltog_c; 185 PetscInt m_ghost,m_ghost_c; 186 PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio; 187 PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof; 188 PetscScalar v[2],x; 189 Mat mat; 190 DMBoundaryType bx; 191 MatType mattype; 192 193 PetscFunctionBegin; 194 PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL)); 195 PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 196 if (bx == DM_BOUNDARY_PERIODIC) { 197 PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be positive",Mx); 198 ratio = mx/Mx; 199 PetscCheck(ratio*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 200 } else { 201 PetscCheck(Mx >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points 
%" PetscInt_FMT " must be greater than 1",Mx); 202 ratio = (mx-1)/(Mx-1); 203 PetscCheck(ratio*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 204 } 205 206 PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL)); 207 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL)); 208 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 209 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 210 211 PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL)); 212 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL)); 213 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 214 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 215 216 /* create interpolation matrix */ 217 PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat)); 218 #if defined(PETSC_HAVE_CUDA) 219 /* 220 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 221 we don't want the original unconverted matrix copied to the GPU 222 */ 223 if (dof > 1) { 224 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 225 } 226 #endif 227 PetscCall(MatSetSizes(mat,m_f,m_c,mx,Mx)); 228 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 229 PetscCall(MatSetType(mat,mattype)); 230 PetscCall(MatSeqAIJSetPreallocation(mat,2,NULL)); 231 PetscCall(MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL)); 232 233 /* loop over local fine grid nodes setting interpolation for those*/ 234 for (i=i_start; i<i_start+m_f; i++) { 235 /* convert to local "natural" numbering and then to PETSc global numbering */ 236 row = idx_f[(i-i_start_ghost)]; 237 238 i_c = (i/ratio); /* coarse grid node to left of fine grid node */ 239 240 /* 241 Only include those interpolation points that are truly 242 nonzero. 
Note this is very important for final grid lines 243 in x direction; since they have no right neighbor 244 */ 245 x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio); 246 nc = 0; 247 /* one left and below; or we are right on it */ 248 col = (i_c-i_start_ghost_c); 249 cols[nc] = idx_c[col]; 250 v[nc++] = -x + 1.0; 251 /* one right? */ 252 if (i_c*ratio != i) { 253 cols[nc] = idx_c[col+1]; 254 v[nc++] = x; 255 } 256 PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES)); 257 } 258 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 259 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 260 PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY)); 261 PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY)); 262 PetscCall(MatCreateMAIJ(mat,dof,A)); 263 PetscCall(MatDestroy(&mat)); 264 PetscCall(PetscLogFlops(5.0*m_f)); 265 PetscFunctionReturn(0); 266 } 267 268 PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A) 269 { 270 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof; 271 const PetscInt *idx_c,*idx_f; 272 ISLocalToGlobalMapping ltog_f,ltog_c; 273 PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz; 274 PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj; 275 PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale; 276 PetscMPIInt size_c,size_f,rank_f; 277 PetscScalar v[4],x,y; 278 Mat mat; 279 DMBoundaryType bx,by; 280 MatType mattype; 281 282 PetscFunctionBegin; 283 PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL)); 284 PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 285 if (bx == DM_BOUNDARY_PERIODIC) { 286 PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be positive",Mx); 287 ratioi = mx/Mx; 288 PetscCheck(ratioi*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" 
PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 289 } else { 290 PetscCheck(Mx >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be greater than 1",Mx); 291 ratioi = (mx-1)/(Mx-1); 292 PetscCheck(ratioi*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 293 } 294 if (by == DM_BOUNDARY_PERIODIC) { 295 PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be positive",My); 296 ratioj = my/My; 297 PetscCheck(ratioj*My == my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My); 298 } else { 299 PetscCheck(My >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be greater than 1",My); 300 ratioj = (my-1)/(My-1); 301 PetscCheck(ratioj*(My-1) == my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My); 302 } 303 304 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL)); 305 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL)); 306 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 307 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 308 309 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL)); 310 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL)); 311 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 312 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 313 314 /* 315 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 316 The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the 317 processors). 
It's effective length is hence 4 times its normal length, this is 318 why the col_scale is multiplied by the interpolation matrix column sizes. 319 sol_shift allows each set of 1/4 processors do its own interpolation using ITS 320 copy of the coarse vector. A bit of a hack but you do better. 321 322 In the standard case when size_f == size_c col_scale == 1 and col_shift == 0 323 */ 324 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c)); 325 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f)); 326 PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f)); 327 col_scale = size_f/size_c; 328 col_shift = Mx*My*(rank_f/size_c); 329 330 MatPreallocateBegin(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz); 331 for (j=j_start; j<j_start+n_f; j++) { 332 for (i=i_start; i<i_start+m_f; i++) { 333 /* convert to local "natural" numbering and then to PETSc global numbering */ 334 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 335 336 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 337 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 338 339 PetscCheck(j_c >= j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 340 j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,j_start,j_c,j_start_ghost_c); 341 PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 342 i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c); 343 344 /* 345 Only include those interpolation points that are truly 346 nonzero. 
Note this is very important for final grid lines 347 in x and y directions; since they have no right/top neighbors 348 */ 349 nc = 0; 350 /* one left and below; or we are right on it */ 351 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 352 cols[nc++] = col_shift + idx_c[col]; 353 /* one right and below */ 354 if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1]; 355 /* one left and above */ 356 if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c]; 357 /* one right and above */ 358 if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)]; 359 PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz)); 360 } 361 } 362 PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat)); 363 #if defined(PETSC_HAVE_CUDA) 364 /* 365 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 366 we don't want the original unconverted matrix copied to the GPU 367 */ 368 if (dof > 1) { 369 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 370 } 371 #endif 372 PetscCall(MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My)); 373 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 374 PetscCall(MatSetType(mat,mattype)); 375 PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz)); 376 PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz)); 377 MatPreallocateEnd(dnz,onz); 378 379 /* loop over local fine grid nodes setting interpolation for those*/ 380 if (!NEWVERSION) { 381 382 for (j=j_start; j<j_start+n_f; j++) { 383 for (i=i_start; i<i_start+m_f; i++) { 384 /* convert to local "natural" numbering and then to PETSc global numbering */ 385 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 386 387 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 388 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 389 390 /* 391 Only include those interpolation points that are truly 392 nonzero. 
Note this is very important for final grid lines 393 in x and y directions; since they have no right/top neighbors 394 */ 395 x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi); 396 y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj); 397 398 nc = 0; 399 /* one left and below; or we are right on it */ 400 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 401 cols[nc] = col_shift + idx_c[col]; 402 v[nc++] = x*y - x - y + 1.0; 403 /* one right and below */ 404 if (i_c*ratioi != i) { 405 cols[nc] = col_shift + idx_c[col+1]; 406 v[nc++] = -x*y + x; 407 } 408 /* one left and above */ 409 if (j_c*ratioj != j) { 410 cols[nc] = col_shift + idx_c[col+m_ghost_c]; 411 v[nc++] = -x*y + y; 412 } 413 /* one right and above */ 414 if (j_c*ratioj != j && i_c*ratioi != i) { 415 cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)]; 416 v[nc++] = x*y; 417 } 418 PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES)); 419 } 420 } 421 422 } else { 423 PetscScalar Ni[4]; 424 PetscScalar *xi,*eta; 425 PetscInt li,nxi,lj,neta; 426 427 /* compute local coordinate arrays */ 428 nxi = ratioi + 1; 429 neta = ratioj + 1; 430 PetscCall(PetscMalloc1(nxi,&xi)); 431 PetscCall(PetscMalloc1(neta,&eta)); 432 for (li=0; li<nxi; li++) { 433 xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1)); 434 } 435 for (lj=0; lj<neta; lj++) { 436 eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1)); 437 } 438 439 /* loop over local fine grid nodes setting interpolation for those*/ 440 for (j=j_start; j<j_start+n_f; j++) { 441 for (i=i_start; i<i_start+m_f; i++) { 442 /* convert to local "natural" numbering and then to PETSc global numbering */ 443 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 444 445 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 446 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 447 448 /* remainders */ 449 li = i - ratioi * (i/ratioi); 450 if (i==mx-1) li = nxi-1; 451 lj = j - ratioj * (j/ratioj); 452 if 
(j==my-1) lj = neta-1; 453 454 /* corners */ 455 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 456 cols[0] = col_shift + idx_c[col]; /* left, below */ 457 Ni[0] = 1.0; 458 if ((li==0) || (li==nxi-1)) { 459 if ((lj==0) || (lj==neta-1)) { 460 PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES)); 461 continue; 462 } 463 } 464 465 /* edges + interior */ 466 /* remainders */ 467 if (i==mx-1) i_c--; 468 if (j==my-1) j_c--; 469 470 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 471 cols[0] = col_shift + idx_c[col]; /* left, below */ 472 cols[1] = col_shift + idx_c[col+1]; /* right, below */ 473 cols[2] = col_shift + idx_c[col+m_ghost_c]; /* left, above */ 474 cols[3] = col_shift + idx_c[col+(m_ghost_c+1)]; /* right, above */ 475 476 Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]); 477 Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]); 478 Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]); 479 Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]); 480 481 nc = 0; 482 if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1; 483 if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1; 484 if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1; 485 if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1; 486 487 PetscCall(MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES)); 488 } 489 } 490 PetscCall(PetscFree(xi)); 491 PetscCall(PetscFree(eta)); 492 } 493 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 494 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 495 PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY)); 496 PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY)); 497 PetscCall(MatCreateMAIJ(mat,dof,A)); 498 PetscCall(MatDestroy(&mat)); 499 PetscFunctionReturn(0); 500 } 501 502 /* 503 Contributed by Andrei Draganescu <aidraga@sandia.gov> 504 */ 505 PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A) 506 { 507 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof; 508 const PetscInt *idx_c,*idx_f; 509 ISLocalToGlobalMapping ltog_f,ltog_c; 510 PetscInt 
m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz; 511 PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj; 512 PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale; 513 PetscMPIInt size_c,size_f,rank_f; 514 PetscScalar v[4]; 515 Mat mat; 516 DMBoundaryType bx,by; 517 MatType mattype; 518 519 PetscFunctionBegin; 520 PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL)); 521 PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 522 PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be positive",Mx); 523 PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be positive",My); 524 ratioi = mx/Mx; 525 ratioj = my/My; 526 PetscCheck(ratioi*Mx == mx,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x"); 527 PetscCheck(ratioj*My == my,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y"); 528 PetscCheck(ratioi == 2,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 2"); 529 PetscCheck(ratioj == 2,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 2"); 530 531 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL)); 532 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL)); 533 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 534 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 535 536 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL)); 537 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL)); 538 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 539 
PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 540 541 /* 542 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 543 The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the 544 processors). It's effective length is hence 4 times its normal length, this is 545 why the col_scale is multiplied by the interpolation matrix column sizes. 546 sol_shift allows each set of 1/4 processors do its own interpolation using ITS 547 copy of the coarse vector. A bit of a hack but you do better. 548 549 In the standard case when size_f == size_c col_scale == 1 and col_shift == 0 550 */ 551 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c)); 552 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f)); 553 PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f)); 554 col_scale = size_f/size_c; 555 col_shift = Mx*My*(rank_f/size_c); 556 557 MatPreallocateBegin(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz); 558 for (j=j_start; j<j_start+n_f; j++) { 559 for (i=i_start; i<i_start+m_f; i++) { 560 /* convert to local "natural" numbering and then to PETSc global numbering */ 561 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 562 563 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 564 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 565 566 PetscCheck(j_c >= j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 567 j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,j_start,j_c,j_start_ghost_c); 568 PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 569 i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c); 570 571 /* 572 Only include those interpolation points that are truly 573 nonzero. 
Note this is very important for final grid lines 574 in x and y directions; since they have no right/top neighbors 575 */ 576 nc = 0; 577 /* one left and below; or we are right on it */ 578 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 579 cols[nc++] = col_shift + idx_c[col]; 580 PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz)); 581 } 582 } 583 PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat)); 584 #if defined(PETSC_HAVE_CUDA) 585 /* 586 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 587 we don't want the original unconverted matrix copied to the GPU 588 */ 589 if (dof > 1) { 590 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 591 } 592 #endif 593 PetscCall(MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My)); 594 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 595 PetscCall(MatSetType(mat,mattype)); 596 PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz)); 597 PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz)); 598 MatPreallocateEnd(dnz,onz); 599 600 /* loop over local fine grid nodes setting interpolation for those*/ 601 for (j=j_start; j<j_start+n_f; j++) { 602 for (i=i_start; i<i_start+m_f; i++) { 603 /* convert to local "natural" numbering and then to PETSc global numbering */ 604 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 605 606 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 607 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 608 nc = 0; 609 /* one left and below; or we are right on it */ 610 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 611 cols[nc] = col_shift + idx_c[col]; 612 v[nc++] = 1.0; 613 614 PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES)); 615 } 616 } 617 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 618 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 619 PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY)); 620 
PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY)); 621 PetscCall(MatCreateMAIJ(mat,dof,A)); 622 PetscCall(MatDestroy(&mat)); 623 PetscCall(PetscLogFlops(13.0*m_f*n_f)); 624 PetscFunctionReturn(0); 625 } 626 627 /* 628 Contributed by Jianming Yang <jianming-yang@uiowa.edu> 629 */ 630 PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A) 631 { 632 PetscInt i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof; 633 const PetscInt *idx_c,*idx_f; 634 ISLocalToGlobalMapping ltog_f,ltog_c; 635 PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz; 636 PetscInt row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol; 637 PetscInt i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale; 638 PetscMPIInt size_c,size_f,rank_f; 639 PetscScalar v[8]; 640 Mat mat; 641 DMBoundaryType bx,by,bz; 642 MatType mattype; 643 644 PetscFunctionBegin; 645 PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL)); 646 PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be positive",Mx); 647 PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be positive",My); 648 PetscCheck(Mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %" PetscInt_FMT " must be positive",Mz); 649 PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 650 ratioi = mx/Mx; 651 ratioj = my/My; 652 ratiol = mz/Mz; 653 PetscCheck(ratioi*Mx == mx,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x"); 654 PetscCheck(ratioj*My == my,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y"); 655 PetscCheck(ratiol*Mz == mz,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid 
points must be multiple of coarse grid points in z"); 656 PetscCheck(ratioi == 2 || ratioi == 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 1 or 2"); 657 PetscCheck(ratioj == 2 || ratioj == 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 1 or 2"); 658 PetscCheck(ratiol == 2 || ratiol == 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in z must be 1 or 2"); 659 660 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f)); 661 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost)); 662 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 663 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 664 665 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c)); 666 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c)); 667 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 668 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 669 670 /* 671 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 672 The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the 673 processors). It's effective length is hence 4 times its normal length, this is 674 why the col_scale is multiplied by the interpolation matrix column sizes. 675 sol_shift allows each set of 1/4 processors do its own interpolation using ITS 676 copy of the coarse vector. A bit of a hack but you do better. 
677 678 In the standard case when size_f == size_c col_scale == 1 and col_shift == 0 679 */ 680 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c)); 681 PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f)); 682 PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f)); 683 col_scale = size_f/size_c; 684 col_shift = Mx*My*Mz*(rank_f/size_c); 685 686 MatPreallocateBegin(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz); 687 for (l=l_start; l<l_start+p_f; l++) { 688 for (j=j_start; j<j_start+n_f; j++) { 689 for (i=i_start; i<i_start+m_f; i++) { 690 /* convert to local "natural" numbering and then to PETSc global numbering */ 691 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 692 693 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 694 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 695 l_c = (l/ratiol); 696 697 PetscCheck(l_c >= l_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 698 l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT,l_start,l_c,l_start_ghost_c); 699 PetscCheck(j_c >= j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 700 j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,j_start,j_c,j_start_ghost_c); 701 PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 702 i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c); 703 704 /* 705 Only include those interpolation points that are truly 706 nonzero. 
Note this is very important for final grid lines 707 in x and y directions; since they have no right/top neighbors 708 */ 709 nc = 0; 710 /* one left and below; or we are right on it */ 711 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 712 cols[nc++] = col_shift + idx_c[col]; 713 PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz)); 714 } 715 } 716 } 717 PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat)); 718 #if defined(PETSC_HAVE_CUDA) 719 /* 720 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 721 we don't want the original unconverted matrix copied to the GPU 722 */ 723 if (dof > 1) { 724 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 725 } 726 #endif 727 PetscCall(MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz)); 728 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 729 PetscCall(MatSetType(mat,mattype)); 730 PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz)); 731 PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz)); 732 MatPreallocateEnd(dnz,onz); 733 734 /* loop over local fine grid nodes setting interpolation for those*/ 735 for (l=l_start; l<l_start+p_f; l++) { 736 for (j=j_start; j<j_start+n_f; j++) { 737 for (i=i_start; i<i_start+m_f; i++) { 738 /* convert to local "natural" numbering and then to PETSc global numbering */ 739 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 740 741 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 742 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 743 l_c = (l/ratiol); 744 nc = 0; 745 /* one left and below; or we are right on it */ 746 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 747 cols[nc] = col_shift + idx_c[col]; 748 v[nc++] = 1.0; 749 750 PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES)); 751 } 752 } 753 } 754 
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  /* wrap the scalar interpolation in a MAIJ matrix so the same stencil applies to every dof component */
  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscCall(PetscLogFlops(13.0*m_f*n_f*p_f));
  PetscFunctionReturn(0);
}

/*
   DMCreateInterpolation_DA_3D_Q1 - Builds the trilinear (Q1) interpolation matrix from the
   coarse DMDA dac to the fine DMDA daf.  The scalar (dof = 1) matrix is assembled first and
   then wrapped in a MAIJ matrix so it applies to all dof components of the vectors.

   Two code paths exist (see NEWVERSION at the top of the file); the old path is compiled in
   because it also handles periodic boundary conditions.
*/
PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A)
{
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz;
  PetscInt               row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok;
  PetscInt               i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
  PetscInt               l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c;
  PetscInt               l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz;
  PetscScalar            v[8],x,y,z;
  Mat                    mat;
  DMBoundaryType         bx,by,bz;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratio in x; for periodic grids the ratio is over grid points, otherwise over elements (points - 1) */
  if (mx == Mx) {
    ratioi = 1;
  } else if (bx == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be positive",Mx);
    ratioi = mx/Mx;
    PetscCheck(ratioi*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  } else {
    PetscCheck(Mx >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %" PetscInt_FMT " must be greater than 1",Mx);
    ratioi = (mx-1)/(Mx-1);
    PetscCheck(ratioi*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  }
  /* refinement ratio in y */
  if (my == My) {
    ratioj = 1;
  } else if (by == DM_BOUNDARY_PERIODIC) {
    PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be positive",My);
    ratioj = my/My;
    PetscCheck(ratioj*My == my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My);
  } else {
    PetscCheck(My >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %" PetscInt_FMT " must be greater than 1",My);
    ratioj = (my-1)/(My-1);
    PetscCheck(ratioj*(My-1) == my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My);
  }
  /* refinement ratio in z */
  if (mz == Mz) {
    ratiok = 1;
  } else if (bz == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %" PetscInt_FMT " must be positive",Mz);
    ratiok = mz/Mz;
    PetscCheck(ratiok*Mz == mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT,mz,Mz);
  } else {
    PetscCheck(Mz >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %" PetscInt_FMT " must be greater than 1",Mz);
    ratiok = (mz-1)/(Mz-1);
    PetscCheck(ratiok*(Mz-1) == mz-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT,mz,Mz);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /* create interpolation matrix, determining exact preallocation */
  MatPreallocateBegin(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);
  /* loop over local fine grid nodes counting interpolating points */
  for (l=l_start; l<l_start+p_f; l++) {
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
        i_c = (i/ratioi);
        j_c = (j/ratioj);
        l_c = (l/ratiok);
        PetscCheck(l_c >= l_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
l_start %" PetscInt_FMT " l_c %" PetscInt_FMT " l_start_ghost_c %" PetscInt_FMT,l_start,l_c,l_start_ghost_c);
        PetscCheck(j_c >= j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
j_start %" PetscInt_FMT " j_c %" PetscInt_FMT " j_start_ghost_c %" PetscInt_FMT,j_start,j_c,j_start_ghost_c);
        PetscCheck(i_c >= i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
i_start %" PetscInt_FMT " i_c %" PetscInt_FMT " i_start_ghost_c %" PetscInt_FMT,i_start,i_c,i_start_ghost_c);

        /*
           Only include those interpolation points that are truly
           nonzero. Note this is very important for final grid lines
           in x and y directions; since they have no right/top neighbors
        */
        nc = 0;
        col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[nc++] = idx_c[col];
        if (i_c*ratioi != i) {
          cols[nc++] = idx_c[col+1];
        }
        if (j_c*ratioj != j) {
          cols[nc++] = idx_c[col+m_ghost_c];
        }
        if (l_c*ratiok != l) {
          cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c];
        }
        if (j_c*ratioj != j && i_c*ratioi != i) {
          cols[nc++] = idx_c[col+(m_ghost_c+1)];
        }
        if (j_c*ratioj != j && l_c*ratiok != l) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
        }
        if (i_c*ratioi != i && l_c*ratiok != l) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
        }
        if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
        }
        PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz));
      }
    }
  }
  PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz));
  PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz));
  MatPreallocateEnd(dnz,onz);

  /* loop over local fine grid nodes setting interpolation for those*/
  if (!NEWVERSION) {

    for (l=l_start; l<l_start+p_f; l++) {
      for (j=j_start; j<j_start+n_f; j++) {
        for (i=i_start; i<i_start+m_f; i++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

          i_c = (i/ratioi);
          j_c = (j/ratioj);
          l_c = (l/ratiok);

          /*
           Only include those interpolation points that are truly
           nonzero. Note this is very important for final grid lines
           in x and y directions; since they have no right/top neighbors
          */
          /* (x,y,z) in [0,1) are the fine node's offsets within its coarse cell */
          x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
          y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
          z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok);

          nc = 0;
          /* one left and below; or we are right on it */
          col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));

          /* standard trilinear shape-function weights, written with (2t-1) mapping [0,1] to [-1,1] */
          cols[nc] = idx_c[col];
          v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));

          if (i_c*ratioi != i) {
            cols[nc] = idx_c[col+1];
            v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (j_c*ratioj != j) {
            cols[nc] = idx_c[col+m_ghost_c];
            v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (l_c*ratiok != l) {
            cols[nc] = idx_c[col+m_ghost_c*n_ghost_c];
            v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (j_c*ratioj != j && i_c*ratioi != i) {
            cols[nc] = idx_c[col+(m_ghost_c+1)];
            v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (j_c*ratioj != j && l_c*ratiok != l) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
            v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (i_c*ratioi != i && l_c*ratiok != l) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
            v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
            v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
          }
          PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
        }
      }
    }

  } else {
    PetscScalar *xi,*eta,*zeta;
    PetscInt    li,nxi,lj,neta,lk,nzeta,n;
    PetscScalar Ni[8];

    /* compute local coordinate arrays */
    nxi = ratioi + 1;
    neta = ratioj + 1;
    nzeta = ratiok + 1;
    PetscCall(PetscMalloc1(nxi,&xi));
    PetscCall(PetscMalloc1(neta,&eta));
    PetscCall(PetscMalloc1(nzeta,&zeta));
    /* reference coordinates in [-1,1] for each fine node position inside a coarse cell */
    for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
    for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
    for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1));

    for (l=l_start; l<l_start+p_f; l++) {
      for (j=j_start; j<j_start+n_f; j++) {
        for (i=i_start; i<i_start+m_f; i++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

          i_c = (i/ratioi);
          j_c = (j/ratioj);
          l_c = (l/ratiok);

          /* remainders */
          li = i - ratioi * (i/ratioi);
          if (i==mx-1) li = nxi-1;
          lj = j - ratioj * (j/ratioj);
          if (j==my-1) lj = neta-1;
          lk = l - ratiok * (l/ratiok);
          if (l==mz-1) lk = nzeta-1;

          /* corners */
          col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
          cols[0] = idx_c[col];
          Ni[0] = 1.0;
          /* fine node coincides with a coarse node: single unit entry, skip the stencil */
          if ((li==0) || (li==nxi-1)) {
            if ((lj==0) || (lj==neta-1)) {
              if ((lk==0) || (lk==nzeta-1)) {
                PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES));
                continue;
              }
            }
          }

          /* edges + interior */
          /* remainders */
          if (i==mx-1) i_c--;
          if (j==my-1) j_c--;
          if (l==mz-1) l_c--;

          col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
          cols[0] = idx_c[col];                                   /* one left and below; or we are right on it */
          cols[1] = idx_c[col+1];                                 /* one right and below */
          cols[2] = idx_c[col+m_ghost_c];                         /* one left and above */
          cols[3] = idx_c[col+(m_ghost_c+1)];                     /* one right and above */

          cols[4] = idx_c[col+m_ghost_c*n_ghost_c];               /* one left and below and front; or we are right on it */
          cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)];           /* one right and below, and front */
          cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];   /* one left and above and front*/
          cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */

          Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
          Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
          Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
          Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);

          Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
          Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
          Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
          Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);

          /* negative column indices are ignored by MatSetValues, dropping (near-)zero weights */
          for (n=0; n<8; n++) {
            if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
          }
          PetscCall(MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES));

        }
      }
    }
    PetscCall(PetscFree(xi));
    PetscCall(PetscFree(eta));
    PetscCall(PetscFree(zeta));
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));

  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscFunctionReturn(0);
}

PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale)
{
  PetscInt        dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType
bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType stc,stf;
  DM_DA           *ddc = (DM_DA*)dac->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dac,DM_CLASSID,1);
  PetscValidHeaderSpecific(daf,DM_CLASSID,2);
  PetscValidPointer(A,3);
  if (scale) PetscValidPointer(scale,4);

  /* the coarse and fine DMDA must describe compatible discretizations before dispatching on dimension */
  PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc));
  PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf));
  PetscCheck(dimc == dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dimc,dimf);
  PetscCheck(dofc == doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dofc,doff);
  PetscCheck(sc == sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,sc,sf);
  PetscCheck(bxc == bxf && byc == byf && bzc == bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
  PetscCheck(stc == stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
  PetscCheck(Mc >= 2 || Mf <= 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
  PetscCheck(dimc <= 1 || Nc >= 2 || Nf <= 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
  PetscCheck(dimc <= 2 || Pc >= 2 || Pf <= 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");

  /* dispatch on the interpolation type stored in the coarse DMDA and on the grid dimension */
  if (ddc->interptype == DMDA_Q1) {
    if (dimc == 1) {
      PetscCall(DMCreateInterpolation_DA_1D_Q1(dac,daf,A));
    } else if (dimc == 2) {
      PetscCall(DMCreateInterpolation_DA_2D_Q1(dac,daf,A));
    } else if (dimc == 3) {
      PetscCall(DMCreateInterpolation_DA_3D_Q1(dac,daf,A));
    } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d",dimc,(int)ddc->interptype);
  } else if (ddc->interptype == DMDA_Q0) {
    if (dimc == 1) {
      PetscCall(DMCreateInterpolation_DA_1D_Q0(dac,daf,A));
    } else if (dimc == 2) {
      PetscCall(DMCreateInterpolation_DA_2D_Q0(dac,daf,A));
    } else if (dimc == 3) {
      PetscCall(DMCreateInterpolation_DA_3D_Q0(dac,daf,A));
    } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %" PetscInt_FMT " for interpolation type %d",dimc,(int)ddc->interptype);
  }
  if (scale) PetscCall(DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale));
  PetscFunctionReturn(0);
}

/*
   DMCreateInjection_DA_1D - Builds a VecScatter that injects fine grid values onto the
   coarse grid by sampling the fine grid at every coarse grid point (pointwise restriction).
*/
PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject)
{
  PetscInt               i,i_start,m_f,Mx,dof;
  const PetscInt         *idx_f;
  ISLocalToGlobalMapping ltog_f;
  PetscInt               m_ghost,m_ghost_c;
  PetscInt               row,i_start_ghost,mx,m_c,nc,ratioi;
  PetscInt               i_start_c,i_start_ghost_c;
  PetscInt               *cols;
  DMBoundaryType         bx;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratio in x; periodic grids divide point counts, others divide element counts */
  if (bx == DM_BOUNDARY_PERIODIC) {
    ratioi = mx/Mx;
    PetscCheck(ratioi*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  } else {
    ratioi = (mx-1)/(Mx-1);
    PetscCheck(ratioi*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL));

  /* loop over local fine grid nodes setting interpolation for those*/
  nc = 0;
  PetscCall(PetscMalloc1(m_f,&cols));

  /* for each locally owned coarse point record the global index of the matching fine point */
  for (i=i_start_c; i<i_start_c+m_c; i++) {
    PetscInt i_f = i*ratioi;

    PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);

    row = idx_f[(i_f-i_start_ghost)];
    cols[nc++] = row;
  }

  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  /* cols ownership passes to the IS (PETSC_OWN_POINTER); the scatter samples fine -> coarse */
  PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf));
  PetscCall(DMGetGlobalVector(dac,&vecc));
  PetscCall(DMGetGlobalVector(daf,&vecf));
  PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject));
  PetscCall(DMRestoreGlobalVector(dac,&vecc));
  PetscCall(DMRestoreGlobalVector(daf,&vecf));
  PetscCall(ISDestroy(&isf));
  PetscFunctionReturn(0);
}

/*
   DMCreateInjection_DA_2D - 2D analogue of DMCreateInjection_DA_1D: a VecScatter that
   samples the fine grid at every coarse grid point.
*/
PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM daf,VecScatter *inject)
{
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c;
  PetscInt               row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj;
  PetscInt               i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
  PetscInt               *cols;
  DMBoundaryType         bx,by;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratios; periodic directions divide point counts, others divide element counts */
  if (bx == DM_BOUNDARY_PERIODIC) {
    ratioi = mx/Mx;
    PetscCheck(ratioi*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  } else {
    ratioi = (mx-1)/(Mx-1);
    PetscCheck(ratioi*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx);
  }
  if (by == DM_BOUNDARY_PERIODIC) {
    ratioj = my/My;
    PetscCheck(ratioj*My == my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My);
  } else {
    ratioj = (my-1)/(My-1);
    PetscCheck(ratioj*(My-1) == my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /* loop over local fine grid nodes setting interpolation for those*/
  nc = 0;
  PetscCall(PetscMalloc1(n_f*m_f,&cols));
  for (j=j_start_c; j<j_start_c+n_c; j++) {
    for (i=i_start_c; i<i_start_c+m_c; i++) {
      PetscInt i_f = i*ratioi,j_f = j*ratioj;
1206 PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost+n_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 1207 j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",j,j_f,j_start_ghost,j_start_ghost+n_ghost); 1208 PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 1209 i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",i,i_f,i_start_ghost,i_start_ghost+m_ghost); 1210 row = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))]; 1211 cols[nc++] = row; 1212 } 1213 } 1214 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 1215 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 1216 1217 PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf)); 1218 PetscCall(DMGetGlobalVector(dac,&vecc)); 1219 PetscCall(DMGetGlobalVector(daf,&vecf)); 1220 PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject)); 1221 PetscCall(DMRestoreGlobalVector(dac,&vecc)); 1222 PetscCall(DMRestoreGlobalVector(daf,&vecf)); 1223 PetscCall(ISDestroy(&isf)); 1224 PetscFunctionReturn(0); 1225 } 1226 1227 PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject) 1228 { 1229 PetscInt i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz; 1230 PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c; 1231 PetscInt i_start_ghost,j_start_ghost,k_start_ghost; 1232 PetscInt mx,my,mz,ratioi,ratioj,ratiok; 1233 PetscInt i_start_c,j_start_c,k_start_c; 1234 PetscInt m_c,n_c,p_c; 1235 PetscInt i_start_ghost_c,j_start_ghost_c,k_start_ghost_c; 1236 PetscInt row,nc,dof; 1237 const PetscInt *idx_c,*idx_f; 1238 ISLocalToGlobalMapping ltog_f,ltog_c; 1239 PetscInt *cols; 1240 DMBoundaryType bx,by,bz; 1241 Vec vecf,vecc; 1242 IS isf; 1243 1244 PetscFunctionBegin; 1245 
PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL)); 1246 PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 1247 1248 if (bx == DM_BOUNDARY_PERIODIC) { 1249 ratioi = mx/Mx; 1250 PetscCheck(ratioi*Mx == mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 1251 } else { 1252 ratioi = (mx-1)/(Mx-1); 1253 PetscCheck(ratioi*(Mx-1) == mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %" PetscInt_FMT " Mx %" PetscInt_FMT,mx,Mx); 1254 } 1255 if (by == DM_BOUNDARY_PERIODIC) { 1256 ratioj = my/My; 1257 PetscCheck(ratioj*My == my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My); 1258 } else { 1259 ratioj = (my-1)/(My-1); 1260 PetscCheck(ratioj*(My-1) == my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %" PetscInt_FMT " My %" PetscInt_FMT,my,My); 1261 } 1262 if (bz == DM_BOUNDARY_PERIODIC) { 1263 ratiok = mz/Mz; 1264 PetscCheck(ratiok*Mz == mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %" PetscInt_FMT " My %" PetscInt_FMT,mz,Mz); 1265 } else { 1266 ratiok = (mz-1)/(Mz-1); 1267 PetscCheck(ratiok*(Mz-1) == mz-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %" PetscInt_FMT " Mz %" PetscInt_FMT,mz,Mz); 1268 } 1269 1270 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f)); 1271 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost)); 1272 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 1273 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 1274 1275 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c)); 1276 
PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c)); 1277 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 1278 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 1279 1280 /* loop over local fine grid nodes setting interpolation for those*/ 1281 nc = 0; 1282 PetscCall(PetscMalloc1(n_f*m_f*p_f,&cols)); 1283 for (k=k_start_c; k<k_start_c+p_c; k++) { 1284 for (j=j_start_c; j<j_start_c+n_c; j++) { 1285 for (i=i_start_c; i<i_start_c+m_c; i++) { 1286 PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok; 1287 PetscCheck(k_f >= k_start_ghost && k_f < k_start_ghost+p_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1288 "k_c %" PetscInt_FMT " k_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",k,k_f,k_start_ghost,k_start_ghost+p_ghost); 1289 PetscCheck(j_f >= j_start_ghost && j_f < j_start_ghost+n_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1290 "j_c %" PetscInt_FMT " j_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",j,j_f,j_start_ghost,j_start_ghost+n_ghost); 1291 PetscCheck(i_f >= i_start_ghost && i_f < i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1292 "i_c %" PetscInt_FMT " i_f %" PetscInt_FMT " fine ghost range [%" PetscInt_FMT ",%" PetscInt_FMT "]",i,i_f,i_start_ghost,i_start_ghost+m_ghost); 1293 row = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))]; 1294 cols[nc++] = row; 1295 } 1296 } 1297 } 1298 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 1299 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 1300 1301 PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf)); 1302 PetscCall(DMGetGlobalVector(dac,&vecc)); 1303 
PetscCall(DMGetGlobalVector(daf,&vecf)); 1304 PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject)); 1305 PetscCall(DMRestoreGlobalVector(dac,&vecc)); 1306 PetscCall(DMRestoreGlobalVector(daf,&vecf)); 1307 PetscCall(ISDestroy(&isf)); 1308 PetscFunctionReturn(0); 1309 } 1310 1311 PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat) 1312 { 1313 PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf; 1314 DMBoundaryType bxc,byc,bzc,bxf,byf,bzf; 1315 DMDAStencilType stc,stf; 1316 VecScatter inject = NULL; 1317 1318 PetscFunctionBegin; 1319 PetscValidHeaderSpecific(dac,DM_CLASSID,1); 1320 PetscValidHeaderSpecific(daf,DM_CLASSID,2); 1321 PetscValidPointer(mat,3); 1322 1323 PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc)); 1324 PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf)); 1325 PetscCheck(dimc == dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dimc,dimf); 1326 PetscCheck(dofc == doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dofc,doff); 1327 PetscCheck(sc == sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,sc,sf); 1328 PetscCheck(bxc == bxf && byc == byf && bzc == bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs"); 1329 PetscCheck(stc == stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs"); 1330 PetscCheck(Mc >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction"); 1331 PetscCheck(dimc <= 1 || Nc >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction"); 1332 PetscCheck(dimc <= 2 || Pc >= 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at 
least 2 points in z direction"); 1333 1334 if (dimc == 1) { 1335 PetscCall(DMCreateInjection_DA_1D(dac,daf,&inject)); 1336 } else if (dimc == 2) { 1337 PetscCall(DMCreateInjection_DA_2D(dac,daf,&inject)); 1338 } else if (dimc == 3) { 1339 PetscCall(DMCreateInjection_DA_3D(dac,daf,&inject)); 1340 } 1341 PetscCall(MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat)); 1342 PetscCall(VecScatterDestroy(&inject)); 1343 PetscFunctionReturn(0); 1344 } 1345 1346 /*@ 1347 DMCreateAggregates - Deprecated, see DMDACreateAggregates. 1348 1349 Level: intermediate 1350 @*/ 1351 PetscErrorCode DMCreateAggregates(DM dac,DM daf,Mat *mat) 1352 { 1353 return DMDACreateAggregates(dac,daf,mat); 1354 } 1355 1356 /*@ 1357 DMDACreateAggregates - Gets the aggregates that map between 1358 grids associated with two DMDAs. 1359 1360 Collective on dmc 1361 1362 Input Parameters: 1363 + dmc - the coarse grid DMDA 1364 - dmf - the fine grid DMDA 1365 1366 Output Parameters: 1367 . rest - the restriction matrix (transpose of the projection matrix) 1368 1369 Level: intermediate 1370 1371 Note: This routine is not used by PETSc. 1372 It is not clear what its use case is and it may be removed in a future release. 1373 Users should contact petsc-maint@mcs.anl.gov if they plan to use it. 

.seealso: `DMRefine()`, `DMCreateInjection()`, `DMCreateInterpolation()`
@*/
PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
{
  PetscInt               dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
  PetscInt               dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType         bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType        stc,stf;
  PetscInt               i,j,l;
  PetscInt               i_start,j_start,l_start, m_f,n_f,p_f;
  PetscInt               i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
  const PetscInt         *idx_f;
  PetscInt               i_c,j_c,l_c;
  PetscInt               i_start_c,j_start_c,l_start_c, m_c,n_c,p_c;
  PetscInt               i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
  const PetscInt         *idx_c;
  PetscInt               d;
  PetscInt               a;
  PetscInt               max_agg_size;
  PetscInt               *fine_nodes;
  PetscScalar            *one_vec;
  PetscInt               fn_idx;
  ISLocalToGlobalMapping ltogmf,ltogmc;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(dac,DM_CLASSID,1,DMDA);
  PetscValidHeaderSpecificType(daf,DM_CLASSID,2,DMDA);
  PetscValidPointer(rest,3);

  /* the two DMDAs must describe compatible discretizations of the same problem */
  PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc));
  PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf));
  PetscCheck(dimc == dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dimc,dimf);
  PetscCheck(dofc == doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,dofc,doff);
  PetscCheck(sc == sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %" PetscInt_FMT " %" PetscInt_FMT,sc,sf);
  PetscCheck(bxc == bxf && byc == byf && bzc == bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
  PetscCheck(stc == stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");

  PetscCheck(Mf >= Mc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Mc %" PetscInt_FMT ", Mf %" PetscInt_FMT, Mc, Mf);
  PetscCheck(Nf >= Nc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Nc %" PetscInt_FMT ", Nf %" PetscInt_FMT, Nc, Nf);
  PetscCheck(Pf >= Pc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Pc %" PetscInt_FMT ", Pf %" PetscInt_FMT, Pc, Pf);

  /* in 1D/2D the unused extents are negative; clamp to 1 so the size/ratio arithmetic below stays valid */
  if (Pc < 0) Pc = 1;
  if (Pf < 0) Pf = 1;
  if (Nc < 0) Nc = 1;
  if (Nf < 0) Nf = 1;

  /* owned and ghosted corners of the fine grid on this rank */
  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost));

  PetscCall(DMGetLocalToGlobalMapping(daf,&ltogmf));
  PetscCall(ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f));

  /* owned and ghosted corners of the coarse grid on this rank */
  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c));

  PetscCall(DMGetLocalToGlobalMapping(dac,&ltogmc));
  PetscCall(ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c));

  /*
     Basic idea is as follows. Here's a 2D example, suppose r_x, r_y are the ratios
     for dimension 1 and 2 respectively.
     Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1)
     and r_y*j and r_y*(j+1) will be grouped into the same coarse grid agregate.
     Each specific dof on the fine grid is mapped to one dof on the coarse grid.
  */

  /* upper bound on the number of fine nodes per aggregate; used for both preallocation and scratch sizing */
  max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1);

  /* create the matrix that will contain the restriction operator */
  PetscCall(MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c*n_c*p_c*dofc, m_f*n_f*p_f*doff, Mc*Nc*Pc*dofc, Mf*Nf*Pf*doff,
                         max_agg_size, NULL, max_agg_size, NULL, rest));

  /* store nodes in the fine grid here */
  PetscCall(PetscMalloc2(max_agg_size, &one_vec,max_agg_size, &fine_nodes));
  for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0;

  /* loop over all coarse nodes */
  for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) {
    for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) {
      for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) {
        for (d=0; d<dofc; d++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d;

          fn_idx = 0;
          /* Corresponding fine points are all points (i_f, j_f, l_f) such that
             i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
             (same for other dimensions)
          */
          for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) {
            for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) {
              for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) {
                /* same ghost-offset to global-index translation as for the coarse node above,
                   but using the fine grid's ghost corners and dof count */
                fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d;
                fn_idx++;
              }
            }
          }
          /* add all these points to one aggregate */
          PetscCall(MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES));
        }
      }
    }
  }
  /* release mapping indices before freeing scratch and assembling the operator */
  PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c));
  PetscCall(PetscFree2(one_vec,fine_nodes));
  PetscCall(MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY));
  PetscFunctionReturn(0);
}