/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>
#include <petsc/private/hashmapiv.h>
#include <petsc/private/hashseti.h>
#include <petscsf.h>

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI         *ptap=a->ap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  if (!ptap) {
    /* hack: MatDuplicate() sets oldmat->ops->view on newmat, which is a base mat class with null ptap! */
    A->ops->view = MatView_MPIAIJ;
    ierr = (A->ops->view)(A,viewer);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 2) {
        ierr = PetscViewerASCIIPrintf(viewer,"using allatonce MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 3) {
        ierr = PetscViewerASCIIPrintf(viewer,"using merged allatonce MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI           *ptap=a->ap;
  Mat_Merge_SeqsToMPI *merge;

  PetscFunctionBegin;
  if (!ptap) PetscFunctionReturn(0);

  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
  ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
  if (ptap->AP_loc) { /* used by alg_rap */
    Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
    ierr = PetscFree(ap->i);CHKERRQ(ierr);
    ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
  } else { /* used by alg_ptap */
    ierr = PetscFree(ptap->api);CHKERRQ(ierr);
    ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
  if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);

  merge = ptap->merge;
  if (merge) { /* used by alg_ptap */
    ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->bi);CHKERRQ(ierr);
    ierr = PetscFree(merge->bj);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
    ierr = PetscFree(merge->coi);CHKERRQ(ierr);
    ierr = PetscFree(merge->coj);CHKERRQ(ierr);
    ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
    ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
  }
  ierr = ISLocalToGlobalMappingDestroy(&ptap->ltog);CHKERRQ(ierr);

  ierr = PetscSFDestroy(&ptap->sf);CHKERRQ(ierr);
  ierr = PetscFree(ptap->c_othi);CHKERRQ(ierr);
  ierr = PetscFree(ptap->c_rmti);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI      *ptap=a->ap;

  PetscFunctionBegin;
  ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
  ierr = (*ptap->destroy)(A);CHKERRQ(ierr); /* MatDestroy_MPIAIJ(A) */
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
  PetscInt       nalg=4;
#else
  const char     *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
  PetscInt       nalg=5;
#endif
  PetscInt       pN=P->cmap->N,alg=1; /* set default algorithm (nonscalable) */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo,Pinfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P -- nonscalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    case 2:
      /* compute C=P^T*A*P allatonce */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    case 3:
      /* compute C=P^T*A*P allatonce, merged version */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 4:
      /* use hypre's BoomerAMGBuildCoarseOperator */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P -- scalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }

    if (alg == 0 || alg == 1 || alg == 2 || alg == 3) {
      Mat_MPIAIJ *c  = (Mat_MPIAIJ*)(*C)->data;
      Mat_APMPI  *ap = c->ap;
      ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
      ap->freestruct = PETSC_FALSE;
      ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures","MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct,NULL);CHKERRQ(ierr);
      ierr = PetscOptionsEnd();CHKERRQ(ierr);
    }
  }

  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
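
/*
   Usage sketch (illustrative only; A, P and C denote hypothetical user matrices and 2.0 is
   just an example fill estimate):

     Mat A,P,C;
     ... assemble A (MATMPIAIJ, n x n) and P (MATMPIAIJ, n x m) ...
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);    computes C = P^T*A*P
     ... update the values of A and/or P without changing their nonzero patterns ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);

   The implementation chosen by the driver above can be overridden at run time with
     -matptap_via scalable | nonscalable | allatonce | allatonce_merged
   (plus hypre when PETSc is configured with hypre), and the intermediate data structures
   kept for reuse can be released with -mat_freeintermediatedatastructures.
*/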

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz,nout;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap->AP_loc) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*------------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  api = ap->i;
  apj = ap->j;
  ierr = ISLocalToGlobalMappingApply(ptap->ltog,api[AP_loc->rmap->n],apj,apj);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    apnz = api[i+1] - api[i];
    apa  = ap->a + api[i];
    ierr = PetscArrayzero(apa,apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
  }
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,api[AP_loc->rmap->n],apj,&nout,apj);CHKERRQ(ierr);
  if (api[AP_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",api[AP_loc->rmap->n],nout);

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  /* Always use the scalable version since we are in the MPI scalable version */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_loc->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and
     when there are no off-processor parts.
     If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat
     is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from
     a table, and other, more complex stuff has to be done. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row   = rstart + i;
      ierr  = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_loc->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
  if (c_seq->i[C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_loc->rmap->n],nout);

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_oth->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_oth->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
  if (c_seq->i[C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_oth->rmap->n],nout);

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_APMPI          *ptap;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi,P_loc,P_oth;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,pnz,row,nsend;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;            /* arrays of length comm->size, store send/recv matrix values */
  PetscInt           *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi,nout;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar        *apv;
  PetscTable         ta;
  MatType            mtype;
  const char         *prefix;
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
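
  /*
     Outline of the scalable symbolic PtAP below (a summary for orientation; the numbered
     comments in the code mark the same steps):
       (0) form Rd = Pd^T and Ro = Po^T locally;
       (1) build the symbolic local product AP_loc = A_loc*P = Ad*P_loc + Ao*P_oth;
       (2) build the symbolic products C_oth = Ro*AP_loc and C_loc = Rd*AP_loc;
       (3)-(4) send the i and j structure of C_oth to the ranks that own those rows of C;
       (5) merge C_loc with the received rows to preallocate the parallel matrix Cmpi.
     The numeric phase (MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable) reuses AP_loc, Rd, Ro, C_loc and C_oth.
  */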
  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* create struct Mat_APMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai  = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai  = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
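
  /*
     At this point api[0..am] is the CSR row pointer of the local symbolic product AP_loc:
     row i of AP_loc occupies entries api[i]..api[i+1]-1 of apj (column indices, copied from
     the free-space list below) and of apv (values, filled later by the numeric phase).
  */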
  /* Allocate space for apj and apv, initialize apj, and
     destroy list of free space and other temporary arrays */
  ierr = PetscCalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
  ierr = MatSeqAIJCompactOutExtraColumns_SeqAIJ(ptap->AP_loc,&ptap->ltog);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_s,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_si,size);CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,coi[con],coj,coj);CHKERRQ(ierr);
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr  = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
  ierr  = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,c_loc->j);CHKERRQ(ierr);

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi */
  /* ------------------------------------- */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  if (P->cmap->bs > 0) {
    ierr = PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);CHKERRQ(ierr);
  }
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;

  nout = 0;
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_oth->i[ptap->C_oth->rmap->n],c_oth->j,&nout,c_oth->j);CHKERRQ(ierr);
  if (c_oth->i[ptap->C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_oth->i[ptap->C_oth->rmap->n],nout);
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,&nout,c_loc->j);CHKERRQ(ierr);
  if (c_loc->i[ptap->C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_loc->i[ptap->C_loc->rmap->n],nout);

  PetscFunctionReturn(0);
}
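
/*
   The two helpers below each compute one row (row i of A) of the product AP = A*P for the
   all-at-once algorithms. Roughly: columns of A are grouped in blocks of 'dof' interlaced
   components, so column k of the diagonal block Ad corresponds to row k/dof of P with
   component offset k%dof, while for the off-diagonal block Ao the offset comes from the
   global column a->garray[k] and 'map' translates k to the matching local row of P_oth.
   The symbolic variant collects the resulting global column indices into the hash sets
   dht (columns owned by this rank) and oht (all other columns); the numeric variant
   accumulates the corresponding values into the hash map hmap keyed by global column index.
*/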

PETSC_STATIC_INLINE PetscErrorCode MatPtAPSymbolicComputeOneRowOfAP_private(Mat A,Mat P,Mat P_oth,const PetscInt *map,PetscInt dof,PetscInt i,PetscHSetI dht,PetscHSetI oht)
{
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_oth=(Mat_SeqAIJ*)P_oth->data,*pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  PetscInt       *ai,nzi,j,*aj,row,col,*pi,*pj,pnz,nzpi,*p_othcols,k;
  PetscInt       pcstart,pcend,column,offset;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  pcstart  = P->cmap->rstart;
  pcstart *= dof;
  pcend    = P->cmap->rend;
  pcend   *= dof;
  /* diagonal portion: Ad[i,:]*P */
  ai  = ad->i;
  nzi = ai[i+1] - ai[i];
  aj  = ad->j + ai[i];
  for (j=0; j<nzi; j++) {
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi   = pd->i[row+1] - pd->i[row];
    pj     = pd->j + pd->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHSetIAdd(dht,pj[k]*dof+offset+pcstart);CHKERRQ(ierr);
    }
  }
  /* off diag P */
  for (j=0; j<nzi; j++) {
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi   = po->i[row+1] - po->i[row];
    pj     = po->j + po->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHSetIAdd(oht,p->garray[pj[k]]*dof+offset);CHKERRQ(ierr);
    }
  }

  /* off diagonal part: Ao[i,:]*P_oth */
  if (ao) {
    ai  = ao->i;
    pi  = p_oth->i;
    nzi = ai[i+1] - ai[i];
    aj  = ao->j + ai[i];
    for (j=0; j<nzi; j++) {
      row    = aj[j];
      offset = a->garray[row]%dof;
      row    = map[row];
      pnz    = pi[row+1] - pi[row];
      p_othcols = p_oth->j + pi[row];
      for (col=0; col<pnz; col++) {
        column = p_othcols[col] * dof + offset;
        if (column>=pcstart && column<pcend) {
          ierr = PetscHSetIAdd(dht,column);CHKERRQ(ierr);
        } else {
          ierr = PetscHSetIAdd(oht,column);CHKERRQ(ierr);
        }
      }
    }
  } /* end if (ao) */
  PetscFunctionReturn(0);
}

PETSC_STATIC_INLINE PetscErrorCode MatPtAPNumericComputeOneRowOfAP_private(Mat A,Mat P,Mat P_oth,const PetscInt *map,PetscInt dof,PetscInt i,PetscHMapIV hmap)
{
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_oth=(Mat_SeqAIJ*)P_oth->data,*pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  PetscInt       *ai,nzi,j,*aj,row,col,*pi,pnz,*p_othcols,pcstart,*pj,k,nzpi,offset;
  PetscScalar    ra,*aa,*pa;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  pcstart  = P->cmap->rstart;
  pcstart *= dof;

  /* diagonal portion: Ad[i,:]*P */
  ai  = ad->i;
  nzi = ai[i+1] - ai[i];
  aj  = ad->j + ai[i];
  aa  = ad->a + ai[i];
  for (j=0; j<nzi; j++) {
    ra     = aa[j];
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi   = pd->i[row+1] - pd->i[row];
    pj     = pd->j + pd->i[row];
    pa     = pd->a + pd->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHMapIVAddValue(hmap,pj[k]*dof+offset+pcstart,ra*pa[k]);CHKERRQ(ierr);
    }
    ierr = PetscLogFlops(2.0*nzpi);CHKERRQ(ierr);
  }
  for (j=0; j<nzi; j++) {
    ra     = aa[j];
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi   = po->i[row+1] - po->i[row];
    pj     = po->j + po->i[row];
    pa     = po->a + po->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHMapIVAddValue(hmap,p->garray[pj[k]]*dof+offset,ra*pa[k]);CHKERRQ(ierr);
    }
    ierr = PetscLogFlops(2.0*nzpi);CHKERRQ(ierr);
  }

  /* off diagonal part: Ao[i,:]*P_oth */
  if (ao) {
    ai  = ao->i;
    pi  = p_oth->i;
    nzi = ai[i+1] - ai[i];
    aj  = ao->j + ai[i];
    aa  = ao->a + ai[i];
    for (j=0; j<nzi; j++) {
      row    = aj[j];
      offset = a->garray[row]%dof;
      row    = map[row];
      ra     = aa[j];
      pnz    = pi[row+1] - pi[row];
      p_othcols = p_oth->j + pi[row];
      pa = p_oth->a + pi[row];
      for (col=0; col<pnz; col++) {
        ierr = PetscHMapIVAddValue(hmap,p_othcols[col]*dof+offset,ra*pa[col]);CHKERRQ(ierr);
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }
  } /* end if (ao) */

  PetscFunctionReturn(0);
}

PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat,Mat,PetscInt dof,MatReuse,Mat*);

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce(Mat A,Mat P,PetscInt dof,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *cd,*co,*po=(Mat_SeqAIJ*)p->B->data,*pd=(Mat_SeqAIJ*)p->A->data;
  Mat_APMPI      *ptap = c->ap;
  PetscHMapIV    hmap;
  PetscInt       i,j,jj,kk,nzi,*c_rmtj,voff,*c_othj,pn,pon,pcstart,pcend,ccstart,ccend,row,am,*poj,*pdj,*apindices,cmaxr,*c_rmtc,*c_rmtjj,*dcc,*occ,loc;
  PetscScalar    *c_rmta,*c_otha,*poa,*pda,*apvalues,*apvaluestmp,*c_rmtaa;
  PetscInt       offset,ii,pocol;
  const PetscInt *mappingindices;
  IS             map;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (!ptap->P_oth) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* Get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_REUSE_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  }
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);

  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  ierr = PetscCalloc2(ptap->c_rmti[pon],&c_rmtj,ptap->c_rmti[pon],&c_rmta);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  cmaxr = 0;
  for (i=0; i<pon; i++) {
    cmaxr = PetscMax(cmaxr,ptap->c_rmti[i+1]-ptap->c_rmti[i]);
  }
  ierr = PetscCalloc4(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  for (i=0; i<am && pon; i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = po->i[ii+1] - po->i[ii];
    if (!nzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;

    /* Form C(ii, :) */
    poj = po->j + po->i[ii];
    poa = po->a + po->i[ii];
    for (j=0; j<nzi; j++) {
      pocol   = poj[j]*dof+offset;
      c_rmtjj = c_rmtj + ptap->c_rmti[pocol];
      c_rmtaa = c_rmta + ptap->c_rmti[pocol];
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*poa[j];
        /* If the row is empty */
        if (!c_rmtc[pocol]) {
          c_rmtjj[jj] = apindices[jj];
          c_rmtaa[jj] = apvaluestmp[jj];
          c_rmtc[pocol]++;
        } else {
          ierr = PetscFindInt(apindices[jj],c_rmtc[pocol],c_rmtjj,&loc);CHKERRQ(ierr);
          if (loc>=0) { /* hit */
            c_rmtaa[loc] += apvaluestmp[jj];
            ierr = PetscLogFlops(1.0);CHKERRQ(ierr);
          } else { /* new element */
            loc = -(loc+1);
            /* Move data backward */
            for (kk=c_rmtc[pocol]; kk>loc; kk--) {
              c_rmtjj[kk] = c_rmtjj[kk-1];
              c_rmtaa[kk] = c_rmtaa[kk-1];
            } /* End kk */
            c_rmtjj[loc] = apindices[jj];
            c_rmtaa[loc] = apvaluestmp[jj];
            c_rmtc[pocol]++;
          }
        }
        ierr = PetscLogFlops(voff);CHKERRQ(ierr);
      } /* End jj */
    } /* End j */
  } /* End i */

  ierr = PetscFree4(apindices,apvalues,apvaluestmp,c_rmtc);CHKERRQ(ierr);

  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;
  ierr = PetscCalloc2(ptap->c_othi[pn],&c_othj,ptap->c_othi[pn],&c_otha);CHKERRQ(ierr);

  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart = pcstart*dof;
  pcend   = pcend*dof;
  cd = (Mat_SeqAIJ*)(c->A)->data;
  co = (Mat_SeqAIJ*)(c->B)->data;

  cmaxr = 0;
  for (i=0; i<pn; i++) {
    cmaxr = PetscMax(cmaxr,(cd->i[i+1]-cd->i[i])+(co->i[i+1]-co->i[i]));
  }
  ierr = PetscCalloc5(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pn,&dcc,pn,&occ);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  for (i=0; i<am && pn; i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;
    /* Form C(ii, :) */
    pdj = pd->j + pd->i[ii];
    pda = pd->a + pd->i[ii];
    for (j=0; j<nzi; j++) {
      row = pcstart + pdj[j] * dof + offset;
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*pda[j];
      }
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
      ierr = MatSetValues(C,1,&row,voff,apindices,apvaluestmp,ADD_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(C,&ccstart,&ccend);CHKERRQ(ierr);
  ierr = PetscFree5(apindices,apvalues,apvaluestmp,dcc,occ);CHKERRQ(ierr);
  ierr = PetscHMapIVDestroy(&hmap);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree2(c_rmtj,c_rmta);CHKERRQ(ierr);

  /* Add contributions from remote */
  for (i = 0; i < pn; i++) {
    row  = i + pcstart;
    ierr = MatSetValues(C,1,&row,ptap->c_othi[i+1]-ptap->c_othi[i],c_othj+ptap->c_othi[i],c_otha+ptap->c_othi[i],ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(c_othj,c_otha);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce(A,P,1,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
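
/*
   The routine below is the "merged" variant of the all-at-once numeric kernel above: the
   remote (off-diagonal) and local (diagonal) contributions of each row of C are formed in a
   single sweep over the rows of A instead of two separate passes. The variant actually used
   is selected during the symbolic phase with -matptap_allatonce_via overlapping|merged.
*/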

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce_merged(Mat A,Mat P,PetscInt dof,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *cd,*co,*po=(Mat_SeqAIJ*)p->B->data,*pd=(Mat_SeqAIJ*)p->A->data;
  Mat_APMPI      *ptap = c->ap;
  PetscHMapIV    hmap;
  PetscInt       i,j,jj,kk,nzi,dnzi,*c_rmtj,voff,*c_othj,pn,pon,pcstart,pcend,row,am,*poj,*pdj,*apindices,cmaxr,*c_rmtc,*c_rmtjj,loc;
  PetscScalar    *c_rmta,*c_otha,*poa,*pda,*apvalues,*apvaluestmp,*c_rmtaa;
  PetscInt       offset,ii,pocol;
  const PetscInt *mappingindices;
  IS             map;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (!ptap->P_oth) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* Get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_REUSE_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  }
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;

  ierr = PetscCalloc2(ptap->c_rmti[pon],&c_rmtj,ptap->c_rmti[pon],&c_rmta);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
  cmaxr = 0;
  for (i=0; i<pon; i++) {
    cmaxr = PetscMax(cmaxr,ptap->c_rmti[i+1]-ptap->c_rmti[i]);
  }
  cd = (Mat_SeqAIJ*)(c->A)->data;
  co = (Mat_SeqAIJ*)(c->B)->data;
  for (i=0; i<pn; i++) {
    cmaxr = PetscMax(cmaxr,(cd->i[i+1]-cd->i[i])+(co->i[i+1]-co->i[i]));
  }
  ierr = PetscCalloc4(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  for (i=0; i<am && (pon || pn); i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi  = po->i[ii+1] - po->i[ii];
    dnzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi && !dnzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;

    /* Form remote C(ii, :) */
    poj = po->j + po->i[ii];
    poa = po->a + po->i[ii];
    for (j=0; j<nzi; j++) {
      pocol   = poj[j]*dof+offset;
      c_rmtjj = c_rmtj + ptap->c_rmti[pocol];
      c_rmtaa = c_rmta + ptap->c_rmti[pocol];
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*poa[j];
        /* If the row is empty */
        if (!c_rmtc[pocol]) {
          c_rmtjj[jj] = apindices[jj];
          c_rmtaa[jj] = apvaluestmp[jj];
          c_rmtc[pocol]++;
        } else {
          ierr = PetscFindInt(apindices[jj],c_rmtc[pocol],c_rmtjj,&loc);CHKERRQ(ierr);
          if (loc>=0) { /* hit */
            c_rmtaa[loc] += apvaluestmp[jj];
            ierr = PetscLogFlops(1.0);CHKERRQ(ierr);
          } else { /* new element */
            loc = -(loc+1);
            /* Move data backward */
            for (kk=c_rmtc[pocol]; kk>loc; kk--) {
              c_rmtjj[kk] = c_rmtjj[kk-1];
              c_rmtaa[kk] = c_rmtaa[kk-1];
            } /* End kk */
            c_rmtjj[loc] = apindices[jj];
            c_rmtaa[loc] = apvaluestmp[jj];
            c_rmtc[pocol]++;
          }
        }
      } /* End jj */
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
    } /* End j */

    /* Form local C(ii, :) */
    pdj = pd->j + pd->i[ii];
    pda = pd->a + pd->i[ii];
    for (j=0; j<dnzi; j++) {
      row = pcstart + pdj[j] * dof + offset;
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*pda[j];
      } /* End jj */
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
      ierr = MatSetValues(C,1,&row,voff,apindices,apvaluestmp,ADD_VALUES);CHKERRQ(ierr);
    } /* End j */
  } /* End i */

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);
  ierr = PetscFree4(apindices,apvalues,apvaluestmp,c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVDestroy(&hmap);CHKERRQ(ierr);
  ierr = PetscCalloc2(ptap->c_othi[pn],&c_othj,ptap->c_othi[pn],&c_otha);CHKERRQ(ierr);

  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree2(c_rmtj,c_rmta);CHKERRQ(ierr);

  /* Add contributions from remote */
  for (i = 0; i < pn; i++) {
    row  = i + pcstart;
    ierr = MatSetValues(C,1,&row,ptap->c_othi[i+1]-ptap->c_othi[i],c_othj+ptap->c_othi[i],c_otha+ptap->c_othi[i],ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(c_othj,c_otha);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce_merged(A,P,1,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce(Mat A,Mat P,PetscInt dof,PetscReal fill,Mat *C)
{
  Mat_APMPI      *ptap;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm       comm;
  Mat            Cmpi;
  Mat_SeqAIJ     *pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  MatType        mtype;
  PetscSF        sf;
  PetscSFNode    *iremote;
  PetscInt       rootspacesize,*rootspace,*rootspaceoffsets,nleaves;
  const PetscInt *rootdegrees;
  PetscHSetI     ht,oht,*hta,*hto;
  PetscInt       pn,pon,*c_rmtc,i,j,nzi,htsize,htosize,*c_rmtj,off,*c_othj,rcvncols,sendncols,*c_rmtoffsets;
  PetscInt       owner,lidx,*rdj,col,pcstart,pcend,*dnz,*onz,am,arstart,arend,*poj,*pdj;
  PetscInt       nalg=2,alg=0,offset,ii;
  const PetscInt *mappingindices;
  PetscBool      flg;
  const char     *algTypes[2] = {"overlapping","merged"};
  IS             map;
  PetscErrorCode ierr;

  PetscFunctionBegin;
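  /*
     Outline of the all-at-once symbolic PtAP (a summary for orientation):
       - gather the needed remote rows of P into P_oth;
       - for each local row of A, form the column pattern of one row of AP with hash sets and
         scatter it into per-row sets of C, separating rows of C owned by other ranks (C_rmt)
         from locally owned ones;
       - ship the remote column indices to their owning ranks with a PetscSF (c_othj);
       - merge local and received patterns into dnz/onz and preallocate Cmpi.
  */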
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* Create symbolic parallel matrix Cmpi */
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);

  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 2;

  /* Get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_INITIAL_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);
  /* This equals the number of off-diagonal columns of P */
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  /* offsets */
  ierr = PetscMalloc1(pon+1,&ptap->c_rmti);CHKERRQ(ierr);
  /* The number of columns we will send to remote ranks */
  ierr = PetscMalloc1(pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pon,&hta);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
  }
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&arstart,&arend);CHKERRQ(ierr);
  /* Create a hash table to merge all columns for C(i, :) */
  ierr = PetscHSetICreate(&ht);CHKERRQ(ierr);

  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  ptap->c_rmti[0] = 0;
  /* 2) Pass 1: calculate the size of C_rmt (a matrix that needs to be sent to other processors) */
  for (i=0; i<am && pon; i++) {
    /* Form one row of AP */
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    /* If the off-diagonal block is empty, we should not do any calculation */
    nzi = po->i[ii+1] - po->i[ii];
    if (!nzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,ht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    /* If AP is empty, just continue */
    if (!htsize) continue;
    /* Form C(ii, :) */
    poj = po->j + po->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],ht);CHKERRQ(ierr);
    }
  }

  for (i=0; i<pon; i++) {
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    ptap->c_rmti[i+1] = ptap->c_rmti[i] + htsize;
    c_rmtc[i] = htsize;
  }

  ierr = PetscMalloc1(ptap->c_rmti[pon],&c_rmtj);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    off  = 0;
    ierr = PetscHSetIGetElems(hta[i],&off,c_rmtj+ptap->c_rmti[i]);CHKERRQ(ierr);
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(hta);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&iremote);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    owner = 0; lidx = 0;
    offset = i%dof;
    ii     = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,&lidx);CHKERRQ(ierr);
    iremote[i].index = lidx*dof + offset;
    iremote[i].rank  = owner;
  }

  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,pn,pon,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* Reorder ranks properly so that the data handled by gather and scatter have the same order */
  ierr = PetscSFSetRankOrder(sf,PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
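  /*
     In this first star forest every leaf is one (dof-expanded) off-diagonal column of P on
     this rank and its root is the corresponding locally owned row of C on the owning rank,
     located with PetscLayoutFindOwnerIndex() on P->cmap. It is used only to exchange counts
     and offsets; a second SF (ptap->sf), built below with one leaf per column index to be
     sent, performs the actual index and value exchange for the numeric phase.
  */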
  /* How many neighbors have contributions to my rows? */
  ierr = PetscSFComputeDegreeBegin(sf,&rootdegrees);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sf,&rootdegrees);CHKERRQ(ierr);
  rootspacesize = 0;
  for (i = 0; i < pn; i++) {
    rootspacesize += rootdegrees[i];
  }
  ierr = PetscMalloc1(rootspacesize,&rootspace);CHKERRQ(ierr);
  ierr = PetscMalloc1(rootspacesize+1,&rootspaceoffsets);CHKERRQ(ierr);
  /* Get information from leaves: the number of columns other ranks contribute to my rows */
  ierr = PetscSFGatherBegin(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtc);CHKERRQ(ierr);
  ierr = PetscCalloc1(pn+1,&ptap->c_othi);CHKERRQ(ierr);
  /* The number of columns received for each row */
  ptap->c_othi[0]     = 0;
  rootspacesize       = 0;
  rootspaceoffsets[0] = 0;
  for (i = 0; i < pn; i++) {
    rcvncols = 0;
    for (j = 0; j<rootdegrees[i]; j++) {
      rcvncols += rootspace[rootspacesize];
      rootspaceoffsets[rootspacesize+1] = rootspaceoffsets[rootspacesize] + rootspace[rootspacesize];
      rootspacesize++;
    }
    ptap->c_othi[i+1] = ptap->c_othi[i] + rcvncols;
  }
  ierr = PetscFree(rootspace);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterBegin(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterEnd(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree(rootspaceoffsets);CHKERRQ(ierr);

  ierr = PetscCalloc1(ptap->c_rmti[pon],&iremote);CHKERRQ(ierr);
  nleaves = 0;
  for (i = 0; i<pon; i++) {
    owner = 0;
    ii    = i/dof;
    ierr  = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,NULL);CHKERRQ(ierr);
    sendncols = ptap->c_rmti[i+1] - ptap->c_rmti[i];
    for (j=0; j<sendncols; j++) {
      iremote[nleaves].rank    = owner;
      iremote[nleaves++].index = c_rmtoffsets[i] + j;
    }
  }
  ierr = PetscFree(c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscCalloc1(ptap->c_othi[pn],&c_othj);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm,&ptap->sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(ptap->sf,ptap->c_othi[pn],nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(ptap->sf);CHKERRQ(ierr);
  /* One-to-one map */
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);

  ierr = PetscMalloc2(pn,&dnz,pn,&onz);CHKERRQ(ierr);
  ierr = PetscHSetICreate(&oht);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
  ierr = PetscMalloc2(pn,&hta,pn,&hto);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
    ierr = PetscHSetICreate(&hto[i]);CHKERRQ(ierr);
  }
  /* Work on the local part */
  /* 4) Pass 1: Estimate memory for C_loc */
  for (i=0; i<am && pn; i++) {
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    ierr = PetscHSetIClear(oht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,oht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(oht,&htosize);CHKERRQ(ierr);
    if (!(htsize+htosize)) continue;
    /* Form C(ii, :) */
    pdj = pd->j + pd->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[pdj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hto[pdj[j]*dof+offset],oht);CHKERRQ(ierr);
    }
  }

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);

  ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr);
  ierr = PetscHSetIDestroy(&oht);CHKERRQ(ierr);

  /* Get remote data */
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtj);CHKERRQ(ierr);

  for (i = 0; i < pn; i++) {
    nzi = ptap->c_othi[i+1] - ptap->c_othi[i];
    rdj = c_othj + ptap->c_othi[i];
    for (j = 0; j < nzi; j++) {
      col = rdj[j];
      /* diag part */
      if (col>=pcstart && col<pcend) {
        ierr = PetscHSetIAdd(hta[i],col);CHKERRQ(ierr);
      } else { /* off-diag part */
        ierr = PetscHSetIAdd(hto[i],col);CHKERRQ(ierr);
      }
    }
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    dnz[i] = htsize;
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(hto[i],&htsize);CHKERRQ(ierr);
    onz[i] = htsize;
    ierr = PetscHSetIDestroy(&hto[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree2(hta,hto);CHKERRQ(ierr);
  ierr = PetscFree(c_othj);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(A->rmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatSetUp(Cmpi);CHKERRQ(ierr);
  ierr = PetscFree2(dnz,onz);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled = PETSC_FALSE;
  /* pick an algorithm */
  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
  alg  = 0;
  ierr = PetscOptionsEList("-matptap_allatonce_via","PtAP allatonce numeric approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  switch (alg) {
  case 0:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce;
    break;
  case 1:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged;
    break;
  default:
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Unsupported allatonce numerical algorithm");
  }
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce(A,P,1,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce_merged(Mat A,Mat P,PetscInt dof,PetscReal fill,Mat *C)
{
  Mat_APMPI      *ptap;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm       comm;
  Mat            Cmpi;
  Mat_SeqAIJ     *pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  MatType        mtype;
  PetscSF        sf;
  PetscSFNode    *iremote;
  PetscInt       rootspacesize,*rootspace,*rootspaceoffsets,nleaves;
  const PetscInt *rootdegrees;
  PetscHSetI     ht,oht,*hta,*hto,*htd;
  PetscInt       pn,pon,*c_rmtc,i,j,nzi,dnzi,htsize,htosize,*c_rmtj,off,*c_othj,rcvncols,sendncols,*c_rmtoffsets;
  PetscInt       owner,lidx,*rdj,col,pcstart,pcend,*dnz,*onz,am,arstart,arend,*poj,*pdj;
  PetscInt       nalg=2,alg=0,offset,ii;
  PetscBool      flg;
  const char     *algTypes[2] = {"merged","overlapping"};
  const PetscInt *mappingindices;
  IS             map;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* Create symbolic parallel matrix Cmpi */
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn  *= dof;
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);

  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 3;

  /* 0) Get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_INITIAL_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);

  /* This equals the number of off-diagonal columns of P */
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  /* offsets */
  ierr = PetscMalloc1(pon+1,&ptap->c_rmti);CHKERRQ(ierr);
  /* The number of columns we will send to remote ranks */
  ierr = PetscMalloc1(pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pon,&hta);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
  }
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&arstart,&arend);CHKERRQ(ierr);
  /* Create hash tables to merge all columns for C(i, :) */
  ierr = PetscHSetICreate(&ht);CHKERRQ(ierr);
  ierr = PetscHSetICreate(&oht);CHKERRQ(ierr);
  ierr = PetscMalloc2(pn,&htd,pn,&hto);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    ierr = PetscHSetICreate(&htd[i]);CHKERRQ(ierr);
    ierr = PetscHSetICreate(&hto[i]);CHKERRQ(ierr);
  }

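  /*
     Roughly: hta[] holds one integer hash set per (dof-expanded) off-diagonal column of P,
     i.e. per row of C owned by another rank that this rank contributes to; htd[] and hto[]
     hold one set per locally owned row of C and collect its diagonal-block and
     off-diagonal-block column indices, respectively.  The set sizes eventually become the
     dnz[]/onz[] preallocation counts for Cmpi.
  */
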
  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  ptap->c_rmti[0] = 0;
  /* 2) Pass 1: calculate the size of C_rmt (a matrix that needs to be sent to other processors) */
  for (i=0; i<am && (pon || pn); i++) {
    /* Form one row of AP */
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    ierr = PetscHSetIClear(oht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    /* If the diagonal and off-diagonal parts are both empty, we should not do any calculation */
    nzi  = po->i[ii+1] - po->i[ii];
    dnzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi && !dnzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,oht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(oht,&htosize);CHKERRQ(ierr);
    /* If AP is empty, just continue */
    if (!(htsize+htosize)) continue;

    /* Form remote C(ii, :) */
    poj = po->j + po->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],oht);CHKERRQ(ierr);
    }

    /* Form local C(ii, :) */
    pdj = pd->j + pd->i[ii];
    for (j=0; j<dnzi; j++) {
      ierr = PetscHSetIUpdate(htd[pdj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hto[pdj[j]*dof+offset],oht);CHKERRQ(ierr);
    }
  }

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);

  ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr);
  ierr = PetscHSetIDestroy(&oht);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    ptap->c_rmti[i+1] = ptap->c_rmti[i] + htsize;
    c_rmtc[i]         = htsize;
  }

  ierr = PetscMalloc1(ptap->c_rmti[pon],&c_rmtj);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    off  = 0;
    ierr = PetscHSetIGetElems(hta[i],&off,c_rmtj+ptap->c_rmti[i]);CHKERRQ(ierr);
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(hta);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&iremote);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    owner = 0; lidx = 0;
    offset = i%dof;
    ii     = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,&lidx);CHKERRQ(ierr);
    iremote[i].index = lidx*dof+offset;
    iremote[i].rank  = owner;
  }

  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,pn,pon,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* Reorder ranks properly so that the data handled by gather and scatter have the same order */
  ierr = PetscSFSetRankOrder(sf,PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
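  /*
     In this star forest the leaves are the (dof-expanded) off-diagonal columns of P on this
     rank and the roots are the locally owned rows of C on the ranks owning those columns.
     The degree/gather/scatter calls below tell every owner how many column indices each
     neighbor contributes to each of its rows, and where those indices land in the receive
     buffer c_othj.
  */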
  /* How many neighbors have contributions to my rows? */
  ierr = PetscSFComputeDegreeBegin(sf,&rootdegrees);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sf,&rootdegrees);CHKERRQ(ierr);
  rootspacesize = 0;
  for (i = 0; i < pn; i++) {
    rootspacesize += rootdegrees[i];
  }
  ierr = PetscMalloc1(rootspacesize,&rootspace);CHKERRQ(ierr);
  ierr = PetscMalloc1(rootspacesize+1,&rootspaceoffsets);CHKERRQ(ierr);
  /* Get information from leaves: the number of columns other ranks contribute to my rows */
  ierr = PetscSFGatherBegin(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pn+1,&ptap->c_othi);CHKERRQ(ierr);
  /* The number of columns received for each row */
  ptap->c_othi[0]     = 0;
  rootspacesize       = 0;
  rootspaceoffsets[0] = 0;
  for (i = 0; i < pn; i++) {
    rcvncols = 0;
    for (j = 0; j<rootdegrees[i]; j++) {
      rcvncols += rootspace[rootspacesize];
      rootspaceoffsets[rootspacesize+1] = rootspaceoffsets[rootspacesize] + rootspace[rootspacesize];
      rootspacesize++;
    }
    ptap->c_othi[i+1] = ptap->c_othi[i] + rcvncols;
  }
  ierr = PetscFree(rootspace);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterBegin(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterEnd(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree(rootspaceoffsets);CHKERRQ(ierr);

  ierr = PetscCalloc1(ptap->c_rmti[pon],&iremote);CHKERRQ(ierr);
  nleaves = 0;
  for (i = 0; i<pon; i++) {
    owner = 0;
    ii    = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,NULL);CHKERRQ(ierr);
    sendncols = ptap->c_rmti[i+1] - ptap->c_rmti[i];
    for (j=0; j<sendncols; j++) {
      iremote[nleaves].rank    = owner;
      iremote[nleaves++].index = c_rmtoffsets[i] + j;
    }
  }
  ierr = PetscFree(c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscCalloc1(ptap->c_othi[pn],&c_othj);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm,&ptap->sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(ptap->sf,ptap->c_othi[pn],nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(ptap->sf);CHKERRQ(ierr);
  /* One-to-one map */
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  /* Get remote data */
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtj);CHKERRQ(ierr);
  ierr = PetscMalloc2(pn,&dnz,pn,&onz);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
  for (i = 0; i < pn; i++) {
    nzi = ptap->c_othi[i+1] - ptap->c_othi[i];
    rdj = c_othj + ptap->c_othi[i];
    for (j = 0; j < nzi; j++) {
      col = rdj[j];
      /* diag part */
      if (col>=pcstart && col<pcend) {
        ierr = PetscHSetIAdd(htd[i],col);CHKERRQ(ierr);
      } else { /* off diag */
        ierr = PetscHSetIAdd(hto[i],col);CHKERRQ(ierr);
      }
    }
    ierr = PetscHSetIGetSize(htd[i],&htsize);CHKERRQ(ierr);
    dnz[i] = htsize;
    ierr = PetscHSetIDestroy(&htd[i]);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(hto[i],&htsize);CHKERRQ(ierr);
    onz[i] = htsize;
    ierr = PetscHSetIDestroy(&hto[i]);CHKERRQ(ierr);
  }

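  /*
     At this point dnz[i]/onz[i] hold, for each locally owned row of C, the number of distinct
     column indices falling into the diagonal block (columns pcstart..pcend-1) and into the
     off-diagonal block, respectively - exactly the per-row counts expected by
     MatMPIAIJSetPreallocation() below.
  */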
  ierr = PetscFree2(htd,hto);CHKERRQ(ierr);
  ierr = PetscFree(c_othj);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(A->rmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = PetscFree2(dnz,onz);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled = PETSC_FALSE;
  /* pick an algorithm */
  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
  alg  = 0;
  ierr = PetscOptionsEList("-matptap_allatonce_via","PtAP allatonce numeric approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  switch (alg) {
  case 0:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged;
    break;
  case 1:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce;
    break;
  default:
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG," Unsupported allatonce numerical algorithm \n");
  }
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce_merged(A,P,1,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

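/*
   MatPtAPSymbolic_MPIAIJ_MPIAIJ() below is the symbolic phase of the nonscalable (algType = 1)
   algorithm.  In outline, following the numbered comments in the body:
     (0) build Rd = Pd^T, Ro = Po^T and fetch the needed off-process rows of P (P_oth);
     (1) compute the symbolic local product AP_loc = A_loc*P = Ad*P_loc + Ao*P_oth;
     (2) compute the symbolic products C_loc = Rd*AP_loc and C_oth = Ro*AP_loc;
     (3)-(4) ship the rows of C_oth (owned by other ranks) to their owners;
     (5) merge C_loc with the received rows to obtain the preallocation of Cmpi.
   The dense work array ptap->apa of length pN allocated at the end is what makes the numeric
   phase nonscalable in the number of global columns of P.
*/
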
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_APMPI          *ptap;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,pnz,row,nsend;
  PetscBT            lnkbt;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;         /* arrays of length comm->size; lengths and ids of the messages to receive */
  PetscInt           *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar        *apv;
  PetscTable         ta;
  MatType            mtype;
  const char         *prefix;
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_APMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

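  /*
     The loop below is the usual symbolic sparse row merge: for each local row i of A it unions
     the column patterns of the P rows selected by the nonzeros of Ad(i,:) and Ao(i,:) into the
     condensed sorted linked list lnk, records the row length apnz, and appends the merged
     column indices to the FreeSpace buffer, doubling that buffer whenever it runs out.  The
     accumulated (api,apj) arrays define AP_loc in CSR form.
  */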
  ierr = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai  = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai  = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_s,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_si,size);CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

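  /*
     The rows of C_oth computed above belong to rows of C owned by other ranks (their global
     row indices are prmap[] = p->garray[]).  len_si[]/len_s[] size, per destination rank, the
     message carrying the row structure (buf_si, sent in step (4)) and the message carrying the
     column indices coj (sent in step (3)).
  */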
  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

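  /*
     Each received i-structure message buf_ri[k] is laid out as described in the send loop
     above: the row count, then the local row indices, then the row-offset array.  The pointers
     buf_ri_k/nextrow/nextci set up below walk these messages row by row so that, while
     scanning the locally owned rows of C, the columns received from neighbor k are merged in
     whenever the current row index matches *nextrow[k].
  */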
  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  if (P->cmap->bs > 0) {
    ierr = PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);CHKERRQ(ierr);
  }
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled    = PETSC_FALSE;
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

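/*
   A typical call sequence driving the routines in this file (a sketch, assuming A and P are
   assembled MPIAIJ matrices) goes through the public MatPtAP() interface, which runs the
   symbolic phase once and the numeric phase on every reuse:

     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);
     ... update the numerical values of A (same nonzero pattern) ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);

   Reuse requires the intermediate data (ptap->AP_loc, etc.) kept by the symbolic phase; see
   the SETERRQ below for the failure mode when they have been freed.
*/
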
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap->AP_loc) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa = ptap->apa;
  api = ap->i;
  apj = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr  = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr  = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and
     when there are no off-processor parts.
     If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat
     is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from
     a table, and other, more complex stuff has to be done. */
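  /*
     The flag juggling below makes the first assembly after the symbolic phase take the fast
     CSR-copy path (assembled and was_assembled both PETSC_FALSE), while any later reassembly
     of an already assembled C falls back to MatSetValues_MPIAIJ() with ADD_VALUES, for the
     reason given in the comment above.
  */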
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row   = rstart + i;
      ierr  = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}