/*
  Defines projective product routines where A is an MPIAIJ matrix:
        C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI       *ptap=a->ptap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
    if (ptap->AP_loc) { /* used by alg_rap */
      Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
      ierr = PetscFree(ap->i);CHKERRQ(ierr);
      ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
      ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
    } else { /* used by alg_ptap */
      ierr = PetscFree(ptap->api);CHKERRQ(ierr);
      ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
    }
    ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

    if (merge) { /* used by alg_ptap */
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }

    ierr = ptap->destroy(A);CHKERRQ(ierr);
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
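
/*
   The three wrappers in this file (MatView_MPIAIJ_PtAP, MatDestroy_MPIAIJ_PtAP,
   MatDuplicate_MPIAIJ_MatPtAP) share one delegation pattern: when the symbolic routines
   below create the product C, they save C's original function pointers inside the
   Mat_PtAPMPI struct and install these wrappers in their place; each wrapper handles the
   PtAP-specific data and then chains to the saved original.  A minimal sketch of the
   pattern (illustrative only; ProductCtx and getProductCtx() are hypothetical names, not
   code used here - in this file the context lives in ((Mat_MPIAIJ*)A->data)->ptap):

     typedef struct {
       PetscErrorCode (*destroy)(Mat);   // original destroy, saved at creation time
       void           *work;             // product-specific work data
     } ProductCtx;

     static PetscErrorCode ProductDestroy(Mat A)
     {
       ProductCtx     *ctx;
       PetscErrorCode ierr;
       ierr = getProductCtx(A,&ctx);CHKERRQ(ierr);   // hypothetical lookup
       ierr = PetscFree(ctx->work);CHKERRQ(ierr);    // release product-specific data first
       ierr = (*ctx->destroy)(A);CHKERRQ(ierr);      // then chain to the original destroy
       return 0;
     }
*/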

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;
  (*M)->ops->duplicate = ptap->duplicate;
  (*M)->ops->view      = ptap->view;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[2] = {"scalable","nonscalable"};
  PetscInt       nalg=2;
#else
  const char     *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt       nalg=3;
#endif
  PetscInt       pN=P->cmap->N,alg=1; /* set default algorithm */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to the scalable algorithm as default */
      MatInfo   Ainfo,Pinfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* use boomerAMGBuildCoarseOperator */
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
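
/*
   Usage sketch (illustrative, not part of this file): forming C = P^T*A*P through the
   driver above.  A and P are assumed to be assembled MPIAIJ matrices whose layouts satisfy
   the size checks at the top of MatPtAP_MPIAIJ_MPIAIJ(); 2.0 is just an example fill estimate.

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   // symbolic + numeric phases

   The algorithm can be selected at runtime, matching the algTypes[] table above:
     -matptap_via scalable       (algType 0, memory-scalable)
     -matptap_via nonscalable    (algType 1, the default for moderate P->cmap->N)
     -matptap_via hypre          (only when PETSc is configured with hypre)
*/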

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get R = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api = ap->i;
  apj = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    apnz = api[i+1] - api[i];
    apa  = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;
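
  /*
     AProw_scalable() above accumulates one row of AP = Ad*P_loc + Ao*P_oth directly into the
     sparse storage (apj/apa) whose pattern was fixed by the symbolic phase.  Conceptually,
     for the diagonal part it performs the following (a sketch only, assuming sorted column
     indices; the off-diagonal part is analogous with ao and p_oth):

       for (k = ad->i[i]; k < ad->i[i+1]; k++) {            // each nonzero A[i,col]
         PetscInt    col = ad->j[k];
         PetscScalar v   = ad->a[k];
         for (n = p_loc->i[col]; n < p_loc->i[col+1]; n++) {
           // locate p_loc->j[n] within apj[api[i]..api[i+1]) and add v*p_loc->a[n] there
         }
       }
  */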

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when
     C->was_assembled is PETSC_FALSE and when there are no off-processor parts. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row   = rstart + i;
      ierr  = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}
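
/*
   The symbolic routine below merges sorted column-index sets row by row with the
   PetscLLCondensed* helpers, following the call pattern visible in its loops: AddSorted
   accumulates index sets into the condensed list, lnk[0] holds the current count, and
   Clean writes the merged, sorted result out and resets the list.  A small worked example
   of the merge itself (values invented for illustration): merging the P rows {1,4,7} and
   {2,4,9} for one row of AP gives {1,2,4,7,9}, so apnz = 5 and api[i+1] = api[i] + 5.
*/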

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_PtAPMPI        *ptap;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi,P_loc,P_oth;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,pnz,row,nsend;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;            /* arrays of length comm->size, store send/recv matrix values */
  PetscInt           *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar        *apv;
  PetscTable         ta;
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai  = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai  = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
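
  /*
     AP_loc now wraps the freshly built CSR triplet (api,apj,apv) without copying.  As a
     reminder of the AIJ/CSR convention (a small worked example, not data from this code):

       row 0: cols {1,3}    row 1: cols {0,2,3}
       api = {0,2,5}               -> api[i+1]-api[i] = number of nonzeros in row i
       apj = {1,3, 0,2,3}          -> column indices, rows stored back to back
       apv = {a01,a03, a10,a12,a13}

     The values apv are left uninitialized here; they are filled on every call to
     MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable().
  */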

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
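
  /*
     Message layout used in step (4) below, with a small worked example (numbers invented
     for illustration): suppose two rows of C_oth, with 3 and 1 nonzeros, go to one process,
     and their local row indices at the receiver are 5 and 8.  Then nrows = 2,
     len_si = 2*(2+1) = 6, and the i-structure message is

       buf_si = { 2,  5, 8,  0, 3, 4 }
                 ^   rows   running offsets into the coj data sent in step (3)

     The receiver walks this with nextrow[]/nextci[] in step (5) to line the remote rows up
     with its own C_loc rows.
  */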

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi */
  /* ------------------------------------- */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
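
  /*
     The MatPreallocate{Initialize,Set,Finalize} macros used above split each row's column
     indices into dnz[] (columns owned by this rank, the "diagonal" block of the MPIAIJ
     layout) and onz[] (all other columns), which is exactly what MatMPIAIJSetPreallocation()
     below expects.  For example (numbers invented for illustration), a process owning global
     columns [100,200) that registers a row with columns {3, 120, 150, 250} contributes
     dnz = 2 and onz = 2 for that row.
  */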

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}
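
/*
   The nonscalable variant below follows the same numbered steps as the scalable routine
   above; the difference is how a row of AP is accumulated.  The scalable code merges
   directly into sparse row storage, so its work arrays grow only with the local row length,
   while the nonscalable code keeps a dense accumulator of length pN = P->cmap->N per process
   (ptap->apa, allocated with PetscCalloc1(pN,...) near the end of its symbolic routine) plus
   a PetscBT bit array of the same length inside PetscLLCondensedCreate().  The dense scatter
   is typically faster for small to moderate pN, but its memory footprint grows with the
   global column count of P; e.g. pN = 10^7 with 8-byte scalars already costs roughly 80 MB
   per process for the accumulator alone, which is why the driver switches to the scalable
   algorithm for large pN.
*/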

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_PtAPMPI        *ptap;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,pnz,row,nsend;
  PetscBT            lnkbt;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;            /* arrays of length comm->size, store send/recv matrix values */
  PetscInt           *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar        *apv;
  PetscTable         ta;
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai  = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai  = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi */
  /* ------------------------------------- */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
  Cmpi->ops->view      = MatView_MPIAIJ_PtAP;
  *C                   = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get R = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa = ptap->apa;
  api = ap->i;
  apj = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when
     C->was_assembled is PETSC_FALSE and when there are no off-processor parts. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row   = rstart + i;
      ierr  = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}
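
/*
   Reuse sketch (illustrative, not part of this file): because both numeric routines set
   ptap->reuse = MAT_REUSE_MATRIX at the end, the expensive symbolic data (P_loc, P_oth,
   Rd, Ro, AP_loc, C_loc, C_oth) is built once and recycled whenever the entries of A
   change but its nonzero pattern does not, e.g. in a nonlinear or time-stepping loop:

     Mat C = NULL;
     for (PetscInt step=0; step<nsteps; step++) {          // nsteps is the caller's loop bound
       ierr = updateMatrixEntries(A,step);CHKERRQ(ierr);   // hypothetical helper; pattern unchanged
       ierr = MatPtAP(A,P,step ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
     }
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/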