1 2 /* 3 Defines matrix-matrix product routines for pairs of MPIAIJ matrices 4 C = A * B 5 */ 6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 7 #include <../src/mat/utils/freespace.h> 8 #include <../src/mat/impls/aij/mpi/mpiaij.h> 9 #include <petscbt.h> 10 #include <../src/mat/impls/dense/mpi/mpidense.h> 11 #include <petsc/private/vecimpl.h> 12 13 #if defined(PETSC_HAVE_HYPRE) 14 PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*); 15 #endif 16 17 PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C) 18 { 19 PetscErrorCode ierr; 20 #if defined(PETSC_HAVE_HYPRE) 21 const char *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"}; 22 PetscInt nalg = 4; 23 #else 24 const char *algTypes[3] = {"scalable","nonscalable","seqmpi"}; 25 PetscInt nalg = 3; 26 #endif 27 PetscInt alg = 1; /* set nonscalable algorithm as default */ 28 MPI_Comm comm; 29 PetscBool flg; 30 31 PetscFunctionBegin; 32 if (scall == MAT_INITIAL_MATRIX) { 33 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 34 if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend); 35 36 ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr); 37 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 38 ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[1],&alg,&flg);CHKERRQ(ierr); 39 ierr = PetscOptionsEnd();CHKERRQ(ierr); 40 41 if (!flg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */ 42 MatInfo Ainfo,Binfo; 43 PetscInt nz_local; 44 PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable; 45 46 ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr); 47 ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr); 48 nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated); 49 50 if (B->cmap->N > fill*nz_local) alg_scalable_loc = PETSC_TRUE; 51 ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr); 52 53 if (alg_scalable) { 54 alg = 0; /* scalable algorithm would 50% slower than nonscalable algorithm */ 55 ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,fill*nz_local);CHKERRQ(ierr); 56 } 57 } 58 59 ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 60 switch (alg) { 61 case 1: 62 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr); 63 break; 64 case 2: 65 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);CHKERRQ(ierr); 66 break; 67 #if defined(PETSC_HAVE_HYPRE) 68 case 3: 69 ierr = MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);CHKERRQ(ierr); 70 break; 71 #endif 72 default: 73 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); 74 break; 75 } 76 ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 77 } 78 ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 79 ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr); 80 ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 81 PetscFunctionReturn(0); 82 } 83 84 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A) 85 { 86 PetscErrorCode ierr; 87 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 88 Mat_APMPI *ptap = a->ap; 89 90 PetscFunctionBegin; 91 ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr); 92 ierr = PetscFree(ptap->bufa);CHKERRQ(ierr); 93 ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr); 94 ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr); 95 ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr); 96 ierr = PetscFree(ptap->api);CHKERRQ(ierr); 97 ierr = PetscFree(ptap->apj);CHKERRQ(ierr); 98 ierr = PetscFree(ptap->apa);CHKERRQ(ierr); 99 ierr = ptap->destroy(A);CHKERRQ(ierr); 100 ierr = PetscFree(ptap);CHKERRQ(ierr); 101 PetscFunctionReturn(0); 102 } 103 104 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C) 105 { 106 PetscErrorCode ierr; 107 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 108 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 109 Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 110 PetscScalar *cda=cd->a,*coa=co->a; 111 Mat_SeqAIJ *p_loc,*p_oth; 112 PetscScalar *apa,*ca; 113 PetscInt cm =C->rmap->n; 114 Mat_APMPI *ptap=c->ap; 115 PetscInt *api,*apj,*apJ,i,k; 116 PetscInt cstart=C->cmap->rstart; 117 PetscInt cdnz,conz,k0,k1; 118 MPI_Comm comm; 119 PetscMPIInt size; 120 PetscBool freestruct; 121 122 PetscFunctionBegin; 123 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 124 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 125 126 if (!ptap) { 127 SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'"); 128 } 129 130 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 131 /*-----------------------------------------------------*/ 132 /* update numerical values of P_oth and P_loc */ 133 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 134 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 135 136 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 137 /*----------------------------------------------------------*/ 138 /* get data from symbolic products */ 139 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 140 p_oth = NULL; 141 if (size >1) { 142 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 143 } 144 145 /* get apa for storing dense row A[i,:]*P */ 146 apa = ptap->apa; 147 148 api = ptap->api; 149 apj = ptap->apj; 150 for (i=0; i<cm; i++) { 151 /* compute apa = A[i,:]*P */ 152 AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa); 153 154 /* set values in C */ 155 apJ = apj + api[i]; 156 cdnz = cd->i[i+1] - cd->i[i]; 157 conz = co->i[i+1] - co->i[i]; 158 159 /* 1st off-diagoanl part of C */ 160 ca = coa + co->i[i]; 161 k = 0; 162 for (k0=0; k0<conz; k0++) { 163 if (apJ[k] >= cstart) break; 164 ca[k0] = apa[apJ[k]]; 165 apa[apJ[k++]] = 0.0; 166 } 167 168 /* diagonal part of C */ 169 ca = cda + cd->i[i]; 170 for (k1=0; k1<cdnz; k1++) { 171 ca[k1] = apa[apJ[k]]; 172 apa[apJ[k++]] = 0.0; 173 } 174 175 /* 2nd off-diagoanl part of C */ 176 ca = coa + co->i[i]; 177 for (; k0<conz; k0++) { 178 ca[k0] = apa[apJ[k]]; 179 apa[apJ[k++]] = 0.0; 180 } 181 } 182 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 183 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 184 185 ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr); 186 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 187 /* supporting struct ptap consumes almost same amount of memory as C=AP, release it if C will not be updated by A and P */ 188 freestruct = PETSC_FALSE; 189 ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr); 190 if (freestruct) { 191 ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr); 192 } 193 ierr = PetscOptionsEnd();CHKERRQ(ierr); 194 PetscFunctionReturn(0); 195 } 196 197 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C) 198 { 199 PetscErrorCode ierr; 200 MPI_Comm comm; 201 PetscMPIInt size; 202 Mat Cmpi; 203 Mat_APMPI *ptap; 204 PetscFreeSpaceList free_space=NULL,current_space=NULL; 205 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c; 206 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 207 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 208 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 209 PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi; 210 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 211 PetscBT lnkbt; 212 PetscScalar *apa; 213 PetscReal afill; 214 MatType mtype; 215 216 PetscFunctionBegin; 217 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 218 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 219 220 /* create struct Mat_APMPI and attached it to C later */ 221 ierr = PetscNew(&ptap);CHKERRQ(ierr); 222 223 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 224 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 225 226 /* get P_loc by taking all local rows of P */ 227 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 228 229 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 230 pi_loc = p_loc->i; pj_loc = p_loc->j; 231 if (size > 1) { 232 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 233 pi_oth = p_oth->i; pj_oth = p_oth->j; 234 } else { 235 p_oth = NULL; 236 pi_oth = NULL; pj_oth = NULL; 237 } 238 239 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 240 /*-------------------------------------------------------------------*/ 241 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 242 ptap->api = api; 243 api[0] = 0; 244 245 /* create and initialize a linked list */ 246 ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr); 247 248 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 249 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr); 250 current_space = free_space; 251 252 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 253 for (i=0; i<am; i++) { 254 /* diagonal portion of A */ 255 nzi = adi[i+1] - adi[i]; 256 for (j=0; j<nzi; j++) { 257 row = *adj++; 258 pnz = pi_loc[row+1] - pi_loc[row]; 259 Jptr = pj_loc + pi_loc[row]; 260 /* add non-zero cols of P into the sorted linked list lnk */ 261 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 262 } 263 /* off-diagonal portion of A */ 264 nzi = aoi[i+1] - aoi[i]; 265 for (j=0; j<nzi; j++) { 266 row = *aoj++; 267 pnz = pi_oth[row+1] - pi_oth[row]; 268 Jptr = pj_oth + pi_oth[row]; 269 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 270 } 271 272 apnz = lnk[0]; 273 api[i+1] = api[i] + apnz; 274 275 /* if free space is not available, double the total space in the list */ 276 if (current_space->local_remaining<apnz) { 277 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),¤t_space);CHKERRQ(ierr); 278 nspacedouble++; 279 } 280 281 /* Copy data into free space, then initialize lnk */ 282 ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 283 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 284 285 current_space->array += apnz; 286 current_space->local_used += apnz; 287 current_space->local_remaining -= apnz; 288 } 289 290 /* Allocate space for apj, initialize apj, and */ 291 /* destroy list of free space and other temporary array(s) */ 292 ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr); 293 apj = ptap->apj; 294 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 295 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 296 297 /* malloc apa to store dense row A[i,:]*P */ 298 ierr = PetscCalloc1(pN,&apa);CHKERRQ(ierr); 299 300 ptap->apa = apa; 301 302 /* create and assemble symbolic parallel matrix Cmpi */ 303 /*----------------------------------------------------*/ 304 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 305 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 306 ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr); 307 308 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 309 ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr); 310 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 311 312 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi, apj, api);CHKERRQ(ierr); 313 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 314 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 315 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 316 317 ptap->destroy = Cmpi->ops->destroy; 318 ptap->duplicate = Cmpi->ops->duplicate; 319 Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 320 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 321 Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP; 322 323 /* attach the supporting struct to Cmpi for reuse */ 324 c = (Mat_MPIAIJ*)Cmpi->data; 325 c->ap = ptap; 326 327 *C = Cmpi; 328 329 /* set MatInfo */ 330 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 331 if (afill < 1.0) afill = 1.0; 332 Cmpi->info.mallocs = nspacedouble; 333 Cmpi->info.fill_ratio_given = fill; 334 Cmpi->info.fill_ratio_needed = afill; 335 336 #if defined(PETSC_USE_INFO) 337 if (api[am]) { 338 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 339 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 340 } else { 341 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 342 } 343 #endif 344 PetscFunctionReturn(0); 345 } 346 347 PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C) 348 { 349 PetscErrorCode ierr; 350 351 PetscFunctionBegin; 352 if (scall == MAT_INITIAL_MATRIX) { 353 ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 354 ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr); 355 ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 356 } 357 ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 358 ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr); 359 ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 360 PetscFunctionReturn(0); 361 } 362 363 typedef struct { 364 Mat workB; 365 PetscScalar *rvalues,*svalues; 366 MPI_Request *rwaits,*swaits; 367 } MPIAIJ_MPIDense; 368 369 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx) 370 { 371 MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx; 372 PetscErrorCode ierr; 373 374 PetscFunctionBegin; 375 ierr = MatDestroy(&contents->workB);CHKERRQ(ierr); 376 ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr); 377 ierr = PetscFree(contents);CHKERRQ(ierr); 378 PetscFunctionReturn(0); 379 } 380 381 #include <petsc/private/vecscatterimpl.h> 382 /* 383 This is a "dummy function" that handles the case where matrix C was created as a dense matrix 384 directly by the user and passed to MatMatMult() with the MAT_REUSE_MATRIX option 385 386 It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except does not create C 387 388 Developer Notes: This directly accesses information inside the VecScatter associated with the matrix-vector product 389 for this matrix. This is not desirable.. 390 391 */ 392 PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C) 393 { 394 PetscErrorCode ierr; 395 PetscBool flg; 396 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data; 397 PetscInt nz = aij->B->cmap->n; 398 PetscContainer container; 399 MPIAIJ_MPIDense *contents; 400 VecScatter ctx = aij->Mvctx; 401 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 402 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 403 404 PetscFunctionBegin; 405 ierr = PetscObjectTypeCompare((PetscObject)B,MATMPIDENSE,&flg);CHKERRQ(ierr); 406 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Second matrix must be mpidense"); 407 408 /* Handle case where where user provided the final C matrix rather than calling MatMatMult() with MAT_INITIAL_MATRIX*/ 409 ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr); 410 if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"First matrix must be MPIAIJ"); 411 412 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 413 414 ierr = PetscNew(&contents);CHKERRQ(ierr); 415 /* Create work matrix used to store off processor rows of B needed for local product */ 416 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr); 417 /* Create work arrays needed */ 418 ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues, 419 B->cmap->N*to->starts[to->n],&contents->svalues, 420 from->n,&contents->rwaits, 421 to->n,&contents->swaits);CHKERRQ(ierr); 422 423 ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr); 424 ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr); 425 ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr); 426 ierr = PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);CHKERRQ(ierr); 427 ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); 428 429 ierr = (*C->ops->matmultnumeric)(A,B,C);CHKERRQ(ierr); 430 PetscFunctionReturn(0); 431 } 432 433 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C) 434 { 435 PetscErrorCode ierr; 436 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data; 437 PetscInt nz = aij->B->cmap->n; 438 PetscContainer container; 439 MPIAIJ_MPIDense *contents; 440 VecScatter ctx = aij->Mvctx; 441 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 442 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 443 PetscInt m = A->rmap->n,n=B->cmap->n; 444 445 PetscFunctionBegin; 446 ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr); 447 ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr); 448 ierr = MatSetBlockSizesFromMats(*C,A,B);CHKERRQ(ierr); 449 ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr); 450 ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr); 451 ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 452 ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 453 454 (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 455 456 ierr = PetscNew(&contents);CHKERRQ(ierr); 457 /* Create work matrix used to store off processor rows of B needed for local product */ 458 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr); 459 /* Create work arrays needed */ 460 ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues, 461 B->cmap->N*to->starts[to->n],&contents->svalues, 462 from->n,&contents->rwaits, 463 to->n,&contents->swaits);CHKERRQ(ierr); 464 465 ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr); 466 ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr); 467 ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr); 468 ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr); 469 ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); 470 PetscFunctionReturn(0); 471 } 472 473 /* 474 Performs an efficient scatter on the rows of B needed by this process; this is 475 a modification of the VecScatterBegin_() routines. 476 */ 477 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB) 478 { 479 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 480 PetscErrorCode ierr; 481 PetscScalar *b,*w,*svalues,*rvalues; 482 VecScatter ctx = aij->Mvctx; 483 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 484 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 485 PetscInt i,j,k; 486 PetscInt *sindices,*sstarts,*rindices,*rstarts; 487 PetscMPIInt *sprocs,*rprocs,nrecvs; 488 MPI_Request *swaits,*rwaits; 489 MPI_Comm comm; 490 PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n; 491 MPI_Status status; 492 MPIAIJ_MPIDense *contents; 493 PetscContainer container; 494 Mat workB; 495 496 PetscFunctionBegin; 497 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 498 ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr); 499 if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist"); 500 ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr); 501 502 workB = *outworkB = contents->workB; 503 if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",nrows,workB->cmap->n); 504 sindices = to->indices; 505 sstarts = to->starts; 506 sprocs = to->procs; 507 swaits = contents->swaits; 508 svalues = contents->svalues; 509 510 rindices = from->indices; 511 rstarts = from->starts; 512 rprocs = from->procs; 513 rwaits = contents->rwaits; 514 rvalues = contents->rvalues; 515 516 ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr); 517 ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr); 518 519 for (i=0; i<from->n; i++) { 520 ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr); 521 } 522 523 for (i=0; i<to->n; i++) { 524 /* pack a message at a time */ 525 for (j=0; j<sstarts[i+1]-sstarts[i]; j++) { 526 for (k=0; k<ncols; k++) { 527 svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k]; 528 } 529 } 530 ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr); 531 } 532 533 nrecvs = from->n; 534 while (nrecvs) { 535 ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr); 536 nrecvs--; 537 /* unpack a message at a time */ 538 for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) { 539 for (k=0; k<ncols; k++) { 540 w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k]; 541 } 542 } 543 } 544 if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);} 545 546 ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr); 547 ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr); 548 ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 549 ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 550 PetscFunctionReturn(0); 551 } 552 extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat); 553 554 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C) 555 { 556 PetscErrorCode ierr; 557 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 558 Mat_MPIDense *bdense = (Mat_MPIDense*)B->data; 559 Mat_MPIDense *cdense = (Mat_MPIDense*)C->data; 560 Mat workB; 561 562 PetscFunctionBegin; 563 /* diagonal block of A times all local rows of B*/ 564 ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr); 565 566 /* get off processor parts of B needed to complete the product */ 567 ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr); 568 569 /* off-diagonal block of A times nonlocal rows of B */ 570 ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr); 571 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 572 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 573 PetscFunctionReturn(0); 574 } 575 576 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 577 { 578 PetscErrorCode ierr; 579 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 580 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 581 Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 582 PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj; 583 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 584 Mat_SeqAIJ *p_loc,*p_oth; 585 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 586 PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca; 587 PetscInt cm = C->rmap->n,anz,pnz; 588 Mat_APMPI *ptap = c->ap; 589 PetscScalar *apa_sparse; 590 PetscInt *api,*apj,*apJ,i,j,k,row; 591 PetscInt cstart = C->cmap->rstart; 592 PetscInt cdnz,conz,k0,k1,nextp; 593 MPI_Comm comm; 594 PetscMPIInt size; 595 PetscBool freestruct; 596 597 PetscFunctionBegin; 598 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 599 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 600 601 if (!ptap) { 602 SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'"); 603 } 604 apa_sparse = ptap->apa; 605 606 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 607 /*-----------------------------------------------------*/ 608 /* update numerical values of P_oth and P_loc */ 609 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 610 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 611 612 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 613 /*----------------------------------------------------------*/ 614 /* get data from symbolic products */ 615 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 616 pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a; 617 if (size >1) { 618 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 619 pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a; 620 } else { 621 p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL; 622 } 623 624 api = ptap->api; 625 apj = ptap->apj; 626 for (i=0; i<cm; i++) { 627 apJ = apj + api[i]; 628 629 /* diagonal portion of A */ 630 anz = adi[i+1] - adi[i]; 631 adj = ad->j + adi[i]; 632 ada = ad->a + adi[i]; 633 for (j=0; j<anz; j++) { 634 row = adj[j]; 635 pnz = pi_loc[row+1] - pi_loc[row]; 636 pj = pj_loc + pi_loc[row]; 637 pa = pa_loc + pi_loc[row]; 638 /* perform sparse axpy */ 639 valtmp = ada[j]; 640 nextp = 0; 641 for (k=0; nextp<pnz; k++) { 642 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 643 apa_sparse[k] += valtmp*pa[nextp++]; 644 } 645 } 646 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 647 } 648 649 /* off-diagonal portion of A */ 650 anz = aoi[i+1] - aoi[i]; 651 aoj = ao->j + aoi[i]; 652 aoa = ao->a + aoi[i]; 653 for (j=0; j<anz; j++) { 654 row = aoj[j]; 655 pnz = pi_oth[row+1] - pi_oth[row]; 656 pj = pj_oth + pi_oth[row]; 657 pa = pa_oth + pi_oth[row]; 658 /* perform sparse axpy */ 659 valtmp = aoa[j]; 660 nextp = 0; 661 for (k=0; nextp<pnz; k++) { 662 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 663 apa_sparse[k] += valtmp*pa[nextp++]; 664 } 665 } 666 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 667 } 668 669 /* set values in C */ 670 cdnz = cd->i[i+1] - cd->i[i]; 671 conz = co->i[i+1] - co->i[i]; 672 673 /* 1st off-diagoanl part of C */ 674 ca = coa + co->i[i]; 675 k = 0; 676 for (k0=0; k0<conz; k0++) { 677 if (apJ[k] >= cstart) break; 678 ca[k0] = apa_sparse[k]; 679 apa_sparse[k] = 0.0; 680 k++; 681 } 682 683 /* diagonal part of C */ 684 ca = cda + cd->i[i]; 685 for (k1=0; k1<cdnz; k1++) { 686 ca[k1] = apa_sparse[k]; 687 apa_sparse[k] = 0.0; 688 k++; 689 } 690 691 /* 2nd off-diagoanl part of C */ 692 ca = coa + co->i[i]; 693 for (; k0<conz; k0++) { 694 ca[k0] = apa_sparse[k]; 695 apa_sparse[k] = 0.0; 696 k++; 697 } 698 } 699 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 700 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 701 702 ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr); 703 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 704 /* supporting struct ptap consumes almost same amount of memory as C=AP, release it if C will not be updated by A and P */ 705 freestruct = PETSC_FALSE; 706 ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr); 707 if (freestruct) { 708 ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr); 709 } 710 ierr = PetscOptionsEnd();CHKERRQ(ierr); 711 PetscFunctionReturn(0); 712 } 713 714 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */ 715 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 716 { 717 PetscErrorCode ierr; 718 MPI_Comm comm; 719 PetscMPIInt size; 720 Mat Cmpi; 721 Mat_APMPI *ptap; 722 PetscFreeSpaceList free_space = NULL,current_space=NULL; 723 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c; 724 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 725 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 726 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 727 PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0; 728 PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20; 729 PetscReal afill; 730 PetscScalar *apa; 731 MatType mtype; 732 733 PetscFunctionBegin; 734 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 735 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 736 737 /* create struct Mat_APMPI and attached it to C later */ 738 ierr = PetscNew(&ptap);CHKERRQ(ierr); 739 740 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 741 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 742 743 /* get P_loc by taking all local rows of P */ 744 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 745 746 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 747 pi_loc = p_loc->i; pj_loc = p_loc->j; 748 if (size > 1) { 749 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 750 pi_oth = p_oth->i; pj_oth = p_oth->j; 751 } else { 752 p_oth = NULL; 753 pi_oth = NULL; pj_oth = NULL; 754 } 755 756 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 757 /*-------------------------------------------------------------------*/ 758 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 759 ptap->api = api; 760 api[0] = 0; 761 762 ierr = PetscLLCondensedCreate_Scalable(lsize,&lnk);CHKERRQ(ierr); 763 764 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 765 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr); 766 current_space = free_space; 767 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 768 for (i=0; i<am; i++) { 769 /* diagonal portion of A */ 770 nzi = adi[i+1] - adi[i]; 771 for (j=0; j<nzi; j++) { 772 row = *adj++; 773 pnz = pi_loc[row+1] - pi_loc[row]; 774 Jptr = pj_loc + pi_loc[row]; 775 /* Expand list if it is not long enough */ 776 if (pnz+apnz_max > lsize) { 777 lsize = pnz+apnz_max; 778 ierr = PetscLLCondensedExpand_Scalable(lsize, &lnk);CHKERRQ(ierr); 779 } 780 /* add non-zero cols of P into the sorted linked list lnk */ 781 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 782 apnz = *lnk; /* The first element in the list is the number of items in the list */ 783 api[i+1] = api[i] + apnz; 784 if (apnz > apnz_max) apnz_max = apnz; 785 } 786 /* off-diagonal portion of A */ 787 nzi = aoi[i+1] - aoi[i]; 788 for (j=0; j<nzi; j++) { 789 row = *aoj++; 790 pnz = pi_oth[row+1] - pi_oth[row]; 791 Jptr = pj_oth + pi_oth[row]; 792 /* Expand list if it is not long enough */ 793 if (pnz+apnz_max > lsize) { 794 lsize = pnz + apnz_max; 795 ierr = PetscLLCondensedExpand_Scalable(lsize, &lnk);CHKERRQ(ierr); 796 } 797 /* add non-zero cols of P into the sorted linked list lnk */ 798 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 799 apnz = *lnk; /* The first element in the list is the number of items in the list */ 800 api[i+1] = api[i] + apnz; 801 if (apnz > apnz_max) apnz_max = apnz; 802 } 803 apnz = *lnk; 804 api[i+1] = api[i] + apnz; 805 if (apnz > apnz_max) apnz_max = apnz; 806 807 /* if free space is not available, double the total space in the list */ 808 if (current_space->local_remaining<apnz) { 809 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),¤t_space);CHKERRQ(ierr); 810 nspacedouble++; 811 } 812 813 /* Copy data into free space, then initialize lnk */ 814 ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr); 815 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 816 817 current_space->array += apnz; 818 current_space->local_used += apnz; 819 current_space->local_remaining -= apnz; 820 } 821 822 /* Allocate space for apj, initialize apj, and */ 823 /* destroy list of free space and other temporary array(s) */ 824 ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr); 825 apj = ptap->apj; 826 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 827 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); 828 829 /* create and assemble symbolic parallel matrix Cmpi */ 830 /*----------------------------------------------------*/ 831 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 832 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 833 ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr); 834 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 835 ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr); 836 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 837 838 /* malloc apa for assembly Cmpi */ 839 ierr = PetscCalloc1(apnz_max,&apa);CHKERRQ(ierr); 840 ptap->apa = apa; 841 842 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi, apj, api);CHKERRQ(ierr); 843 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 844 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 845 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 846 847 ptap->destroy = Cmpi->ops->destroy; 848 ptap->duplicate = Cmpi->ops->duplicate; 849 Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ; 850 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 851 Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP; 852 853 /* attach the supporting struct to Cmpi for reuse */ 854 c = (Mat_MPIAIJ*)Cmpi->data; 855 c->ap = ptap; 856 *C = Cmpi; 857 858 /* set MatInfo */ 859 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 860 if (afill < 1.0) afill = 1.0; 861 Cmpi->info.mallocs = nspacedouble; 862 Cmpi->info.fill_ratio_given = fill; 863 Cmpi->info.fill_ratio_needed = afill; 864 865 #if defined(PETSC_USE_INFO) 866 if (api[am]) { 867 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 868 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 869 } else { 870 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 871 } 872 #endif 873 PetscFunctionReturn(0); 874 } 875 876 /* This function is needed for the seqMPI matrix-matrix multiplication. */ 877 /* Three input arrays are merged to one output array. The size of the */ 878 /* output array is also output. Duplicate entries only show up once. */ 879 static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, 880 PetscInt size2, PetscInt *in2, 881 PetscInt size3, PetscInt *in3, 882 PetscInt *size4, PetscInt *out) 883 { 884 int i = 0, j = 0, k = 0, l = 0; 885 886 /* Traverse all three arrays */ 887 while (i<size1 && j<size2 && k<size3) { 888 if (in1[i] < in2[j] && in1[i] < in3[k]) { 889 out[l++] = in1[i++]; 890 } 891 else if(in2[j] < in1[i] && in2[j] < in3[k]) { 892 out[l++] = in2[j++]; 893 } 894 else if(in3[k] < in1[i] && in3[k] < in2[j]) { 895 out[l++] = in3[k++]; 896 } 897 else if(in1[i] == in2[j] && in1[i] < in3[k]) { 898 out[l++] = in1[i]; 899 i++, j++; 900 } 901 else if(in1[i] == in3[k] && in1[i] < in2[j]) { 902 out[l++] = in1[i]; 903 i++, k++; 904 } 905 else if(in3[k] == in2[j] && in2[j] < in1[i]) { 906 out[l++] = in2[j]; 907 k++, j++; 908 } 909 else if(in1[i] == in2[j] && in1[i] == in3[k]) { 910 out[l++] = in1[i]; 911 i++, j++, k++; 912 } 913 } 914 915 /* Traverse two remaining arrays */ 916 while (i<size1 && j<size2) { 917 if (in1[i] < in2[j]) { 918 out[l++] = in1[i++]; 919 } 920 else if(in1[i] > in2[j]) { 921 out[l++] = in2[j++]; 922 } 923 else { 924 out[l++] = in1[i]; 925 i++, j++; 926 } 927 } 928 929 while (i<size1 && k<size3) { 930 if (in1[i] < in3[k]) { 931 out[l++] = in1[i++]; 932 } 933 else if(in1[i] > in3[k]) { 934 out[l++] = in3[k++]; 935 } 936 else { 937 out[l++] = in1[i]; 938 i++, k++; 939 } 940 } 941 942 while (k<size3 && j<size2) { 943 if (in3[k] < in2[j]) { 944 out[l++] = in3[k++]; 945 } 946 else if(in3[k] > in2[j]) { 947 out[l++] = in2[j++]; 948 } 949 else { 950 out[l++] = in3[k]; 951 k++, j++; 952 } 953 } 954 955 /* Traverse one remaining array */ 956 while (i<size1) out[l++] = in1[i++]; 957 while (j<size2) out[l++] = in2[j++]; 958 while (k<size3) out[l++] = in3[k++]; 959 960 *size4 = l; 961 } 962 963 /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */ 964 /* adds up the products. Two of these three multiplications are performed with existing (sequential) */ 965 /* matrix-matrix multiplications. */ 966 #undef __FUNCT__ 967 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI" 968 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat *C) 969 { 970 PetscErrorCode ierr; 971 MPI_Comm comm; 972 PetscMPIInt size; 973 Mat Cmpi; 974 Mat_APMPI *ptap; 975 PetscFreeSpaceList free_space_diag=NULL, current_space=NULL; 976 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data; 977 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc; 978 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data; 979 Mat_MPIAIJ *c; 980 Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq; 981 PetscInt adponz, adpdnz; 982 PetscInt *pi_loc,*dnz,*onz; 983 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart; 984 PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi, 985 *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj; 986 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend; 987 PetscBT lnkbt; 988 PetscScalar *apa; 989 PetscReal afill; 990 PetscMPIInt rank; 991 Mat adpd, aopoth; 992 MatType mtype; 993 994 PetscFunctionBegin; 995 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 996 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 997 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 998 ierr = MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend); CHKERRQ(ierr); 999 1000 /* create struct Mat_APMPI and attached it to C later */ 1001 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1002 1003 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 1004 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 1005 1006 /* get P_loc by taking all local rows of P */ 1007 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 1008 1009 1010 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 1011 pi_loc = p_loc->i; 1012 1013 /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */ 1014 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 1015 ierr = PetscMalloc1(am+2,&adpoi);CHKERRQ(ierr); 1016 1017 adpoi[0] = 0; 1018 ptap->api = api; 1019 api[0] = 0; 1020 1021 /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */ 1022 ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr); 1023 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 1024 1025 /* Symbolic calc of A_loc_diag * P_loc_diag */ 1026 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->A, p->A, fill, &adpd);CHKERRQ(ierr); 1027 adpd_seq = (Mat_SeqAIJ*)((adpd)->data); 1028 adpdi = adpd_seq->i; adpdj = adpd_seq->j; 1029 p_off = (Mat_SeqAIJ*)((p->B)->data); 1030 poff_i = p_off->i; poff_j = p_off->j; 1031 1032 /* j_temp stores indices of a result row before they are added to the linked list */ 1033 ierr = PetscMalloc1(pN+2,&j_temp);CHKERRQ(ierr); 1034 1035 1036 /* Symbolic calc of the A_diag * p_loc_off */ 1037 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 1038 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);CHKERRQ(ierr); 1039 current_space = free_space_diag; 1040 1041 for (i=0; i<am; i++) { 1042 /* A_diag * P_loc_off */ 1043 nzi = adi[i+1] - adi[i]; 1044 for (j=0; j<nzi; j++) { 1045 row = *adj++; 1046 pnz = poff_i[row+1] - poff_i[row]; 1047 Jptr = poff_j + poff_i[row]; 1048 for(i1 = 0; i1 < pnz; i1++) { 1049 j_temp[i1] = p->garray[Jptr[i1]]; 1050 } 1051 /* add non-zero cols of P into the sorted linked list lnk */ 1052 ierr = PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);CHKERRQ(ierr); 1053 } 1054 1055 adponz = lnk[0]; 1056 adpoi[i+1] = adpoi[i] + adponz; 1057 1058 /* if free space is not available, double the total space in the list */ 1059 if (current_space->local_remaining<adponz) { 1060 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),¤t_space);CHKERRQ(ierr); 1061 nspacedouble++; 1062 } 1063 1064 /* Copy data into free space, then initialize lnk */ 1065 ierr = PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1066 1067 current_space->array += adponz; 1068 current_space->local_used += adponz; 1069 current_space->local_remaining -= adponz; 1070 } 1071 1072 /* Symbolic calc of A_off * P_oth */ 1073 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, &aopoth);CHKERRQ(ierr); 1074 aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data); 1075 aopothi = aopoth_seq->i; aopothj = aopoth_seq->j; 1076 1077 /* Allocate space for apj, adpj, aopj, ... */ 1078 /* destroy lists of free space and other temporary array(s) */ 1079 1080 ierr = PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);CHKERRQ(ierr); 1081 ierr = PetscMalloc1(adpoi[am]+2, &adpoj);CHKERRQ(ierr); 1082 1083 /* Copy from linked list to j-array */ 1084 ierr = PetscFreeSpaceContiguous(&free_space_diag,adpoj);CHKERRQ(ierr); 1085 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 1086 1087 adpoJ = adpoj; 1088 adpdJ = adpdj; 1089 aopJ = aopothj; 1090 apj = ptap->apj; 1091 apJ = apj; /* still empty */ 1092 1093 /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */ 1094 /* A_diag * P_loc_diag to get A*P */ 1095 for (i = 0; i < am; i++) { 1096 aopnz = aopothi[i+1] - aopothi[i]; 1097 adponz = adpoi[i+1] - adpoi[i]; 1098 adpdnz = adpdi[i+1] - adpdi[i]; 1099 1100 /* Correct indices from A_diag*P_diag */ 1101 for(i1 = 0; i1 < adpdnz; i1++) { 1102 adpdJ[i1] += p_colstart; 1103 } 1104 /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */ 1105 Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ); 1106 ierr = MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz); CHKERRQ(ierr); 1107 1108 aopJ += aopnz; 1109 adpoJ += adponz; 1110 adpdJ += adpdnz; 1111 apJ += apnz; 1112 api[i+1] = api[i] + apnz; 1113 } 1114 1115 /* malloc apa to store dense row A[i,:]*P */ 1116 ierr = PetscCalloc1(pN+2,&apa);CHKERRQ(ierr); 1117 1118 ptap->apa = apa; 1119 /* create and assemble symbolic parallel matrix Cmpi */ 1120 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 1121 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1122 ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr); 1123 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 1124 ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr); 1125 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 1126 1127 1128 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi, apj, api);CHKERRQ(ierr); 1129 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1130 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1131 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1132 1133 1134 ptap->destroy = Cmpi->ops->destroy; 1135 ptap->duplicate = Cmpi->ops->duplicate; 1136 Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1137 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 1138 1139 /* attach the supporting struct to Cmpi for reuse */ 1140 c = (Mat_MPIAIJ*)Cmpi->data; 1141 c->ap = ptap; 1142 *C = Cmpi; 1143 1144 /* set MatInfo */ 1145 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 1146 if (afill < 1.0) afill = 1.0; 1147 Cmpi->info.mallocs = nspacedouble; 1148 Cmpi->info.fill_ratio_given = fill; 1149 Cmpi->info.fill_ratio_needed = afill; 1150 1151 #if defined(PETSC_USE_INFO) 1152 if (api[am]) { 1153 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 1154 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 1155 } else { 1156 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 1157 } 1158 #endif 1159 1160 ierr = MatDestroy(&aopoth);CHKERRQ(ierr); 1161 ierr = MatDestroy(&adpd);CHKERRQ(ierr); 1162 ierr = PetscFree(j_temp);CHKERRQ(ierr); 1163 ierr = PetscFree(adpoj);CHKERRQ(ierr); 1164 ierr = PetscFree(adpoi);CHKERRQ(ierr); 1165 PetscFunctionReturn(0); 1166 } 1167 1168 1169 /*-------------------------------------------------------------------------*/ 1170 PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C) 1171 { 1172 PetscErrorCode ierr; 1173 const char *algTypes[3] = {"scalable","nonscalable","matmatmult"}; 1174 PetscInt aN=A->cmap->N,alg=1; /* set default algorithm */ 1175 PetscBool flg; 1176 1177 PetscFunctionBegin; 1178 if (scall == MAT_INITIAL_MATRIX) { 1179 ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr); 1180 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 1181 ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[1],&alg,&flg);CHKERRQ(ierr); 1182 ierr = PetscOptionsEnd();CHKERRQ(ierr); 1183 1184 ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 1185 switch (alg) { 1186 case 1: 1187 if (!flg && aN > 100000) { /* may switch to scalable algorithm as default */ 1188 MatInfo Ainfo,Pinfo; 1189 PetscInt nz_local; 1190 PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable; 1191 MPI_Comm comm; 1192 1193 ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr); 1194 ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr); 1195 nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated); /* estimated local nonzero entries */ 1196 1197 if (aN > fill*nz_local) alg_scalable_loc = PETSC_TRUE; 1198 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1199 ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr); 1200 1201 if (alg_scalable) { 1202 alg = 0; /* scalable algorithm would slower than nonscalable algorithm */ 1203 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr); 1204 break; 1205 } 1206 } 1207 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);CHKERRQ(ierr); 1208 break; 1209 case 2: 1210 { 1211 Mat Pt; 1212 Mat_APMPI *ptap; 1213 Mat_MPIAIJ *c; 1214 ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr); 1215 ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr); 1216 c = (Mat_MPIAIJ*)(*C)->data; 1217 ptap = c->ap; 1218 ptap->Pt = Pt; 1219 (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult; 1220 PetscFunctionReturn(0); 1221 } 1222 break; 1223 default: /* scalable algorithm */ 1224 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr); 1225 break; 1226 } 1227 ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 1228 } 1229 ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 1230 ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr); 1231 ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 1232 PetscFunctionReturn(0); 1233 } 1234 1235 /* This routine only works when scall=MAT_REUSE_MATRIX! */ 1236 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C) 1237 { 1238 PetscErrorCode ierr; 1239 Mat_MPIAIJ *c=(Mat_MPIAIJ*)C->data; 1240 Mat_APMPI *ptap= c->ap; 1241 Mat Pt=ptap->Pt; 1242 1243 PetscFunctionBegin; 1244 ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr); 1245 ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr); 1246 PetscFunctionReturn(0); 1247 } 1248 1249 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */ 1250 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C) 1251 { 1252 PetscErrorCode ierr; 1253 Mat_APMPI *ptap; 1254 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*c; 1255 MPI_Comm comm; 1256 PetscMPIInt size,rank; 1257 Mat Cmpi; 1258 PetscFreeSpaceList free_space=NULL,current_space=NULL; 1259 PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n; 1260 PetscInt *lnk,i,k,nsend; 1261 PetscBT lnkbt; 1262 PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv; 1263 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 1264 PetscInt len,proc,*dnz,*onz,*owners,nzi; 1265 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 1266 MPI_Request *swaits,*rwaits; 1267 MPI_Status *sstatus,rstatus; 1268 PetscLayout rowmap; 1269 PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */ 1270 PetscMPIInt *len_r,*id_r; /* array of length of comm->size, store send/recv matrix values */ 1271 PetscInt *Jptr,*prmap=p->garray,con,j,Crmax; 1272 Mat_SeqAIJ *a_loc,*c_loc,*c_oth; 1273 PetscTable ta; 1274 MatType mtype; 1275 1276 PetscFunctionBegin; 1277 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1278 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1279 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1280 1281 /* create symbolic parallel matrix Cmpi */ 1282 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 1283 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 1284 ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr); 1285 1286 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1287 1288 /* create struct Mat_APMPI and attached it to C later */ 1289 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1290 ptap->reuse = MAT_INITIAL_MATRIX; 1291 1292 /* (0) compute Rd = Pd^T, Ro = Po^T */ 1293 /* --------------------------------- */ 1294 ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1295 ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1296 1297 /* (1) compute symbolic A_loc */ 1298 /* ---------------------------*/ 1299 ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);CHKERRQ(ierr); 1300 1301 /* (2-1) compute symbolic C_oth = Ro*A_loc */ 1302 /* ------------------------------------ */ 1303 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,&ptap->C_oth);CHKERRQ(ierr); 1304 1305 /* (3) send coj of C_oth to other processors */ 1306 /* ------------------------------------------ */ 1307 /* determine row ownership */ 1308 ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr); 1309 rowmap->n = pn; 1310 rowmap->bs = 1; 1311 ierr = PetscLayoutSetUp(rowmap);CHKERRQ(ierr); 1312 owners = rowmap->range; 1313 1314 /* determine the number of messages to send, their lengths */ 1315 ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr); 1316 ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1317 ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1318 1319 c_oth = (Mat_SeqAIJ*)ptap->C_oth->data; 1320 coi = c_oth->i; coj = c_oth->j; 1321 con = ptap->C_oth->rmap->n; 1322 proc = 0; 1323 for (i=0; i<con; i++) { 1324 while (prmap[i] >= owners[proc+1]) proc++; 1325 len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */ 1326 len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */ 1327 } 1328 1329 len = 0; /* max length of buf_si[], see (4) */ 1330 owners_co[0] = 0; 1331 nsend = 0; 1332 for (proc=0; proc<size; proc++) { 1333 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1334 if (len_s[proc]) { 1335 nsend++; 1336 len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */ 1337 len += len_si[proc]; 1338 } 1339 } 1340 1341 /* determine the number and length of messages to receive for coi and coj */ 1342 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr); 1343 ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr); 1344 1345 /* post the Irecv and Isend of coj */ 1346 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1347 ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1348 ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr); 1349 for (proc=0, k=0; proc<size; proc++) { 1350 if (!len_s[proc]) continue; 1351 i = owners_co[proc]; 1352 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1353 k++; 1354 } 1355 1356 /* (2-2) compute symbolic C_loc = Rd*A_loc */ 1357 /* ---------------------------------------- */ 1358 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,&ptap->C_loc);CHKERRQ(ierr); 1359 c_loc = (Mat_SeqAIJ*)ptap->C_loc->data; 1360 1361 /* receives coj are complete */ 1362 for (i=0; i<nrecv; i++) { 1363 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1364 } 1365 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1366 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1367 1368 /* add received column indices into ta to update Crmax */ 1369 a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data; 1370 1371 /* create and initialize a linked list */ 1372 ierr = PetscTableCreate(an,aN,&ta);CHKERRQ(ierr); /* for compute Crmax */ 1373 MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta); 1374 1375 for (k=0; k<nrecv; k++) {/* k-th received message */ 1376 Jptr = buf_rj[k]; 1377 for (j=0; j<len_r[k]; j++) { 1378 ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr); 1379 } 1380 } 1381 ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); 1382 ierr = PetscTableDestroy(&ta);CHKERRQ(ierr); 1383 1384 /* (4) send and recv coi */ 1385 /*-----------------------*/ 1386 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1387 ierr = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1388 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 1389 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1390 for (proc=0,k=0; proc<size; proc++) { 1391 if (!len_s[proc]) continue; 1392 /* form outgoing message for i-structure: 1393 buf_si[0]: nrows to be sent 1394 [1:nrows]: row index (global) 1395 [nrows+1:2*nrows+1]: i-structure index 1396 */ 1397 /*-------------------------------------------*/ 1398 nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */ 1399 buf_si_i = buf_si + nrows+1; 1400 buf_si[0] = nrows; 1401 buf_si_i[0] = 0; 1402 nrows = 0; 1403 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1404 nzi = coi[i+1] - coi[i]; 1405 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1406 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1407 nrows++; 1408 } 1409 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1410 k++; 1411 buf_si += len_si[proc]; 1412 } 1413 for (i=0; i<nrecv; i++) { 1414 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1415 } 1416 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1417 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1418 1419 ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr); 1420 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1421 ierr = PetscFree(swaits);CHKERRQ(ierr); 1422 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1423 1424 /* (5) compute the local portion of Cmpi */ 1425 /* ------------------------------------------ */ 1426 /* set initial free space to be Crmax, sufficient for holding nozeros in each row of Cmpi */ 1427 ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr); 1428 current_space = free_space; 1429 1430 ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr); 1431 for (k=0; k<nrecv; k++) { 1432 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1433 nrows = *buf_ri_k[k]; 1434 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 1435 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1436 } 1437 1438 ierr = MatPreallocateInitialize(comm,pn,an,dnz,onz);CHKERRQ(ierr); 1439 ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr); 1440 for (i=0; i<pn; i++) { 1441 /* add C_loc into Cmpi */ 1442 nzi = c_loc->i[i+1] - c_loc->i[i]; 1443 Jptr = c_loc->j + c_loc->i[i]; 1444 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1445 1446 /* add received col data into lnk */ 1447 for (k=0; k<nrecv; k++) { /* k-th received message */ 1448 if (i == *nextrow[k]) { /* i-th row */ 1449 nzi = *(nextci[k]+1) - *nextci[k]; 1450 Jptr = buf_rj[k] + *nextci[k]; 1451 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1452 nextrow[k]++; nextci[k]++; 1453 } 1454 } 1455 nzi = lnk[0]; 1456 1457 /* copy data into free space, then initialize lnk */ 1458 ierr = PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1459 ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr); 1460 } 1461 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1462 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 1463 ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr); 1464 1465 /* local sizes and preallocation */ 1466 ierr = MatSetSizes(Cmpi,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1467 ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr); 1468 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 1469 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1470 1471 /* members in merge */ 1472 ierr = PetscFree(id_r);CHKERRQ(ierr); 1473 ierr = PetscFree(len_r);CHKERRQ(ierr); 1474 ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr); 1475 ierr = PetscFree(buf_ri);CHKERRQ(ierr); 1476 ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr); 1477 ierr = PetscFree(buf_rj);CHKERRQ(ierr); 1478 ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr); 1479 1480 /* attach the supporting struct to Cmpi for reuse */ 1481 c = (Mat_MPIAIJ*)Cmpi->data; 1482 c->ap = ptap; 1483 ptap->destroy = Cmpi->ops->destroy; 1484 1485 /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */ 1486 Cmpi->assembled = PETSC_FALSE; 1487 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 1488 Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP; 1489 1490 *C = Cmpi; 1491 PetscFunctionReturn(0); 1492 } 1493 1494 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C) 1495 { 1496 PetscErrorCode ierr; 1497 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1498 Mat_SeqAIJ *c_seq; 1499 Mat_APMPI *ptap = c->ap; 1500 Mat A_loc,C_loc,C_oth; 1501 PetscInt i,rstart,rend,cm,ncols,row; 1502 const PetscInt *cols; 1503 const PetscScalar *vals; 1504 PetscBool freestruct; 1505 1506 PetscFunctionBegin; 1507 if (!ptap) { 1508 MPI_Comm comm; 1509 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1510 SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'"); 1511 } 1512 1513 ierr = MatZeroEntries(C);CHKERRQ(ierr); 1514 1515 if (ptap->reuse == MAT_REUSE_MATRIX) { 1516 /* These matrices are obtained in MatTransposeMatMultSymbolic() */ 1517 /* 1) get R = Pd^T, Ro = Po^T */ 1518 /*----------------------------*/ 1519 ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1520 ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1521 1522 /* 2) compute numeric A_loc */ 1523 /*--------------------------*/ 1524 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);CHKERRQ(ierr); 1525 } 1526 1527 /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */ 1528 A_loc = ptap->A_loc; 1529 ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);CHKERRQ(ierr); 1530 ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);CHKERRQ(ierr); 1531 C_loc = ptap->C_loc; 1532 C_oth = ptap->C_oth; 1533 1534 /* add C_loc and Co to to C */ 1535 ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr); 1536 1537 /* C_loc -> C */ 1538 cm = C_loc->rmap->N; 1539 c_seq = (Mat_SeqAIJ*)C_loc->data; 1540 cols = c_seq->j; 1541 vals = c_seq->a; 1542 for (i=0; i<cm; i++) { 1543 ncols = c_seq->i[i+1] - c_seq->i[i]; 1544 row = rstart + i; 1545 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1546 cols += ncols; vals += ncols; 1547 } 1548 1549 /* Co -> C, off-processor part */ 1550 cm = C_oth->rmap->N; 1551 c_seq = (Mat_SeqAIJ*)C_oth->data; 1552 cols = c_seq->j; 1553 vals = c_seq->a; 1554 for (i=0; i<cm; i++) { 1555 ncols = c_seq->i[i+1] - c_seq->i[i]; 1556 row = p->garray[i]; 1557 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1558 cols += ncols; vals += ncols; 1559 } 1560 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1561 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1562 1563 ptap->reuse = MAT_REUSE_MATRIX; 1564 1565 ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr); 1566 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 1567 /* supporting struct ptap consumes almost same amount of memory as C=PtAP, release it if C will not be updated by A and P */ 1568 freestruct = PETSC_FALSE; 1569 ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr); 1570 if (freestruct) { 1571 ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr); 1572 } 1573 ierr = PetscOptionsEnd();CHKERRQ(ierr); 1574 PetscFunctionReturn(0); 1575 } 1576 1577 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C) 1578 { 1579 PetscErrorCode ierr; 1580 Mat_Merge_SeqsToMPI *merge; 1581 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1582 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 1583 Mat_APMPI *ptap; 1584 PetscInt *adj; 1585 PetscInt i,j,k,anz,pnz,row,*cj,nexta; 1586 MatScalar *ada,*ca,valtmp; 1587 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 1588 MPI_Comm comm; 1589 PetscMPIInt size,rank,taga,*len_s; 1590 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 1591 PetscInt **buf_ri,**buf_rj; 1592 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 1593 MPI_Request *s_waits,*r_waits; 1594 MPI_Status *status; 1595 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 1596 PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ; 1597 Mat A_loc; 1598 Mat_SeqAIJ *a_loc; 1599 PetscBool freestruct; 1600 1601 PetscFunctionBegin; 1602 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1603 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1604 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1605 1606 ptap = c->ap; 1607 if (!ptap) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'"); 1608 merge = ptap->merge; 1609 1610 /* 2) compute numeric C_seq = P_loc^T*A_loc */ 1611 /*------------------------------------------*/ 1612 /* get data from symbolic products */ 1613 coi = merge->coi; coj = merge->coj; 1614 ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr); 1615 bi = merge->bi; bj = merge->bj; 1616 owners = merge->rowmap->range; 1617 ierr = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr); 1618 1619 /* get A_loc by taking all local rows of A */ 1620 A_loc = ptap->A_loc; 1621 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 1622 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1623 ai = a_loc->i; 1624 aj = a_loc->j; 1625 1626 for (i=0; i<am; i++) { 1627 anz = ai[i+1] - ai[i]; 1628 adj = aj + ai[i]; 1629 ada = a_loc->a + ai[i]; 1630 1631 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 1632 /*-------------------------------------------------------------*/ 1633 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 1634 pnz = po->i[i+1] - po->i[i]; 1635 poJ = po->j + po->i[i]; 1636 pA = po->a + po->i[i]; 1637 for (j=0; j<pnz; j++) { 1638 row = poJ[j]; 1639 cj = coj + coi[row]; 1640 ca = coa + coi[row]; 1641 /* perform sparse axpy */ 1642 nexta = 0; 1643 valtmp = pA[j]; 1644 for (k=0; nexta<anz; k++) { 1645 if (cj[k] == adj[nexta]) { 1646 ca[k] += valtmp*ada[nexta]; 1647 nexta++; 1648 } 1649 } 1650 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1651 } 1652 1653 /* put the value into Cd (diagonal part) */ 1654 pnz = pd->i[i+1] - pd->i[i]; 1655 pdJ = pd->j + pd->i[i]; 1656 pA = pd->a + pd->i[i]; 1657 for (j=0; j<pnz; j++) { 1658 row = pdJ[j]; 1659 cj = bj + bi[row]; 1660 ca = ba + bi[row]; 1661 /* perform sparse axpy */ 1662 nexta = 0; 1663 valtmp = pA[j]; 1664 for (k=0; nexta<anz; k++) { 1665 if (cj[k] == adj[nexta]) { 1666 ca[k] += valtmp*ada[nexta]; 1667 nexta++; 1668 } 1669 } 1670 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1671 } 1672 } 1673 1674 /* 3) send and recv matrix values coa */ 1675 /*------------------------------------*/ 1676 buf_ri = merge->buf_ri; 1677 buf_rj = merge->buf_rj; 1678 len_s = merge->len_s; 1679 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 1680 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 1681 1682 ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr); 1683 for (proc=0,k=0; proc<size; proc++) { 1684 if (!len_s[proc]) continue; 1685 i = merge->owners_co[proc]; 1686 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 1687 k++; 1688 } 1689 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 1690 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 1691 1692 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 1693 ierr = PetscFree(r_waits);CHKERRQ(ierr); 1694 ierr = PetscFree(coa);CHKERRQ(ierr); 1695 1696 /* 4) insert local Cseq and received values into Cmpi */ 1697 /*----------------------------------------------------*/ 1698 ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr); 1699 for (k=0; k<merge->nrecv; k++) { 1700 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1701 nrows = *(buf_ri_k[k]); 1702 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */ 1703 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1704 } 1705 1706 for (i=0; i<cm; i++) { 1707 row = owners[rank] + i; /* global row index of C_seq */ 1708 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1709 ba_i = ba + bi[i]; 1710 bnz = bi[i+1] - bi[i]; 1711 /* add received vals into ba */ 1712 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1713 /* i-th row */ 1714 if (i == *nextrow[k]) { 1715 cnz = *(nextci[k]+1) - *nextci[k]; 1716 cj = buf_rj[k] + *(nextci[k]); 1717 ca = abuf_r[k] + *(nextci[k]); 1718 nextcj = 0; 1719 for (j=0; nextcj<cnz; j++) { 1720 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1721 ba_i[j] += ca[nextcj++]; 1722 } 1723 } 1724 nextrow[k]++; nextci[k]++; 1725 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 1726 } 1727 } 1728 ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr); 1729 } 1730 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1731 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1732 1733 ierr = PetscFree(ba);CHKERRQ(ierr); 1734 ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr); 1735 ierr = PetscFree(abuf_r);CHKERRQ(ierr); 1736 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1737 1738 ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr); 1739 PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */ 1740 /* supporting struct ptap consumes almost same amount of memory as C=PtAP, release it if C will not be updated by A and P */ 1741 freestruct = PETSC_FALSE; 1742 ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr); 1743 if (freestruct) { 1744 ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr); 1745 } 1746 ierr = PetscOptionsEnd();CHKERRQ(ierr); 1747 PetscFunctionReturn(0); 1748 } 1749 1750 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C) 1751 { 1752 PetscErrorCode ierr; 1753 Mat Cmpi,A_loc,POt,PDt; 1754 Mat_APMPI *ptap; 1755 PetscFreeSpaceList free_space=NULL,current_space=NULL; 1756 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c; 1757 PetscInt *pdti,*pdtj,*poti,*potj,*ptJ; 1758 PetscInt nnz; 1759 PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row; 1760 PetscInt am =A->rmap->n,pn=P->cmap->n; 1761 MPI_Comm comm; 1762 PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri; 1763 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 1764 PetscInt len,proc,*dnz,*onz,*owners; 1765 PetscInt nzi,*bi,*bj; 1766 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 1767 MPI_Request *swaits,*rwaits; 1768 MPI_Status *sstatus,rstatus; 1769 Mat_Merge_SeqsToMPI *merge; 1770 PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j; 1771 PetscReal afill =1.0,afill_tmp; 1772 PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax; 1773 PetscScalar *vals; 1774 Mat_SeqAIJ *a_loc,*pdt,*pot; 1775 PetscTable ta; 1776 MatType mtype; 1777 1778 PetscFunctionBegin; 1779 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1780 /* check if matrix local sizes are compatible */ 1781 if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend); 1782 1783 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1784 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1785 1786 /* create struct Mat_APMPI and attached it to C later */ 1787 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1788 1789 /* get A_loc by taking all local rows of A */ 1790 ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr); 1791 1792 ptap->A_loc = A_loc; 1793 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1794 ai = a_loc->i; 1795 aj = a_loc->j; 1796 1797 /* determine symbolic Co=(p->B)^T*A - send to others */ 1798 /*----------------------------------------------------*/ 1799 ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr); 1800 pdt = (Mat_SeqAIJ*)PDt->data; 1801 pdti = pdt->i; pdtj = pdt->j; 1802 1803 ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr); 1804 pot = (Mat_SeqAIJ*)POt->data; 1805 poti = pot->i; potj = pot->j; 1806 1807 /* then, compute symbolic Co = (p->B)^T*A */ 1808 pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors 1809 >= (num of nonzero rows of C_seq) - pn */ 1810 ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr); 1811 coi[0] = 0; 1812 1813 /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */ 1814 nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am])); 1815 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 1816 current_space = free_space; 1817 1818 /* create and initialize a linked list */ 1819 ierr = PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);CHKERRQ(ierr); 1820 MatRowMergeMax_SeqAIJ(a_loc,am,ta); 1821 ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr); 1822 1823 ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr); 1824 1825 for (i=0; i<pon; i++) { 1826 pnz = poti[i+1] - poti[i]; 1827 ptJ = potj + poti[i]; 1828 for (j=0; j<pnz; j++) { 1829 row = ptJ[j]; /* row of A_loc == col of Pot */ 1830 anz = ai[row+1] - ai[row]; 1831 Jptr = aj + ai[row]; 1832 /* add non-zero cols of AP into the sorted linked list lnk */ 1833 ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr); 1834 } 1835 nnz = lnk[0]; 1836 1837 /* If free space is not available, double the total space in the list */ 1838 if (current_space->local_remaining<nnz) { 1839 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),¤t_space);CHKERRQ(ierr); 1840 nspacedouble++; 1841 } 1842 1843 /* Copy data into free space, and zero out denserows */ 1844 ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr); 1845 1846 current_space->array += nnz; 1847 current_space->local_used += nnz; 1848 current_space->local_remaining -= nnz; 1849 1850 coi[i+1] = coi[i] + nnz; 1851 } 1852 1853 ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr); 1854 ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr); 1855 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); /* must destroy to get a new one for C */ 1856 1857 afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1); 1858 if (afill_tmp > afill) afill = afill_tmp; 1859 1860 /* send j-array (coj) of Co to other processors */ 1861 /*----------------------------------------------*/ 1862 /* determine row ownership */ 1863 ierr = PetscNew(&merge);CHKERRQ(ierr); 1864 ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr); 1865 1866 merge->rowmap->n = pn; 1867 merge->rowmap->bs = 1; 1868 1869 ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr); 1870 owners = merge->rowmap->range; 1871 1872 /* determine the number of messages to send, their lengths */ 1873 ierr = PetscCalloc1(size,&len_si);CHKERRQ(ierr); 1874 ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr); 1875 1876 len_s = merge->len_s; 1877 merge->nsend = 0; 1878 1879 ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr); 1880 ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1881 1882 proc = 0; 1883 for (i=0; i<pon; i++) { 1884 while (prmap[i] >= owners[proc+1]) proc++; 1885 len_si[proc]++; /* num of rows in Co to be sent to [proc] */ 1886 len_s[proc] += coi[i+1] - coi[i]; 1887 } 1888 1889 len = 0; /* max length of buf_si[] */ 1890 owners_co[0] = 0; 1891 for (proc=0; proc<size; proc++) { 1892 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1893 if (len_si[proc]) { 1894 merge->nsend++; 1895 len_si[proc] = 2*(len_si[proc] + 1); 1896 len += len_si[proc]; 1897 } 1898 } 1899 1900 /* determine the number and length of messages to receive for coi and coj */ 1901 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr); 1902 ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr); 1903 1904 /* post the Irecv and Isend of coj */ 1905 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1906 ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1907 ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr); 1908 for (proc=0, k=0; proc<size; proc++) { 1909 if (!len_s[proc]) continue; 1910 i = owners_co[proc]; 1911 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1912 k++; 1913 } 1914 1915 /* receives and sends of coj are complete */ 1916 ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr); 1917 for (i=0; i<merge->nrecv; i++) { 1918 PetscMPIInt icompleted; 1919 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1920 } 1921 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1922 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1923 1924 /* add received column indices into table to update Armax */ 1925 /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/examples/tutorials/ex56.c! */ 1926 for (k=0; k<merge->nrecv; k++) {/* k-th received message */ 1927 Jptr = buf_rj[k]; 1928 for (j=0; j<merge->len_r[k]; j++) { 1929 ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr); 1930 } 1931 } 1932 ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr); 1933 /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */ 1934 1935 /* send and recv coi */ 1936 /*-------------------*/ 1937 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1938 ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1939 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 1940 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1941 for (proc=0,k=0; proc<size; proc++) { 1942 if (!len_s[proc]) continue; 1943 /* form outgoing message for i-structure: 1944 buf_si[0]: nrows to be sent 1945 [1:nrows]: row index (global) 1946 [nrows+1:2*nrows+1]: i-structure index 1947 */ 1948 /*-------------------------------------------*/ 1949 nrows = len_si[proc]/2 - 1; 1950 buf_si_i = buf_si + nrows+1; 1951 buf_si[0] = nrows; 1952 buf_si_i[0] = 0; 1953 nrows = 0; 1954 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1955 nzi = coi[i+1] - coi[i]; 1956 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1957 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1958 nrows++; 1959 } 1960 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1961 k++; 1962 buf_si += len_si[proc]; 1963 } 1964 i = merge->nrecv; 1965 while (i--) { 1966 PetscMPIInt icompleted; 1967 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1968 } 1969 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1970 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1971 ierr = PetscFree(len_si);CHKERRQ(ierr); 1972 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1973 ierr = PetscFree(swaits);CHKERRQ(ierr); 1974 ierr = PetscFree(sstatus);CHKERRQ(ierr); 1975 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1976 1977 /* compute the local portion of C (mpi mat) */ 1978 /*------------------------------------------*/ 1979 /* allocate bi array and free space for accumulating nonzero column info */ 1980 ierr = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr); 1981 bi[0] = 0; 1982 1983 /* set initial free space to be fill*(nnz(P) + nnz(AP)) */ 1984 nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am]))); 1985 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 1986 current_space = free_space; 1987 1988 ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr); 1989 for (k=0; k<merge->nrecv; k++) { 1990 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1991 nrows = *buf_ri_k[k]; 1992 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 1993 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recieved i-structure */ 1994 } 1995 1996 ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr); 1997 ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr); 1998 rmax = 0; 1999 for (i=0; i<pn; i++) { 2000 /* add pdt[i,:]*AP into lnk */ 2001 pnz = pdti[i+1] - pdti[i]; 2002 ptJ = pdtj + pdti[i]; 2003 for (j=0; j<pnz; j++) { 2004 row = ptJ[j]; /* row of AP == col of Pt */ 2005 anz = ai[row+1] - ai[row]; 2006 Jptr = aj + ai[row]; 2007 /* add non-zero cols of AP into the sorted linked list lnk */ 2008 ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr); 2009 } 2010 2011 /* add received col data into lnk */ 2012 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 2013 if (i == *nextrow[k]) { /* i-th row */ 2014 nzi = *(nextci[k]+1) - *nextci[k]; 2015 Jptr = buf_rj[k] + *nextci[k]; 2016 ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr); 2017 nextrow[k]++; nextci[k]++; 2018 } 2019 } 2020 nnz = lnk[0]; 2021 2022 /* if free space is not available, make more free space */ 2023 if (current_space->local_remaining<nnz) { 2024 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),¤t_space);CHKERRQ(ierr); 2025 nspacedouble++; 2026 } 2027 /* copy data into free space, then initialize lnk */ 2028 ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr); 2029 ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr); 2030 2031 current_space->array += nnz; 2032 current_space->local_used += nnz; 2033 current_space->local_remaining -= nnz; 2034 2035 bi[i+1] = bi[i] + nnz; 2036 if (nnz > rmax) rmax = nnz; 2037 } 2038 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 2039 2040 ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr); 2041 ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr); 2042 afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1); 2043 if (afill_tmp > afill) afill = afill_tmp; 2044 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); 2045 ierr = PetscTableDestroy(&ta);CHKERRQ(ierr); 2046 2047 ierr = MatDestroy(&POt);CHKERRQ(ierr); 2048 ierr = MatDestroy(&PDt);CHKERRQ(ierr); 2049 2050 /* create symbolic parallel matrix Cmpi - why cannot be assembled in Numeric part */ 2051 /*----------------------------------------------------------------------------------*/ 2052 ierr = PetscCalloc1(rmax+1,&vals);CHKERRQ(ierr); 2053 2054 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 2055 ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2056 ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr); 2057 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 2058 ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr); 2059 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 2060 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 2061 ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr); 2062 for (i=0; i<pn; i++) { 2063 row = i + rstart; 2064 nnz = bi[i+1] - bi[i]; 2065 Jptr = bj + bi[i]; 2066 ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr); 2067 } 2068 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2069 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2070 ierr = PetscFree(vals);CHKERRQ(ierr); 2071 2072 merge->bi = bi; 2073 merge->bj = bj; 2074 merge->coi = coi; 2075 merge->coj = coj; 2076 merge->buf_ri = buf_ri; 2077 merge->buf_rj = buf_rj; 2078 merge->owners_co = owners_co; 2079 2080 /* attach the supporting struct to Cmpi for reuse */ 2081 c = (Mat_MPIAIJ*)Cmpi->data; 2082 2083 c->ap = ptap; 2084 ptap->api = NULL; 2085 ptap->apj = NULL; 2086 ptap->merge = merge; 2087 ptap->apa = NULL; 2088 ptap->destroy = Cmpi->ops->destroy; 2089 ptap->duplicate = Cmpi->ops->duplicate; 2090 2091 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ; 2092 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 2093 Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP; 2094 2095 *C = Cmpi; 2096 #if defined(PETSC_USE_INFO) 2097 if (bi[pn] != 0) { 2098 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 2099 ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr); 2100 } else { 2101 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 2102 } 2103 #endif 2104 PetscFunctionReturn(0); 2105 } 2106