1 2 /* 3 Defines matrix-matrix product routines for pairs of MPIAIJ matrices 4 C = A * B 5 */ 6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 7 #include <../src/mat/utils/freespace.h> 8 #include <../src/mat/impls/aij/mpi/mpiaij.h> 9 #include <petscbt.h> 10 #include <../src/mat/impls/dense/mpi/mpidense.h> 11 #include <petsc/private/vecimpl.h> 12 #include <petsc/private/vecscatterimpl.h> 13 14 #if defined(PETSC_HAVE_HYPRE) 15 PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat); 16 #endif 17 18 PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C) 19 { 20 PetscErrorCode ierr; 21 Mat_Product *product = C->product; 22 Mat A=product->A,B=product->B; 23 MatProductAlgorithm alg=product->alg; 24 PetscReal fill=product->fill; 25 PetscBool flg; 26 27 PetscFunctionBegin; 28 /* scalable */ 29 ierr = PetscStrcmp(alg,"scalable",&flg);CHKERRQ(ierr); 30 if (flg) { 31 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); 32 PetscFunctionReturn(0); 33 } 34 35 /* nonscalable */ 36 ierr = PetscStrcmp(alg,"nonscalable",&flg);CHKERRQ(ierr); 37 if (flg) { 38 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr); 39 PetscFunctionReturn(0); 40 } 41 42 /* seqmpi */ 43 ierr = PetscStrcmp(alg,"seqmpi",&flg);CHKERRQ(ierr); 44 if (flg) { 45 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);CHKERRQ(ierr); 46 PetscFunctionReturn(0); 47 } 48 49 #if defined(PETSC_HAVE_HYPRE) 50 ierr = PetscStrcmp(alg,"hypre",&flg);CHKERRQ(ierr); 51 if (flg) { 52 ierr = MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);CHKERRQ(ierr); 53 PetscFunctionReturn(0); 54 } 55 #endif 56 SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"Mat Product Algorithm is not supported"); 57 } 58 59 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data) 60 { 61 PetscErrorCode ierr; 62 Mat_APMPI *ptap = (Mat_APMPI*)data; 63 64 PetscFunctionBegin; 65 ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr); 66 ierr = PetscFree(ptap->bufa);CHKERRQ(ierr); 67 ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr); 68 ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr); 69 ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr); 70 ierr = PetscFree(ptap->api);CHKERRQ(ierr); 71 ierr = PetscFree(ptap->apj);CHKERRQ(ierr); 72 ierr = PetscFree(ptap->apa);CHKERRQ(ierr); 73 ierr = PetscFree(ptap);CHKERRQ(ierr); 74 PetscFunctionReturn(0); 75 } 76 77 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C) 78 { 79 PetscErrorCode ierr; 80 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 81 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 82 Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 83 PetscScalar *cda=cd->a,*coa=co->a; 84 Mat_SeqAIJ *p_loc,*p_oth; 85 PetscScalar *apa,*ca; 86 PetscInt cm =C->rmap->n; 87 Mat_APMPI *ptap; 88 PetscInt *api,*apj,*apJ,i,k; 89 PetscInt cstart=C->cmap->rstart; 90 PetscInt cdnz,conz,k0,k1; 91 MPI_Comm comm; 92 PetscMPIInt size; 93 94 PetscFunctionBegin; 95 MatCheckProduct(C,3); 96 ptap = (Mat_APMPI*)C->product->data; 97 if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data"); 98 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 99 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 100 101 if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. 
Do not call MatProductClear()"); 102 103 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 104 /*-----------------------------------------------------*/ 105 /* update numerical values of P_oth and P_loc */ 106 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 107 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 108 109 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 110 /*----------------------------------------------------------*/ 111 /* get data from symbolic products */ 112 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 113 p_oth = NULL; 114 if (size >1) { 115 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 116 } 117 118 /* get apa for storing dense row A[i,:]*P */ 119 apa = ptap->apa; 120 121 api = ptap->api; 122 apj = ptap->apj; 123 for (i=0; i<cm; i++) { 124 /* compute apa = A[i,:]*P */ 125 AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa); 126 127 /* set values in C */ 128 apJ = apj + api[i]; 129 cdnz = cd->i[i+1] - cd->i[i]; 130 conz = co->i[i+1] - co->i[i]; 131 132 /* 1st off-diagonal part of C */ 133 ca = coa + co->i[i]; 134 k = 0; 135 for (k0=0; k0<conz; k0++) { 136 if (apJ[k] >= cstart) break; 137 ca[k0] = apa[apJ[k]]; 138 apa[apJ[k++]] = 0.0; 139 } 140 141 /* diagonal part of C */ 142 ca = cda + cd->i[i]; 143 for (k1=0; k1<cdnz; k1++) { 144 ca[k1] = apa[apJ[k]]; 145 apa[apJ[k++]] = 0.0; 146 } 147 148 /* 2nd off-diagonal part of C */ 149 ca = coa + co->i[i]; 150 for (; k0<conz; k0++) { 151 ca[k0] = apa[apJ[k]]; 152 apa[apJ[k++]] = 0.0; 153 } 154 } 155 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 156 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 157 PetscFunctionReturn(0); 158 } 159 160 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C) 161 { 162 PetscErrorCode ierr; 163 MPI_Comm comm; 164 PetscMPIInt size; 165 Mat_APMPI *ptap; 166 PetscFreeSpaceList free_space=NULL,current_space=NULL; 167 Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data; 168 Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 169 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 170 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 171 PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi; 172 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 173 PetscBT lnkbt; 174 PetscReal afill; 175 MatType mtype; 176 177 PetscFunctionBegin; 178 MatCheckProduct(C,4); 179 if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty"); 180 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 181 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 182 183 /* create struct Mat_APMPI and attached it to C later */ 184 ierr = PetscNew(&ptap);CHKERRQ(ierr); 185 186 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 187 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 188 189 /* get P_loc by taking all local rows of P */ 190 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 191 192 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 193 pi_loc = p_loc->i; pj_loc = p_loc->j; 194 if (size > 1) { 195 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 196 pi_oth = p_oth->i; pj_oth = p_oth->j; 197 } else { 198 p_oth = NULL; 199 pi_oth = NULL; pj_oth = NULL; 200 } 201 202 /* first, compute symbolic AP = A_loc*P = 
A_diag*P_loc + A_off*P_oth */ 203 /*-------------------------------------------------------------------*/ 204 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 205 ptap->api = api; 206 api[0] = 0; 207 208 /* create and initialize a linked list */ 209 ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr); 210 211 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 212 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr); 213 current_space = free_space; 214 215 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 216 for (i=0; i<am; i++) { 217 /* diagonal portion of A */ 218 nzi = adi[i+1] - adi[i]; 219 for (j=0; j<nzi; j++) { 220 row = *adj++; 221 pnz = pi_loc[row+1] - pi_loc[row]; 222 Jptr = pj_loc + pi_loc[row]; 223 /* add non-zero cols of P into the sorted linked list lnk */ 224 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 225 } 226 /* off-diagonal portion of A */ 227 nzi = aoi[i+1] - aoi[i]; 228 for (j=0; j<nzi; j++) { 229 row = *aoj++; 230 pnz = pi_oth[row+1] - pi_oth[row]; 231 Jptr = pj_oth + pi_oth[row]; 232 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 233 } 234 235 apnz = lnk[0]; 236 api[i+1] = api[i] + apnz; 237 238 /* if free space is not available, double the total space in the list */ 239 if (current_space->local_remaining<apnz) { 240 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr); 241 nspacedouble++; 242 } 243 244 /* Copy data into free space, then initialize lnk */ 245 ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 246 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 247 248 current_space->array += apnz; 249 current_space->local_used += apnz; 250 current_space->local_remaining -= apnz; 251 } 252 253 /* Allocate space for apj, initialize apj, and */ 254 /* destroy list of free space and other temporary array(s) */ 255 ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr); 256 apj = ptap->apj; 257 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 258 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 259 260 /* malloc apa to store dense row A[i,:]*P */ 261 ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr); 262 263 /* set and assemble symbolic parallel matrix C */ 264 /*---------------------------------------------*/ 265 ierr = MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 266 ierr = MatSetBlockSizesFromMats(C,A,P);CHKERRQ(ierr); 267 268 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 269 ierr = MatSetType(C,mtype);CHKERRQ(ierr); 270 ierr = MatMPIAIJSetPreallocation(C,0,dnz,0,onz);CHKERRQ(ierr); 271 272 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);CHKERRQ(ierr); 273 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 274 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 275 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 276 277 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 278 C->ops->productnumeric = MatProductNumeric_AB; 279 280 /* attach the supporting struct to C for reuse */ 281 C->product->data = ptap; 282 C->product->destroy = MatDestroy_MPIAIJ_MatMatMult; 283 284 /* set MatInfo */ 285 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 286 if (afill < 1.0) afill = 1.0; 287 C->info.mallocs = nspacedouble; 288 C->info.fill_ratio_given = fill; 289
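/* afill above is roughly nnz(AP_loc)/(nnz(A_loc)+nnz(P_loc)), i.e. the fill ratio this symbolic product actually required; it is recorded in MatInfo and reported with -info (see the PetscInfo calls below) so callers can pass a tighter fill estimate on subsequent symbolic products. */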
C->info.fill_ratio_needed = afill; 290 291 #if defined(PETSC_USE_INFO) 292 if (api[am]) { 293 ierr = PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 294 ierr = PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 295 } else { 296 ierr = PetscInfo(C,"Empty matrix product\n");CHKERRQ(ierr); 297 } 298 #endif 299 PetscFunctionReturn(0); 300 } 301 302 /* ------------------------------------------------------- */ 303 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat); 304 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat); 305 306 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C) 307 { 308 Mat_Product *product = C->product; 309 Mat A = product->A,B=product->B; 310 311 PetscFunctionBegin; 312 if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) 313 SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend); 314 315 C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense; 316 C->ops->productsymbolic = MatProductSymbolic_AB; 317 PetscFunctionReturn(0); 318 } 319 /* -------------------------------------------------------------------- */ 320 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C) 321 { 322 Mat_Product *product = C->product; 323 Mat A = product->A,B=product->B; 324 325 PetscFunctionBegin; 326 if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) 327 SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend); 328 329 C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense; 330 C->ops->productsymbolic = MatProductSymbolic_AtB; 331 PetscFunctionReturn(0); 332 } 333 334 /* --------------------------------------------------------------------- */ 335 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C) 336 { 337 PetscErrorCode ierr; 338 Mat_Product *product = C->product; 339 340 PetscFunctionBegin; 341 switch (product->type) { 342 case MATPRODUCT_AB: 343 ierr = MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C);CHKERRQ(ierr); 344 break; 345 case MATPRODUCT_AtB: 346 ierr = MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C);CHKERRQ(ierr); 347 break; 348 default: 349 break; 350 } 351 PetscFunctionReturn(0); 352 } 353 /* ------------------------------------------------------- */ 354 355 typedef struct { 356 Mat workB,workB1; 357 MPI_Request *rwaits,*swaits; 358 PetscInt nsends,nrecvs; 359 MPI_Datatype *stype,*rtype; 360 PetscInt blda; 361 } MPIAIJ_MPIDense; 362 363 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx) 364 { 365 MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx; 366 PetscErrorCode ierr; 367 PetscInt i; 368 369 PetscFunctionBegin; 370 ierr = MatDestroy(&contents->workB);CHKERRQ(ierr); 371 ierr = MatDestroy(&contents->workB1);CHKERRQ(ierr); 372 for (i=0; i<contents->nsends; i++) { 373 ierr = MPI_Type_free(&contents->stype[i]);CHKERRQ(ierr); 374 } 375 for (i=0; i<contents->nrecvs; i++) { 376 ierr = MPI_Type_free(&contents->rtype[i]);CHKERRQ(ierr); 377 } 378 ierr = PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits);CHKERRQ(ierr); 379 ierr = PetscFree(contents);CHKERRQ(ierr); 380 PetscFunctionReturn(0); 381 } 382 383 static PetscErrorCode 
MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C) 384 { 385 PetscErrorCode ierr; 386 Mat_MPIAIJ *aij=(Mat_MPIAIJ*)A->data; 387 PetscInt nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,clda; 388 MPIAIJ_MPIDense *contents; 389 VecScatter ctx=aij->Mvctx; 390 PetscInt Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb; 391 MPI_Comm comm; 392 MPI_Datatype type1,*stype,*rtype; 393 const PetscInt *sindices,*sstarts,*rstarts; 394 PetscMPIInt *disp; 395 PetscBool cisdense; 396 397 PetscFunctionBegin; 398 MatCheckProduct(C,4); 399 if (C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty"); 400 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 401 ierr = PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense);CHKERRQ(ierr); 402 if (!cisdense) { 403 ierr = MatSetType(C,((PetscObject)B)->type_name);CHKERRQ(ierr); 404 } 405 ierr = MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN);CHKERRQ(ierr); 406 ierr = MatSetBlockSizesFromMats(C,A,B);CHKERRQ(ierr); 407 ierr = MatSetUp(C);CHKERRQ(ierr); 408 ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); 409 ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr); 410 ierr = PetscNew(&contents);CHKERRQ(ierr); 411 412 ierr = VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);CHKERRQ(ierr); 413 ierr = VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);CHKERRQ(ierr); 414 415 /* Create column block of B and C for memory scalability when BN is too large */ 416 /* Estimate Bbn, column size of Bb */ 417 if (nz) { 418 Bbn1 = 2*Am*BN/nz; 419 } else Bbn1 = BN; 420 421 bs = PetscAbs(B->cmap->bs); 422 Bbn1 = Bbn1/bs *bs; /* Bbn1 is a multiple of bs */ 423 if (Bbn1 > BN) Bbn1 = BN; 424 ierr = MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr); 425 426 /* Enable runtime option for Bbn */ 427 ierr = PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");CHKERRQ(ierr); 428 ierr = PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL);CHKERRQ(ierr); 429 ierr = PetscOptionsEnd();CHKERRQ(ierr); 430 Bbn = PetscMin(Bbn,BN); 431 432 if (Bbn > 0 && Bbn < BN) { 433 numBb = BN/Bbn; 434 Bbn1 = BN - numBb*Bbn; 435 } else numBb = 0; 436 437 if (numBb) { 438 ierr = PetscInfo3(C,"use Bb, BN=%D, Bbn=%D; numBb=%D\n",BN,Bbn,numBb);CHKERRQ(ierr); 439 if (Bbn1) { /* Create workB1 for the remaining columns */ 440 ierr = PetscInfo2(C,"use Bb1, BN=%D, Bbn1=%D\n",BN,Bbn1);CHKERRQ(ierr); 441 /* Create work matrix used to store off processor rows of B needed for local product */ 442 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1);CHKERRQ(ierr); 443 } else contents->workB1 = NULL; 444 } 445 446 /* Create work matrix used to store off processor rows of B needed for local product */ 447 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB);CHKERRQ(ierr); 448 449 /* Use MPI derived data type to reduce memory required by the send/recv buffers */ 450 ierr = PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits);CHKERRQ(ierr); 451 contents->stype = stype; 452 contents->nsends = nsends; 453 454 contents->rtype = rtype; 455 contents->nrecvs = nrecvs; 456 contents->blda = blda; 457 458 ierr = PetscMalloc1(Bm+1,&disp);CHKERRQ(ierr); 459 for (i=0; i<nsends; i++) { 460 nrows_to = sstarts[i+1]-sstarts[i]; 461 for (j=0; j<nrows_to; j++){ 462 disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */ 463 } 464 ierr = 
MPI_Type_create_indexed_block(nrows_to,1,(const PetscMPIInt *)disp,MPIU_SCALAR,&type1);CHKERRQ(ierr); 465 466 ierr = MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]);CHKERRQ(ierr); 467 ierr = MPI_Type_commit(&stype[i]);CHKERRQ(ierr); 468 ierr = MPI_Type_free(&type1);CHKERRQ(ierr); 469 } 470 471 for (i=0; i<nrecvs; i++) { 472 /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */ 473 nrows_from = rstarts[i+1]-rstarts[i]; 474 disp[0] = 0; 475 ierr = MPI_Type_create_indexed_block(1, nrows_from, (const PetscMPIInt *)disp, MPIU_SCALAR, &type1);CHKERRQ(ierr); 476 ierr = MPI_Type_create_resized(type1, 0, nz*sizeof(PetscScalar), &rtype[i]);CHKERRQ(ierr); 477 ierr = MPI_Type_commit(&rtype[i]);CHKERRQ(ierr); 478 ierr = MPI_Type_free(&type1);CHKERRQ(ierr); 479 } 480 481 ierr = PetscFree(disp);CHKERRQ(ierr); 482 ierr = VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL);CHKERRQ(ierr); 483 ierr = VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL);CHKERRQ(ierr); 484 ierr = MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr); 485 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 486 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 487 488 C->product->data = contents; 489 C->product->destroy = MatMPIAIJ_MPIDenseDestroy; 490 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 491 PetscFunctionReturn(0); 492 } 493 494 PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool); 495 /* 496 Performs an efficient scatter on the rows of B needed by this process; this is 497 a modification of the VecScatterBegin_() routines. 498 499 Input: Bbidx = 0: B = Bb 500 = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense() 501 */ 502 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB) 503 { 504 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 505 PetscErrorCode ierr; 506 const PetscScalar *b; 507 PetscScalar *rvalues; 508 VecScatter ctx = aij->Mvctx; 509 const PetscInt *sindices,*sstarts,*rstarts; 510 const PetscMPIInt *sprocs,*rprocs; 511 PetscInt i,nsends,nrecvs; 512 MPI_Request *swaits,*rwaits; 513 MPI_Comm comm; 514 PetscMPIInt tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi; 515 MPIAIJ_MPIDense *contents; 516 Mat workB; 517 MPI_Datatype *stype,*rtype; 518 PetscInt blda; 519 520 PetscFunctionBegin; 521 MatCheckProduct(C,4); 522 if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty"); 523 contents = (MPIAIJ_MPIDense*)C->product->data; 524 ierr = VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/);CHKERRQ(ierr); 525 ierr = VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/);CHKERRQ(ierr); 526 ierr = PetscMPIIntCast(nsends,&nsends_mpi);CHKERRQ(ierr); 527 ierr = PetscMPIIntCast(nrecvs,&nrecvs_mpi);CHKERRQ(ierr); 528 if (Bbidx == 0) { 529 workB = *outworkB = contents->workB; 530 } else { 531 workB = *outworkB = contents->workB1; 532 } 533 if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows); 534 swaits = contents->swaits; 535 rwaits = contents->rwaits; 536 537 ierr = MatDenseGetArrayRead(B,&b);CHKERRQ(ierr); 538 ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); 539 if (blda != contents->blda)
SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot reuse an input matrix with lda %D != %D",blda,contents->blda); 540 ierr = MatDenseGetArray(workB,&rvalues);CHKERRQ(ierr); 541 542 /* Post recv, use MPI derived data type to save memory */ 543 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 544 rtype = contents->rtype; 545 for (i=0; i<nrecvs; i++) { 546 ierr = MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr); 547 } 548 549 stype = contents->stype; 550 for (i=0; i<nsends; i++) { 551 ierr = MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr); 552 } 553 554 if (nrecvs) {ierr = MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE);CHKERRQ(ierr);} 555 if (nsends) {ierr = MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE);CHKERRQ(ierr);} 556 557 ierr = VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL);CHKERRQ(ierr); 558 ierr = VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL);CHKERRQ(ierr); 559 ierr = MatDenseRestoreArrayRead(B,&b);CHKERRQ(ierr); 560 ierr = MatDenseRestoreArray(workB,&rvalues);CHKERRQ(ierr); 561 PetscFunctionReturn(0); 562 } 563 564 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C) 565 { 566 PetscErrorCode ierr; 567 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 568 Mat_MPIDense *bdense = (Mat_MPIDense*)B->data; 569 Mat_MPIDense *cdense = (Mat_MPIDense*)C->data; 570 Mat workB; 571 MPIAIJ_MPIDense *contents; 572 573 PetscFunctionBegin; 574 MatCheckProduct(C,3); 575 if (!C->product->data) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty"); 576 contents = (MPIAIJ_MPIDense*)C->product->data; 577 /* diagonal block of A times all local rows of B */ 578 /* TODO: this calls a symbolic multiplication every time, which could be avoided */ 579 ierr = MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A);CHKERRQ(ierr); 580 if (contents->workB->cmap->n == B->cmap->N) { 581 /* get off processor parts of B needed to complete C=A*B */ 582 ierr = MatMPIDenseScatter(A,B,0,C,&workB);CHKERRQ(ierr); 583 584 /* off-diagonal block of A times nonlocal rows of B */ 585 ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);CHKERRQ(ierr); 586 } else { 587 Mat Bb,Cb; 588 PetscInt BN=B->cmap->N,n=contents->workB->cmap->n,i; 589 590 for (i=0; i<BN; i+=n) { 591 ierr = MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb);CHKERRQ(ierr); 592 ierr = MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb);CHKERRQ(ierr); 593 594 /* get off processor parts of B needed to complete C=A*B */ 595 ierr = MatMPIDenseScatter(A,Bb,i+n>BN,C,&workB);CHKERRQ(ierr); 596 597 /* off-diagonal block of A times nonlocal rows of B */ 598 cdense = (Mat_MPIDense*)Cb->data; 599 ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE);CHKERRQ(ierr); 600 601 ierr = MatDenseRestoreSubMatrix(B,&Bb);CHKERRQ(ierr); 602 ierr = MatDenseRestoreSubMatrix(C,&Cb);CHKERRQ(ierr); 603 } 604 } 605 PetscFunctionReturn(0); 606 } 607 608 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 609 { 610 PetscErrorCode ierr; 611 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 612 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 613 Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 614 PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj; 615 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 616 Mat_SeqAIJ *p_loc,*p_oth; 617 PetscInt 
*pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 618 PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca; 619 PetscInt cm = C->rmap->n,anz,pnz; 620 Mat_APMPI *ptap; 621 PetscScalar *apa_sparse; 622 PetscInt *api,*apj,*apJ,i,j,k,row; 623 PetscInt cstart = C->cmap->rstart; 624 PetscInt cdnz,conz,k0,k1,nextp; 625 MPI_Comm comm; 626 PetscMPIInt size; 627 628 PetscFunctionBegin; 629 MatCheckProduct(C,3); 630 ptap = (Mat_APMPI*)C->product->data; 631 if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data"); 632 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 633 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 634 if (!ptap->P_oth && size>1) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()"); 635 636 apa_sparse = ptap->apa; 637 638 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 639 /*-----------------------------------------------------*/ 640 /* update numerical values of P_oth and P_loc */ 641 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 642 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 643 644 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 645 /*----------------------------------------------------------*/ 646 /* get data from symbolic products */ 647 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 648 pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a; 649 if (size >1) { 650 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 651 pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a; 652 } else { 653 p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL; 654 } 655 656 api = ptap->api; 657 apj = ptap->apj; 658 for (i=0; i<cm; i++) { 659 apJ = apj + api[i]; 660 661 /* diagonal portion of A */ 662 anz = adi[i+1] - adi[i]; 663 adj = ad->j + adi[i]; 664 ada = ad->a + adi[i]; 665 for (j=0; j<anz; j++) { 666 row = adj[j]; 667 pnz = pi_loc[row+1] - pi_loc[row]; 668 pj = pj_loc + pi_loc[row]; 669 pa = pa_loc + pi_loc[row]; 670 /* perform sparse axpy */ 671 valtmp = ada[j]; 672 nextp = 0; 673 for (k=0; nextp<pnz; k++) { 674 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 675 apa_sparse[k] += valtmp*pa[nextp++]; 676 } 677 } 678 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 679 } 680 681 /* off-diagonal portion of A */ 682 anz = aoi[i+1] - aoi[i]; 683 aoj = ao->j + aoi[i]; 684 aoa = ao->a + aoi[i]; 685 for (j=0; j<anz; j++) { 686 row = aoj[j]; 687 pnz = pi_oth[row+1] - pi_oth[row]; 688 pj = pj_oth + pi_oth[row]; 689 pa = pa_oth + pi_oth[row]; 690 /* perform sparse axpy */ 691 valtmp = aoa[j]; 692 nextp = 0; 693 for (k=0; nextp<pnz; k++) { 694 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 695 apa_sparse[k] += valtmp*pa[nextp++]; 696 } 697 } 698 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 699 } 700 701 /* set values in C */ 702 cdnz = cd->i[i+1] - cd->i[i]; 703 conz = co->i[i+1] - co->i[i]; 704 705 /* 1st off-diagonal part of C */ 706 ca = coa + co->i[i]; 707 k = 0; 708 for (k0=0; k0<conz; k0++) { 709 if (apJ[k] >= cstart) break; 710 ca[k0] = apa_sparse[k]; 711 apa_sparse[k] = 0.0; 712 k++; 713 } 714 715 /* diagonal part of C */ 716 ca = cda + cd->i[i]; 717 for (k1=0; k1<cdnz; k1++) { 718 ca[k1] = apa_sparse[k]; 719 apa_sparse[k] = 0.0; 720 k++; 721 } 722 723 /* 2nd off-diagonal part of C */ 724 ca = coa + co->i[i]; 725 for (; k0<conz; k0++) { 726 ca[k0] = apa_sparse[k]; 727 apa_sparse[k] = 0.0; 728 k++; 729 } 730 } 731 ierr = 
MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 732 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 733 PetscFunctionReturn(0); 734 } 735 736 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */ 737 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C) 738 { 739 PetscErrorCode ierr; 740 MPI_Comm comm; 741 PetscMPIInt size; 742 Mat_APMPI *ptap; 743 PetscFreeSpaceList free_space = NULL,current_space=NULL; 744 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 745 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 746 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 747 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 748 PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0; 749 PetscInt am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20; 750 PetscReal afill; 751 MatType mtype; 752 753 PetscFunctionBegin; 754 MatCheckProduct(C,4); 755 if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty"); 756 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 757 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 758 759 /* create struct Mat_APMPI and attached it to C later */ 760 ierr = PetscNew(&ptap);CHKERRQ(ierr); 761 762 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 763 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 764 765 /* get P_loc by taking all local rows of P */ 766 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 767 768 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 769 pi_loc = p_loc->i; pj_loc = p_loc->j; 770 if (size > 1) { 771 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 772 pi_oth = p_oth->i; pj_oth = p_oth->j; 773 } else { 774 p_oth = NULL; 775 pi_oth = NULL; pj_oth = NULL; 776 } 777 778 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 779 /*-------------------------------------------------------------------*/ 780 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 781 ptap->api = api; 782 api[0] = 0; 783 784 ierr = PetscLLCondensedCreate_Scalable(lsize,&lnk);CHKERRQ(ierr); 785 786 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 787 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr); 788 current_space = free_space; 789 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 790 for (i=0; i<am; i++) { 791 /* diagonal portion of A */ 792 nzi = adi[i+1] - adi[i]; 793 for (j=0; j<nzi; j++) { 794 row = *adj++; 795 pnz = pi_loc[row+1] - pi_loc[row]; 796 Jptr = pj_loc + pi_loc[row]; 797 /* Expand list if it is not long enough */ 798 if (pnz+apnz_max > lsize) { 799 lsize = pnz+apnz_max; 800 ierr = PetscLLCondensedExpand_Scalable(lsize, &lnk);CHKERRQ(ierr); 801 } 802 /* add non-zero cols of P into the sorted linked list lnk */ 803 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 804 apnz = *lnk; /* The first element in the list is the number of items in the list */ 805 api[i+1] = api[i] + apnz; 806 if (apnz > apnz_max) apnz_max = apnz; 807 } 808 /* off-diagonal portion of A */ 809 nzi = aoi[i+1] - aoi[i]; 810 for (j=0; j<nzi; j++) { 811 row = *aoj++; 812 pnz = pi_oth[row+1] - pi_oth[row]; 813 Jptr = pj_oth + pi_oth[row]; 814 /* Expand list if it is not long 
enough */ 815 if (pnz+apnz_max > lsize) { 816 lsize = pnz + apnz_max; 817 ierr = PetscLLCondensedExpand_Scalable(lsize, &lnk);CHKERRQ(ierr); 818 } 819 /* add non-zero cols of P into the sorted linked list lnk */ 820 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 821 apnz = *lnk; /* The first element in the list is the number of items in the list */ 822 api[i+1] = api[i] + apnz; 823 if (apnz > apnz_max) apnz_max = apnz; 824 } 825 apnz = *lnk; 826 api[i+1] = api[i] + apnz; 827 if (apnz > apnz_max) apnz_max = apnz; 828 829 /* if free space is not available, double the total space in the list */ 830 if (current_space->local_remaining<apnz) { 831 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr); 832 nspacedouble++; 833 } 834 835 /* Copy data into free space, then initialize lnk */ 836 ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr); 837 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 838 839 current_space->array += apnz; 840 current_space->local_used += apnz; 841 current_space->local_remaining -= apnz; 842 } 843 844 /* Allocate space for apj, initialize apj, and */ 845 /* destroy list of free space and other temporary array(s) */ 846 ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr); 847 apj = ptap->apj; 848 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 849 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); 850 851 /* create and assemble symbolic parallel matrix C */ 852 /*----------------------------------------------------*/ 853 ierr = MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 854 ierr = MatSetBlockSizesFromMats(C,A,P);CHKERRQ(ierr); 855 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 856 ierr = MatSetType(C,mtype);CHKERRQ(ierr); 857 ierr = MatMPIAIJSetPreallocation(C,0,dnz,0,onz);CHKERRQ(ierr); 858 859 /* malloc apa for assembly C */ 860 ierr = PetscCalloc1(apnz_max,&ptap->apa);CHKERRQ(ierr); 861 862 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);CHKERRQ(ierr); 863 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 864 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 865 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 866 867 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ; 868 C->ops->productnumeric = MatProductNumeric_AB; 869 870 /* attach the supporting struct to C for reuse */ 871 C->product->data = ptap; 872 C->product->destroy = MatDestroy_MPIAIJ_MatMatMult; 873 874 /* set MatInfo */ 875 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 876 if (afill < 1.0) afill = 1.0; 877 C->info.mallocs = nspacedouble; 878 C->info.fill_ratio_given = fill; 879 C->info.fill_ratio_needed = afill; 880 881 #if defined(PETSC_USE_INFO) 882 if (api[am]) { 883 ierr = PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 884 ierr = PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 885 } else { 886 ierr = PetscInfo(C,"Empty matrix product\n");CHKERRQ(ierr); 887 } 888 #endif 889 PetscFunctionReturn(0); 890 } 891 892 /* This function is needed for the seqMPI matrix-matrix multiplication. */ 893 /* Three input arrays are merged to one output array. The size of the */ 894 /* output array is also output. Duplicate entries only show up once.
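The three inputs are assumed to be sorted in increasing order. For example, merging in1 = {1,3,5}, in2 = {2,3} and in3 = {3,6} yields out = {1,2,3,5,6} with *size4 = 5.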
*/ 895 static void Merge3SortedArrays(PetscInt size1, PetscInt *in1, 896 PetscInt size2, PetscInt *in2, 897 PetscInt size3, PetscInt *in3, 898 PetscInt *size4, PetscInt *out) 899 { 900 int i = 0, j = 0, k = 0, l = 0; 901 902 /* Traverse all three arrays */ 903 while (i<size1 && j<size2 && k<size3) { 904 if (in1[i] < in2[j] && in1[i] < in3[k]) { 905 out[l++] = in1[i++]; 906 } 907 else if (in2[j] < in1[i] && in2[j] < in3[k]) { 908 out[l++] = in2[j++]; 909 } 910 else if (in3[k] < in1[i] && in3[k] < in2[j]) { 911 out[l++] = in3[k++]; 912 } 913 else if (in1[i] == in2[j] && in1[i] < in3[k]) { 914 out[l++] = in1[i]; 915 i++, j++; 916 } 917 else if (in1[i] == in3[k] && in1[i] < in2[j]) { 918 out[l++] = in1[i]; 919 i++, k++; 920 } 921 else if (in3[k] == in2[j] && in2[j] < in1[i]) { 922 out[l++] = in2[j]; 923 k++, j++; 924 } 925 else if (in1[i] == in2[j] && in1[i] == in3[k]) { 926 out[l++] = in1[i]; 927 i++, j++, k++; 928 } 929 } 930 931 /* Traverse two remaining arrays */ 932 while (i<size1 && j<size2) { 933 if (in1[i] < in2[j]) { 934 out[l++] = in1[i++]; 935 } 936 else if (in1[i] > in2[j]) { 937 out[l++] = in2[j++]; 938 } 939 else { 940 out[l++] = in1[i]; 941 i++, j++; 942 } 943 } 944 945 while (i<size1 && k<size3) { 946 if (in1[i] < in3[k]) { 947 out[l++] = in1[i++]; 948 } 949 else if (in1[i] > in3[k]) { 950 out[l++] = in3[k++]; 951 } 952 else { 953 out[l++] = in1[i]; 954 i++, k++; 955 } 956 } 957 958 while (k<size3 && j<size2) { 959 if (in3[k] < in2[j]) { 960 out[l++] = in3[k++]; 961 } 962 else if (in3[k] > in2[j]) { 963 out[l++] = in2[j++]; 964 } 965 else { 966 out[l++] = in3[k]; 967 k++, j++; 968 } 969 } 970 971 /* Traverse one remaining array */ 972 while (i<size1) out[l++] = in1[i++]; 973 while (j<size2) out[l++] = in2[j++]; 974 while (k<size3) out[l++] = in3[k++]; 975 976 *size4 = l; 977 } 978 979 /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */ 980 /* adds up the products. Two of these three multiplications are performed with existing (sequential) */ 981 /* matrix-matrix multiplications. 
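Concretely, each local row of C = A*P is assembled as A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth: the first term comes from a sequential MatProduct (option prefix inner_diag_), the last from MatMatMultSymbolic_SeqAIJ_SeqAIJ applied to P_oth (option prefix inner_offdiag_), and the middle term is built here with a condensed linked list; the three sorted column lists are then merged row by row with Merge3SortedArrays().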
*/ 982 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C) 983 { 984 PetscErrorCode ierr; 985 MPI_Comm comm; 986 PetscMPIInt size; 987 Mat_APMPI *ptap; 988 PetscFreeSpaceList free_space_diag=NULL, current_space=NULL; 989 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data; 990 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc; 991 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data; 992 Mat_SeqAIJ *adpd_seq, *p_off, *aopoth_seq; 993 PetscInt adponz, adpdnz; 994 PetscInt *pi_loc,*dnz,*onz; 995 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart; 996 PetscInt *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi, 997 *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj; 998 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend; 999 PetscBT lnkbt; 1000 PetscReal afill; 1001 PetscMPIInt rank; 1002 Mat adpd, aopoth; 1003 MatType mtype; 1004 const char *prefix; 1005 1006 PetscFunctionBegin; 1007 MatCheckProduct(C,4); 1008 if (C->product->data) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty"); 1009 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1010 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1011 ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr); 1012 ierr = MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend);CHKERRQ(ierr); 1013 1014 /* create struct Mat_APMPI and attached it to C later */ 1015 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1016 1017 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 1018 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 1019 1020 /* get P_loc by taking all local rows of P */ 1021 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 1022 1023 1024 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 1025 pi_loc = p_loc->i; 1026 1027 /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */ 1028 ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr); 1029 ierr = PetscMalloc1(am+2,&adpoi);CHKERRQ(ierr); 1030 1031 adpoi[0] = 0; 1032 ptap->api = api; 1033 api[0] = 0; 1034 1035 /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */ 1036 ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr); 1037 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 1038 1039 /* Symbolic calc of A_loc_diag * P_loc_diag */ 1040 ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr); 1041 ierr = MatProductCreate(a->A,p->A,NULL,&adpd);CHKERRQ(ierr); 1042 ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr); 1043 ierr = MatSetOptionsPrefix(adpd,prefix);CHKERRQ(ierr); 1044 ierr = MatAppendOptionsPrefix(adpd,"inner_diag_");CHKERRQ(ierr); 1045 1046 ierr = MatProductSetType(adpd,MATPRODUCT_AB);CHKERRQ(ierr); 1047 ierr = MatProductSetAlgorithm(adpd,"sorted");CHKERRQ(ierr); 1048 ierr = MatProductSetFill(adpd,fill);CHKERRQ(ierr); 1049 ierr = MatProductSetFromOptions(adpd);CHKERRQ(ierr); 1050 ierr = MatProductSymbolic(adpd);CHKERRQ(ierr); 1051 1052 adpd_seq = (Mat_SeqAIJ*)((adpd)->data); 1053 adpdi = adpd_seq->i; adpdj = adpd_seq->j; 1054 p_off = (Mat_SeqAIJ*)((p->B)->data); 1055 poff_i = p_off->i; poff_j = p_off->j; 1056 1057 /* j_temp stores indices of a result row before they are added to the linked list */ 1058 ierr = PetscMalloc1(pN+2,&j_temp);CHKERRQ(ierr); 1059 1060 1061 /* Symbolic calc of 
the A_diag * p_loc_off */ 1062 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 1063 ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);CHKERRQ(ierr); 1064 current_space = free_space_diag; 1065 1066 for (i=0; i<am; i++) { 1067 /* A_diag * P_loc_off */ 1068 nzi = adi[i+1] - adi[i]; 1069 for (j=0; j<nzi; j++) { 1070 row = *adj++; 1071 pnz = poff_i[row+1] - poff_i[row]; 1072 Jptr = poff_j + poff_i[row]; 1073 for (i1 = 0; i1 < pnz; i1++) { 1074 j_temp[i1] = p->garray[Jptr[i1]]; 1075 } 1076 /* add non-zero cols of P into the sorted linked list lnk */ 1077 ierr = PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);CHKERRQ(ierr); 1078 } 1079 1080 adponz = lnk[0]; 1081 adpoi[i+1] = adpoi[i] + adponz; 1082 1083 /* if free space is not available, double the total space in the list */ 1084 if (current_space->local_remaining<adponz) { 1085 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);CHKERRQ(ierr); 1086 nspacedouble++; 1087 } 1088 1089 /* Copy data into free space, then initialize lnk */ 1090 ierr = PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1091 1092 current_space->array += adponz; 1093 current_space->local_used += adponz; 1094 current_space->local_remaining -= adponz; 1095 } 1096 1097 /* Symbolic calc of A_off * P_oth */ 1098 ierr = MatSetOptionsPrefix(a->B,prefix);CHKERRQ(ierr); 1099 ierr = MatAppendOptionsPrefix(a->B,"inner_offdiag_");CHKERRQ(ierr); 1100 ierr = MatCreate(PETSC_COMM_SELF,&aopoth);CHKERRQ(ierr); 1101 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth);CHKERRQ(ierr); 1102 aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data); 1103 aopothi = aopoth_seq->i; aopothj = aopoth_seq->j; 1104 1105 /* Allocate space for apj, adpj, aopj, ...
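The apj buffer allocated below has aopothi[am] + adpoi[am] + adpdi[am] + 2 entries: a merged row can never hold more column indices than the three per-row lists being merged combined (duplicates only shrink it), so this sum is a safe upper bound.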
*/ 1106 /* destroy lists of free space and other temporary array(s) */ 1107 1108 ierr = PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj);CHKERRQ(ierr); 1109 ierr = PetscMalloc1(adpoi[am]+2, &adpoj);CHKERRQ(ierr); 1110 1111 /* Copy from linked list to j-array */ 1112 ierr = PetscFreeSpaceContiguous(&free_space_diag,adpoj);CHKERRQ(ierr); 1113 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 1114 1115 adpoJ = adpoj; 1116 adpdJ = adpdj; 1117 aopJ = aopothj; 1118 apj = ptap->apj; 1119 apJ = apj; /* still empty */ 1120 1121 /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */ 1122 /* A_diag * P_loc_diag to get A*P */ 1123 for (i = 0; i < am; i++) { 1124 aopnz = aopothi[i+1] - aopothi[i]; 1125 adponz = adpoi[i+1] - adpoi[i]; 1126 adpdnz = adpdi[i+1] - adpdi[i]; 1127 1128 /* Correct indices from A_diag*P_diag */ 1129 for (i1 = 0; i1 < adpdnz; i1++) { 1130 adpdJ[i1] += p_colstart; 1131 } 1132 /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */ 1133 Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ); 1134 ierr = MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz);CHKERRQ(ierr); 1135 1136 aopJ += aopnz; 1137 adpoJ += adponz; 1138 adpdJ += adpdnz; 1139 apJ += apnz; 1140 api[i+1] = api[i] + apnz; 1141 } 1142 1143 /* malloc apa to store dense row A[i,:]*P */ 1144 ierr = PetscCalloc1(pN+2,&ptap->apa);CHKERRQ(ierr); 1145 1146 /* create and assemble symbolic parallel matrix C */ 1147 ierr = MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1148 ierr = MatSetBlockSizesFromMats(C,A,P);CHKERRQ(ierr); 1149 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 1150 ierr = MatSetType(C,mtype);CHKERRQ(ierr); 1151 ierr = MatMPIAIJSetPreallocation(C,0,dnz,0,onz);CHKERRQ(ierr); 1152 1153 1154 ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api);CHKERRQ(ierr); 1155 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1156 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1157 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1158 1159 1160 C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1161 C->ops->productnumeric = MatProductNumeric_AB; 1162 1163 /* attach the supporting struct to C for reuse */ 1164 C->product->data = ptap; 1165 C->product->destroy = MatDestroy_MPIAIJ_MatMatMult; 1166 1167 /* set MatInfo */ 1168 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 1169 if (afill < 1.0) afill = 1.0; 1170 C->info.mallocs = nspacedouble; 1171 C->info.fill_ratio_given = fill; 1172 C->info.fill_ratio_needed = afill; 1173 1174 #if defined(PETSC_USE_INFO) 1175 if (api[am]) { 1176 ierr = PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 1177 ierr = PetscInfo1(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill);CHKERRQ(ierr); 1178 } else { 1179 ierr = PetscInfo(C,"Empty matrix product\n");CHKERRQ(ierr); 1180 } 1181 #endif 1182 1183 ierr = MatDestroy(&aopoth);CHKERRQ(ierr); 1184 ierr = MatDestroy(&adpd);CHKERRQ(ierr); 1185 ierr = PetscFree(j_temp);CHKERRQ(ierr); 1186 ierr = PetscFree(adpoj);CHKERRQ(ierr); 1187 ierr = PetscFree(adpoi);CHKERRQ(ierr); 1188 PetscFunctionReturn(0); 1189 } 1190 1191 /*-------------------------------------------------------------------------*/ 1192 /* This routine only works when scall=MAT_REUSE_MATRIX! 
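The symbolic phase is expected to have stashed Pt = P^T in ptap->Pt; the numeric routine below only refreshes Pt with MatTranspose(P,MAT_REUSE_MATRIX,&Pt) and then reuses MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C). A minimal driver sketch, illustrative only (error checking omitted, and it assumes this algorithm is the one selected for the AtB product):
      MatProductCreate(P,A,NULL,&C);
      MatProductSetType(C,MATPRODUCT_AtB);
      MatProductSetFromOptions(C);
      MatProductSymbolic(C);
      MatProductNumeric(C);   repeated numeric calls reuse the stored Pt and land here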
*/ 1193 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C) 1194 { 1195 PetscErrorCode ierr; 1196 Mat_APMPI *ptap; 1197 Mat Pt; 1198 1199 PetscFunctionBegin; 1200 MatCheckProduct(C,3); 1201 ptap = (Mat_APMPI*)C->product->data; 1202 if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data"); 1203 if (!ptap->Pt) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()"); 1204 1205 Pt = ptap->Pt; 1206 ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr); 1207 ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C);CHKERRQ(ierr); 1208 PetscFunctionReturn(0); 1209 } 1210 1211 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */ 1212 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C) 1213 { 1214 PetscErrorCode ierr; 1215 Mat_APMPI *ptap; 1216 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data; 1217 MPI_Comm comm; 1218 PetscMPIInt size,rank; 1219 PetscFreeSpaceList free_space=NULL,current_space=NULL; 1220 PetscInt pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n; 1221 PetscInt *lnk,i,k,nsend,rstart; 1222 PetscBT lnkbt; 1223 PetscMPIInt tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv; 1224 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 1225 PetscInt len,proc,*dnz,*onz,*owners,nzi; 1226 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 1227 MPI_Request *swaits,*rwaits; 1228 MPI_Status *sstatus,rstatus; 1229 PetscLayout rowmap; 1230 PetscInt *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */ 1231 PetscMPIInt *len_r,*id_r; /* array of length of comm->size, store send/recv matrix values */ 1232 PetscInt *Jptr,*prmap=p->garray,con,j,Crmax; 1233 Mat_SeqAIJ *a_loc,*c_loc,*c_oth; 1234 PetscTable ta; 1235 MatType mtype; 1236 const char *prefix; 1237 1238 PetscFunctionBegin; 1239 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1240 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1241 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1242 1243 /* create symbolic parallel matrix C */ 1244 ierr = MatGetType(A,&mtype);CHKERRQ(ierr); 1245 ierr = MatSetType(C,mtype);CHKERRQ(ierr); 1246 1247 C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1248 1249 /* create struct Mat_APMPI and attached it to C later */ 1250 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1251 ptap->reuse = MAT_INITIAL_MATRIX; 1252 1253 /* (0) compute Rd = Pd^T, Ro = Po^T */ 1254 /* --------------------------------- */ 1255 ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1256 ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1257 1258 /* (1) compute symbolic A_loc */ 1259 /* ---------------------------*/ 1260 ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);CHKERRQ(ierr); 1261 1262 /* (2-1) compute symbolic C_oth = Ro*A_loc */ 1263 /* ------------------------------------ */ 1264 ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr); 1265 ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr); 1266 ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr); 1267 ierr = MatCreate(PETSC_COMM_SELF,&ptap->C_oth);CHKERRQ(ierr); 1268 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth);CHKERRQ(ierr); 1269 1270 /* (3) send coj of C_oth to other processors */ 1271 /* ------------------------------------------ */ 1272 /* determine row ownership */ 1273 ierr = 
PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr); 1274 rowmap->n = pn; 1275 rowmap->bs = 1; 1276 ierr = PetscLayoutSetUp(rowmap);CHKERRQ(ierr); 1277 owners = rowmap->range; 1278 1279 /* determine the number of messages to send, their lengths */ 1280 ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr); 1281 ierr = PetscArrayzero(len_s,size);CHKERRQ(ierr); 1282 ierr = PetscArrayzero(len_si,size);CHKERRQ(ierr); 1283 1284 c_oth = (Mat_SeqAIJ*)ptap->C_oth->data; 1285 coi = c_oth->i; coj = c_oth->j; 1286 con = ptap->C_oth->rmap->n; 1287 proc = 0; 1288 for (i=0; i<con; i++) { 1289 while (prmap[i] >= owners[proc+1]) proc++; 1290 len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */ 1291 len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */ 1292 } 1293 1294 len = 0; /* max length of buf_si[], see (4) */ 1295 owners_co[0] = 0; 1296 nsend = 0; 1297 for (proc=0; proc<size; proc++) { 1298 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1299 if (len_s[proc]) { 1300 nsend++; 1301 len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */ 1302 len += len_si[proc]; 1303 } 1304 } 1305 1306 /* determine the number and length of messages to receive for coi and coj */ 1307 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr); 1308 ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr); 1309 1310 /* post the Irecv and Isend of coj */ 1311 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1312 ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1313 ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr); 1314 for (proc=0, k=0; proc<size; proc++) { 1315 if (!len_s[proc]) continue; 1316 i = owners_co[proc]; 1317 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1318 k++; 1319 } 1320 1321 /* (2-2) compute symbolic C_loc = Rd*A_loc */ 1322 /* ---------------------------------------- */ 1323 ierr = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr); 1324 ierr = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr); 1325 ierr = MatCreate(PETSC_COMM_SELF,&ptap->C_loc);CHKERRQ(ierr); 1326 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc);CHKERRQ(ierr); 1327 c_loc = (Mat_SeqAIJ*)ptap->C_loc->data; 1328 1329 /* receives coj are complete */ 1330 for (i=0; i<nrecv; i++) { 1331 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1332 } 1333 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1334 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1335 1336 /* add received column indices into ta to update Crmax */ 1337 a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data; 1338 1339 /* create and initialize a linked list */ 1340 ierr = PetscTableCreate(an,aN,&ta);CHKERRQ(ierr); /* for compute Crmax */ 1341 MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta); 1342 1343 for (k=0; k<nrecv; k++) {/* k-th received message */ 1344 Jptr = buf_rj[k]; 1345 for (j=0; j<len_r[k]; j++) { 1346 ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr); 1347 } 1348 } 1349 ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); 1350 ierr = PetscTableDestroy(&ta);CHKERRQ(ierr); 1351 1352 /* (4) send and recv coi */ 1353 /*-----------------------*/ 1354 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1355 ierr = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1356 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 1357 
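/* Each outgoing i-structure message occupies len_si[proc] = 2*nrows+2 entries of buf_s: one row count, nrows local row indices, and nrows+1 row offsets; the exact layout is documented in the comment inside the loop below. */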
buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1358 for (proc=0,k=0; proc<size; proc++) { 1359 if (!len_s[proc]) continue; 1360 /* form outgoing message for i-structure: 1361 buf_si[0]: nrows to be sent 1362 [1:nrows]: row index (global) 1363 [nrows+1:2*nrows+1]: i-structure index 1364 */ 1365 /*-------------------------------------------*/ 1366 nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */ 1367 buf_si_i = buf_si + nrows+1; 1368 buf_si[0] = nrows; 1369 buf_si_i[0] = 0; 1370 nrows = 0; 1371 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1372 nzi = coi[i+1] - coi[i]; 1373 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1374 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1375 nrows++; 1376 } 1377 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1378 k++; 1379 buf_si += len_si[proc]; 1380 } 1381 for (i=0; i<nrecv; i++) { 1382 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1383 } 1384 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1385 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1386 1387 ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr); 1388 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1389 ierr = PetscFree(swaits);CHKERRQ(ierr); 1390 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1391 1392 /* (5) compute the local portion of C */ 1393 /* ------------------------------------------ */ 1394 /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of C */ 1395 ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr); 1396 current_space = free_space; 1397 1398 ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr); 1399 for (k=0; k<nrecv; k++) { 1400 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1401 nrows = *buf_ri_k[k]; 1402 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 1403 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */ 1404 } 1405 1406 ierr = MatPreallocateInitialize(comm,pn,an,dnz,onz);CHKERRQ(ierr); 1407 ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr); 1408 for (i=0; i<pn; i++) { 1409 /* add C_loc into C */ 1410 nzi = c_loc->i[i+1] - c_loc->i[i]; 1411 Jptr = c_loc->j + c_loc->i[i]; 1412 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1413 1414 /* add received col data into lnk */ 1415 for (k=0; k<nrecv; k++) { /* k-th received message */ 1416 if (i == *nextrow[k]) { /* i-th row */ 1417 nzi = *(nextci[k]+1) - *nextci[k]; 1418 Jptr = buf_rj[k] + *nextci[k]; 1419 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1420 nextrow[k]++; nextci[k]++; 1421 } 1422 } 1423 nzi = lnk[0]; 1424 1425 /* copy data into free space, then initialize lnk */ 1426 ierr = PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1427 ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr); 1428 } 1429 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1430 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 1431 ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr); 1432 1433 /* local sizes and preallocation */ 1434 ierr = MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1435 if (P->cmap->bs > 0) {ierr = PetscLayoutSetBlockSize(C->rmap,P->cmap->bs);CHKERRQ(ierr);} 1436 if (A->cmap->bs > 0) {ierr = PetscLayoutSetBlockSize(C->cmap,A->cmap->bs);CHKERRQ(ierr);} 1437 ierr =
MatMPIAIJSetPreallocation(C,0,dnz,0,onz);CHKERRQ(ierr); 1438 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1439 1440 /* add C_loc and C_oth to C */ 1441 ierr = MatGetOwnershipRange(C,&rstart,NULL);CHKERRQ(ierr); 1442 for (i=0; i<pn; i++) { 1443 const PetscInt ncols = c_loc->i[i+1] - c_loc->i[i]; 1444 const PetscInt *cols = c_loc->j + c_loc->i[i]; 1445 const PetscInt row = rstart + i; 1446 ierr = MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);CHKERRQ(ierr); 1447 } 1448 for (i=0; i<con; i++) { 1449 const PetscInt ncols = c_oth->i[i+1] - c_oth->i[i]; 1450 const PetscInt *cols = c_oth->j + c_oth->i[i]; 1451 const PetscInt row = prmap[i]; 1452 ierr = MatSetValues(C,1,&row,ncols,cols,NULL,INSERT_VALUES);CHKERRQ(ierr); 1453 } 1454 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1455 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1456 1457 /* members in merge */ 1458 ierr = PetscFree(id_r);CHKERRQ(ierr); 1459 ierr = PetscFree(len_r);CHKERRQ(ierr); 1460 ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr); 1461 ierr = PetscFree(buf_ri);CHKERRQ(ierr); 1462 ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr); 1463 ierr = PetscFree(buf_rj);CHKERRQ(ierr); 1464 ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr); 1465 1466 /* attach the supporting struct to C for reuse */ 1467 C->product->data = ptap; 1468 C->product->destroy = MatDestroy_MPIAIJ_PtAP; 1469 PetscFunctionReturn(0); 1470 } 1471 1472 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C) 1473 { 1474 PetscErrorCode ierr; 1475 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data; 1476 Mat_SeqAIJ *c_seq; 1477 Mat_APMPI *ptap; 1478 Mat A_loc,C_loc,C_oth; 1479 PetscInt i,rstart,rend,cm,ncols,row; 1480 const PetscInt *cols; 1481 const PetscScalar *vals; 1482 1483 PetscFunctionBegin; 1484 MatCheckProduct(C,3); 1485 ptap = (Mat_APMPI*)C->product->data; 1486 if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data"); 1487 if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. 
Do not call MatProductClear()"); 1488 ierr = MatZeroEntries(C);CHKERRQ(ierr); 1489 1490 if (ptap->reuse == MAT_REUSE_MATRIX) { 1491 /* These matrices are obtained in MatTransposeMatMultSymbolic() */ 1492 /* 1) get R = Pd^T, Ro = Po^T */ 1493 /*----------------------------*/ 1494 ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1495 ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1496 1497 /* 2) compute numeric A_loc */ 1498 /*--------------------------*/ 1499 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);CHKERRQ(ierr); 1500 } 1501 1502 /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */ 1503 A_loc = ptap->A_loc; 1504 ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);CHKERRQ(ierr); 1505 ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);CHKERRQ(ierr); 1506 C_loc = ptap->C_loc; 1507 C_oth = ptap->C_oth; 1508 1509 /* add C_loc and C_oth to C */ 1510 ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr); 1511 1512 /* C_loc -> C */ 1513 cm = C_loc->rmap->N; 1514 c_seq = (Mat_SeqAIJ*)C_loc->data; 1515 cols = c_seq->j; 1516 vals = c_seq->a; 1517 for (i=0; i<cm; i++) { 1518 ncols = c_seq->i[i+1] - c_seq->i[i]; 1519 row = rstart + i; 1520 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1521 cols += ncols; vals += ncols; 1522 } 1523 1524 /* Co -> C, off-processor part */ 1525 cm = C_oth->rmap->N; 1526 c_seq = (Mat_SeqAIJ*)C_oth->data; 1527 cols = c_seq->j; 1528 vals = c_seq->a; 1529 for (i=0; i<cm; i++) { 1530 ncols = c_seq->i[i+1] - c_seq->i[i]; 1531 row = p->garray[i]; 1532 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1533 cols += ncols; vals += ncols; 1534 } 1535 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1536 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1537 1538 ptap->reuse = MAT_REUSE_MATRIX; 1539 PetscFunctionReturn(0); 1540 } 1541 1542 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C) 1543 { 1544 PetscErrorCode ierr; 1545 Mat_Merge_SeqsToMPI *merge; 1546 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data; 1547 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 1548 Mat_APMPI *ptap; 1549 PetscInt *adj; 1550 PetscInt i,j,k,anz,pnz,row,*cj,nexta; 1551 MatScalar *ada,*ca,valtmp; 1552 PetscInt am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 1553 MPI_Comm comm; 1554 PetscMPIInt size,rank,taga,*len_s; 1555 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 1556 PetscInt **buf_ri,**buf_rj; 1557 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 1558 MPI_Request *s_waits,*r_waits; 1559 MPI_Status *status; 1560 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 1561 PetscInt *ai,*aj,*coi,*coj,*poJ,*pdJ; 1562 Mat A_loc; 1563 Mat_SeqAIJ *a_loc; 1564 1565 PetscFunctionBegin; 1566 MatCheckProduct(C,3); 1567 ptap = (Mat_APMPI*)C->product->data; 1568 if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data"); 1569 if (!ptap->A_loc) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. 
Do not call MatProductClear()"); 1570 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1571 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1572 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1573 1574 merge = ptap->merge; 1575 1576 /* 2) compute numeric C_seq = P_loc^T*A_loc */ 1577 /*------------------------------------------*/ 1578 /* get data from symbolic products */ 1579 coi = merge->coi; coj = merge->coj; 1580 ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr); 1581 bi = merge->bi; bj = merge->bj; 1582 owners = merge->rowmap->range; 1583 ierr = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr); 1584 1585 /* get A_loc by taking all local rows of A */ 1586 A_loc = ptap->A_loc; 1587 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 1588 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1589 ai = a_loc->i; 1590 aj = a_loc->j; 1591 1592 for (i=0; i<am; i++) { 1593 anz = ai[i+1] - ai[i]; 1594 adj = aj + ai[i]; 1595 ada = a_loc->a + ai[i]; 1596 1597 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 1598 /*-------------------------------------------------------------*/ 1599 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 1600 pnz = po->i[i+1] - po->i[i]; 1601 poJ = po->j + po->i[i]; 1602 pA = po->a + po->i[i]; 1603 for (j=0; j<pnz; j++) { 1604 row = poJ[j]; 1605 cj = coj + coi[row]; 1606 ca = coa + coi[row]; 1607 /* perform sparse axpy */ 1608 nexta = 0; 1609 valtmp = pA[j]; 1610 for (k=0; nexta<anz; k++) { 1611 if (cj[k] == adj[nexta]) { 1612 ca[k] += valtmp*ada[nexta]; 1613 nexta++; 1614 } 1615 } 1616 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1617 } 1618 1619 /* put the value into Cd (diagonal part) */ 1620 pnz = pd->i[i+1] - pd->i[i]; 1621 pdJ = pd->j + pd->i[i]; 1622 pA = pd->a + pd->i[i]; 1623 for (j=0; j<pnz; j++) { 1624 row = pdJ[j]; 1625 cj = bj + bi[row]; 1626 ca = ba + bi[row]; 1627 /* perform sparse axpy */ 1628 nexta = 0; 1629 valtmp = pA[j]; 1630 for (k=0; nexta<anz; k++) { 1631 if (cj[k] == adj[nexta]) { 1632 ca[k] += valtmp*ada[nexta]; 1633 nexta++; 1634 } 1635 } 1636 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1637 } 1638 } 1639 1640 /* 3) send and recv matrix values coa */ 1641 /*------------------------------------*/ 1642 buf_ri = merge->buf_ri; 1643 buf_rj = merge->buf_rj; 1644 len_s = merge->len_s; 1645 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 1646 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 1647 1648 ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr); 1649 for (proc=0,k=0; proc<size; proc++) { 1650 if (!len_s[proc]) continue; 1651 i = merge->owners_co[proc]; 1652 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 1653 k++; 1654 } 1655 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 1656 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 1657 1658 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 1659 ierr = PetscFree(r_waits);CHKERRQ(ierr); 1660 ierr = PetscFree(coa);CHKERRQ(ierr); 1661 1662 /* 4) insert local Cseq and received values into Cmpi */ 1663 /*----------------------------------------------------*/ 1664 ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr); 1665 for (k=0; k<merge->nrecv; k++) { 1666 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1667 nrows = *(buf_ri_k[k]); 1668 nextrow[k] = buf_ri_k[k]+1; /* next row number of 
  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
{
  PetscErrorCode      ierr;
  Mat                 A_loc,POt,PDt;
  Mat_APMPI           *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rstart=P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;
  PetscTable          ta;
  MatType             mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_APMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai = a_loc->i;
  aj = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;
  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);CHKERRQ(ierr);
  MatRowMergeMax_SeqAIJ(a_loc,am,ta);
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);

  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out denserows */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); /* must destroy to get a new one for C */

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscCalloc1(size,&len_si);CHKERRQ(ierr);
  ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }
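  /* Message-length bookkeeping: a process that is to receive nrows rows of Co gets
     len_si[proc] = 2*(nrows+1) integers, laid out as
       [nrows, row_0..row_{nrows-1}, i_0..i_{nrows}]
     i.e. one count, nrows local row indices, and an (nrows+1)-entry row-pointer (i-structure).
     For example, nrows = 3 gives a message of 8 integers, and the receiver recovers
     nrows = len_si/2 - 1 = 3. */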
  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into table to update Armax */
  /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
  for (k=0; k<merge->nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<merge->len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);
  /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
         [1:nrows]:                 row index (global)
         [nrows+1:2*nrows+1]:       i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);
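  /* The fill parameter used below is only a heuristic: the initial free space for the local
     rows of C is sized as fill*(nnz(Pd^T) + nnz(Po^T) + nnz(A_loc)) and is grown on demand
     (nspacedouble counts how often); the PetscInfo output at the end of this routine reports
     the fill ratio that was actually needed. */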
  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);
  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);
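  /* C is assembled here, in the symbolic phase, with its final nonzero pattern but NULL values;
     the numeric routine can then insert values with INSERT_VALUES without changing the pattern.
     MAT_NO_OFF_PROC_ENTRIES is safe because every row set below (and in the numeric routine)
     is locally owned. */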
  /* create symbolic parallel matrix C - why cannot be assembled in Numeric part */
  /*------------------------------------------------------------------------------*/
  ierr = MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(C,mtype);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(C,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(C,1);CHKERRQ(ierr);
  ierr = MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;

  /* attach the supporting struct to C for reuse */
  C->product->data    = ptap;
  C->product->destroy = MatDestroy_MPIAIJ_PtAP;
  ptap->merge = merge;

  C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;

#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(C,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(C,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

/* ---------------------------------------------------------------- */
static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;
  Mat            A=product->A,B=product->B;
  PetscReal      fill=product->fill;
  PetscBool      flg;

  PetscFunctionBegin;
  /* scalable */
  ierr = PetscStrcmp(product->alg,"scalable",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    goto next;
  }

  /* nonscalable */
  ierr = PetscStrcmp(product->alg,"nonscalable",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr);
    goto next;
  }

  /* matmatmult */
  ierr = PetscStrcmp(product->alg,"at*b",&flg);CHKERRQ(ierr);
  if (flg) {
    Mat       At;
    Mat_APMPI *ptap;

    ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C);CHKERRQ(ierr);
    ptap = (Mat_APMPI*)C->product->data;
    if (ptap) {
      ptap->Pt = At;
      C->product->destroy = MatDestroy_MPIAIJ_PtAP;
    }
    C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
    goto next;
  }

  SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct type is not supported");

next:
  C->ops->productnumeric = MatProductNumeric_AtB;
  PetscFunctionReturn(0);
}
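/*
   Note on the "at*b" branch above: it forms an explicit transpose At = A^T and then reuses the
   MatMatMult (AB) machinery, trading extra memory for the simpler product kernel; At is stashed
   in ptap->Pt so that the product's destroy callback can free it when the product is cleared.
*/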
/* ---------------------------------------------------------------- */
/* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;
  Mat            A=product->A,B=product->B;
#if defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
  PetscInt       nalg = 4;
#else
  const char     *algTypes[3] = {"scalable","nonscalable","seqmpi"};
  PetscInt       nalg = 3;
#endif
  PetscInt       alg = 1; /* set nonscalable algorithm as default */
  PetscBool      flg;
  MPI_Comm       comm;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

  /* Set "nonscalable" as default algorithm */
  ierr = PetscStrcmp(C->product->alg,"default",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);

    /* Set "scalable" as default if BN and local nonzeros of A and B are large */
    if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo,Binfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

      if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* scalable algorithm would be 50% slower than nonscalable algorithm */
        ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
        ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);CHKERRQ(ierr);
      }
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  } else {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matproduct_ab_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  }
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}
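/*
   Runtime selection sketch (illustrative; option names as registered above):

     -matmatmult_via scalable        when called through MatMatMult()
     -matproduct_ab_via scalable     when called through the MatProduct API

   The automatic switch above kicks in when B has more than 100000 global columns and
   B->cmap->N > fill*(local allocated nonzeros of A and B); e.g. with fill = 2 and 40000
   local nonzeros, a B with 100001 columns would select the scalable default on that rank.
*/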
/* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;
  Mat            A=product->A,B=product->B;
  const char     *algTypes[3] = {"scalable","nonscalable","at*b"};
  PetscInt       nalg = 3;
  PetscInt       alg = 1; /* set default algorithm */
  PetscBool      flg;
  MPI_Comm       comm;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != B (%D,%D)",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);

  /* Set default algorithm */
  ierr = PetscStrcmp(C->product->alg,"default",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  /* Set "scalable" as default if BN and local nonzeros of A and B are large */
  if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
    MatInfo   Ainfo,Binfo;
    PetscInt  nz_local;
    PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

    ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
    ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr);
    nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

    if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
    ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

    if (alg_scalable) {
      alg = 0; /* scalable algorithm would be 50% slower than nonscalable algorithm */
      ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
      ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,product->fill*nz_local);CHKERRQ(ierr);
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  } else {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matproduct_atb_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  }
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;
  Mat            A=product->A,P=product->B;
  MPI_Comm       comm;
  PetscBool      flg;
  PetscInt       alg=1; /* set default algorithm */
#if !defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
  PetscInt       nalg=4;
#else
  const char     *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
  PetscInt       nalg=5;
#endif
  PetscInt       pN=P->cmap->N;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  /* Set "nonscalable" as default algorithm */
  ierr = PetscStrcmp(C->product->alg,"default",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
    /* Set "scalable" as default if BN and local nonzeros of A and B are large */
    if (pN > 100000) {
      MatInfo   Ainfo,Pinfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* scalable algorithm would be 50% slower than nonscalable algorithm */
        ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
      }
    }
  }

  /* Get runtime option */
  if (product->api_user) {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  } else {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matproduct_ptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  }
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
{
  Mat_Product *product = C->product;
  Mat         A=product->A,R=product->B;

  PetscFunctionBegin;
  /* Check matrix local sizes */
  if (A->cmap->n != R->cmap->n || A->rmap->n != R->cmap->n) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%D, %D), R local (%D,%D)",A->rmap->n,A->rmap->n,R->rmap->n,R->cmap->n);

  C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
  PetscFunctionReturn(0);
}

/*
   Set options for ABC = A*B*C = A*(B*C); ABC's algorithm must be chosen from AB's algorithm
*/
static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;
  PetscBool      flg = PETSC_FALSE;
  PetscInt       alg = 1; /* default algorithm */
  const char     *algTypes[3] = {"scalable","nonscalable","seqmpi"};
  PetscInt       nalg = 3;

  PetscFunctionBegin;
  /* Set default algorithm */
  ierr = PetscStrcmp(C->product->alg,"default",&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  /* Get runtime option */
  if (product->api_user) {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  } else {
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matproduct_abc_via","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
  }
  if (flg) {
    ierr = MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]);CHKERRQ(ierr);
  }

  C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
  C->ops->productsymbolic    = MatProductSymbolic_ABC;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
{
  PetscErrorCode ierr;
  Mat_Product    *product = C->product;

  PetscFunctionBegin;
  switch (product->type) {
  case MATPRODUCT_AB:
    ierr = MatProductSetFromOptions_MPIAIJ_AB(C);CHKERRQ(ierr);
    break;
  case MATPRODUCT_AtB:
    ierr = MatProductSetFromOptions_MPIAIJ_AtB(C);CHKERRQ(ierr);
    break;
  case MATPRODUCT_PtAP:
    ierr = MatProductSetFromOptions_MPIAIJ_PtAP(C);CHKERRQ(ierr);
    break;
  case MATPRODUCT_RARt:
    ierr = MatProductSetFromOptions_MPIAIJ_RARt(C);CHKERRQ(ierr);
    break;
  case MATPRODUCT_ABC:
    ierr = MatProductSetFromOptions_MPIAIJ_ABC(C);CHKERRQ(ierr);
    break;
  default:
    break;
  }
  PetscFunctionReturn(0);
}
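/*
   Illustrative use of the dispatcher above through the MatProduct API (sketch only; consult the
   PETSc manual pages of your version for the exact calling sequence):

     Mat C;
     ierr = MatProductCreate(A,B,NULL,&C);CHKERRQ(ierr);
     ierr = MatProductSetType(C,MATPRODUCT_AtB);CHKERRQ(ierr);      or MATPRODUCT_AB, MATPRODUCT_PtAP, ...
     ierr = MatProductSetAlgorithm(C,"scalable");CHKERRQ(ierr);     optional; "default" picks one heuristically
     ierr = MatProductSetFromOptions(C);CHKERRQ(ierr);              ends up in MatProductSetFromOptions_MPIAIJ()
     ierr = MatProductSymbolic(C);CHKERRQ(ierr);
     ierr = MatProductNumeric(C);CHKERRQ(ierr);
*/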