/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc/private/vecimpl.h>

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
#endif

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
#if defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt       nalg = 3;
#else
  const char     *algTypes[2] = {"scalable","nonscalable"};
  PetscInt       nalg = 2;
#endif
  PetscInt       alg = 1; /* set nonscalable algorithm as default */
  MPI_Comm       comm;
  PetscBool      flg;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
    if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[1],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo,Binfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

      if (B->cmap->N > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg  = 0; /* the scalable algorithm is roughly 50% slower than the nonscalable algorithm */
        ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,fill*nz_local);CHKERRQ(ierr);
      }
    }

    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      ierr = MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);CHKERRQ(ierr);
      break;
#endif
    default:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
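/*
   Illustrative usage sketch (an assumption about typical caller code, not part of this file's API):
   a user computes C = A*B for two MATMPIAIJ matrices through the public interface, which ends up in
   the dispatcher above; the algorithm can be switched at run time with -matmatmult_via scalable|nonscalable.

     Mat A,B,C;
     ...                                                                      // assemble A and B as MATMPIAIJ
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);          // symbolic + numeric phases
     ...                                                                      // change numerical values of A and/or B, same pattern
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);  // numeric phase only
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/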

PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscScalar    *cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscScalar    *apa,*ca;
  PetscInt       cm =C->rmap->n;
  Mat_PtAPMPI    *ptap=c->ptap;
  PetscInt       *api,*apj,*apJ,i,k;
  PetscInt       cstart=C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = NULL;
  if (size >1) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    /* compute apa = A[i,:]*P */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);

    /* set values in C */
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
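/*
   A minimal sketch of what the dense-accumulator product above amounts to (assuming generic CSR
   arrays ai,aj,aa for the local rows of A and pi,pj,pa for P; apa is the dense work row of length
   P->cmap->N that the symbolic phase allocates):

     for (j=ai[i]; j<ai[i+1]; j++) {                                  // nonzeros A(i,col)
       PetscInt    col = aj[j];
       PetscScalar v   = aa[j];
       for (t=pi[col]; t<pi[col+1]; t++) apa[pj[t]] += v*pa[t];       // sparse axpy into the dense row
     }

   The loops over apJ then gather the accumulated entries back into the diagonal (cd) and
   off-diagonal (co) blocks of C and reset apa to zero for the next row.
*/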

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  Mat_MPIAIJ         *a =(Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr      = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscCalloc1(pN,&apa);CHKERRQ(ierr);

  ptap->apa = apa;

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);

  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
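/*
   Illustrative usage sketch (assumed typical caller, not defined in this file): the dense-result path
   above is reached when B has type MATMPIDENSE, e.g.

     Mat A,B,C;   // A is MATMPIAIJ, B is MATMPIDENSE with compatible layouts
     ...
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr); // C comes back as MATMPIDENSE
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);   // reuse C after B's values change

   The MPIAIJ_MPIDense context created in the symbolic phase (workB plus the send/receive buffers)
   is stashed on C in the "workB" container so the numeric phase can be repeated without reallocation.
*/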

/*
   This is a "dummy function" that handles the case where matrix C was created as a dense matrix
   directly by the user and passed to MatMatMult() with the MAT_REUSE_MATRIX option

   It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except it does not create C
*/
PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode         ierr;
  PetscBool              flg;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)B,MATMPIDENSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Second matrix must be mpidense");

  /* Handle case where the user provided the final C matrix rather than calling MatMatMult() with MAT_INITIAL_MATRIX */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"First matrix must be MPIAIJ");

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  ierr = (*C->ops->matmultnumeric)(A,B,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               m     = A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(*C,A,B);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi = ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,valtmp,*ca;
  PetscInt       cm    = C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscScalar    *apa_sparse = ptap->apa;
  PetscInt       *api,*apj,*apJ,i,j,k,row;
  PetscInt       cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1,nextp;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
  if (size >1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
  } else {
    p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
  }

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0] = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1] = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0] = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space = NULL,current_space=NULL;
  Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscReal          afill;
  PetscScalar        *apa;
  PetscTable         ta;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr      = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr);

  /* Calculate apnz_max */
  apnz_max = 0;
  for (i=0; i<am; i++) {
    ierr = PetscTableRemoveAll(ta);CHKERRQ(ierr);
    /* diagonal portion of A */
    nzi  = adi[i+1] - adi[i];
    Jptr = adj+adi[i];  /* cols of A_diag */
    MatMergeRows_SeqAIJ(p_loc,nzi,Jptr,ta);
    ierr = PetscTableGetCount(ta,&apnz);CHKERRQ(ierr);
    if (apnz_max < apnz) apnz_max = apnz;

    /* off-diagonal portion of A */
    nzi  = aoi[i+1] - aoi[i];
    Jptr = aoj+aoi[i];  /* cols of A_off */
    MatMergeRows_SeqAIJ(p_oth,nzi,Jptr,ta);
    ierr = PetscTableGetCount(ta,&apnz);CHKERRQ(ierr);
    if (apnz_max < apnz) apnz_max = apnz;
  }
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  ierr = PetscLLCondensedCreate_Scalable(apnz_max,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;
  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }

    apnz     = *lnk;
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* malloc apa for assembling Cmpi */
  ierr = PetscCalloc1(apnz_max,&apa);CHKERRQ(ierr);

  ptap->apa = apa;
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

/*-------------------------------------------------------------------------*/
PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  const char     *algTypes[3] = {"scalable","nonscalable","matmatmult"};
  PetscInt       alg=0; /* set default algorithm */

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[0],&alg,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    //ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);CHKERRQ(ierr);
      break;
    case 2:
    {
      Mat         Pt;
      Mat_PtAPMPI *ptap;
      Mat_MPIAIJ  *c;
      ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr);
      ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr);
      c        = (Mat_MPIAIJ*)(*C)->data;
      ptap     = c->ptap;
      ptap->Pt = Pt;
      (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
      PetscFunctionReturn(0);
    }
      break;
    default:
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
      break;
    }
    //ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
  }
  //ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr);
  //ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
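/*
   Illustrative usage sketch (assumed typical caller, not part of this file): C = P^T*A through the
   public interface, which lands in the dispatcher above; -mattransposematmult_via
   scalable|nonscalable|matmatmult selects the algorithm, where "matmatmult" forms Pt explicitly
   and calls MatMatMult(Pt,A,...).

     Mat P,A,C;
     ...
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/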
/* This routine only works when scall=MAT_REUSE_MATRIX! */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *c    = (Mat_MPIAIJ*)C->data;
  Mat_PtAPMPI    *ptap = c->ptap;
  Mat            Pt    = ptap->Pt;

  PetscFunctionBegin;
  ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr);
  ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Non-scalable version, use dense axpy */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable_old(Mat P,Mat A,Mat C)
{
  PetscErrorCode      ierr;
  Mat_Merge_SeqsToMPI *merge;
  Mat_MPIAIJ          *p  =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *pd =(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_PtAPMPI         *ptap;
  PetscInt            *adj,*aJ;
  PetscInt            i,j,k,anz,pnz,row,*cj;
  MatScalar           *ada,*aval,*ca,valtmp;
  PetscInt            am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *ai,*aj,*coi,*coj;
  PetscInt            *poJ,*pdJ;
  Mat                 A_loc;
  Mat_SeqAIJ          *a_loc;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap  = c->ptap;
  merge = ptap->merge;

  /* 2) compute numeric C_seq = P_loc^T*A_loc - dominating part */
  /*------------------------------------------------------------*/
  /* get data from symbolic products */
  coi  = merge->coi; coj = merge->coj;
  ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  A_loc = ptap->A_loc;
  ierr  = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  ierr = PetscCalloc1(A->cmap->N,&aval);CHKERRQ(ierr); /* non-scalable!!! */

  for (i=0; i<am; i++) {
    /* 2-a) put A[i,:] to dense array aval */
    anz = ai[i+1] - ai[i];
    adj = aj + ai[i];
    ada = a_loc->a + ai[i];
    for (j=0; j<anz; j++) {
      aval[adj[j]] = ada[j];
    }

    /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
    /*--------------------------------------------------------------*/
    /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
    pnz = po->i[i+1] - po->i[i];
    poJ = po->j + po->i[i];
    pA  = po->a + po->i[i];
    for (j=0; j<pnz; j++) {
      row = poJ[j];
      cnz = coi[row+1] - coi[row];
      cj  = coj + coi[row];
      ca  = coa + coi[row];
      /* perform dense axpy */
      valtmp = pA[j];
      for (k=0; k<cnz; k++) {
        ca[k] += valtmp*aval[cj[k]];
      }
      ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
    }

    /* put the value into Cd (diagonal part) */
    pnz = pd->i[i+1] - pd->i[i];
    pdJ = pd->j + pd->i[i];
    pA  = pd->a + pd->i[i];
    for (j=0; j<pnz; j++) {
      row = pdJ[j];
      cnz = bi[row+1] - bi[row];
      cj  = bj + bi[row];
      ca  = ba + bi[row];
      /* perform dense axpy */
      valtmp = pA[j];
      for (k=0; k<cnz; k++) {
        ca[k] += valtmp*aval[cj[k]];
      }
      ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
    }

    /* zero the current row of Pt*A */
    aJ = aj + ai[i];
    for (k=0; k<anz; k++) aval[aJ[k]] = 0.0;
  }

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;             /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscFree(aval);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat, MatDuplicateOption,Mat*);
/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable_old(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi,A_loc,POt,PDt;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p =(Mat_MPIAIJ*)P->data,*c;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill  =1.0,afill_tmp;
  PetscInt            rstart = P->cmap->rstart,rmax,aN=A->cmap->N;
  PetscScalar         *vals;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;

  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(aN,aN,&lnk,&lnkbt);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscCalloc1(size,&len_si);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++; /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi               = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr  = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(A)) */
  nnz  = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can it not be assembled in the Numeric part? */
  /*------------------------------------------------------------------------------------------*/
  ierr = PetscCalloc1(rmax+1,&vals);CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;

  /* attach the supporting struct to Cmpi for reuse */
  c           = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap     = ptap;
  ptap->api   = NULL;
  ptap->apj   = NULL;
  ptap->merge = merge;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->duplicate = Cmpi->ops->duplicate;

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable_old;
  Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate               = MatDuplicate_MPIAIJ_MatPtAP;

  *C = Cmpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

//================================
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_PtAPMPI        *ptap;
  Mat_MPIAIJ         *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,nsend;
  PetscBT            lnkbt;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;         /* array of length of comm->size, store send/recv matrix values */
  PetscInt           *Jptr,*prmap=p->garray,con,j,Crmax;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*c_loc,*c_oth;
  PetscTable         ta;
#if defined(PETSC_HAVE_HYPRE)
  const char         *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt           nalg = 3;
#else
  const char         *algTypes[2] = {"scalable","nonscalable"};
  PetscInt           nalg = 2;
#endif
  PetscInt           alg = 1; /* set default algorithm */
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif
  PetscBool          flg;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  printf("MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable..\n");
#if 0
  /* pick an algorithm */
  ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
  PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
  ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[1],&alg,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
    MatInfo   Ainfo,Pinfo;
    PetscInt  nz_local;
    PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

    ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
    ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
    nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

    if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
    ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

    if (alg_scalable) {
      alg = 0; /* the scalable algorithm is roughly 50% slower than the nonscalable algorithm */
    }
  }

  if (alg == 0) {
    ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
    (*C)->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
    PetscFunctionReturn(0);

#if defined(PETSC_HAVE_HYPRE)
  } else if (alg == 2) {
    /* Use boomerAMGBuildCoarseOperator */
    ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
    PetscFunctionReturn(0);
#endif
  }

#endif

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

Cmpi */ 1513 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 1514 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 1515 1516 /* Do dense axpy in MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ() */ 1517 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable; 1518 1519 /* create struct Mat_PtAPMPI and attach it to C later */ 1520 ierr = PetscNew(&ptap);CHKERRQ(ierr); 1521 ptap->reuse = MAT_INITIAL_MATRIX; 1522 ptap->algType = alg; 1523 1524 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 1525 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 1526 /* get P_loc by taking all local rows of P */ 1527 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 1528 1529 /* (0) compute Rd = Pd^T, Ro = Po^T */ 1530 /* --------------------------------- */ 1531 ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1532 ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1533 1534 /* (1) compute symbolic A_loc (A_loc = P_loc in ex209) */ 1535 /* -------------------------------------------------------*/ 1536 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 1537 if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 1538 1539 /* create and initialize a table used to estimate Crmax */ 1540 ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */ 1541 MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta); 1542 if (ptap->P_oth) MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta); 1543 ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */ 1544 /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */ 1545 1546 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->AP_loc);CHKERRQ(ierr); /* ptap->AP_loc = A_loc */ 1547 1548 /* (2-1) compute symbolic Co = Ro*A_loc */ 1549 /* ------------------------------------ */ 1550 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr); 1551 1552 /* (3) send coj of C_oth to other processors */ 1553 /* ------------------------------------------ */ 1554 /* determine row ownership */ 1555 ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr); 1556 rowmap->n = pn; 1557 rowmap->bs = 1; 1558 ierr = PetscLayoutSetUp(rowmap);CHKERRQ(ierr); 1559 owners = rowmap->range; 1560 1561 /* determine the number of messages to send, their lengths */ 1562 ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr); 1563 ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1564 ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1565 1566 c_oth = (Mat_SeqAIJ*)ptap->C_oth->data; 1567 coi = c_oth->i; coj = c_oth->j; 1568 con = ptap->C_oth->rmap->n; 1569 proc = 0; 1570 for (i=0; i<con; i++) { 1571 while (prmap[i] >= owners[proc+1]) proc++; 1572 len_si[proc]++; /* num of rows in Co(=Pt*A) to be sent to [proc] */ 1573 len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */ 1574 } 1575 1576 len = 0; /* max length of buf_si[], see (4) */ 1577 owners_co[0] = 0; 1578 nsend = 0; 1579 for (proc=0; proc<size; proc++) { 1580 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1581 if (len_s[proc]) { 1582 nsend++; 1583 len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */ 1584 len += len_si[proc]; 1585 } 1586 } 1587 1588 /* determine the number and length of messages 
to receive for coi and coj */ 1589 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr); 1590 ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr); 1591 1592 /* post the Irecv and Isend of coj */ 1593 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1594 ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1595 ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr); 1596 for (proc=0, k=0; proc<size; proc++) { 1597 if (!len_s[proc]) continue; 1598 i = owners_co[proc]; 1599 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1600 k++; 1601 } 1602 1603 /* (2-2) compute symbolic C_loc = Rd*A_loc */ 1604 /* ---------------------------------------- */ 1605 ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr); 1606 c_loc = (Mat_SeqAIJ*)ptap->C_loc->data; 1607 1608 /* receives of coj are complete */ 1609 for (i=0; i<nrecv; i++) { 1610 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1611 } 1612 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1613 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1614 1615 /* add received column indices into ta to update Crmax */ 1616 for (k=0; k<nrecv; k++) {/* k-th received message */ 1617 Jptr = buf_rj[k]; 1618 for (j=0; j<len_r[k]; j++) { 1619 ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr); 1620 } 1621 } 1622 ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); 1623 ierr = PetscTableDestroy(&ta);CHKERRQ(ierr); 1624 1625 /* (4) send and recv coi */ 1626 /*-----------------------*/ 1627 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1628 ierr = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1629 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 1630 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1631 for (proc=0,k=0; proc<size; proc++) { 1632 if (!len_s[proc]) continue; 1633 /* form outgoing message for i-structure: 1634 buf_si[0]: nrows to be sent 1635 [1:nrows]: row index (global) 1636 [nrows+1:2*nrows+1]: i-structure index 1637 */ 1638 /*-------------------------------------------*/ 1639 nrows = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */ 1640 buf_si_i = buf_si + nrows+1; 1641 buf_si[0] = nrows; 1642 buf_si_i[0] = 0; 1643 nrows = 0; 1644 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1645 nzi = coi[i+1] - coi[i]; 1646 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1647 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1648 nrows++; 1649 } 1650 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1651 k++; 1652 buf_si += len_si[proc]; 1653 } 1654 for (i=0; i<nrecv; i++) { 1655 ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1656 } 1657 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1658 if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);} 1659 1660 ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr); 1661 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1662 ierr = PetscFree(swaits);CHKERRQ(ierr); 1663 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1664 1665 /* (5) compute the local portion of Cmpi */ 1666 /* ------------------------------------------ */ 1667 /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */ 1668 ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr); 1669 current_space = free_space; 1670 1671 ierr = 
PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr); 1672 for (k=0; k<nrecv; k++) { 1673 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */ 1674 nrows = *buf_ri_k[k]; 1675 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */ 1676 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */ 1677 } 1678 1679 ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr); 1680 ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr); 1681 for (i=0; i<pn; i++) { 1682 /* add C_loc into Cmpi */ 1683 nzi = c_loc->i[i+1] - c_loc->i[i]; 1684 Jptr = c_loc->j + c_loc->i[i]; 1685 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1686 1687 /* add received col data into lnk */ 1688 for (k=0; k<nrecv; k++) { /* k-th received message */ 1689 if (i == *nextrow[k]) { /* i-th row */ 1690 nzi = *(nextci[k]+1) - *nextci[k]; 1691 Jptr = buf_rj[k] + *nextci[k]; 1692 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1693 nextrow[k]++; nextci[k]++; 1694 } 1695 } 1696 nzi = lnk[0]; 1697 1698 /* copy data into free space, then initialize lnk */ 1699 ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1700 ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr); 1701 } 1702 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1703 ierr = PetscLLCondensedDestroy(lnk,lnkbt);CHKERRQ(ierr); 1704 ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr); 1705 1706 /* local sizes and preallocation */ 1707 ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1708 ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr); 1709 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 1710 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1711 1712 /* members in merge */ 1713 ierr = PetscFree(id_r);CHKERRQ(ierr); 1714 ierr = PetscFree(len_r);CHKERRQ(ierr); 1715 ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr); 1716 ierr = PetscFree(buf_ri);CHKERRQ(ierr); 1717 ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr); 1718 ierr = PetscFree(buf_rj);CHKERRQ(ierr); 1719 ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr); 1720 1721 /* attach the supporting struct to Cmpi for reuse */ 1722 c = (Mat_MPIAIJ*)Cmpi->data; 1723 c->ptap = ptap; 1724 ptap->duplicate = Cmpi->ops->duplicate; 1725 ptap->destroy = Cmpi->ops->destroy; 1726 ptap->view = Cmpi->ops->view; 1727 1728 if (alg == 1) { 1729 ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr); 1730 } 1731 1732 /* Cmpi is not ready for use - assembly will be done by MatTransposeMatMultNumeric() */ 1733 Cmpi->assembled = PETSC_FALSE; 1734 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 1735 /* Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP; */ 1736 /* Cmpi->ops->view = MatView_MPIAIJ_PtAP; */ 1737 *C = Cmpi; 1738 PetscFunctionReturn(0); 1739 } 1740 1741 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C) 1742 { 1743 PetscErrorCode ierr; 1744 Mat_MPIAIJ *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1745 Mat_SeqAIJ *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 1746 Mat_SeqAIJ *ap,*p_loc,*p_oth=NULL,*c_seq; 1747 Mat_PtAPMPI *ptap = c->ptap; 1748 Mat AP_loc,C_loc,C_oth; 1749 PetscInt i,rstart,rend,cm,ncols,row; 1750 PetscInt *api,*apj,am = A->rmap->n,j,col,apnz; 1751 PetscScalar *apa; 1752 const PetscInt *cols; 1753 const PetscScalar *vals; 1754 1755 PetscFunctionBegin; 1756 ierr = 
MatZeroEntries(C);CHKERRQ(ierr); 1757 ierr = PetscInfo(C,"MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable\n");CHKERRQ(ierr); 1758 1759 /* 1) get R = Pd^T,Ro = Po^T */ 1760 if (ptap->reuse == MAT_REUSE_MATRIX) { 1761 ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr); 1762 ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr); 1763 } 1764 1765 /* 2) get AP_loc */ 1766 AP_loc = ptap->AP_loc; 1767 ap = (Mat_SeqAIJ*)AP_loc->data; 1768 1769 /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 1770 /*-----------------------------------------------------*/ 1771 if (ptap->reuse == MAT_REUSE_MATRIX) { 1772 /* P_oth and P_loc are obtained in the symbolic phase when reuse == MAT_INITIAL_MATRIX */ 1773 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 1774 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 1775 } 1776 1777 /* 2-2) compute numeric A_loc*P - dominating part */ 1778 /* ---------------------------------------------- */ 1779 /* get data from symbolic products */ 1780 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 1781 if (ptap->P_oth) { 1782 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 1783 } 1784 apa = ptap->apa; 1785 api = ap->i; 1786 apj = ap->j; 1787 1788 for (i=0; i<am; i++) { 1789 /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */ 1790 AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa); 1791 apnz = api[i+1] - api[i]; 1792 for (j=0; j<apnz; j++) { 1793 col = apj[j+api[i]]; 1794 ap->a[j+ap->i[i]] = apa[col]; 1795 apa[col] = 0.0; 1796 } 1797 ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr); 1798 } 1799 1800 /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */ 1801 ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr); 1802 ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr); 1803 C_loc = ptap->C_loc; 1804 C_oth = ptap->C_oth; 1805 1806 /* add C_loc and C_oth to C */ 1807 ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr); 1808 1809 /* C_loc -> C */ 1810 cm = C_loc->rmap->N; 1811 c_seq = (Mat_SeqAIJ*)C_loc->data; 1812 cols = c_seq->j; 1813 vals = c_seq->a; 1814 for (i=0; i<cm; i++) { 1815 ncols = c_seq->i[i+1] - c_seq->i[i]; 1816 row = rstart + i; 1817 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1818 cols += ncols; vals += ncols; 1819 } 1820 1821 /* Co -> C, off-processor part */ 1822 cm = C_oth->rmap->N; 1823 c_seq = (Mat_SeqAIJ*)C_oth->data; 1824 cols = c_seq->j; 1825 vals = c_seq->a; 1826 for (i=0; i<cm; i++) { 1827 ncols = c_seq->i[i+1] - c_seq->i[i]; 1828 row = p->garray[i]; 1829 ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr); 1830 cols += ncols; vals += ncols; 1831 } 1832 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1833 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1834 1835 ptap->reuse = MAT_REUSE_MATRIX; 1836 PetscFunctionReturn(0); 1837 } 1838 /* ============================= */ 1839 1840 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C) 1841 { 1842 PetscErrorCode ierr; 1843 Mat_Merge_SeqsToMPI *merge; 1844 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1845 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 1846 Mat_PtAPMPI *ptap; 1847 PetscInt *adj; 1848 PetscInt i,j,k,anz,pnz,row,*cj,nexta; 1849 MatScalar *ada,*ca,valtmp; 1850 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 1851 MPI_Comm comm; 1852 PetscMPIInt size,rank,taga,*len_s; 1853 PetscInt 
*owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 1854 PetscInt **buf_ri,**buf_rj; 1855 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 1856 MPI_Request *s_waits,*r_waits; 1857 MPI_Status *status; 1858 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 1859 PetscInt *ai,*aj,*coi,*coj; 1860 PetscInt *poJ,*pdJ; 1861 Mat A_loc; 1862 Mat_SeqAIJ *a_loc; 1863 1864 PetscFunctionBegin; 1865 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1866 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1867 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1868 1869 ptap = c->ptap; 1870 merge = ptap->merge; 1871 1872 /* 2) compute numeric C_seq = P_loc^T*A_loc */ 1873 /*------------------------------------------*/ 1874 /* get data from symbolic products */ 1875 coi = merge->coi; coj = merge->coj; 1876 ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr); 1877 bi = merge->bi; bj = merge->bj; 1878 owners = merge->rowmap->range; 1879 ierr = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr); 1880 1881 /* get A_loc by taking all local rows of A */ 1882 A_loc = ptap->A_loc; 1883 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 1884 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1885 ai = a_loc->i; 1886 aj = a_loc->j; 1887 1888 for (i=0; i<am; i++) { 1889 anz = ai[i+1] - ai[i]; 1890 adj = aj + ai[i]; 1891 ada = a_loc->a + ai[i]; 1892 1893 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 1894 /*-------------------------------------------------------------*/ 1895 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 1896 pnz = po->i[i+1] - po->i[i]; 1897 poJ = po->j + po->i[i]; 1898 pA = po->a + po->i[i]; 1899 for (j=0; j<pnz; j++) { 1900 row = poJ[j]; 1901 cj = coj + coi[row]; 1902 ca = coa + coi[row]; 1903 /* perform sparse axpy */ 1904 nexta = 0; 1905 valtmp = pA[j]; 1906 for (k=0; nexta<anz; k++) { 1907 if (cj[k] == adj[nexta]) { 1908 ca[k] += valtmp*ada[nexta]; 1909 nexta++; 1910 } 1911 } 1912 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1913 } 1914 1915 /* put the value into Cd (diagonal part) */ 1916 pnz = pd->i[i+1] - pd->i[i]; 1917 pdJ = pd->j + pd->i[i]; 1918 pA = pd->a + pd->i[i]; 1919 for (j=0; j<pnz; j++) { 1920 row = pdJ[j]; 1921 cj = bj + bi[row]; 1922 ca = ba + bi[row]; 1923 /* perform sparse axpy */ 1924 nexta = 0; 1925 valtmp = pA[j]; 1926 for (k=0; nexta<anz; k++) { 1927 if (cj[k] == adj[nexta]) { 1928 ca[k] += valtmp*ada[nexta]; 1929 nexta++; 1930 } 1931 } 1932 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1933 } 1934 } 1935 1936 /* 3) send and recv matrix values coa */ 1937 /*------------------------------------*/ 1938 buf_ri = merge->buf_ri; 1939 buf_rj = merge->buf_rj; 1940 len_s = merge->len_s; 1941 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 1942 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 1943 1944 ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr); 1945 for (proc=0,k=0; proc<size; proc++) { 1946 if (!len_s[proc]) continue; 1947 i = merge->owners_co[proc]; 1948 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 1949 k++; 1950 } 1951 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 1952 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 1953 1954 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 1955 ierr = PetscFree(r_waits);CHKERRQ(ierr); 1956 ierr = PetscFree(coa);CHKERRQ(ierr); 1957 1958 /* 4) insert local Cseq and 
received values into Cmpi */ 1959 /*----------------------------------------------------*/ 1960 ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr); 1961 for (k=0; k<merge->nrecv; k++) { 1962 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */ 1963 nrows = *(buf_ri_k[k]); 1964 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th received i-structure */ 1965 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */ 1966 } 1967 1968 for (i=0; i<cm; i++) { 1969 row = owners[rank] + i; /* global row index of C_seq */ 1970 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1971 ba_i = ba + bi[i]; 1972 bnz = bi[i+1] - bi[i]; 1973 /* add received vals into ba */ 1974 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1975 /* i-th row */ 1976 if (i == *nextrow[k]) { 1977 cnz = *(nextci[k]+1) - *nextci[k]; 1978 cj = buf_rj[k] + *(nextci[k]); 1979 ca = abuf_r[k] + *(nextci[k]); 1980 nextcj = 0; 1981 for (j=0; nextcj<cnz; j++) { 1982 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1983 ba_i[j] += ca[nextcj++]; 1984 } 1985 } 1986 nextrow[k]++; nextci[k]++; 1987 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 1988 } 1989 } 1990 ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr); 1991 } 1992 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1993 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1994 1995 ierr = PetscFree(ba);CHKERRQ(ierr); 1996 ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr); 1997 ierr = PetscFree(abuf_r);CHKERRQ(ierr); 1998 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1999 PetscFunctionReturn(0); 2000 } 2001 2002 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ(); 2003 it differs from MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable in using PetscLLCondensedCreate_Scalable() */ 2004 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C) 2005 { 2006 PetscErrorCode ierr; 2007 Mat Cmpi,A_loc,POt,PDt; 2008 Mat_PtAPMPI *ptap; 2009 PetscFreeSpaceList free_space=NULL,current_space=NULL; 2010 Mat_MPIAIJ *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c; 2011 PetscInt *pdti,*pdtj,*poti,*potj,*ptJ; 2012 PetscInt nnz; 2013 PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row; 2014 PetscInt am =A->rmap->n,pn=P->cmap->n; 2015 MPI_Comm comm; 2016 PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri; 2017 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 2018 PetscInt len,proc,*dnz,*onz,*owners; 2019 PetscInt nzi,*bi,*bj; 2020 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 2021 MPI_Request *swaits,*rwaits; 2022 MPI_Status *sstatus,rstatus; 2023 Mat_Merge_SeqsToMPI *merge; 2024 PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j; 2025 PetscReal afill =1.0,afill_tmp; 2026 PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax; 2027 PetscScalar *vals; 2028 Mat_SeqAIJ *a_loc,*pdt,*pot; 2029 PetscTable ta; 2030 2031 PetscFunctionBegin; 2032 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 2033 /* check if matrix local sizes are compatible */ 2034 if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend); 2035 2036 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 2037 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 2038 2039 /* create struct Mat_PtAPMPI and attach it to C later */ 2040 
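/*
   Editor's note - an illustrative sketch only, not executed here.  The header comment above
   says this routine differs from the nonscalable variant in using the scalable condensed
   linked list.  Assuming a capacity estimate 'maxnz' and a caller-provided PetscInt buffer
   'buf' of length at least nnz (both hypothetical names), the pattern used further below is
   roughly:

     PetscInt *lnk,nnz,row1[] = {2,5,9},row2[] = {5,7};
     ierr = PetscLLCondensedCreate_Scalable(maxnz,&lnk);CHKERRQ(ierr);
     ierr = PetscLLCondensedAddSorted_Scalable(3,row1,lnk);CHKERRQ(ierr);
     ierr = PetscLLCondensedAddSorted_Scalable(2,row2,lnk);CHKERRQ(ierr);
     nnz  = lnk[0];                                             (here 4: the union {2,5,7,9})
     ierr = PetscLLCondensedClean_Scalable(nnz,buf,lnk);CHKERRQ(ierr);  (drain the sorted union into buf)
     ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
*/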
ierr = PetscNew(&ptap);CHKERRQ(ierr); 2041 2042 /* get A_loc by taking all local rows of A */ 2043 ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr); 2044 2045 ptap->A_loc = A_loc; 2046 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 2047 ai = a_loc->i; 2048 aj = a_loc->j; 2049 2050 /* determine symbolic Co=(p->B)^T*A - send to others */ 2051 /*----------------------------------------------------*/ 2052 ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr); 2053 pdt = (Mat_SeqAIJ*)PDt->data; 2054 pdti = pdt->i; pdtj = pdt->j; 2055 2056 ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr); 2057 pot = (Mat_SeqAIJ*)POt->data; 2058 poti = pot->i; potj = pot->j; 2059 2060 /* then, compute symbolic Co = (p->B)^T*A */ 2061 pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors 2062 >= (num of nonzero rows of C_seq) - pn */ 2063 ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr); 2064 coi[0] = 0; 2065 2066 /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */ 2067 nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am])); 2068 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 2069 current_space = free_space; 2070 2071 /* create and initialize a table used to estimate Armax */ 2072 ierr = PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);CHKERRQ(ierr); 2073 MatRowMergeMax_SeqAIJ(a_loc,am,ta); 2074 ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr); 2075 2076 ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr); 2077 2078 for (i=0; i<pon; i++) { 2079 pnz = poti[i+1] - poti[i]; 2080 ptJ = potj + poti[i]; 2081 for (j=0; j<pnz; j++) { 2082 row = ptJ[j]; /* row of A_loc == col of Pot */ 2083 anz = ai[row+1] - ai[row]; 2084 Jptr = aj + ai[row]; 2085 /* add non-zero cols of AP into the sorted linked list lnk */ 2086 ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr); 2087 } 2088 nnz = lnk[0]; 2089 2090 /* If free space is not available, double the total space in the list */ 2091 if (current_space->local_remaining<nnz) { 2092 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr); 2093 nspacedouble++; 2094 } 2095 2096 /* Copy data into free space, and zero out dense rows */ 2097 ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr); 2098 2099 current_space->array += nnz; 2100 current_space->local_used += nnz; 2101 current_space->local_remaining -= nnz; 2102 2103 coi[i+1] = coi[i] + nnz; 2104 } 2105 2106 ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr); 2107 ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr); 2108 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); /* must destroy to get a new one for C */ 2109 2110 afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1); 2111 if (afill_tmp > afill) afill = afill_tmp; 2112 2113 /* send j-array (coj) of Co to other processors */ 2114 /*----------------------------------------------*/ 2115 /* determine row ownership */ 2116 ierr = PetscNew(&merge);CHKERRQ(ierr); 2117 ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr); 2118 2119 merge->rowmap->n = pn; 2120 merge->rowmap->bs = 1; 2121 2122 ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr); 2123 owners = merge->rowmap->range; 2124 2125 /* determine the number of messages to send, their lengths */ 2126 ierr = PetscCalloc1(size,&len_si);CHKERRQ(ierr); 2127 ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr); 2128 2129 len_s = merge->len_s; 2130 merge->nsend = 0; 2131 2132 ierr = 
PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr); 2133 ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 2134 2135 proc = 0; 2136 for (i=0; i<pon; i++) { 2137 while (prmap[i] >= owners[proc+1]) proc++; 2138 len_si[proc]++; /* num of rows in Co to be sent to [proc] */ 2139 len_s[proc] += coi[i+1] - coi[i]; 2140 } 2141 2142 len = 0; /* max length of buf_si[] */ 2143 owners_co[0] = 0; 2144 for (proc=0; proc<size; proc++) { 2145 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 2146 if (len_si[proc]) { 2147 merge->nsend++; 2148 len_si[proc] = 2*(len_si[proc] + 1); 2149 len += len_si[proc]; 2150 } 2151 } 2152 2153 /* determine the number and length of messages to receive for coi and coj */ 2154 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr); 2155 ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr); 2156 2157 /* post the Irecv and Isend of coj */ 2158 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 2159 ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 2160 ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr); 2161 for (proc=0, k=0; proc<size; proc++) { 2162 if (!len_s[proc]) continue; 2163 i = owners_co[proc]; 2164 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 2165 k++; 2166 } 2167 2168 /* receives and sends of coj are complete */ 2169 ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr); 2170 for (i=0; i<merge->nrecv; i++) { 2171 PetscMPIInt icompleted; 2172 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 2173 } 2174 ierr = PetscFree(rwaits);CHKERRQ(ierr); 2175 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 2176 2177 /* add received column indices into table to update Armax */ 2178 /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/examples/tutorials/ex56.c! 
*/ 2179 for (k=0; k<merge->nrecv; k++) {/* k-th received message */ 2180 Jptr = buf_rj[k]; 2181 for (j=0; j<merge->len_r[k]; j++) { 2182 ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr); 2183 } 2184 } 2185 ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr); 2186 /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */ 2187 2188 /* send and recv coi */ 2189 /*-------------------*/ 2190 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 2191 ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 2192 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 2193 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 2194 for (proc=0,k=0; proc<size; proc++) { 2195 if (!len_s[proc]) continue; 2196 /* form outgoing message for i-structure: 2197 buf_si[0]: nrows to be sent 2198 [1:nrows]: row index (global) 2199 [nrows+1:2*nrows+1]: i-structure index 2200 */ 2201 /*-------------------------------------------*/ 2202 nrows = len_si[proc]/2 - 1; 2203 buf_si_i = buf_si + nrows+1; 2204 buf_si[0] = nrows; 2205 buf_si_i[0] = 0; 2206 nrows = 0; 2207 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 2208 nzi = coi[i+1] - coi[i]; 2209 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 2210 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 2211 nrows++; 2212 } 2213 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 2214 k++; 2215 buf_si += len_si[proc]; 2216 } 2217 i = merge->nrecv; 2218 while (i--) { 2219 PetscMPIInt icompleted; 2220 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 2221 } 2222 ierr = PetscFree(rwaits);CHKERRQ(ierr); 2223 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 2224 ierr = PetscFree(len_si);CHKERRQ(ierr); 2225 ierr = PetscFree(len_ri);CHKERRQ(ierr); 2226 ierr = PetscFree(swaits);CHKERRQ(ierr); 2227 ierr = PetscFree(sstatus);CHKERRQ(ierr); 2228 ierr = PetscFree(buf_s);CHKERRQ(ierr); 2229 2230 /* compute the local portion of C (mpi mat) */ 2231 /*------------------------------------------*/ 2232 /* allocate bi array and free space for accumulating nonzero column info */ 2233 ierr = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr); 2234 bi[0] = 0; 2235 2236 /* set initial free space to be fill*(nnz(P) + nnz(AP)) */ 2237 nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am]))); 2238 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 2239 current_space = free_space; 2240 2241 ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr); 2242 for (k=0; k<merge->nrecv; k++) { 2243 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */ 2244 nrows = *buf_ri_k[k]; 2245 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th received i-structure */ 2246 nextci[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */ 2247 } 2248 2249 ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr); 2250 ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr); 2251 rmax = 0; 2252 for (i=0; i<pn; i++) { 2253 /* add pdt[i,:]*AP into lnk */ 2254 pnz = pdti[i+1] - pdti[i]; 2255 ptJ = pdtj + pdti[i]; 2256 for (j=0; j<pnz; j++) { 2257 row = ptJ[j]; /* row of AP == col of Pt */ 2258 anz = ai[row+1] - ai[row]; 2259 Jptr = aj + ai[row]; 2260 /* add non-zero cols of AP into the sorted linked list lnk */ 2261 
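/*
   Editor's note - a small made-up example of the merge performed by the call below: if lnk
   currently holds the sorted indices {3,8} and Jptr = {1,8,12} with anz = 3, then after
   PetscLLCondensedAddSorted_Scalable() lnk[0] == 4 and a later PetscLLCondensedClean_Scalable()
   drains the sorted union {1,3,8,12}.
*/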
ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr); 2262 } 2263 2264 /* add received col data into lnk */ 2265 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 2266 if (i == *nextrow[k]) { /* i-th row */ 2267 nzi = *(nextci[k]+1) - *nextci[k]; 2268 Jptr = buf_rj[k] + *nextci[k]; 2269 ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr); 2270 nextrow[k]++; nextci[k]++; 2271 } 2272 } 2273 nnz = lnk[0]; 2274 2275 /* if free space is not available, make more free space */ 2276 if (current_space->local_remaining<nnz) { 2277 ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr); 2278 nspacedouble++; 2279 } 2280 /* copy data into free space, then initialize lnk */ 2281 ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr); 2282 ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr); 2283 2284 current_space->array += nnz; 2285 current_space->local_used += nnz; 2286 current_space->local_remaining -= nnz; 2287 2288 bi[i+1] = bi[i] + nnz; 2289 if (nnz > rmax) rmax = nnz; 2290 } 2291 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 2292 2293 ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr); 2294 ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr); 2295 afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1); 2296 if (afill_tmp > afill) afill = afill_tmp; 2297 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); 2298 ierr = PetscTableDestroy(&ta);CHKERRQ(ierr); 2299 2300 ierr = MatDestroy(&POt);CHKERRQ(ierr); 2301 ierr = MatDestroy(&PDt);CHKERRQ(ierr); 2302 2303 /* create symbolic parallel matrix Cmpi - why can this not be assembled in the numeric part? */ 2304 /*----------------------------------------------------------------------------------*/ 2305 ierr = PetscCalloc1(rmax+1,&vals);CHKERRQ(ierr); 2306 2307 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 2308 ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2309 ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr); 2310 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 2311 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 2312 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 2313 ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr); 2314 for (i=0; i<pn; i++) { 2315 row = i + rstart; 2316 nnz = bi[i+1] - bi[i]; 2317 Jptr = bj + bi[i]; 2318 ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr); 2319 } 2320 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2321 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2322 ierr = PetscFree(vals);CHKERRQ(ierr); 2323 2324 merge->bi = bi; 2325 merge->bj = bj; 2326 merge->coi = coi; 2327 merge->coj = coj; 2328 merge->buf_ri = buf_ri; 2329 merge->buf_rj = buf_rj; 2330 merge->owners_co = owners_co; 2331 2332 /* attach the supporting struct to Cmpi for reuse */ 2333 c = (Mat_MPIAIJ*)Cmpi->data; 2334 2335 c->ptap = ptap; 2336 ptap->api = NULL; 2337 ptap->apj = NULL; 2338 ptap->merge = merge; 2339 ptap->apa = NULL; 2340 ptap->destroy = Cmpi->ops->destroy; 2341 ptap->duplicate = Cmpi->ops->duplicate; 2342 2343 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ; 2344 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 2345 Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP; 2346 2347 *C = Cmpi; 2348 #if defined(PETSC_USE_INFO) 2349 if (bi[pn] != 0) { 2350 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill 
ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr); 2351 ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr); 2352 } else { 2353 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 2354 } 2355 #endif 2356 PetscFunctionReturn(0); 2357 } 2358
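/*
   Editor's note - a minimal usage sketch, not part of this file's build (hence the #if 0
   guard, following the convention used earlier in this file).  It assumes P and A are
   assembled MATMPIAIJ matrices with matching local row layouts and shows how the
   symbolic/numeric kernels above are normally reached through the public
   MatTransposeMatMult() interface; ExampleTransposeMatMultUsage() is a hypothetical name.
*/
#if 0
static PetscErrorCode ExampleTransposeMatMultUsage(Mat P,Mat A)
{
  PetscErrorCode ierr;
  Mat            C;

  PetscFunctionBegin;
  /* first call: symbolic + numeric phases, C = P^T * A */
  ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);

  /* ... update the numerical values of A, keeping its nonzero pattern ... */

  /* second call: reuse the symbolic data attached to C; only the numeric phase runs */
  ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);

  ierr = MatDestroy(&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif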