/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc/private/vecimpl.h>

#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat*);
#endif

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
#if defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[4] = {"scalable","nonscalable","seqmpi","hypre"};
  PetscInt       nalg = 4;
#else
  const char     *algTypes[3] = {"scalable","nonscalable","seqmpi"};
  PetscInt       nalg = 3;
#endif
  PetscInt       alg = 1; /* set nonscalable algorithm as default */
  MPI_Comm       comm;
  PetscBool      flg;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
    if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);

    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[1],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
      MatInfo   Ainfo,Binfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(B,MAT_LOCAL,&Binfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);

      if (B->cmap->N > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg  = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
        ierr = PetscInfo2(B,"Use scalable algorithm, BN %D, fill*nz_allocated %g\n",B->cmap->N,fill*nz_local);CHKERRQ(ierr);
      }
    }

    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr);
      break;
    case 2:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 3:
      ierr = MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C);CHKERRQ(ierr);
      break;
#endif
    default:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
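
/*
   Usage sketch (illustrative, not part of this file): the dispatcher above is normally
   reached through the public MatMatMult() interface on assembled MPIAIJ matrices.
   A, B, C and the fill estimate 2.0 below are placeholders.

     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   first call: symbolic + numeric
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     later calls: numeric phase only

   The algorithm is selectable at run time, e.g. "-matmatmult_via scalable", matching the
   algTypes[] list registered above.
*/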

PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscScalar    *cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscScalar    *apa,*ca;
  PetscInt       cm = C->rmap->n;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscInt       *api,*apj,*apJ,i,k;
  PetscInt       cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = NULL;
  if (size > 1) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    /* compute apa = A[i,:]*P */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);

    /* set values in C */
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]        = apa[apJ[k]];
      apa[apJ[k++]] = 0.0;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
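
/*
   Illustration (assumed small example, not executed code): with locally owned columns
   starting at cstart = 4, a row of the dense accumulator apa whose symbolic pattern apJ
   holds global columns {1,4,5,9} is split by the loops above into

     1st off-diagonal part of C : column 1      (apJ[k] < cstart)
     diagonal part of C         : columns 4,5   (the locally owned diagonal block)
     2nd off-diagonal part of C : column 9      (columns past the diagonal block)

   and each visited apa[] entry is reset to 0.0 so the accumulator is clean for the next row.
*/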

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;
  MatType            mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr      = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr          = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscCalloc1(pN,&apa);CHKERRQ(ierr);

  ptap->apa = apa;

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);

  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);

  ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi,apj,api);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
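
/*
   The fill ratio reported above is simply nnz(AP) over nnz(A_diag)+nnz(A_off)+nnz(P_loc).
   A made-up example: if the product ends up with api[am] = 12000 nonzeros while the inputs
   contribute 4000 + 1000 + 3000 = 8000, then afill = 12000/8001 + 1e-5 ~= 1.5, and passing
   fill >= 1.5 to the next MatMatMult() with the same sparsity pattern typically avoids the
   FreeSpace reallocations counted in nspacedouble.
*/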

PETSC_INTERN PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#include <petsc/private/vecscatterimpl.h>
/*
    This is a "dummy function" that handles the case where matrix C was created as a dense
  matrix directly by the user and passed to MatMatMult() with the MAT_REUSE_MATRIX option.

  It is the same as MatMatMultSymbolic_MPIAIJ_MPIDense() except that it does not create C.

  Developer Notes: This directly accesses information inside the VecScatter associated with
  the matrix-vector product for this matrix. This is not desirable.
*/
PetscErrorCode MatMatMultNumeric_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode         ierr;
  PetscBool              flg;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)B,MATMPIDENSE,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Second matrix must be mpidense");

  /* Handle the case where the user provided the final C matrix rather than calling MatMatMult() with MAT_INITIAL_MATRIX */
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"First matrix must be MPIAIJ");

  C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)C,"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  ierr = (*C->ops->matmultnumeric)(A,B,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               m = A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(*C,A,B);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;

  ierr = PetscNew(&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],&contents->rvalues,
                      B->cmap->N*to->starts[to->n],&contents->svalues,
                      from->n,&contents->rwaits,
                      to->n,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N,nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
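
/*
   Index bookkeeping in MatMPIDenseScatter(), spelled out for one (assumed) message: B is
   stored column-major with local leading dimension nrowsB, so entry (row r, col k) of the
   local part of B lives at b[r + nrowsB*k].  The pack loop copies the needed rows one at a
   time, so message i carries a (sstarts[i+1]-sstarts[i]) x ncols block laid out row-major as
   svalues[ncols*(sstarts[i]+j) + k].  The unpack loop places each received row back
   column-major into workB (leading dimension nrows), at w[rindices[...] + nrows*k].
*/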

extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad  = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd  = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi = ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,valtmp,*ca;
  PetscInt       cm   = C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscScalar    *apa_sparse = ptap->apa;
  PetscInt       *api,*apj,*apJ,i,j,k,row;
  PetscInt       cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1,nextp;
  MPI_Comm       comm;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
  } else {
    p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
  }

  api = ptap->api;
  apj = ptap->apj;
  for (i=0; i<cm; i++) {
    apJ = apj + api[i];

    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      /* perform sparse axpy */
      valtmp = ada[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      /* perform sparse axpy */
      valtmp = aoa[j];
      nextp  = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
          apa_sparse[k] += valtmp*pa[nextp++];
        }
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]        = apa_sparse[k];
      apa_sparse[k] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
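
/*
   The "sparse axpy" above relies on apJ (the symbolic column pattern of row i of AP) being a
   sorted superset of each contributing row of P.  A made-up example: apJ = {2,5,7,9} and a
   P row with columns pj = {5,9} and values pa = {1.0,3.0}; the scan advances k over apJ and,
   on the two matches, adds valtmp*1.0 into apa_sparse[1] and valtmp*3.0 into apa_sparse[3],
   leaving the other accumulator slots untouched.
*/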

/* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0;
  PetscInt           am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
  PetscReal          afill;
  PetscScalar        *apa;
  MatType            mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  if (size > 1) {
    p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
    pi_oth = p_oth->i; pj_oth = p_oth->j;
  } else {
    p_oth  = NULL;
    pi_oth = NULL; pj_oth = NULL;
  }

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr      = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  ierr = PetscLLCondensedCreate_Scalable(lsize,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr          = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space);CHKERRQ(ierr);
  current_space = free_space;
  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* Expand list if it is not long enough */
      if (pnz+apnz_max > lsize) {
        lsize = pnz+apnz_max;
        ierr  = PetscLLCondensedExpand_Scalable(lsize,&lnk);CHKERRQ(ierr);
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      apnz     = *lnk; /* The first element in the list is the number of items in the list */
      api[i+1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz;
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      /* Expand list if it is not long enough */
      if (pnz+apnz_max > lsize) {
        lsize = pnz + apnz_max;
        ierr  = PetscLLCondensedExpand_Scalable(lsize,&lnk);CHKERRQ(ierr);
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      apnz     = *lnk; /* The first element in the list is the number of items in the list */
      api[i+1] = api[i] + apnz;
      if (apnz > apnz_max) apnz_max = apnz;
    }
    apnz     = *lnk;
    api[i+1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc1(api[am]+1,&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);

  /* malloc apa for assembling Cmpi */
  ierr      = PetscCalloc1(apnz_max,&apa);CHKERRQ(ierr);
  ptap->apa = apa;

  ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi,apj,api);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;
  *C      = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

/* This function is needed for the seqMPI matrix-matrix multiplication. */
/* Three input arrays are merged to one output array. The size of the   */
/* output array is also output. Duplicate entries only show up once.    */
static void Merge3SortedArrays(PetscInt size1, PetscInt *in1,
                               PetscInt size2, PetscInt *in2,
                               PetscInt size3, PetscInt *in3,
                               PetscInt *size4, PetscInt *out)
{
  PetscInt i = 0, j = 0, k = 0, l = 0;

  /* Traverse all three arrays */
  while (i<size1 && j<size2 && k<size3) {
    if (in1[i] < in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i++];
    }
    else if (in2[j] < in1[i] && in2[j] < in3[k]) {
      out[l++] = in2[j++];
    }
    else if (in3[k] < in1[i] && in3[k] < in2[j]) {
      out[l++] = in3[k++];
    }
    else if (in1[i] == in2[j] && in1[i] < in3[k]) {
      out[l++] = in1[i];
      i++, j++;
    }
    else if (in1[i] == in3[k] && in1[i] < in2[j]) {
      out[l++] = in1[i];
      i++, k++;
    }
    else if (in3[k] == in2[j] && in2[j] < in1[i]) {
      out[l++] = in2[j];
      k++, j++;
    }
    else if (in1[i] == in2[j] && in1[i] == in3[k]) {
      out[l++] = in1[i];
      i++, j++, k++;
    }
  }

  /* Traverse two remaining arrays */
  while (i<size1 && j<size2) {
    if (in1[i] < in2[j]) {
      out[l++] = in1[i++];
    }
    else if (in1[i] > in2[j]) {
      out[l++] = in2[j++];
    }
    else {
      out[l++] = in1[i];
      i++, j++;
    }
  }

  while (i<size1 && k<size3) {
    if (in1[i] < in3[k]) {
      out[l++] = in1[i++];
    }
    else if (in1[i] > in3[k]) {
      out[l++] = in3[k++];
    }
    else {
      out[l++] = in1[i];
      i++, k++;
    }
  }

  while (k<size3 && j<size2) {
    if (in3[k] < in2[j]) {
      out[l++] = in3[k++];
    }
    else if (in3[k] > in2[j]) {
      out[l++] = in2[j++];
    }
    else {
      out[l++] = in3[k];
      k++, j++;
    }
  }

  /* Traverse one remaining array */
  while (i<size1) out[l++] = in1[i++];
  while (j<size2) out[l++] = in2[j++];
  while (k<size3) out[l++] = in3[k++];

  *size4 = l;
}
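
/*
   A small (assumed) example of Merge3SortedArrays(): in1 = {1,4}, in2 = {2,4,6}, in3 = {4,7}
   produces out = {1,2,4,6,7} and *size4 = 5; the value 4, present in all three inputs, is
   emitted exactly once.  The caller must provide an out[] array large enough for
   size1+size2+size3 entries, the worst case with no duplicates.
*/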

/* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and */
/* adds up the products. Two of these three multiplications are performed with existing (sequential)     */
/* matrix-matrix multiplications.                                                                         */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm;
  PetscMPIInt        size;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
  Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
  Mat_MPIAIJ         *p  = (Mat_MPIAIJ*)P->data;
  Mat_MPIAIJ         *c;
  Mat_SeqAIJ         *adpd_seq, *p_off, *aopoth_seq;
  PetscInt           adponz, adpdnz;
  PetscInt           *pi_loc,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
  PetscInt           *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
                     *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;
  PetscMPIInt        rank;
  Mat                adpd, aopoth;
  MatType            mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&p_colstart,&p_colend);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i;

  /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
  ierr = PetscMalloc1(am+2,&api);CHKERRQ(ierr);
  ierr = PetscMalloc1(am+2,&adpoi);CHKERRQ(ierr);

  adpoi[0]  = 0;
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list, will be used for both A_diag * P_loc_off and A_offd * P_oth */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);

  /* Symbolic calc of A_loc_diag * P_loc_diag */
  ierr     = MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->A,p->A,fill,&adpd);CHKERRQ(ierr);
  adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
  adpdi    = adpd_seq->i; adpdj = adpd_seq->j;
  p_off    = (Mat_SeqAIJ*)((p->B)->data);
  poff_i   = p_off->i; poff_j = p_off->j;

  /* j_temp stores indices of a result row before they are added to the linked list */
  ierr = PetscMalloc1(pN+2,&j_temp);CHKERRQ(ierr);

  /* Symbolic calc of the A_diag * p_loc_off */
  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr          = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag);CHKERRQ(ierr);
  current_space = free_space_diag;

  for (i=0; i<am; i++) {
    /* A_diag * P_loc_off */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = poff_i[row+1] - poff_i[row];
      Jptr = poff_j + poff_i[row];
      for (i1=0; i1<pnz; i1++) {
        j_temp[i1] = p->garray[Jptr[i1]];
      }
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt);CHKERRQ(ierr);
    }

    adponz     = lnk[0];
    adpoi[i+1] = adpoi[i] + adponz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining < adponz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += adponz;
    current_space->local_used      += adponz;
    current_space->local_remaining -= adponz;
  }

  /* Symbolic calc of A_off * P_oth */
  ierr       = MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B,ptap->P_oth,fill,&aopoth);CHKERRQ(ierr);
  aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
  aopothi    = aopoth_seq->i; aopothj = aopoth_seq->j;

  /* Allocate space for apj, adpj, aopj, ...                   */
  /* destroy lists of free space and other temporary array(s)  */
  ierr = PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2,&ptap->apj);CHKERRQ(ierr);
  ierr = PetscMalloc1(adpoi[am]+2,&adpoj);CHKERRQ(ierr);

  /* Copy from linked list to j-array */
  ierr = PetscFreeSpaceContiguous(&free_space_diag,adpoj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  adpoJ = adpoj;
  adpdJ = adpdj;
  aopJ  = aopothj;
  apj   = ptap->apj;
  apJ   = apj; /* still empty */

  /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
  /* A_diag * P_loc_diag to get A*P                       */
  for (i=0; i<am; i++) {
    aopnz  = aopothi[i+1] - aopothi[i];
    adponz = adpoi[i+1] - adpoi[i];
    adpdnz = adpdi[i+1] - adpdi[i];

    /* Correct indices from A_diag*P_diag */
    for (i1=0; i1<adpdnz; i1++) {
      adpdJ[i1] += p_colstart;
    }
    /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
    Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
    ierr = MatPreallocateSet(i+rstart,apnz,apJ,dnz,onz);CHKERRQ(ierr);

    aopJ     += aopnz;
    adpoJ    += adponz;
    adpdJ    += adpdnz;
    apJ      += apnz;
    api[i+1]  = api[i] + apnz;
  }

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscCalloc1(pN+2,&apa);CHKERRQ(ierr);

  ptap->apa = apa;
  /* create and assemble symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmpi,A,P);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);

  ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(Cmpi,apj,api);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;
  *C      = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(&aopoth);CHKERRQ(ierr);
  ierr = MatDestroy(&adpd);CHKERRQ(ierr);
  ierr = PetscFree(j_temp);CHKERRQ(ierr);
  ierr = PetscFree(adpoj);CHKERRQ(ierr);
  ierr = PetscFree(adpoi);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*-------------------------------------------------------------------------*/
PetscErrorCode MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  const char     *algTypes[3] = {"scalable","nonscalable","matmatmult"};
  PetscInt       aN=A->cmap->N,alg=1; /* set default algorithm */
  PetscBool      flg;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
    ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[1],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      if (!flg && aN > 100000) { /* may switch to scalable algorithm as default */
        MatInfo   Ainfo,Pinfo;
        PetscInt  nz_local;
        PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;
        MPI_Comm  comm;

        ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
        ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
        nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated); /* estimated local nonzero entries */

        if (aN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
        ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
        ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

        if (alg_scalable) {
          alg  = 0; /* the scalable algorithm may be slower than the nonscalable one */
          ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
          break;
        }
      }
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);CHKERRQ(ierr);
      break;
    case 2:
    {
      Mat         Pt;
      Mat_PtAPMPI *ptap;
      Mat_MPIAIJ  *c;
      ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr);
      ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr);
      c        = (Mat_MPIAIJ*)(*C)->data;
      ptap     = c->ptap;
      ptap->Pt = Pt;
      (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
      PetscFunctionReturn(0);
    }
    break;
    default: /* scalable algorithm */
      ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
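
/*
   Usage sketch (illustrative placeholders only): the dispatcher above is reached through
   MatTransposeMatMult() on MPIAIJ matrices P and A, computing C = P^T * A:

     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);

   and the variant can be chosen at run time with "-mattransposematmult_via scalable",
   "nonscalable", or "matmatmult" (explicit transpose followed by MatMatMult).
*/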

/* This routine only works when scall=MAT_REUSE_MATRIX! */
PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *c    = (Mat_MPIAIJ*)C->data;
  Mat_PtAPMPI    *ptap = c->ptap;
  Mat            Pt    = ptap->Pt;

  PetscFunctionBegin;
  ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr);
  ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat,MatDuplicateOption,Mat*);

/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_PtAPMPI        *ptap;
  Mat_MPIAIJ         *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
  PetscInt           *lnk,i,k,nsend;
  PetscBT            lnkbt;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj; /* i and j array of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;         /* array of length of comm->size, store send/recv matrix values */
  PetscInt           *Jptr,*prmap=p->garray,con,j,Crmax;
  Mat_SeqAIJ         *a_loc,*c_loc,*c_oth;
  PetscTable         ta;
  MatType            mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic A_loc */
  /* ---------------------------*/
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc);CHKERRQ(ierr);

  /* (2-1) compute symbolic C_oth = Ro*A_loc  */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*A) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*A_loc */
  /* ---------------------------------------- */
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(an,aN,&ta);CHKERRQ(ierr); /* for computing Crmax */
  MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);

  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,an,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,an,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c               = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap         = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;

  /* Cmpi is not ready for use - assembly will be done by MatTransposeMatMultNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
  *C                   = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               A_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* These matrices are obtained in MatTransposeMatMultSymbolic() */
    /* 1) get R = Pd^T, Ro = Po^T */
    /*----------------------------*/
    ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);

    /* 2) compute numeric A_loc */
    /*--------------------------*/
    ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc);CHKERRQ(ierr);
  }

  /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
  A_loc = ptap->A_loc;
  ierr  = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr  = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = rstart + i;
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;
  PetscFunctionReturn(0);
}
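
/*
   A note on the row mapping used above, with a made-up layout: if this rank owns global rows
   4..7 of C, the rows of C_loc are inserted at global rows rstart+i = 4,5,6,7, while row i of
   C_oth corresponds to off-processor column p->garray[i] of P; e.g. garray = {1,9} sends those
   contributions to global rows 1 and 9 of C through MatSetValues(...,ADD_VALUES) and the
   subsequent assembly.
*/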

PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
{
  PetscErrorCode      ierr;
  Mat_Merge_SeqsToMPI *merge;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_PtAPMPI         *ptap;
  PetscInt            *adj;
  PetscInt            i,j,k,anz,pnz,row,*cj,nexta;
  MatScalar           *ada,*ca,valtmp;
  PetscInt            am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *ai,*aj,*coi,*coj,*poJ,*pdJ;
  Mat                 A_loc;
  Mat_SeqAIJ          *a_loc;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap  = c->ptap;
  merge = ptap->merge;

  /* 2) compute numeric C_seq = P_loc^T*A_loc */
  /*------------------------------------------*/
  /* get data from symbolic products */
  coi  = merge->coi; coj = merge->coj;
  ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  A_loc = ptap->A_loc;
  ierr  = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  for (i=0; i<am; i++) {
    anz = ai[i+1] - ai[i];
    adj = aj + ai[i];
    ada = a_loc->a + ai[i];

    /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
    /*-------------------------------------------------------------*/
    /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
    pnz = po->i[i+1] - po->i[i];
    poJ = po->j + po->i[i];
    pA  = po->a + po->i[i];
    for (j=0; j<pnz; j++) {
      row = poJ[j];
      cj  = coj + coi[row];
      ca  = coa + coi[row];
      /* perform sparse axpy */
      nexta  = 0;
      valtmp = pA[j];
      for (k=0; nexta<anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp*ada[nexta];
          nexta++;
        }
      }
      ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
    }

    /* put the value into Cd (diagonal part) */
    pnz = pd->i[i+1] - pd->i[i];
    pdJ = pd->j + pd->i[i];
    pA  = pd->a + pd->i[i];
    for (j=0; j<pnz; j++) {
      row = pdJ[j];
      cj  = bj + bi[row];
      ca  = ba + bi[row];
      /* perform sparse axpy */
      nexta  = 0;
      valtmp = pA[j];
      for (k=0; nexta<anz; k++) {
        if (cj[k] == adj[nexta]) {
          ca[k] += valtmp*ada[nexta];
          nexta++;
        }
      }
      ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr);
    }
  }

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);
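
  /* Note: the scalar buffers abuf_r[k] received above use the same offsets and lengths as the
     column-index buffers buf_rj[k] exchanged in the symbolic phase, so step 4 below can merge
     each received row into ba with a single pass over its sorted column indices. */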

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi,A_loc,POt,PDt;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data,*c;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rstart=P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
  PetscScalar         *vals;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;
  PetscTable          ta;
  MatType             mtype;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
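
  /* Outline of the symbolic product C = P^T*A (summarizing the steps below):
       - A_loc gathers all local rows of A; PDt = (p->A)^T and POt = (p->B)^T are the
         transposes of the diagonal and off-diagonal blocks of P;
       - Co = POt*A_loc holds the rows of C owned by other processes; its i/j structure
         is computed locally and sent to the owning processes;
       - each local row of C is obtained by merging PDt*A_loc with the received structure,
         from which Cmpi is preallocated and its pattern assembled. */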

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;
  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta);CHKERRQ(ierr);
  MatRowMergeMax_SeqAIJ(a_loc,am,ta);
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);

  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc1(coi[pon]+1,&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); /* must destroy to get a new one for C */

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscCalloc1(size,&len_si);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++; /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc1(size,&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into table to update Armax */
  /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/examples/tutorials/ex56.c! */
  for (k=0; k<merge->nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<merge->len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Armax);CHKERRQ(ierr);
  /* printf("Armax %d, an %d + Bn %d = %d, aN %d\n",Armax,A->cmap->n,a->B->cmap->N,A->cmap->n+a->B->cmap->N,aN); */

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]: nrows to be sent
         [1:nrows]: row index (global)
         [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows = len_si[proc]/2 - 1;
    buf_si_i = buf_si + nrows+1;
    buf_si[0] = nrows;
    buf_si_i[0] = 0;
    nrows = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc1(pn+1,&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = PetscLLCondensedCreate_Scalable(Armax,&lnk);CHKERRQ(ierr);
  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc1(bi[pn]+1,&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can this not be assembled in the numeric part? */
  /*-------------------------------------------------------------------------------------------*/
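  /* Note: the pattern computed above is inserted here with explicit zero values, so Cmpi
     leaves this routine fully assembled with its final nonzero structure; the numeric phase
     (MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ) then only overwrites values with INSERT_VALUES,
     which appears to be why the assembly cannot be deferred to the numeric part. */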
  ierr = PetscCalloc1(rmax+1,&vals);CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs));CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;

  c->ptap         = ptap;
  ptap->api       = NULL;
  ptap->apj       = NULL;
  ptap->merge     = merge;
  ptap->apa       = NULL;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->duplicate = Cmpi->ops->duplicate;

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate               = MatDuplicate_MPIAIJ_MatPtAP;

  *C = Cmpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
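
/*
   Usage sketch (not part of the library source): the routines above provide the MPIAIJ
   implementation behind the public interface, which is typically driven as

     Mat P,A,C;
     ...
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);   (symbolic + numeric, C = P^T*A)
     ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);     (reuse the structure attached to C)
     ierr = MatDestroy(&C);CHKERRQ(ierr);

   where fill is the caller's estimate of the ratio nnz(C)/(nnz(P)+nnz(A)); the PetscInfo
   output above reports the fill ratio that was actually needed.
*/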