1 2 /* 3 Defines matrix-matrix product routines for pairs of MPIAIJ matrices 4 C = A * B 5 */ 6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ 7 #include <../src/mat/utils/freespace.h> 8 #include <../src/mat/impls/aij/mpi/mpiaij.h> 9 #include <petscbt.h> 10 #include <../src/mat/impls/dense/mpi/mpidense.h> 11 #include <petsc-private/vecimpl.h> 12 13 #undef __FUNCT__ 14 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ" 15 PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C) 16 { 17 PetscErrorCode ierr; 18 const char *algTypes[2] = {"scalable","nonscalable"}; 19 PetscInt alg=0; /* set default algorithm */ 20 21 PetscFunctionBegin; 22 if (scall == MAT_INITIAL_MATRIX) { 23 ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr); 24 ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,2,algTypes[0],&alg,NULL);CHKERRQ(ierr); 25 ierr = PetscOptionsEnd();CHKERRQ(ierr); 26 27 ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 28 switch (alg) { 29 case 1: 30 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr); 31 break; 32 default: 33 ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); 34 break; 35 } 36 ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 37 } 38 ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 39 ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr); 40 ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 41 PetscFunctionReturn(0); 42 } 43 44 #undef __FUNCT__ 45 #define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult" 46 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A) 47 { 48 PetscErrorCode ierr; 49 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 50 Mat_PtAPMPI *ptap = a->ptap; 51 52 PetscFunctionBegin; 53 ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr); 54 ierr = PetscFree(ptap->bufa);CHKERRQ(ierr); 55 ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr); 56 ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr); 57 ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr); 58 ierr = PetscFree(ptap->api);CHKERRQ(ierr); 59 ierr = PetscFree(ptap->apj);CHKERRQ(ierr); 60 ierr = PetscFree(ptap->apa);CHKERRQ(ierr); 61 ierr = ptap->destroy(A);CHKERRQ(ierr); 62 ierr = PetscFree(ptap);CHKERRQ(ierr); 63 PetscFunctionReturn(0); 64 } 65 66 #undef __FUNCT__ 67 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult" 68 PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M) 69 { 70 PetscErrorCode ierr; 71 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 72 Mat_PtAPMPI *ptap = a->ptap; 73 74 PetscFunctionBegin; 75 ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr); 76 77 (*M)->ops->destroy = ptap->destroy; /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure! 
*/ 78 (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */ 79 PetscFunctionReturn(0); 80 } 81 82 #undef __FUNCT__ 83 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ" 84 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 85 { 86 PetscErrorCode ierr; 87 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 88 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 89 Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 90 PetscInt *adi=ad->i,*adj,*aoi=ao->i,*aoj; 91 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 92 Mat_SeqAIJ *p_loc,*p_oth; 93 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 94 PetscScalar *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca; 95 PetscInt cm =C->rmap->n,anz,pnz; 96 Mat_PtAPMPI *ptap=c->ptap; 97 PetscInt *api,*apj,*apJ,i,j,k,row; 98 PetscInt cstart=C->cmap->rstart; 99 PetscInt cdnz,conz,k0,k1; 100 101 PetscFunctionBegin; 102 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 103 /*-----------------------------------------------------*/ 104 /* update numerical values of P_oth and P_loc */ 105 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 106 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 107 108 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 109 /*----------------------------------------------------------*/ 110 /* get data from symbolic products */ 111 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 112 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 113 pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a; 114 pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a; 115 116 /* get apa for storing dense row A[i,:]*P */ 117 apa = ptap->apa; 118 119 api = ptap->api; 120 apj = ptap->apj; 121 for (i=0; i<cm; i++) { 122 /* diagonal portion of A */ 123 anz = adi[i+1] - adi[i]; 124 adj = ad->j + adi[i]; 125 ada = ad->a + adi[i]; 126 for (j=0; j<anz; j++) { 127 row = adj[j]; 128 pnz = pi_loc[row+1] - pi_loc[row]; 129 pj = pj_loc + pi_loc[row]; 130 pa = pa_loc + pi_loc[row]; 131 132 /* perform dense axpy */ 133 valtmp = ada[j]; 134 for (k=0; k<pnz; k++) { 135 apa[pj[k]] += valtmp*pa[k]; 136 } 137 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 138 } 139 140 /* off-diagonal portion of A */ 141 anz = aoi[i+1] - aoi[i]; 142 aoj = ao->j + aoi[i]; 143 aoa = ao->a + aoi[i]; 144 for (j=0; j<anz; j++) { 145 row = aoj[j]; 146 pnz = pi_oth[row+1] - pi_oth[row]; 147 pj = pj_oth + pi_oth[row]; 148 pa = pa_oth + pi_oth[row]; 149 150 /* perform dense axpy */ 151 valtmp = aoa[j]; 152 for (k=0; k<pnz; k++) { 153 apa[pj[k]] += valtmp*pa[k]; 154 } 155 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 156 } 157 158 /* set values in C */ 159 apJ = apj + api[i]; 160 cdnz = cd->i[i+1] - cd->i[i]; 161 conz = co->i[i+1] - co->i[i]; 162 163 /* 1st off-diagoanl part of C */ 164 ca = coa + co->i[i]; 165 k = 0; 166 for (k0=0; k0<conz; k0++) { 167 if (apJ[k] >= cstart) break; 168 ca[k0] = apa[apJ[k]]; 169 apa[apJ[k]] = 0.0; 170 k++; 171 } 172 173 /* diagonal part of C */ 174 ca = cda + cd->i[i]; 175 for (k1=0; k1<cdnz; k1++) { 176 ca[k1] = apa[apJ[k]]; 177 apa[apJ[k]] = 0.0; 178 k++; 179 } 180 181 /* 2nd off-diagoanl part of C */ 182 ca = coa + co->i[i]; 183 for (; k0<conz; k0++) { 184 ca[k0] = apa[apJ[k]]; 185 apa[apJ[k]] = 0.0; 186 k++; 187 } 188 } 189 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 190 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 191 PetscFunctionReturn(0); 192 } 193 194 #undef __FUNCT__ 195 
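/*
   MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable - symbolic phase of C = A*B using a dense accumulator.

   Each process gathers the off-process rows of the second matrix it needs into ptap->P_oth, keeps its
   own rows in ptap->P_loc, and builds the nonzero structure of each local row of C with a condensed
   linked list plus a dense work row of length P->cmap->N (ptap->apa).  That O(N) work row is what makes
   this variant "nonscalable"; MatMatMult_MPIAIJ_MPIAIJ() above selects it with -matmatmult_via
   nonscalable, the default being the scalable variant MatMatMultSymbolic_MPIAIJ_MPIAIJ() later in this
   file.

   A minimal usage sketch from application code (assuming A and B are assembled MATMPIAIJ matrices with
   compatible layouts; the symbolic/numeric split is handled internally by MatMatMult):

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     // ... change the numerical values of A or B, keeping their nonzero patterns ...
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/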
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable" 196 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C) 197 { 198 PetscErrorCode ierr; 199 MPI_Comm comm; 200 Mat Cmpi; 201 Mat_PtAPMPI *ptap; 202 PetscFreeSpaceList free_space=NULL,current_space=NULL; 203 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c; 204 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 205 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 206 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 207 PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi; 208 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 209 PetscBT lnkbt; 210 PetscScalar *apa; 211 PetscReal afill; 212 PetscInt nlnk_max,armax,prmax; 213 214 PetscFunctionBegin; 215 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 216 if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) { 217 SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend); 218 } 219 220 /* create struct Mat_PtAPMPI and attached it to C later */ 221 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 222 223 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 224 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 225 226 /* get P_loc by taking all local rows of P */ 227 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 228 229 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 230 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 231 pi_loc = p_loc->i; pj_loc = p_loc->j; 232 pi_oth = p_oth->i; pj_oth = p_oth->j; 233 234 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 235 /*-------------------------------------------------------------------*/ 236 ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr); 237 ptap->api = api; 238 api[0] = 0; 239 240 /* create and initialize a linked list */ 241 armax = ad->rmax+ao->rmax; 242 prmax = PetscMax(p_loc->rmax,p_oth->rmax); 243 nlnk_max = armax*prmax; 244 if (!nlnk_max || nlnk_max > pN) nlnk_max = pN; 245 ierr = PetscLLCondensedCreate(nlnk_max,pN,&lnk,&lnkbt);CHKERRQ(ierr); 246 247 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 248 ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr); 249 250 current_space = free_space; 251 252 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 253 for (i=0; i<am; i++) { 254 /* diagonal portion of A */ 255 nzi = adi[i+1] - adi[i]; 256 for (j=0; j<nzi; j++) { 257 row = *adj++; 258 pnz = pi_loc[row+1] - pi_loc[row]; 259 Jptr = pj_loc + pi_loc[row]; 260 /* add non-zero cols of P into the sorted linked list lnk */ 261 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 262 } 263 /* off-diagonal portion of A */ 264 nzi = aoi[i+1] - aoi[i]; 265 for (j=0; j<nzi; j++) { 266 row = *aoj++; 267 pnz = pi_oth[row+1] - pi_oth[row]; 268 Jptr = pj_oth + pi_oth[row]; 269 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 270 } 271 272 apnz = lnk[0]; 273 api[i+1] = api[i] + apnz; 274 275 /* if free space is not available, double the total space in the list */ 276 if (current_space->local_remaining<apnz) { 277 ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,¤t_space);CHKERRQ(ierr); 278 nspacedouble++; 279 
} 280 281 /* Copy data into free space, then initialize lnk */ 282 ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 283 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 284 285 current_space->array += apnz; 286 current_space->local_used += apnz; 287 current_space->local_remaining -= apnz; 288 } 289 290 /* Allocate space for apj, initialize apj, and */ 291 /* destroy list of free space and other temporary array(s) */ 292 ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr); 293 apj = ptap->apj; 294 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 295 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 296 297 /* malloc apa to store dense row A[i,:]*P */ 298 ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr); 299 ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr); 300 301 ptap->apa = apa; 302 303 /* create and assemble symbolic parallel matrix Cmpi */ 304 /*----------------------------------------------------*/ 305 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 306 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 307 ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr); 308 309 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 310 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 311 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 312 for (i=0; i<am; i++) { 313 row = i + rstart; 314 apnz = api[i+1] - api[i]; 315 ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr); 316 apj += apnz; 317 } 318 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 319 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 320 321 ptap->destroy = Cmpi->ops->destroy; 322 ptap->duplicate = Cmpi->ops->duplicate; 323 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 324 Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult; 325 326 /* attach the supporting struct to Cmpi for reuse */ 327 c = (Mat_MPIAIJ*)Cmpi->data; 328 c->ptap = ptap; 329 330 *C = Cmpi; 331 332 /* set MatInfo */ 333 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 334 if (afill < 1.0) afill = 1.0; 335 Cmpi->info.mallocs = nspacedouble; 336 Cmpi->info.fill_ratio_given = fill; 337 Cmpi->info.fill_ratio_needed = afill; 338 339 #if defined(PETSC_USE_INFO) 340 if (api[am]) { 341 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr); 342 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.;\n",afill);CHKERRQ(ierr); 343 } else { 344 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 345 } 346 #endif 347 PetscFunctionReturn(0); 348 } 349 350 #undef __FUNCT__ 351 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense" 352 PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C) 353 { 354 PetscErrorCode ierr; 355 356 PetscFunctionBegin; 357 if (scall == MAT_INITIAL_MATRIX) { 358 ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 359 ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr); 360 ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 361 } 362 ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 363 ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr); 364 ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 365 PetscFunctionReturn(0); 366 } 367 368 typedef struct { 369 Mat workB; 370 PetscScalar 
*rvalues,*svalues; 371 MPI_Request *rwaits,*swaits; 372 } MPIAIJ_MPIDense; 373 374 #undef __FUNCT__ 375 #define __FUNCT__ "MatMPIAIJ_MPIDenseDestroy" 376 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx) 377 { 378 MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx; 379 PetscErrorCode ierr; 380 381 PetscFunctionBegin; 382 ierr = MatDestroy(&contents->workB);CHKERRQ(ierr); 383 ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr); 384 ierr = PetscFree(contents);CHKERRQ(ierr); 385 PetscFunctionReturn(0); 386 } 387 388 #undef __FUNCT__ 389 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense" 390 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C) 391 { 392 PetscErrorCode ierr; 393 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data; 394 PetscInt nz = aij->B->cmap->n; 395 PetscContainer container; 396 MPIAIJ_MPIDense *contents; 397 VecScatter ctx = aij->Mvctx; 398 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 399 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 400 PetscInt m = A->rmap->n,n=B->cmap->n; 401 402 PetscFunctionBegin; 403 ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr); 404 ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr); 405 ierr = MatSetBlockSizes(*C,A->rmap->bs,B->cmap->bs);CHKERRQ(ierr); 406 ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr); 407 ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr); 408 ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 409 ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 410 411 (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 412 413 ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr); 414 /* Create work matrix used to store off processor rows of B needed for local product */ 415 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr); 416 /* Create work arrays needed */ 417 ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues, 418 B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues, 419 from->n,MPI_Request,&contents->rwaits, 420 to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr); 421 422 ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr); 423 ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr); 424 ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr); 425 ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr); 426 ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); 427 PetscFunctionReturn(0); 428 } 429 430 #undef __FUNCT__ 431 #define __FUNCT__ "MatMPIDenseScatter" 432 /* 433 Performs an efficient scatter on the rows of B needed by this process; this is 434 a modification of the VecScatterBegin_() routines. 
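   The rows gathered are those of B that correspond to nonzero columns of the off-diagonal block of A,
   i.e. the pattern already encoded in aij->Mvctx; each needed row is packed contiguously (all ncols
   entries) and received into the dense work matrix workB that MatMatMultSymbolic_MPIAIJ_MPIDense()
   preallocated and stashed in the "workB" container attached to C.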
435 */ 436 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB) 437 { 438 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 439 PetscErrorCode ierr; 440 PetscScalar *b,*w,*svalues,*rvalues; 441 VecScatter ctx = aij->Mvctx; 442 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 443 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 444 PetscInt i,j,k; 445 PetscInt *sindices,*sstarts,*rindices,*rstarts; 446 PetscMPIInt *sprocs,*rprocs,nrecvs; 447 MPI_Request *swaits,*rwaits; 448 MPI_Comm comm; 449 PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n; 450 MPI_Status status; 451 MPIAIJ_MPIDense *contents; 452 PetscContainer container; 453 Mat workB; 454 455 PetscFunctionBegin; 456 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 457 ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr); 458 if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist"); 459 ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr); 460 461 workB = *outworkB = contents->workB; 462 if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",nrows,workB->cmap->n); 463 sindices = to->indices; 464 sstarts = to->starts; 465 sprocs = to->procs; 466 swaits = contents->swaits; 467 svalues = contents->svalues; 468 469 rindices = from->indices; 470 rstarts = from->starts; 471 rprocs = from->procs; 472 rwaits = contents->rwaits; 473 rvalues = contents->rvalues; 474 475 ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr); 476 ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr); 477 478 for (i=0; i<from->n; i++) { 479 ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr); 480 } 481 482 for (i=0; i<to->n; i++) { 483 /* pack a message at a time */ 484 for (j=0; j<sstarts[i+1]-sstarts[i]; j++) { 485 for (k=0; k<ncols; k++) { 486 svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k]; 487 } 488 } 489 ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr); 490 } 491 492 nrecvs = from->n; 493 while (nrecvs) { 494 ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr); 495 nrecvs--; 496 /* unpack a message at a time */ 497 for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) { 498 for (k=0; k<ncols; k++) { 499 w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k]; 500 } 501 } 502 } 503 if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);} 504 505 ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr); 506 ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr); 507 ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 508 ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 509 PetscFunctionReturn(0); 510 } 511 extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat); 512 513 #undef __FUNCT__ 514 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense" 515 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C) 516 { 517 PetscErrorCode ierr; 518 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 519 Mat_MPIDense *bdense = (Mat_MPIDense*)B->data; 520 Mat_MPIDense *cdense = (Mat_MPIDense*)C->data; 521 Mat workB; 522 523 PetscFunctionBegin; 524 /* diagonal block of A times all local rows of B*/ 525 ierr = 
MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr); 526 527 /* get off processor parts of B needed to complete the product */ 528 ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr); 529 530 /* off-diagonal block of A times nonlocal rows of B */ 531 ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr); 532 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 533 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 534 PetscFunctionReturn(0); 535 } 536 537 #undef __FUNCT__ 538 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable" 539 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,Mat C) 540 { 541 PetscErrorCode ierr; 542 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 543 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 544 Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 545 PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj; 546 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 547 Mat_SeqAIJ *p_loc,*p_oth; 548 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 549 PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca; 550 PetscInt cm = C->rmap->n,anz,pnz; 551 Mat_PtAPMPI *ptap = c->ptap; 552 PetscScalar *apa_sparse = ptap->apa; 553 PetscInt *api,*apj,*apJ,i,j,k,row; 554 PetscInt cstart = C->cmap->rstart; 555 PetscInt cdnz,conz,k0,k1,nextp; 556 557 PetscFunctionBegin; 558 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 559 /*-----------------------------------------------------*/ 560 /* update numerical values of P_oth and P_loc */ 561 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 562 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 563 564 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 565 /*----------------------------------------------------------*/ 566 /* get data from symbolic products */ 567 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 568 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 569 pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a; 570 pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a; 571 572 api = ptap->api; 573 apj = ptap->apj; 574 for (i=0; i<cm; i++) { 575 apJ = apj + api[i]; 576 577 /* diagonal portion of A */ 578 anz = adi[i+1] - adi[i]; 579 adj = ad->j + adi[i]; 580 ada = ad->a + adi[i]; 581 for (j=0; j<anz; j++) { 582 row = adj[j]; 583 pnz = pi_loc[row+1] - pi_loc[row]; 584 pj = pj_loc + pi_loc[row]; 585 pa = pa_loc + pi_loc[row]; 586 /* perform sparse axpy */ 587 valtmp = ada[j]; 588 nextp = 0; 589 for (k=0; nextp<pnz; k++) { 590 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 591 apa_sparse[k] += valtmp*pa[nextp++]; 592 } 593 } 594 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 595 } 596 597 /* off-diagonal portion of A */ 598 anz = aoi[i+1] - aoi[i]; 599 aoj = ao->j + aoi[i]; 600 aoa = ao->a + aoi[i]; 601 for (j=0; j<anz; j++) { 602 row = aoj[j]; 603 pnz = pi_oth[row+1] - pi_oth[row]; 604 pj = pj_oth + pi_oth[row]; 605 pa = pa_oth + pi_oth[row]; 606 /* perform sparse axpy */ 607 valtmp = aoa[j]; 608 nextp = 0; 609 for (k=0; nextp<pnz; k++) { 610 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 611 apa_sparse[k] += valtmp*pa[nextp++]; 612 } 613 } 614 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 615 } 616 617 /* set values in C */ 618 cdnz = cd->i[i+1] - cd->i[i]; 619 conz = co->i[i+1] - co->i[i]; 620 621 /* 1st off-diagoanl part of C */ 622 ca = coa + co->i[i]; 623 k = 0; 624 for 
(k0=0; k0<conz; k0++) { 625 if (apJ[k] >= cstart) break; 626 ca[k0] = apa_sparse[k]; 627 apa_sparse[k] = 0.0; 628 k++; 629 } 630 631 /* diagonal part of C */ 632 ca = cda + cd->i[i]; 633 for (k1=0; k1<cdnz; k1++) { 634 ca[k1] = apa_sparse[k]; 635 apa_sparse[k] = 0.0; 636 k++; 637 } 638 639 /* 2nd off-diagoanl part of C */ 640 ca = coa + co->i[i]; 641 for (; k0<conz; k0++) { 642 ca[k0] = apa_sparse[k]; 643 apa_sparse[k] = 0.0; 644 k++; 645 } 646 } 647 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 648 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 649 PetscFunctionReturn(0); 650 } 651 652 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */ 653 #undef __FUNCT__ 654 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 655 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 656 { 657 PetscErrorCode ierr; 658 MPI_Comm comm; 659 Mat Cmpi; 660 Mat_PtAPMPI *ptap; 661 PetscFreeSpaceList free_space = NULL,current_space=NULL; 662 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c; 663 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 664 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 665 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 666 PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0; 667 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 668 PetscInt nlnk_max,armax,prmax; 669 PetscReal afill; 670 PetscScalar *apa; 671 672 PetscFunctionBegin; 673 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 674 /* create struct Mat_PtAPMPI and attached it to C later */ 675 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 676 677 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 678 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 679 680 /* get P_loc by taking all local rows of P */ 681 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 682 683 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 684 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 685 pi_loc = p_loc->i; pj_loc = p_loc->j; 686 pi_oth = p_oth->i; pj_oth = p_oth->j; 687 688 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 689 /*-------------------------------------------------------------------*/ 690 ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr); 691 ptap->api = api; 692 api[0] = 0; 693 694 /* create and initialize a linked list */ 695 armax = ad->rmax+ao->rmax; 696 prmax = PetscMax(p_loc->rmax,p_oth->rmax); 697 nlnk_max = armax*prmax; 698 if (!nlnk_max || nlnk_max > pN) nlnk_max = pN; 699 ierr = PetscLLCondensedCreate_Scalable(nlnk_max,&lnk);CHKERRQ(ierr); 700 701 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 702 ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr); 703 704 current_space = free_space; 705 706 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 707 for (i=0; i<am; i++) { 708 /* diagonal portion of A */ 709 nzi = adi[i+1] - adi[i]; 710 for (j=0; j<nzi; j++) { 711 row = *adj++; 712 pnz = pi_loc[row+1] - pi_loc[row]; 713 Jptr = pj_loc + pi_loc[row]; 714 /* add non-zero cols of P into the sorted linked list lnk */ 715 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 716 } 717 /* off-diagonal portion of A */ 718 nzi = aoi[i+1] - aoi[i]; 719 
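    /* the off-diagonal column indices of A are local indices into P_oth, which holds the
       required off-process rows of P gathered by MatGetBrowsOfAoCols_MPIAIJ() */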
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }

    apnz     = *lnk;
    api[i+1] = api[i] + apnz;
    if (apnz > apnz_max) apnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* malloc apa for assembling Cmpi */
  ierr = PetscMalloc(apnz_max*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,apnz_max*sizeof(PetscScalar));CHKERRQ(ierr);

  ptap->apa = apa;
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

/*-------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode
MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C) 809 { 810 PetscErrorCode ierr; 811 const char *algTypes[3] = {"scalable","nonscalable","matmatmult"}; 812 PetscInt alg=0; /* set default algorithm */ 813 814 PetscFunctionBegin; 815 if (scall == MAT_INITIAL_MATRIX) { 816 ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr); 817 ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[0],&alg,NULL);CHKERRQ(ierr); 818 ierr = PetscOptionsEnd();CHKERRQ(ierr); 819 820 ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 821 switch (alg) { 822 case 1: 823 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(P,A,fill,C);CHKERRQ(ierr); 824 break; 825 case 2: 826 Mat Pt; 827 Mat_PtAPMPI *ptap; 828 Mat_MPIAIJ *c; 829 ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr); 830 ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr); 831 c = (Mat_MPIAIJ*)(*C)->data; 832 ptap = c->ptap; 833 ptap->Pt = Pt; 834 (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult; 835 PetscFunctionReturn(0); 836 break; 837 default: 838 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(P,A,fill,C);CHKERRQ(ierr); 839 break; 840 } 841 ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 842 } 843 ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 844 ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr); 845 ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 846 PetscFunctionReturn(0); 847 } 848 849 /* This routine only works when scall=MAT_REUSE_MATRIX! */ 850 #undef __FUNCT__ 851 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult" 852 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C) 853 { 854 PetscErrorCode ierr; 855 Mat_MPIAIJ *c=(Mat_MPIAIJ*)C->data; 856 Mat_PtAPMPI *ptap= c->ptap; 857 Mat Pt=ptap->Pt; 858 859 PetscFunctionBegin; 860 ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr); 861 ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr); 862 PetscFunctionReturn(0); 863 } 864 865 #undef __FUNCT__ 866 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ" 867 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C) 868 { 869 PetscErrorCode ierr; 870 Mat_Merge_SeqsToMPI *merge; 871 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 872 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 873 Mat_PtAPMPI *ptap; 874 PetscInt *adj,*aJ; 875 PetscInt i,j,k,anz,pnz,row,*cj; 876 MatScalar *ada,*aval,*ca,valtmp; 877 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 878 MPI_Comm comm; 879 PetscMPIInt size,rank,taga,*len_s; 880 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 881 PetscInt **buf_ri,**buf_rj; 882 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 883 MPI_Request *s_waits,*r_waits; 884 MPI_Status *status; 885 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 886 PetscInt *ai,*aj,*coi,*coj; 887 PetscInt *poJ,*pdJ; 888 Mat A_loc; 889 Mat_SeqAIJ *a_loc; 890 891 PetscFunctionBegin; 892 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 893 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 894 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 895 896 ptap = c->ptap; 897 merge = ptap->merge; 898 899 /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */ 900 
/*--------------------------------------------------------------*/ 901 /* get data from symbolic products */ 902 coi = merge->coi; coj = merge->coj; 903 ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr); 904 ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr); 905 906 bi = merge->bi; bj = merge->bj; 907 owners = merge->rowmap->range; 908 ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr); 909 ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr); 910 911 /* get A_loc by taking all local rows of A */ 912 A_loc = ptap->A_loc; 913 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 914 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 915 ai = a_loc->i; 916 aj = a_loc->j; 917 918 ierr = PetscMalloc((A->cmap->N)*sizeof(PetscScalar),&aval);CHKERRQ(ierr); /* non-scalable!!! */ 919 ierr = PetscMemzero(aval,A->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr); 920 921 for (i=0; i<am; i++) { 922 /* 2-a) put A[i,:] to dense array aval */ 923 anz = ai[i+1] - ai[i]; 924 adj = aj + ai[i]; 925 ada = a_loc->a + ai[i]; 926 for (j=0; j<anz; j++) { 927 aval[adj[j]] = ada[j]; 928 } 929 930 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 931 /*--------------------------------------------------------------*/ 932 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 933 pnz = po->i[i+1] - po->i[i]; 934 poJ = po->j + po->i[i]; 935 pA = po->a + po->i[i]; 936 for (j=0; j<pnz; j++) { 937 row = poJ[j]; 938 cnz = coi[row+1] - coi[row]; 939 cj = coj + coi[row]; 940 ca = coa + coi[row]; 941 /* perform dense axpy */ 942 valtmp = pA[j]; 943 for (k=0; k<cnz; k++) { 944 ca[k] += valtmp*aval[cj[k]]; 945 } 946 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 947 } 948 949 /* put the value into Cd (diagonal part) */ 950 pnz = pd->i[i+1] - pd->i[i]; 951 pdJ = pd->j + pd->i[i]; 952 pA = pd->a + pd->i[i]; 953 for (j=0; j<pnz; j++) { 954 row = pdJ[j]; 955 cnz = bi[row+1] - bi[row]; 956 cj = bj + bi[row]; 957 ca = ba + bi[row]; 958 /* perform dense axpy */ 959 valtmp = pA[j]; 960 for (k=0; k<cnz; k++) { 961 ca[k] += valtmp*aval[cj[k]]; 962 } 963 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 964 } 965 966 /* zero the current row of Pt*A */ 967 aJ = aj + ai[i]; 968 for (k=0; k<anz; k++) aval[aJ[k]] = 0.0; 969 } 970 971 /* 3) send and recv matrix values coa */ 972 /*------------------------------------*/ 973 buf_ri = merge->buf_ri; 974 buf_rj = merge->buf_rj; 975 len_s = merge->len_s; 976 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 977 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 978 979 ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr); 980 for (proc=0,k=0; proc<size; proc++) { 981 if (!len_s[proc]) continue; 982 i = merge->owners_co[proc]; 983 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 984 k++; 985 } 986 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 987 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 988 989 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 990 ierr = PetscFree(r_waits);CHKERRQ(ierr); 991 ierr = PetscFree(coa);CHKERRQ(ierr); 992 993 /* 4) insert local Cseq and received values into Cmpi */ 994 /*----------------------------------------------------*/ 995 ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr); 996 
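  /* each received i-structure buf_ri[k] is laid out as: [0] = number of rows sent,
     [1..nrows] = row indices (local to this process), followed by nrows+1 offsets into the
     matching j/value buffers; see the sender side in
     MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable() */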
for (k=0; k<merge->nrecv; k++) { 997 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 998 nrows = *(buf_ri_k[k]); 999 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */ 1000 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1001 } 1002 1003 for (i=0; i<cm; i++) { 1004 row = owners[rank] + i; /* global row index of C_seq */ 1005 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1006 ba_i = ba + bi[i]; 1007 bnz = bi[i+1] - bi[i]; 1008 /* add received vals into ba */ 1009 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1010 /* i-th row */ 1011 if (i == *nextrow[k]) { 1012 cnz = *(nextci[k]+1) - *nextci[k]; 1013 cj = buf_rj[k] + *(nextci[k]); 1014 ca = abuf_r[k] + *(nextci[k]); 1015 nextcj = 0; 1016 for (j=0; nextcj<cnz; j++) { 1017 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1018 ba_i[j] += ca[nextcj++]; 1019 } 1020 } 1021 nextrow[k]++; nextci[k]++; 1022 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 1023 } 1024 } 1025 ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr); 1026 } 1027 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1028 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1029 1030 ierr = PetscFree(ba);CHKERRQ(ierr); 1031 ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr); 1032 ierr = PetscFree(abuf_r);CHKERRQ(ierr); 1033 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1034 ierr = PetscFree(aval);CHKERRQ(ierr); 1035 PetscFunctionReturn(0); 1036 } 1037 1038 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */ 1039 #undef __FUNCT__ 1040 #define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable" 1041 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat *C) 1042 { 1043 PetscErrorCode ierr; 1044 Mat Cmpi,A_loc,POt,PDt; 1045 Mat_PtAPMPI *ptap; 1046 PetscFreeSpaceList free_space=NULL,current_space=NULL; 1047 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c; 1048 PetscInt *pdti,*pdtj,*poti,*potj,*ptJ; 1049 PetscInt nnz; 1050 PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row; 1051 PetscInt am=A->rmap->n,pn=P->cmap->n; 1052 PetscBT lnkbt; 1053 MPI_Comm comm; 1054 PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri; 1055 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 1056 PetscInt len,proc,*dnz,*onz,*owners; 1057 PetscInt nzi,*bi,*bj; 1058 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 1059 MPI_Request *swaits,*rwaits; 1060 MPI_Status *sstatus,rstatus; 1061 Mat_Merge_SeqsToMPI *merge; 1062 PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j; 1063 PetscReal afill =1.0,afill_tmp; 1064 PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Crmax; 1065 PetscScalar *vals; 1066 Mat_SeqAIJ *a_loc, *pdt,*pot; 1067 1068 PetscFunctionBegin; 1069 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1070 /* check if matrix local sizes are compatible */ 1071 if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) { 1072 SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend); 1073 } 1074 1075 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1076 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1077 1078 /* create struct Mat_PtAPMPI and attached it to C later */ 1079 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 1080 1081 /* get A_loc by taking all local rows of A */ 1082 ierr = 
MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;

  a_loc = (Mat_SeqAIJ*)(A_loc)->data;
  ai    = a_loc->i;
  aj    = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz           = fill*(poti[pon] + ai[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  i     = PetscMax(pdt->rmax,pot->rmax);
  Crmax = i*a_loc->rmax*size;
  if (!Crmax || Crmax > aN) Crmax = aN;
  ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >=
owners[proc+1]) proc++; 1176 len_si[proc]++; /* num of rows in Co to be sent to [proc] */ 1177 len_s[proc] += coi[i+1] - coi[i]; 1178 } 1179 1180 len = 0; /* max length of buf_si[] */ 1181 owners_co[0] = 0; 1182 for (proc=0; proc<size; proc++) { 1183 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1184 if (len_si[proc]) { 1185 merge->nsend++; 1186 len_si[proc] = 2*(len_si[proc] + 1); 1187 len += len_si[proc]; 1188 } 1189 } 1190 1191 /* determine the number and length of messages to receive for coi and coj */ 1192 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr); 1193 ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr); 1194 1195 /* post the Irecv and Isend of coj */ 1196 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1197 ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1198 ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr); 1199 for (proc=0, k=0; proc<size; proc++) { 1200 if (!len_s[proc]) continue; 1201 i = owners_co[proc]; 1202 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1203 k++; 1204 } 1205 1206 /* receives and sends of coj are complete */ 1207 ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr); 1208 for (i=0; i<merge->nrecv; i++) { 1209 PetscMPIInt icompleted; 1210 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1211 } 1212 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1213 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1214 1215 /* send and recv coi */ 1216 /*-------------------*/ 1217 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1218 ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1219 ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr); 1220 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1221 for (proc=0,k=0; proc<size; proc++) { 1222 if (!len_s[proc]) continue; 1223 /* form outgoing message for i-structure: 1224 buf_si[0]: nrows to be sent 1225 [1:nrows]: row index (global) 1226 [nrows+1:2*nrows+1]: i-structure index 1227 */ 1228 /*-------------------------------------------*/ 1229 nrows = len_si[proc]/2 - 1; 1230 buf_si_i = buf_si + nrows+1; 1231 buf_si[0] = nrows; 1232 buf_si_i[0] = 0; 1233 nrows = 0; 1234 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1235 nzi = coi[i+1] - coi[i]; 1236 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1237 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1238 nrows++; 1239 } 1240 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1241 k++; 1242 buf_si += len_si[proc]; 1243 } 1244 i = merge->nrecv; 1245 while (i--) { 1246 PetscMPIInt icompleted; 1247 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1248 } 1249 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1250 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1251 ierr = PetscFree(len_si);CHKERRQ(ierr); 1252 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1253 ierr = PetscFree(swaits);CHKERRQ(ierr); 1254 ierr = PetscFree(sstatus);CHKERRQ(ierr); 1255 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1256 1257 /* compute the local portion of C (mpi mat) */ 1258 /*------------------------------------------*/ 1259 /* allocate bi array and free space for accumulating nonzero column info */ 
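  /* bi/bj accumulate, in CSR form, the rows of C owned by this process: each row i merges the
     locally computed pattern of Pd^T[i,:]*A with the matching rows of Co received from the
     other processes above */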
  ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(A)) */
  nnz           = fill*(pdti[pn] + poti[pon] + ai[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can it not be assembled in the Numeric part? */
  /*------------------------------------------------------------------------------------------*/
  ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
  ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr =
MatSetBlockSize(Cmpi,1);CHKERRQ(ierr); 1340 for (i=0; i<pn; i++) { 1341 row = i + rstart; 1342 nnz = bi[i+1] - bi[i]; 1343 Jptr = bj + bi[i]; 1344 ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr); 1345 } 1346 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1347 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1348 ierr = PetscFree(vals);CHKERRQ(ierr); 1349 1350 merge->bi = bi; 1351 merge->bj = bj; 1352 merge->coi = coi; 1353 merge->coj = coj; 1354 merge->buf_ri = buf_ri; 1355 merge->buf_rj = buf_rj; 1356 merge->owners_co = owners_co; 1357 merge->destroy = Cmpi->ops->destroy; 1358 merge->duplicate = Cmpi->ops->duplicate; 1359 1360 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ; 1361 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 1362 1363 /* attach the supporting struct to Cmpi for reuse */ 1364 c = (Mat_MPIAIJ*)Cmpi->data; 1365 c->ptap = ptap; 1366 ptap->api = NULL; 1367 ptap->apj = NULL; 1368 ptap->merge = merge; 1369 ptap->rmax = rmax; 1370 1371 *C = Cmpi; 1372 #if defined(PETSC_USE_INFO) 1373 if (bi[pn] != 0) { 1374 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr); 1375 ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr); 1376 } else { 1377 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 1378 } 1379 #endif 1380 PetscFunctionReturn(0); 1381 } 1382 1383 #undef __FUNCT__ 1384 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable" 1385 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,Mat C) 1386 { 1387 PetscErrorCode ierr; 1388 Mat_Merge_SeqsToMPI *merge; 1389 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1390 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 1391 Mat_PtAPMPI *ptap; 1392 PetscInt *adj; 1393 PetscInt i,j,k,anz,pnz,row,*cj,nexta; 1394 MatScalar *ada,*ca,valtmp; 1395 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 1396 MPI_Comm comm; 1397 PetscMPIInt size,rank,taga,*len_s; 1398 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 1399 PetscInt **buf_ri,**buf_rj; 1400 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 1401 MPI_Request *s_waits,*r_waits; 1402 MPI_Status *status; 1403 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 1404 PetscInt *ai,*aj,*coi,*coj; 1405 PetscInt *poJ,*pdJ; 1406 Mat A_loc; 1407 Mat_SeqAIJ *a_loc; 1408 1409 PetscFunctionBegin; 1410 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1411 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1412 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1413 1414 ptap = c->ptap; 1415 merge = ptap->merge; 1416 1417 /* 2) compute numeric C_seq = P_loc^T*A_loc */ 1418 /*------------------------------------------*/ 1419 /* get data from symbolic products */ 1420 coi = merge->coi; coj = merge->coj; 1421 ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr); 1422 ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr); 1423 bi = merge->bi; bj = merge->bj; 1424 owners = merge->rowmap->range; 1425 ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr); 1426 ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr); 1427 1428 /* get A_loc by taking all local rows of A */ 1429 A_loc = ptap->A_loc; 1430 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 1431 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1432 ai = a_loc->i; 1433 
aj = a_loc->j; 1434 1435 for (i=0; i<am; i++) { 1436 anz = ai[i+1] - ai[i]; 1437 adj = aj + ai[i]; 1438 ada = a_loc->a + ai[i]; 1439 1440 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 1441 /*-------------------------------------------------------------*/ 1442 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 1443 pnz = po->i[i+1] - po->i[i]; 1444 poJ = po->j + po->i[i]; 1445 pA = po->a + po->i[i]; 1446 for (j=0; j<pnz; j++) { 1447 row = poJ[j]; 1448 cj = coj + coi[row]; 1449 ca = coa + coi[row]; 1450 /* perform sparse axpy */ 1451 nexta = 0; 1452 valtmp = pA[j]; 1453 for (k=0; nexta<anz; k++) { 1454 if (cj[k] == adj[nexta]) { 1455 ca[k] += valtmp*ada[nexta]; 1456 nexta++; 1457 } 1458 } 1459 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1460 } 1461 1462 /* put the value into Cd (diagonal part) */ 1463 pnz = pd->i[i+1] - pd->i[i]; 1464 pdJ = pd->j + pd->i[i]; 1465 pA = pd->a + pd->i[i]; 1466 for (j=0; j<pnz; j++) { 1467 row = pdJ[j]; 1468 cj = bj + bi[row]; 1469 ca = ba + bi[row]; 1470 /* perform sparse axpy */ 1471 nexta = 0; 1472 valtmp = pA[j]; 1473 for (k=0; nexta<anz; k++) { 1474 if (cj[k] == adj[nexta]) { 1475 ca[k] += valtmp*ada[nexta]; 1476 nexta++; 1477 } 1478 } 1479 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1480 } 1481 } 1482 1483 /* 3) send and recv matrix values coa */ 1484 /*------------------------------------*/ 1485 buf_ri = merge->buf_ri; 1486 buf_rj = merge->buf_rj; 1487 len_s = merge->len_s; 1488 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 1489 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 1490 1491 ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr); 1492 for (proc=0,k=0; proc<size; proc++) { 1493 if (!len_s[proc]) continue; 1494 i = merge->owners_co[proc]; 1495 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 1496 k++; 1497 } 1498 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 1499 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 1500 1501 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 1502 ierr = PetscFree(r_waits);CHKERRQ(ierr); 1503 ierr = PetscFree(coa);CHKERRQ(ierr); 1504 1505 /* 4) insert local Cseq and received values into Cmpi */ 1506 /*----------------------------------------------------*/ 1507 ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr); 1508 for (k=0; k<merge->nrecv; k++) { 1509 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1510 nrows = *(buf_ri_k[k]); 1511 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */ 1512 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1513 } 1514 1515 for (i=0; i<cm; i++) { 1516 row = owners[rank] + i; /* global row index of C_seq */ 1517 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1518 ba_i = ba + bi[i]; 1519 bnz = bi[i+1] - bi[i]; 1520 /* add received vals into ba */ 1521 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1522 /* i-th row */ 1523 if (i == *nextrow[k]) { 1524 cnz = *(nextci[k]+1) - *nextci[k]; 1525 cj = buf_rj[k] + *(nextci[k]); 1526 ca = abuf_r[k] + *(nextci[k]); 1527 nextcj = 0; 1528 for (j=0; nextcj<cnz; j++) { 1529 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1530 ba_i[j] += ca[nextcj++]; 1531 } 1532 } 1533 nextrow[k]++; 
        nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ();
   it differs from MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable in using PetscLLCondensedCreate_Scalable() */
#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable"
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi,A_loc,POt,PDt;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*c;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rstart=P->cmap->rstart,rmax,aN=A->cmap->N,Crmax;
  PetscScalar         *vals;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;
  a_loc       = (Mat_SeqAIJ*)(A_loc)->data;
  ai          = a_loc->i;
  aj          = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz           = fill*(poti[pon] + ai[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  i     = PetscMax(pdt->rmax,pot->rmax);
  Crmax = i*a_loc->rmax*size; /* non-scalable! */
  if (!Crmax || Crmax > aN) Crmax = aN;
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0;  /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
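  /* For each destination process, send the contiguous slice of coj holding the column
     indices of the Co rows that process owns; len_s[proc], accumulated above from coi,
     is the total number of column indices destined for that process. */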
  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:           nrows to be sent
         [1:nrows]:           row index (global)
         [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi               = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = fill*(pdti[pn] + poti[pon] + ai[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j];  /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr      = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can this not be assembled in the Numeric part? */
  /*--------------------------------------------------------------------------------------------*/
  ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
  ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
  Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;

  c->ptap     = ptap;
  ptap->api   = NULL;
  ptap->apj   = NULL;
  ptap->merge = merge;
  ptap->rmax  = rmax;
  ptap->apa   = NULL;

  *C = Cmpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
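/*
   Illustrative usage sketch (not part of this file): the symbolic and numeric routines above
   are normally reached through the public MatTransposeMatMult() interface. Assuming P and A
   are assembled MATMPIAIJ matrices with compatible row layouts, a caller might do:

     Mat C;
     ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  / * symbolic + numeric C = P^T*A, fill estimate 2.0 * /
     ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    / * recompute values after P or A change, reusing symbolic data * /
     ierr = MatDestroy(&C);CHKERRQ(ierr);

   The MAT_REUSE_MATRIX path relies on the Mat_PtAPMPI and Mat_Merge_SeqsToMPI structures that
   the symbolic routines attach to C above.
*/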