/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petsc-private/vecimpl.h>

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill, Mat *C)
{
  PetscErrorCode ierr;
  const char     *algTypes[2] = {"scalable","nonscalable"};
  PetscInt       alg=0; /* set default algorithm */

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,2,algTypes[0],&alg,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    switch (alg) {
    case 1:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C);CHKERRQ(ierr);
      break;
    default:
      ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
      break;
    }
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy = ptap->destroy; /* = MatDestroy_MPIAIJ, *M doesn't duplicate A's special structure!
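     The duplicate created by (*ptap->duplicate)(), i.e. MatDuplicate_MPIAIJ, carries only the
     plain MPIAIJ data; the Mat_PtAPMPI support structure (P_loc, P_oth, api/apj, apa) built by
     the symbolic product is not copied, so *M is handed back the default destroy/duplicate
     methods that were saved in ptap when the product matrix was created.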
*/ 78 (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */ 79 PetscFunctionReturn(0); 80 } 81 82 #undef __FUNCT__ 83 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ" 84 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 85 { 86 PetscErrorCode ierr; 87 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 88 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 89 Mat_SeqAIJ *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 90 PetscInt *adi=ad->i,*adj,*aoi=ao->i,*aoj; 91 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 92 Mat_SeqAIJ *p_loc,*p_oth; 93 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 94 PetscScalar *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca; 95 PetscInt cm =C->rmap->n,anz,pnz; 96 Mat_PtAPMPI *ptap=c->ptap; 97 PetscInt *api,*apj,*apJ,i,j,k,row; 98 PetscInt cstart=C->cmap->rstart; 99 PetscInt cdnz,conz,k0,k1; 100 101 PetscFunctionBegin; 102 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 103 /*-----------------------------------------------------*/ 104 /* update numerical values of P_oth and P_loc */ 105 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 106 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 107 108 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 109 /*----------------------------------------------------------*/ 110 /* get data from symbolic products */ 111 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 112 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 113 pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a; 114 pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a; 115 116 /* get apa for storing dense row A[i,:]*P */ 117 apa = ptap->apa; 118 119 api = ptap->api; 120 apj = ptap->apj; 121 for (i=0; i<cm; i++) { 122 /* diagonal portion of A */ 123 anz = adi[i+1] - adi[i]; 124 adj = ad->j + adi[i]; 125 ada = ad->a + adi[i]; 126 for (j=0; j<anz; j++) { 127 row = adj[j]; 128 pnz = pi_loc[row+1] - pi_loc[row]; 129 pj = pj_loc + pi_loc[row]; 130 pa = pa_loc + pi_loc[row]; 131 132 /* perform dense axpy */ 133 valtmp = ada[j]; 134 for (k=0; k<pnz; k++) { 135 apa[pj[k]] += valtmp*pa[k]; 136 } 137 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 138 } 139 140 /* off-diagonal portion of A */ 141 anz = aoi[i+1] - aoi[i]; 142 aoj = ao->j + aoi[i]; 143 aoa = ao->a + aoi[i]; 144 for (j=0; j<anz; j++) { 145 row = aoj[j]; 146 pnz = pi_oth[row+1] - pi_oth[row]; 147 pj = pj_oth + pi_oth[row]; 148 pa = pa_oth + pi_oth[row]; 149 150 /* perform dense axpy */ 151 valtmp = aoa[j]; 152 for (k=0; k<pnz; k++) { 153 apa[pj[k]] += valtmp*pa[k]; 154 } 155 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 156 } 157 158 /* set values in C */ 159 apJ = apj + api[i]; 160 cdnz = cd->i[i+1] - cd->i[i]; 161 conz = co->i[i+1] - co->i[i]; 162 163 /* 1st off-diagoanl part of C */ 164 ca = coa + co->i[i]; 165 k = 0; 166 for (k0=0; k0<conz; k0++) { 167 if (apJ[k] >= cstart) break; 168 ca[k0] = apa[apJ[k]]; 169 apa[apJ[k]] = 0.0; 170 k++; 171 } 172 173 /* diagonal part of C */ 174 ca = cda + cd->i[i]; 175 for (k1=0; k1<cdnz; k1++) { 176 ca[k1] = apa[apJ[k]]; 177 apa[apJ[k]] = 0.0; 178 k++; 179 } 180 181 /* 2nd off-diagoanl part of C */ 182 ca = coa + co->i[i]; 183 for (; k0<conz; k0++) { 184 ca[k0] = apa[apJ[k]]; 185 apa[apJ[k]] = 0.0; 186 k++; 187 } 188 } 189 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 190 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 191 PetscFunctionReturn(0); 192 } 193 194 #undef __FUNCT__ 195 
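/*
   The two symbolic routines below build the parallel product C = A*B for MPIAIJ matrices.
   A minimal caller-side sketch (illustrative only; A and B are assumed to be assembled
   MPIAIJ matrices with compatible layouts, and 2.0 is just an example fill estimate):

       Mat C;
       ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);    symbolic + numeric
       ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);      numeric only, reuses C's structure
       ierr = MatDestroy(&C);CHKERRQ(ierr);

   The driver above selects between them with the run-time option
   -matmatmult_via scalable|nonscalable (scalable is the default, see algTypes[]).
*/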
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable" 196 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat *C) 197 { 198 PetscErrorCode ierr; 199 MPI_Comm comm; 200 Mat Cmpi; 201 Mat_PtAPMPI *ptap; 202 PetscFreeSpaceList free_space=NULL,current_space=NULL; 203 Mat_MPIAIJ *a =(Mat_MPIAIJ*)A->data,*c; 204 Mat_SeqAIJ *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 205 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 206 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 207 PetscInt *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi; 208 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 209 PetscBT lnkbt; 210 PetscScalar *apa; 211 PetscReal afill; 212 PetscInt nlnk_max,armax,prmax; 213 214 PetscFunctionBegin; 215 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 216 if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) { 217 SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend); 218 } 219 220 /* create struct Mat_PtAPMPI and attached it to C later */ 221 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 222 223 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 224 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 225 226 /* get P_loc by taking all local rows of P */ 227 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 228 229 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 230 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 231 pi_loc = p_loc->i; pj_loc = p_loc->j; 232 pi_oth = p_oth->i; pj_oth = p_oth->j; 233 234 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 235 /*-------------------------------------------------------------------*/ 236 ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr); 237 ptap->api = api; 238 api[0] = 0; 239 240 /* create and initialize a linked list */ 241 armax = ad->rmax+ao->rmax; 242 prmax = PetscMax(p_loc->rmax,p_oth->rmax); 243 nlnk_max = armax*prmax; 244 if (!nlnk_max || nlnk_max > pN) nlnk_max = pN; 245 ierr = PetscLLCondensedCreate(nlnk_max,pN,&lnk,&lnkbt);CHKERRQ(ierr); 246 247 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 248 ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr); 249 250 current_space = free_space; 251 252 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 253 for (i=0; i<am; i++) { 254 /* diagonal portion of A */ 255 nzi = adi[i+1] - adi[i]; 256 for (j=0; j<nzi; j++) { 257 row = *adj++; 258 pnz = pi_loc[row+1] - pi_loc[row]; 259 Jptr = pj_loc + pi_loc[row]; 260 /* add non-zero cols of P into the sorted linked list lnk */ 261 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 262 } 263 /* off-diagonal portion of A */ 264 nzi = aoi[i+1] - aoi[i]; 265 for (j=0; j<nzi; j++) { 266 row = *aoj++; 267 pnz = pi_oth[row+1] - pi_oth[row]; 268 Jptr = pj_oth + pi_oth[row]; 269 ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 270 } 271 272 apnz = lnk[0]; 273 api[i+1] = api[i] + apnz; 274 275 /* if free space is not available, double the total space in the list */ 276 if (current_space->local_remaining<apnz) { 277 ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,¤t_space);CHKERRQ(ierr); 278 nspacedouble++; 279 
} 280 281 /* Copy data into free space, then initialize lnk */ 282 ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 283 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 284 285 current_space->array += apnz; 286 current_space->local_used += apnz; 287 current_space->local_remaining -= apnz; 288 } 289 290 /* Allocate space for apj, initialize apj, and */ 291 /* destroy list of free space and other temporary array(s) */ 292 ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr); 293 apj = ptap->apj; 294 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 295 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 296 297 /* malloc apa to store dense row A[i,:]*P */ 298 ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr); 299 ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr); 300 301 ptap->apa = apa; 302 303 /* create and assemble symbolic parallel matrix Cmpi */ 304 /*----------------------------------------------------*/ 305 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 306 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 307 ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr); 308 309 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 310 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 311 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 312 for (i=0; i<am; i++) { 313 row = i + rstart; 314 apnz = api[i+1] - api[i]; 315 ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr); 316 apj += apnz; 317 } 318 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 319 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 320 321 ptap->destroy = Cmpi->ops->destroy; 322 ptap->duplicate = Cmpi->ops->duplicate; 323 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 324 Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult; 325 326 /* attach the supporting struct to Cmpi for reuse */ 327 c = (Mat_MPIAIJ*)Cmpi->data; 328 c->ptap = ptap; 329 330 *C = Cmpi; 331 332 /* set MatInfo */ 333 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 334 if (afill < 1.0) afill = 1.0; 335 Cmpi->info.mallocs = nspacedouble; 336 Cmpi->info.fill_ratio_given = fill; 337 Cmpi->info.fill_ratio_needed = afill; 338 339 #if defined(PETSC_USE_INFO) 340 if (api[am]) { 341 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr); 342 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.;\n",afill);CHKERRQ(ierr); 343 } else { 344 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 345 } 346 #endif 347 PetscFunctionReturn(0); 348 } 349 350 #undef __FUNCT__ 351 #define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense" 352 PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C) 353 { 354 PetscErrorCode ierr; 355 356 PetscFunctionBegin; 357 if (scall == MAT_INITIAL_MATRIX) { 358 ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 359 ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr); 360 ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr); 361 } 362 ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 363 ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr); 364 ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr); 365 PetscFunctionReturn(0); 366 } 367 368 typedef struct { 369 Mat workB; 370 PetscScalar 
*rvalues,*svalues; 371 MPI_Request *rwaits,*swaits; 372 } MPIAIJ_MPIDense; 373 374 #undef __FUNCT__ 375 #define __FUNCT__ "MatMPIAIJ_MPIDenseDestroy" 376 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx) 377 { 378 MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx; 379 PetscErrorCode ierr; 380 381 PetscFunctionBegin; 382 ierr = MatDestroy(&contents->workB);CHKERRQ(ierr); 383 ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr); 384 ierr = PetscFree(contents);CHKERRQ(ierr); 385 PetscFunctionReturn(0); 386 } 387 388 #undef __FUNCT__ 389 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense" 390 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C) 391 { 392 PetscErrorCode ierr; 393 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) A->data; 394 PetscInt nz = aij->B->cmap->n; 395 PetscContainer container; 396 MPIAIJ_MPIDense *contents; 397 VecScatter ctx = aij->Mvctx; 398 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 399 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 400 PetscInt m = A->rmap->n,n=B->cmap->n; 401 402 PetscFunctionBegin; 403 ierr = MatCreate(PetscObjectComm((PetscObject)B),C);CHKERRQ(ierr); 404 ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr); 405 ierr = MatSetBlockSizes(*C,A->rmap->bs,B->cmap->bs);CHKERRQ(ierr); 406 ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr); 407 ierr = MatMPIDenseSetPreallocation(*C,NULL);CHKERRQ(ierr); 408 ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 409 ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 410 411 (*C)->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense; 412 413 ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr); 414 /* Create work matrix used to store off processor rows of B needed for local product */ 415 ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,NULL,&contents->workB);CHKERRQ(ierr); 416 /* Create work arrays needed */ 417 ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues, 418 B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues, 419 from->n,MPI_Request,&contents->rwaits, 420 to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr); 421 422 ierr = PetscContainerCreate(PetscObjectComm((PetscObject)A),&container);CHKERRQ(ierr); 423 ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr); 424 ierr = PetscContainerSetUserDestroy(container,MatMPIAIJ_MPIDenseDestroy);CHKERRQ(ierr); 425 ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr); 426 ierr = PetscContainerDestroy(&container);CHKERRQ(ierr); 427 PetscFunctionReturn(0); 428 } 429 430 #undef __FUNCT__ 431 #define __FUNCT__ "MatMPIDenseScatter" 432 /* 433 Performs an efficient scatter on the rows of B needed by this process; this is 434 a modification of the VecScatterBegin_() routines. 
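   For C = A*B with B dense, each process needs the rows of B indexed by the global columns of
   its off-diagonal block aij->B.  The communication pattern of aij->Mvctx is reused: an MPI_Irecv
   is posted per sending neighbour into contents->rvalues, the locally owned rows are packed
   column by column into contents->svalues and sent with MPI_Isend, and received blocks are
   unpacked into the column-major array of workB (which has aij->B->cmap->n rows).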
435 */ 436 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB) 437 { 438 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 439 PetscErrorCode ierr; 440 PetscScalar *b,*w,*svalues,*rvalues; 441 VecScatter ctx = aij->Mvctx; 442 VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata; 443 VecScatter_MPI_General *to = (VecScatter_MPI_General*) ctx->todata; 444 PetscInt i,j,k; 445 PetscInt *sindices,*sstarts,*rindices,*rstarts; 446 PetscMPIInt *sprocs,*rprocs,nrecvs; 447 MPI_Request *swaits,*rwaits; 448 MPI_Comm comm; 449 PetscMPIInt tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N, nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n; 450 MPI_Status status; 451 MPIAIJ_MPIDense *contents; 452 PetscContainer container; 453 Mat workB; 454 455 PetscFunctionBegin; 456 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 457 ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr); 458 if (!container) SETERRQ(comm,PETSC_ERR_PLIB,"Container does not exist"); 459 ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr); 460 461 workB = *outworkB = contents->workB; 462 if (nrows != workB->rmap->n) SETERRQ2(comm,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",nrows,workB->cmap->n); 463 sindices = to->indices; 464 sstarts = to->starts; 465 sprocs = to->procs; 466 swaits = contents->swaits; 467 svalues = contents->svalues; 468 469 rindices = from->indices; 470 rstarts = from->starts; 471 rprocs = from->procs; 472 rwaits = contents->rwaits; 473 rvalues = contents->rvalues; 474 475 ierr = MatDenseGetArray(B,&b);CHKERRQ(ierr); 476 ierr = MatDenseGetArray(workB,&w);CHKERRQ(ierr); 477 478 for (i=0; i<from->n; i++) { 479 ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr); 480 } 481 482 for (i=0; i<to->n; i++) { 483 /* pack a message at a time */ 484 for (j=0; j<sstarts[i+1]-sstarts[i]; j++) { 485 for (k=0; k<ncols; k++) { 486 svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k]; 487 } 488 } 489 ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr); 490 } 491 492 nrecvs = from->n; 493 while (nrecvs) { 494 ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr); 495 nrecvs--; 496 /* unpack a message at a time */ 497 for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) { 498 for (k=0; k<ncols; k++) { 499 w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k]; 500 } 501 } 502 } 503 if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);} 504 505 ierr = MatDenseRestoreArray(B,&b);CHKERRQ(ierr); 506 ierr = MatDenseRestoreArray(workB,&w);CHKERRQ(ierr); 507 ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 508 ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 509 PetscFunctionReturn(0); 510 } 511 extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat); 512 513 #undef __FUNCT__ 514 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense" 515 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C) 516 { 517 PetscErrorCode ierr; 518 Mat_MPIAIJ *aij = (Mat_MPIAIJ*)A->data; 519 Mat_MPIDense *bdense = (Mat_MPIDense*)B->data; 520 Mat_MPIDense *cdense = (Mat_MPIDense*)C->data; 521 Mat workB; 522 523 PetscFunctionBegin; 524 /* diagonal block of A times all local rows of B*/ 525 ierr = 
MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr); 526 527 /* get off processor parts of B needed to complete the product */ 528 ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr); 529 530 /* off-diagonal block of A times nonlocal rows of B */ 531 ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr); 532 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 533 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 534 PetscFunctionReturn(0); 535 } 536 537 #undef __FUNCT__ 538 #define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable" 539 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat A,Mat P,Mat C) 540 { 541 PetscErrorCode ierr; 542 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data; 543 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data; 544 Mat_SeqAIJ *cd = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data; 545 PetscInt *adi = ad->i,*adj,*aoi=ao->i,*aoj; 546 PetscScalar *ada,*aoa,*cda=cd->a,*coa=co->a; 547 Mat_SeqAIJ *p_loc,*p_oth; 548 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj; 549 PetscScalar *pa_loc,*pa_oth,*pa,valtmp,*ca; 550 PetscInt cm = C->rmap->n,anz,pnz; 551 Mat_PtAPMPI *ptap = c->ptap; 552 PetscScalar *apa_sparse = ptap->apa; 553 PetscInt *api,*apj,*apJ,i,j,k,row; 554 PetscInt cstart = C->cmap->rstart; 555 PetscInt cdnz,conz,k0,k1,nextp; 556 557 PetscFunctionBegin; 558 /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */ 559 /*-----------------------------------------------------*/ 560 /* update numerical values of P_oth and P_loc */ 561 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 562 ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 563 564 /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */ 565 /*----------------------------------------------------------*/ 566 /* get data from symbolic products */ 567 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 568 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 569 pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a; 570 pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a; 571 572 api = ptap->api; 573 apj = ptap->apj; 574 for (i=0; i<cm; i++) { 575 apJ = apj + api[i]; 576 577 /* diagonal portion of A */ 578 anz = adi[i+1] - adi[i]; 579 adj = ad->j + adi[i]; 580 ada = ad->a + adi[i]; 581 for (j=0; j<anz; j++) { 582 row = adj[j]; 583 pnz = pi_loc[row+1] - pi_loc[row]; 584 pj = pj_loc + pi_loc[row]; 585 pa = pa_loc + pi_loc[row]; 586 /* perform sparse axpy */ 587 valtmp = ada[j]; 588 nextp = 0; 589 for (k=0; nextp<pnz; k++) { 590 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 591 apa_sparse[k] += valtmp*pa[nextp++]; 592 } 593 } 594 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 595 } 596 597 /* off-diagonal portion of A */ 598 anz = aoi[i+1] - aoi[i]; 599 aoj = ao->j + aoi[i]; 600 aoa = ao->a + aoi[i]; 601 for (j=0; j<anz; j++) { 602 row = aoj[j]; 603 pnz = pi_oth[row+1] - pi_oth[row]; 604 pj = pj_oth + pi_oth[row]; 605 pa = pa_oth + pi_oth[row]; 606 /* perform sparse axpy */ 607 valtmp = aoa[j]; 608 nextp = 0; 609 for (k=0; nextp<pnz; k++) { 610 if (apJ[k] == pj[nextp]) { /* column of AP == column of P */ 611 apa_sparse[k] += valtmp*pa[nextp++]; 612 } 613 } 614 ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr); 615 } 616 617 /* set values in C */ 618 cdnz = cd->i[i+1] - cd->i[i]; 619 conz = co->i[i+1] - co->i[i]; 620 621 /* 1st off-diagoanl part of C */ 622 ca = coa + co->i[i]; 623 k = 0; 624 for 
(k0=0; k0<conz; k0++) { 625 if (apJ[k] >= cstart) break; 626 ca[k0] = apa_sparse[k]; 627 apa_sparse[k] = 0.0; 628 k++; 629 } 630 631 /* diagonal part of C */ 632 ca = cda + cd->i[i]; 633 for (k1=0; k1<cdnz; k1++) { 634 ca[k1] = apa_sparse[k]; 635 apa_sparse[k] = 0.0; 636 k++; 637 } 638 639 /* 2nd off-diagoanl part of C */ 640 ca = coa + co->i[i]; 641 for (; k0<conz; k0++) { 642 ca[k0] = apa_sparse[k]; 643 apa_sparse[k] = 0.0; 644 k++; 645 } 646 } 647 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 648 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 649 PetscFunctionReturn(0); 650 } 651 652 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using LLCondensed to avoid O(BN) memory requirement */ 653 #undef __FUNCT__ 654 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 655 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 656 { 657 PetscErrorCode ierr; 658 MPI_Comm comm; 659 Mat Cmpi; 660 Mat_PtAPMPI *ptap; 661 PetscFreeSpaceList free_space = NULL,current_space=NULL; 662 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data,*c; 663 Mat_SeqAIJ *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth; 664 PetscInt *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz; 665 PetscInt *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart; 666 PetscInt i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=0; 667 PetscInt am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n; 668 PetscInt nlnk_max,armax,prmax; 669 PetscReal afill; 670 PetscScalar *apa; 671 672 PetscFunctionBegin; 673 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 674 /* create struct Mat_PtAPMPI and attached it to C later */ 675 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 676 677 /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */ 678 ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr); 679 680 /* get P_loc by taking all local rows of P */ 681 ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr); 682 683 p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data; 684 p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data; 685 pi_loc = p_loc->i; pj_loc = p_loc->j; 686 pi_oth = p_oth->i; pj_oth = p_oth->j; 687 688 /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */ 689 /*-------------------------------------------------------------------*/ 690 ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr); 691 ptap->api = api; 692 api[0] = 0; 693 694 /* create and initialize a linked list */ 695 armax = ad->rmax+ao->rmax; 696 prmax = PetscMax(p_loc->rmax,p_oth->rmax); 697 nlnk_max = armax*prmax; 698 if (!nlnk_max || nlnk_max > pN) nlnk_max = pN; 699 ierr = PetscLLCondensedCreate_Scalable(nlnk_max,&lnk);CHKERRQ(ierr); 700 701 /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */ 702 ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr); 703 704 current_space = free_space; 705 706 ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr); 707 for (i=0; i<am; i++) { 708 /* diagonal portion of A */ 709 nzi = adi[i+1] - adi[i]; 710 for (j=0; j<nzi; j++) { 711 row = *adj++; 712 pnz = pi_loc[row+1] - pi_loc[row]; 713 Jptr = pj_loc + pi_loc[row]; 714 /* add non-zero cols of P into the sorted linked list lnk */ 715 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 716 } 717 /* off-diagonal portion of A */ 718 nzi = aoi[i+1] - aoi[i]; 719 
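    /* Row i of C receives the union of the nonzero columns of P (local and off-process parts)
       over all nonzero columns of row i of A.  The _Scalable linked-list variant used here keeps
       only the condensed list itself and no PetscBT bit array over all pN global columns, which
       is what avoids the O(pN) per-process memory of the nonscalable routine above. */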
for (j=0; j<nzi; j++) { 720 row = *aoj++; 721 pnz = pi_oth[row+1] - pi_oth[row]; 722 Jptr = pj_oth + pi_oth[row]; 723 ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr); 724 } 725 726 apnz = *lnk; 727 api[i+1] = api[i] + apnz; 728 if (apnz > apnz_max) apnz_max = apnz; 729 730 /* if free space is not available, double the total space in the list */ 731 if (current_space->local_remaining<apnz) { 732 ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,¤t_space);CHKERRQ(ierr); 733 nspacedouble++; 734 } 735 736 /* Copy data into free space, then initialize lnk */ 737 ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr); 738 ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr); 739 740 current_space->array += apnz; 741 current_space->local_used += apnz; 742 current_space->local_remaining -= apnz; 743 } 744 745 /* Allocate space for apj, initialize apj, and */ 746 /* destroy list of free space and other temporary array(s) */ 747 ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr); 748 apj = ptap->apj; 749 ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr); 750 ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr); 751 752 /* create and assemble symbolic parallel matrix Cmpi */ 753 /*----------------------------------------------------*/ 754 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 755 ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 756 ierr = MatSetBlockSizes(Cmpi,A->rmap->bs,P->cmap->bs);CHKERRQ(ierr); 757 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 758 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 759 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 760 761 /* malloc apa for assembly Cmpi */ 762 ierr = PetscMalloc(apnz_max*sizeof(PetscScalar),&apa);CHKERRQ(ierr); 763 ierr = PetscMemzero(apa,apnz_max*sizeof(PetscScalar));CHKERRQ(ierr); 764 765 ptap->apa = apa; 766 for (i=0; i<am; i++) { 767 row = i + rstart; 768 apnz = api[i+1] - api[i]; 769 ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr); 770 apj += apnz; 771 } 772 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 773 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 774 775 ptap->destroy = Cmpi->ops->destroy; 776 ptap->duplicate = Cmpi->ops->duplicate; 777 Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_Scalable; 778 Cmpi->ops->destroy = MatDestroy_MPIAIJ_MatMatMult; 779 Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult; 780 781 /* attach the supporting struct to Cmpi for reuse */ 782 c = (Mat_MPIAIJ*)Cmpi->data; 783 c->ptap = ptap; 784 785 *C = Cmpi; 786 787 /* set MatInfo */ 788 afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5; 789 if (afill < 1.0) afill = 1.0; 790 Cmpi->info.mallocs = nspacedouble; 791 Cmpi->info.fill_ratio_given = fill; 792 Cmpi->info.fill_ratio_needed = afill; 793 794 #if defined(PETSC_USE_INFO) 795 if (api[am]) { 796 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr); 797 ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.;\n",afill);CHKERRQ(ierr); 798 } else { 799 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 800 } 801 #endif 802 PetscFunctionReturn(0); 803 } 804 805 /*-------------------------------------------------------------------------*/ 806 #undef __FUNCT__ 807 #define __FUNCT__ "MatTransposeMatMult_MPIAIJ_MPIAIJ" 808 PetscErrorCode 
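/* Driver for C = P^T * A with P and A MPIAIJ.  The algorithm is chosen with the run-time option
   -mattransposematmult_via scalable|nonscalable|matmatmult (scalable is the default); the
   "matmatmult" variant forms Pt explicitly and falls back on the ordinary MatMatMult kernels.
   Caller-side sketch (illustrative only; the fill value 1.0 is just an example):

       Mat C;
       ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,1.0,&C);CHKERRQ(ierr);
*/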
MatTransposeMatMult_MPIAIJ_MPIAIJ(Mat P,Mat A,MatReuse scall,PetscReal fill,Mat *C) 809 { 810 PetscErrorCode ierr; 811 const char *algTypes[3] = {"scalable","nonscalable","matmatmult"}; 812 PetscInt alg=0; /* set default algorithm */ 813 814 PetscFunctionBegin; 815 if (scall == MAT_INITIAL_MATRIX) { 816 ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr); 817 ierr = PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,3,algTypes[0],&alg,NULL);CHKERRQ(ierr); 818 ierr = PetscOptionsEnd();CHKERRQ(ierr); 819 820 ierr = PetscLogEventBegin(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 821 switch (alg) { 822 case 1: 823 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(P,A,fill,C);CHKERRQ(ierr); 824 break; 825 case 2: 826 Mat Pt; 827 Mat_PtAPMPI *ptap; 828 Mat_MPIAIJ *c; 829 ierr = MatTranspose(P,MAT_INITIAL_MATRIX,&Pt);CHKERRQ(ierr); 830 ierr = MatMatMult(Pt,A,MAT_INITIAL_MATRIX,fill,C);CHKERRQ(ierr); 831 c = (Mat_MPIAIJ*)(*C)->data; 832 ptap = c->ptap; 833 ptap->Pt = Pt; 834 (*C)->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult; 835 PetscFunctionReturn(0); 836 break; 837 default: 838 ierr = MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(P,A,fill,C);CHKERRQ(ierr); 839 break; 840 } 841 ierr = PetscLogEventEnd(MAT_TransposeMatMultSymbolic,P,A,0,0);CHKERRQ(ierr); 842 } 843 ierr = PetscLogEventBegin(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 844 ierr = (*(*C)->ops->mattransposemultnumeric)(P,A,*C);CHKERRQ(ierr); 845 ierr = PetscLogEventEnd(MAT_TransposeMatMultNumeric,P,A,0,0);CHKERRQ(ierr); 846 PetscFunctionReturn(0); 847 } 848 849 /* This routine only works when scall=MAT_REUSE_MATRIX! */ 850 #undef __FUNCT__ 851 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult" 852 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C) 853 { 854 PetscErrorCode ierr; 855 Mat Pt; 856 Mat_PtAPMPI *ptap; 857 Mat_MPIAIJ *c; 858 859 PetscFunctionBegin; 860 printf("MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult ...\n"); 861 c = (Mat_MPIAIJ*)C->data; 862 ptap = c->ptap; 863 Pt = ptap->Pt; 864 ierr = MatTranspose(P,MAT_REUSE_MATRIX,&Pt);CHKERRQ(ierr); 865 ierr = MatMatMultNumeric(Pt,A,C);CHKERRQ(ierr); 866 PetscFunctionReturn(0); 867 } 868 869 #undef __FUNCT__ 870 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ" 871 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C) 872 { 873 PetscErrorCode ierr; 874 Mat_Merge_SeqsToMPI *merge; 875 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 876 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 877 Mat_PtAPMPI *ptap; 878 PetscInt *adj,*aJ; 879 PetscInt i,j,k,anz,pnz,row,*cj; 880 MatScalar *ada,*aval,*ca,valtmp; 881 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 882 MPI_Comm comm; 883 PetscMPIInt size,rank,taga,*len_s; 884 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 885 PetscInt **buf_ri,**buf_rj; 886 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 887 MPI_Request *s_waits,*r_waits; 888 MPI_Status *status; 889 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 890 PetscInt *ai,*aj,*coi,*coj; 891 PetscInt *poJ,*pdJ; 892 Mat A_loc; 893 Mat_SeqAIJ *a_loc; 894 895 PetscFunctionBegin; 896 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 897 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 898 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 899 900 ptap = c->ptap; 901 merge = ptap->merge; 902 903 
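  /* The local contribution is computed as a sum of outer products,
         C_loc = P_loc^T * A_loc = sum_i P_loc[i,:]^T * A_loc[i,:],
     i.e. for each local row i of A the dense copy aval of A[i,:] is scaled by every nonzero
     P[i,j] and accumulated into row j of C.  Rows j that belong to the off-diagonal block of P
     are accumulated into coa and shipped to their owning processes in step 3 below. */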
/* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */ 904 /*--------------------------------------------------------------*/ 905 /* get data from symbolic products */ 906 coi = merge->coi; coj = merge->coj; 907 ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr); 908 ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr); 909 910 bi = merge->bi; bj = merge->bj; 911 owners = merge->rowmap->range; 912 ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr); 913 ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr); 914 915 /* get A_loc by taking all local rows of A */ 916 A_loc = ptap->A_loc; 917 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 918 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 919 ai = a_loc->i; 920 aj = a_loc->j; 921 922 ierr = PetscMalloc((A->cmap->N)*sizeof(PetscScalar),&aval);CHKERRQ(ierr); /* non-scalable!!! */ 923 ierr = PetscMemzero(aval,A->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr); 924 925 for (i=0; i<am; i++) { 926 /* 2-a) put A[i,:] to dense array aval */ 927 anz = ai[i+1] - ai[i]; 928 adj = aj + ai[i]; 929 ada = a_loc->a + ai[i]; 930 for (j=0; j<anz; j++) { 931 aval[adj[j]] = ada[j]; 932 } 933 934 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 935 /*--------------------------------------------------------------*/ 936 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 937 pnz = po->i[i+1] - po->i[i]; 938 poJ = po->j + po->i[i]; 939 pA = po->a + po->i[i]; 940 for (j=0; j<pnz; j++) { 941 row = poJ[j]; 942 cnz = coi[row+1] - coi[row]; 943 cj = coj + coi[row]; 944 ca = coa + coi[row]; 945 /* perform dense axpy */ 946 valtmp = pA[j]; 947 for (k=0; k<cnz; k++) { 948 ca[k] += valtmp*aval[cj[k]]; 949 } 950 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 951 } 952 953 /* put the value into Cd (diagonal part) */ 954 pnz = pd->i[i+1] - pd->i[i]; 955 pdJ = pd->j + pd->i[i]; 956 pA = pd->a + pd->i[i]; 957 for (j=0; j<pnz; j++) { 958 row = pdJ[j]; 959 cnz = bi[row+1] - bi[row]; 960 cj = bj + bi[row]; 961 ca = ba + bi[row]; 962 /* perform dense axpy */ 963 valtmp = pA[j]; 964 for (k=0; k<cnz; k++) { 965 ca[k] += valtmp*aval[cj[k]]; 966 } 967 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 968 } 969 970 /* zero the current row of Pt*A */ 971 aJ = aj + ai[i]; 972 for (k=0; k<anz; k++) aval[aJ[k]] = 0.0; 973 } 974 975 /* 3) send and recv matrix values coa */ 976 /*------------------------------------*/ 977 buf_ri = merge->buf_ri; 978 buf_rj = merge->buf_rj; 979 len_s = merge->len_s; 980 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 981 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 982 983 ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr); 984 for (proc=0,k=0; proc<size; proc++) { 985 if (!len_s[proc]) continue; 986 i = merge->owners_co[proc]; 987 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 988 k++; 989 } 990 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 991 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 992 993 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 994 ierr = PetscFree(r_waits);CHKERRQ(ierr); 995 ierr = PetscFree(coa);CHKERRQ(ierr); 996 997 /* 4) insert local Cseq and received values into Cmpi */ 998 /*----------------------------------------------------*/ 999 ierr = 
PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr); 1000 for (k=0; k<merge->nrecv; k++) { 1001 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1002 nrows = *(buf_ri_k[k]); 1003 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */ 1004 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1005 } 1006 1007 for (i=0; i<cm; i++) { 1008 row = owners[rank] + i; /* global row index of C_seq */ 1009 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1010 ba_i = ba + bi[i]; 1011 bnz = bi[i+1] - bi[i]; 1012 /* add received vals into ba */ 1013 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1014 /* i-th row */ 1015 if (i == *nextrow[k]) { 1016 cnz = *(nextci[k]+1) - *nextci[k]; 1017 cj = buf_rj[k] + *(nextci[k]); 1018 ca = abuf_r[k] + *(nextci[k]); 1019 nextcj = 0; 1020 for (j=0; nextcj<cnz; j++) { 1021 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1022 ba_i[j] += ca[nextcj++]; 1023 } 1024 } 1025 nextrow[k]++; nextci[k]++; 1026 ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr); 1027 } 1028 } 1029 ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr); 1030 } 1031 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1032 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1033 1034 ierr = PetscFree(ba);CHKERRQ(ierr); 1035 ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr); 1036 ierr = PetscFree(abuf_r);CHKERRQ(ierr); 1037 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1038 ierr = PetscFree(aval);CHKERRQ(ierr); 1039 PetscFunctionReturn(0); 1040 } 1041 1042 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */ 1043 #undef __FUNCT__ 1044 #define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ" 1045 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat *C) 1046 { 1047 PetscErrorCode ierr; 1048 Mat Cmpi,A_loc,POt,PDt; 1049 Mat_PtAPMPI *ptap; 1050 PetscFreeSpaceList free_space=NULL,current_space=NULL; 1051 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c; 1052 PetscInt *pdti,*pdtj,*poti,*potj,*ptJ; 1053 PetscInt nnz; 1054 PetscInt *lnk,*owners_co,*coi,*coj,i,k,pnz,row; 1055 PetscInt am=A->rmap->n,pn=P->cmap->n; 1056 PetscBT lnkbt; 1057 MPI_Comm comm; 1058 PetscMPIInt size,rank,tagi,tagj,*len_si,*len_s,*len_ri; 1059 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 1060 PetscInt len,proc,*dnz,*onz,*owners; 1061 PetscInt nzi,*bi,*bj; 1062 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci; 1063 MPI_Request *swaits,*rwaits; 1064 MPI_Status *sstatus,rstatus; 1065 Mat_Merge_SeqsToMPI *merge; 1066 PetscInt *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j; 1067 PetscReal afill =1.0,afill_tmp; 1068 PetscInt rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Crmax; 1069 PetscScalar *vals; 1070 Mat_SeqAIJ *a_loc, *pdt,*pot; 1071 1072 PetscFunctionBegin; 1073 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1074 /* check if matrix local sizes are compatible */ 1075 if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) { 1076 SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend); 1077 } 1078 1079 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1080 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1081 1082 /* create struct Mat_PtAPMPI and attached it to C later */ 1083 ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr); 1084 1085 
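  /* Outline of the symbolic phase: (1) gather A_loc = all local rows of A and form the symbolic
     transposes PDt = (p->A)^T and POt = (p->B)^T; (2) compute the structure of Co = (p->B)^T*A_loc,
     whose rows belong to other processes, and send its i/j structure to the owners; (3) merge the
     locally owned rows (p->A)^T*A_loc with the received rows to preallocate and assemble the
     symbolic Cmpi. */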
/* get A_loc by taking all local rows of A */ 1086 ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr); 1087 1088 ptap->A_loc = A_loc; 1089 1090 a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1091 ai = a_loc->i; 1092 aj = a_loc->j; 1093 1094 /* determine symbolic Co=(p->B)^T*A - send to others */ 1095 /*----------------------------------------------------*/ 1096 ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr); 1097 pdt = (Mat_SeqAIJ*)PDt->data; 1098 pdti = pdt->i; pdtj = pdt->j; 1099 1100 ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr); 1101 pot = (Mat_SeqAIJ*)POt->data; 1102 poti = pot->i; potj = pot->j; 1103 1104 /* then, compute symbolic Co = (p->B)^T*A */ 1105 pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors >= (num of nonzero rows of C_seq) - pn */ 1106 ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr); 1107 coi[0] = 0; 1108 1109 /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */ 1110 nnz = fill*(poti[pon] + ai[am]); 1111 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 1112 current_space = free_space; 1113 1114 /* create and initialize a linked list */ 1115 i = PetscMax(pdt->rmax,pot->rmax); 1116 Crmax = i*a_loc->rmax*size; 1117 if (!Crmax || Crmax > aN) Crmax = aN; 1118 ierr = PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt);CHKERRQ(ierr); 1119 1120 for (i=0; i<pon; i++) { 1121 pnz = poti[i+1] - poti[i]; 1122 ptJ = potj + poti[i]; 1123 for (j=0; j<pnz; j++) { 1124 row = ptJ[j]; /* row of A_loc == col of Pot */ 1125 anz = ai[row+1] - ai[row]; 1126 Jptr = aj + ai[row]; 1127 /* add non-zero cols of AP into the sorted linked list lnk */ 1128 ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1129 } 1130 nnz = lnk[0]; 1131 1132 /* If free space is not available, double the total space in the list */ 1133 if (current_space->local_remaining<nnz) { 1134 ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,¤t_space);CHKERRQ(ierr); 1135 nspacedouble++; 1136 } 1137 1138 /* Copy data into free space, and zero out denserows */ 1139 ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1140 1141 current_space->array += nnz; 1142 current_space->local_used += nnz; 1143 current_space->local_remaining -= nnz; 1144 1145 coi[i+1] = coi[i] + nnz; 1146 } 1147 1148 ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr); 1149 ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr); 1150 1151 afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1); 1152 if (afill_tmp > afill) afill = afill_tmp; 1153 1154 /* send j-array (coj) of Co to other processors */ 1155 /*----------------------------------------------*/ 1156 /* determine row ownership */ 1157 ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr); 1158 ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr); 1159 1160 merge->rowmap->n = pn; 1161 merge->rowmap->bs = 1; 1162 1163 ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr); 1164 owners = merge->rowmap->range; 1165 1166 /* determine the number of messages to send, their lengths */ 1167 ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr); 1168 ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1169 ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr); 1170 1171 len_s = merge->len_s; 1172 merge->nsend = 0; 1173 1174 ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr); 1175 ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr); 1176 1177 proc = 0; 1178 
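  /* prmap = p->garray maps each row i of Co to the global row prmap[i] of C = P^T*A; rows of Co
     are already ordered by owner, so a single sweep counts, per destination process, the number
     of rows (len_si) and the number of nonzeros (len_s) that will be sent. */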
for (i=0; i<pon; i++) { 1179 while (prmap[i] >= owners[proc+1]) proc++; 1180 len_si[proc]++; /* num of rows in Co to be sent to [proc] */ 1181 len_s[proc] += coi[i+1] - coi[i]; 1182 } 1183 1184 len = 0; /* max length of buf_si[] */ 1185 owners_co[0] = 0; 1186 for (proc=0; proc<size; proc++) { 1187 owners_co[proc+1] = owners_co[proc] + len_si[proc]; 1188 if (len_si[proc]) { 1189 merge->nsend++; 1190 len_si[proc] = 2*(len_si[proc] + 1); 1191 len += len_si[proc]; 1192 } 1193 } 1194 1195 /* determine the number and length of messages to receive for coi and coj */ 1196 ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr); 1197 ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr); 1198 1199 /* post the Irecv and Isend of coj */ 1200 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 1201 ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr); 1202 ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr); 1203 for (proc=0, k=0; proc<size; proc++) { 1204 if (!len_s[proc]) continue; 1205 i = owners_co[proc]; 1206 ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr); 1207 k++; 1208 } 1209 1210 /* receives and sends of coj are complete */ 1211 ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr); 1212 for (i=0; i<merge->nrecv; i++) { 1213 PetscMPIInt icompleted; 1214 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1215 } 1216 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1217 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1218 1219 /* send and recv coi */ 1220 /*-------------------*/ 1221 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 1222 ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr); 1223 ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr); 1224 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 1225 for (proc=0,k=0; proc<size; proc++) { 1226 if (!len_s[proc]) continue; 1227 /* form outgoing message for i-structure: 1228 buf_si[0]: nrows to be sent 1229 [1:nrows]: row index (global) 1230 [nrows+1:2*nrows+1]: i-structure index 1231 */ 1232 /*-------------------------------------------*/ 1233 nrows = len_si[proc]/2 - 1; 1234 buf_si_i = buf_si + nrows+1; 1235 buf_si[0] = nrows; 1236 buf_si_i[0] = 0; 1237 nrows = 0; 1238 for (i=owners_co[proc]; i<owners_co[proc+1]; i++) { 1239 nzi = coi[i+1] - coi[i]; 1240 buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */ 1241 buf_si[nrows+1] = prmap[i] -owners[proc]; /* local row index */ 1242 nrows++; 1243 } 1244 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr); 1245 k++; 1246 buf_si += len_si[proc]; 1247 } 1248 i = merge->nrecv; 1249 while (i--) { 1250 PetscMPIInt icompleted; 1251 ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr); 1252 } 1253 ierr = PetscFree(rwaits);CHKERRQ(ierr); 1254 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);} 1255 ierr = PetscFree(len_si);CHKERRQ(ierr); 1256 ierr = PetscFree(len_ri);CHKERRQ(ierr); 1257 ierr = PetscFree(swaits);CHKERRQ(ierr); 1258 ierr = PetscFree(sstatus);CHKERRQ(ierr); 1259 ierr = PetscFree(buf_s);CHKERRQ(ierr); 1260 1261 /* compute the local portion of C (mpi mat) */ 1262 /*------------------------------------------*/ 1263 /* allocate bi array and free 
space for accumulating nonzero column info */ 1264 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr); 1265 bi[0] = 0; 1266 1267 /* set initial free space to be fill*(nnz(P) + nnz(A)) */ 1268 nnz = fill*(pdti[pn] + poti[pon] + ai[am]); 1269 ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr); 1270 current_space = free_space; 1271 1272 ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr); 1273 for (k=0; k<merge->nrecv; k++) { 1274 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1275 nrows = *buf_ri_k[k]; 1276 nextrow[k] = buf_ri_k[k] + 1; /* next row number of k-th recved i-structure */ 1277 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1278 } 1279 1280 ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr); 1281 rmax = 0; 1282 for (i=0; i<pn; i++) { 1283 /* add pdt[i,:]*AP into lnk */ 1284 pnz = pdti[i+1] - pdti[i]; 1285 ptJ = pdtj + pdti[i]; 1286 for (j=0; j<pnz; j++) { 1287 row = ptJ[j]; /* row of AP == col of Pt */ 1288 anz = ai[row+1] - ai[row]; 1289 Jptr = aj + ai[row]; 1290 /* add non-zero cols of AP into the sorted linked list lnk */ 1291 ierr = PetscLLCondensedAddSorted(anz,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1292 } 1293 1294 /* add received col data into lnk */ 1295 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1296 if (i == *nextrow[k]) { /* i-th row */ 1297 nzi = *(nextci[k]+1) - *nextci[k]; 1298 Jptr = buf_rj[k] + *nextci[k]; 1299 ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr); 1300 nextrow[k]++; nextci[k]++; 1301 } 1302 } 1303 nnz = lnk[0]; 1304 1305 /* if free space is not available, make more free space */ 1306 if (current_space->local_remaining<nnz) { 1307 ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,¤t_space);CHKERRQ(ierr); 1308 nspacedouble++; 1309 } 1310 /* copy data into free space, then initialize lnk */ 1311 ierr = PetscLLCondensedClean(aN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr); 1312 ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr); 1313 1314 current_space->array += nnz; 1315 current_space->local_used += nnz; 1316 current_space->local_remaining -= nnz; 1317 1318 bi[i+1] = bi[i] + nnz; 1319 if (nnz > rmax) rmax = nnz; 1320 } 1321 ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr); 1322 1323 ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr); 1324 ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr); 1325 1326 afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1); 1327 if (afill_tmp > afill) afill = afill_tmp; 1328 ierr = PetscLLCondensedDestroy(lnk,lnkbt);CHKERRQ(ierr); 1329 ierr = MatDestroy(&POt);CHKERRQ(ierr); 1330 ierr = MatDestroy(&PDt);CHKERRQ(ierr); 1331 1332 /* create symbolic parallel matrix Cmpi - why cannot be assembled in Numeric part */ 1333 /*----------------------------------------------------------------------------------*/ 1334 ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr); 1335 ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr); 1336 1337 ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr); 1338 ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1339 ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr); 1340 ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr); 1341 ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr); 1342 ierr = 
MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 1343 ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr); 1344 for (i=0; i<pn; i++) { 1345 row = i + rstart; 1346 nnz = bi[i+1] - bi[i]; 1347 Jptr = bj + bi[i]; 1348 ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr); 1349 } 1350 ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1351 ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1352 ierr = PetscFree(vals);CHKERRQ(ierr); 1353 1354 merge->bi = bi; 1355 merge->bj = bj; 1356 merge->coi = coi; 1357 merge->coj = coj; 1358 merge->buf_ri = buf_ri; 1359 merge->buf_rj = buf_rj; 1360 merge->owners_co = owners_co; 1361 merge->destroy = Cmpi->ops->destroy; 1362 merge->duplicate = Cmpi->ops->duplicate; 1363 1364 Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ; 1365 Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP; 1366 1367 /* attach the supporting struct to Cmpi for reuse */ 1368 c = (Mat_MPIAIJ*)Cmpi->data; 1369 c->ptap = ptap; 1370 ptap->api = NULL; 1371 ptap->apj = NULL; 1372 ptap->merge = merge; 1373 ptap->rmax = rmax; 1374 1375 *C = Cmpi; 1376 #if defined(PETSC_USE_INFO) 1377 if (bi[pn] != 0) { 1378 ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr); 1379 ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr); 1380 } else { 1381 ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr); 1382 } 1383 #endif 1384 PetscFunctionReturn(0); 1385 } 1386 1387 #undef __FUNCT__ 1388 #define __FUNCT__ "MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable" 1389 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,Mat C) 1390 { 1391 PetscErrorCode ierr; 1392 Mat_Merge_SeqsToMPI *merge; 1393 Mat_MPIAIJ *p =(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data; 1394 Mat_SeqAIJ *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data; 1395 Mat_PtAPMPI *ptap; 1396 PetscInt *adj; 1397 PetscInt i,j,k,anz,pnz,row,*cj,nexta; 1398 MatScalar *ada,*ca,valtmp; 1399 PetscInt am =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n; 1400 MPI_Comm comm; 1401 PetscMPIInt size,rank,taga,*len_s; 1402 PetscInt *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci; 1403 PetscInt **buf_ri,**buf_rj; 1404 PetscInt cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */ 1405 MPI_Request *s_waits,*r_waits; 1406 MPI_Status *status; 1407 MatScalar **abuf_r,*ba_i,*pA,*coa,*ba; 1408 PetscInt *ai,*aj,*coi,*coj; 1409 PetscInt *poJ,*pdJ; 1410 Mat A_loc; 1411 Mat_SeqAIJ *a_loc; 1412 1413 PetscFunctionBegin; 1414 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1415 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1416 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 1417 1418 ptap = c->ptap; 1419 merge = ptap->merge; 1420 1421 /* 2) compute numeric C_seq = P_loc^T*A_loc */ 1422 /*------------------------------------------*/ 1423 /* get data from symbolic products */ 1424 coi = merge->coi; coj = merge->coj; 1425 ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr); 1426 ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr); 1427 bi = merge->bi; bj = merge->bj; 1428 owners = merge->rowmap->range; 1429 ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr); 1430 ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr); 1431 1432 /* get A_loc by taking all local rows of A */ 1433 A_loc = ptap->A_loc; 1434 ierr = MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr); 1435 
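  /* In contrast to the nonscalable numeric routine above, no dense work array of length
     A->cmap->N is used here: for every nonzero P[i,j] the row A_loc[i,:] is merged directly
     into the stored row of C by advancing two pointers through the sorted column lists
     (cj[k] versus adj[nexta]), a sparse axpy that only touches existing entries of C. */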
a_loc = (Mat_SeqAIJ*)(A_loc)->data; 1436 ai = a_loc->i; 1437 aj = a_loc->j; 1438 1439 for (i=0; i<am; i++) { 1440 anz = ai[i+1] - ai[i]; 1441 adj = aj + ai[i]; 1442 ada = a_loc->a + ai[i]; 1443 1444 /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */ 1445 /*-------------------------------------------------------------*/ 1446 /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */ 1447 pnz = po->i[i+1] - po->i[i]; 1448 poJ = po->j + po->i[i]; 1449 pA = po->a + po->i[i]; 1450 for (j=0; j<pnz; j++) { 1451 row = poJ[j]; 1452 cj = coj + coi[row]; 1453 ca = coa + coi[row]; 1454 /* perform sparse axpy */ 1455 nexta = 0; 1456 valtmp = pA[j]; 1457 for (k=0; nexta<anz; k++) { 1458 if (cj[k] == adj[nexta]) { 1459 ca[k] += valtmp*ada[nexta]; 1460 nexta++; 1461 } 1462 } 1463 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1464 } 1465 1466 /* put the value into Cd (diagonal part) */ 1467 pnz = pd->i[i+1] - pd->i[i]; 1468 pdJ = pd->j + pd->i[i]; 1469 pA = pd->a + pd->i[i]; 1470 for (j=0; j<pnz; j++) { 1471 row = pdJ[j]; 1472 cj = bj + bi[row]; 1473 ca = ba + bi[row]; 1474 /* perform sparse axpy */ 1475 nexta = 0; 1476 valtmp = pA[j]; 1477 for (k=0; nexta<anz; k++) { 1478 if (cj[k] == adj[nexta]) { 1479 ca[k] += valtmp*ada[nexta]; 1480 nexta++; 1481 } 1482 } 1483 ierr = PetscLogFlops(2.0*anz);CHKERRQ(ierr); 1484 } 1485 } 1486 1487 /* 3) send and recv matrix values coa */ 1488 /*------------------------------------*/ 1489 buf_ri = merge->buf_ri; 1490 buf_rj = merge->buf_rj; 1491 len_s = merge->len_s; 1492 ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr); 1493 ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr); 1494 1495 ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr); 1496 for (proc=0,k=0; proc<size; proc++) { 1497 if (!len_s[proc]) continue; 1498 i = merge->owners_co[proc]; 1499 ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr); 1500 k++; 1501 } 1502 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);} 1503 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);} 1504 1505 ierr = PetscFree2(s_waits,status);CHKERRQ(ierr); 1506 ierr = PetscFree(r_waits);CHKERRQ(ierr); 1507 ierr = PetscFree(coa);CHKERRQ(ierr); 1508 1509 /* 4) insert local Cseq and received values into Cmpi */ 1510 /*----------------------------------------------------*/ 1511 ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr); 1512 for (k=0; k<merge->nrecv; k++) { 1513 buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */ 1514 nrows = *(buf_ri_k[k]); 1515 nextrow[k] = buf_ri_k[k]+1; /* next row number of k-th recved i-structure */ 1516 nextci[k] = buf_ri_k[k] + (nrows + 1); /* poins to the next i-structure of k-th recved i-structure */ 1517 } 1518 1519 for (i=0; i<cm; i++) { 1520 row = owners[rank] + i; /* global row index of C_seq */ 1521 bj_i = bj + bi[i]; /* col indices of the i-th row of C */ 1522 ba_i = ba + bi[i]; 1523 bnz = bi[i+1] - bi[i]; 1524 /* add received vals into ba */ 1525 for (k=0; k<merge->nrecv; k++) { /* k-th received message */ 1526 /* i-th row */ 1527 if (i == *nextrow[k]) { 1528 cnz = *(nextci[k]+1) - *nextci[k]; 1529 cj = buf_rj[k] + *(nextci[k]); 1530 ca = abuf_r[k] + *(nextci[k]); 1531 nextcj = 0; 1532 for (j=0; nextcj<cnz; j++) { 1533 if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */ 1534 
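          /* both bj_i and the received cj are sorted, so one forward scan matches the columns */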
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
#undef __FUNCT__
#define __FUNCT__ "MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable"
PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_Scalable(Mat P,Mat A,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi,A_loc,POt,PDt;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*c;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pn=P->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rstart=P->cmap->rstart,rmax,aN=A->cmap->N,Crmax;
  PetscScalar         *vals;
  Mat_SeqAIJ          *a_loc,*pdt,*pot;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* check if matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%D, %D) != P (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create the struct Mat_PtAPMPI; it is attached to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);

  /* get A_loc by taking all local rows of A */
  ierr = MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);

  ptap->A_loc = A_loc;
  a_loc       = (Mat_SeqAIJ*)(A_loc)->data;
  ai          = a_loc->i;
  aj          = a_loc->j;

  /* determine symbolic Co=(p->B)^T*A - send to others */
  /*----------------------------------------------------*/
  ierr = MatTransposeSymbolic_SeqAIJ(p->A,&PDt);CHKERRQ(ierr);
  pdt  = (Mat_SeqAIJ*)PDt->data;
  pdti = pdt->i; pdtj = pdt->j;

  ierr = MatTransposeSymbolic_SeqAIJ(p->B,&POt);CHKERRQ(ierr);
  pot  = (Mat_SeqAIJ*)POt->data;
  poti = pot->i; potj = pot->j;

  /* then, compute symbolic Co = (p->B)^T*A */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
  nnz  = fill*(poti[pon] + ai[am]);
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
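  /* the fill*(nnz(p->B) + nnz(A)) estimate above is only a guess; whenever the loop
     below exhausts it, PetscFreeSpaceGet() is called again with the current total array
     size added to the request, and nspacedouble counts how often that happened
     (reported via PetscInfo at the end of this routine) */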
  current_space = free_space;

  /* create and initialize a linked list */
  i     = PetscMax(pdt->rmax,pot->rmax);
  Crmax = i*a_loc->rmax*size; /* non-scalable! */
  if (!Crmax || Crmax > aN) Crmax = aN;
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of A_loc == col of Pot */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);

  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++; /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);
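  /* each nonempty message sends the column indices of the Co rows destined for [proc];
     owners_co[proc] gives the first such row, and coi indexes its entries within coj */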
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi               = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    PetscMPIInt icompleted;
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz  = fill*(pdti[pn] + poti[pon] + ai[am]);
  ierr = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,A->cmap->n,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
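  /* row-by-row symbolic merge: for each locally owned row i of C, the sparsity of
     row i of Pd^T*A (built via the transposed Pd) is unioned with the patterns
     received from other processes, then recorded both for MPI preallocation (dnz/onz)
     and in bi/bj; rmax tracks the longest row so a matching zero-value buffer can be
     allocated below */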
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      anz  = ai[row+1] - ai[row];
      Jptr = aj + ai[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    bi[i+1] = bi[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr      = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = MatDestroy(&POt);CHKERRQ(ierr);
  ierr = MatDestroy(&PDt);CHKERRQ(ierr);

  /* create symbolic parallel matrix Cmpi - why can it not be assembled in the numeric part? */
  /*------------------------------------------------------------------------------------------*/
  ierr = PetscMalloc((rmax+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
  ierr = PetscMemzero(vals,rmax*sizeof(PetscScalar));CHKERRQ(ierr);

  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,A->cmap->bs);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    row  = i + rstart;
    nnz  = bi[i+1] - bi[i];
    Jptr = bj + bi[i];
    ierr = MatSetValues(Cmpi,1,&row,nnz,Jptr,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  Cmpi->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_Scalable;
  Cmpi->ops->destroy                 = MatDestroy_MPIAIJ_PtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;
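  /* this scalable variant stores its intermediate data in 'merge' and ptap->A_loc;
     the api/apj/apa work arrays are not needed here and are left NULL (presumably so
     the shared destroy routine can free them unconditionally) */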
  ptap->api   = NULL;
  ptap->apj   = NULL;
  ptap->merge = merge;
  ptap->rmax  = rmax;
  ptap->apa   = NULL;

  *C = Cmpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatTransposeMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
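/*
   Illustrative usage (a sketch, not part of this file): the symbolic/numeric pair above
   is normally reached through the public interface MatTransposeMatMult(), whose calling
   sequence matches the PetscInfo hint printed above:

       Mat C;
       ierr = MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);   symbolic + numeric, C = P^T*A
       ierr = MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);     numeric only, same sparsity pattern

   The runtime option name for selecting this scalable variant (for example
   -mattransposematmult_via scalable) is an assumption here and should be checked against
   the dispatch routine that calls these functions.
*/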