/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>

/* #define DEBUG_MATMATMULT */

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
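/*
  Example (sketch, not compiled into this file): the typical call sequence that
  reaches the routine above.  A and B are assumed to be assembled MATMPIAIJ
  matrices with compatible layouts; the fill estimate 2.0 and the guard macro
  PETSC_MATMATMULT_EXAMPLE are placeholders, not part of the PETSc API.
*/
#if defined(PETSC_MATMATMULT_EXAMPLE)
static PetscErrorCode ExampleMatMatMultAIJ(Mat A,Mat B)
{
  PetscErrorCode ierr;
  Mat            C;

  PetscFunctionBegin;
  /* first call: symbolic + numeric product, C is created */
  ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
  /* after the numerical values (but not the nonzero pattern) of A or B change,
     reuse the symbolic data attached to C */
  ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif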
#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult=(Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = ISDestroy(&mult->isrowa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowb);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->iscolb);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->C_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->A_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_seq);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult_32(Mat A)
{
  PetscErrorCode    ierr;
  PetscContainer    container;
  Mat_MatMatMultMPI *mult=PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  A->ops->destroy   = mult->destroy;
  A->ops->duplicate = mult->duplicate;
  if (A->ops->destroy) {
    ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  }
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult_32(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  /* Note: the container is not duplicated, because it requires deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only needed for repeated calls of MatMatMultNumeric();
     *M is unlikely to be used in this way, so *M is created in plain MPIAIJ format. */
  ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd=(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi=ad->i,*adj,*aoi=ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda=cd->a,*coa=co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca;
  PetscInt       cm=C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap=c->ptap;
  PetscInt       *api,*apj,*apJ,i,j,k,row;
  PetscInt       rstart=C->rmap->rstart,cstart=C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt    rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultNumeric_MPIAIJ_MPIAIJ()...\n",rank);
#endif

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  for (i=0; i<cm; i++) {
    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];

      /* perform dense axpy */
      valtmp = ada[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];

      /* perform dense axpy */
      valtmp = aoa[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    row  = rstart + i;
    api  = ptap->api;
    apj  = ptap->apj;
    apJ  = apj + api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm=((PetscObject)A)->comm;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
  PetscInt           nlnk,*lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
  PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;
  PetscBool          matmatmult_old=PETSC_FALSE;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt        rank=a->rank;
#endif

  PetscFunctionBegin;
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  ierr = PetscOptionsGetBool(PETSC_NULL,"-matmatmult_old",&matmatmult_old,PETSC_NULL);CHKERRQ(ierr);
  if (matmatmult_old) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(A,P,fill,C);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultSymbolic_MPIAIJ_MPIAIJ()...\n",rank);
#endif

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr);
  ptap->apa = apa;

  /* get P_oth by taking rows of P (= nonzero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add nonzero cols of P into the sorted linked list lnk */
      ierr  = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr  = PetscLLAddSorted(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }

    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
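/*
  Example (sketch): the symbolic routine above records how much fill it actually
  needed in C's MatInfo, so a caller can query it and feed the value back as the
  'fill' argument of subsequent products.  The helper below is hypothetical and
  guarded out of compilation; only MatGetInfo() and PetscPrintf() are real PETSc calls.
*/
#if defined(PETSC_MATMATMULT_EXAMPLE)
static PetscErrorCode ExampleReportMatMatMultFill(Mat C)
{
  PetscErrorCode ierr;
  MatInfo        info;

  PetscFunctionBegin;
  ierr = MatGetInfo(C,MAT_LOCAL,&info);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"mallocs %g, fill given %g, fill needed %g\n",
                     info.mallocs,info.fill_ratio_given,info.fill_ratio_needed);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif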
/* implementation used in PETSc-3.2 */
/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_32(Mat A,Mat B,Mat C)
{
  PetscErrorCode    ierr;
  Mat               *seq;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);

  if (mult->skipNumeric) { /* the first numeric product was already done during the symbolic product */
    mult->skipNumeric = PETSC_FALSE;
    PetscFunctionReturn(0);
  }
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt rank;
  ierr = MPI_Comm_rank(((PetscObject)C)->comm,&rank);CHKERRQ(ierr);
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultNumeric_MPIAIJ_MPIAIJ_32()...\n",rank);
#endif

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_SparseAxpy(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* the numeric product is computed here as well */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;
  Mat               AB,*seq;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  PetscInt          *idx,i,start,ncols,nzA,nzB,*cmap,imark;
#if defined(DEBUG_MATMATMULT)
  PetscMPIInt       rank=a->rank;
#endif

  PetscFunctionBegin;
#if defined(DEBUG_MATMATMULT)
  if (!rank) ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] call MatMatMultSymbolic_MPIAIJ_MPIAIJ_32()...\n",rank);
#endif
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }

  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* get isrowb: nonzero cols of A */
  start = A->cmap->rstart;
  cmap  = a->garray;
  nzA   = a->A->cmap->n;
  nzB   = a->B->cmap->n;
  ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  ncols = 0;
  for (i=0; i<nzB; i++) {  /* row index < local row range */
    if (cmap[i] < start) idx[ncols++] = cmap[i];
    else break;
  }
  imark = i;
  for (i=0; i<nzA; i++) idx[ncols++] = start + i;   /* local rows */
  for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row index > local row range */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&mult->isrowb);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&mult->iscolb);CHKERRQ(ierr);

  /* get isrowa: all local rows of A */
  ierr = ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,&mult->isrowa);CHKERRQ(ierr);

  /* Below should go to MatMatMultNumeric_MPIAIJ_MPIAIJ() - How to generate C there? */
  /* create a seq matrix B_seq = submatrix of B by taking the rows of B that correspond to the nonzero cols of A */
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A by taking all local rows of A */
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,fill,&mult->C_seq);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMergeSymbolic(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,&AB);CHKERRQ(ierr);
  ierr = MatMergeNumeric(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,AB);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)AB,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  mult->skipNumeric = PETSC_TRUE; /* a numeric product has already been done here */
  mult->destroy     = AB->ops->destroy;
  mult->duplicate   = AB->ops->duplicate;

  AB->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_32;
  AB->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult_32;
  AB->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult_32;
  AB->ops->matmult        = MatMatMult_MPIAIJ_MPIAIJ;

  *C = AB;
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               m=A->rmap->n,n=B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  (*C)->ops->matmult = MatMatMult_MPIAIJ_MPIDense;

  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
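/*
  Example (sketch): how the MPIAIJ*MPIDense path above would be exercised.
  A is assumed to be an assembled MATMPIAIJ matrix and Bdense an assembled
  MATMPIDENSE matrix with a matching layout; PETSC_DEFAULT is passed because the
  fill estimate is not used for a dense result.  The guard macro keeps this
  illustration out of the build.
*/
#if defined(PETSC_MATMATMULT_EXAMPLE)
static PetscErrorCode ExampleMatMatMultAIJDense(Mat A,Mat Bdense)
{
  PetscErrorCode ierr;
  Mat            C;

  PetscFunctionBegin;
  ierr = MatMatMult(A,Bdense,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr); /* symbolic + numeric */
  ierr = MatMatMult(A,Bdense,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);   /* numeric only, reusing workB */
  ierr = MatDestroy(&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif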
#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
/*
    Performs an efficient scatter of the rows of B needed by this process;
    this is a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N,nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);

  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
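/*
  Example (sketch): one way to sanity-check the routines in this file, in the
  style of the PETSc test suite, is to compare C against A*B applied to a few
  random vectors with MatMatMultEqual().  The helper name and the guard macro
  are placeholders; only MatMatMultEqual() is assumed to be the library routine.
*/
#if defined(PETSC_MATMATMULT_EXAMPLE)
static PetscErrorCode ExampleCheckMatMatMult(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  PetscBool      equal;

  PetscFunctionBegin;
  ierr = MatMatMultEqual(A,B,C,10,&equal);CHKERRQ(ierr); /* test with 10 random vectors */
  if (!equal) SETERRQ(((PetscObject)A)->comm,PETSC_ERR_PLIB,"C != A*B");
  PetscFunctionReturn(0);
}
#endif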