/*
  Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <../src/mat/impls/dense/mpi/mpidense.h>

#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->matmultnumeric)(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
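/*
   Usage sketch (illustrative; not part of the library, variable names are
   hypothetical): callers reach the routine above through the type-dispatched
   MatMatMult() interface; a MAT_REUSE_MATRIX call skips the symbolic phase
   and reuses the data attached to C:

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   (symbolic + numeric)
     ierr = MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     (numeric only)
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/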
#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_Mat_MatMatMultMPI"
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult = (Mat_MatMatMultMPI*)ptr;

  PetscFunctionBegin;
  ierr = ISDestroy(&mult->isrowa);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->isrowb);CHKERRQ(ierr);
  ierr = ISDestroy(&mult->iscolb);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->C_seq);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->A_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&mult->B_seq);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = PetscFree2(ptap->startsj,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = ptap->destroy(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult_32(Mat A)
{
  PetscErrorCode    ierr;
  PetscContainer    container;
  Mat_MatMatMultMPI *mult = PETSC_NULL;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  A->ops->destroy   = mult->destroy;
  A->ops->duplicate = mult->duplicate;
  if (A->ops->destroy) {
    ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  }
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult_32"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult_32(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);
  /* Note: the container is not duplicated, because it requires deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric();
     *M is unlikely to be used in this way, so we create *M in plain mpiaij format. */
  ierr = (*mult->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatMatMult"
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = ptap->destroy;   /* = MatDestroy_MPIAIJ, *M does not duplicate A's special structure! */
  (*M)->ops->duplicate = ptap->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data,*c = (Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *ad = (Mat_SeqAIJ*)(a->A)->data,*ao = (Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *cd = (Mat_SeqAIJ*)(c->A)->data,*co = (Mat_SeqAIJ*)(c->B)->data;
  PetscInt       *adi = ad->i,*adj,*aoi = ao->i,*aoj;
  PetscScalar    *ada,*aoa,*cda = cd->a,*coa = co->a;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
  PetscScalar    *pa_loc,*pa_oth,*pa,*apa,valtmp,*ca;
  PetscInt       cm = C->rmap->n,anz,pnz;
  Mat_PtAPMPI    *ptap = c->ptap;
  PetscInt       *api,*apj,*apJ,cnz,i,j,k,row;
  PetscInt       rstart = C->rmap->rstart,cstart = C->cmap->rstart;
  PetscInt       cdnz,conz,k0,k1;

  PetscFunctionBegin;
  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  /* update numerical values of P_oth and P_loc */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
  /*----------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
  pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;

  /* get apa for storing dense row A[i,:]*P */
  apa = ptap->apa;

  for (i=0; i<cm; i++) {
    /* diagonal portion of A */
    anz = adi[i+1] - adi[i];
    adj = ad->j + adi[i];
    ada = ad->a + adi[i];
    for (j=0; j<anz; j++) {
      row = adj[j];
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];

      /* perform dense axpy */
      valtmp = ada[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* off-diagonal portion of A */
    anz = aoi[i+1] - aoi[i];
    aoj = ao->j + aoi[i];
    aoa = ao->a + aoi[i];
    for (j=0; j<anz; j++) {
      row = aoj[j];
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];

      /* perform dense axpy */
      valtmp = aoa[j];
      for (k=0; k<pnz; k++) {
        apa[pj[k]] += valtmp*pa[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }

    /* set values in C */
    row = rstart + i;
    api = ptap->api;
    apj = ptap->apj;
    apJ = apj + api[i];

    cnz  = api[i+1] - api[i];
    cdnz = cd->i[i+1] - cd->i[i];
    conz = co->i[i+1] - co->i[i];

    /* 1st off-diagonal part of C */
    ca = coa + co->i[i];
    k  = 0;
    for (k0=0; k0<conz; k0++) {
      if (apJ[k] >= cstart) break;
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* diagonal part of C */
    ca = cda + cd->i[i];
    for (k1=0; k1<cdnz; k1++) {
      ca[k1]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }

    /* 2nd off-diagonal part of C */
    ca = coa + co->i[i];
    for (; k0<conz; k0++) {
      ca[k0]      = apa[apJ[k]];
      apa[apJ[k]] = 0.0;
      k++;
    }
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
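/*
   Worked illustration of the scatter at the end of MatMatMultNumeric_MPIAIJ_MPIAIJ()
   above (hypothetical sizes): with cstart=2 and cend=4, a dense row apa whose
   nonzero pattern is apJ = {0,2,3,5} is copied out in three pieces,
       co (1st off-diagonal part): apJ entries < cstart         -> {0}
       cd (diagonal part):         apJ entries in [cstart,cend) -> {2,3}
       co (2nd off-diagonal part): apJ entries >= cend          -> {5}
   and each copied entry of apa is reset to 0.0, so apa is clean for the next row.
*/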
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  MPI_Comm           comm = ((PetscObject)A)->comm;
  Mat                Cmpi;
  Mat_PtAPMPI        *ptap;
  PetscFreeSpaceList free_space = PETSC_NULL,current_space = PETSC_NULL;
  Mat_MPIAIJ         *a = (Mat_MPIAIJ*)A->data,*c;
  Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao = (Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
  PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
  PetscInt           *adi = ad->i,*adj = ad->j,*aoi = ao->i,*aoj = ao->j,rstart = A->rmap->rstart;
  PetscInt           nlnk,*lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble = 0,j,nzi;
  PetscInt           am = A->rmap->n,pN = P->cmap->N,pn = P->cmap->n,pm = P->rmap->n;
  PetscBT            lnkbt;
  PetscScalar        *apa;
  PetscReal          afill;
  PetscBool          matmatmult_old = PETSC_FALSE;

  PetscFunctionBegin;
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  ierr = PetscOptionsGetBool(PETSC_NULL,"-matmatmult_old",&matmatmult_old,PETSC_NULL);CHKERRQ(ierr);
  if (matmatmult_old) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(A,P,fill,C);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr = PetscNew(Mat_PtAPMPI,&ptap);CHKERRQ(ierr);
  ptap->abnz_max = 0;

  /* malloc apa to store dense row A[i,:]*P */
  ierr = PetscMalloc(pN*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,pN*sizeof(PetscScalar));CHKERRQ(ierr);
  ptap->apa = apa;

  /* get P_oth by taking rows of P (= nonzero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ptap->api = api;
  api[0]    = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = MatPreallocateInitialize(comm,am,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++) {
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add nonzero cols of P into the sorted linked list lnk */
      ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++) {
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }

    api[i+1] = api[i] + apnz;
    if (ptap->abnz_max < apnz) ptap->abnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz);CHKERRQ(ierr);
    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ptap->apj);CHKERRQ(ierr);
  apj  = ptap->apj;
  ierr = PetscFreeSpaceContiguous(&free_space,ptap->apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create and assemble symbolic parallel matrix Cmpi */
  /*----------------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,am,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  ierr = MatSetBlockSize(Cmpi,1);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    row  = i + rstart;
    apnz = api[i+1] - api[i];
    ierr = MatSetValues(Cmpi,1,&row,apnz,apj,apa,INSERT_VALUES);CHKERRQ(ierr);
    apj += apnz;
  }
  ierr = MatAssemblyBegin(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmpi,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->destroy             = Cmpi->ops->destroy;
  ptap->duplicate           = Cmpi->ops->duplicate;
  Cmpi->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
  Cmpi->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult;
  Cmpi->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult;

  /* attach the supporting struct to Cmpi for reuse */
  c       = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap = ptap;

  *C = Cmpi;

  /* set MatInfo */
  afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]) + 1.e-5;
  if (afill < 1.0) afill = 1.0;
  Cmpi->info.mallocs           = nspacedouble;
  Cmpi->info.fill_ratio_given  = fill;
  Cmpi->info.fill_ratio_needed = afill;

#if defined(PETSC_USE_INFO)
  if (api[am]) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatMatMult(A,B,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
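/*
   A minimal sketch (hypothetical caller code) of acting on the MatInfo fields
   set above: query the fill ratio the symbolic phase actually needed and pass
   it as the fill argument of a later product with a similar nonzero pattern.

     MatInfo info;
     ierr = MatGetInfo(C,MAT_LOCAL,&info);CHKERRQ(ierr);
     ... reuse info.fill_ratio_needed as the fill argument of the next MatMatMult() ...
*/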
/* implementation used in PETSc-3.2 */
/* This routine is called ONLY in the case of reusing previously computed symbolic C */
#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_32(Mat A,Mat B,Mat C)
{
  PetscErrorCode    ierr;
  Mat               *seq;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&mult);CHKERRQ(ierr);

  seq  = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq  = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_SparseAxpy(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ_32"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_32(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;
  Mat               AB,*seq;
  Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data;
  PetscInt          *idx,i,start,ncols,nzA,nzB,*cmap,imark;

  PetscFunctionBegin;
  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }

  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* get isrowb: nonzero cols of A */
  start = A->cmap->rstart;
  cmap  = a->garray;
  nzA   = a->A->cmap->n;
  nzB   = a->B->cmap->n;
  ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  ncols = 0;
  for (i=0; i<nzB; i++) {  /* cols < local row range */
    if (cmap[i] < start) idx[ncols++] = cmap[i];
    else break;
  }
  imark = i;
  for (i=0; i<nzA; i++) idx[ncols++] = start + i;   /* local cols */
  for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* cols > local row range */
  ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&mult->isrowb);CHKERRQ(ierr);
  ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&mult->iscolb);CHKERRQ(ierr);

  /* get isrowa: all local rows of A */
  ierr = ISCreateStride(PETSC_COMM_SELF,A->rmap->n,A->rmap->rstart,1,&mult->isrowa);CHKERRQ(ierr);

  /* Below should go to MatMatMultNumeric_MPIAIJ_MPIAIJ() - how to generate C there? */
  /* create a seq matrix B_seq = submatrix of B by taking the rows of B that match nonzero cols of A */
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* create a seq matrix A_seq = submatrix of A by taking all local rows of A */
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_INITIAL_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;
  ierr = PetscFree(seq);CHKERRQ(ierr);

  /* compute C_seq = A_seq * B_seq */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,fill,&mult->C_seq);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating C_seq */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMergeSymbolic(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,&AB);CHKERRQ(ierr);
  ierr = MatMergeNumeric(((PetscObject)A)->comm,mult->C_seq,B->cmap->n,AB);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)AB,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);

  mult->destroy           = AB->ops->destroy;
  mult->duplicate         = AB->ops->duplicate;
  AB->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_32;
  AB->ops->destroy        = MatDestroy_MPIAIJ_MatMatMult_32;
  AB->ops->duplicate      = MatDuplicate_MPIAIJ_MatMatMult_32;
  AB->ops->matmult        = MatMatMult_MPIAIJ_MPIAIJ;

  *C = AB;
  PetscFunctionReturn(0);
}
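/*
   Example of the isrowb ordering built in MatMatMultSymbolic_MPIAIJ_MPIAIJ_32()
   above (hypothetical layout): with start=4, nzA=3 local columns {4,5,6}, and
   garray = {1,2,7,9}, the merged index set is
       idx = {1, 2,  4, 5, 6,  7, 9}
   i.e. off-process columns below the local range, then the local columns, then
   the remaining off-process columns, kept in ascending order throughout.
*/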
#undef __FUNCT__
#define __FUNCT__ "MatMatMult_MPIAIJ_MPIDense"
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;

#undef __FUNCT__
#define __FUNCT__ "MPIAIJ_MPIDenseDestroy"
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
  PetscErrorCode  ierr;

  PetscFunctionBegin;
  ierr = MatDestroy(&contents->workB);CHKERRQ(ierr);
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscInt               nz   = aij->B->cmap->n;
  PetscContainer         container;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               m = A->rmap->n,n = B->cmap->n;

  PetscFunctionBegin;
  ierr = MatCreate(((PetscObject)B)->comm,C);CHKERRQ(ierr);
  ierr = MatSetSizes(*C,m,n,A->rmap->N,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(*C,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  (*C)->ops->matmult = MatMatMult_MPIAIJ_MPIDense;

  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  /* Create work matrix used to store off-processor rows of B needed for local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap->N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);
  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap->N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap->N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscContainerCreate(((PetscObject)A)->comm,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
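/*
   Usage sketch (illustrative; matrix names are hypothetical): the dense path
   is selected by the operand types, so a caller simply writes

     Mat C;
     ierr = MatMatMult(Aaij,Bdense,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);

   and receives C as MATMPIDENSE with the row layout of Aaij and the column
   layout of Bdense.
*/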
#undef __FUNCT__
#define __FUNCT__ "MatMPIDenseScatter"
/*
    Performs an efficient scatter on the rows of B needed by this process;
    this is a modification of the VecScatterBegin_() routines.
*/
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx   = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*)ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*)ctx->todata;
  PetscInt               i,j,k;
  PetscInt               *sindices,*sstarts,*rindices,*rstarts;
  PetscMPIInt            *sprocs,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = ((PetscObject)A)->comm;
  PetscMPIInt            tag = ((PetscObject)ctx)->tag,ncols = B->cmap->N,nrows = aij->B->cmap->n,imdex,nrowsB = B->rmap->n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         container;
  Mat                    workB;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&container);CHKERRQ(ierr);
  if (!container) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = PetscContainerGetPointer(container,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %D not equal to columns of aij->B %D",workB->rmap->n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++) {
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++) {
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
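/*
   Layout note on the pack/unpack loops above: SeqDense arrays are stored
   column-major, so b[r + nrowsB*k] addresses entry (r,k) of B's local block
   and w[r + nrows*k] entry (r,k) of workB. Each message therefore carries
   whole rows (all ncols columns) of the requested rows of B.
*/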
extern PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat);

#undef __FUNCT__
#define __FUNCT__ "MatMatMultNumeric_MPIAIJ_MPIDense"
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  PetscFunctionBegin;
  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}