
/*
  Defines projective product routines where A is an MPIAIJ matrix
          C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>

extern PetscErrorCode MatDestroy_MPIAIJ(Mat);

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_MatPtAP"
PetscErrorCode MatDestroy_MPIAIJ_MatPtAP(Mat A)
{
  PetscErrorCode ierr;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    Mat_Merge_SeqsToMPI *merge;
    ierr = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
    ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->bi);CHKERRQ(ierr);
    ierr = PetscFree(merge->bj);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
    ierr = PetscFree(merge->coi);CHKERRQ(ierr);
    ierr = PetscFree(merge->coj);CHKERRQ(ierr);
    ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
    ierr = merge->destroy(A);CHKERRQ(ierr);
    ierr = PetscFree(merge);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode      ierr;
  Mat_Merge_SeqsToMPI *merge;
  PetscContainer      container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Container does not exist");
  ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = merge->destroy;   /* = MatDestroy_MPIAIJ; *M doesn't duplicate A's container! */
  (*M)->ops->duplicate = merge->duplicate; /* = MatDuplicate_MPIAIJ */
  PetscFunctionReturn(0);
}
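
/*
  Usage sketch (illustrative only, not part of this file): the kernels below
  are normally reached through the public MatPtAP() interface, which runs the
  symbolic routine once and the numeric routine on every reuse; 'fill' is the
  caller's estimate of the fill ratio used for preallocation (compare the
  PetscInfo report at the end of the symbolic routine).

      Mat A,P,C;
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,1.0,&C);CHKERRQ(ierr);   symbolic + numeric product
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,1.0,&C);CHKERRQ(ierr);     numeric phase only, reusing C's structure
*/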

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!P->ops->ptapsymbolic_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
  ierr = (*P->ops->ptapsymbolic_mpiaij)(A,P,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!P->ops->ptapnumeric_mpiaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
  ierr = (*P->ops->ptapnumeric_mpiaij)(A,P,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 B_mpi;
  Mat_MatMatMultMPI   *ap;
  PetscContainer      container;
  PetscFreeSpaceList  free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,nnz;
  PetscInt            nlnk,*lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm=((PetscObject)A)->comm;
  PetscMPIInt         size,rank,tag,*len_si,*len_s,*len_ri;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*bi,*bj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0;
  PetscMPIInt         j;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* destroy the container 'Mat_MatMatMultMPI' in case that P is attached to it */
  ierr = PetscObjectQuery((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    /* reset functions */
    ierr = PetscContainerGetPointer(container,(void **)&ap);CHKERRQ(ierr);
    P->ops->destroy   = ap->destroy;
    P->ops->duplicate = ap->duplicate;
    /* destroy container and contents */
    ierr = PetscObjectCompose((PetscObject)P,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  }

  /* create the container 'Mat_MatMatMultMPI' and attach it to P */
  ierr = PetscNew(Mat_MatMatMultMPI,&ap);CHKERRQ(ierr);
  ap->abi=PETSC_NULL; ap->abj=PETSC_NULL;
  ap->abnz_max = 0;

  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,ap);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  ap->destroy     = P->ops->destroy;
  P->ops->destroy = MatDestroy_MPIAIJ_MatMatMult;
  ap->reuse       = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols(A,P,MAT_INITIAL_MATRIX,&ap->startsj,&ap->startsj_r,&ap->bufa,&ap->B_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ap->B_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ap->B_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ap->B_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ap->abi = api;
  api[0]  = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*nnz(A) */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0;i<am;i++) {
    apnz = 0;
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    for (j=0; j<nzi; j++){
      row  = *adj++;
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    for (j=0; j<nzi; j++){
      row  = *aoj++;
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLAdd(pnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }

    api[i+1] = api[i] + apnz;
    if (ap->abnz_max < apnz) ap->abnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ap->abj);CHKERRQ(ierr);
  apj  = ap->abj;
  ierr = PetscFreeSpaceContiguous(&free_space,ap->abj);CHKERRQ(ierr);
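
  /*
    Sketch of the linked-list pattern used above (and again below for Co and Cd):
      PetscLLCreate(...)        - create list lnk over columns 0..pN-1 with bit array lnkbt
      PetscLLAdd(nz,J,...)      - merge nz sorted indices J[] into lnk; nlnk returns the
                                  number of newly added entries
      PetscLLClean(...)         - copy the sorted union into an output array and reset lnk
      PetscLLDestroy(lnk,lnkbt) - free the list
    Each row's pattern is thus accumulated as a sorted union of the P rows
    selected by the nonzeros of A[i,:], without any searching or sorting.
  */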

  /* determine symbolic Co=(p->B)^T*AP - send to others */
  /*----------------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                            >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be 3*pon*max( nnz(AP) per row) */
  nnz           = 3*pon*ap->abnz_max + 1;
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<pon; i++) {
    nnz = 0;
    pnz = poti[i+1] - poti[i];
    j   = pnz;
    ptJ = potj + poti[i+1];
    while (j){ /* assume cols are almost in increasing order; starting from the end saves computation */
      j--; ptJ--;
      row  = *ptJ; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAdd(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      nnz += nlnk;
    }

    /* If free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;
    coi[i+1] = coi[i] + nnz;
  }
  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* send j-array (coj) of Co to other processors */
  /*----------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++){
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len = 0;  /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++){
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]){
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tag);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tag,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);

  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);CHKERRQ(ierr);

  for (proc=0, k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tag,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&sstatus);CHKERRQ(ierr);
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr = PetscPostIrecvInt(comm,tag,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:           nrows to be sent
         [1:nrows]:           row index (local to the receiving [proc])
         [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tag,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
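
  /*
    Example of the i-structure message formed above (illustrative numbers):
    sending nrows=2 rows to [proc], with local row indices 4 and 7 holding 3
    and 5 nonzeros respectively, gives len_si[proc] = 2*(2+1) = 6 and
        buf_si = [2, 4, 7, 0, 3, 8]
    i.e. nrows, then the two local row indices, then the CSR-style offsets
    {0,3,8} into the corresponding coj value message.
  */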
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&j,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
  /*
  ierr = PetscInfo2(A,"nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++){
    ierr = PetscInfo3(A,"recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
  }
  */
  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(sstatus);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* set initial free space to be 3*pn*max( nnz(AP) per row) */
  nnz           = 3*pn*ap->abnz_max + 1;
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }
  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    nnz = 0;
    pnz = pdti[i+1] - pdti[i];
    j   = pnz;
    ptJ = pdtj + pdti[i+1];
    while (j){ /* assume cols are almost in increasing order; starting from the end saves computation */
      j--; ptJ--;
      row  = *ptJ; /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAdd(apnz,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      nnz += nlnk;
    }
    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLAdd(nzi,Jptr,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
        nnz += nlnk;
        nextrow[k]++; nextci[k]++;
      }
    }

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,nnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;
    bi[i+1] = bi[i] + nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create symbolic parallel matrix B_mpi */
  /*---------------------------------------*/
  ierr = MatCreate(comm,&B_mpi);CHKERRQ(ierr);
  ierr = MatSetSizes(B_mpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  merge->bi        = bi;
  merge->bj        = bj;
  merge->coi       = coi;
  merge->coj       = coj;
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = B_mpi->ops->destroy;
  merge->duplicate = B_mpi->ops->duplicate;

  /* B_mpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  B_mpi->assembled      = PETSC_FALSE;
  B_mpi->ops->destroy   = MatDestroy_MPIAIJ_MatPtAP;
  B_mpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
  ierr = MatSetBlockSize_MPIAIJ(B_mpi,1);CHKERRQ(ierr);

  /* attach the supporting struct to B_mpi for reuse */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,merge);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
  *C = B_mpi;
#if defined(PETSC_USE_INFO)
  if (bi[pn] != 0) {
    PetscReal afill = ((PetscReal)bi[pn])/(adi[am]+aoi[am]);
    if (afill < 1.0) afill = 1.0;
    ierr = PetscInfo3(B_mpi,"Reallocs %D; Fill ratio: given %G needed %G when computing A*P.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1(B_mpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(B_mpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
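
/*
  The numeric phase below follows the same splitting as the symbolic phase
  above (a sketch of the algebra, using this file's notation):
     C = P^T*A*P, with P = [Pd Po] split into diagonal and off-diagonal blocks,
  so each process forms AP = A_loc*P = Ad*P_loc + Ao*P_oth row by row, then
     Pd^T*AP contributes to the locally owned rows of C (accumulated in ba),
     Po^T*AP = Co belongs to rows owned by other processes (accumulated in coa
               and shipped via the merge machinery set up above).
*/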

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode      ierr;
  Mat_Merge_SeqsToMPI *merge;
  Mat_MatMatMultMPI   *ap;
  PetscContainer      cont_merge,cont_ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth;
  PetscInt            am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm=((PetscObject)C)->comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *api,*apj,*coi,*coj;
  PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
  PetscInt            sparse_axpy;
  PetscLogDouble      t0,tf,etime=0.0,t00,tff,time_matupdate=0.0,time_malloc=0.0,time_Cseq0=0.0,time_Cseq1=0.0,time_setvals=0.0;

  PetscFunctionBegin;
  ierr = MPI_Barrier(comm);CHKERRQ(ierr);
  ierr = PetscGetTime(&t00);CHKERRQ(ierr);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = PetscGetTime(&t0);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&cont_merge);CHKERRQ(ierr);
  if (cont_merge) {
    ierr = PetscContainerGetPointer(cont_merge,(void **)&merge);CHKERRQ(ierr);
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix C does not possess an object container");

  /* 1) get P_oth = ap->B_oth and P_loc = ap->B_loc */
  /*--------------------------------------------------*/
  ierr = PetscObjectQuery((PetscObject)P,"Mat_MatMatMultMPI",(PetscObject *)&cont_ptap);CHKERRQ(ierr);
  if (cont_ptap) {
    ierr = PetscContainerGetPointer(cont_ptap,(void **)&ap);CHKERRQ(ierr);
    if (ap->reuse == MAT_INITIAL_MATRIX){
      ap->reuse = MAT_REUSE_MATRIX;
    } else { /* update numerical values of P_oth and P_loc */
      ierr = MatGetBrowsOfAoCols(A,P,MAT_REUSE_MATRIX,&ap->startsj,&ap->startsj_r,&ap->bufa,&ap->B_oth);CHKERRQ(ierr);
      ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ap->B_loc);CHKERRQ(ierr);
    }
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
  ierr = PetscGetTime(&tf);CHKERRQ(ierr);
  time_matupdate += tf-t0;

  ierr = PetscGetTime(&t0);CHKERRQ(ierr);
  /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
  /*--------------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ap->B_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ap->B_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  coi = merge->coi; coj = merge->coj;
  ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
  ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
  ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);
  ierr = PetscGetTime(&tf);CHKERRQ(ierr);
  time_malloc += tf-t0;

  api = ap->abi; apj = ap->abj;
  /* flag 'sparse_axpy' determines which implementation is used:
       0: do dense axpy in MatPtAPNumeric() - fastest, but requires storage of a dense array apa;
       1: do one sparse axpy (default) - uses the same memory as sparse_axpy=0 and may execute fewer
          flops (apnz vs. cnz in the outer product), but is slower than case '0' when cnz is not
          much larger than apnz;
       2: do two sparse axpys in MatPtAPNumeric() - slowest, uses a sparse array apa */
  /* set default sparse_axpy */
  sparse_axpy = 1;
  ierr = PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);CHKERRQ(ierr);
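
  /* For example, the variant can be selected on the command line of the
     calling application (a hypothetical invocation):
         mpiexec -n 4 ./app -matptap_sparseaxpy 0   dense-axpy kernel
         mpiexec -n 4 ./app -matptap_sparseaxpy 2   two-sparse-axpy kernel
  */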
  if (sparse_axpy == 0){ /* Do not perform sparse axpy */
    /*--------------------------------------------------*/
    /* malloc apa to store dense row A[i,:]*P */
    ierr = PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
    ierr = PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);

    for (i=0; i<am; i++) {
      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        for (k=0; k<pnz; k++){
          apa[pj[k]] += ada[j]*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        for (k=0; k<pnz; k++){
          apa[pj[k]] += aoa[j]*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq0 += tf - t0;

      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      apnz = api[i+1] - api[i];
      /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
      pnz = po->i[i+1] - po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++){
        cnz = coi[*poJ+1] - coi[*poJ];
        cj  = coj + coi[*poJ];
        ca  = coa + coi[*poJ++];
        /* perform dense axpy */
        for (k=0; k<cnz; k++) {
          ca[k] += pA[j]*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* put the value into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++){
        cnz = bi[*pdJ+1] - bi[*pdJ];
        cj  = bj + bi[*pdJ];
        ca  = ba + bi[*pdJ++];
        /* perform dense axpy */
        for (k=0; k<cnz; k++) {
          ca[k] += pA[j]*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq1 += tf - t0;
    }
  } else if (sparse_axpy == 1){ /* Perform one sparse axpy */
    /*------------------------------------------------------*/
    /* malloc apa to store dense row A[i,:]*P */
    ierr = PetscMalloc((P->cmap->N)*sizeof(PetscScalar),&apa);CHKERRQ(ierr);
    ierr = PetscMemzero(apa,P->cmap->N*sizeof(PetscScalar));CHKERRQ(ierr);

    for (i=0; i<am; i++) {
      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        for (k=0; k<pnz; k++){
          apa[pj[k]] += ada[j]*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        for (k=0; k<pnz; k++){
          apa[pj[k]] += aoa[j]*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq0 += tf - t0;

      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      apnz = api[i+1] - api[i];
      /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
      pnz = po->i[i+1] - po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++){
        cj = coj + coi[*poJ];
        ca = coa + coi[*poJ++];
        /* perform sparse axpy */
        nextap = 0;
        for (k=0; nextap<apnz; k++) {
          if (cj[k]==apJ[nextap]) { /* global column index */
            ca[k] += pA[j]*apa[cj[k]]; nextap++;
          }
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }

      /* put the value into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++){
        cj = bj + bi[*pdJ];
        ca = ba + bi[*pdJ++];
        /* perform sparse axpy */
        nextap = 0;
        for (k=0; nextap<apnz; k++) {
          if (cj[k]==apJ[nextap]) { /* global column index */
            ca[k] += pA[j]*apa[cj[k]];
            nextap++;
          }
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq1 += tf - t0;
    }
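
    /*
      Sparse-axpy pattern used above (an illustrative sketch): the AP row
      pattern apJ[0..apnz) is by construction a sorted subset of the C row
      pattern cj[], so one joint pass over cj[] suffices.  E.g. with
          cj  = {1,3,4,7,9},  apJ = {3,7}
      the scan matches cj[1]==apJ[0] and cj[3]==apJ[1] and performs
          ca[1] += pA[j]*apa[3];  ca[3] += pA[j]*apa[7];
    */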
  } else if (sparse_axpy == 2){ /* Perform two sparse axpys */
    /*----------------------------------------------------*/
    /* malloc apa to store sparse row A[i,:]*P */
    ierr = PetscMalloc((ap->abnz_max+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
    ierr = PetscMemzero(apa,ap->abnz_max*sizeof(MatScalar));CHKERRQ(ierr);

    pA=pa_loc;
    for (i=0; i<am; i++) {
      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      /* form i-th sparse row of A*P */
      apnz = api[i+1] - api[i];
      apJ  = apj + api[i];
      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];
        nextp = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += ada[j]*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];
        nextp = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += aoa[j]*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq0 += tf - t0;

      ierr = PetscGetTime(&t0);CHKERRQ(ierr);
      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      pnz = pi_loc[i+1] - pi_loc[i];
      pJ  = pj_loc + pi_loc[i];
      for (j=0; j<pnz; j++) {
        nextap = 0;
        row = pJ[j]; /* global index */
        if (row < pcstart || row >= pcend) { /* put the value into Co */
          cj = coj + coi[*poJ];
          ca = coa + coi[*poJ++];
        } else {                             /* put the value into Cd */
          cj = bj + bi[*pdJ];
          ca = ba + bi[*pdJ++];
        }
        for (k=0; nextap<apnz; k++) {
          if (cj[k]==apJ[nextap]) ca[k] += pA[j]*apa[nextap++];
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }
      pA += pnz;
      /* zero the current row info for A*P */
      ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
      ierr = PetscGetTime(&tf);CHKERRQ(ierr);
      time_Cseq1 += tf - t0;
    }
  } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"sparse_axpy only takes values 0, 1 and 2");
  ierr = PetscFree(apa);CHKERRQ(ierr);

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);

  /* 4) insert local Cseq and received values into Cmpi */
  /*------------------------------------------------------*/
  ierr = PetscGetTime(&t0);CHKERRQ(ierr);
  ierr = PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);CHKERRQ(ierr);

  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;             /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz = *(nextci[k]+1) - *nextci[k];
        cj  = buf_rj[k] + *(nextci[k]);
        ca  = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++){
          if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
    ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscGetTime(&tf);CHKERRQ(ierr);
  time_setvals += tf-t0;

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscGetTime(&tff);CHKERRQ(ierr);
  etime += tff - t00;
  /*
  PetscInt prid=0;
  if (rank == prid){
    ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] PtAPNum time %g = matupdate %g + malloc %g + Cseq %g + %g + setvals %g\n",rank,etime,time_matupdate,time_malloc,time_Cseq0,time_Cseq1,time_setvals);
  }
  */
  PetscFunctionReturn(0);
}