1 /* 2 Routines to compute overlapping regions of a parallel MPI matrix 3 and to find submatrices that were shared across processors. 4 */ 5 #include <../src/mat/impls/aij/seq/aij.h> 6 #include <../src/mat/impls/aij/mpi/mpiaij.h> 7 #include <petscbt.h> 8 #include <petscsf.h> 9 10 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat,PetscInt,IS*); 11 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**,PetscTable*); 12 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*); 13 extern PetscErrorCode MatGetRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 14 extern PetscErrorCode MatRestoreRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 15 16 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat,PetscInt,IS*); 17 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat,PetscInt,IS*); 18 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat,PetscInt,PetscMPIInt,PetscMPIInt *,PetscInt *, PetscInt *,PetscInt **,PetscInt **); 19 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat,PetscInt,IS*,PetscInt,PetscInt *); 20 21 22 PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov) 23 { 24 PetscErrorCode ierr; 25 PetscInt i; 26 27 PetscFunctionBegin; 28 if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified"); 29 for (i=0; i<ov; ++i) { 30 ierr = MatIncreaseOverlap_MPIAIJ_Once(C,imax,is);CHKERRQ(ierr); 31 } 32 PetscFunctionReturn(0); 33 } 34 35 PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat C,PetscInt imax,IS is[],PetscInt ov) 36 { 37 PetscErrorCode ierr; 38 PetscInt i; 39 40 PetscFunctionBegin; 41 if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified"); 42 for (i=0; i<ov; ++i) { 43 ierr = MatIncreaseOverlap_MPIAIJ_Once_Scalable(C,imax,is);CHKERRQ(ierr); 44 } 45 PetscFunctionReturn(0); 46 } 47 48 49 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat mat,PetscInt nidx,IS is[]) 50 { 51 PetscErrorCode ierr; 52 MPI_Comm comm; 53 PetscInt *length,length_i,tlength,*remoterows,nrrows,reducednrrows,*rrow_ranks,*rrow_isids,i,j,owner; 54 PetscInt *tosizes,*tosizes_temp,*toffsets,*fromsizes,*todata,*fromdata; 55 PetscInt nrecvrows,*sbsizes = 0,*sbdata = 0; 56 const PetscInt *indices_i,**indices; 57 PetscLayout rmap; 58 PetscMPIInt rank,size,*toranks,*fromranks,nto,nfrom; 59 PetscSF sf; 60 PetscSFNode *remote; 61 62 PetscFunctionBegin; 63 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 64 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 65 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 66 /* get row map to determine where rows should be going */ 67 ierr = MatGetLayouts(mat,&rmap,NULL);CHKERRQ(ierr); 68 /* retrieve IS data and put all together so that we 69 * can optimize communication 70 * */ 71 ierr = PetscCalloc2(nidx,(PetscInt ***)&indices,nidx,&length);CHKERRQ(ierr); 72 for (i=0,tlength=0; i<nidx; i++){ 73 ierr = ISGetLocalSize(is[i],&length[i]);CHKERRQ(ierr); 74 tlength += length[i]; 75 ierr = ISGetIndices(is[i],&indices[i]);CHKERRQ(ierr); 76 } 77 /* find these rows on remote processors */ 78 ierr = PetscCalloc3(tlength,&remoterows,tlength,&rrow_ranks,tlength,&rrow_isids);CHKERRQ(ierr); 79 ierr = PetscCalloc3(size,&toranks,2*size,&tosizes,size,&tosizes_temp);CHKERRQ(ierr); 80 nrrows = 0; 81 for (i=0; i<nidx; i++){ 82 length_i = length[i]; 83 indices_i = indices[i]; 84 for (j=0; 
j<length_i; j++){ 85 owner = -1; 86 ierr = PetscLayoutFindOwner(rmap,indices_i[j],&owner);CHKERRQ(ierr); 87 /* remote processors */ 88 if (owner != rank){ 89 tosizes_temp[owner]++; /* number of rows to owner */ 90 rrow_ranks[nrrows] = owner; /* processor */ 91 rrow_isids[nrrows] = i; /* is id */ 92 remoterows[nrrows++] = indices_i[j]; /* row */ 93 } 94 } 95 ierr = ISRestoreIndices(is[i],&indices[i]);CHKERRQ(ierr); 96 } 97 ierr = PetscFree2(indices,length);CHKERRQ(ierr); 98 /* test if we need to exchange messages 99 * generally speaking, we do not need to exchange 100 * data when overlap is 1 101 * */ 102 ierr = MPIU_Allreduce(&nrrows,&reducednrrows,1,MPIU_INT,MPIU_MAX,comm);CHKERRQ(ierr); 103 /* we do not have any messages 104 * It usually corresponds to overlap 1 105 * */ 106 if (!reducednrrows){ 107 ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr); 108 ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr); 109 ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr); 110 PetscFunctionReturn(0); 111 } 112 nto = 0; 113 /* send sizes and ranks for building a two-sided communcation */ 114 for (i=0; i<size; i++){ 115 if (tosizes_temp[i]){ 116 tosizes[nto*2] = tosizes_temp[i]*2; /* size */ 117 tosizes_temp[i] = nto; /* a map from processor to index */ 118 toranks[nto++] = i; /* processor */ 119 } 120 } 121 ierr = PetscCalloc1(nto+1,&toffsets);CHKERRQ(ierr); 122 for (i=0; i<nto; i++){ 123 toffsets[i+1] = toffsets[i]+tosizes[2*i]; /* offsets */ 124 tosizes[2*i+1] = toffsets[i]; /* offsets to send */ 125 } 126 /* send information to other processors */ 127 ierr = PetscCommBuildTwoSided(comm,2,MPIU_INT,nto,toranks,tosizes,&nfrom,&fromranks,&fromsizes);CHKERRQ(ierr); 128 nrecvrows = 0; 129 for (i=0; i<nfrom; i++) nrecvrows += fromsizes[2*i]; 130 ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr); 131 nrecvrows = 0; 132 for (i=0; i<nfrom; i++){ 133 for (j=0; j<fromsizes[2*i]; j++){ 134 remote[nrecvrows].rank = fromranks[i]; 135 remote[nrecvrows++].index = fromsizes[2*i+1]+j; 136 } 137 } 138 ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr); 139 ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr); 140 /* use two-sided communication by default since OPENMPI has some bugs for one-sided one */ 141 ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr); 142 ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr); 143 /* message pair <no of is, row> */ 144 ierr = PetscCalloc2(2*nrrows,&todata,nrecvrows,&fromdata);CHKERRQ(ierr); 145 for (i=0; i<nrrows; i++){ 146 owner = rrow_ranks[i]; /* processor */ 147 j = tosizes_temp[owner]; /* index */ 148 todata[toffsets[j]++] = rrow_isids[i]; 149 todata[toffsets[j]++] = remoterows[i]; 150 } 151 ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr); 152 ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr); 153 ierr = PetscFree(toffsets);CHKERRQ(ierr); 154 ierr = PetscSFBcastBegin(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr); 155 ierr = PetscSFBcastEnd(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr); 156 ierr = PetscSFDestroy(&sf);CHKERRQ(ierr); 157 /* send rows belonging to the remote so that then we could get the overlapping data back */ 158 ierr = MatIncreaseOverlap_MPIAIJ_Send_Scalable(mat,nidx,nfrom,fromranks,fromsizes,fromdata,&sbsizes,&sbdata);CHKERRQ(ierr); 159 ierr = PetscFree2(todata,fromdata);CHKERRQ(ierr); 160 ierr = PetscFree(fromsizes);CHKERRQ(ierr); 161 ierr = 
PetscCommBuildTwoSided(comm,2,MPIU_INT,nfrom,fromranks,sbsizes,&nto,&toranks,&tosizes);CHKERRQ(ierr); 162 ierr = PetscFree(fromranks);CHKERRQ(ierr); 163 nrecvrows = 0; 164 for (i=0; i<nto; i++) nrecvrows += tosizes[2*i]; 165 ierr = PetscCalloc1(nrecvrows,&todata);CHKERRQ(ierr); 166 ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr); 167 nrecvrows = 0; 168 for (i=0; i<nto; i++){ 169 for (j=0; j<tosizes[2*i]; j++){ 170 remote[nrecvrows].rank = toranks[i]; 171 remote[nrecvrows++].index = tosizes[2*i+1]+j; 172 } 173 } 174 ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr); 175 ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr); 176 /* use two-sided communication by default since OPENMPI has some bugs for one-sided one */ 177 ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr); 178 ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr); 179 /* overlap communication and computation */ 180 ierr = PetscSFBcastBegin(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr); 181 ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr); 182 ierr = PetscSFBcastEnd(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr); 183 ierr = PetscSFDestroy(&sf);CHKERRQ(ierr); 184 ierr = PetscFree2(sbdata,sbsizes);CHKERRQ(ierr); 185 ierr = MatIncreaseOverlap_MPIAIJ_Receive_Scalable(mat,nidx,is,nrecvrows,todata);CHKERRQ(ierr); 186 ierr = PetscFree(toranks);CHKERRQ(ierr); 187 ierr = PetscFree(tosizes);CHKERRQ(ierr); 188 ierr = PetscFree(todata);CHKERRQ(ierr); 189 PetscFunctionReturn(0); 190 } 191 192 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat mat,PetscInt nidx, IS is[], PetscInt nrecvs, PetscInt *recvdata) 193 { 194 PetscInt *isz,isz_i,i,j,is_id, data_size; 195 PetscInt col,lsize,max_lsize,*indices_temp, *indices_i; 196 const PetscInt *indices_i_temp; 197 PetscErrorCode ierr; 198 199 PetscFunctionBegin; 200 max_lsize = 0; 201 ierr = PetscMalloc1(nidx,&isz);CHKERRQ(ierr); 202 for (i=0; i<nidx; i++){ 203 ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr); 204 max_lsize = lsize>max_lsize ? 
lsize:max_lsize; 205 isz[i] = lsize; 206 } 207 ierr = PetscMalloc1((max_lsize+nrecvs)*nidx,&indices_temp);CHKERRQ(ierr); 208 for (i=0; i<nidx; i++){ 209 ierr = ISGetIndices(is[i],&indices_i_temp);CHKERRQ(ierr); 210 ierr = PetscMemcpy(indices_temp+i*(max_lsize+nrecvs),indices_i_temp, sizeof(PetscInt)*isz[i]);CHKERRQ(ierr); 211 ierr = ISRestoreIndices(is[i],&indices_i_temp);CHKERRQ(ierr); 212 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 213 } 214 /* retrieve information to get row id and its overlap */ 215 for (i=0; i<nrecvs; ){ 216 is_id = recvdata[i++]; 217 data_size = recvdata[i++]; 218 indices_i = indices_temp+(max_lsize+nrecvs)*is_id; 219 isz_i = isz[is_id]; 220 for (j=0; j< data_size; j++){ 221 col = recvdata[i++]; 222 indices_i[isz_i++] = col; 223 } 224 isz[is_id] = isz_i; 225 } 226 /* remove duplicate entities */ 227 for (i=0; i<nidx; i++){ 228 indices_i = indices_temp+(max_lsize+nrecvs)*i; 229 isz_i = isz[i]; 230 ierr = PetscSortRemoveDupsInt(&isz_i,indices_i);CHKERRQ(ierr); 231 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz_i,indices_i,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr); 232 } 233 ierr = PetscFree(isz);CHKERRQ(ierr); 234 ierr = PetscFree(indices_temp);CHKERRQ(ierr); 235 PetscFunctionReturn(0); 236 } 237 238 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat mat,PetscInt nidx, PetscMPIInt nfrom,PetscMPIInt *fromranks,PetscInt *fromsizes, PetscInt *fromrows, PetscInt **sbrowsizes, PetscInt **sbrows) 239 { 240 PetscLayout rmap,cmap; 241 PetscInt i,j,k,l,*rows_i,*rows_data_ptr,**rows_data,max_fszs,rows_pos,*rows_pos_i; 242 PetscInt is_id,tnz,an,bn,rstart,cstart,row,start,end,col,totalrows,*sbdata; 243 PetscInt *indv_counts,indvc_ij,*sbsizes,*indices_tmp,*offsets; 244 const PetscInt *gcols,*ai,*aj,*bi,*bj; 245 Mat amat,bmat; 246 PetscMPIInt rank; 247 PetscBool done; 248 MPI_Comm comm; 249 PetscErrorCode ierr; 250 251 PetscFunctionBegin; 252 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 253 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 254 ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr); 255 /* Even if the mat is symmetric, we still assume it is not symmetric */ 256 ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 257 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"can not get row IJ \n"); 258 ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 259 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"can not get row IJ \n"); 260 /* total number of nonzero values is used to estimate the memory usage in the next step */ 261 tnz = ai[an]+bi[bn]; 262 ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr); 263 ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr); 264 ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr); 265 /* to find the longest message */ 266 max_fszs = 0; 267 for (i=0; i<nfrom; i++) max_fszs = fromsizes[2*i]>max_fszs ? fromsizes[2*i]:max_fszs; 268 /* better way to estimate number of nonzero in the mat??? 
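 * for now tnz = ai[an] + bi[bn], the nonzero count of the local diagonal (A) and off-diagonal (B) blocks, is used as an upper bound for the indices_tmp scratch array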
*/ 269 ierr = PetscCalloc5(max_fszs*nidx,&rows_data_ptr,nidx,&rows_data,nidx,&rows_pos_i,nfrom*nidx,&indv_counts,tnz,&indices_tmp);CHKERRQ(ierr); 270 for (i=0; i<nidx; i++) rows_data[i] = rows_data_ptr+max_fszs*i; 271 rows_pos = 0; 272 totalrows = 0; 273 for (i=0; i<nfrom; i++){ 274 ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr); 275 /* group data together */ 276 for (j=0; j<fromsizes[2*i]; j+=2){ 277 is_id = fromrows[rows_pos++];/* no of is */ 278 rows_i = rows_data[is_id]; 279 rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++];/* row */ 280 } 281 /* estimate a space to avoid multiple allocations */ 282 for (j=0; j<nidx; j++){ 283 indvc_ij = 0; 284 rows_i = rows_data[j]; 285 for (l=0; l<rows_pos_i[j]; l++){ 286 row = rows_i[l]-rstart; 287 start = ai[row]; 288 end = ai[row+1]; 289 for (k=start; k<end; k++){ /* Amat */ 290 col = aj[k] + cstart; 291 indices_tmp[indvc_ij++] = col;/* do not count the rows from the original rank */ 292 } 293 start = bi[row]; 294 end = bi[row+1]; 295 for (k=start; k<end; k++) { /* Bmat */ 296 col = gcols[bj[k]]; 297 indices_tmp[indvc_ij++] = col; 298 } 299 } 300 ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr); 301 indv_counts[i*nidx+j] = indvc_ij; 302 totalrows += indvc_ij; 303 } 304 } 305 /* message triple <no of is, number of rows, rows> */ 306 ierr = PetscCalloc2(totalrows+nidx*nfrom*2,&sbdata,2*nfrom,&sbsizes);CHKERRQ(ierr); 307 totalrows = 0; 308 rows_pos = 0; 309 /* use this code again */ 310 for (i=0;i<nfrom;i++){ 311 ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr); 312 for (j=0; j<fromsizes[2*i]; j+=2){ 313 is_id = fromrows[rows_pos++]; 314 rows_i = rows_data[is_id]; 315 rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++]; 316 } 317 /* add data */ 318 for (j=0; j<nidx; j++){ 319 if (!indv_counts[i*nidx+j]) continue; 320 indvc_ij = 0; 321 sbdata[totalrows++] = j; 322 sbdata[totalrows++] = indv_counts[i*nidx+j]; 323 sbsizes[2*i] += 2; 324 rows_i = rows_data[j]; 325 for (l=0; l<rows_pos_i[j]; l++){ 326 row = rows_i[l]-rstart; 327 start = ai[row]; 328 end = ai[row+1]; 329 for (k=start; k<end; k++){ /* Amat */ 330 col = aj[k] + cstart; 331 indices_tmp[indvc_ij++] = col; 332 } 333 start = bi[row]; 334 end = bi[row+1]; 335 for (k=start; k<end; k++) { /* Bmat */ 336 col = gcols[bj[k]]; 337 indices_tmp[indvc_ij++] = col; 338 } 339 } 340 ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr); 341 sbsizes[2*i] += indvc_ij; 342 ierr = PetscMemcpy(sbdata+totalrows,indices_tmp,sizeof(PetscInt)*indvc_ij);CHKERRQ(ierr); 343 totalrows += indvc_ij; 344 } 345 } 346 ierr = PetscCalloc1(nfrom+1,&offsets);CHKERRQ(ierr); 347 for (i=0; i<nfrom; i++){ 348 offsets[i+1] = offsets[i] + sbsizes[2*i]; 349 sbsizes[2*i+1] = offsets[i]; 350 } 351 ierr = PetscFree(offsets);CHKERRQ(ierr); 352 if (sbrowsizes) *sbrowsizes = sbsizes; 353 if (sbrows) *sbrows = sbdata; 354 ierr = PetscFree5(rows_data_ptr,rows_data,rows_pos_i,indv_counts,indices_tmp);CHKERRQ(ierr); 355 ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 356 ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 357 PetscFunctionReturn(0); 358 } 359 360 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat mat,PetscInt nidx, IS is[]) 361 { 362 const PetscInt *gcols,*ai,*aj,*bi,*bj, *indices; 363 PetscInt tnz,an,bn,i,j,row,start,end,rstart,cstart,col,k,*indices_temp; 364 PetscInt lsize,lsize_tmp,owner; 365 PetscMPIInt rank; 366 Mat amat,bmat; 367 PetscBool done; 368 PetscLayout 
cmap,rmap; 369 MPI_Comm comm; 370 PetscErrorCode ierr; 371 372 PetscFunctionBegin; 373 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 374 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 375 ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr); 376 ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 377 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ \n"); 378 ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 379 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ \n"); 380 /* is it a safe way to compute the number of nonzero values ? */ 381 tnz = ai[an]+bi[bn]; 382 ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr); 383 ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr); 384 ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr); 385 /* this is a better way to estimate memory than the old implementation 386 * where the global size of the matrix is used 387 * */ 388 ierr = PetscMalloc1(tnz,&indices_temp);CHKERRQ(ierr); 389 for (i=0; i<nidx; i++) { 390 ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr); 391 ierr = ISGetIndices(is[i],&indices);CHKERRQ(ierr); 392 lsize_tmp = 0; 393 for (j=0; j<lsize; j++) { 394 owner = -1; 395 row = indices[j]; 396 ierr = PetscLayoutFindOwner(rmap,row,&owner);CHKERRQ(ierr); 397 if (owner != rank) continue; 398 /* local number */ 399 row -= rstart; 400 start = ai[row]; 401 end = ai[row+1]; 402 for (k=start; k<end; k++) { /* Amat */ 403 col = aj[k] + cstart; 404 indices_temp[lsize_tmp++] = col; 405 } 406 start = bi[row]; 407 end = bi[row+1]; 408 for (k=start; k<end; k++) { /* Bmat */ 409 col = gcols[bj[k]]; 410 indices_temp[lsize_tmp++] = col; 411 } 412 } 413 ierr = ISRestoreIndices(is[i],&indices);CHKERRQ(ierr); 414 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 415 ierr = PetscSortRemoveDupsInt(&lsize_tmp,indices_temp);CHKERRQ(ierr); 416 ierr = ISCreateGeneral(PETSC_COMM_SELF,lsize_tmp,indices_temp,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr); 417 } 418 ierr = PetscFree(indices_temp);CHKERRQ(ierr); 419 ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 420 ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 421 PetscFunctionReturn(0); 422 } 423 424 425 /* 426 Sample message format: 427 If processor A wants processor B to process some elements corresponding 428 to index sets is[1],is[5] 429 mesg [0] = 2 (no of index sets in the mesg) 430 ----------- 431 mesg [1] = 1 => is[1] 432 mesg [2] = sizeof(is[1]); 433 ----------- 434 mesg [3] = 5 => is[5] 435 mesg [4] = sizeof(is[5]); 436 ----------- 437 mesg [5] 438 mesg [n] data(is[1]) 439 ----------- 440 mesg[n+1] 441 mesg[m] data(is[5]) 442 ----------- 443 444 Notes: 445 nrqs - no of requests sent (or to be sent out) 446 nrqr - no of requests received (which have to be, or which have been, processed) 447 */ 448 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat C,PetscInt imax,IS is[]) 449 { 450 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 451 PetscMPIInt *w1,*w2,nrqr,*w3,*w4,*onodes1,*olengths1,*onodes2,*olengths2; 452 const PetscInt **idx,*idx_i; 453 PetscInt *n,**data,len; 454 #if defined(PETSC_USE_CTABLE) 455 PetscTable *table_data,table_data_i; 456 PetscInt *tdata,tcount,tcount_max; 457 #else 458 PetscInt *data_i,*d_p; 459 #endif 460 PetscErrorCode ierr; 461 PetscMPIInt size,rank,tag1,tag2; 462 PetscInt M,i,j,k,**rbuf,row,proc = 0,nrqs,msz,**outdat,**ptr; 463 PetscInt
*ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2; 464 PetscBT *table; 465 MPI_Comm comm; 466 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2; 467 MPI_Status *s_status,*recv_status; 468 char *t_p; 469 470 PetscFunctionBegin; 471 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 472 size = c->size; 473 rank = c->rank; 474 M = C->rmap->N; 475 476 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 477 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 478 479 ierr = PetscMalloc2(imax,&idx,imax,&n);CHKERRQ(ierr); 480 481 for (i=0; i<imax; i++) { 482 ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr); 483 ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr); 484 } 485 486 /* evaluate communication - mesg to whom, length of mesg, and buffer space 487 required. Based on this, buffers are allocated, and data copied into them */ 488 ierr = PetscMalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); 489 ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 490 ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 491 ierr = PetscMemzero(w3,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 492 for (i=0; i<imax; i++) { 493 ierr = PetscMemzero(w4,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 494 idx_i = idx[i]; 495 len = n[i]; 496 for (j=0; j<len; j++) { 497 row = idx_i[j]; 498 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries"); 499 ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr); 500 w4[proc]++; 501 } 502 for (j=0; j<size; j++) { 503 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 504 } 505 } 506 507 nrqs = 0; /* no of outgoing messages */ 508 msz = 0; /* total mesg length (for all procs) */ 509 w1[rank] = 0; /* no mesg sent to itself */ 510 w3[rank] = 0; 511 for (i=0; i<size; i++) { 512 if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 513 } 514 /* pa - list of processors to communicate with */ 515 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); 516 for (i=0,j=0; i<size; i++) { 517 if (w1[i]) {pa[j] = i; j++;} 518 } 519 520 /* Each message would have a header = 1 + 2*(no of IS) + data */ 521 for (i=0; i<nrqs; i++) { 522 j = pa[i]; 523 w1[j] += w2[j] + 2*w3[j]; 524 msz += w1[j]; 525 } 526 527 /* Determine the number of messages to expect, their lengths, and from-ids */ 528 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 529 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 530 531 /* Now post the Irecvs corresponding to these messages */ 532 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr); 533 534 /* Allocate Memory for outgoing messages */ 535 ierr = PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);CHKERRQ(ierr); 536 ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr); 537 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 538 539 { 540 PetscInt *iptr = tmp,ict = 0; 541 for (i=0; i<nrqs; i++) { 542 j = pa[i]; 543 iptr += ict; 544 outdat[j] = iptr; 545 ict = w1[j]; 546 } 547 } 548 549 /* Form the outgoing messages */ 550 /* plug in the headers */ 551 for (i=0; i<nrqs; i++) { 552 j = pa[i]; 553 outdat[j][0] = 0; 554 ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 555 ptr[j] = outdat[j] + 2*w3[j] + 1; 556 } 557 558 /* Memory for doing local proc's work */ 559 { 560 PetscInt M_BPB_imax = 0; 561 #if defined(PETSC_USE_CTABLE)
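 /* with PETSC_USE_CTABLE, a PetscTable (hash table) per index set replaces the dense M*imax integer workspace d_p allocated in the #else branch below, reducing memory for matrices with many global rows */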
562 ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr); 563 ierr = PetscMalloc1(imax,&table_data);CHKERRQ(ierr); 564 for (i=0; i<imax; i++) { 565 ierr = PetscTableCreate(n[i]+1,M+1,&table_data[i]);CHKERRQ(ierr); 566 } 567 ierr = PetscCalloc4(imax,&table, imax,&data, imax,&isz, M_BPB_imax,&t_p);CHKERRQ(ierr); 568 for (i=0; i<imax; i++) { 569 table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i; 570 } 571 #else 572 PetscInt Mimax = 0; 573 ierr = PetscIntMultError(M,imax, &Mimax);CHKERRQ(ierr); 574 ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr); 575 ierr = PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mimax,&d_p, M_BPB_imax,&t_p);CHKERRQ(ierr); 576 for (i=0; i<imax; i++) { 577 table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i; 578 data[i] = d_p + M*i; 579 } 580 #endif 581 } 582 583 /* Parse the IS and update local tables and the outgoing buf with the data */ 584 { 585 PetscInt n_i,isz_i,*outdat_j,ctr_j; 586 PetscBT table_i; 587 588 for (i=0; i<imax; i++) { 589 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 590 n_i = n[i]; 591 table_i = table[i]; 592 idx_i = idx[i]; 593 #if defined(PETSC_USE_CTABLE) 594 table_data_i = table_data[i]; 595 #else 596 data_i = data[i]; 597 #endif 598 isz_i = isz[i]; 599 for (j=0; j<n_i; j++) { /* parse the indices of each IS */ 600 row = idx_i[j]; 601 ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr); 602 if (proc != rank) { /* copy to the outgoing buffer */ 603 ctr[proc]++; 604 *ptr[proc] = row; 605 ptr[proc]++; 606 } else if (!PetscBTLookupSet(table_i,row)) { 607 #if defined(PETSC_USE_CTABLE) 608 ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 609 #else 610 data_i[isz_i] = row; /* Update the local table */ 611 #endif 612 isz_i++; 613 } 614 } 615 /* Update the headers for the current IS */ 616 for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */ 617 if ((ctr_j = ctr[j])) { 618 outdat_j = outdat[j]; 619 k = ++outdat_j[0]; 620 outdat_j[2*k] = ctr_j; 621 outdat_j[2*k-1] = i; 622 } 623 } 624 isz[i] = isz_i; 625 } 626 } 627 628 /* Now post the sends */ 629 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 630 for (i=0; i<nrqs; ++i) { 631 j = pa[i]; 632 ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr); 633 } 634 635 /* No longer need the original indices */ 636 for (i=0; i<imax; ++i) { 637 ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr); 638 } 639 ierr = PetscFree2(idx,n);CHKERRQ(ierr); 640 641 for (i=0; i<imax; ++i) { 642 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 643 } 644 645 /* Do Local work */ 646 #if defined(PETSC_USE_CTABLE) 647 ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,NULL,table_data);CHKERRQ(ierr); 648 #else 649 ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,data,NULL);CHKERRQ(ierr); 650 #endif 651 652 /* Receive messages */ 653 ierr = PetscMalloc1(nrqr+1,&recv_status);CHKERRQ(ierr); 654 if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);} 655 656 ierr = PetscMalloc1(nrqs+1,&s_status);CHKERRQ(ierr); 657 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);} 658 659 /* Phase 1 sends are complete - deallocate buffers */ 660 ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr); 661 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 662 663 ierr = PetscMalloc1(nrqr+1,&xdata);CHKERRQ(ierr); 664 ierr = PetscMalloc1(nrqr+1,&isz1);CHKERRQ(ierr); 665 ierr = MatIncreaseOverlap_MPIAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr); 666 ierr = 
PetscFree(rbuf[0]);CHKERRQ(ierr); 667 ierr = PetscFree(rbuf);CHKERRQ(ierr); 668 669 670 /* Send the data back */ 671 /* Do a global reduction to know the buffer space req for incoming messages */ 672 { 673 PetscMPIInt *rw1; 674 675 ierr = PetscCalloc1(size,&rw1);CHKERRQ(ierr); 676 677 for (i=0; i<nrqr; ++i) { 678 proc = recv_status[i].MPI_SOURCE; 679 680 if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch"); 681 rw1[proc] = isz1[i]; 682 } 683 ierr = PetscFree(onodes1);CHKERRQ(ierr); 684 ierr = PetscFree(olengths1);CHKERRQ(ierr); 685 686 /* Determine the number of messages to expect, their lengths, from from-ids */ 687 ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr); 688 ierr = PetscFree(rw1);CHKERRQ(ierr); 689 } 690 /* Now post the Irecvs corresponding to these messages */ 691 ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr); 692 693 /* Now post the sends */ 694 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 695 for (i=0; i<nrqr; ++i) { 696 j = recv_status[i].MPI_SOURCE; 697 ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr); 698 } 699 700 /* receive work done on other processors */ 701 { 702 PetscInt is_no,ct1,max,*rbuf2_i,isz_i,jmax; 703 PetscMPIInt idex; 704 PetscBT table_i; 705 MPI_Status *status2; 706 707 ierr = PetscMalloc1((PetscMax(nrqr,nrqs)+1),&status2);CHKERRQ(ierr); 708 for (i=0; i<nrqs; ++i) { 709 ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr); 710 /* Process the message */ 711 rbuf2_i = rbuf2[idex]; 712 ct1 = 2*rbuf2_i[0]+1; 713 jmax = rbuf2[idex][0]; 714 for (j=1; j<=jmax; j++) { 715 max = rbuf2_i[2*j]; 716 is_no = rbuf2_i[2*j-1]; 717 isz_i = isz[is_no]; 718 table_i = table[is_no]; 719 #if defined(PETSC_USE_CTABLE) 720 table_data_i = table_data[is_no]; 721 #else 722 data_i = data[is_no]; 723 #endif 724 for (k=0; k<max; k++,ct1++) { 725 row = rbuf2_i[ct1]; 726 if (!PetscBTLookupSet(table_i,row)) { 727 #if defined(PETSC_USE_CTABLE) 728 ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 729 #else 730 data_i[isz_i] = row; 731 #endif 732 isz_i++; 733 } 734 } 735 isz[is_no] = isz_i; 736 } 737 } 738 739 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);} 740 ierr = PetscFree(status2);CHKERRQ(ierr); 741 } 742 743 #if defined(PETSC_USE_CTABLE) 744 tcount_max = 0; 745 for (i=0; i<imax; ++i) { 746 table_data_i = table_data[i]; 747 ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr); 748 if (tcount_max < tcount) tcount_max = tcount; 749 } 750 ierr = PetscMalloc1(tcount_max+1,&tdata);CHKERRQ(ierr); 751 #endif 752 753 for (i=0; i<imax; ++i) { 754 #if defined(PETSC_USE_CTABLE) 755 PetscTablePosition tpos; 756 table_data_i = table_data[i]; 757 758 ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr); 759 while (tpos) { 760 ierr = PetscTableGetNext(table_data_i,&tpos,&k,&j);CHKERRQ(ierr); 761 tdata[--j] = --k; 762 } 763 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],tdata,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 764 #else 765 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 766 #endif 767 } 768 769 ierr = PetscFree(onodes2);CHKERRQ(ierr); 770 ierr = PetscFree(olengths2);CHKERRQ(ierr); 771 772 ierr = PetscFree(pa);CHKERRQ(ierr); 773 ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr); 774 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 775 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 776 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 777 ierr = 
PetscFree(s_waits2);CHKERRQ(ierr); 778 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 779 ierr = PetscFree(s_status);CHKERRQ(ierr); 780 ierr = PetscFree(recv_status);CHKERRQ(ierr); 781 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 782 ierr = PetscFree(xdata);CHKERRQ(ierr); 783 ierr = PetscFree(isz1);CHKERRQ(ierr); 784 #if defined(PETSC_USE_CTABLE) 785 for (i=0; i<imax; i++) { 786 ierr = PetscTableDestroy((PetscTable*)&table_data[i]);CHKERRQ(ierr); 787 } 788 ierr = PetscFree(table_data);CHKERRQ(ierr); 789 ierr = PetscFree(tdata);CHKERRQ(ierr); 790 ierr = PetscFree4(table,data,isz,t_p);CHKERRQ(ierr); 791 #else 792 ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr); 793 #endif 794 PetscFunctionReturn(0); 795 } 796 797 /* 798 MatIncreaseOverlap_MPIAIJ_Local - Called by MatincreaseOverlap, to do 799 the work on the local processor. 800 801 Inputs: 802 C - MAT_MPIAIJ; 803 imax - total no of index sets processed at a time; 804 table - an array of char - size = m bits. 805 806 Output: 807 isz - array containing the count of the solution elements corresponding 808 to each index set; 809 data or table_data - pointer to the solutions 810 */ 811 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data,PetscTable *table_data) 812 { 813 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 814 Mat A = c->A,B = c->B; 815 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data; 816 PetscInt start,end,val,max,rstart,cstart,*ai,*aj; 817 PetscInt *bi,*bj,*garray,i,j,k,row,isz_i; 818 PetscBT table_i; 819 #if defined(PETSC_USE_CTABLE) 820 PetscTable table_data_i; 821 PetscErrorCode ierr; 822 PetscTablePosition tpos; 823 PetscInt tcount,*tdata; 824 #else 825 PetscInt *data_i; 826 #endif 827 828 PetscFunctionBegin; 829 rstart = C->rmap->rstart; 830 cstart = C->cmap->rstart; 831 ai = a->i; 832 aj = a->j; 833 bi = b->i; 834 bj = b->j; 835 garray = c->garray; 836 837 for (i=0; i<imax; i++) { 838 #if defined(PETSC_USE_CTABLE) 839 /* copy existing entries of table_data_i into tdata[] */ 840 table_data_i = table_data[i]; 841 ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr); 842 if (tcount != isz[i]) SETERRQ3(PETSC_COMM_SELF,0," tcount %d != isz[%d] %d",tcount,i,isz[i]); 843 844 ierr = PetscMalloc1(tcount,&tdata);CHKERRQ(ierr); 845 ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr); 846 while (tpos) { 847 ierr = PetscTableGetNext(table_data_i,&tpos,&row,&j);CHKERRQ(ierr); 848 tdata[--j] = --row; 849 if (j > tcount - 1) SETERRQ2(PETSC_COMM_SELF,0," j %d >= tcount %d",j,tcount); 850 } 851 #else 852 data_i = data[i]; 853 #endif 854 table_i = table[i]; 855 isz_i = isz[i]; 856 max = isz[i]; 857 858 for (j=0; j<max; j++) { 859 #if defined(PETSC_USE_CTABLE) 860 row = tdata[j] - rstart; 861 #else 862 row = data_i[j] - rstart; 863 #endif 864 start = ai[row]; 865 end = ai[row+1]; 866 for (k=start; k<end; k++) { /* Amat */ 867 val = aj[k] + cstart; 868 if (!PetscBTLookupSet(table_i,val)) { 869 #if defined(PETSC_USE_CTABLE) 870 ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 871 #else 872 data_i[isz_i] = val; 873 #endif 874 isz_i++; 875 } 876 } 877 start = bi[row]; 878 end = bi[row+1]; 879 for (k=start; k<end; k++) { /* Bmat */ 880 val = garray[bj[k]]; 881 if (!PetscBTLookupSet(table_i,val)) { 882 #if defined(PETSC_USE_CTABLE) 883 ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 884 #else 885 data_i[isz_i] = val; 886 #endif 887 isz_i++; 888 } 889 } 890 } 891 isz[i] = isz_i; 892 893 #if 
defined(PETSC_USE_CTABLE) 894 ierr = PetscFree(tdata);CHKERRQ(ierr); 895 #endif 896 } 897 PetscFunctionReturn(0); 898 } 899 900 /* 901 MatIncreaseOverlap_MPIAIJ_Receive - Process the received messages, 902 and return the output 903 904 Input: 905 C - the matrix 906 nrqr - no of messages being processed. 907 rbuf - an array of pointers to the received requests 908 909 Output: 910 xdata - array of messages to be sent back 911 isz1 - size of each message 912 913 For better efficiency perhaps we should malloc separately each xdata[i], 914 then if a reallocation is required we need only copy the data for that one row 915 rather than all previous rows, as it is now where a single large chunk of 916 memory is used. 917 918 */ 919 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1) 920 { 921 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 922 Mat A = c->A,B = c->B; 923 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data; 924 PetscErrorCode ierr; 925 PetscInt rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k; 926 PetscInt row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end; 927 PetscInt val,max1,max2,m,no_malloc =0,*tmp,new_estimate,ctr; 928 PetscInt *rbuf_i,kmax,rbuf_0; 929 PetscBT xtable; 930 931 PetscFunctionBegin; 932 m = C->rmap->N; 933 rstart = C->rmap->rstart; 934 cstart = C->cmap->rstart; 935 ai = a->i; 936 aj = a->j; 937 bi = b->i; 938 bj = b->j; 939 garray = c->garray; 940 941 942 for (i=0,ct=0,total_sz=0; i<nrqr; ++i) { 943 rbuf_i = rbuf[i]; 944 rbuf_0 = rbuf_i[0]; 945 ct += rbuf_0; 946 for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j]; 947 } 948 949 if (C->rmap->n) max1 = ct*(a->nz + b->nz)/C->rmap->n; 950 else max1 = 1; 951 mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1); 952 ierr = PetscMalloc1(mem_estimate,&xdata[0]);CHKERRQ(ierr); 953 ++no_malloc; 954 ierr = PetscBTCreate(m,&xtable);CHKERRQ(ierr); 955 ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr); 956 957 ct3 = 0; 958 for (i=0; i<nrqr; i++) { /* for each mesg from proc i */ 959 rbuf_i = rbuf[i]; 960 rbuf_0 = rbuf_i[0]; 961 ct1 = 2*rbuf_0+1; 962 ct2 = ct1; 963 ct3 += ct1; 964 for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/ 965 ierr = PetscBTMemzero(m,xtable);CHKERRQ(ierr); 966 oct2 = ct2; 967 kmax = rbuf_i[2*j]; 968 for (k=0; k<kmax; k++,ct1++) { 969 row = rbuf_i[ct1]; 970 if (!PetscBTLookupSet(xtable,row)) { 971 if (!(ct3 < mem_estimate)) { 972 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 973 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 974 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 975 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 976 xdata[0] = tmp; 977 mem_estimate = new_estimate; ++no_malloc; 978 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 979 } 980 xdata[i][ct2++] = row; 981 ct3++; 982 } 983 } 984 for (k=oct2,max2=ct2; k<max2; k++) { 985 row = xdata[i][k] - rstart; 986 start = ai[row]; 987 end = ai[row+1]; 988 for (l=start; l<end; l++) { 989 val = aj[l] + cstart; 990 if (!PetscBTLookupSet(xtable,val)) { 991 if (!(ct3 < mem_estimate)) { 992 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 993 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 994 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 995 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 996 xdata[0] = tmp; 997 mem_estimate = new_estimate; ++no_malloc; 998 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 999 } 1000 xdata[i][ct2++] = val; 1001 ct3++; 1002 }
1003 } 1004 start = bi[row]; 1005 end = bi[row+1]; 1006 for (l=start; l<end; l++) { 1007 val = garray[bj[l]]; 1008 if (!PetscBTLookupSet(xtable,val)) { 1009 if (!(ct3 < mem_estimate)) { 1010 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 1011 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 1012 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 1013 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 1014 xdata[0] = tmp; 1015 mem_estimate = new_estimate; ++no_malloc; 1016 for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 1017 } 1018 xdata[i][ct2++] = val; 1019 ct3++; 1020 } 1021 } 1022 } 1023 /* Update the header*/ 1024 xdata[i][2*j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/ 1025 xdata[i][2*j-1] = rbuf_i[2*j-1]; 1026 } 1027 xdata[i][0] = rbuf_0; 1028 xdata[i+1] = xdata[i] + ct2; 1029 isz1[i] = ct2; /* size of each message */ 1030 } 1031 ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr); 1032 ierr = PetscInfo3(C,"Allocated %D bytes, required %D bytes, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr); 1033 PetscFunctionReturn(0); 1034 } 1035 /* -------------------------------------------------------------------------*/ 1036 extern PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,Mat*); 1037 extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType); 1038 /* 1039 Every processor gets the entire matrix 1040 */ 1041 PetscErrorCode MatGetSubMatrix_MPIAIJ_All(Mat A,MatGetSubMatrixOption flag,MatReuse scall,Mat *Bin[]) 1042 { 1043 Mat B; 1044 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 1045 Mat_SeqAIJ *b,*ad = (Mat_SeqAIJ*)a->A->data,*bd = (Mat_SeqAIJ*)a->B->data; 1046 PetscErrorCode ierr; 1047 PetscMPIInt size,rank,*recvcounts = 0,*displs = 0; 1048 PetscInt sendcount,i,*rstarts = A->rmap->range,n,cnt,j; 1049 PetscInt m,*b_sendj,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf; 1050 MatScalar *sendbuf,*recvbuf,*a_sendbuf,*b_sendbuf; 1051 1052 PetscFunctionBegin; 1053 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 1054 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr); 1055 1056 if (scall == MAT_INITIAL_MATRIX) { 1057 /* ---------------------------------------------------------------- 1058 Tell every processor the number of nonzeros per row 1059 */ 1060 ierr = PetscMalloc1(A->rmap->N,&lens);CHKERRQ(ierr); 1061 for (i=A->rmap->rstart; i<A->rmap->rend; i++) { 1062 lens[i] = ad->i[i-A->rmap->rstart+1] - ad->i[i-A->rmap->rstart] + bd->i[i-A->rmap->rstart+1] - bd->i[i-A->rmap->rstart]; 1063 } 1064 ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr); 1065 for (i=0; i<size; i++) { 1066 recvcounts[i] = A->rmap->range[i+1] - A->rmap->range[i]; 1067 displs[i] = A->rmap->range[i]; 1068 } 1069 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1070 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1071 #else 1072 sendcount = A->rmap->rend - A->rmap->rstart; 1073 ierr = MPI_Allgatherv(lens+A->rmap->rstart,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1074 #endif 1075 /* --------------------------------------------------------------- 1076 Create the sequential matrix of the same type as the local block diagonal 1077 */ 1078 ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr); 1079 ierr = MatSetSizes(B,A->rmap->N,A->cmap->N,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1080 ierr = 
MatSetBlockSizesFromMats(B,A,A);CHKERRQ(ierr); 1081 ierr = MatSetType(B,((PetscObject)a->A)->type_name);CHKERRQ(ierr); 1082 ierr = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr); 1083 ierr = PetscMalloc1(1,Bin);CHKERRQ(ierr); 1084 **Bin = B; 1085 b = (Mat_SeqAIJ*)B->data; 1086 1087 /*-------------------------------------------------------------------- 1088 Copy my part of matrix column indices over 1089 */ 1090 sendcount = ad->nz + bd->nz; 1091 jsendbuf = b->j + b->i[rstarts[rank]]; 1092 a_jsendbuf = ad->j; 1093 b_jsendbuf = bd->j; 1094 n = A->rmap->rend - A->rmap->rstart; 1095 cnt = 0; 1096 for (i=0; i<n; i++) { 1097 1098 /* put in lower diagonal portion */ 1099 m = bd->i[i+1] - bd->i[i]; 1100 while (m > 0) { 1101 /* is it above diagonal (in bd (compressed) numbering) */ 1102 if (garray[*b_jsendbuf] > A->rmap->rstart + i) break; 1103 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 1104 m--; 1105 } 1106 1107 /* put in diagonal portion */ 1108 for (j=ad->i[i]; j<ad->i[i+1]; j++) { 1109 jsendbuf[cnt++] = A->rmap->rstart + *a_jsendbuf++; 1110 } 1111 1112 /* put in upper diagonal portion */ 1113 while (m-- > 0) { 1114 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 1115 } 1116 } 1117 if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt); 1118 1119 /*-------------------------------------------------------------------- 1120 Gather all column indices to all processors 1121 */ 1122 for (i=0; i<size; i++) { 1123 recvcounts[i] = 0; 1124 for (j=A->rmap->range[i]; j<A->rmap->range[i+1]; j++) { 1125 recvcounts[i] += lens[j]; 1126 } 1127 } 1128 displs[0] = 0; 1129 for (i=1; i<size; i++) { 1130 displs[i] = displs[i-1] + recvcounts[i-1]; 1131 } 1132 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1133 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1134 #else 1135 ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1136 #endif 1137 /*-------------------------------------------------------------------- 1138 Assemble the matrix into useable form (note numerical values not yet set) 1139 */ 1140 /* set the b->ilen (length of each row) values */ 1141 ierr = PetscMemcpy(b->ilen,lens,A->rmap->N*sizeof(PetscInt));CHKERRQ(ierr); 1142 /* set the b->i indices */ 1143 b->i[0] = 0; 1144 for (i=1; i<=A->rmap->N; i++) { 1145 b->i[i] = b->i[i-1] + lens[i-1]; 1146 } 1147 ierr = PetscFree(lens);CHKERRQ(ierr); 1148 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1149 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1150 1151 } else { 1152 B = **Bin; 1153 b = (Mat_SeqAIJ*)B->data; 1154 } 1155 1156 /*-------------------------------------------------------------------- 1157 Copy my part of matrix numerical values into the values location 1158 */ 1159 if (flag == MAT_GET_VALUES) { 1160 sendcount = ad->nz + bd->nz; 1161 sendbuf = b->a + b->i[rstarts[rank]]; 1162 a_sendbuf = ad->a; 1163 b_sendbuf = bd->a; 1164 b_sendj = bd->j; 1165 n = A->rmap->rend - A->rmap->rstart; 1166 cnt = 0; 1167 for (i=0; i<n; i++) { 1168 1169 /* put in lower diagonal portion */ 1170 m = bd->i[i+1] - bd->i[i]; 1171 while (m > 0) { 1172 /* is it above diagonal (in bd (compressed) numbering) */ 1173 if (garray[*b_sendj] > A->rmap->rstart + i) break; 1174 sendbuf[cnt++] = *b_sendbuf++; 1175 m--; 1176 b_sendj++; 1177 } 1178 1179 /* put in diagonal portion */ 1180 for (j=ad->i[i]; j<ad->i[i+1]; j++) { 1181 sendbuf[cnt++] = 
*a_sendbuf++; 1182 } 1183 1184 /* put in upper diagonal portion */ 1185 while (m-- > 0) { 1186 sendbuf[cnt++] = *b_sendbuf++; 1187 b_sendj++; 1188 } 1189 } 1190 if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt); 1191 1192 /* ----------------------------------------------------------------- 1193 Gather all numerical values to all processors 1194 */ 1195 if (!recvcounts) { 1196 ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr); 1197 } 1198 for (i=0; i<size; i++) { 1199 recvcounts[i] = b->i[rstarts[i+1]] - b->i[rstarts[i]]; 1200 } 1201 displs[0] = 0; 1202 for (i=1; i<size; i++) { 1203 displs[i] = displs[i-1] + recvcounts[i-1]; 1204 } 1205 recvbuf = b->a; 1206 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1207 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1208 #else 1209 ierr = MPI_Allgatherv(sendbuf,sendcount,MPIU_SCALAR,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1210 #endif 1211 } /* endof (flag == MAT_GET_VALUES) */ 1212 ierr = PetscFree2(recvcounts,displs);CHKERRQ(ierr); 1213 1214 if (A->symmetric) { 1215 ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 1216 } else if (A->hermitian) { 1217 ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr); 1218 } else if (A->structurally_symmetric) { 1219 ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 1220 } 1221 PetscFunctionReturn(0); 1222 } 1223 PetscErrorCode MatDestroy_MPIAIJ_MatGetSubmatrices(Mat C) 1224 { 1225 PetscErrorCode ierr; 1226 Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data; 1227 Mat_SubMat *submatj = c->submatis1; 1228 PetscInt i; 1229 1230 PetscFunctionBegin; 1231 if (!submatj->id) { /* delete data that are linked only to submats[id=0] */ 1232 ierr = PetscFree4(submatj->sbuf1,submatj->ptr,submatj->tmp,submatj->ctr);CHKERRQ(ierr); 1233 1234 for (i=0; i<submatj->nrqr; ++i) { 1235 ierr = PetscFree(submatj->sbuf2[i]);CHKERRQ(ierr); 1236 } 1237 ierr = PetscFree3(submatj->sbuf2,submatj->req_size,submatj->req_source1);CHKERRQ(ierr); 1238 1239 if (submatj->rbuf1) { 1240 ierr = PetscFree(submatj->rbuf1[0]);CHKERRQ(ierr); 1241 ierr = PetscFree(submatj->rbuf1);CHKERRQ(ierr); 1242 } 1243 1244 for (i=0; i<submatj->nrqs; ++i) { 1245 ierr = PetscFree(submatj->rbuf3[i]);CHKERRQ(ierr); 1246 } 1247 ierr = PetscFree3(submatj->req_source2,submatj->rbuf2,submatj->rbuf3);CHKERRQ(ierr); 1248 ierr = PetscFree(submatj->pa);CHKERRQ(ierr); 1249 } 1250 1251 #if defined(PETSC_USE_CTABLE) 1252 ierr = PetscTableDestroy((PetscTable*)&submatj->rmap);CHKERRQ(ierr); 1253 if (submatj->cmap_loc) {ierr = PetscFree(submatj->cmap_loc);CHKERRQ(ierr);} 1254 ierr = PetscFree(submatj->rmap_loc);CHKERRQ(ierr); 1255 #else 1256 ierr = PetscFree(submatj->rmap);CHKERRQ(ierr); 1257 #endif 1258 1259 if (!submatj->allcolumns) { 1260 #if defined(PETSC_USE_CTABLE) 1261 ierr = PetscTableDestroy((PetscTable*)&submatj->cmap);CHKERRQ(ierr); 1262 #else 1263 ierr = PetscFree(submatj->cmap);CHKERRQ(ierr); 1264 #endif 1265 } 1266 ierr = submatj->destroy(C);CHKERRQ(ierr); 1267 ierr = PetscFree(submatj->row2proc);CHKERRQ(ierr); 1268 1269 ierr = PetscFree(submatj);CHKERRQ(ierr); 1270 PetscFunctionReturn(0); 1271 } 1272 1273 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool allcolumns,Mat *submats) 1274 { 1275 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 1276 Mat 
submat,A = c->A,B = c->B; 1277 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data,*subc; 1278 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,nzA,nzB; 1279 PetscInt cstart = C->cmap->rstart,cend = C->cmap->rend,rstart = C->rmap->rstart,*bmap = c->garray; 1280 const PetscInt *icol,*irow; 1281 PetscInt nrow,ncol,start; 1282 PetscErrorCode ierr; 1283 PetscMPIInt rank,size,tag1,tag2,tag3,tag4,*w1,*w2,nrqr; 1284 PetscInt **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,ct3,**rbuf1,row,proc; 1285 PetscInt nrqs=0,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol,*iptr; 1286 PetscInt **rbuf3,*req_source1,*req_source2,**sbuf_aj,**rbuf2,max1,nnz; 1287 PetscInt *lens,rmax,ncols,*cols,Crow; 1288 #if defined(PETSC_USE_CTABLE) 1289 PetscTable cmap,rmap; 1290 PetscInt *cmap_loc,*rmap_loc; 1291 #else 1292 PetscInt *cmap,*rmap; 1293 #endif 1294 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*sbuf1_i,*rbuf2_i,*rbuf3_i; 1295 PetscInt *cworkB,lwrite,*subcols,*row2proc; 1296 PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a,*subvals=NULL; 1297 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 1298 MPI_Request *r_waits4,*s_waits3 = NULL,*s_waits4; 1299 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3 = NULL,*s_status2; 1300 MPI_Status *r_status3 = NULL,*r_status4,*s_status4; 1301 MPI_Comm comm; 1302 PetscScalar **rbuf4,**sbuf_aa,*vals,*sbuf_aa_i,*rbuf4_i; 1303 PetscMPIInt *onodes1,*olengths1,idex,end; 1304 Mat_SubMat *smatis1; 1305 PetscBool isrowsorted; 1306 1307 PetscFunctionBegin; 1308 if (ismax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"This routine only works when all processes have ismax=1"); 1309 1310 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1311 size = c->size; 1312 rank = c->rank; 1313 1314 ierr = ISSorted(isrow[0],&isrowsorted);CHKERRQ(ierr); 1315 if (!isrowsorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow[0] must be sorted"); 1316 1317 ierr = ISGetIndices(isrow[0],&irow);CHKERRQ(ierr); 1318 ierr = ISGetLocalSize(isrow[0],&nrow);CHKERRQ(ierr); 1319 if (allcolumns) { 1320 icol = NULL; 1321 ncol = C->cmap->N; 1322 } else { 1323 ierr = ISGetIndices(iscol[0],&icol);CHKERRQ(ierr); 1324 ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr); 1325 } 1326 1327 if (scall == MAT_INITIAL_MATRIX) { 1328 PetscInt *sbuf2_i,*cworkA,lwrite,ctmp; 1329 1330 /* Get some new tags to keep the communication clean */ 1331 tag1 = ((PetscObject)C)->tag; 1332 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 1333 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 1334 1335 /* evaluate communication - mesg to who, length of mesg, and buffer space 1336 required. Based on this, buffers are allocated, and data copied into them */ 1337 ierr = PetscCalloc2(size,&w1,size,&w2);CHKERRQ(ierr); 1338 ierr = PetscMalloc1(nrow,&row2proc);CHKERRQ(ierr); 1339 1340 /* w1[proc] = num of rows owned by proc -- to be requested */ 1341 proc = 0; 1342 nrqs = 0; /* num of outgoing messages */ 1343 for (j=0; j<nrow; j++) { 1344 row = irow[j]; /* sorted! 
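 — isrow[0] was verified to be sorted above, so the owning process can be found by advancing proc monotonically through C->rmap->range rather than searching from scratch for each row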
*/ 1345 while (row >= C->rmap->range[proc+1]) proc++; 1346 w1[proc]++; 1347 row2proc[j] = proc; /* map row index to proc */ 1348 1349 if (proc != rank && !w2[proc]) { 1350 w2[proc] = 1; nrqs++; 1351 } 1352 } 1353 w1[rank] = 0; /* rows owned by self will not be requested */ 1354 1355 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 1356 for (proc=0,j=0; proc<size; proc++) { 1357 if (w1[proc]) { pa[j++] = proc;} 1358 } 1359 1360 /* Each message would have a header = 1 + 2*(num of IS) + data (here,num of IS = 1) */ 1361 msz = 0; /* total mesg length (for all procs) */ 1362 for (i=0; i<nrqs; i++) { 1363 proc = pa[i]; 1364 w1[proc] += 3; 1365 msz += w1[proc]; 1366 } 1367 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 1368 1369 /* Determine nrqr, the number of messages to expect, their lengths, from from-ids */ 1370 /* if w2[proc]=1, a message of length w1[proc] will be sent to proc; */ 1371 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 1372 1373 /* Input: nrqs: nsend; nrqr: nrecv; w1: msg length to be sent; 1374 Output: onodes1: recv node-ids; olengths1: corresponding recv message length */ 1375 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 1376 1377 /* Now post the Irecvs corresponding to these messages */ 1378 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 1379 1380 ierr = PetscFree(onodes1);CHKERRQ(ierr); 1381 ierr = PetscFree(olengths1);CHKERRQ(ierr); 1382 1383 /* Allocate Memory for outgoing messages */ 1384 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 1385 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 1386 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 1387 1388 /* subf1[pa[0]] = tmp, subf1[pa[i]] = subf1[pa[i-1]] + w1[pa[i-1]] */ 1389 iptr = tmp; 1390 for (i=0; i<nrqs; i++) { 1391 proc = pa[i]; 1392 sbuf1[proc] = iptr; 1393 iptr += w1[proc]; 1394 } 1395 1396 /* Form the outgoing messages */ 1397 /* Initialize the header space */ 1398 for (i=0; i<nrqs; i++) { 1399 proc = pa[i]; 1400 ierr = PetscMemzero(sbuf1[proc],3*sizeof(PetscInt));CHKERRQ(ierr); 1401 ptr[proc] = sbuf1[proc] + 3; 1402 } 1403 1404 /* Parse the isrow and copy data into outbuf */ 1405 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 1406 for (j=0; j<nrow; j++) { /* parse the indices of each IS */ 1407 proc = row2proc[j]; 1408 if (proc != rank) { /* copy to the outgoing buf*/ 1409 *ptr[proc] = irow[j]; 1410 ctr[proc]++; ptr[proc]++; 1411 } 1412 } 1413 1414 /* Update the headers for the current IS */ 1415 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 1416 if ((ctr_j = ctr[j])) { 1417 sbuf1_j = sbuf1[j]; 1418 k = ++sbuf1_j[0]; 1419 sbuf1_j[2*k] = ctr_j; 1420 sbuf1_j[2*k-1] = 0; 1421 } 1422 } 1423 1424 /* Now post the sends */ 1425 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 1426 for (i=0; i<nrqs; ++i) { 1427 proc = pa[i]; 1428 ierr = MPI_Isend(sbuf1[proc],w1[proc],MPIU_INT,proc,tag1,comm,s_waits1+i);CHKERRQ(ierr); 1429 } 1430 1431 /* Post Receives to capture the buffer size */ 1432 ierr = PetscMalloc4(nrqs+1,&r_status2,nrqr+1,&s_waits2,nrqs+1,&r_waits2,nrqr+1,&s_status2);CHKERRQ(ierr); 1433 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 1434 1435 rbuf2[0] = tmp + msz; 1436 for (i=1; i<nrqs; ++i) rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]]; 1437 1438 for (i=0; i<nrqs; ++i) { 1439 proc = pa[i]; 1440 ierr = 
MPI_Irecv(rbuf2[i],w1[proc],MPIU_INT,proc,tag2,comm,r_waits2+i);CHKERRQ(ierr); 1441 } 1442 1443 ierr = PetscFree2(w1,w2);CHKERRQ(ierr); 1444 1445 /* Send to other procs the buf size they should allocate */ 1446 /* Receive messages*/ 1447 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 1448 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 1449 1450 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 1451 for (i=0; i<nrqr; ++i) { 1452 req_size[i] = 0; 1453 rbuf1_i = rbuf1[i]; 1454 start = 2*rbuf1_i[0] + 1; 1455 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 1456 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 1457 sbuf2_i = sbuf2[i]; 1458 for (j=start; j<end; j++) { 1459 k = rbuf1_i[j] - rstart; 1460 ncols = ai[k+1] - ai[k] + bi[k+1] - bi[k]; 1461 sbuf2_i[j] = ncols; 1462 req_size[i] += ncols; 1463 } 1464 req_source1[i] = r_status1[i].MPI_SOURCE; 1465 1466 /* form the header */ 1467 sbuf2_i[0] = req_size[i]; 1468 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 1469 1470 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 1471 } 1472 1473 ierr = PetscFree(r_status1);CHKERRQ(ierr); 1474 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 1475 1476 /* rbuf2 is received, Post recv column indices a->j */ 1477 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 1478 1479 ierr = PetscMalloc4(nrqs+1,&r_waits3,nrqr+1,&s_waits3,nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr); 1480 for (i=0; i<nrqs; ++i) { 1481 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 1482 req_source2[i] = r_status2[i].MPI_SOURCE; 1483 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 1484 } 1485 1486 /* Wait on sends1 and sends2 */ 1487 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 1488 ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr); 1489 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 1490 ierr = PetscFree(s_status1);CHKERRQ(ierr); 1491 1492 ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr); 1493 ierr = PetscFree4(r_status2,s_waits2,r_waits2,s_status2);CHKERRQ(ierr); 1494 1495 /* Now allocate sending buffers for a->j, and send them off */ 1496 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 1497 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1498 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 1499 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 1500 1501 for (i=0; i<nrqr; i++) { /* for each requested message */ 1502 rbuf1_i = rbuf1[i]; 1503 sbuf_aj_i = sbuf_aj[i]; 1504 ct1 = 2*rbuf1_i[0] + 1; 1505 ct2 = 0; 1506 /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ1(PETSC_COMM_SELF,0,"max1 %d != 1",max1); */ 1507 1508 kmax = rbuf1[i][2]; 1509 for (k=0; k<kmax; k++,ct1++) { /* for each row */ 1510 row = rbuf1_i[ct1] - rstart; 1511 nzA = ai[row+1] - ai[row]; 1512 nzB = bi[row+1] - bi[row]; 1513 ncols = nzA + nzB; 1514 cworkA = aj + ai[row]; cworkB = bj + bi[row]; 1515 1516 /* load the column indices for this row into cols*/ 1517 cols = sbuf_aj_i + ct2; 1518 1519 lwrite = 0; 1520 for (l=0; l<nzB; l++) { 1521 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp; 1522 } 1523 for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l]; 1524 for (l=0; l<nzB; l++) { 1525 if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp; 1526 } 1527 1528 ct2 += ncols; 1529 } 1530 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 1531 } 1532 1533 /* create column map (cmap): global col of C 
-> local col of submat */ 1534 #if defined(PETSC_USE_CTABLE) 1535 if (!allcolumns) { 1536 ierr = PetscTableCreate(ncol+1,C->cmap->N+1,&cmap);CHKERRQ(ierr); 1537 ierr = PetscCalloc1(C->cmap->n,&cmap_loc);CHKERRQ(ierr); 1538 for (j=0; j<ncol; j++) { /* use array cmap_loc[] for local col indices */ 1539 if (icol[j] >= cstart && icol[j] <cend) { 1540 cmap_loc[icol[j] - cstart] = j+1; 1541 } else { /* use PetscTable for non-local col indices */ 1542 ierr = PetscTableAdd(cmap,icol[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1543 } 1544 } 1545 } else { 1546 cmap = NULL; 1547 cmap_loc = NULL; 1548 } 1549 ierr = PetscCalloc1(C->rmap->n,&rmap_loc);CHKERRQ(ierr); 1550 #else 1551 if (!allcolumns) { 1552 ierr = PetscCalloc1(C->cmap->N,&cmap);CHKERRQ(ierr); 1553 for (j=0; j<ncol; j++) cmap[icol[j]] = j+1; 1554 } else { 1555 cmap = NULL; 1556 } 1557 #endif 1558 1559 /* Create lens for MatSeqAIJSetPreallocation() */ 1560 ierr = PetscCalloc1(nrow,&lens);CHKERRQ(ierr); 1561 1562 /* Compute lens from local part of C */ 1563 for (j=0; j<nrow; j++) { 1564 row = irow[j]; 1565 proc = row2proc[j]; 1566 if (proc == rank) { 1567 /* diagonal part A = c->A */ 1568 ncols = ai[row-rstart+1] - ai[row-rstart]; 1569 cols = aj + ai[row-rstart]; 1570 if (!allcolumns) { 1571 for (k=0; k<ncols; k++) { 1572 #if defined(PETSC_USE_CTABLE) 1573 tcol = cmap_loc[cols[k]]; 1574 #else 1575 tcol = cmap[cols[k]+cstart]; 1576 #endif 1577 if (tcol) lens[j]++; 1578 } 1579 } else { /* allcolumns */ 1580 lens[j] = ncols; 1581 } 1582 1583 /* off-diagonal part B = c->B */ 1584 ncols = bi[row-rstart+1] - bi[row-rstart]; 1585 cols = bj + bi[row-rstart]; 1586 if (!allcolumns) { 1587 for (k=0; k<ncols; k++) { 1588 #if defined(PETSC_USE_CTABLE) 1589 ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr); 1590 #else 1591 tcol = cmap[bmap[cols[k]]]; 1592 #endif 1593 if (tcol) lens[j]++; 1594 } 1595 } else { /* allcolumns */ 1596 lens[j] += ncols; 1597 } 1598 } 1599 } 1600 1601 /* Create row map (rmap): global row of C -> local row of submat */ 1602 #if defined(PETSC_USE_CTABLE) 1603 ierr = PetscTableCreate(nrow+1,C->rmap->N+1,&rmap);CHKERRQ(ierr); 1604 for (j=0; j<nrow; j++) { 1605 row = irow[j]; 1606 proc = row2proc[j]; 1607 if (proc == rank) { /* a local row */ 1608 rmap_loc[row - rstart] = j; 1609 } else { 1610 ierr = PetscTableAdd(rmap,irow[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1611 } 1612 } 1613 #else 1614 ierr = PetscCalloc1(C->rmap->N,&rmap);CHKERRQ(ierr); 1615 for (j=0; j<nrow; j++) { 1616 rmap[irow[j]] = j; 1617 } 1618 #endif 1619 1620 /* Update lens from offproc data */ 1621 /* recv a->j is done */ 1622 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 1623 for (i=0; i<nrqs; i++) { 1624 proc = pa[i]; 1625 sbuf1_i = sbuf1[proc]; 1626 /* jmax = sbuf1_i[0]; if (jmax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"jmax !=1"); */ 1627 ct1 = 2 + 1; 1628 ct2 = 0; 1629 rbuf2_i = rbuf2[i]; /* received length of C->j */ 1630 rbuf3_i = rbuf3[i]; /* received C->j */ 1631 1632 /* is_no = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */ 1633 max1 = sbuf1_i[2]; 1634 for (k=0; k<max1; k++,ct1++) { 1635 #if defined(PETSC_USE_CTABLE) 1636 ierr = PetscTableFind(rmap,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 1637 row--; 1638 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1639 #else 1640 row = rmap[sbuf1_i[ct1]]; /* the row index in submat */ 1641 #endif 1642 /* Now, store row index of submat in sbuf1_i[ct1] */ 1643 sbuf1_i[ct1] = row; 1644 1645 nnz = rbuf2_i[ct1]; 1646 if 
(!allcolumns) { 1647 for (l=0; l<nnz; l++,ct2++) { 1648 #if defined(PETSC_USE_CTABLE) 1649 if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) { 1650 tcol = cmap_loc[rbuf3_i[ct2] - cstart]; 1651 } else { 1652 ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1653 } 1654 #else 1655 tcol = cmap[rbuf3_i[ct2]]; /* column index in submat */ 1656 #endif 1657 if (tcol) lens[row]++; 1658 } 1659 } else { /* allcolumns */ 1660 lens[row] += nnz; 1661 } 1662 } 1663 } 1664 ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr); 1665 ierr = PetscFree4(r_waits3,s_waits3,r_status3,s_status3);CHKERRQ(ierr); 1666 1667 /* Create the submatrices */ 1668 ierr = MatCreate(PETSC_COMM_SELF,&submat);CHKERRQ(ierr); 1669 ierr = MatSetSizes(submat,nrow,ncol,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1670 1671 ierr = ISGetBlockSize(isrow[0],&i);CHKERRQ(ierr); 1672 ierr = ISGetBlockSize(iscol[0],&j);CHKERRQ(ierr); 1673 ierr = MatSetBlockSizes(submat,i,j);CHKERRQ(ierr); 1674 ierr = MatSetType(submat,((PetscObject)A)->type_name);CHKERRQ(ierr); 1675 ierr = MatSeqAIJSetPreallocation(submat,0,lens);CHKERRQ(ierr); 1676 1677 /* create struct Mat_SubMat and attached it to submat */ 1678 ierr = PetscNew(&smatis1);CHKERRQ(ierr); 1679 subc = (Mat_SeqAIJ*)submat->data; 1680 subc->submatis1 = smatis1; 1681 1682 smatis1->id = 0; 1683 smatis1->nrqs = nrqs; 1684 smatis1->nrqr = nrqr; 1685 smatis1->rbuf1 = rbuf1; 1686 smatis1->rbuf2 = rbuf2; 1687 smatis1->rbuf3 = rbuf3; 1688 smatis1->sbuf2 = sbuf2; 1689 smatis1->req_source2 = req_source2; 1690 1691 smatis1->sbuf1 = sbuf1; 1692 smatis1->ptr = ptr; 1693 smatis1->tmp = tmp; 1694 smatis1->ctr = ctr; 1695 1696 smatis1->pa = pa; 1697 smatis1->req_size = req_size; 1698 smatis1->req_source1 = req_source1; 1699 1700 smatis1->allcolumns = allcolumns; 1701 smatis1->row2proc = row2proc; 1702 smatis1->rmap = rmap; 1703 smatis1->cmap = cmap; 1704 #if defined(PETSC_USE_CTABLE) 1705 smatis1->rmap_loc = rmap_loc; 1706 smatis1->cmap_loc = cmap_loc; 1707 #endif 1708 1709 smatis1->destroy = submat->ops->destroy; 1710 submat->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices; 1711 submat->factortype = C->factortype; 1712 1713 /* compute rmax */ 1714 rmax = 0; 1715 for (i=0; i<nrow; i++) rmax = PetscMax(rmax,lens[i]); 1716 1717 } else { /* scall == MAT_REUSE_MATRIX */ 1718 submat = submats[0]; 1719 if (submat->rmap->n != nrow || submat->cmap->n != ncol) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. 
wrong size"); 1720 1721 subc = (Mat_SeqAIJ*)submat->data; 1722 rmax = subc->rmax; 1723 smatis1 = subc->submatis1; 1724 nrqs = smatis1->nrqs; 1725 nrqr = smatis1->nrqr; 1726 rbuf1 = smatis1->rbuf1; 1727 rbuf2 = smatis1->rbuf2; 1728 rbuf3 = smatis1->rbuf3; 1729 req_source2 = smatis1->req_source2; 1730 1731 sbuf1 = smatis1->sbuf1; 1732 sbuf2 = smatis1->sbuf2; 1733 ptr = smatis1->ptr; 1734 tmp = smatis1->tmp; 1735 ctr = smatis1->ctr; 1736 1737 pa = smatis1->pa; 1738 req_size = smatis1->req_size; 1739 req_source1 = smatis1->req_source1; 1740 1741 allcolumns = smatis1->allcolumns; 1742 row2proc = smatis1->row2proc; 1743 rmap = smatis1->rmap; 1744 cmap = smatis1->cmap; 1745 #if defined(PETSC_USE_CTABLE) 1746 rmap_loc = smatis1->rmap_loc; 1747 cmap_loc = smatis1->cmap_loc; 1748 #endif 1749 } 1750 1751 /* Post recv matrix values */ 1752 ierr = PetscMalloc3(nrqs+1,&rbuf4, rmax,&subcols, rmax,&subvals);CHKERRQ(ierr); 1753 ierr = PetscMalloc4(nrqs+1,&r_waits4,nrqr+1,&s_waits4,nrqs+1,&r_status4,nrqr+1,&s_status4);CHKERRQ(ierr); 1754 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 1755 for (i=0; i<nrqs; ++i) { 1756 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr); 1757 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 1758 } 1759 1760 /* Allocate sending buffers for a->a, and send them off */ 1761 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 1762 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1763 ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr); 1764 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]; 1765 1766 for (i=0; i<nrqr; i++) { 1767 rbuf1_i = rbuf1[i]; 1768 sbuf_aa_i = sbuf_aa[i]; 1769 ct1 = 2*rbuf1_i[0]+1; 1770 ct2 = 0; 1771 /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"max1 !=1"); */ 1772 1773 kmax = rbuf1_i[2]; 1774 for (k=0; k<kmax; k++,ct1++) { 1775 row = rbuf1_i[ct1] - rstart; 1776 nzA = ai[row+1] - ai[row]; 1777 nzB = bi[row+1] - bi[row]; 1778 ncols = nzA + nzB; 1779 cworkB = bj + bi[row]; 1780 vworkA = a_a + ai[row]; 1781 vworkB = b_a + bi[row]; 1782 1783 /* load the column values for this row into vals*/ 1784 vals = sbuf_aa_i + ct2; 1785 1786 lwrite = 0; 1787 for (l=0; l<nzB; l++) { 1788 if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l]; 1789 } 1790 for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l]; 1791 for (l=0; l<nzB; l++) { 1792 if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l]; 1793 } 1794 1795 ct2 += ncols; 1796 } 1797 ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 1798 } 1799 1800 /* Assemble submat */ 1801 /* First assemble the local rows */ 1802 for (j=0; j<nrow; j++) { 1803 row = irow[j]; 1804 proc = row2proc[j]; 1805 if (proc == rank) { 1806 Crow = row - rstart; /* local row index of C */ 1807 #if defined(PETSC_USE_CTABLE) 1808 row = rmap_loc[Crow]; /* row index of submat */ 1809 #else 1810 row = rmap[row]; 1811 #endif 1812 1813 if (allcolumns) { 1814 /* diagonal part A = c->A */ 1815 ncols = ai[Crow+1] - ai[Crow]; 1816 cols = aj + ai[Crow]; 1817 vals = a->a + ai[Crow]; 1818 i = 0; 1819 for (k=0; k<ncols; k++) { 1820 subcols[i] = cols[k] + cstart; 1821 subvals[i++] = vals[k]; 1822 } 1823 1824 /* off-diagonal part B = c->B */ 1825 ncols = bi[Crow+1] - bi[Crow]; 1826 cols = bj + bi[Crow]; 1827 vals = b->a + bi[Crow]; 1828 for (k=0; k<ncols; k++) { 1829 subcols[i] = bmap[cols[k]]; 1830 subvals[i++] = vals[k]; 1831 } 1832 1833 ierr = 
MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1834 1835 } else { /* !allcolumns */ 1836 #if defined(PETSC_USE_CTABLE) 1837 /* diagonal part A = c->A */ 1838 ncols = ai[Crow+1] - ai[Crow]; 1839 cols = aj + ai[Crow]; 1840 vals = a->a + ai[Crow]; 1841 i = 0; 1842 for (k=0; k<ncols; k++) { 1843 tcol = cmap_loc[cols[k]]; 1844 if (tcol) { 1845 subcols[i] = --tcol; 1846 subvals[i++] = vals[k]; 1847 } 1848 } 1849 1850 /* off-diagonal part B = c->B */ 1851 ncols = bi[Crow+1] - bi[Crow]; 1852 cols = bj + bi[Crow]; 1853 vals = b->a + bi[Crow]; 1854 for (k=0; k<ncols; k++) { 1855 ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr); 1856 if (tcol) { 1857 subcols[i] = --tcol; 1858 subvals[i++] = vals[k]; 1859 } 1860 } 1861 #else 1862 /* diagonal part A = c->A */ 1863 ncols = ai[Crow+1] - ai[Crow]; 1864 cols = aj + ai[Crow]; 1865 vals = a->a + ai[Crow]; 1866 i = 0; 1867 for (k=0; k<ncols; k++) { 1868 tcol = cmap[cols[k]+cstart]; 1869 if (tcol) { 1870 subcols[i] = --tcol; 1871 subvals[i++] = vals[k]; 1872 } 1873 } 1874 1875 /* off-diagonal part B = c->B */ 1876 ncols = bi[Crow+1] - bi[Crow]; 1877 cols = bj + bi[Crow]; 1878 vals = b->a + bi[Crow]; 1879 for (k=0; k<ncols; k++) { 1880 tcol = cmap[bmap[cols[k]]]; 1881 if (tcol) { 1882 subcols[i] = --tcol; 1883 subvals[i++] = vals[k]; 1884 } 1885 } 1886 #endif 1887 ierr = MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1888 } 1889 } 1890 } 1891 1892 /* Now assemble the off-proc rows */ 1893 for (i=0; i<nrqs; i++) { /* for each requested message */ 1894 /* recv values from other processes */ 1895 ierr = MPI_Waitany(nrqs,r_waits4,&idex,r_status4+i);CHKERRQ(ierr); 1896 proc = pa[idex]; 1897 sbuf1_i = sbuf1[proc]; 1898 /* jmax = sbuf1_i[0]; if (jmax != 1)SETERRQ1(PETSC_COMM_SELF,0,"jmax %d != 1",jmax); */ 1899 ct1 = 2 + 1; 1900 ct2 = 0; /* count of received C->j */ 1901 ct3 = 0; /* count of received C->j that will be inserted into submat */ 1902 rbuf2_i = rbuf2[idex]; /* int* received length of C->j from other processes */ 1903 rbuf3_i = rbuf3[idex]; /* int* received C->j from other processes */ 1904 rbuf4_i = rbuf4[idex]; /* scalar* received C->a from other processes */ 1905 1906 /* is_no = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */ 1907 max1 = sbuf1_i[2]; /* num of rows */ 1908 for (k=0; k<max1; k++,ct1++) { /* for each received row */ 1909 row = sbuf1_i[ct1]; /* row index of submat */ 1910 if (!allcolumns) { 1911 idex = 0; 1912 if (scall == MAT_INITIAL_MATRIX) { 1913 nnz = rbuf2_i[ct1]; /* num of C entries in this row */ 1914 for (l=0; l<nnz; l++,ct2++) { /* for each received column */ 1915 #if defined(PETSC_USE_CTABLE) 1916 if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) { 1917 tcol = cmap_loc[rbuf3_i[ct2] - cstart]; 1918 } else { 1919 ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1920 } 1921 #else 1922 tcol = cmap[rbuf3_i[ct2]]; 1923 #endif 1924 if (tcol) { 1925 subcols[idex] = --tcol; 1926 subvals[idex++] = rbuf4_i[ct2]; 1927 1928 /* We receive an entire row of C (all of its column indices), but only a subset of it needs to be inserted into submat. 
1929 For reuse, we replace received C->j with index that should be inserted to submat */ 1930 rbuf3_i[ct3++] = ct2; 1931 } 1932 } 1933 ierr = MatSetValues_SeqAIJ(submat,1,&row,idex,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1934 1935 } else { /* scall == MAT_REUSE_MATRIX */ 1936 submat = submats[0]; 1937 subc = (Mat_SeqAIJ*)submat->data; 1938 1939 nnz = subc->i[row+1] - subc->i[row]; /* num of submat entries in this row */ 1940 for (l=0; l<nnz; l++) { 1941 ct2 = rbuf3_i[ct3++]; /* index of rbuf4_i[] which needs to be inserted into submat */ 1942 subvals[idex++] = rbuf4_i[ct2]; 1943 } 1944 1945 bj = subc->j + subc->i[row]; 1946 ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,bj,subvals,INSERT_VALUES);CHKERRQ(ierr); 1947 } 1948 } else { /* allcolumns */ 1949 nnz = rbuf2_i[ct1]; /* num of C entries in this row */ 1950 ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,rbuf3_i+ct2,rbuf4_i+ct2,INSERT_VALUES);CHKERRQ(ierr); 1951 ct2 += nnz; 1952 } 1953 } 1954 } 1955 1956 /* sending a->a are done */ 1957 ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr); 1958 ierr = PetscFree4(r_waits4,s_waits4,r_status4,s_status4);CHKERRQ(ierr); 1959 1960 ierr = MatAssemblyBegin(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1961 ierr = MatAssemblyEnd(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1962 submats[0] = submat; 1963 1964 /* Restore the indices */ 1965 ierr = ISRestoreIndices(isrow[0],&irow);CHKERRQ(ierr); 1966 if (!allcolumns) { 1967 ierr = ISRestoreIndices(iscol[0],&icol);CHKERRQ(ierr); 1968 } 1969 1970 /* Destroy allocated memory */ 1971 for (i=0; i<nrqs; ++i) { 1972 ierr = PetscFree3(rbuf4[i],subcols,subvals);CHKERRQ(ierr); 1973 } 1974 ierr = PetscFree3(rbuf4,subcols,subvals);CHKERRQ(ierr); 1975 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 1976 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 1977 1978 if (scall == MAT_INITIAL_MATRIX) { 1979 ierr = PetscFree(lens);CHKERRQ(ierr); 1980 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 1981 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 1982 } 1983 PetscFunctionReturn(0); 1984 } 1985 1986 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 1987 { 1988 PetscErrorCode ierr; 1989 PetscInt ncol; 1990 PetscBool colflag,allcolumns=PETSC_FALSE; 1991 1992 PetscFunctionBegin; 1993 /* Allocate memory to hold all the submatrices */ 1994 if (scall == MAT_INITIAL_MATRIX) { 1995 ierr = PetscMalloc1(1,submat);CHKERRQ(ierr); 1996 } 1997 1998 /* Check for special case: each processor gets entire matrix columns */ 1999 ierr = ISIdentity(iscol[0],&colflag);CHKERRQ(ierr); 2000 ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr); 2001 if (colflag && ncol == C->cmap->N) allcolumns = PETSC_TRUE; 2002 2003 ierr = MatGetSubMatrices_MPIAIJ_SingleIS_Local(C,ismax,isrow,iscol,scall,allcolumns,*submat);CHKERRQ(ierr); 2004 PetscFunctionReturn(0); 2005 } 2006 2007 PetscErrorCode MatGetSubMatrices_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 2008 { 2009 PetscErrorCode ierr; 2010 PetscInt nmax,nstages_local,nstages,i,pos,max_no,nrow,ncol; 2011 PetscBool rowflag,colflag,wantallmatrix=PETSC_FALSE,twantallmatrix,*allcolumns; 2012 2013 PetscFunctionBegin; 2014 #if 0 2015 /* Check for special case: each processor gets entire matrix */ 2016 if (C->submat_singleis) { /* flag is set in PCSetUp_ASM() to skip several MPIU_Allreduce() */ 2017 ierr = MatGetSubMatrices_MPIAIJ_SingleIS(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr); 2018 PetscFunctionReturn(0); 2019 } 2020 #endif 2021 2022 if 
(ismax == 1 && C->rmap->N == C->cmap->N) { 2023 ierr = ISIdentity(*isrow,&rowflag);CHKERRQ(ierr); 2024 ierr = ISIdentity(*iscol,&colflag);CHKERRQ(ierr); 2025 ierr = ISGetLocalSize(*isrow,&nrow);CHKERRQ(ierr); 2026 ierr = ISGetLocalSize(*iscol,&ncol);CHKERRQ(ierr); 2027 if (rowflag && colflag && nrow == C->rmap->N && ncol == C->cmap->N) { 2028 wantallmatrix = PETSC_TRUE; 2029 2030 ierr = PetscOptionsGetBool(((PetscObject)C)->options,((PetscObject)C)->prefix,"-use_fast_submatrix",&wantallmatrix,NULL);CHKERRQ(ierr); 2031 } 2032 } 2033 ierr = MPIU_Allreduce(&wantallmatrix,&twantallmatrix,1,MPIU_BOOL,MPI_MIN,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 2034 if (twantallmatrix) { 2035 ierr = MatGetSubMatrix_MPIAIJ_All(C,MAT_GET_VALUES,scall,submat);CHKERRQ(ierr); 2036 PetscFunctionReturn(0); 2037 } 2038 2039 /* Allocate memory to hold all the submatrices */ 2040 if (scall == MAT_INITIAL_MATRIX) { 2041 ierr = PetscMalloc1(ismax+1,submat);CHKERRQ(ierr); 2042 } 2043 2044 if (scall == MAT_REUSE_MATRIX && !ismax) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"n=0 is not supported for MatGetSubMatrices(mat,n,isrow,iscol,MAT_REUSE_MATRIX,...). Set n=1 with zero-length isrow and iscolumn instead"); 2045 2046 /* Check for special case: each processor gets entire matrix columns */ 2047 ierr = PetscMalloc1(ismax+1,&allcolumns);CHKERRQ(ierr); 2048 for (i=0; i<ismax; i++) { 2049 ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr); 2050 ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr); 2051 if (colflag && ncol == C->cmap->N) { 2052 allcolumns[i] = PETSC_TRUE; 2053 } else { 2054 allcolumns[i] = PETSC_FALSE; 2055 } 2056 } 2057 2058 /* Determine the number of stages through which submatrices are done */ 2059 nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt)); 2060 2061 /* 2062 Each stage will extract nmax submatrices. 2063 nmax is determined by the matrix column dimension. 2064 If the original matrix has 20M columns, only one submatrix per stage is allowed, etc. 2065 */ 2066 if (!nmax) nmax = 1; 2067 nstages_local = ismax/nmax + ((ismax % nmax) ? 
1 : 0); 2068 2069 /* Make sure every processor loops through the nstages */ 2070 ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 2071 2072 for (i=0,pos=0; i<nstages; i++) { 2073 if (pos+nmax <= ismax) max_no = nmax; 2074 else if (pos == ismax) max_no = 0; 2075 else max_no = ismax-pos; 2076 ierr = MatGetSubMatrices_MPIAIJ_Local(C,max_no,isrow+pos,iscol+pos,scall,allcolumns+pos,*submat+pos);CHKERRQ(ierr); 2077 pos += max_no; 2078 } 2079 2080 ierr = PetscFree(allcolumns);CHKERRQ(ierr); 2081 PetscFunctionReturn(0); 2082 } 2083 2084 /* -------------------------------------------------------------------------*/ 2085 PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allcolumns,Mat *submats) 2086 { 2087 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 2088 Mat A = c->A; 2089 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)c->B->data,*subc; 2090 const PetscInt **icol,**irow; 2091 PetscInt *nrow,*ncol,start; 2092 PetscErrorCode ierr; 2093 PetscMPIInt rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr; 2094 PetscInt **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,**rbuf1,row,proc; 2095 PetscInt nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol; 2096 PetscInt **rbuf3,*req_source1,*req_source2,**sbuf_aj,**rbuf2,max1,max2; 2097 PetscInt **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax; 2098 #if defined(PETSC_USE_CTABLE) 2099 PetscTable *cmap,cmap_i=NULL,*rmap,rmap_i; 2100 #else 2101 PetscInt **cmap,*cmap_i=NULL,**rmap,*rmap_i; 2102 #endif 2103 const PetscInt *irow_i; 2104 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 2105 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 2106 MPI_Request *r_waits4,*s_waits3,*s_waits4; 2107 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2; 2108 MPI_Status *r_status3,*r_status4,*s_status4; 2109 MPI_Comm comm; 2110 PetscScalar **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i; 2111 PetscMPIInt *onodes1,*olengths1,end; 2112 PetscInt **row2proc,*row2proc_i; 2113 Mat_SubMat **smats,*smat_i; 2114 PetscBool *issorted; 2115 2116 PetscFunctionBegin; 2117 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 2118 size = c->size; 2119 rank = c->rank; 2120 2121 ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);CHKERRQ(ierr); 2122 for (i=0; i<ismax; i++) { 2123 ierr = ISSorted(isrow[i],&issorted[i]);CHKERRQ(ierr); 2124 2125 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 2126 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 2127 if (allcolumns[i]) { 2128 icol[i] = NULL; 2129 ncol[i] = C->cmap->N; 2130 } else { 2131 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 2132 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 2133 } 2134 } 2135 2136 ierr = PetscMalloc1(ismax,&smats);CHKERRQ(ierr); 2137 ierr = PetscMalloc1(ismax,&row2proc);CHKERRQ(ierr); 2138 ierr = PetscMalloc2(ismax,&cmap,ismax,&rmap);CHKERRQ(ierr); 2139 2140 if (scall == MAT_REUSE_MATRIX) { 2141 /* Assumes new rows are same length as the old rows */ 2142 for (i=0; i<ismax; i++) { 2143 subc = (Mat_SeqAIJ*)(submats[i]->data); 2144 if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. 
wrong size"); 2145 2146 /* Initial matrix as if empty */ 2147 ierr = PetscMemzero(subc->ilen,submats[i]->rmap->n*sizeof(PetscInt));CHKERRQ(ierr); 2148 2149 /* Initial matrix as if empty */ 2150 submats[i]->factortype = C->factortype; 2151 2152 smat_i = subc->submatis1; 2153 smats[i] = smat_i; 2154 2155 nrqs = smat_i->nrqs; 2156 nrqr = smat_i->nrqr; 2157 rbuf1 = smat_i->rbuf1; 2158 rbuf2 = smat_i->rbuf2; 2159 rbuf3 = smat_i->rbuf3; 2160 req_source2 = smat_i->req_source2; 2161 2162 sbuf1 = smat_i->sbuf1; 2163 sbuf2 = smat_i->sbuf2; 2164 ptr = smat_i->ptr; 2165 tmp = smat_i->tmp; 2166 ctr = smat_i->ctr; 2167 2168 pa = smat_i->pa; 2169 req_size = smat_i->req_size; 2170 req_source1 = smat_i->req_source1; 2171 2172 allcolumns[i] = smat_i->allcolumns; 2173 row2proc[i] = smat_i->row2proc; 2174 rmap[i] = smat_i->rmap; 2175 cmap[i] = smat_i->cmap; 2176 } 2177 } else { /* scall == MAT_INITIAL_MATRIX */ 2178 /* Get some new tags to keep the communication clean */ 2179 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 2180 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 2181 2182 /* evaluate communication - mesg to who, length of mesg, and buffer space 2183 required. Based on this, buffers are allocated, and data copied into them*/ 2184 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); /* mesg size, initialize work vectors */ 2185 2186 for (i=0; i<ismax; i++) { 2187 jmax = nrow[i]; 2188 irow_i = irow[i]; 2189 2190 ierr = PetscMalloc1(jmax,&row2proc_i);CHKERRQ(ierr); 2191 row2proc[i] = row2proc_i; 2192 2193 if (issorted[i]) proc = 0; 2194 for (j=0; j<jmax; j++) { 2195 if (!issorted[i]) proc = 0; 2196 row = irow_i[j]; 2197 while (row >= C->rmap->range[proc+1]) proc++; 2198 w4[proc]++; 2199 row2proc_i[j] = proc; /* map row index to proc */ 2200 } 2201 for (j=0; j<size; j++) { 2202 if (w4[j]) { w1[j] += w4[j]; w3[j]++; w4[j] = 0;} 2203 } 2204 } 2205 2206 nrqs = 0; /* no of outgoing messages */ 2207 msz = 0; /* total mesg length (for all procs) */ 2208 w1[rank] = 0; /* no mesg sent to self */ 2209 w3[rank] = 0; 2210 for (i=0; i<size; i++) { 2211 if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 2212 } 2213 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 2214 for (i=0,j=0; i<size; i++) { 2215 if (w1[i]) { pa[j] = i; j++; } 2216 } 2217 2218 /* Each message would have a header = 1 + 2*(no of IS) + data */ 2219 for (i=0; i<nrqs; i++) { 2220 j = pa[i]; 2221 w1[j] += w2[j] + 2* w3[j]; 2222 msz += w1[j]; 2223 } 2224 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 2225 2226 /* Determine the number of messages to expect, their lengths, from from-ids */ 2227 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 2228 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 2229 2230 /* Now post the Irecvs corresponding to these messages */ 2231 tag0 = ((PetscObject)C)->tag; 2232 ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 2233 2234 ierr = PetscFree(onodes1);CHKERRQ(ierr); 2235 ierr = PetscFree(olengths1);CHKERRQ(ierr); 2236 2237 /* Allocate Memory for outgoing messages */ 2238 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 2239 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 2240 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 2241 2242 { 2243 PetscInt *iptr = tmp; 2244 k = 0; 2245 for (i=0; i<nrqs; i++) { 2246 j = 
pa[i]; 2247 iptr += k; 2248 sbuf1[j] = iptr; 2249 k = w1[j]; 2250 } 2251 } 2252 2253 /* Form the outgoing messages. Initialize the header space */ 2254 for (i=0; i<nrqs; i++) { 2255 j = pa[i]; 2256 sbuf1[j][0] = 0; 2257 ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 2258 ptr[j] = sbuf1[j] + 2*w3[j] + 1; 2259 } 2260 2261 /* Parse the isrow and copy data into outbuf */ 2262 for (i=0; i<ismax; i++) { 2263 row2proc_i = row2proc[i]; 2264 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 2265 irow_i = irow[i]; 2266 jmax = nrow[i]; 2267 for (j=0; j<jmax; j++) { /* parse the indices of each IS */ 2268 proc = row2proc_i[j]; 2269 if (proc != rank) { /* copy to the outgoing buf*/ 2270 ctr[proc]++; 2271 *ptr[proc] = irow_i[j]; 2272 ptr[proc]++; 2273 } 2274 } 2275 /* Update the headers for the current IS */ 2276 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 2277 if ((ctr_j = ctr[j])) { 2278 sbuf1_j = sbuf1[j]; 2279 k = ++sbuf1_j[0]; 2280 sbuf1_j[2*k] = ctr_j; 2281 sbuf1_j[2*k-1] = i; 2282 } 2283 } 2284 } 2285 2286 /* Now post the sends */ 2287 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 2288 for (i=0; i<nrqs; ++i) { 2289 j = pa[i]; 2290 ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 2291 } 2292 2293 /* Post Receives to capture the buffer size */ 2294 ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr); 2295 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 2296 rbuf2[0] = tmp + msz; 2297 for (i=1; i<nrqs; ++i) { 2298 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 2299 } 2300 for (i=0; i<nrqs; ++i) { 2301 j = pa[i]; 2302 ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);CHKERRQ(ierr); 2303 } 2304 2305 /* Send to other procs the buf size they should allocate */ 2306 /* Receive messages*/ 2307 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 2308 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 2309 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 2310 { 2311 PetscInt *sAi = a->i,*sBi = b->i,id,rstart = C->rmap->rstart; 2312 PetscInt *sbuf2_i; 2313 2314 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 2315 for (i=0; i<nrqr; ++i) { 2316 req_size[i] = 0; 2317 rbuf1_i = rbuf1[i]; 2318 start = 2*rbuf1_i[0] + 1; 2319 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 2320 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 2321 sbuf2_i = sbuf2[i]; 2322 for (j=start; j<end; j++) { 2323 id = rbuf1_i[j] - rstart; 2324 ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id]; 2325 sbuf2_i[j] = ncols; 2326 req_size[i] += ncols; 2327 } 2328 req_source1[i] = r_status1[i].MPI_SOURCE; 2329 /* form the header */ 2330 sbuf2_i[0] = req_size[i]; 2331 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 2332 2333 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 2334 } 2335 } 2336 ierr = PetscFree(r_status1);CHKERRQ(ierr); 2337 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 2338 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 2339 2340 /* Receive messages*/ 2341 ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr); 2342 ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr); 2343 2344 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 2345 for (i=0; i<nrqs; ++i) { 2346 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 2347 req_source2[i] = r_status2[i].MPI_SOURCE; 2348 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 2349 } 2350 ierr = 
PetscFree(r_status2);CHKERRQ(ierr); 2351 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 2352 2353 /* Wait on sends1 and sends2 */ 2354 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 2355 ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr); 2356 2357 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);} 2358 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);} 2359 ierr = PetscFree(s_status1);CHKERRQ(ierr); 2360 ierr = PetscFree(s_status2);CHKERRQ(ierr); 2361 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 2362 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 2363 2364 /* Now allocate sending buffers for a->j, and send them off */ 2365 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 2366 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 2367 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 2368 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 2369 2370 ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr); 2371 { 2372 PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i,lwrite; 2373 PetscInt *cworkA,*cworkB,cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray; 2374 PetscInt cend = C->cmap->rend; 2375 PetscInt *a_j = a->j,*b_j = b->j,ctmp; 2376 2377 for (i=0; i<nrqr; i++) { 2378 rbuf1_i = rbuf1[i]; 2379 sbuf_aj_i = sbuf_aj[i]; 2380 ct1 = 2*rbuf1_i[0] + 1; 2381 ct2 = 0; 2382 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 2383 kmax = rbuf1[i][2*j]; 2384 for (k=0; k<kmax; k++,ct1++) { 2385 row = rbuf1_i[ct1] - rstart; 2386 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 2387 ncols = nzA + nzB; 2388 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 2389 2390 /* load the column indices for this row into sorted cols */ 2391 cols = sbuf_aj_i + ct2; 2392 2393 lwrite = 0; 2394 for (l=0; l<nzB; l++) { 2395 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp; 2396 } 2397 for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l]; 2398 for (l=0; l<nzB; l++) { 2399 if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp; 2400 } 2401 2402 ct2 += ncols; 2403 } 2404 } 2405 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 2406 } 2407 } 2408 ierr = PetscMalloc1(nrqs+1,&r_status3);CHKERRQ(ierr); 2409 ierr = PetscMalloc1(nrqr+1,&s_status3);CHKERRQ(ierr); 2410 2411 /* create col map: global col of C -> local col of submatrices */ 2412 { 2413 const PetscInt *icol_i; 2414 #if defined(PETSC_USE_CTABLE) 2415 for (i=0; i<ismax; i++) { 2416 if (!allcolumns[i]) { 2417 ierr = PetscTableCreate(ncol[i]+1,C->cmap->N+1,&cmap[i]);CHKERRQ(ierr); 2418 2419 jmax = ncol[i]; 2420 icol_i = icol[i]; 2421 cmap_i = cmap[i]; 2422 for (j=0; j<jmax; j++) { 2423 ierr = PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 2424 } 2425 } else cmap[i] = NULL; 2426 } 2427 #else 2428 for (i=0; i<ismax; i++) { 2429 if (!allcolumns[i]) { 2430 ierr = PetscMalloc1(C->cmap->N,&cmap[i]);CHKERRQ(ierr); 2431 ierr = PetscMemzero(cmap[i],C->cmap->N*sizeof(PetscInt));CHKERRQ(ierr); 2432 jmax = ncol[i]; 2433 icol_i = icol[i]; 2434 cmap_i = cmap[i]; 2435 for (j=0; j<jmax; j++) { 2436 cmap_i[icol_i[j]] = j+1; 2437 } 2438 } else cmap[i] = NULL; 2439 } 2440 #endif 2441 } 2442 2443 /* Create lens which is required for MatCreate... 
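lens is a ragged array: lens[i][j] will hold the number of nonzeros of local row j of submats[i], accumulated first from the locally owned rows of C and then from the off-process replies, before being passed to MatSeqAIJSetPreallocation() below.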
*/ 2444 for (i=0,j=0; i<ismax; i++) j += nrow[i]; 2445 ierr = PetscMalloc1(ismax,&lens);CHKERRQ(ierr); 2446 2447 if (ismax) { 2448 ierr = PetscCalloc1(j,&lens[0]);CHKERRQ(ierr); 2449 } 2450 for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1]; 2451 2452 /* Update lens from local data */ 2453 for (i=0; i<ismax; i++) { 2454 row2proc_i = row2proc[i]; 2455 jmax = nrow[i]; 2456 if (!allcolumns[i]) cmap_i = cmap[i]; 2457 irow_i = irow[i]; 2458 lens_i = lens[i]; 2459 for (j=0; j<jmax; j++) { 2460 row = irow_i[j]; 2461 proc = row2proc_i[j]; 2462 if (proc == rank) { 2463 ierr = MatGetRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr); 2464 if (!allcolumns[i]) { 2465 for (k=0; k<ncols; k++) { 2466 #if defined(PETSC_USE_CTABLE) 2467 ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr); 2468 #else 2469 tcol = cmap_i[cols[k]]; 2470 #endif 2471 if (tcol) lens_i[j]++; 2472 } 2473 } else { /* allcolumns */ 2474 lens_i[j] = ncols; 2475 } 2476 ierr = MatRestoreRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr); 2477 } 2478 } 2479 } 2480 2481 /* Create row map: global row of C -> local row of submatrices */ 2482 #if defined(PETSC_USE_CTABLE) 2483 for (i=0; i<ismax; i++) { 2484 ierr = PetscTableCreate(nrow[i]+1,C->rmap->N+1,&rmap[i]);CHKERRQ(ierr); 2485 irow_i = irow[i]; 2486 jmax = nrow[i]; 2487 for (j=0; j<jmax; j++) { 2488 ierr = PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 2489 } 2490 } 2491 #else 2492 if (ismax) { 2493 ierr = PetscMalloc1(ismax*C->rmap->N,&rmap[0]);CHKERRQ(ierr); 2494 ierr = PetscMemzero(rmap[0],ismax*C->rmap->N*sizeof(PetscInt));CHKERRQ(ierr); 2495 } 2496 for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + C->rmap->N; 2497 for (i=0; i<ismax; i++) { 2498 rmap_i = rmap[i]; 2499 irow_i = irow[i]; 2500 jmax = nrow[i]; 2501 for (j=0; j<jmax; j++) { 2502 rmap_i[irow_i[j]] = j; 2503 } 2504 } 2505 #endif 2506 2507 /* Update lens from offproc data */ 2508 { 2509 PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i; 2510 2511 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 2512 for (tmp2=0; tmp2<nrqs; tmp2++) { 2513 sbuf1_i = sbuf1[pa[tmp2]]; 2514 jmax = sbuf1_i[0]; 2515 ct1 = 2*jmax+1; 2516 ct2 = 0; 2517 rbuf2_i = rbuf2[tmp2]; 2518 rbuf3_i = rbuf3[tmp2]; 2519 for (j=1; j<=jmax; j++) { 2520 is_no = sbuf1_i[2*j-1]; 2521 max1 = sbuf1_i[2*j]; 2522 lens_i = lens[is_no]; 2523 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2524 rmap_i = rmap[is_no]; 2525 for (k=0; k<max1; k++,ct1++) { 2526 #if defined(PETSC_USE_CTABLE) 2527 ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 2528 row--; 2529 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 2530 #else 2531 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 2532 #endif 2533 max2 = rbuf2_i[ct1]; 2534 for (l=0; l<max2; l++,ct2++) { 2535 if (!allcolumns[is_no]) { 2536 #if defined(PETSC_USE_CTABLE) 2537 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 2538 #else 2539 tcol = cmap_i[rbuf3_i[ct2]]; 2540 #endif 2541 if (tcol) lens_i[row]++; 2542 } else { /* allcolumns */ 2543 lens_i[row]++; /* lens_i[row] += max2 ? 
*/ 2544 } 2545 } 2546 } 2547 } 2548 } 2549 } 2550 ierr = PetscFree(r_status3);CHKERRQ(ierr); 2551 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 2552 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);} 2553 ierr = PetscFree(s_status3);CHKERRQ(ierr); 2554 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 2555 2556 /* Create the submatrices */ 2557 for (i=0; i<ismax; i++) { 2558 PetscInt rbs,cbs; 2559 2560 ierr = ISGetBlockSize(isrow[i],&rbs);CHKERRQ(ierr); 2561 ierr = ISGetBlockSize(iscol[i],&cbs);CHKERRQ(ierr); 2562 2563 ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr); 2564 ierr = MatSetSizes(submats[i],nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2565 2566 ierr = MatSetBlockSizes(submats[i],rbs,cbs);CHKERRQ(ierr); 2567 ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr); 2568 ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr); 2569 2570 /* create struct Mat_SubMat and attached it to submat */ 2571 ierr = PetscNew(&smat_i);CHKERRQ(ierr); 2572 subc = (Mat_SeqAIJ*)submats[i]->data; 2573 subc->submatis1 = smat_i; 2574 smats[i] = smat_i; 2575 2576 smat_i->destroy = submats[i]->ops->destroy; 2577 submats[i]->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices; 2578 submats[i]->factortype = C->factortype; 2579 2580 smat_i->id = i; 2581 smat_i->nrqs = nrqs; 2582 smat_i->nrqr = nrqr; 2583 smat_i->rbuf1 = rbuf1; 2584 smat_i->rbuf2 = rbuf2; 2585 smat_i->rbuf3 = rbuf3; 2586 smat_i->sbuf2 = sbuf2; 2587 smat_i->req_source2 = req_source2; 2588 2589 smat_i->sbuf1 = sbuf1; 2590 smat_i->ptr = ptr; 2591 smat_i->tmp = tmp; 2592 smat_i->ctr = ctr; 2593 2594 smat_i->pa = pa; 2595 smat_i->req_size = req_size; 2596 smat_i->req_source1 = req_source1; 2597 2598 smat_i->allcolumns = allcolumns[i]; 2599 smat_i->row2proc = row2proc[i]; 2600 smat_i->rmap = rmap[i]; 2601 smat_i->cmap = cmap[i]; 2602 } 2603 2604 if (ismax) {ierr = PetscFree(lens[0]);CHKERRQ(ierr);} 2605 ierr = PetscFree(lens);CHKERRQ(ierr); 2606 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 2607 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 2608 2609 } /* endof scall == MAT_INITIAL_MATRIX */ 2610 2611 /* Post recv matrix values */ 2612 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 2613 ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr); 2614 ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr); 2615 ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr); 2616 ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr); 2617 for (i=0; i<nrqs; ++i) { 2618 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr); 2619 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 2620 } 2621 2622 /* Allocate sending buffers for a->a, and send them off */ 2623 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 2624 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 2625 ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr); 2626 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]; 2627 2628 ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr); 2629 { 2630 PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i, *cworkB,lwrite; 2631 PetscInt cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray; 2632 PetscInt cend = C->cmap->rend; 2633 PetscInt *b_j = b->j; 2634 PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a; 2635 2636 for (i=0; i<nrqr; i++) { 2637 rbuf1_i = rbuf1[i]; 2638 sbuf_aa_i = sbuf_aa[i]; 2639 ct1 = 2*rbuf1_i[0]+1; 2640 ct2 = 0; 2641 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 2642 kmax = rbuf1_i[2*j]; 2643 for (k=0; k<kmax; 
k++,ct1++) { 2644 row = rbuf1_i[ct1] - rstart; 2645 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 2646 ncols = nzA + nzB; 2647 cworkB = b_j + b_i[row]; 2648 vworkA = a_a + a_i[row]; 2649 vworkB = b_a + b_i[row]; 2650 2651 /* load the column values for this row into vals*/ 2652 vals = sbuf_aa_i+ct2; 2653 2654 lwrite = 0; 2655 for (l=0; l<nzB; l++) { 2656 if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l]; 2657 } 2658 for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l]; 2659 for (l=0; l<nzB; l++) { 2660 if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l]; 2661 } 2662 2663 ct2 += ncols; 2664 } 2665 } 2666 ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 2667 } 2668 } 2669 2670 if (!ismax) { 2671 ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr); 2672 ierr = PetscFree(rbuf1);CHKERRQ(ierr); 2673 } 2674 2675 /* Assemble the matrices */ 2676 /* First assemble the local rows */ 2677 { 2678 PetscInt ilen_row,*imat_ilen,*imat_j,*imat_i,old_row; 2679 PetscScalar *imat_a; 2680 2681 for (i=0; i<ismax; i++) { 2682 row2proc_i = row2proc[i]; 2683 subc = (Mat_SeqAIJ*)submats[i]->data; 2684 imat_ilen = subc->ilen; 2685 imat_j = subc->j; 2686 imat_i = subc->i; 2687 imat_a = subc->a; 2688 2689 if (!allcolumns[i]) cmap_i = cmap[i]; 2690 rmap_i = rmap[i]; 2691 irow_i = irow[i]; 2692 jmax = nrow[i]; 2693 for (j=0; j<jmax; j++) { 2694 row = irow_i[j]; 2695 proc = row2proc_i[j]; 2696 if (proc == rank) { 2697 old_row = row; 2698 #if defined(PETSC_USE_CTABLE) 2699 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 2700 row--; 2701 #else 2702 row = rmap_i[row]; 2703 #endif 2704 ilen_row = imat_ilen[row]; 2705 ierr = MatGetRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 2706 mat_i = imat_i[row]; 2707 mat_a = imat_a + mat_i; 2708 mat_j = imat_j + mat_i; 2709 if (!allcolumns[i]) { 2710 for (k=0; k<ncols; k++) { 2711 #if defined(PETSC_USE_CTABLE) 2712 ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr); 2713 #else 2714 tcol = cmap_i[cols[k]]; 2715 #endif 2716 if (tcol) { 2717 *mat_j++ = tcol - 1; 2718 //if (rank==1) printf("%d - col %d\n",row,tcol - 1); 2719 *mat_a++ = vals[k]; 2720 ilen_row++; 2721 } 2722 } 2723 } else { /* allcolumns */ 2724 for (k=0; k<ncols; k++) { 2725 *mat_j++ = cols[k]; /* global col index! 
*/ 2726 *mat_a++ = vals[k]; 2727 ilen_row++; 2728 } 2729 } 2730 ierr = MatRestoreRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 2731 2732 imat_ilen[row] = ilen_row; 2733 } 2734 } 2735 } 2736 } 2737 2738 /* Now assemble the off proc rows*/ 2739 { 2740 PetscInt *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen; 2741 PetscInt *imat_j,*imat_i; 2742 PetscScalar *imat_a,*rbuf4_i; 2743 2744 ierr = MPI_Waitall(nrqs,r_waits4,r_status4);CHKERRQ(ierr); 2745 for (tmp2=0; tmp2<nrqs; tmp2++) { 2746 sbuf1_i = sbuf1[pa[tmp2]]; 2747 jmax = sbuf1_i[0]; 2748 ct1 = 2*jmax + 1; 2749 ct2 = 0; 2750 rbuf2_i = rbuf2[tmp2]; 2751 rbuf3_i = rbuf3[tmp2]; 2752 rbuf4_i = rbuf4[tmp2]; 2753 for (j=1; j<=jmax; j++) { 2754 is_no = sbuf1_i[2*j-1]; 2755 rmap_i = rmap[is_no]; 2756 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2757 subc = (Mat_SeqAIJ*)submats[is_no]->data; 2758 imat_ilen = subc->ilen; 2759 imat_j = subc->j; 2760 imat_i = subc->i; 2761 imat_a = subc->a; 2762 max1 = sbuf1_i[2*j]; 2763 for (k=0; k<max1; k++,ct1++) { 2764 row = sbuf1_i[ct1]; 2765 #if defined(PETSC_USE_CTABLE) 2766 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 2767 row--; 2768 #else 2769 row = rmap_i[row]; 2770 #endif 2771 ilen = imat_ilen[row]; 2772 mat_i = imat_i[row]; 2773 mat_a = imat_a + mat_i; 2774 mat_j = imat_j + mat_i; 2775 max2 = rbuf2_i[ct1]; 2776 if (!allcolumns[is_no]) { 2777 for (l=0; l<max2; l++,ct2++) { 2778 2779 #if defined(PETSC_USE_CTABLE) 2780 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 2781 #else 2782 tcol = cmap_i[rbuf3_i[ct2]]; 2783 #endif 2784 if (tcol) { 2785 *mat_j++ = tcol - 1; 2786 //if (rank==1) printf("%d - off-proc col %d\n",row,tcol - 1); 2787 *mat_a++ = rbuf4_i[ct2]; 2788 ilen++; 2789 } 2790 } 2791 } else { /* allcolumns */ 2792 for (l=0; l<max2; l++,ct2++) { 2793 *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */ 2794 *mat_a++ = rbuf4_i[ct2]; 2795 ilen++; 2796 } 2797 } 2798 imat_ilen[row] = ilen; 2799 } 2800 } 2801 } 2802 } 2803 2804 /* sort the rows -- do we need this? 
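Presumably yes whenever allcolumns[i] is false: the submatrix columns are numbered by the position of each index in iscol[i], so the indices written into subc->j above need not be increasing within a row, while the SeqAIJ kernels generally expect sorted rows; the allcolumns case keeps C's global ordering and is skipped below.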
*/ 2805 { 2806 PetscInt *imat_ilen,*imat_j,*imat_i; 2807 PetscScalar *imat_a; 2808 2809 for (i=0; i<ismax; i++) { 2810 subc = (Mat_SeqAIJ*)submats[i]->data; 2811 imat_j = subc->j; 2812 imat_i = subc->i; 2813 imat_a = subc->a; 2814 imat_ilen = subc->ilen; 2815 2816 if (allcolumns[i]) continue; 2817 jmax = nrow[i]; 2818 for (j=0; j<jmax; j++) { 2819 PetscInt ilen; 2820 2821 mat_i = imat_i[j]; 2822 mat_a = imat_a + mat_i; 2823 mat_j = imat_j + mat_i; 2824 ilen = imat_ilen[j]; 2825 ierr = PetscSortIntWithScalarArray(ilen,mat_j,mat_a);CHKERRQ(ierr); 2826 } 2827 } 2828 } 2829 2830 ierr = PetscFree(r_status4);CHKERRQ(ierr); 2831 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 2832 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);} 2833 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 2834 ierr = PetscFree(s_status4);CHKERRQ(ierr); 2835 2836 /* Restore the indices */ 2837 for (i=0; i<ismax; i++) { 2838 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 2839 if (!allcolumns[i]) { 2840 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 2841 } 2842 } 2843 2844 for (i=0; i<ismax; i++) { 2845 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2846 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2847 } 2848 2849 /* Destroy allocated memory */ 2850 if (!ismax) { 2851 ierr = PetscFree(pa);CHKERRQ(ierr); 2852 2853 ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr); 2854 for (i=0; i<nrqr; ++i) { 2855 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 2856 } 2857 for (i=0; i<nrqs; ++i) { 2858 ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr); 2859 } 2860 2861 ierr = PetscFree3(sbuf2,req_size,req_source1);CHKERRQ(ierr); 2862 ierr = PetscFree3(req_source2,rbuf2,rbuf3);CHKERRQ(ierr); 2863 } 2864 2865 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 2866 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 2867 2868 ierr = PetscFree5(irow,icol,nrow,ncol,issorted);CHKERRQ(ierr); 2869 2870 for (i=0; i<nrqs; ++i) { 2871 ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); 2872 } 2873 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 2874 2875 ierr = PetscFree2(cmap,rmap);CHKERRQ(ierr); 2876 ierr = PetscFree(row2proc);CHKERRQ(ierr); 2877 ierr = PetscFree(smats);CHKERRQ(ierr); 2878 PetscFunctionReturn(0); 2879 } 2880 2881 /* 2882 Permute A & B into C's *local* index space using rowemb,dcolemb for A and rowemb,ocolemb for B. 2883 Embeddings are supposed to be injections and the above implies that the range of rowemb is a subset 2884 of [0,m), dcolemb is in [0,n) and ocolemb is in [N-n). 2885 If pattern == DIFFERENT_NONZERO_PATTERN, C is preallocated according to A&B. 2886 After that B's columns are mapped into C's global column space, so that C is in the "disassembled" 2887 state, and needs to be "assembled" later by compressing B's column space. 2888 2889 This function may be called in lieu of preallocation, so C should not be expected to be preallocated. 2890 Following this call, C->A & C->B have been created, even if empty. 2891 */ 2892 PetscErrorCode MatSetSeqMats_MPIAIJ(Mat C,IS rowemb,IS dcolemb,IS ocolemb,MatStructure pattern,Mat A,Mat B) 2893 { 2894 /* If making this function public, change the error returned in this function away from _PLIB. */ 2895 PetscErrorCode ierr; 2896 Mat_MPIAIJ *aij; 2897 Mat_SeqAIJ *Baij; 2898 PetscBool seqaij,Bdisassembled; 2899 PetscInt m,n,*nz,i,j,ngcol,col,rstart,rend,shift,count; 2900 PetscScalar v; 2901 const PetscInt *rowindices,*colindices; 2902 2903 PetscFunctionBegin; 2904 /* Check to make sure the component matrices (and embeddings) are compatible with C. 
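Both A and B must be SEQAIJ; their row counts must match the row embedding (or C's local row count when rowemb is NULL), A's column count must match dcolemb (or C's local column count), and B's column count must match ocolemb (or the number of off-diagonal columns, C->cmap->N - C->cmap->n).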
*/ 2905 if (A) { 2906 ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&seqaij);CHKERRQ(ierr); 2907 if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diagonal matrix is of wrong type"); 2908 if (rowemb) { 2909 ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr); 2910 if (m != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with diag matrix row size %D",m,A->rmap->n); 2911 } else { 2912 if (C->rmap->n != A->rmap->n) { 2913 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is row-incompatible with the MPIAIJ matrix"); 2914 } 2915 } 2916 if (dcolemb) { 2917 ierr = ISGetLocalSize(dcolemb,&n);CHKERRQ(ierr); 2918 if (n != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag col IS of size %D is incompatible with diag matrix col size %D",n,A->cmap->n); 2919 } else { 2920 if (C->cmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is col-incompatible with the MPIAIJ matrix"); 2921 } 2922 } 2923 if (B) { 2924 ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJ,&seqaij);CHKERRQ(ierr); 2925 if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diagonal matrix is of wrong type"); 2926 if (rowemb) { 2927 ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr); 2928 if (m != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with off-diag matrix row size %D",m,B->rmap->n); 2929 } else { 2930 if (C->rmap->n != B->rmap->n) { 2931 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is row-incompatible with the MPIAIJ matrix"); 2932 } 2933 } 2934 if (ocolemb) { 2935 ierr = ISGetLocalSize(ocolemb,&n);CHKERRQ(ierr); 2936 if (n != B->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag col IS of size %D is incompatible with off-diag matrix col size %D",n,B->cmap->n); 2937 } else { 2938 if (C->cmap->N - C->cmap->n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is col-incompatible with the MPIAIJ matrix"); 2939 } 2940 } 2941 2942 aij = (Mat_MPIAIJ*)(C->data); 2943 if (!aij->A) { 2944 /* Mimic parts of MatMPIAIJSetPreallocation() */ 2945 ierr = MatCreate(PETSC_COMM_SELF,&aij->A);CHKERRQ(ierr); 2946 ierr = MatSetSizes(aij->A,C->rmap->n,C->cmap->n,C->rmap->n,C->cmap->n);CHKERRQ(ierr); 2947 ierr = MatSetBlockSizesFromMats(aij->A,C,C);CHKERRQ(ierr); 2948 ierr = MatSetType(aij->A,MATSEQAIJ);CHKERRQ(ierr); 2949 ierr = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->A);CHKERRQ(ierr); 2950 } 2951 if (A) { 2952 ierr = MatSetSeqMat_SeqAIJ(aij->A,rowemb,dcolemb,pattern,A);CHKERRQ(ierr); 2953 } else { 2954 ierr = MatSetUp(aij->A);CHKERRQ(ierr); 2955 } 2956 if (B) { /* Destroy the old matrix or the column map, depending on the sparsity pattern. */ 2957 /* 2958 If pattern == DIFFERENT_NONZERO_PATTERN, we reallocate B and 2959 need to "disassemble" B -- convert it to using C's global indices. 2960 To insert the values we take the safer, albeit more expensive, route of MatSetValues(). 2961 2962 If pattern == SUBSET_NONZERO_PATTERN, we do not "disassemble" B and do not reallocate; 2963 we MatZeroEntries(aij->B) first, so there may be a bunch of zeros that, perhaps, could be compacted out. 2964 2965 TODO: Put B's values into aij->B's aij structure in place using the embedding ISs? 2966 At least avoid calling MatSetValues() and the implied searches? 
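(Each MatSetValues() call searches the target row for the column position of every entry; copying straight into aij->B's i/j/a arrays would avoid those searches, at the price of depending on SeqAIJ internals.)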
2967 */ 2968 2969 if (B && pattern == DIFFERENT_NONZERO_PATTERN) { 2970 #if defined(PETSC_USE_CTABLE) 2971 ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr); 2972 #else 2973 ierr = PetscFree(aij->colmap);CHKERRQ(ierr); 2974 /* A bit of a HACK: ideally we should deal with case aij->B all in one code block below. */ 2975 if (aij->B) { 2976 ierr = PetscLogObjectMemory((PetscObject)C,-aij->B->cmap->n*sizeof(PetscInt));CHKERRQ(ierr); 2977 } 2978 #endif 2979 ngcol = 0; 2980 if (aij->lvec) { 2981 ierr = VecGetSize(aij->lvec,&ngcol);CHKERRQ(ierr); 2982 } 2983 if (aij->garray) { 2984 ierr = PetscFree(aij->garray);CHKERRQ(ierr); 2985 ierr = PetscLogObjectMemory((PetscObject)C,-ngcol*sizeof(PetscInt));CHKERRQ(ierr); 2986 } 2987 ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr); 2988 ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr); 2989 } 2990 if (aij->B && B && pattern == DIFFERENT_NONZERO_PATTERN) { 2991 ierr = MatDestroy(&aij->B);CHKERRQ(ierr); 2992 } 2993 if (aij->B && B && pattern == SUBSET_NONZERO_PATTERN) { 2994 ierr = MatZeroEntries(aij->B);CHKERRQ(ierr); 2995 } 2996 } 2997 Bdisassembled = PETSC_FALSE; 2998 if (!aij->B) { 2999 ierr = MatCreate(PETSC_COMM_SELF,&aij->B);CHKERRQ(ierr); 3000 ierr = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->B);CHKERRQ(ierr); 3001 ierr = MatSetSizes(aij->B,C->rmap->n,C->cmap->N,C->rmap->n,C->cmap->N);CHKERRQ(ierr); 3002 ierr = MatSetBlockSizesFromMats(aij->B,B,B);CHKERRQ(ierr); 3003 ierr = MatSetType(aij->B,MATSEQAIJ);CHKERRQ(ierr); 3004 Bdisassembled = PETSC_TRUE; 3005 } 3006 if (B) { 3007 Baij = (Mat_SeqAIJ*)(B->data); 3008 if (pattern == DIFFERENT_NONZERO_PATTERN) { 3009 ierr = PetscMalloc1(B->rmap->n,&nz);CHKERRQ(ierr); 3010 for (i=0; i<B->rmap->n; i++) { 3011 nz[i] = Baij->i[i+1] - Baij->i[i]; 3012 } 3013 ierr = MatSeqAIJSetPreallocation(aij->B,0,nz);CHKERRQ(ierr); 3014 ierr = PetscFree(nz);CHKERRQ(ierr); 3015 } 3016 3017 ierr = PetscLayoutGetRange(C->rmap,&rstart,&rend);CHKERRQ(ierr); 3018 shift = rend-rstart; 3019 count = 0; 3020 rowindices = NULL; 3021 colindices = NULL; 3022 if (rowemb) { 3023 ierr = ISGetIndices(rowemb,&rowindices);CHKERRQ(ierr); 3024 } 3025 if (ocolemb) { 3026 ierr = ISGetIndices(ocolemb,&colindices);CHKERRQ(ierr); 3027 } 3028 for (i=0; i<B->rmap->n; i++) { 3029 PetscInt row; 3030 row = i; 3031 if (rowindices) row = rowindices[i]; 3032 for (j=Baij->i[i]; j<Baij->i[i+1]; j++) { 3033 col = Baij->j[count]; 3034 if (colindices) col = colindices[col]; 3035 if (Bdisassembled && col>=rstart) col += shift; 3036 v = Baij->a[count]; 3037 ierr = MatSetValues(aij->B,1,&row,1,&col,&v,INSERT_VALUES);CHKERRQ(ierr); 3038 ++count; 3039 } 3040 } 3041 /* No assembly for aij->B is necessary. */ 3042 /* FIXME: set aij->B's nonzerostate correctly. */ 3043 } else { 3044 ierr = MatSetUp(aij->B);CHKERRQ(ierr); 3045 } 3046 C->preallocated = PETSC_TRUE; 3047 C->was_assembled = PETSC_FALSE; 3048 C->assembled = PETSC_FALSE; 3049 /* 3050 C will need to be assembled so that aij->B can be compressed into local form in MatSetUpMultiply_MPIAIJ(). 3051 Furthermore, its nonzerostate will need to be based on that of aij->A's and aij->B's. 3052 */ 3053 PetscFunctionReturn(0); 3054 } 3055 3056 /* 3057 B uses local indices with column indices ranging between 0 and N-n; they must be interpreted using garray. 
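For example, with garray = {3,17,42} a stored column index of 1 in B refers to global column 17 of C; garray itself is built when C is assembled (see MatSetUpMultiply_MPIAIJ()).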
3058 */ 3059 PetscErrorCode MatGetSeqMats_MPIAIJ(Mat C,Mat *A,Mat *B) 3060 { 3061 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) (C->data); 3062 3063 PetscFunctionBegin; 3064 PetscValidPointer(A,2); 3065 PetscValidPointer(B,3); 3066 /* FIXME: make sure C is assembled */ 3067 *A = aij->A; 3068 *B = aij->B; 3069 /* Note that we don't incref *A and *B, so be careful! */ 3070 PetscFunctionReturn(0); 3071 } 3072 3073 /* 3074 Extract MPI submatrices encoded by pairs of IS that may live on subcomms of C. 3075 NOT SCALABLE due to the use of ISGetNonlocalIS() (see below). 3076 */ 3077 PetscErrorCode MatGetSubMatricesMPI_MPIXAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[], 3078 PetscErrorCode(*getsubmats_seq)(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat**), 3079 PetscErrorCode(*getlocalmats)(Mat,Mat*,Mat*), 3080 PetscErrorCode(*setseqmat)(Mat,IS,IS,MatStructure,Mat), 3081 PetscErrorCode(*setseqmats)(Mat,IS,IS,IS,MatStructure,Mat,Mat)) 3082 { 3083 PetscErrorCode ierr; 3084 PetscMPIInt isize,flag; 3085 PetscInt i,ii,cismax,ispar; 3086 Mat *A,*B; 3087 IS *isrow_p,*iscol_p,*cisrow,*ciscol,*ciscol_p; 3088 3089 PetscFunctionBegin; 3090 if (!ismax) PetscFunctionReturn(0); 3091 3092 for (i = 0, cismax = 0; i < ismax; ++i) { 3093 PetscMPIInt isize; 3094 ierr = MPI_Comm_compare(((PetscObject)isrow[i])->comm,((PetscObject)iscol[i])->comm,&flag);CHKERRQ(ierr); 3095 if (flag != MPI_IDENT) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Row and column index sets must have the same communicator"); 3096 ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm, &isize);CHKERRQ(ierr); 3097 if (isize > 1) ++cismax; 3098 } 3099 3100 /* 3101 If cismax is zero on all C's ranks, then and only then can we use purely sequential matrix extraction. 3102 ispar counts the number of parallel ISs across C's comm. 3103 */ 3104 ierr = MPIU_Allreduce(&cismax,&ispar,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 3105 if (!ispar) { /* Sequential ISs only across C's comm, so can call the sequential matrix extraction subroutine. */ 3106 ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr); 3107 PetscFunctionReturn(0); 3108 } 3109 3110 /* if (ispar) */ 3111 /* 3112 Construct the "complements" -- the off-processor indices -- of the iscol ISs for parallel ISs only. 3113 These are used to extract the off-diag portion of the resulting parallel matrix. 3114 The row IS for the off-diag portion is the same as for the diag portion, 3115 so we merely alias (without increfing) the row IS, while skipping those that are sequential. 3116 */ 3117 ierr = PetscMalloc2(cismax,&cisrow,cismax,&ciscol);CHKERRQ(ierr); 3118 ierr = PetscMalloc1(cismax,&ciscol_p);CHKERRQ(ierr); 3119 for (i = 0, ii = 0; i < ismax; ++i) { 3120 ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr); 3121 if (isize > 1) { 3122 /* 3123 TODO: This is the part that's ***NOT SCALABLE***. 3124 To fix this we need to extract just the indices of C's nonzero columns 3125 that lie on the intersection of isrow[i] and ciscol[ii] -- the nonlocal 3126 part of iscol[i] -- without actually computing ciscol[ii]. This also has 3127 to be done without serializing on the IS list, so, most likely, it is best 3128 done by rewriting MatGetSubMatrices_MPIAIJ() directly. 
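(ISGetNonlocalIS() gathers, on every rank, all of the indices of iscol[i] owned by other ranks, so each process ends up holding on the order of the global IS size; that gather is what makes this step non-scalable.)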
3129 */ 3130 ierr = ISGetNonlocalIS(iscol[i],&(ciscol[ii]));CHKERRQ(ierr); 3131 /* Now we have to 3132 (a) make sure ciscol[ii] is sorted, since, even if the off-proc indices 3133 were sorted on each rank, concatenated they might no longer be sorted; 3134 (b) Use ISSortPermutation() to construct ciscol_p, the mapping from the 3135 indices in the nondecreasing order to the original index positions. 3136 If ciscol[ii] is strictly increasing, the permutation IS is NULL. 3137 */ 3138 ierr = ISSortPermutation(ciscol[ii],PETSC_FALSE,ciscol_p+ii);CHKERRQ(ierr); 3139 ierr = ISSort(ciscol[ii]);CHKERRQ(ierr); 3140 ++ii; 3141 } 3142 } 3143 ierr = PetscMalloc2(ismax,&isrow_p,ismax,&iscol_p);CHKERRQ(ierr); 3144 for (i = 0, ii = 0; i < ismax; ++i) { 3145 PetscInt j,issize; 3146 const PetscInt *indices; 3147 3148 /* 3149 Permute the indices into a nondecreasing order. Reject row and col indices with duplicates. 3150 */ 3151 ierr = ISSortPermutation(isrow[i],PETSC_FALSE,isrow_p+i);CHKERRQ(ierr); 3152 ierr = ISSort(isrow[i]);CHKERRQ(ierr); 3153 ierr = ISGetLocalSize(isrow[i],&issize);CHKERRQ(ierr); 3154 ierr = ISGetIndices(isrow[i],&indices);CHKERRQ(ierr); 3155 for (j = 1; j < issize; ++j) { 3156 if (indices[j] == indices[j-1]) { 3157 SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in row IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]); 3158 } 3159 } 3160 ierr = ISRestoreIndices(isrow[i],&indices);CHKERRQ(ierr); 3161 3162 3163 ierr = ISSortPermutation(iscol[i],PETSC_FALSE,iscol_p+i);CHKERRQ(ierr); 3164 ierr = ISSort(iscol[i]);CHKERRQ(ierr); 3165 ierr = ISGetLocalSize(iscol[i],&issize);CHKERRQ(ierr); 3166 ierr = ISGetIndices(iscol[i],&indices);CHKERRQ(ierr); 3167 for (j = 1; j < issize; ++j) { 3168 if (indices[j-1] == indices[j]) { 3169 SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in col IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]); 3170 } 3171 } 3172 ierr = ISRestoreIndices(iscol[i],&indices);CHKERRQ(ierr); 3173 ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr); 3174 if (isize > 1) { 3175 cisrow[ii] = isrow[i]; 3176 ++ii; 3177 } 3178 } 3179 /* 3180 Allocate the necessary arrays to hold the resulting parallel matrices as well as the intermediate 3181 array of sequential matrices underlying the resulting parallel matrices. 3182 Which arrays to allocate is based on the value of MatReuse scall and whether ISs are sorted and/or 3183 contain duplicates. 3184 3185 There are as many diag matrices as there are original index sets. There are only as many parallel 3186 and off-diag matrices, as there are parallel (comm size > 1) index sets. 3187 3188 ARRAYS that can hold Seq matrices get allocated in any event -- either here or by getsubmats_seq(): 3189 - If the array of MPI matrices already exists and is being reused, we need to allocate the array 3190 and extract the underlying seq matrices into it to serve as placeholders, into which getsubmats_seq 3191 will deposite the extracted diag and off-diag parts. Thus, we allocate the A&B arrays and fill them 3192 with A[i] and B[ii] extracted from the corresponding MPI submat. 3193 - However, if the rows, A's column indices or B's column indices are not sorted, the extracted A[i] & B[ii] 3194 will have a different order from what getsubmats_seq expects. To handle this case -- indicated 3195 by a nonzero isrow_p[i], iscol_p[i], or ciscol_p[ii] -- we duplicate A[i] --> AA[i], B[ii] --> BB[ii] 3196 (retrieve composed AA[i] or BB[ii]) and reuse them here. 
  /*
    Allocate the necessary arrays to hold the resulting parallel matrices as well as the intermediate
    array of sequential matrices underlying the resulting parallel matrices.
    Which arrays to allocate is based on the value of MatReuse scall and whether the ISs are sorted and/or
    contain duplicates.

    There are as many diag matrices as there are original index sets. There are only as many parallel
    and off-diag matrices as there are parallel (comm size > 1) index sets.

    Arrays that can hold Seq matrices get allocated in any event -- either here or by getsubmats_seq():
    - If the array of MPI matrices already exists and is being reused, we need to allocate the array
      and extract the underlying seq matrices into it to serve as placeholders, into which getsubmats_seq
      will deposit the extracted diag and off-diag parts. Thus, we allocate the A&B arrays and fill them
      with A[i] and B[ii] extracted from the corresponding MPI submat.
    - However, if the rows, A's column indices or B's column indices are not sorted, the extracted A[i] & B[ii]
      will have a different order from what getsubmats_seq expects. To handle this case -- indicated
      by a nonzero isrow_p[i], iscol_p[i], or ciscol_p[ii] -- we duplicate A[i] --> AA[i] and B[ii] --> BB[ii]
      (or retrieve the previously composed AA[i] or BB[ii]) and reuse them here. AA[i] and BB[ii] are then used
      to permute their values into the A[i] and B[ii] sitting inside the corresponding submat.
    - If no reuse is taking place then getsubmats_seq will allocate the A&B arrays and create the corresponding
      A[i], B[ii], AA[i] or BB[ii] matrices.
  */
  /* The parallel matrix array is allocated here only if no reuse is taking place. If reused, it is passed in by the caller. */
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscMalloc1(ismax,submat);CHKERRQ(ierr);
  }

  /* Now obtain the sequential A and B submatrices separately. */
  /* scall=MAT_REUSE_MATRIX is not handled yet, because getsubmats_seq() requires reuse of A and B. */
  ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);
  ierr = (*getsubmats_seq)(C,cismax,cisrow,ciscol,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);

  /*
    If scall == MAT_REUSE_MATRIX AND the permutations are NULL, we are done, since the sequential
    matrices A & B have been extracted directly into the parallel matrices containing them, or
    simply into the sequential matrix identical with the corresponding A (if isize == 1).
    Note that in that case colmap doesn't need to be rebuilt, since the matrices are expected
    to have the same sparsity pattern.
    Otherwise, A and/or B have to be properly embedded into C's index spaces and the correct colmap
    must be constructed for C. This is done by setseqmat(s).
  */
  for (i = 0, ii = 0; i < ismax; ++i) {
    /*
       TODO: cache ciscol, permutation ISs and maybe cisrow? What about isrow & iscol?
       That way we can avoid sorting and computing permutations when reusing.
       To this end:
        - remove the old cache, if it exists, when extracting submatrices with MAT_INITIAL_MATRIX
        - if caching arrays to hold the ISs, make and compose a container for them so that it can
          be destroyed upon destruction of C (use PetscContainerUserDestroy() to clear out the contents).
    */
    MatStructure pattern;
    pattern = DIFFERENT_NONZERO_PATTERN;

    ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
    /* Construct submat[i] from the Seq pieces A (and B, if necessary). */
    if (isize > 1) {
      if (scall == MAT_INITIAL_MATRIX) {
        ierr = MatCreate(((PetscObject)isrow[i])->comm,(*submat)+i);CHKERRQ(ierr);
        ierr = MatSetSizes((*submat)[i],A[i]->rmap->n,A[i]->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = MatSetType((*submat)[i],MATMPIAIJ);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp((*submat)[i]->rmap);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp((*submat)[i]->cmap);CHKERRQ(ierr);
      }
      /* For each parallel isrow[i], insert the extracted sequential matrices into the parallel matrix. */
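      /*
        A[i] and B[ii] are needed here only to populate (*submat)[i]; the local handles AA and BB
        are destroyed immediately after the insertion, dropping this routine's references to the
        extracted sequential pieces.
      */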
      {
        Mat AA,BB;
        AA = A[i];
        BB = B[ii];
        if (AA || BB) {
          ierr = setseqmats((*submat)[i],isrow_p[i],iscol_p[i],ciscol_p[ii],pattern,AA,BB);CHKERRQ(ierr);
          ierr = MatAssemblyBegin((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
          ierr = MatAssemblyEnd((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        }

        ierr = MatDestroy(&AA);CHKERRQ(ierr);
        ierr = MatDestroy(&BB);CHKERRQ(ierr);
      }
      ierr = ISDestroy(ciscol+ii);CHKERRQ(ierr);
      ierr = ISDestroy(ciscol_p+ii);CHKERRQ(ierr);
      ++ii;
    } else { /* if (isize == 1) */
      if (scall == MAT_REUSE_MATRIX) {
        ierr = MatDestroy(&(*submat)[i]);CHKERRQ(ierr);
      }
      if (isrow_p[i] || iscol_p[i]) {
        ierr = MatDuplicate(A[i],MAT_DO_NOT_COPY_VALUES,(*submat)+i);CHKERRQ(ierr);
        ierr = setseqmat((*submat)[i],isrow_p[i],iscol_p[i],pattern,A[i]);CHKERRQ(ierr);
        /* Otherwise A is extracted straight into (*submat)[i]. */
        /* TODO: Compose A[i] on (*submat)[i] for future use, if ((isrow_p[i] || iscol_p[i]) && MAT_INITIAL_MATRIX). */
        ierr = MatDestroy(A+i);CHKERRQ(ierr);
      } else (*submat)[i] = A[i];
    }
    ierr = ISDestroy(&isrow_p[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&iscol_p[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree2(cisrow,ciscol);CHKERRQ(ierr);
  ierr = PetscFree2(isrow_p,iscol_p);CHKERRQ(ierr);
  ierr = PetscFree(ciscol_p);CHKERRQ(ierr);
  ierr = PetscFree(A);CHKERRQ(ierr);
  ierr = PetscFree(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetSubMatricesMPI_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetSubMatricesMPI_MPIXAIJ(C,ismax,isrow,iscol,scall,submat,MatGetSubMatrices_MPIAIJ,MatGetSeqMats_MPIAIJ,MatSetSeqMat_SeqAIJ,MatSetSeqMats_MPIAIJ);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
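
/*
  A minimal usage sketch (illustration only; subcomm, nloc, mloc, rstart, and cstart are hypothetical,
  and this routine is the MPIAIJ implementation behind the generic submatrix-extraction interface rather
  than something normally called directly): the ranks of a subcommunicator of C's communicator each supply
  one row IS and one column IS, and receive the corresponding parallel submatrix living on that subcomm.

    Mat C,*submats;
    IS  isrow,iscol;
    ierr = ISCreateStride(subcomm,nloc,rstart,1,&isrow);CHKERRQ(ierr);
    ierr = ISCreateStride(subcomm,mloc,cstart,1,&iscol);CHKERRQ(ierr);
    ierr = MatGetSubMatricesMPI_MPIAIJ(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);CHKERRQ(ierr);
    ... use submats[0], a parallel matrix on subcomm ...
    ierr = MatDestroy(&submats[0]);CHKERRQ(ierr);
    ierr = PetscFree(submats);CHKERRQ(ierr);
*/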