
/*
   Routines to compute overlapping regions of a parallel MPI matrix,
   used to find the submatrices that are shared across processors.
*/
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg;

  PetscFunctionBegin;
  ierr = PetscMalloc1(is_max,&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");

  /* ----- previous non-scalable implementation ----- */
  flg  = PETSC_FALSE;
  ierr = PetscOptionsHasName(NULL,NULL,"-IncreaseOverlap_old",&flg);CHKERRQ(ierr);
  if (flg) { /* previous non-scalable implementation */
    printf("use previous non-scalable implementation...\n");
    for (i=0; i<ov; ++i) {
      ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
    }
  } else { /* implementation using modified BAIJ routines */

    ierr = PetscMalloc1(Mbs+1,&nidx);CHKERRQ(ierr);
    ierr = PetscBTCreate(Mbs,&table);CHKERRQ(ierr); /* for column search */

    /* Create is_row */
    ierr = PetscMalloc1(is_max,&is_row);CHKERRQ(ierr);
    ierr = ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);CHKERRQ(ierr);

    for (i=1; i<is_max; i++) {
      is_row[i] = is_row[0]; /* reuse is_row[0] */
    }

    /* Allocate memory to hold all the submatrices - modified from MatCreateSubMatrices_MPIBAIJ() */
    ierr = PetscMalloc1(is_max+1,&submats);CHKERRQ(ierr);

    /* Determine the number of stages through which the submatrices are created */
    nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
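    /* Note: each stage processes at most nmax index sets; nmax is sized so that the roughly
       Nbs integers of scratch space needed per submatrix stay within a ~20 MB budget (hence
       the 20*1000000 above). As a hypothetical example, with Nbs = 10^6 block columns and
       8-byte PetscInt this gives nmax = 2, so 5 index sets would be handled in 3 stages. */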
    nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);

    /* Make sure every processor loops through the same number of stages */
    ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);

    for (iov=0; iov<ov; ++iov) {
      /* 1) Get submats for column search */
      for (i=0,pos=0; i<nstages; i++) {
        if (pos+nmax <= is_max) max_no = nmax;
        else if (pos == is_max) max_no = 0;
        else                    max_no = is_max-pos;
        c->ijonly = PETSC_TRUE; /* only matrix data structures are requested */
        ierr = MatCreateSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,submats+pos);CHKERRQ(ierr);
        pos += max_no;
      }

      /* 2) Row search */
      ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);

      /* 3) Column search */
      for (i=0; i<is_max; i++) {
        asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
        ai     = asub_i->i;

        /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() into table */
        ierr = PetscBTMemzero(Mbs,table);CHKERRQ(ierr);

        ierr = ISGetIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISGetLocalSize(is_new[i],&nis);CHKERRQ(ierr);
        for (l=0; l<nis; l++) {
          ierr    = PetscBTSet(table,idx[l]);CHKERRQ(ierr);
          nidx[l] = idx[l];
        }
        isz = nis;

        /* add column entries to table */
        for (brow=0; brow<Mbs; brow++) {
          nz = ai[brow+1] - ai[brow];
          if (nz) {
            if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
          }
        }
        ierr = ISRestoreIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);

        /* create updated is_new */
        ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);CHKERRQ(ierr);
      }

      /* Free tmp spaces */
      for (i=0; i<is_max; i++) {
        ierr = MatDestroy(&submats[i]);CHKERRQ(ierr);
      }
    }

    ierr = PetscBTDestroy(&table);CHKERRQ(ierr);
    ierr = PetscFree(submats);CHKERRQ(ierr);
    ierr = ISDestroy(&is_row[0]);CHKERRQ(ierr);
    ierr = PetscFree(is_row);CHKERRQ(ierr);
    ierr = PetscFree(nidx);CHKERRQ(ierr);
  }

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);CHKERRQ(ierr);

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef enum {MINE,OTHER} WhoseOwner;
/* data1, odata1 and odata2 are packed in the format (for communication):
     data[0]          = is_max, no. of index sets
     data[1]          = size of is[0]
      ...
     data[is_max]     = size of is[is_max-1]
     data[is_max + 1] = data(is[0])
      ...
     data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
      ...
   data2 is packed in the format (for creating output is[]):
     data[0]          = is_max, no. of index sets
     data[1]          = size of is[0]
      ...
     data[is_max]     = size of is[is_max-1]
     data[is_max + 1] = data(is[0])
      ...
     data[is_max + 1 + Mbs*i] = data(is[i])
      ...
*/
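/* Illustration of the two layouts above (hypothetical values): with is_max = 2, is[0] = {3,7},
   is[1] = {5}, the communication format packs the sets back to back,
       data1 = [2, 2, 1, 3, 7, 5],
   while the output format reserves a fixed stride of Mbs entries per set, e.g. with Mbs = 10,
       data2 = [2, 2, 1,  3, 7, <8 unused>,  5, <9 unused>],
   so data2(is[i]) always starts at data2[is_max + 1 + Mbs*i]. */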
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i;
  PetscInt       Mbs,i,j,k,*odata1,*odata2;
  PetscInt       proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,len_unused,nodata2;
  PetscInt       ois_max; /* max no. of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable;  /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn=c->B->cmap->n,Bnbs=Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len  = PetscMax(is_max,size);
  ierr = PetscMalloc2(len,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,&t_p);CHKERRQ(ierr);
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPIU_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc1(is_max,&n);CHKERRQ(ierr);
  len  = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc1(size*len+1,&data1);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc4(size,&len_s,size,&btable,size,&iwork,size+1,&Bowners);CHKERRQ(ierr);

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local,&garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(&garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
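  /* Note on the lookup tables built next: ctable maps a global block row to its owning process,
     and table[i] marks the off-diagonal (garray) block columns referenced by process [i]. They
     are used when packing data1 below: an index is always sent to its owner, and to a
     lower-ranked process only if that process's table marks it, which prunes messages that
     cannot contribute to the overlap. */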
  if (is_max) {
    /* hash table ctable which maps c->row to proc_id */
    ierr = PetscMalloc1(Mbs,&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) ctable[j] = proc_id;
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++) {
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j=Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  } /* if (is_max) */
  ierr = ISDestroy(&garray_gl);CHKERRQ(ierr);

  /* evaluate communication - mesg to who, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++) {
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++) { /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the length of the header */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  ierr = PetscFree(ctable);CHKERRQ(ierr);

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc2(size,&s_waits1,size,&s_waits2);CHKERRQ(ierr);
  k    = 0;
  for (proc_id=0; proc_id<size; proc_id++) { /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }
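  /* Note: the incoming message lengths are not known in advance, so each message is discovered
     with MPI_Iprobe/MPI_Get_count before the matching MPI_Irecv is posted. The replies computed
     in step 2 are packed head-to-tail into odata2; since they are handed to non-blocking sends,
     every odata2 buffer must remain allocated until the MPI_Waitall on s_waits2 completes, which
     is why all such buffers are tracked in odata2_ptr[] and freed only at the end of step 5. */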
  /* 2. Receive other processors' is[], process them, then send the results back */
  /*------------------------------------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc1(len+1,&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,&otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1); /* max space needed to store all is[] for one receive */
  len_est = 2*len_max;       /* estimated space needed to store is[] for all received messages */
  ierr    = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);
  nodata2 = 0;               /* nodata2+1: number of odata2 buffers allocated so far */

  odata2_ptr[nodata2] = odata2;

  len_unused = len_est; /* unused space in the buffer odata2_ptr[nodata2] -- needs to be >= len_max */

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr    = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr    = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr    = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        ierr = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);

        odata2_ptr[++nodata2] = odata2;

        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len  = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2         += len;
      len_unused     -= len;
      len_s[proc_id]  = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(&otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space needed to store all is[] for this processor */
  if (len_unused < len_max) { /* allocate more space for odata2 */
    ierr = PetscMalloc1(len_est+1,&odata2);CHKERRQ(ierr);

    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);
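  /* At this point data (the current odata2 segment) holds this processor's own expanded index
     sets in the fixed-stride MINE layout, and table[i] marks every index already present in
     is[i]. The replies received below contain only the newly found indices, so merging reduces
     to appending any index whose bit is not yet set in table[i]. */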
  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get max number of messages that this processor expects to recv */
  ierr = MPIU_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc1(iwork[rank]+1,&data2);CHKERRQ(ierr);
  ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);

      proc_id = r_status.MPI_SOURCE;

      ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max) { /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree2(table,t_p);CHKERRQ(ierr);

  /* phase 1 sends are complete */
  ierr = PetscMalloc1(size,&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* phase 2 sends are complete */
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
  ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr   = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++) {
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once() to do
   the work on the local processor.

   Inputs:
     C     - MATMPISBAIJ matrix
     data  - holds is[]; see MatIncreaseOverlap_MPISBAIJ_Once() for the format
     whose - whose is[] is to be processed:
             MINE:  this processor's is[]
             OTHER: another processor's is[]
   Output:
     nidx  - whose = MINE:
               holds the input and the newly found indices in the same format as data
             whose = OTHER:
               holds only the newly found indices
     table - table[i] marks the indices of is[i], i=0,...,is_max; used only when whose=MINE
*/
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
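/*
   Note: because SBAIJ stores only the upper triangle of each block row, a neighbour of the input
   index set can appear in two ways. The row search below collects the stored columns of the rows
   that belong to is[i]; the column search adds any local row whose stored columns reach into
   is[i], recovering the couplings that would otherwise live in the (unstored) lower triangle.
*/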
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of the input is[] for look up */
  PetscBT        table_i; /* points to the i-th table; when whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,&table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to the input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to the output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of the input is[i] */

    /* initialize and set table_i (mark idx and nidx) and table0 (only mark idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else { /* process another processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0; /* size of the new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0; /* no. of indices from the input is[i] that have been examined */
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is in the input is[i]:
                                                 row search - collect all cols in this row */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break; /* out of the row loop */
      } else { /* row is not in the input is[i]:
                  column search - add row to nidx_i if the row has a col that is in the input is[i] */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* out of the l-loop over Amat */
          }
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* out of the l-loop over Bmat */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to the input is[i+1] array */
      nidx_i += isz; /* ptr to the output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of the new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(&table0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
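/*
   Minimal usage sketch (illustrative only; the matrix A and the sizes nlocal and rstart are
   hypothetical): MatIncreaseOverlap() is the public entry point that dispatches to
   MatIncreaseOverlap_MPISBAIJ() for MATMPISBAIJ matrices, e.g. to grow an index set by an
   overlap of 2:

     IS is;
     ierr = ISCreateStride(PETSC_COMM_SELF,nlocal,rstart,1,&is);CHKERRQ(ierr);
     ierr = MatIncreaseOverlap(A,1,&is,2);CHKERRQ(ierr);
     ierr = ISDestroy(&is);CHKERRQ(ierr);
*/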