/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

static int MatIncreaseOverlap_MPISBAIJ_Once(Mat,int,IS*);
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat,int*,int,int*,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
int MatIncreaseOverlap_MPISBAIJ(Mat C,int is_max,IS is[],int ov)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  int          i,ierr,N=C->N, bs=c->bs;
  IS           *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,is_max,is_new,is);CHKERRQ(ierr);
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
    data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
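/*
   For example (hypothetical values): with is_max = 2, Mbs = 10, is[0] = {3,7} and is[1] = {5},
   the communication format packs
       data1 = [2, 2, 1, 3, 7, 5]
   i.e. the header {is_max, size(is[0]), size(is[1])} followed by the concatenated indices,
   while the output format reserves a fixed stride of Mbs entries per index set, so
   data(is[1]) starts at data2[is_max + 1 + Mbs] = data2[13] regardless of the size of is[0].
*/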
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static int MatIncreaseOverlap_MPISBAIJ_Once(Mat C,int is_max,IS is[])
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  int           len,idx,*idx_i,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
                size,rank,Mbs,i,j,k,ierr,nrqs,nrqr,*odata1,*odata2,
                tag1,tag2,flag,proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  int           *id_r1,*len_r1,proc_end=0,*iwork,*len_s,len_unused,nodata2;
  int           ois_max; /* max number of is[] over all processors */
  char          *t_p;
  MPI_Comm      comm;
  MPI_Request   *s_waits1,*s_waits2,r_req;
  MPI_Status    *s_status,r_status;
  PetscBT       *table;  /* mark indices of this processor's is[] */
  PetscBT       table_i;
  PetscBT       otable;  /* mark indices of other processors' is[] */
  int           bs=c->bs,Bn = c->B->n,Bnbs = Bn/bs,*Bowners;
  IS            garray_local,garray_gl;

  PetscFunctionBegin;

  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len     = PetscMax(is_max, size);
  len_max = len*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*len*sizeof(char) + 1;
  ierr = PetscMalloc(len_max,&table);CHKERRQ(ierr);
  t_p  = (char *)(table + len);
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(int),&n);CHKERRQ(ierr);
  len  = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(int),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc((size*4+1)*sizeof(int),&len_s);CHKERRQ(ierr);
  btable  = len_s + size;
  iwork   = btable + size;
  Bowners = iwork + size;

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPI_INT,Bowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];

  if (is_max){
    /* hash table ctable which maps c->row to proc_id */
    ierr = PetscMalloc(Mbs*sizeof(int),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<c->rowners[proc_id+1]; j++) {
        ctable[j] = proc_id;
      }
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++){
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  } /* if (is_max) */
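  /* When is_max > 0, ctable[row] now gives the owner of global block row 'row' (for
     example, with size = 2 and c->rowners = {0,4,Mbs}, ctable = {0,0,0,0,1,...,1}), and
     table[i] has a bit set for every global block column appearing in processor i's
     off-diagonal part B, so PetscBTLookup(table[i],idx) below answers whether processor
     i couples to block index idx. */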
  ierr = ISDestroy(garray_gl);CHKERRQ(ierr);

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* add length of the header to the message */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  if (ctable){ierr = PetscFree(ctable);CHKERRQ(ierr);}

  /* determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);
  /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] nrqs: %d, nrqr: %d\n",rank,nrqs,nrqr); */
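  /* nrqr is the number of messages this processor will receive; id_r1/len_r1 hold the
     source ranks and lengths of those incoming messages. They are only used below to
     size the receive buffer odata1, after which both arrays are freed. */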

  /* Now post the sends */
  ierr = PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  s_waits2 = s_waits1 + size;
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){ /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPI_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive other processors' is[] and process, then send back */
  /*----------------------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
    /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] expect to recv len=%d from [%d]\n",rank,len_r1[i],id_r1[i]); */
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(int),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
  len_est = 2*len_max;       /* estimated space of storing is[] for all receiving messages */
  ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
  nodata2 = 0;               /* nodata2+1: number of PetscMalloc(,&odata2_ptr[]) calls */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est;      /* unused space in the array odata2_ptr[nodata2] -- needs to be >= len_max */

  k = 0;
  while (k < nrqr){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] recv %d from [%d]\n",rank,len,proc_id); */

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max){ /* allocate more space for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
        /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] 2. Malloc odata2, nodata2: %d\n",rank,nodata2); */
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len  = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPI_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] send %d back to [%d] \n",rank,len,proc_id); */
      k++;
      odata2        += len;
      len_unused    -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max){ /* allocate more space for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
    len_unused = len_est;
    /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] 3. Malloc data2, nodata2: %d\n",rank,nodata2); */
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

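  /* data now holds this processor's is[] plus the overlap found locally, in the output
     format described above (indices of set i start at offset 1 + is_max + Mbs*i);
     step 4 merges the remotely found indices into the same array, using table[i] to
     skip duplicates. */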
  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get max number of messages that this processor expects to recv */
  ierr = MPI_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] expects max_len=%d of data2 from others \n",rank,iwork[rank]); */
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(int),&data2);CHKERRQ(ierr);
  ierr = PetscFree(len_s);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] recv %d from [%d], data2:\n",rank,len,proc_id); */
      if (len > 1+is_max){ /* add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);

  /* phase 1 sends are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs){
    ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* phase 2 sends are complete */
  if (nrqr){
    ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++){
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

   Inputs:
     C     - MAT_MPISBAIJ;
     data  - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
     whose - whose is[] is to be processed,
             MINE:  this processor's is[]
             OTHER: another processor's is[]
   Output:
     nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
             whose = OTHER:
                     only holds the newly found indices
     table - table[i]: marks the indices of is[i], i=0,...,is_max-1. Used only when whose=MINE.
*/
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
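/*
   Illustration (hypothetical data): with is_max = 1, input is[0] = {2}, and a single newly
   found index 5, whose = MINE yields nidx = [1, 2, 2, 5, ...] (sizes in nidx[1..is_max],
   indices of set i at offset 1 + is_max + Mbs*i), whereas whose = OTHER yields
   nidx = [1, 1, 5], i.e. only the newly found index, packed contiguously.
*/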
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat C,int *data,int whose,int *nidx,PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(c->B)->data;
  int          ierr,row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  int          a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT      table0;  /* marks the indices of the input is[] for look up */
  PetscBT      table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstart;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (mark idx and nidx) and table0 (only mark idx) */
    if (whose == MINE){ /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process another processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0; /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++){
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %d >= Mbs %d",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) {nidx_i[isz0] = col;}
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) {isz = isz0;}
    k = 0; /* number of indices from input is[i] that have been examined */
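    /* Two cases for each local block row:
       - the row itself is in is[i] (marked in table0): a row search collects every column
         of that row from both the diagonal (A) and off-diagonal (B) parts;
       - the row is not in is[i]: a column search adds the row as soon as one of its stored
         columns lies in is[i]. Since SBAIJ stores only the upper block triangle, this
         column search is what picks up the symmetric (lower-triangular) couplings.
       The row loop can exit early once isz0 input rows have been encountered. */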
    for (row=0; row<mbs; row++){
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)){ /* row is in input is[i]:
                                                do row search: collect all cols in this row */
        for (l = a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        for (l = b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not in input is[i]:
                  do col search: add row onto nidx_i if one of its columns is in input is[i] */
        for (l = a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l = a_start; l<a_end; l++) */
          }
        }
        for (l = b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l = b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1){
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}