
/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,int,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,int*,int,int*,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,int is_max,IS is[],int ov)
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  int           i,N=C->N, bs=c->bs;
  IS            *is_new;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,is_max,is,is_new);CHKERRQ(ierr);
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,is_max,is_new,is);CHKERRQ(ierr);
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
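
/*
   Usage sketch (illustrative; the names A, isets, nsets and overlap are placeholders,
   not part of this file). This routine is normally reached through the public
   interface, e.g. from an additive Schwarz setup:

       ierr = MatIncreaseOverlap(A,nsets,isets,overlap);CHKERRQ(ierr);

   where A is an assembled MATMPISBAIJ matrix and isets[] holds one index set per
   subdomain; MatIncreaseOverlap() dispatches to MatIncreaseOverlap_MPISBAIJ() above
   and each isets[i] is replaced by the enlarged index set.
*/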

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
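/* Example of the layout (illustrative): with is_max = 2, is[0] = {3,7} and is[1] = {5},
   the communication format packs the sets back to back,
       data1 = { 2, 2, 1, 3, 7, 5 },
   while the output format reserves a full stride of Mbs entries per index set, so the
   data of is[1] starts at data2[is_max + 1 + Mbs] no matter how large is[0] grows. */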
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,int is_max,IS is[])
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  int         len,idx,*idx_i,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
              size,rank,Mbs,i,j,k,nrqs,nrqr,*odata1,*odata2,
              tag1,tag2,flag,proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  int         *id_r1,*len_r1,proc_end=0,*iwork,*len_s,len_unused,nodata2;
  int         ois_max; /* max number of is[] over all processors */
  char        *t_p;
  MPI_Comm    comm;
  MPI_Request *s_waits1,*s_waits2,r_req;
  MPI_Status  *s_status,r_status;
  PetscBT     *table;  /* mark indices of this processor's is[] */
  PetscBT     table_i;
  PetscBT     otable; /* mark indices of other processors' is[] */
  int         bs=c->bs,Bn = c->B->n,Bnbs = Bn/bs,*Bowners;
  IS          garray_local,garray_gl;

  PetscFunctionBegin;

  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len = PetscMax(is_max, size);
  len_max = len*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*len*sizeof(char) + 1;
  ierr = PetscMalloc(len_max,&table);CHKERRQ(ierr);
  t_p  = (char *)(table + len);
  for (i=0; i<len; i++) {
    table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }
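  /* table[0..len-1] are bit arrays of Mbs bits each, carved out of the single
     allocation above: t_p points to the byte storage that follows the array of
     PetscBT pointers */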

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(int),&n);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(int),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc((size*4+1)*sizeof(int),&len_s);CHKERRQ(ierr);
  btable  = len_s + size;
  iwork   = btable + size;
  Bowners = iwork + size;

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPI_INT,Bowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
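  /* Bowners[] now holds prefix sums of the per-processor off-diagonal block counts:
     the garray entries contributed by processor p occupy positions
     [Bowners[p],Bowners[p+1]) of garray_gl */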

  if (is_max){
    /* lookup array ctable which maps a block row to the proc_id that owns it */
    ierr = PetscMalloc(Mbs*sizeof(int),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<c->rowners[proc_id+1]; j++) {
        ctable[j] = proc_id;
      }
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++){
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  }  /* if (is_max) */
  ierr = ISDestroy(garray_gl);CHKERRQ(ierr);

  /* evaluate communication - mesg to who, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id] = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0;  proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
          continue;   /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* add the is_max+1 header entries */
    }
  }
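  /* at this point data1_start[p] is reset to the beginning of processor p's block in
     data1, and len_s[p] is the total message length for p (is_max+1 header entries
     plus the indices destined for p); len_s[p] = 0 means no message is sent to p */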

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  if (ctable){ierr = PetscFree(ctable);CHKERRQ(ierr);}

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);
  /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] nrqs: %d, nrqr: %d\n",rank,nrqs,nrqr); */

  /* Now post the sends */
  ierr = PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  s_waits2 = s_waits1 + size;
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){  /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPI_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive other's is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
    /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] expect to recv len=%d from [%d]\n",rank,len_r1[i],id_r1[i]); */
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++)
    len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(int),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1);  /* max space storing all is[] for each receive */
  len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
  ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
  nodata2 = 0;       /* nodata2+1 = number of chunks allocated for odata2_ptr[] */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est; /* unused space in the chunk odata2_ptr[nodata2] -- needs to be >= len_max */

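  /* answer incoming phase-1 requests in arrival order: probe for any pending message,
     receive it into odata1, compute the requested overlap with
     MatIncreaseOverlap_MPISBAIJ_Local(), and send the result (stored in odata2)
     straight back to the sender under tag2 */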
  k = 0;
  while (k < nrqr){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] recv %d from [%d]\n",rank,len,proc_id); */

      /* Process messages */
      /* make sure there is enough unused space in odata2 array */
      if (len_unused < len_max){ /* allocate more space for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
        /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] 2. Malloc odata2, nodata2: %d\n",rank,nodata2); */
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPI_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] send %d back to [%d] \n",rank,len,proc_id); */
      k++;
      odata2     += len;
      len_unused -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max){ /* allocate more space for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(int),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
    len_unused = len_est;
    /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] 3. Malloc data2, nodata2: %d\n",rank,nodata2); */
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get the max length of the reply messages this processor expects to recv */
  ierr = MPI_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
  /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] expects max_len=%d of data2 from others \n",rank,iwork[rank]); */
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(int),&data2);CHKERRQ(ierr);
  ierr = PetscFree(len_s);CHKERRQ(ierr);

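  /* merge the replies: each processor that received a request returns the indices it
     found; an index not yet marked in table[i] is appended to this processor's is[i]
     block inside data */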
  k = 0;
  while (k < nrqs){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] recv %d from [%d], data2:\n",rank,len,proc_id); */
      if (len > 1+is_max){ /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);

  /* wait for phase 1 sends to complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs){
    ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* wait for phase 2 sends to complete */
  if (nrqr){
    ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++){
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
       the work on the local processor.

     Inputs:
      C      - MAT_MPISBAIJ;
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] to be processed,
               MINE:  this processor's is[]
               OTHER: other processors' is[]
     Output:
       nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
               whose = OTHER:
                     only holds the newly found indices
       table - table[i]: marks the indices of is[i], i=0,...,is_max-1. Used only in the case whose=MINE.
*/
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,int *data,int whose,int *nidx,PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  int row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  int          a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT      table0;  /* mark the indices of input is[] for look up */
  PetscBT      table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs = c->Mbs; mbs = a->mbs;
  ai = a->i; aj = a->j;
  bi = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstart;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz  = 0;
    n = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
    if (whose == MINE){ /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process other processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
       nidx[1+i] = 0; /* size of new is[i] */
       continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++){
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %d >= Mbs %d",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) {nidx_i[isz0] = col;}
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) {isz = isz0;}
    k = 0;  /* no. of indices from input is[i] that have been examined */
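    /* only the upper triangle of the symmetric matrix is stored (SBAIJ), so enlarging
       is[i] takes two searches: a row search (a row already in is[i] contributes its
       stored columns) and a column search (a row is added when one of its stored
       columns lies in is[i]), which supplies the contributions of the missing lower
       triangle */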
    for (row=0; row<mbs; row++){
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
                                                do row search: collect all cols in this row */
        for (l = a_start; l<a_end ; l++){ /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        for (l = b_start; l<b_end ; l++){ /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not on input is[i]:
                  do col search: add row onto nidx_i if one of its stored cols is in the input is[i] */
        for (l = a_start; l<a_end ; l++){ /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l = a_start; l<a_end; l++) */
          }
        }
        for (l = b_start; l<b_end ; l++){ /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* for (l = b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1){
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
495