xref: /petsc/src/mat/impls/sbaij/mpi/sbaijov.c (revision 0369a392ee39ea4d01e519e42e389892d4e1e2e4)

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>
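
/*
   Usage sketch (hypothetical setup; A, nlocal and rows are placeholders):

     IS is[1];
     PetscCall(ISCreateGeneral(PETSC_COMM_SELF,nlocal,rows,PETSC_COPY_VALUES,&is[0]));
     PetscCall(MatIncreaseOverlap(A,1,is,2));    grow is[0] by 2 levels of overlap
     PetscCall(ISDestroy(&is[0]));

   For a MATMPISBAIJ matrix A, MatIncreaseOverlap() dispatches to
   MatIncreaseOverlap_MPISBAIJ() below.
*/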

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg;

  PetscFunctionBegin;
  PetscCall(PetscMalloc1(is_max,&is_new));
  /* Convert the indices into block format */
  PetscCall(ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new));
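  /* e.g. (hypothetical) with bs=2 the scalar indices {4,5,9} compress to the block indices {2,4} */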
  PetscCheckFalse(ov < 0,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");

  /* ----- previous non-scalable implementation ----- */
  flg  = PETSC_FALSE;
  PetscCall(PetscOptionsHasName(NULL,NULL,"-IncreaseOverlap_old",&flg));
  if (flg) { /* previous non-scalable implementation */
    PetscCall(PetscPrintf(PETSC_COMM_SELF,"use previous non-scalable implementation...\n"));
    for (i=0; i<ov; ++i) {
      PetscCall(MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new));
    }
  } else { /* implementation using modified BAIJ routines */

    PetscCall(PetscMalloc1(Mbs+1,&nidx));
    PetscCall(PetscBTCreate(Mbs,&table)); /* for column search */

    /* Create is_row */
    PetscCall(PetscMalloc1(is_max,&is_row));
    PetscCall(ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]));

    for (i=1; i<is_max; i++) {
      is_row[i] = is_row[0]; /* reuse is_row[0] */
    }

    /* Allocate memory to hold all the submatrices - Modified from MatCreateSubMatrices_MPIBAIJ() */
    PetscCall(PetscMalloc1(is_max+1,&submats));

    /* Determine the number of stages through which submatrices are done */
    nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
    nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);

    /* Make sure every processor loops through the nstages */
    PetscCall(MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C)));
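    /* nmax caps the PetscInt work space per stage at roughly 20 MB; e.g. (hypothetical)
       c->Nbs = 10^6 with 8-byte PetscInt gives nmax = 2, so is_max = 5 index sets
       are processed in nstages_local = 3 stages of sizes 2, 2 and 1 */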

    for (iov=0; iov<ov; ++iov) {
      /* 1) Get submats for column search */
      for (i=0,pos=0; i<nstages; i++) {
        if (pos+nmax <= is_max) max_no = nmax;
        else if (pos == is_max) max_no = 0;
        else                    max_no = is_max-pos;
        c->ijonly = PETSC_TRUE; /* only matrix data structures are requested */
        /* The resulting submatrices should be BAIJ, not SBAIJ, hence we change this value to trigger that */
        PetscCall(PetscStrcpy(((PetscObject)c->A)->type_name,MATSEQBAIJ));
        PetscCall(MatCreateSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,submats+pos));
        PetscCall(PetscStrcpy(((PetscObject)c->A)->type_name,MATSEQSBAIJ));
        pos += max_no;
      }

      /* 2) Row search */
      PetscCall(MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new));

      /* 3) Column search */
      for (i=0; i<is_max; i++) {
        asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
        ai     = asub_i->i;

        /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ_Once() into the table */
        PetscCall(PetscBTMemzero(Mbs,table));

        PetscCall(ISGetIndices(is_new[i],&idx));
        PetscCall(ISGetLocalSize(is_new[i],&nis));
        for (l=0; l<nis; l++) {
          PetscCall(PetscBTSet(table,idx[l]));
          nidx[l] = idx[l];
        }
        isz = nis;

        /* add every block row that has a nonzero in the columns of is_new[i] */
        for (brow=0; brow<Mbs; brow++) {
          nz = ai[brow+1] - ai[brow];
          if (nz) {
            if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
          }
        }
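        /* this is the column search: C is symmetric, so a nonzero row of C(:,is_new[i])
           marks a nonzero column of C(is_new[i],:), i.e. an index that belongs in the
           enlarged set but is invisible to a row search over the stored upper triangle */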
        PetscCall(ISRestoreIndices(is_new[i],&idx));
        PetscCall(ISDestroy(&is_new[i]));

        /* create updated is_new */
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i));
      }

      /* Free tmp spaces */
      for (i=0; i<is_max; i++) {
        PetscCall(MatDestroy(&submats[i]));
      }
    }

    PetscCall(PetscBTDestroy(&table));
    PetscCall(PetscFree(submats));
    PetscCall(ISDestroy(&is_row[0]));
    PetscCall(PetscFree(is_row));
    PetscCall(PetscFree(nidx));

  }

  for (i=0; i<is_max; i++) PetscCall(ISDestroy(&is[i]));
  PetscCall(ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is));

  for (i=0; i<is_max; i++) PetscCall(ISDestroy(&is_new[i]));
  PetscCall(PetscFree(is_new));
  PetscFunctionReturn(0);
}

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
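/* Example (hypothetical): is_max=2, is[0]={3,7}, is[1]={5} pack into
   data1 = {2, 2, 1, 3, 7, 5}, while in data2 each is[i] starts at the fixed
   offset is_max+1+Mbs*i so that it can grow in place up to Mbs entries */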
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i;
  PetscInt       Mbs,i,j,k,*odata1,*odata2;
  PetscInt       proc_id,**odata2_ptr,*ctable=NULL,*btable,len_max,len_est;
  PetscInt       proc_end=0,len_unused,nodata2;
  PetscInt       ois_max; /* max number of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable; /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  PetscCall(PetscObjectGetNewTag((PetscObject)C,&tag1));
  PetscCall(PetscObjectGetNewTag((PetscObject)C,&tag2));

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len  = PetscMax(is_max, size);
  PetscCall(PetscMalloc2(len,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,&t_p));
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  PetscCall(MPIU_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm));

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  PetscCall(PetscMalloc1(is_max,&n));
  len  = 0;
  for (i=0; i<is_max; i++) {
    PetscCall(ISGetLocalSize(is[i],&n[i]));
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  PetscCall(PetscMalloc1(size*len+1,&data1));
  PetscCall(PetscMalloc1(size,&data1_start));
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  PetscCall(PetscMalloc4(size,&len_s,size,&btable,size,&iwork,size+1,&Bowners));

  /* gather c->garray from all processors */
  PetscCall(ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local));
  PetscCall(ISAllGather(garray_local, &garray_gl));
  PetscCall(ISDestroy(&garray_local));
  PetscCallMPI(MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm));

  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
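  /* e.g. (hypothetical) Bnbs = {2,0,3} on 3 ranks gives Bowners = {0,2,2,5}:
     rank i contributed entries Bowners[i]..Bowners[i+1]-1 of garray_gl */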

  if (is_max) {
    /* hash table ctable, which maps each block row to its owner proc_id */
    PetscCall(PetscMalloc1(Mbs,&ctable));
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) ctable[j] = proc_id;
    }
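    /* e.g. (hypothetical) bs=2 with scalar row ranges {0..3},{4..9} gives block
       ranges {0..1},{2..4}, hence ctable = {0,0,1,1,1} */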

    /* hash tables marking c->garray */
    PetscCall(ISGetIndices(garray_gl,&idx_i));
    for (i=0; i<size; i++) {
      table_i = table[i];
      PetscCall(PetscBTMemzero(Mbs,table_i));
      for (j = Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        PetscCall(PetscBTSet(table_i,idx_i[j]));
      }
    }
    PetscCall(ISRestoreIndices(garray_gl,&idx_i));
  }  /* if (is_max) */
  PetscCall(ISDestroy(&garray_gl));

  /* evaluate communication - mesg to who, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    PetscCall(ISGetIndices(is[i],&idx_i));
    for (j=0; j<n[i]; j++) {
      idx                = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end           = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++) {  /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    PetscCall(ISRestoreIndices(is[i],&idx_i));
  }
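  /* note an index only travels "down": processes ranked above its owner hold only
     rows past idx, and since just the upper triangle is stored they cannot hold
     column idx either, so idx goes to its owner and to those lower-ranked
     processes whose garray marks it */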

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the 1+is_max header entries */
    }
  }

  for (i=0; i<is_max; i++) {
    PetscCall(ISDestroy(&is[i]));
  }
  PetscCall(PetscFree(n));
  PetscCall(PetscFree(ctable));

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  PetscCall(PetscGatherNumberOfMessages(comm,NULL,len_s,&nrqr));
  PetscCall(PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1));

  /* Now post the sends */
  PetscCall(PetscMalloc2(size,&s_waits1,size,&s_waits2));
  k    = 0;
  for (proc_id=0; proc_id<size; proc_id++) {  /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      PetscCallMPI(MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k));
      k++;
    }
  }

  /* 2. Receive other's is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  PetscCall(PetscFree(len_r1));
  PetscCall(PetscFree(id_r1));

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  PetscCall(PetscMalloc1(len+1,&odata1));
  PetscCall(PetscMalloc1(size,&odata2_ptr));
  PetscCall(PetscBTCreate(Mbs,&otable));

  len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
  len_est = 2*len_max;       /* estimated space of storing is[] for all receiving messages */
  PetscCall(PetscMalloc1(len_est+1,&odata2));
  nodata2 = 0;               /* nodata2+1 = number of data arrays allocated for odata2_ptr[] */

  odata2_ptr[nodata2] = odata2;

  len_unused = len_est; /* unused space in the array odata2_ptr[nodata2] -- needs to be >= len_max */
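
  /* odata2 acts as a pool: replies are packed back to back, and a fresh block of
     len_est entries is allocated whenever fewer than len_max entries remain, so a
     single reply (at most len_max entries) never straddles two blocks */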

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status));
    if (flag) {
      PetscCallMPI(MPI_Get_count(&r_status,MPIU_INT,&len));
      proc_id = r_status.MPI_SOURCE;
      PetscCallMPI(MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req));
      PetscCallMPI(MPI_Wait(&r_req,&r_status));

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        PetscCall(PetscMalloc1(len_est+1,&odata2));

        odata2_ptr[++nodata2] = odata2;

        len_unused = len_est;
      }

      PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable));
      len  = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

      /* Send messages back */
      PetscCallMPI(MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k));
      k++;
      odata2        += len;
      len_unused    -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  PetscCall(PetscFree(odata1));
  PetscCall(PetscBTDestroy(&otable));

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max) { /* allocate more space for odata2 */
    PetscCall(PetscMalloc1(len_est+1,&odata2));

    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  PetscCall(MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table));
  PetscCall(PetscFree(data1_start));

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get the max length of the reply messages this processor expects to recv */
  PetscCall(MPIU_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm));
  PetscCall(PetscMalloc1(iwork[rank]+1,&data2));
  PetscCall(PetscFree4(len_s,btable,iwork,Bowners));

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    PetscCallMPI(MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status));
    if (flag) {
      PetscCallMPI(MPI_Get_count(&r_status,MPIU_INT,&len));

      proc_id = r_status.MPI_SOURCE;

      PetscCallMPI(MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req));
      PetscCallMPI(MPI_Wait(&r_req,&r_status));
      if (len > 1+is_max) { /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  PetscCall(PetscFree(data2));
  PetscCall(PetscFree2(table,t_p));

  /* wait for the phase 1 sends to complete, then free their buffer */
  PetscCall(PetscMalloc1(size,&s_status));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs,s_waits1,s_status));
  PetscCall(PetscFree(data1));

  /* likewise for the phase 2 sends */
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr,s_waits2,s_status));
  PetscCall(PetscFree2(s_waits1,s_waits2));
  PetscCall(PetscFree(s_status));

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i));
  }
  for (k=0; k<=nodata2; k++) {
    PetscCall(PetscFree(odata2_ptr[k]));
  }
  PetscCall(PetscFree(odata2_ptr));
  PetscFunctionReturn(0);
}

/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
       the work on the local processor.

     Inputs:
      C      - the MATMPISBAIJ matrix;
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] to be processed,
               MINE:  this processor's is[]
               OTHER: other processors' is[]
     Output:
       nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
               whose = OTHER:
                     only holds the newly found indices
       table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
*/
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of input is[] for look up */
  PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  PetscCall(PetscBTCreate(Mbs,&table0));

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (marks only idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process other processors' is[] - only use one temp table */
      table_i = table[0];
    }
    PetscCall(PetscBTMemzero(Mbs,table_i));
    PetscCall(PetscBTMemzero(Mbs,table0));
    if (n==0) {
      nidx[1+i] = 0;  /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      PetscCheckFalse(col >= Mbs,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %" PetscInt_FMT " >= Mbs %" PetscInt_FMT,col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        PetscCall(PetscBTSet(table0,col));
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0;  /* no. of indices from input is[i] that have been examined */
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is on input is[i]:
                                                do row search: collect all cols in this row */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
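        /* the early exit is safe: rows are visited in increasing order, so once all
           isz0 input indices have been matched every remaining row r has r+rstart >
           col_max, and in the stored upper triangle its columns exceed col_max as
           well, hence the column search below could not add anything further */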
      } else { /* row is not on input is[i]:
                  do col search: add row onto nidx_i if any col of this row is on input is[i] */
        for (l = a_start; l<a_end; l++) {  /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* out of the l loop */
          }
        }
        for (l = b_start; l<b_end; l++) {  /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* out of the l loop */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  PetscCall(PetscBTDestroy(&table0));
  PetscFunctionReturn(0);
}