xref: /petsc/src/mat/impls/sbaij/mpi/sbaijov.c (revision 2205254efee3a00a594e5e2a3a70f74dcb40bc03)
1 
2 /*
3    Routines to compute overlapping regions of a parallel MPI matrix.
4    Used for finding submatrices that are shared across processors.
5 */
6 #include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
7 #include <petscbt.h>
8 
9 static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
10 static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);
11 
12 #undef __FUNCT__
13 #define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
14 PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
15 {
16   PetscErrorCode ierr;
17   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
18   IS             *is_new,*is_row;
19   Mat            *submats;
20   Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
21   Mat_SeqSBAIJ   *asub_i;
22   PetscBT        table;
23   PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
24   const PetscInt *idx;
25   PetscBool      flg,*allcolumns,*allrows;
26 
27   PetscFunctionBegin;
28   ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
29   /* Convert the indices into block format */
30   ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);CHKERRQ(ierr);
31   if (ov < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");}
32 
33   /* ----- previous non-scalable implementation ----- */
34   flg=PETSC_FALSE;
35   ierr = PetscOptionsHasName(PETSC_NULL, "-IncreaseOverlap_old", &flg);CHKERRQ(ierr);
36   if (flg) { /* previous non-scalable implementation */
37     ierr = PetscPrintf(((PetscObject)C)->comm,"Use the previous non-scalable implementation...\n");CHKERRQ(ierr);
38     for (i=0; i<ov; ++i) {
39       ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
40     }
41   } else { /* implementation using modified BAIJ routines */
42 
43   ierr = PetscMalloc((Mbs+1)*sizeof(PetscInt),&nidx);CHKERRQ(ierr);
44   ierr = PetscBTCreate(Mbs,&table);CHKERRQ(ierr); /* for column search */
45   ierr = PetscMalloc2(is_max+1,PetscBool,&allcolumns,is_max+1,PetscBool,&allrows);CHKERRQ(ierr);
46 
47   /* Create is_row */
48   ierr = PetscMalloc(is_max*sizeof(IS),&is_row);CHKERRQ(ierr);
49   ierr = ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);CHKERRQ(ierr);
50   allrows[0] = PETSC_TRUE;
51   for (i=1; i<is_max; i++) {
52     is_row[i]  = is_row[0]; /* reuse is_row[0] */
53     allrows[i] = PETSC_TRUE;
54   }
55 
56   /* Allocate memory to hold all the submatrices - Modified from MatGetSubMatrices_MPIBAIJ() */
57   ierr = PetscMalloc((is_max+1)*sizeof(Mat),&submats);CHKERRQ(ierr);
58 
59   /* Check for special case: each processor gets entire matrix columns */
60   for (i=0; i<is_max; i++) {
61     ierr = ISIdentity(is_new[i],&flg);CHKERRQ(ierr);
62     ierr = ISGetLocalSize(is_new[i],&isz);CHKERRQ(ierr);
63     if (flg && isz == Mbs) {
64       allcolumns[i] = PETSC_TRUE;
65     } else {
66       allcolumns[i] = PETSC_FALSE;
67     }
68   }
69 
70   /* Determine the number of stages in which the submatrices are extracted */
71   nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
72   if (!nmax) nmax = 1;
73   nstages_local = is_max/nmax + ((is_max % nmax)?1:0);
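  /* Illustrative note (added commentary, numbers are made up): nmax caps how many
     index sets are handled per stage so that the workspace stays near 20 MB.
     E.g. with c->Nbs = 500000 and 8-byte PetscInt, nmax = 20000000/(500000*8) = 5,
     so is_max = 12 index sets are processed in nstages_local = 12/5 + 1 = 3 stages. */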
74 
75   /* Make sure every processor loops through the nstages */
76   ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
77 
78   for (iov=0; iov<ov; ++iov) {
79     /* 1) Get submats for column search */
80     for (i=0,pos=0; i<nstages; i++) {
81       if (pos+nmax <= is_max) max_no = nmax;
82       else if (pos == is_max) max_no = 0;
83       else                   max_no = is_max-pos;
84       c->ijonly = PETSC_TRUE;
85       ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allrows+pos,allcolumns+pos,submats+pos);CHKERRQ(ierr);
86       pos += max_no;
87     }
88 
89     /* 2) Row search */
90     ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
91 
92     /* 3) Column search */
93     for (i=0; i<is_max; i++) {
94       asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
95       ai = asub_i->i;
96 
97       /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() into the table */
98       ierr = PetscBTMemzero(Mbs,table);CHKERRQ(ierr);
99 
100       ierr = ISGetIndices(is_new[i],&idx);CHKERRQ(ierr);
101       ierr = ISGetLocalSize(is_new[i],&nis);CHKERRQ(ierr);
102       for (l=0; l<nis; l++) {
103         ierr = PetscBTSet(table,idx[l]);CHKERRQ(ierr);
104         nidx[l] = idx[l];
105       }
106       isz = nis;
107 
108       /* add column entries to table */
109       for (brow=0; brow<Mbs; brow++) {
110         nz = ai[brow+1] - ai[brow];
111         if (nz) {
112           if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
113         }
114       }
115       ierr = ISRestoreIndices(is_new[i],&idx);CHKERRQ(ierr);
116       ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);
117 
118       /* create updated is_new */
119       ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);CHKERRQ(ierr);
120     }
121 
122     /* Free tmp spaces */
123     for (i=0; i<is_max; i++) {
124       ierr = MatDestroy(&submats[i]);CHKERRQ(ierr);
125     }
126   }
127   ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
128   ierr = PetscBTDestroy(&table);CHKERRQ(ierr);
129   ierr = PetscFree(submats);CHKERRQ(ierr);
130   ierr = ISDestroy(&is_row[0]);CHKERRQ(ierr);
131   ierr = PetscFree(is_row);CHKERRQ(ierr);
132   ierr = PetscFree(nidx);CHKERRQ(ierr);
133 
134   }
135 
136   for (i=0; i<is_max; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
137   ierr = ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);CHKERRQ(ierr);
138 
139   for (i=0; i<is_max; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
140   ierr = PetscFree(is_new);CHKERRQ(ierr);
141   PetscFunctionReturn(0);
142 }
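/*
   Illustrative usage sketch (added commentary, not part of the library source):
   the routine above is normally reached through the public MatIncreaseOverlap()
   interface once a MATMPISBAIJ matrix has been assembled; the index sets are
   assumed to come from the caller, e.g. a domain-decomposition preconditioner.

       Mat C;      // assembled MATMPISBAIJ matrix
       IS  is[2];  // index sets to be enlarged, one per local subdomain
       ...         // create and fill C and is[] here
       ierr = MatIncreaseOverlap(C,2,is,1);CHKERRQ(ierr);
       // each is[i] now also contains the block rows/columns coupled to its original entries

   Running with -IncreaseOverlap_old selects the older, non-scalable path that
   applies MatIncreaseOverlap_MPISBAIJ_Once() once per level of overlap.
*/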
143 
144 typedef enum {MINE,OTHER} WhoseOwner;
145 /*  data1, odata1 and odata2 are packed in the format (for communication):
146        data[0]          = is_max, the number of index sets (is)
147        data[1]          = size of is[0]
148         ...
149        data[is_max]     = size of is[is_max-1]
150        data[is_max + 1] = data(is[0])
151         ...
152        data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
153         ...
154    data2 is packed in the format (for creating output is[]):
155        data[0]          = is_max, no of is
156        data[1]          = size of is[0]
157         ...
158        data[is_max]     = size of is[is_max-1]
159        data[is_max + 1] = data(is[0])
160         ...
161        data[is_max + 1 + Mbs*i] = data(is[i])
162         ...
163 */
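/*
   Illustrative example of the packing above (added commentary, values made up):
   with is_max = 2, is[0] = {3,7,9} and is[1] = {5}, data1/odata1/odata2 hold

       data[0] = 2           is_max
       data[1] = 3           size of is[0]
       data[2] = 1           size of is[1]
       data[3..5] = 3,7,9    indices of is[0]
       data[6]    = 5        indices of is[1]

   i.e. data = {2, 3, 1, 3, 7, 9, 5}; in data2 the indices of is[i] instead start
   at the fixed offset is_max + 1 + Mbs*i, regardless of the sizes of is[0..i-1].
*/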
164 #undef __FUNCT__
165 #define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
166 static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
167 {
168   Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
169   PetscErrorCode ierr;
170   PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
171   const PetscInt *idx_i;
172   PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
173                  Mbs,i,j,k,*odata1,*odata2,
174                  proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
175   PetscInt       proc_end=0,len_unused,nodata2;
176   PetscInt       ois_max; /* max no. of is[] over all processors */
177   char           *t_p;
178   MPI_Comm       comm;
179   MPI_Request    *s_waits1,*s_waits2,r_req;
180   MPI_Status     *s_status,r_status;
181   PetscBT        *table;  /* mark indices of this processor's is[] */
182   PetscBT        table_i;
183   PetscBT        otable; /* mark indices of other processors' is[] */
184   PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
185   IS             garray_local,garray_gl;
186 
187   PetscFunctionBegin;
188   comm = ((PetscObject)C)->comm;
189   size = c->size;
190   rank = c->rank;
191   Mbs  = c->Mbs;
192 
193   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
194   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
195 
196   /* create tables used in
197      step 1: table[i] - mark c->garray of proc [i]
198      step 3: table[i] - mark indices of is[i] when whose=MINE
199              table[0] - mark indices of is[] when whose=OTHER */
200   len = PetscMax(is_max, size);
201   ierr = PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);CHKERRQ(ierr);
202   for (i=0; i<len; i++) {
203     table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
204   }
205 
206   ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
207 
208   /* 1. Send this processor's is[] to other processors */
209   /*---------------------------------------------------*/
210   /* allocate spaces */
211   ierr = PetscMalloc(is_max*sizeof(PetscInt),&n);CHKERRQ(ierr);
212   len = 0;
213   for (i=0; i<is_max; i++) {
214     ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
215     len += n[i];
216   }
217   if (!len) {
218     is_max = 0;
219   } else {
220     len += 1 + is_max; /* max length of data1 for one processor */
221   }
222 
223 
224   ierr = PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);CHKERRQ(ierr);
225   ierr = PetscMalloc(size*sizeof(PetscInt*),&data1_start);CHKERRQ(ierr);
226   for (i=0; i<size; i++) data1_start[i] = data1 + i*len;
227 
228   ierr = PetscMalloc4(size,PetscMPIInt,&len_s,size,PetscInt,&btable,size,PetscMPIInt,&iwork,size+1,PetscInt,&Bowners);CHKERRQ(ierr);
229 
230   /* gather c->garray from all processors */
231   ierr = ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);CHKERRQ(ierr);
232   ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
233   ierr = ISDestroy(&garray_local);CHKERRQ(ierr);
234   ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
235   Bowners[0] = 0;
236   for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
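  /* Illustrative example (added commentary, sizes made up): with 3 processes whose
     off-diagonal parts have Bnbs = 2, 4 and 1 block columns, the Allgather fills
     Bowners[1..3] = {2,4,1} and the running sum turns it into Bowners = {0,2,6,7},
     so the garray entries of process i occupy positions Bowners[i]..Bowners[i+1]-1
     of garray_gl. */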
237 
238   if (is_max) {
239     /* hash table ctable which maps c->row to proc_id */
240     ierr = PetscMalloc(Mbs*sizeof(PetscInt),&ctable);CHKERRQ(ierr);
241     for (proc_id=0,j=0; proc_id<size; proc_id++) {
242       for (; j<C->rmap->range[proc_id+1]/bs; j++) {
243         ctable[j] = proc_id;
244       }
245     }
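    /* Illustrative example (added commentary, layout made up): with Mbs = 10 block
       rows split 4/3/3 over 3 processes, C->rmap->range/bs = {0,4,7,10} and the
       loops above produce ctable = {0,0,0,0,1,1,1,2,2,2}, i.e. ctable[row] is the
       rank that owns block row 'row'. */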
246 
247     /* hash tables marking c->garray */
248     ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
249     for (i=0; i<size; i++) {
250       table_i = table[i];
251       ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
252       for (j = Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i]*/
253         ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
254       }
255     }
256     ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
257   }  /* if (is_max) */
258   ierr = ISDestroy(&garray_gl);CHKERRQ(ierr);
259 
260   /* evaluate communication - mesg to who, length, and buffer space */
261   for (i=0; i<size; i++) len_s[i] = 0;
262 
263   /* header of data1 */
264   for (proc_id=0; proc_id<size; proc_id++) {
265     iwork[proc_id] = 0;
266     *data1_start[proc_id] = is_max;
267     data1_start[proc_id]++;
268     for (j=0; j<is_max; j++) {
269       if (proc_id == rank) {
270         *data1_start[proc_id] = n[j];
271       } else {
272         *data1_start[proc_id] = 0;
273       }
274       data1_start[proc_id]++;
275     }
276   }
277 
278   for (i=0; i<is_max; i++) {
279     ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
280     for (j=0; j<n[i]; j++) {
281       idx = idx_i[j];
282       *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
283       proc_end = ctable[idx];
284       for (proc_id=0;  proc_id<=proc_end; proc_id++) { /* for others to process */
285         if (proc_id == rank) continue; /* done before this loop */
286         if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
287           continue;   /* no need for sending idx to [proc_id] */
288         *data1_start[proc_id] = idx; data1_start[proc_id]++;
289         len_s[proc_id]++;
290       }
291     }
292     /* update header data */
293     for (proc_id=0; proc_id<size; proc_id++) {
294       if (proc_id == rank) continue;
295       *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
296       iwork[proc_id] = len_s[proc_id];
297     }
298     ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
299   }
300 
301   nrqs = 0; nrqr = 0;
302   for (i=0; i<size; i++) {
303     data1_start[i] = data1 + i*len;
304     if (len_s[i]) {
305       nrqs++;
306       len_s[i] += 1 + is_max; /* add no. of header msg */
307     }
308   }
309 
310   for (i=0; i<is_max; i++) {
311     ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
312   }
313   ierr = PetscFree(n);CHKERRQ(ierr);
314   ierr = PetscFree(ctable);CHKERRQ(ierr);
315 
316   /* Determine the number of messages to expect, their lengths, and from-ids */
317   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
318   ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);
319 
320   /*  Now  post the sends */
321   ierr = PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);CHKERRQ(ierr);
322   k = 0;
323   for (proc_id=0; proc_id<size; proc_id++) {  /* send data1 to processor [proc_id] */
324     if (len_s[proc_id]) {
325       ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
326       k++;
327     }
328   }
329 
330   /* 2. Receive other's is[] and process. Then send back */
331   /*-----------------------------------------------------*/
332   len = 0;
333   for (i=0; i<nrqr; i++) {
334     if (len_r1[i] > len) len = len_r1[i];
335   }
336   ierr = PetscFree(len_r1);CHKERRQ(ierr);
337   ierr = PetscFree(id_r1);CHKERRQ(ierr);
338 
339   for (proc_id=0; proc_id<size; proc_id++)
340     len_s[proc_id] = iwork[proc_id] = 0;
341 
342   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&odata1);CHKERRQ(ierr);
343   ierr = PetscMalloc(size*sizeof(PetscInt**),&odata2_ptr);CHKERRQ(ierr);
344   ierr = PetscBTCreate(Mbs,&otable);CHKERRQ(ierr);
345 
346   len_max = ois_max*(Mbs+1);  /* max space storing all is[] for each receive */
347   len_est = 2*len_max; /* estimated space of storing is[] for all receiving messages */
348   ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
349   nodata2 = 0;       /* nodata2+1: number of chunks allocated for odata2_ptr[] */
350   odata2_ptr[nodata2] = odata2;
351   len_unused = len_est; /* unused space in the chunk odata2_ptr[nodata2] -- must stay >= len_max */
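  /* Illustrative sizing example (added commentary, numbers made up): with
     ois_max = 4 and Mbs = 1000, len_max = 4*1001 = 4004 and len_est = 8008 entries.
     Each processed message consumes 'len' entries of the current chunk; a fresh
     chunk of len_est entries is allocated whenever fewer than len_max entries
     remain, so there is always room for the reply to the next message. */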
352 
353   k = 0;
354   while (k < nrqr) {
355     /* Receive messages */
356     ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
357     if (flag) {
358       ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
359       proc_id = r_status.MPI_SOURCE;
360       ierr = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
361       ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
362 
363       /*  Process messages */
364       /*  make sure there is enough unused space in odata2 array */
365       if (len_unused < len_max) { /* allocate more space for odata2 */
366         ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
367         odata2_ptr[++nodata2] = odata2;
368         len_unused = len_est;
369       }
370 
371       ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
372       len = 1 + odata2[0];
373       for (i=0; i<odata2[0]; i++) {
374         len += odata2[1 + i];
375       }
376 
377       /* Send messages back */
378       ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
379       k++;
380       odata2     += len;
381       len_unused -= len;
382       len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
383     }
384   }
385   ierr = PetscFree(odata1);CHKERRQ(ierr);
386   ierr = PetscBTDestroy(&otable);CHKERRQ(ierr);
387 
388   /* 3. Do local work on this processor's is[] */
389   /*-------------------------------------------*/
390   /* make sure there is enough unused space in odata2(=data) array */
391   len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
392   if (len_unused < len_max) { /* allocate more space for odata2 */
393     ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
394     odata2_ptr[++nodata2] = odata2;
395   }
396 
397   data = odata2;
398   ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
399   ierr = PetscFree(data1_start);CHKERRQ(ierr);
400 
401   /* 4. Receive work done on other processors, then merge */
402   /*------------------------------------------------------*/
403   /* get max length of the messages that this processor expects to recv */
404   ierr = MPI_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
405   ierr = PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);CHKERRQ(ierr);
406   ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);
407 
408   k = 0;
409   while (k < nrqs) {
410     /* Receive messages */
411     ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
412     if (flag) {
413       ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
414       proc_id = r_status.MPI_SOURCE;
415       ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
416       ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
417       if (len > 1+is_max) { /* Add data2 into data */
418         data2_i = data2 + 1 + is_max;
419         for (i=0; i<is_max; i++) {
420           table_i = table[i];
421           data_i  = data + 1 + is_max + Mbs*i;
422           isz     = data[1+i];
423           for (j=0; j<data2[1+i]; j++) {
424             col = data2_i[j];
425             if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
426           }
427           data[1+i] = isz;
428           if (i < is_max - 1) data2_i += data2[1+i];
429         }
430       }
431       k++;
432     }
433   }
434   ierr = PetscFree(data2);CHKERRQ(ierr);
435   ierr = PetscFree2(table,t_p);CHKERRQ(ierr);
436 
437   /* wait for phase 1 sends to complete */
438   ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
439   if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
440   ierr = PetscFree(data1);CHKERRQ(ierr);
441 
442   /* wait for phase 2 sends to complete */
443   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
444   ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
445   ierr = PetscFree(s_status);CHKERRQ(ierr);
446 
447   /* 5. Create new is[] */
448   /*--------------------*/
449   for (i=0; i<is_max; i++) {
450     data_i = data + 1 + is_max + Mbs*i;
451     ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
452   }
453   for (k=0; k<=nodata2; k++) {
454     ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
455   }
456   ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);
457   PetscFunctionReturn(0);
458 }
459 
460 #undef __FUNCT__
461 #define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
462 /*
463    MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
464        the work on the local processor.
465 
466      Inputs:
467       C      - a MATMPISBAIJ matrix;
468       data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
469       whose  - whose is[] to be processed,
470                MINE:  this processor's is[]
471                OTHER: other processor's is[]
472      Output:
473        nidx  - whose = MINE:
474                      holds input and newly found indices in the same format as data
475                whose = OTHER:
476                      only holds the newly found indices
477        table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only in the case 'whose=MINE'.
478 */
479 /* Would computation be reduced by swapping the loop 'for each is' and 'for each row'? */
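/*
   Illustrative example of the two output formats (added commentary, indices made up):
   for whose=MINE with is_max = 2 and Mbs = 8, nidx[1..2] hold the new sizes and the
   enlarged is[i] is written at the fixed slot nidx + 1 + is_max + Mbs*i, e.g.

       nidx = {2, 4, 3, | 1,2,5,6,_,_,_,_ | 0,3,7,_,_,_,_,_}    ('_' = unused slot)

   whereas for whose=OTHER only the newly found indices are stored, packed
   contiguously in the same layout as data1:

       nidx = {2, 2, 1, 5, 6, 3}
*/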
480 static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
481 {
482   Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
483   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
484   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
485   PetscErrorCode ierr;
486   PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
487   PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
488   PetscBT        table0;  /* mark the indices of input is[] for look up */
489   PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */
490 
491   PetscFunctionBegin;
492   Mbs = c->Mbs; mbs = a->mbs;
493   ai = a->i; aj = a->j;
494   bi = b->i; bj = b->j;
495   garray = c->garray;
496   rstart = c->rstartbs;
497   is_max = data[0];
498 
499   ierr = PetscBTCreate(Mbs,&table0);CHKERRQ(ierr);
500 
501   nidx[0] = is_max;
502   idx_i   = data + is_max + 1; /* ptr to input is[0] array */
503   nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
504   for (i=0; i<is_max; i++) { /* for each is */
505     isz  = 0;
506     n = data[1+i]; /* size of input is[i] */
507 
508     /* initialize and set table_i(mark idx and nidx) and table0(only mark idx) */
509     if (whose == MINE) { /* process this processor's is[] */
510       table_i = table[i];
511       nidx_i  = nidx + 1+ is_max + Mbs*i;
512     } else {            /* process other processor's is[] - only use one temp table */
513       table_i = table[0];
514     }
515     ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
516     ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
517     if (n==0) {
518        nidx[1+i] = 0; /* size of new is[i] */
519        continue;
520     }
521 
522     isz0 = 0; col_max = 0;
523     for (j=0; j<n; j++) {
524       col = idx_i[j];
525       if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
526       if (!PetscBTLookupSet(table_i,col)) {
527         ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
528         if (whose == MINE) {nidx_i[isz0] = col;}
529         if (col_max < col) col_max = col;
530         isz0++;
531       }
532     }
533 
534     if (whose == MINE) {isz = isz0;}
535     k = 0;  /* no. of indices from input is[i] that have been examined */
536     for (row=0; row<mbs; row++) {
537       a_start = ai[row]; a_end = ai[row+1];
538       b_start = bi[row]; b_end = bi[row+1];
539       if (PetscBTLookup(table0,row+rstart)) { /* row is on input is[i]:
540                                                 do row search: collect all col in this row */
541         for (l = a_start; l<a_end ; l++) { /* Amat */
542           col = aj[l] + rstart;
543           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
544         }
545         for (l = b_start; l<b_end ; l++) { /* Bmat */
546           col = garray[bj[l]];
547           if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
548         }
549         k++;
550         if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
551       } else { /* row is not on input is[i]:
552                   do col search: add row to nidx_i if this row contains a col that is in the input is[i] */
553         for (l = a_start; l<a_end ; l++) { /* Amat */
554           col = aj[l] + rstart;
555           if (col > col_max) break;
556           if (PetscBTLookup(table0,col)) {
557             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
558             break; /* for (l = a_start; l<a_end; l++) */
559           }
560         }
561         for (l = b_start; l<b_end ; l++) { /* Bmat */
562           col = garray[bj[l]];
563           if (col > col_max) break;
564           if (PetscBTLookup(table0,col)) {
565             if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
566             break; /* for (l = b_start; l<b_end; l++) */
567           }
568         }
569       }
570     }
571 
572     if (i < is_max - 1) {
573       idx_i  += n;   /* ptr to input is[i+1] array */
574       nidx_i += isz; /* ptr to output is[i+1] array */
575     }
576     nidx[1+i] = isz; /* size of new is[i] */
577   } /* for each is */
578   ierr = PetscBTDestroy(&table0);CHKERRQ(ierr);
579   PetscFunctionReturn(0);
580 }
581 
582 
583