xref: /petsc/src/mat/impls/sbaij/mpi/sbaijov.c (revision 7cf18bfc0af3bd20f57ca1ab9dbfc3d774cdee5d)

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

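/*
   Usage sketch (illustrative, not part of this file): these routines implement
   the MPISBAIJ back end of the generic MatIncreaseOverlap() interface, which a
   caller might invoke as

      ierr = MatIncreaseOverlap(C,1,&is,2);CHKERRQ(ierr);

   to grow the single index set 'is' by two levels of overlap on an SBAIJ
   matrix C; the arguments here are assumed values for illustration only.
*/
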
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg,*allcolumns;

  PetscFunctionBegin;
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");

  flg  = PETSC_FALSE;
  ierr = PetscOptionsHasName(PETSC_NULL,"-IncreaseOverlap_old",&flg);CHKERRQ(ierr);
  if (flg) { /* previous non-scalable implementation */
    ierr = PetscPrintf(PETSC_COMM_SELF,"use previous non-scalable implementation...\n");CHKERRQ(ierr);
    for (i=0; i<ov; ++i) {
      ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
    }
  } else { /* scalable implementation using modified BAIJ routines */
    /* XXX: nidx is still non-scalable here; a hash table (CTABLE) could be used instead */
    ierr = PetscMalloc((Mbs+1)*sizeof(PetscInt),&nidx);CHKERRQ(ierr);
    ierr = PetscBTCreate(Mbs,table);CHKERRQ(ierr); /* for column search */

    /* Create is_row */
    ierr = PetscMalloc(is_max*sizeof(IS),&is_row);CHKERRQ(ierr);
    ierr = ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);CHKERRQ(ierr); /* global rows */
    for (i=1; i<is_max; i++) is_row[i] = is_row[0]; /* reuse is_row[0] */

    /* Allocate memory to hold all the submatrices - modified from MatGetSubMatrices_MPIBAIJ() */
    ierr = PetscMalloc((is_max+1)*sizeof(Mat),&submats);CHKERRQ(ierr);

    /* Check for the special case where each processor gets entire matrix columns */
    ierr = PetscMalloc((is_max+1)*sizeof(PetscBool),&allcolumns);CHKERRQ(ierr);
    for (i=0; i<is_max; i++) {
      ierr = ISIdentity(is_new[i],&flg);CHKERRQ(ierr);
      ierr = ISGetLocalSize(is_new[i],&isz);CHKERRQ(ierr);
      if (flg && isz == Mbs) {
        allcolumns[i] = PETSC_TRUE;
      } else {
        allcolumns[i] = PETSC_FALSE;
      }
    }

    /* Determine the number of stages through which the submatrices are computed */
    nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
    nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);

    /* Make sure every processor loops through the same number of stages */
    ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
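
    /* Illustrative sizing (assumed values): with c->Nbs = 10^6 block columns and
       4-byte PetscInt, nmax = 20*10^6/(10^6*4) = 5, so is_max = 12 index sets
       would be handled in nstages_local = 12/5 + 1 = 3 stages of at most nmax
       submatrices each */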

    for (iov=0; iov<ov; ++iov) {
      /* 1) Get submats for column search */
      for (i=0,pos=0; i<nstages; i++) {
        if (pos+nmax <= is_max) max_no = nmax;
        else if (pos == is_max) max_no = 0;
        else                    max_no = is_max-pos;

        c->ijonly = PETSC_TRUE; /* only the nonzero pattern is needed for the search */
        ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,allcolumns+pos,submats+pos);CHKERRQ(ierr);
        pos += max_no;
      }

      /* 2) Row search */
      ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);

      /* 3) Column search */
      for (i=0; i<is_max; i++) {
        asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
        ai     = asub_i->i;

        /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() into the table */
        ierr = PetscBTMemzero(Mbs,table);CHKERRQ(ierr);

        ierr = ISGetIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISGetLocalSize(is_new[i],&nis);CHKERRQ(ierr);
        for (l=0; l<nis; l++) {
          ierr = PetscBTSet(table,idx[l]);CHKERRQ(ierr);
          nidx[l] = idx[l];
        }
        isz = nis;

        /* add column entries to the table: submats[i] holds the pattern of
           C(0:Mbs-1,is_new[i]), so a nonempty block row brow means row brow of C
           is connected to is_new[i] and must be added to the overlap */
        for (brow=0; brow<Mbs; brow++) {
          nz = ai[brow+1] - ai[brow];
          if (nz) {
            if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
          }
        }
        ierr = ISRestoreIndices(is_new[i],&idx);CHKERRQ(ierr);
        ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);

        /* create updated is_new */
        ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);CHKERRQ(ierr);
      }

      /* Free tmp spaces */
      for (i=0; i<is_max; i++) {
        ierr = MatDestroy(&submats[i]);CHKERRQ(ierr);
      }
    }
    ierr = PetscFree(allcolumns);CHKERRQ(ierr);
    ierr = PetscBTDestroy(table);CHKERRQ(ierr);
    ierr = PetscFree(submats);CHKERRQ(ierr);
    ierr = ISDestroy(&is_row[0]);CHKERRQ(ierr);
    ierr = PetscFree(is_row);CHKERRQ(ierr);
    ierr = PetscFree(nidx);CHKERRQ(ierr);
  }

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);CHKERRQ(ierr);

  for (i=0; i<is_max; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
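/* Example (illustrative values): for is_max = 2 with is[0] = {3,7} and
   is[1] = {5}, the communication format packs
       data1 = {2, 2, 1, 3, 7, 5}
   while data2 reserves a full Mbs-long slot per index set, so data(is[1])
   begins at data2[1 + is_max + Mbs] */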
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
                 Mbs,i,j,k,*odata1,*odata2,
                 proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,*iwork,len_unused,nodata2;
  PetscInt       ois_max; /* max number of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable;  /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn = c->B->cmap->n,Bnbs = Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len  = PetscMax(is_max, size);
  ierr = PetscMalloc2(len,PetscBT,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,char,&t_p);CHKERRQ(ierr);
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(PetscInt),&n);CHKERRQ(ierr);
  len  = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc4(size,PetscInt,&len_s,size,PetscInt,&btable,size,PetscInt,&iwork,size+1,PetscInt,&Bowners);CHKERRQ(ierr);

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local,&garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(&garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
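
  /* Illustrative (assumed values): Bnbs per rank {3,0,2} gives Bowners = {0,3,3,5},
     so rank i owns entries [Bowners[i],Bowners[i+1]) of the gathered garray */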

  if (is_max) {
    /* hash table ctable, which maps block row c->row to its owner proc_id */
    ierr = PetscMalloc(Mbs*sizeof(PetscInt),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) {
        ctable[j] = proc_id;
      }
    }
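
    /* Illustrative (assumed values): with bs = 1, rows 0..3 on rank 0 and rows
       4..7 on rank 1, ctable = {0,0,0,0,1,1,1,1} */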

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++) {
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j=Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  } /* if (is_max) */
  ierr = ISDestroy(&garray_gl);CHKERRQ(ierr);

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }
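
  /* After this loop data1_start[p] points just past the (1+is_max)-entry header
     {is_max, n_0, ..., n_{is_max-1}} in p's slice of data1; only this rank's own
     slice carries the true sizes here - the headers for other ranks are filled
     in below as indices are assigned to them */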

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++) {
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++) { /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add the header length */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  ierr = PetscFree(ctable);CHKERRQ(ierr);

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc2(size,MPI_Request,&s_waits1,size,MPI_Request,&s_waits2);CHKERRQ(ierr);
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++) { /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive others' is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt**),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1); /* max space storing all is[] for one received message */
  len_est = 2*len_max;       /* estimated space for storing is[] for all received messages */
  ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
  nodata2 = 0; /* nodata2+1 = number of buffers allocated and tracked in odata2_ptr[] */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est; /* unused space in the buffer odata2_ptr[nodata2] - must stay >= len_max */

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len = 1 + odata2[0]; /* header: is_max plus one size entry per index set */
      for (i=0; i<odata2[0]; i++) {
        len += odata2[1 + i]; /* plus the index data itself */
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2     += len;
      len_unused -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2 (=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max) { /* allocate more space for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
    len_unused = len_est;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get the maximum message length this processor may receive, to size data2 */
  ierr = MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);CHKERRQ(ierr);
  ierr = PetscFree4(len_s,btable,iwork,Bowners);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag) {
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max) { /* message contains new indices: merge data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree2(table,t_p);CHKERRQ(ierr);

  /* phase 1 sends are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* phase 2 sends are complete */
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);}
  ierr = PetscFree2(s_waits1,s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++) {
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once()
       to do the work on the local processor.

     Inputs:
      C      - MAT_MPISBAIJ;
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] is to be processed:
               MINE:  this processor's is[]
               OTHER: another processor's is[]
     Output:
       nidx  - whose = MINE:
                     holds the input and the newly found indices in the same format as data
               whose = OTHER:
                     holds only the newly found indices
       table - table[i] marks the indices of is[i], i=0,...,is_max; used only when whose=MINE
*/
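/* Illustrative layout (whose == MINE): nidx[0] = is_max and nidx[1..is_max] hold
   the new sizes; the indices of the enlarged is[i] start at nidx + 1 + is_max + Mbs*i,
   matching the data2 format documented above MatIncreaseOverlap_MPISBAIJ_Once() */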
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of the input is[] for lookup */
  PetscBT        table_i; /* points to the i-th table; when whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs = c->Mbs; mbs = a->mbs;
  ai = a->i; aj = a->j;
  bi = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to the input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to the output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (only marks idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {             /* process another processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0; /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0; /* number of indices from the input is[i] examined so far */
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is in the input is[i]:
                                                 do row search - collect all cols in this row */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not in the input is[i]:
                  do col search - add row to nidx_i if the row has a col in is[i];
                  needed because SBAIJ stores only the upper triangle */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = a_start; l<a_end; l++) */
          }
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to the input is[i+1] array */
      nidx_i += isz; /* ptr to the output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}