/*$Id: sbaijov.c,v 1.65 2001/08/06 21:15:42 bsmith Exp $*/

/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

static int MatIncreaseOverlap_MPISBAIJ_Once(Mat,int,IS *);
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat,int *,int,int **,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
int MatIncreaseOverlap_MPISBAIJ(Mat C,int is_max,IS is[],int ov)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  int          i,ierr,N=C->N,bs=c->bs;
  IS           *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,is_max,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,is_max,is_new,is);CHKERRQ(ierr);
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
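/*
   Usage sketch (hypothetical index data; in practice this routine is reached
   through the type-dispatched MatIncreaseOverlap() on a MATMPISBAIJ matrix):

      IS  is[1];
      int idx[2] = {0,5};                                      point (unblocked) row indices
      ierr = ISCreateGeneral(PETSC_COMM_SELF,2,idx,&is[0]);CHKERRQ(ierr);
      ierr = MatIncreaseOverlap(A,1,is,1);CHKERRQ(ierr);       A is an assembled MATMPISBAIJ matrix
      ierr = ISDestroy(is[0]);CHKERRQ(ierr);                   after using the enlarged index set
*/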

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + sum(size of is[k], k=0,...,i-1)] = data(is[i])
        ...
    data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, the number of index sets
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
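/*
   Packing illustration (hypothetical values): with is_max = 2, is[0] = {3,7}
   and is[1] = {5}, data1 is laid out as
       data1 = {2, 2, 1, 3, 7, 5}
       data1[0]    = 2      is_max
       data1[1]    = 2      size of is[0]
       data1[2]    = 1      size of is[1]
       data1[3..4] = 3,7    indices of is[0]
       data1[5]    = 5      indices of is[1]
   In data2 the indices of is[i] instead start at the fixed offset
   is_max + 1 + Mbs*i, leaving room for up to Mbs entries per set.
*/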
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static int MatIncreaseOverlap_MPISBAIJ_Once(Mat C,int is_max,IS is[])
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  int         len,idx,*idx_i,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
              size,rank,Mbs,i,j,k,ierr,nrqs,nrqr,*odata1,*odata2,
              tag1,tag2,flag,proc_id,**odata2_ptr,*ctable=0,*btable,len_b;
  int         *id_r1,*len_r1,proc_end=0,*iwork,*len_s;
  char        *t_p;
  MPI_Comm    comm;
  MPI_Request *s_waits1,*s_waits2,r_req;
  MPI_Status  *s_status,r_status;
  PetscBT     *table=0;  /* mark indices of this processor's is[] */
  PetscBT     table_i;
  PetscBT     otable;    /* mark indices of other processors' is[] */
  int         bs=c->bs,Bn=c->B->n,Bnbs=Bn/bs,*Bowners;
  IS          is_local,is_gl;

  PetscFunctionBegin;

  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to the other processors */
  /*--------------------------------------------------------*/
  /* allocate space */
  ierr = PetscMalloc(is_max*sizeof(int),&n);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (len == 0) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(int),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr   = PetscMalloc(size*3*sizeof(int),&len_s);CHKERRQ(ierr);
  btable = len_s + size;
  iwork  = btable + size;

  if (is_max){
    /* create hash table ctable which maps each block row to its owning proc_id */
    ierr = PetscMalloc(Mbs*sizeof(int),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<c->rowners[proc_id+1]; j++) {
        ctable[j] = proc_id;
      }
    }
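    /* e.g. (hypothetical): with size=2 and c->rowners = {0,4,9},
       ctable = {0,0,0,0,1,1,1,1,1} */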

    /* create tables for marking indices */
    len_b = is_max*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*is_max*sizeof(char) + 1;
    ierr  = PetscMalloc(len_b,&table);CHKERRQ(ierr);
    t_p   = (char *)(table + is_max);
    for (i=0; i<is_max; i++) {
      table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
    }
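    /* layout note: the single allocation above holds the is_max PetscBT
       pointers followed by the bitmap storage itself, one
       (Mbs/PETSC_BITS_PER_BYTE+1)-byte bitmap per index set */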

    ierr = ISCreateGeneral(comm,Bnbs,c->garray,&is_local);CHKERRQ(ierr);
    ierr = ISAllGather(is_local,&is_gl);CHKERRQ(ierr);
    ierr = ISDestroy(is_local);CHKERRQ(ierr);

    ierr = PetscMalloc((size+1)*sizeof(int),&Bowners);CHKERRQ(ierr);
    ierr = MPI_Allgather(&Bnbs,1,MPI_INT,Bowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
    Bowners[0] = 0;
    for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];

    /* hash table table_i[idx] = 1 if idx is on the is[] array
                               = 0 otherwise */
    table_i = table[0];
    ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    for (i=0; i<is_max; i++){
      ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
      for (j=0; j<n[i]; j++){
        idx  = idx_i[j];
        ierr = PetscBTSet(table_i,idx);CHKERRQ(ierr);
      }
      ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
    }

    /* hash table btable[id_proc] = 1 if a col index of the B-matrix on [id_proc] is on the is[] array
                                  = 0 otherwise */
    ierr = ISGetIndices(is_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++){
      btable[i] = 0;
      for (j=Bowners[i]; j<Bowners[i+1]; j++){ /* go through the B cols of [i] */
        idx = idx_i[j];
        if (PetscBTLookup(table_i,idx)){
          btable[i] = 1;
          break;
        }
      }
    }
    ierr = ISRestoreIndices(is_gl,&idx_i);CHKERRQ(ierr);
    ierr = PetscFree(Bowners);CHKERRQ(ierr);
    ierr = ISDestroy(is_gl);CHKERRQ(ierr);
  } /* if (is_max) */

  /* evaluate communication - message to whom, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id] = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !btable[proc_id]) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* add the header length to the message length */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  if (ctable){ierr = PetscFree(ctable);CHKERRQ(ierr);}

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);
  /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] nrqs: %d, nrqr: %d\n",rank,nrqs,nrqr); */
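  /* these two collectives let each process learn how many messages it will
     receive (nrqr), their lengths (len_r1) and the sending ranks (id_r1),
     without an explicit all-to-all exchange of the send sizes */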

  /* Now post the sends */
  ierr = PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  s_waits2 = s_waits1 + size;
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){ /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPI_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,&data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 3. Receive other processors' is[], process them, and send the results back */
  /*-----------------------------------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
    /* ierr = PetscPrintf(PETSC_COMM_SELF, "[%d] expect to recv len=%d from [%d]\n",rank,len_r1[i],id_r1[i]); */
  }
  ierr = PetscMalloc((len+1)*sizeof(int),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int*),&odata2_ptr);CHKERRQ(ierr);
  k = 0;
  while (k < nrqr){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF, " [%d] recv %d from [%d]\n",rank,len,proc_id); */

      /* Process messages */
      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,&odata2_ptr[k],&otable);CHKERRQ(ierr);
      odata2 = odata2_ptr[k];
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPI_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] send %d back to [%d] \n",rank,len,proc_id); */
      k++;
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);

  /* 4. Receive the work done on other processors, then merge */
  /*-----------------------------------------------------------*/
  /* Allocate memory for incoming data */
  len  = 1 + is_max*(Mbs+1);
  ierr = PetscMalloc(len*sizeof(int),&data2);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPI_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPI_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      /* ierr = PetscPrintf(PETSC_COMM_SELF," [%d] recv %d from [%d], data2:\n",rank,len,proc_id); */
      if (len > 1+is_max){ /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }

  /* wait for the phase 1 sends to complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs){
    ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* wait for the phase 3 sends to complete */
  if (nrqr){
    ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);
  }
  for (k=0; k<nrqr; k++){
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  /* 5. Create the new is[] */
  /*------------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree(data);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  if (table) {ierr = PetscFree(table);CHKERRQ(ierr);}
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  ierr = PetscFree(len_s);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);
  ierr = PetscFree(len_r1);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once()
       to do the work on the local processor.

     Inputs:
      C      - the MATMPISBAIJ matrix
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] is to be processed:
               MINE:  this processor's is[]
               OTHER: another processor's is[]
     Output:
       data_new  - whose = MINE:
                     holds the input and the newly found indices in the same format as data
                   whose = OTHER:
                     holds only the newly found indices
       table     - table[i]: marks the indices of is[i], i=0,...,is_max-1. Used only when whose=MINE.
*/
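/*
   Output illustration (hypothetical): with whose=OTHER and data = {1, 2, 3, 7}
   (a single set containing block rows 3 and 7), data_new comes back as
   {1, k, v_0, ..., v_{k-1}}, where the v_j are only the indices newly
   discovered on this processor.
*/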
static int MatIncreaseOverlap_MPISBAIJ_Local(Mat C,int *data,int whose,int **data_new,PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(c->B)->data;
  int          ierr,row,mbs,Mbs,*nidx,*nidx_i,col,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  int          a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT      table0;  /* marks the indices of the input is[] for lookup */
  PetscBT      table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs = c->Mbs; mbs = a->mbs;
  ai = a->i; aj = a->j;
  bi = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstart;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  ierr = PetscMalloc((1+is_max*(Mbs+1))*sizeof(int),&nidx);CHKERRQ(ierr);
  nidx[0] = is_max;

  idx_i  = data + is_max + 1; /* ptr to the input is[0] array */
  nidx_i = nidx + is_max + 1; /* ptr to the output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize table_i, set table0 */
    if (whose == MINE){ /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process another processor's is[] - use a single temp table */
      table_i = *table;
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n > 0) {
      isz0 = 0;
      for (j=0; j<n; j++){
        col = idx_i[j];
        if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %d >= Mbs %d",col,Mbs);
        if (!PetscBTLookupSet(table_i,col)) {
          ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
          if (whose == MINE) {nidx_i[isz0] = col;}
          isz0++;
        }
      }

      if (whose == MINE) {isz = isz0;}
      k = 0;  /* no. of indices from input is[i] that have been examined */
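      /* Since SBAIJ stores only the upper triangle, the overlap must be grown
         in both directions: the "row search" below collects the columns of
         rows already on is[i], while the "col search" adds any row whose
         stored columns hit is[i], recovering the (unstored) transposed
         couplings. */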
      for (row=0; row<mbs; row++){
        a_start = ai[row]; a_end = ai[row+1];
        b_start = bi[row]; b_end = bi[row+1];
        if (PetscBTLookup(table0,row+rstart)){ /* row is on input is[i]:
           do row search: collect all cols in this row */
          for (l = a_start; l<a_end; l++){ /* Amat */
            col = aj[l] + rstart;
            if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
          }
          for (l = b_start; l<b_end; l++){ /* Bmat */
            col = garray[bj[l]];
            if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
          }
          k++;
          if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
        } else { /* row is not on input is[i]:
          do col search: add row to nidx_i if a col of this row is on input is[i] */
          for (l = a_start; l<a_end; l++){ /* Amat */
            col = aj[l] + rstart;
            if (PetscBTLookup(table0,col)){
              if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
              break; /* for (l = a_start; l<a_end; l++) */
            }
          }
          for (l = b_start; l<b_end; l++){ /* Bmat */
            col = garray[bj[l]];
            if (PetscBTLookup(table0,col)){
              if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
              break; /* for (l = b_start; l<b_end; l++) */
            }
          }
        }
      }
    } /* if (n > 0) */

    if (i < is_max - 1){
      idx_i  += n;   /* ptr to the input is[i+1] array */
      nidx_i += isz; /* ptr to the output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of the new is[i] */
  } /* for each is */
  *data_new = nidx;
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
468