
/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that are shared across processors.
*/
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
#include "petscbt.h"

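/*
   Note: PetscBT (from petscbt.h) is the bit-array type used below to mark block-row
   indices.  A rough usage sketch (illustration only, not part of this routine's logic;
   bt and idx are placeholders):

     PetscBT bt;
     ierr = PetscBTCreate(Mbs,bt);CHKERRQ(ierr);              one bit per block row
     ierr = PetscBTMemzero(Mbs,bt);CHKERRQ(ierr);             clear all marks
     if (!PetscBTLookupSet(bt,idx)) { first visit }           returns old bit, then sets it
     if (PetscBTLookup(bt,idx))     { already marked }        query without modifying
     ierr = PetscBTDestroy(bt);CHKERRQ(ierr);
*/
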
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ"
PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->N, bs=C->bs;
  IS             *is_new;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  ierr = PetscMalloc(is_max*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,is_max,is,is_new);CHKERRQ(ierr);
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,is_max,is_new,is);CHKERRQ(ierr);
  for (i=0; i<is_max; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
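
/*
   Rough usage sketch (illustration only; A, nlocal and rstart are placeholders and
   error handling is trimmed): this routine is reached through MatIncreaseOverlap()
   on a MATMPISBAIJ matrix, typically before extracting an overlapping submatrix.

     IS  is;
     Mat *submat;
     ierr = ISCreateStride(PETSC_COMM_SELF,nlocal,rstart,1,&is);CHKERRQ(ierr);
     ierr = MatIncreaseOverlap(A,1,&is,2);CHKERRQ(ierr);                     2 levels of overlap
     ierr = MatGetSubMatrices(A,1,&is,&is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/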

typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, number of index sets is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
   data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, number of index sets is[]
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
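/* Worked example of the packed layout (illustrative only): with is_max = 2,
   is[0] = {3,7} and is[1] = {5}, data1/odata1/odata2 contain

       { 2,   2, 1,   3, 7,   5 }

   i.e. 2 sets, their sizes 2 and 1, then the entries of is[0] followed immediately
   by those of is[1].  In data2 the entries of is[i] instead start at the fixed offset
   is_max + 1 + Mbs*i, so each set has room for up to Mbs entries. */
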
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ  *c = (Mat_MPISBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len;
  PetscInt       idx,*idx_i,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i,
                 Mbs,i,j,k,*odata1,*odata2,
                 proc_id,**odata2_ptr,*ctable=0,*btable,len_max,len_est;
  PetscInt       proc_end=0,*iwork,len_unused,nodata2;
  PetscInt       ois_max; /* max number of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable; /* mark indices of other processors' is[] */
  PetscInt       bs=C->bs,Bn = c->B->n,Bnbs = Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscFunctionBegin;
  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len = PetscMax(is_max, size);
  len_max = len*sizeof(PetscBT) + (Mbs/PETSC_BITS_PER_BYTE+1)*len*sizeof(char) + 1;
  ierr = PetscMalloc(len_max,&table);CHKERRQ(ierr);
  t_p  = (char *)(table + len);
  for (i=0; i<len; i++) {
    table[i]  = t_p  + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }
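  /* The single malloc above is laid out as
       [ len PetscBT pointers | len bit arrays of (Mbs/PETSC_BITS_PER_BYTE+1) chars each ]
     with table[i] pointing at the i-th bit array, so one PetscFree(table) later releases
     everything at once. */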

  ierr = MPI_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  ierr = PetscMalloc(is_max*sizeof(PetscInt),&n);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<is_max; i++) {
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  ierr = PetscMalloc((size*len+1)*sizeof(PetscInt),&data1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&data1_start);CHKERRQ(ierr);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  ierr = PetscMalloc((size*4+1)*sizeof(PetscInt),&len_s);CHKERRQ(ierr);
  btable  = (PetscInt*)(len_s + size);
  iwork   = btable + size;
  Bowners = iwork + size;

  /* gather c->garray from all processors */
  ierr = ISCreateGeneral(comm,Bnbs,c->garray,&garray_local);CHKERRQ(ierr);
  ierr = ISAllGather(garray_local, &garray_gl);CHKERRQ(ierr);
  ierr = ISDestroy(garray_local);CHKERRQ(ierr);
  ierr = MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
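  /* Bowners is now a prefix sum: entries Bowners[i]..Bowners[i+1]-1 of garray_gl are
     proc [i]'s c->garray, i.e. the global block columns of its off-diagonal part B.
     For example (illustrative only), Bnbs = {3,5,2} on 3 processes gives
     Bowners = {0,3,8,10}. */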

  if (is_max){
    /* hash table ctable which maps c->row to proc_id */
    ierr = PetscMalloc(Mbs*sizeof(PetscInt),&ctable);CHKERRQ(ierr);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<c->rowners[proc_id+1]; j++) {
        ctable[j] = proc_id;
      }
    }

    /* hash tables marking c->garray */
    ierr = ISGetIndices(garray_gl,&idx_i);CHKERRQ(ierr);
    for (i=0; i<size; i++){
      table_i = table[i];
      ierr    = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
      for (j = Bowners[i]; j<Bowners[i+1]; j++){ /* go through B cols of proc[i] */
        ierr = PetscBTSet(table_i,idx_i[j]);CHKERRQ(ierr);
      }
    }
    ierr = ISRestoreIndices(garray_gl,&idx_i);CHKERRQ(ierr);
  }  /* if (is_max) */
  ierr = ISDestroy(garray_gl);CHKERRQ(ierr);

  /* evaluate communication - messages to whom, their lengths, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++){
    iwork[proc_id] = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank){
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISGetIndices(is[i],&idx_i);CHKERRQ(ierr);
    for (j=0; j<n[i]; j++){
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
      for (proc_id=0; proc_id<=proc_end; proc_id++){ /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx))
          continue;   /* no need for sending idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++){
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id] = len_s[proc_id];
    }
    ierr = ISRestoreIndices(is[i],&idx_i);CHKERRQ(ierr);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++){
    data1_start[i] = data1 + i*len;
    if (len_s[i]){
      nrqs++;
      len_s[i] += 1 + is_max; /* account for the header entries */
    }
  }

  for (i=0; i<is_max; i++) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(n);CHKERRQ(ierr);
  if (ctable){ierr = PetscFree(ctable);CHKERRQ(ierr);}

  /* Determine the number of messages to expect, their lengths, and the ids they come from */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc(2*size*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  s_waits2 = s_waits1 + size;
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++){  /* send data1 to processor [proc_id] */
    if (len_s[proc_id]){
      ierr = MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);CHKERRQ(ierr);
      k++;
    }
  }

  /* 2. Receive others' is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++){
    if (len_r1[i] > len) len = len_r1[i];
  }
  ierr = PetscFree(len_r1);CHKERRQ(ierr);
  ierr = PetscFree(id_r1);CHKERRQ(ierr);

  for (proc_id=0; proc_id<size; proc_id++)
    len_s[proc_id] = iwork[proc_id] = 0;

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&odata1);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscInt*),&odata2_ptr);CHKERRQ(ierr);
  ierr = PetscBTCreate(Mbs,otable);CHKERRQ(ierr);

  len_max = ois_max*(Mbs+1);  /* max space for storing all is[] from one receive */
  len_est = 2*len_max; /* estimated space for storing is[] for all received messages */
  ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
  nodata2 = 0;       /* nodata2+1 = number of blocks allocated for odata2_ptr[] */
  odata2_ptr[nodata2] = odata2;
  len_unused = len_est; /* unused space in the block odata2_ptr[nodata2] -- must stay >= len_max */
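  /* sizing sketch (illustrative only): one reply holds at most ois_max sets of at most
     Mbs indices plus a header, which len_max = ois_max*(Mbs+1) is meant to cover; with,
     say, ois_max = 2 and Mbs = 100 this gives len_max = 202 and len_est = 404, and a new
     block is malloc'ed whenever fewer than len_max entries remain unused */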

  k = 0;
  while (k < nrqr){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);

      /* Process messages */
      /* make sure there is enough unused space in odata2 array */
      if (len_unused < len_max){ /* allocate more space for odata2 */
        ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
        odata2_ptr[++nodata2] = odata2;
        len_unused = len_est;
      }

      ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);CHKERRQ(ierr);
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++){
        len += odata2[1 + i];
      }

      /* Send messages back */
      ierr = MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);CHKERRQ(ierr);
      k++;
      odata2     += len;
      len_unused -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  ierr = PetscFree(odata1);CHKERRQ(ierr);
  ierr = PetscBTDestroy(otable);CHKERRQ(ierr);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max){ /* allocate more space for odata2 */
    ierr = PetscMalloc((len_est+1)*sizeof(PetscInt),&odata2);CHKERRQ(ierr);
    odata2_ptr[++nodata2] = odata2;
    len_unused = len_est;
  }

  data = odata2;
  ierr = MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);CHKERRQ(ierr);
  ierr = PetscFree(data1_start);CHKERRQ(ierr);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get max length of the messages that this processor expects to receive */
  ierr = MPI_Allreduce(len_s,iwork,size,MPIU_INT,MPI_MAX,comm);CHKERRQ(ierr);
  ierr = PetscMalloc((iwork[rank]+1)*sizeof(PetscInt),&data2);CHKERRQ(ierr);
  ierr = PetscFree(len_s);CHKERRQ(ierr);

  k = 0;
  while (k < nrqs){
    /* Receive messages */
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);CHKERRQ(ierr);
    if (flag){
      ierr = MPI_Get_count(&r_status,MPIU_INT,&len);CHKERRQ(ierr);
      proc_id = r_status.MPI_SOURCE;
      ierr = MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);CHKERRQ(ierr);
      ierr = MPI_Wait(&r_req,&r_status);CHKERRQ(ierr);
      if (len > 1+is_max){ /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++){
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++){
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) {data_i[isz++] = col;}
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  ierr = PetscFree(data2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);

  /* wait until the phase-1 sends are complete */
  ierr = PetscMalloc(size*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs){
    ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(data1);CHKERRQ(ierr);

  /* wait until the phase-2 sends are complete */
  if (nrqr){
    ierr = MPI_Waitall(nrqr,s_waits2,s_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ierr = ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,is+i);CHKERRQ(ierr);
  }
  for (k=0; k<=nodata2; k++){
    ierr = PetscFree(odata2_ptr[k]);CHKERRQ(ierr);
  }
  ierr = PetscFree(odata2_ptr);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPISBAIJ_Local"
/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap_MPISBAIJ_Once() to do
       the work on the local processor.

     Inputs:
      C      - the MPISBAIJ matrix
      data   - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
      whose  - whose is[] to be processed,
               MINE:  this processor's is[]
               OTHER: another processor's is[]
     Output:
       nidx  - whose = MINE:
                     holds input and newly found indices in the same format as data
               whose = OTHER:
                     only holds the newly found indices
       table - table[i]: mark the indices of is[i], i=0,...,is_max-1. Used only when whose=MINE.
*/
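/* Worked example of the nidx layout (illustrative only): with is_max = 2 and Mbs = 10,
   whose = MINE packs

       nidx = { 2,  n0, n1,  <is[0] indices at nidx+3, up to 10 slots>,
                             <is[1] indices at nidx+13, up to 10 slots> }

   i.e. new is[i] starts at nidx[1 + is_max + Mbs*i] and has nidx[1+i] entries; for
   whose = OTHER the newly found sets are instead packed back to back, like data1 above. */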
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscErrorCode ierr;
  PetscInt       row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt       a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT        table0;  /* mark the indices of input is[] for lookup */
  PetscBT        table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  PetscFunctionBegin;
  Mbs = c->Mbs; mbs = a->mbs;
  ai = a->i; aj = a->j;
  bi = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstart;
  is_max = data[0];

  ierr = PetscBTCreate(Mbs,table0);CHKERRQ(ierr);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz  = 0;
    n = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (mark idx and nidx) and table0 (only mark idx) */
    if (whose == MINE){ /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else {            /* process other processor's is[] - only use one temp table */
      table_i = table[0];
    }
    ierr = PetscBTMemzero(Mbs,table_i);CHKERRQ(ierr);
    ierr = PetscBTMemzero(Mbs,table0);CHKERRQ(ierr);
    if (n==0) {
      nidx[1+i] = 0; /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++){
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        ierr = PetscBTSet(table0,col);CHKERRQ(ierr);
        if (whose == MINE) {nidx_i[isz0] = col;}
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) {isz = isz0;}
    k = 0;  /* no. of indices from input is[i] that have been examined */
    for (row=0; row<mbs; row++){
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)){ /* row is in input is[i]:
                                                do row search: collect all cols in this row */
        for (l = a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        for (l = b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) {nidx_i[isz++] = col;}
        }
        k++;
        if (k >= isz0) break; /* out of the row loop */
      } else { /* row is not in input is[i]:
                  do col search: add row to nidx_i if one of its cols is in input is[i] */
        for (l = a_start; l<a_end; l++){ /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* out of the l loop */
          }
        }
        for (l = b_start; l<b_end; l++){ /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)){
            if (!PetscBTLookupSet(table_i,row+rstart)) {nidx_i[isz++] = row+rstart;}
            break; /* out of the l loop */
          }
        }
      }
    }

    if (i < is_max - 1){
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  ierr = PetscBTDestroy(table0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}