1 #define PETSCMAT_DLL
2 
/*
   Basic functions for parallel dense matrices.
*/
6 
7 
8 #include "src/mat/impls/dense/mpi/mpidense.h"    /*I   "petscmat.h"  I*/
9 #if defined(PETSC_HAVE_PLAPACK)
10 static PetscMPIInt Plapack_nprows,Plapack_npcols,Plapack_ierror,Plapack_nb_alg;
11 static MPI_Comm Plapack_comm_2d;
12 #endif
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatDenseGetLocalMatrix"
/*@
      MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the
              sequential matrix that stores the local entries. For a sequential matrix
              it returns the matrix itself.

    Input Parameter:
.      A - the sequential or MPI dense matrix
23 
24     Output Parameter:
25 .      B - the inner matrix
26 
27     Level: intermediate
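
    Example usage (a minimal sketch; assumes A is an assembled dense matrix and
    omits the surrounding function):
.vb
      Mat         Aloc;
      PetscScalar *v;
      ierr = MatDenseGetLocalMatrix(A,&Aloc);CHKERRQ(ierr);
      ierr = MatGetArray(Aloc,&v);CHKERRQ(ierr);    /* column-major local entries */
      ierr = MatRestoreArray(Aloc,&v);CHKERRQ(ierr);
.ve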
28 
29 @*/
30 PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
31 {
32   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
33   PetscErrorCode ierr;
34   PetscTruth     flg;
35 
36   PetscFunctionBegin;
37   ierr = PetscTypeCompare((PetscObject)A,MATMPIDENSE,&flg);CHKERRQ(ierr);
38   if (flg) {
39     *B = mat->A;
40   } else {
41     *B = A;
42   }
43   PetscFunctionReturn(0);
44 }
45 
46 #undef __FUNCT__
47 #define __FUNCT__ "MatGetRow_MPIDense"
48 PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
49 {
50   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
51   PetscErrorCode ierr;
52   PetscInt       lrow,rstart = A->rmap.rstart,rend = A->rmap.rend;
53 
54   PetscFunctionBegin;
  if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_SUP,"Only local rows supported");
56   lrow = row - rstart;
57   ierr = MatGetRow(mat->A,lrow,nz,(const PetscInt **)idx,(const PetscScalar **)v);CHKERRQ(ierr);
58   PetscFunctionReturn(0);
59 }
60 
61 #undef __FUNCT__
62 #define __FUNCT__ "MatRestoreRow_MPIDense"
63 PetscErrorCode MatRestoreRow_MPIDense(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
64 {
65   PetscErrorCode ierr;
66 
67   PetscFunctionBegin;
68   if (idx) {ierr = PetscFree(*idx);CHKERRQ(ierr);}
69   if (v) {ierr = PetscFree(*v);CHKERRQ(ierr);}
70   PetscFunctionReturn(0);
71 }
72 
73 EXTERN_C_BEGIN
74 #undef __FUNCT__
75 #define __FUNCT__ "MatGetDiagonalBlock_MPIDense"
76 PetscErrorCode PETSCMAT_DLLEXPORT MatGetDiagonalBlock_MPIDense(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *B)
77 {
78   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
79   PetscErrorCode ierr;
80   PetscInt       m = A->rmap.n,rstart = A->rmap.rstart;
81   PetscScalar    *array;
82   MPI_Comm       comm;
83 
84   PetscFunctionBegin;
85   if (A->rmap.N != A->cmap.N) SETERRQ(PETSC_ERR_SUP,"Only square matrices supported.");
86 
87   /* The reuse aspect is not implemented efficiently */
88   if (reuse) { ierr = MatDestroy(*B);CHKERRQ(ierr);}
89 
90   ierr = PetscObjectGetComm((PetscObject)(mdn->A),&comm);CHKERRQ(ierr);
91   ierr = MatGetArray(mdn->A,&array);CHKERRQ(ierr);
92   ierr = MatCreate(comm,B);CHKERRQ(ierr);
93   ierr = MatSetSizes(*B,m,m,m,m);CHKERRQ(ierr);
94   ierr = MatSetType(*B,((PetscObject)mdn->A)->type_name);CHKERRQ(ierr);
95   ierr = MatSeqDenseSetPreallocation(*B,array+m*rstart);CHKERRQ(ierr);
96   ierr = MatRestoreArray(mdn->A,&array);CHKERRQ(ierr);
97   ierr = MatAssemblyBegin(*B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
98   ierr = MatAssemblyEnd(*B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
99 
100   *iscopy = PETSC_TRUE;
101   PetscFunctionReturn(0);
102 }
103 EXTERN_C_END
104 
105 #undef __FUNCT__
106 #define __FUNCT__ "MatSetValues_MPIDense"
107 PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
108 {
109   Mat_MPIDense   *A = (Mat_MPIDense*)mat->data;
110   PetscErrorCode ierr;
111   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;
112   PetscTruth     roworiented = A->roworiented;
113 
114   PetscFunctionBegin;
115   for (i=0; i<m; i++) {
116     if (idxm[i] < 0) continue;
117     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
118     if (idxm[i] >= rstart && idxm[i] < rend) {
119       row = idxm[i] - rstart;
120       if (roworiented) {
121         ierr = MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);CHKERRQ(ierr);
122       } else {
123         for (j=0; j<n; j++) {
124           if (idxn[j] < 0) continue;
125           if (idxn[j] >= mat->cmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
126           ierr = MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);CHKERRQ(ierr);
127         }
128       }
129     } else {
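      /* off-process row: buffer the values in the matrix stash; they are
         communicated to the owning process during MatAssemblyBegin()/End() */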
130       if (!A->donotstash) {
131         if (roworiented) {
132           ierr = MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n);CHKERRQ(ierr);
133         } else {
134           ierr = MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m);CHKERRQ(ierr);
135         }
136       }
137     }
138   }
139   PetscFunctionReturn(0);
140 }
141 
142 #undef __FUNCT__
143 #define __FUNCT__ "MatGetValues_MPIDense"
144 PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
145 {
146   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
147   PetscErrorCode ierr;
148   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;
149 
150   PetscFunctionBegin;
151   for (i=0; i<m; i++) {
152     if (idxm[i] < 0) continue; /* SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row"); */
153     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
154     if (idxm[i] >= rstart && idxm[i] < rend) {
155       row = idxm[i] - rstart;
156       for (j=0; j<n; j++) {
157         if (idxn[j] < 0) continue; /* SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column"); */
158         if (idxn[j] >= mat->cmap.N) {
159           SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
160         }
161         ierr = MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);CHKERRQ(ierr);
162       }
163     } else {
164       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
165     }
166   }
167   PetscFunctionReturn(0);
168 }
169 
170 #undef __FUNCT__
171 #define __FUNCT__ "MatGetArray_MPIDense"
172 PetscErrorCode MatGetArray_MPIDense(Mat A,PetscScalar *array[])
173 {
174   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
175   PetscErrorCode ierr;
176 
177   PetscFunctionBegin;
178   ierr = MatGetArray(a->A,array);CHKERRQ(ierr);
179   PetscFunctionReturn(0);
180 }
181 
182 #undef __FUNCT__
183 #define __FUNCT__ "MatGetSubMatrix_MPIDense"
184 static PetscErrorCode MatGetSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,PetscInt cs,MatReuse scall,Mat *B)
185 {
186   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data,*newmatd;
187   Mat_SeqDense   *lmat = (Mat_SeqDense*)mat->A->data;
188   PetscErrorCode ierr;
189   PetscInt       i,j,*irow,*icol,rstart,rend,nrows,ncols,nlrows,nlcols;
190   PetscScalar    *av,*bv,*v = lmat->v;
191   Mat            newmat;
192 
193   PetscFunctionBegin;
194   ierr = ISGetIndices(isrow,&irow);CHKERRQ(ierr);
195   ierr = ISGetIndices(iscol,&icol);CHKERRQ(ierr);
196   ierr = ISGetLocalSize(isrow,&nrows);CHKERRQ(ierr);
197   ierr = ISGetLocalSize(iscol,&ncols);CHKERRQ(ierr);
198 
  /* No parallel redistribution currently supported! Should really check each index set
     to confirm that it is OK.  Currently supports only a submatrix with the same
     partitioning as the original matrix! */
202 
203   ierr = MatGetLocalSize(A,&nlrows,&nlcols);CHKERRQ(ierr);
204   ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
205 
206   /* Check submatrix call */
207   if (scall == MAT_REUSE_MATRIX) {
208     /* SETERRQ(PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
209     /* Really need to test rows and column sizes! */
210     newmat = *B;
211   } else {
212     /* Create and fill new matrix */
213     ierr = MatCreate(((PetscObject)A)->comm,&newmat);CHKERRQ(ierr);
214     ierr = MatSetSizes(newmat,nrows,cs,PETSC_DECIDE,ncols);CHKERRQ(ierr);
215     ierr = MatSetType(newmat,((PetscObject)A)->type_name);CHKERRQ(ierr);
216     ierr = MatMPIDenseSetPreallocation(newmat,PETSC_NULL);CHKERRQ(ierr);
217   }
218 
219   /* Now extract the data pointers and do the copy, column at a time */
220   newmatd = (Mat_MPIDense*)newmat->data;
221   bv      = ((Mat_SeqDense *)newmatd->A->data)->v;
222 
223   for (i=0; i<ncols; i++) {
224     av = v + nlrows*icol[i];
225     for (j=0; j<nrows; j++) {
226       *bv++ = av[irow[j] - rstart];
227     }
228   }
229 
230   /* Assemble the matrices so that the correct flags are set */
231   ierr = MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
232   ierr = MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
233 
234   /* Free work space */
235   ierr = ISRestoreIndices(isrow,&irow);CHKERRQ(ierr);
236   ierr = ISRestoreIndices(iscol,&icol);CHKERRQ(ierr);
237   *B = newmat;
238   PetscFunctionReturn(0);
239 }
240 
241 #undef __FUNCT__
242 #define __FUNCT__ "MatRestoreArray_MPIDense"
243 PetscErrorCode MatRestoreArray_MPIDense(Mat A,PetscScalar *array[])
244 {
245   PetscFunctionBegin;
246   PetscFunctionReturn(0);
247 }
248 
249 #undef __FUNCT__
250 #define __FUNCT__ "MatAssemblyBegin_MPIDense"
251 PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
252 {
253   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
254   MPI_Comm       comm = ((PetscObject)mat)->comm;
255   PetscErrorCode ierr;
256   PetscInt       nstash,reallocs;
257   InsertMode     addv;
258 
259   PetscFunctionBegin;
260   /* make sure all processors are either in INSERTMODE or ADDMODE */
261   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);CHKERRQ(ierr);
262   if (addv == (ADD_VALUES|INSERT_VALUES)) {
263     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
264   }
265   mat->insertmode = addv; /* in case this processor had no cache */
266 
267   ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap.range);CHKERRQ(ierr);
268   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
269   ierr = PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
270   PetscFunctionReturn(0);
271 }
272 
273 #undef __FUNCT__
274 #define __FUNCT__ "MatAssemblyEnd_MPIDense"
275 PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
276 {
277   Mat_MPIDense    *mdn=(Mat_MPIDense*)mat->data;
278   PetscErrorCode  ierr;
279   PetscInt        i,*row,*col,flg,j,rstart,ncols;
280   PetscMPIInt     n;
281   PetscScalar     *val;
282   InsertMode      addv=mat->insertmode;
283 
284   PetscFunctionBegin;
285   /*  wait on receives */
286   while (1) {
287     ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
288     if (!flg) break;
289 
290     for (i=0; i<n;) {
291       /* Now identify the consecutive vals belonging to the same row */
292       for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
293       if (j < n) ncols = j-i;
294       else       ncols = n-i;
295       /* Now assemble all these values with a single function call */
296       ierr = MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
297       i = j;
298     }
299   }
300   ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
301 
302   ierr = MatAssemblyBegin(mdn->A,mode);CHKERRQ(ierr);
303   ierr = MatAssemblyEnd(mdn->A,mode);CHKERRQ(ierr);
304 
305   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
306     ierr = MatSetUpMultiply_MPIDense(mat);CHKERRQ(ierr);
307   }
308   PetscFunctionReturn(0);
309 }
310 
311 #undef __FUNCT__
312 #define __FUNCT__ "MatZeroEntries_MPIDense"
313 PetscErrorCode MatZeroEntries_MPIDense(Mat A)
314 {
315   PetscErrorCode ierr;
316   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;
317 
318   PetscFunctionBegin;
319   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
320   PetscFunctionReturn(0);
321 }
322 
/* The code does not handle the diagonal entries correctly unless the
   matrix is square and the column and row ownerships are identical.
   This is a BUG. The only way to fix it seems to be to access
   mdn->A and mdn->B directly and not through the MatZeroRows()
   routine.
*/
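
/* MatZeroRows_MPIDense() communicates in three stages:
     1) using A->rmap.range, each process determines which process owns each of
        the N row indices it was given and counts how many go to each owner;
     2) PetscMaxSum() tells every process how many messages to expect and their
        maximum length; receives are posted and the indices sent to their owners;
     3) each process shifts the indices it received into local numbering and
        zeros those rows of its local block via MatZeroRows() on l->A.
*/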
329 #undef __FUNCT__
330 #define __FUNCT__ "MatZeroRows_MPIDense"
331 PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
332 {
333   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;
334   PetscErrorCode ierr;
335   PetscInt       i,*owners = A->rmap.range;
336   PetscInt       *nprocs,j,idx,nsends;
337   PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
338   PetscInt       *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source;
339   PetscInt       *lens,*lrows,*values;
340   PetscMPIInt    n,imdex,rank = l->rank,size = l->size;
341   MPI_Comm       comm = ((PetscObject)A)->comm;
342   MPI_Request    *send_waits,*recv_waits;
343   MPI_Status     recv_status,*send_status;
344   PetscTruth     found;
345 
346   PetscFunctionBegin;
347   /*  first count number of contributors to each processor */
348   ierr  = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
349   ierr  = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
350   ierr  = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/
351   for (i=0; i<N; i++) {
352     idx = rows[i];
353     found = PETSC_FALSE;
354     for (j=0; j<size; j++) {
355       if (idx >= owners[j] && idx < owners[j+1]) {
356         nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
357       }
358     }
359     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
360   }
361   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
362 
363   /* inform other processors of number of messages and max length*/
364   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
365 
366   /* post receives:   */
367   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
368   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
369   for (i=0; i<nrecvs; i++) {
370     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
371   }
372 
373   /* do sends:
374       1) starts[i] gives the starting index in svalues for stuff going to
375          the ith processor
376   */
377   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr);
378   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
379   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
380   starts[0]  = 0;
381   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
382   for (i=0; i<N; i++) {
383     svalues[starts[owner[i]]++] = rows[i];
384   }
385 
386   starts[0] = 0;
387   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
388   count = 0;
389   for (i=0; i<size; i++) {
390     if (nprocs[2*i+1]) {
391       ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
392     }
393   }
394   ierr = PetscFree(starts);CHKERRQ(ierr);
395 
396   base = owners[rank];
397 
398   /*  wait on receives */
399   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
400   source = lens + nrecvs;
401   count  = nrecvs; slen = 0;
402   while (count) {
403     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
404     /* unpack receives into our local space */
405     ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
406     source[imdex]  = recv_status.MPI_SOURCE;
407     lens[imdex]    = n;
408     slen += n;
409     count--;
410   }
411   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
412 
413   /* move the data into the send scatter */
414   ierr = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
415   count = 0;
416   for (i=0; i<nrecvs; i++) {
417     values = rvalues + i*nmax;
418     for (j=0; j<lens[i]; j++) {
419       lrows[count++] = values[j] - base;
420     }
421   }
422   ierr = PetscFree(rvalues);CHKERRQ(ierr);
423   ierr = PetscFree(lens);CHKERRQ(ierr);
424   ierr = PetscFree(owner);CHKERRQ(ierr);
425   ierr = PetscFree(nprocs);CHKERRQ(ierr);
426 
427   /* actually zap the local rows */
428   ierr = MatZeroRows(l->A,slen,lrows,diag);CHKERRQ(ierr);
429   ierr = PetscFree(lrows);CHKERRQ(ierr);
430 
431   /* wait on sends */
432   if (nsends) {
433     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
434     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
435     ierr = PetscFree(send_status);CHKERRQ(ierr);
436   }
437   ierr = PetscFree(send_waits);CHKERRQ(ierr);
438   ierr = PetscFree(svalues);CHKERRQ(ierr);
439 
440   PetscFunctionReturn(0);
441 }
442 
443 #undef __FUNCT__
444 #define __FUNCT__ "MatMult_MPIDense"
445 PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
446 {
447   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
448   PetscErrorCode ierr;
449 
450   PetscFunctionBegin;
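  /* every process needs all of xx, since its local block holds all N columns:
     gather xx into the work vector lvec, then compute yy = A_local * lvec */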
451   ierr = VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
452   ierr = VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
453   ierr = MatMult_SeqDense(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);
454   PetscFunctionReturn(0);
455 }
456 
457 #undef __FUNCT__
458 #define __FUNCT__ "MatMultAdd_MPIDense"
459 PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
460 {
461   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
462   PetscErrorCode ierr;
463 
464   PetscFunctionBegin;
465   ierr = VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
466   ierr = VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
467   ierr = MatMultAdd_SeqDense(mdn->A,mdn->lvec,yy,zz);CHKERRQ(ierr);
468   PetscFunctionReturn(0);
469 }
470 
471 #undef __FUNCT__
472 #define __FUNCT__ "MatMultTranspose_MPIDense"
473 PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
474 {
475   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
476   PetscErrorCode ierr;
477   PetscScalar    zero = 0.0;
478 
479   PetscFunctionBegin;
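  /* yy = A^T * xx: apply the transpose of the local m-by-N block, giving a
     full-length partial result in lvec, then sum the partial results from all
     processes into yy with a reverse ADD_VALUES scatter */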
480   ierr = VecSet(yy,zero);CHKERRQ(ierr);
481   ierr = MatMultTranspose_SeqDense(a->A,xx,a->lvec);CHKERRQ(ierr);
482   ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
483   ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
484   PetscFunctionReturn(0);
485 }
486 
487 #undef __FUNCT__
488 #define __FUNCT__ "MatMultTransposeAdd_MPIDense"
489 PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
490 {
491   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
492   PetscErrorCode ierr;
493 
494   PetscFunctionBegin;
495   ierr = VecCopy(yy,zz);CHKERRQ(ierr);
496   ierr = MatMultTranspose_SeqDense(a->A,xx,a->lvec);CHKERRQ(ierr);
497   ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
498   ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
499   PetscFunctionReturn(0);
500 }
501 
502 #undef __FUNCT__
503 #define __FUNCT__ "MatGetDiagonal_MPIDense"
504 PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
505 {
506   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
507   Mat_SeqDense   *aloc = (Mat_SeqDense*)a->A->data;
508   PetscErrorCode ierr;
509   PetscInt       len,i,n,m = A->rmap.n,radd;
510   PetscScalar    *x,zero = 0.0;
511 
512   PetscFunctionBegin;
513   ierr = VecSet(v,zero);CHKERRQ(ierr);
514   ierr = VecGetArray(v,&x);CHKERRQ(ierr);
515   ierr = VecGetSize(v,&n);CHKERRQ(ierr);
516   if (n != A->rmap.N) SETERRQ(PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
517   len  = PetscMin(a->A->rmap.n,a->A->cmap.n);
518   radd = A->rmap.rstart*m;
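  /* the local block is stored column-major with leading dimension m, so the
     diagonal entry of global row rstart+i is at offset (rstart+i)*m + i,
     i.e. radd + i*m + i, in aloc->v */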
519   for (i=0; i<len; i++) {
520     x[i] = aloc->v[radd + i*m + i];
521   }
522   ierr = VecRestoreArray(v,&x);CHKERRQ(ierr);
523   PetscFunctionReturn(0);
524 }
525 
526 #undef __FUNCT__
527 #define __FUNCT__ "MatDestroy_MPIDense"
528 PetscErrorCode MatDestroy_MPIDense(Mat mat)
529 {
530   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
531   PetscErrorCode ierr;
532 #if defined(PETSC_HAVE_PLAPACK)
533   Mat_Plapack   *lu=(Mat_Plapack*)(mat->spptr);
534 #endif
535 
536   PetscFunctionBegin;
537 
538 #if defined(PETSC_USE_LOG)
539   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap.N,mat->cmap.N);
540 #endif
541   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
542   ierr = MatDestroy(mdn->A);CHKERRQ(ierr);
543   if (mdn->lvec)   {ierr = VecDestroy(mdn->lvec);CHKERRQ(ierr);}
544   if (mdn->Mvctx)  {ierr = VecScatterDestroy(mdn->Mvctx);CHKERRQ(ierr);}
545 #if defined(PETSC_HAVE_PLAPACK)
546   if (lu) {
547     ierr = PLA_Obj_free(&lu->A);CHKERRQ(ierr);
548     ierr = PLA_Obj_free (&lu->pivots);CHKERRQ(ierr);
549     ierr = PLA_Temp_free(&lu->templ);CHKERRQ(ierr);
550 
551     if (lu->is_pla) {
552       ierr = ISDestroy(lu->is_pla);CHKERRQ(ierr);
553       ierr = ISDestroy(lu->is_petsc);CHKERRQ(ierr);
554       ierr = VecScatterDestroy(lu->ctx);CHKERRQ(ierr);
555     }
556   }
557 #endif
558 
559   ierr = PetscFree(mdn);CHKERRQ(ierr);
560   ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
561   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);CHKERRQ(ierr);
562   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C","",PETSC_NULL);CHKERRQ(ierr);
563   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
564   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
565   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
566   PetscFunctionReturn(0);
567 }
568 
569 #undef __FUNCT__
570 #define __FUNCT__ "MatView_MPIDense_Binary"
571 static PetscErrorCode MatView_MPIDense_Binary(Mat mat,PetscViewer viewer)
572 {
573   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
574   PetscErrorCode    ierr;
575   PetscViewerFormat format;
576   int               fd;
577   PetscInt          header[4],mmax,N = mat->cmap.N,i,j,m,k;
578   PetscMPIInt       rank,tag  = ((PetscObject)viewer)->tag,size;
579   PetscScalar       *work,*v,*vv;
580   Mat_SeqDense      *a = (Mat_SeqDense*)mdn->A->data;
581   MPI_Status        status;
582 
583   PetscFunctionBegin;
584   if (mdn->size == 1) {
585     ierr = MatView(mdn->A,viewer);CHKERRQ(ierr);
586   } else {
587     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
588     ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
589     ierr = MPI_Comm_size(((PetscObject)mat)->comm,&size);CHKERRQ(ierr);
590 
591     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
592     if (format == PETSC_VIEWER_BINARY_NATIVE) {
593 
594       if (!rank) {
595         /* store the matrix as a dense matrix */
596         header[0] = MAT_FILE_COOKIE;
597         header[1] = mat->rmap.N;
598         header[2] = N;
599         header[3] = MATRIX_BINARY_FORMAT_DENSE;
600         ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
601 
602         /* get largest work array needed for transposing array */
603         mmax = mat->rmap.n;
604         for (i=1; i<size; i++) {
605           mmax = PetscMax(mmax,mat->rmap.range[i+1] - mat->rmap.range[i]);
606         }
607         ierr = PetscMalloc(mmax*N*sizeof(PetscScalar),&work);CHKERRQ(ierr);
608 
        /* write out the local block by rows: transpose a->v (stored column-major) into work[] */
610         m    = mat->rmap.n;
611         v    = a->v;
612         for (j=0; j<N; j++) {
613           for (i=0; i<m; i++) {
614             work[j + i*N] = *v++;
615           }
616         }
617         ierr = PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);CHKERRQ(ierr);
618         /* get largest work array to receive messages from other processes, excludes process zero */
619         mmax = 0;
620         for (i=1; i<size; i++) {
621           mmax = PetscMax(mmax,mat->rmap.range[i+1] - mat->rmap.range[i]);
622         }
623         ierr = PetscMalloc(mmax*N*sizeof(PetscScalar),&vv);CHKERRQ(ierr);
624         for(k = 1; k < size; k++) {
625           v    = vv;
626           m    = mat->rmap.range[k+1] - mat->rmap.range[k];
627           ierr = MPI_Recv(v,m*N,MPIU_SCALAR,k,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
628 
629           for(j = 0; j < N; j++) {
630             for(i = 0; i < m; i++) {
631               work[j + i*N] = *v++;
632             }
633           }
634           ierr = PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);CHKERRQ(ierr);
635         }
636         ierr = PetscFree(work);CHKERRQ(ierr);
637         ierr = PetscFree(vv);CHKERRQ(ierr);
638       } else {
639         ierr = MPI_Send(a->v,mat->rmap.n*mat->cmap.N,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
640       }
641     } else {
      SETERRQ(PETSC_ERR_SUP,"To store a parallel dense matrix you must first call PetscViewerSetFormat(viewer,PETSC_VIEWER_BINARY_NATIVE)");
643     }
644   }
645   PetscFunctionReturn(0);
646 }
647 
648 #undef __FUNCT__
649 #define __FUNCT__ "MatView_MPIDense_ASCIIorDraworSocket"
650 static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
651 {
652   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
653   PetscErrorCode    ierr;
654   PetscMPIInt       size = mdn->size,rank = mdn->rank;
655   PetscViewerType   vtype;
656   PetscTruth        iascii,isdraw;
657   PetscViewer       sviewer;
658   PetscViewerFormat format;
659 #if defined(PETSC_HAVE_PLAPACK)
660   Mat_Plapack       *lu=(Mat_Plapack*)(mat->spptr);
661 #endif
662 
663   PetscFunctionBegin;
664   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
665   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
666   if (iascii) {
667     ierr = PetscViewerGetType(viewer,&vtype);CHKERRQ(ierr);
668     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
669     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
670       MatInfo info;
671       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
672       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"  [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap.n,
673                    (PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
674       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
675       ierr = VecScatterView(mdn->Mvctx,viewer);CHKERRQ(ierr);
676 #if defined(PETSC_HAVE_PLAPACK)
677       ierr = PetscViewerASCIIPrintf(viewer,"PLAPACK run parameters:\n");CHKERRQ(ierr);
678       ierr = PetscViewerASCIIPrintf(viewer,"  Processor mesh: nprows %d, npcols %d\n",Plapack_nprows, Plapack_npcols);CHKERRQ(ierr);
679       ierr = PetscViewerASCIIPrintf(viewer,"  Distr. block size nb: %d \n",lu->nb);CHKERRQ(ierr);
680       ierr = PetscViewerASCIIPrintf(viewer,"  Error checking: %d\n",Plapack_ierror);CHKERRQ(ierr);
681       ierr = PetscViewerASCIIPrintf(viewer,"  Algorithmic block size: %d\n",Plapack_nb_alg);CHKERRQ(ierr);
682 #endif
683       PetscFunctionReturn(0);
684     } else if (format == PETSC_VIEWER_ASCII_INFO) {
685       PetscFunctionReturn(0);
686     }
687   } else if (isdraw) {
688     PetscDraw  draw;
689     PetscTruth isnull;
690 
691     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
692     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
693     if (isnull) PetscFunctionReturn(0);
694   }
695 
696   if (size == 1) {
697     ierr = MatView(mdn->A,viewer);CHKERRQ(ierr);
698   } else {
699     /* assemble the entire matrix onto first processor. */
700     Mat         A;
701     PetscInt    M = mat->rmap.N,N = mat->cmap.N,m,row,i,nz;
702     PetscInt    *cols;
703     PetscScalar *vals;
704 
705     ierr = MatCreate(((PetscObject)mat)->comm,&A);CHKERRQ(ierr);
706     if (!rank) {
707       ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
708     } else {
709       ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
710     }
711     /* Since this is a temporary matrix, MATMPIDENSE instead of ((PetscObject)A)->type_name here is probably acceptable. */
712     ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
    ierr = MatMPIDenseSetPreallocation(A,PETSC_NULL);CHKERRQ(ierr);
714     ierr = PetscLogObjectParent(mat,A);CHKERRQ(ierr);
715 
716     /* Copy the matrix ... This isn't the most efficient means,
717        but it's quick for now */
718     A->insertmode = INSERT_VALUES;
719     row = mat->rmap.rstart; m = mdn->A->rmap.n;
720     for (i=0; i<m; i++) {
721       ierr = MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
722       ierr = MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
723       ierr = MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
724       row++;
725     }
726 
727     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
728     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
729     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
730     if (!rank) {
731       ierr = MatView(((Mat_MPIDense*)(A->data))->A,sviewer);CHKERRQ(ierr);
732     }
733     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
734     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
735     ierr = MatDestroy(A);CHKERRQ(ierr);
736   }
737   PetscFunctionReturn(0);
738 }
739 
740 #undef __FUNCT__
741 #define __FUNCT__ "MatView_MPIDense"
742 PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
743 {
744   PetscErrorCode ierr;
745   PetscTruth     iascii,isbinary,isdraw,issocket;
746 
747   PetscFunctionBegin;
748 
749   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
750   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
751   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
752   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
753 
754   if (iascii || issocket || isdraw) {
755     ierr = MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
756   } else if (isbinary) {
757     ierr = MatView_MPIDense_Binary(mat,viewer);CHKERRQ(ierr);
758   } else {
759     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPI dense matrix",((PetscObject)viewer)->type_name);
760   }
761   PetscFunctionReturn(0);
762 }
763 
764 #undef __FUNCT__
765 #define __FUNCT__ "MatGetInfo_MPIDense"
766 PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
767 {
768   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
769   Mat            mdn = mat->A;
770   PetscErrorCode ierr;
771   PetscReal      isend[5],irecv[5];
772 
773   PetscFunctionBegin;
774   info->rows_global    = (double)A->rmap.N;
775   info->columns_global = (double)A->cmap.N;
776   info->rows_local     = (double)A->rmap.n;
777   info->columns_local  = (double)A->cmap.N;
778   info->block_size     = 1.0;
779   ierr = MatGetInfo(mdn,MAT_LOCAL,info);CHKERRQ(ierr);
780   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
781   isend[3] = info->memory;  isend[4] = info->mallocs;
782   if (flag == MAT_LOCAL) {
783     info->nz_used      = isend[0];
784     info->nz_allocated = isend[1];
785     info->nz_unneeded  = isend[2];
786     info->memory       = isend[3];
787     info->mallocs      = isend[4];
788   } else if (flag == MAT_GLOBAL_MAX) {
789     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)A)->comm);CHKERRQ(ierr);
790     info->nz_used      = irecv[0];
791     info->nz_allocated = irecv[1];
792     info->nz_unneeded  = irecv[2];
793     info->memory       = irecv[3];
794     info->mallocs      = irecv[4];
795   } else if (flag == MAT_GLOBAL_SUM) {
796     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
797     info->nz_used      = irecv[0];
798     info->nz_allocated = irecv[1];
799     info->nz_unneeded  = irecv[2];
800     info->memory       = irecv[3];
801     info->mallocs      = irecv[4];
802   }
803   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
804   info->fill_ratio_needed = 0;
805   info->factor_mallocs    = 0;
806   PetscFunctionReturn(0);
807 }
808 
809 #undef __FUNCT__
810 #define __FUNCT__ "MatSetOption_MPIDense"
811 PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op,PetscTruth flg)
812 {
813   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
814   PetscErrorCode ierr;
815 
816   PetscFunctionBegin;
817   switch (op) {
818   case MAT_NEW_NONZERO_LOCATIONS:
819   case MAT_NEW_NONZERO_LOCATION_ERR:
820   case MAT_NEW_NONZERO_ALLOCATION_ERR:
821     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
822     break;
823   case MAT_ROW_ORIENTED:
824     a->roworiented = flg;
825     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
826     break;
827   case MAT_NEW_DIAGONALS:
828   case MAT_USE_HASH_TABLE:
829     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
830     break;
831   case MAT_IGNORE_OFF_PROC_ENTRIES:
832     a->donotstash = flg;
833     break;
834   case MAT_SYMMETRIC:
835   case MAT_STRUCTURALLY_SYMMETRIC:
836   case MAT_HERMITIAN:
837   case MAT_SYMMETRY_ETERNAL:
838   case MAT_IGNORE_LOWER_TRIANGULAR:
839     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
840     break;
841   default:
842     SETERRQ1(PETSC_ERR_SUP,"unknown option %s",MatOptions[op]);
843   }
844   PetscFunctionReturn(0);
845 }
846 
847 
848 #undef __FUNCT__
849 #define __FUNCT__ "MatDiagonalScale_MPIDense"
850 PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
851 {
852   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
853   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
854   PetscScalar    *l,*r,x,*v;
855   PetscErrorCode ierr;
856   PetscInt       i,j,s2a,s3a,s2,s3,m=mdn->A->rmap.n,n=mdn->A->cmap.n;
857 
858   PetscFunctionBegin;
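  /* left scaling multiplies each locally owned row of the local block by ll[i]
     in place; right scaling needs all of rr, so rr is first gathered into the
     work vector lvec */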
859   ierr = MatGetLocalSize(A,&s2,&s3);CHKERRQ(ierr);
860   if (ll) {
861     ierr = VecGetLocalSize(ll,&s2a);CHKERRQ(ierr);
862     if (s2a != s2) SETERRQ2(PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %d != %d.", s2a, s2);
863     ierr = VecGetArray(ll,&l);CHKERRQ(ierr);
864     for (i=0; i<m; i++) {
865       x = l[i];
866       v = mat->v + i;
867       for (j=0; j<n; j++) { (*v) *= x; v+= m;}
868     }
869     ierr = VecRestoreArray(ll,&l);CHKERRQ(ierr);
870     ierr = PetscLogFlops(n*m);CHKERRQ(ierr);
871   }
872   if (rr) {
873     ierr = VecGetLocalSize(rr,&s3a);CHKERRQ(ierr);
874     if (s3a != s3) SETERRQ2(PETSC_ERR_ARG_SIZ,"Right scaling vec non-conforming local size, %d != %d.", s3a, s3);
875     ierr = VecScatterBegin(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
876     ierr = VecScatterEnd(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
877     ierr = VecGetArray(mdn->lvec,&r);CHKERRQ(ierr);
878     for (i=0; i<n; i++) {
879       x = r[i];
880       v = mat->v + i*m;
881       for (j=0; j<m; j++) { (*v++) *= x;}
882     }
883     ierr = VecRestoreArray(mdn->lvec,&r);CHKERRQ(ierr);
884     ierr = PetscLogFlops(n*m);CHKERRQ(ierr);
885   }
886   PetscFunctionReturn(0);
887 }
888 
889 #undef __FUNCT__
890 #define __FUNCT__ "MatNorm_MPIDense"
891 PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
892 {
893   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
894   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
895   PetscErrorCode ierr;
896   PetscInt       i,j;
897   PetscReal      sum = 0.0;
898   PetscScalar    *v = mat->v;
899 
900   PetscFunctionBegin;
901   if (mdn->size == 1) {
902     ierr =  MatNorm(mdn->A,type,nrm);CHKERRQ(ierr);
903   } else {
904     if (type == NORM_FROBENIUS) {
905       for (i=0; i<mdn->A->cmap.n*mdn->A->rmap.n; i++) {
906 #if defined(PETSC_USE_COMPLEX)
907         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
908 #else
909         sum += (*v)*(*v); v++;
910 #endif
911       }
912       ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
913       *nrm = sqrt(*nrm);
914       ierr = PetscLogFlops(2*mdn->A->cmap.n*mdn->A->rmap.n);CHKERRQ(ierr);
915     } else if (type == NORM_1) {
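      /* the 1-norm is the maximum column sum: form the local column sums,
         add them across all processes, then take the largest entry */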
916       PetscReal *tmp,*tmp2;
917       ierr = PetscMalloc(2*A->cmap.N*sizeof(PetscReal),&tmp);CHKERRQ(ierr);
918       tmp2 = tmp + A->cmap.N;
919       ierr = PetscMemzero(tmp,2*A->cmap.N*sizeof(PetscReal));CHKERRQ(ierr);
920       *nrm = 0.0;
921       v = mat->v;
922       for (j=0; j<mdn->A->cmap.n; j++) {
923         for (i=0; i<mdn->A->rmap.n; i++) {
924           tmp[j] += PetscAbsScalar(*v);  v++;
925         }
926       }
927       ierr = MPI_Allreduce(tmp,tmp2,A->cmap.N,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
928       for (j=0; j<A->cmap.N; j++) {
929         if (tmp2[j] > *nrm) *nrm = tmp2[j];
930       }
931       ierr = PetscFree(tmp);CHKERRQ(ierr);
932       ierr = PetscLogFlops(A->cmap.n*A->rmap.n);CHKERRQ(ierr);
933     } else if (type == NORM_INFINITY) { /* max row norm */
934       PetscReal ntemp;
935       ierr = MatNorm(mdn->A,type,&ntemp);CHKERRQ(ierr);
936       ierr = MPI_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPI_MAX,((PetscObject)A)->comm);CHKERRQ(ierr);
937     } else {
938       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
939     }
940   }
941   PetscFunctionReturn(0);
942 }
943 
944 #undef __FUNCT__
945 #define __FUNCT__ "MatTranspose_MPIDense"
946 PetscErrorCode MatTranspose_MPIDense(Mat A,MatReuse reuse,Mat *matout)
947 {
948   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
949   Mat_SeqDense   *Aloc = (Mat_SeqDense*)a->A->data;
950   Mat            B;
951   PetscInt       M = A->rmap.N,N = A->cmap.N,m,n,*rwork,rstart = A->rmap.rstart;
952   PetscErrorCode ierr;
953   PetscInt       j,i;
954   PetscScalar    *v;
955 
956   PetscFunctionBegin;
  if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_SUP,"In-place transpose supported only for square matrices");
958   if (reuse == MAT_INITIAL_MATRIX || A == *matout) {
959     ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
960     ierr = MatSetSizes(B,PETSC_DECIDE,PETSC_DECIDE,N,M);CHKERRQ(ierr);
961     ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
962     ierr = MatMPIDenseSetPreallocation(B,PETSC_NULL);CHKERRQ(ierr);
963   } else {
964     B = *matout;
965   }
966 
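  /* column j of the local block holds rows rstart..rstart+m-1 of global column j
     of A; insert those m values as part of row j of the transpose */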
967   m = a->A->rmap.n; n = a->A->cmap.n; v = Aloc->v;
968   ierr = PetscMalloc(m*sizeof(PetscInt),&rwork);CHKERRQ(ierr);
969   for (i=0; i<m; i++) rwork[i] = rstart + i;
970   for (j=0; j<n; j++) {
971     ierr = MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);CHKERRQ(ierr);
972     v   += m;
973   }
974   ierr = PetscFree(rwork);CHKERRQ(ierr);
975   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
976   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
977   if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
978     *matout = B;
979   } else {
980     ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
981   }
982   PetscFunctionReturn(0);
983 }
984 
985 #include "petscblaslapack.h"
986 #undef __FUNCT__
987 #define __FUNCT__ "MatScale_MPIDense"
988 PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
989 {
990   Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
991   Mat_SeqDense   *a = (Mat_SeqDense*)A->A->data;
992   PetscScalar    oalpha = alpha;
993   PetscErrorCode ierr;
994   PetscBLASInt   one = 1,nz = PetscBLASIntCast(inA->rmap.n*inA->cmap.N);
995 
996   PetscFunctionBegin;
997   BLASscal_(&nz,&oalpha,a->v,&one);
998   ierr = PetscLogFlops(nz);CHKERRQ(ierr);
999   PetscFunctionReturn(0);
1000 }
1001 
1002 static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat *);
1003 
1004 #undef __FUNCT__
1005 #define __FUNCT__ "MatSetUpPreallocation_MPIDense"
1006 PetscErrorCode MatSetUpPreallocation_MPIDense(Mat A)
1007 {
1008   PetscErrorCode ierr;
1009 
1010   PetscFunctionBegin;
1011   ierr =  MatMPIDenseSetPreallocation(A,0);CHKERRQ(ierr);
1012   PetscFunctionReturn(0);
1013 }
1014 
1015 #if defined(PETSC_HAVE_PLAPACK)
1016 
1017 #undef __FUNCT__
1018 #define __FUNCT__ "MatMPIDenseCopyToPlapack"
1019 PetscErrorCode MatMPIDenseCopyToPlapack(Mat A,Mat F)
1020 {
1021   Mat_Plapack    *lu = (Mat_Plapack*)(F)->spptr;
1022   PetscErrorCode ierr;
1023   PetscInt       M=A->cmap.N,m=A->rmap.n,rstart;
1024   PetscScalar    *array;
1025   PetscReal      one = 1.0;
1026 
1027   PetscFunctionBegin;
1028   /* Copy A into F->lu->A */
1029   ierr = PLA_Obj_set_to_zero(lu->A);CHKERRQ(ierr);
1030   ierr = PLA_API_begin();CHKERRQ(ierr);
1031   ierr = PLA_Obj_API_open(lu->A);CHKERRQ(ierr);
1032   ierr = MatGetOwnershipRange(A,&rstart,PETSC_NULL);CHKERRQ(ierr);
1033   ierr = MatGetArray(A,&array);CHKERRQ(ierr);
1034   ierr = PLA_API_axpy_matrix_to_global(m,M, &one,(void *)array,m,lu->A,rstart,0);CHKERRQ(ierr);
1035   ierr = MatRestoreArray(A,&array);CHKERRQ(ierr);
1036   ierr = PLA_Obj_API_close(lu->A);CHKERRQ(ierr);
1037   ierr = PLA_API_end();CHKERRQ(ierr);
1038   lu->rstart = rstart;
1039   PetscFunctionReturn(0);
1040 }
1041 
1042 #undef __FUNCT__
1043 #define __FUNCT__ "MatMPIDenseCopyFromPlapack"
1044 PetscErrorCode MatMPIDenseCopyFromPlapack(Mat F,Mat A)
1045 {
1046   Mat_Plapack    *lu = (Mat_Plapack*)(F)->spptr;
1047   PetscErrorCode ierr;
1048   PetscInt       M=A->cmap.N,m=A->rmap.n,rstart;
1049   PetscScalar    *array;
1050   PetscReal      one = 1.0;
1051 
1052   PetscFunctionBegin;
1053   /* Copy F into A->lu->A */
1054   ierr = MatZeroEntries(A);CHKERRQ(ierr);
1055   ierr = PLA_API_begin();CHKERRQ(ierr);
1056   ierr = PLA_Obj_API_open(lu->A);CHKERRQ(ierr);
1057   ierr = MatGetOwnershipRange(A,&rstart,PETSC_NULL);CHKERRQ(ierr);
1058   ierr = MatGetArray(A,&array);CHKERRQ(ierr);
1059   ierr = PLA_API_axpy_global_to_matrix(m,M, &one,lu->A,rstart,0,(void *)array,m);CHKERRQ(ierr);
1060   ierr = MatRestoreArray(A,&array);CHKERRQ(ierr);
1061   ierr = PLA_Obj_API_close(lu->A);CHKERRQ(ierr);
1062   ierr = PLA_API_end();CHKERRQ(ierr);
1063   lu->rstart = rstart;
1064   PetscFunctionReturn(0);
1065 }
1066 
1067 #undef __FUNCT__
1068 #define __FUNCT__ "MatMatMultNumeric_MPIDense_MPIDense"
1069 PetscErrorCode MatMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
1070 {
1071   PetscErrorCode ierr;
1072   Mat_Plapack    *luA = (Mat_Plapack*)A->spptr;
1073   Mat_Plapack    *luB = (Mat_Plapack*)B->spptr;
1074   Mat_Plapack    *luC = (Mat_Plapack*)C->spptr;
1075   PLA_Obj        alpha = NULL,beta = NULL;
1076 
1077   PetscFunctionBegin;
1078   ierr = MatMPIDenseCopyToPlapack(A,A);CHKERRQ(ierr);
1079   ierr = MatMPIDenseCopyToPlapack(B,B);CHKERRQ(ierr);
1080 
1081   /*
1082   ierr = PLA_Global_show("A = ",luA->A,"%g ","");CHKERRQ(ierr);
1083   ierr = PLA_Global_show("B = ",luB->A,"%g ","");CHKERRQ(ierr);
1084   */
1085 
1086   /* do the multiply in PLA  */
1087   ierr = PLA_Create_constants_conf_to(luA->A,NULL,NULL,&alpha);CHKERRQ(ierr);
1088   ierr = PLA_Create_constants_conf_to(luC->A,NULL,&beta,NULL);CHKERRQ(ierr);
1089   CHKMEMQ;
1090 
  ierr = PLA_Gemm(PLA_NO_TRANSPOSE,PLA_NO_TRANSPOSE,alpha,luA->A,luB->A,beta,luC->A); /* CHKERRQ(ierr); */
1092   CHKMEMQ;
1093   ierr = PLA_Obj_free(&alpha);CHKERRQ(ierr);
1094   ierr = PLA_Obj_free(&beta);CHKERRQ(ierr);
1095 
1096   /*
1097   ierr = PLA_Global_show("C = ",luC->A,"%g ","");CHKERRQ(ierr);
1098   */
1099   ierr = MatMPIDenseCopyFromPlapack(C,C);CHKERRQ(ierr);
1100   PetscFunctionReturn(0);
1101 }
1102 
1103 #undef __FUNCT__
1104 #define __FUNCT__ "MatMatMultSymbolic_MPIDense_MPIDense"
1105 PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
1106 {
1107   PetscErrorCode ierr;
1108   PetscInt       m=A->rmap.n,n=B->cmap.n;
1109   Mat            Cmat;
1110 
1111   PetscFunctionBegin;
  if (A->cmap.n != B->rmap.n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap.n %d != B->rmap.n %d\n",A->cmap.n,B->rmap.n);
  SETERRQ(PETSC_ERR_LIB,"Due to apparent bugs in PLAPACK, this is not currently supported");
1114   ierr = MatCreate(((PetscObject)B)->comm,&Cmat);CHKERRQ(ierr);
1115   ierr = MatSetSizes(Cmat,m,n,A->rmap.N,B->cmap.N);CHKERRQ(ierr);
1116   ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
1117   ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1118   ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1119   ierr = MatMPIDenseCreatePlapack(A);CHKERRQ(ierr);
1120   ierr = MatMPIDenseCreatePlapack(B);CHKERRQ(ierr);
1121   ierr = MatMPIDenseCreatePlapack(Cmat);CHKERRQ(ierr);
1122   *C = Cmat;
1123   PetscFunctionReturn(0);
1124 }
1125 
1126 #undef __FUNCT__
1127 #define __FUNCT__ "MatMatMult_MPIDense_MPIDense"
1128 PetscErrorCode MatMatMult_MPIDense_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
1129 {
1130   PetscErrorCode ierr;
1131 
1132   PetscFunctionBegin;
1133   if (scall == MAT_INITIAL_MATRIX){
1134     ierr = MatMatMultSymbolic_MPIDense_MPIDense(A,B,fill,C);CHKERRQ(ierr);
1135   }
1136   ierr = MatMatMultNumeric_MPIDense_MPIDense(A,B,*C);CHKERRQ(ierr);
1137   PetscFunctionReturn(0);
1138 }
1139 
/* Note: the PETSc permutation perm is ignored */
1141 #undef __FUNCT__
1142 #define __FUNCT__ "MatCholeskyFactorSymbolic_Plapack"
1143 PetscErrorCode MatCholeskyFactorSymbolic_Plapack(Mat A,IS perm,MatFactorInfo *info,Mat *F)
1144 {
1145   PetscErrorCode ierr;
1146   PetscTruth     issymmetric,set;
1147 
1148   PetscFunctionBegin;
1149   ierr = MatIsSymmetricKnown(A,&set,&issymmetric); CHKERRQ(ierr);
1150   if (!set || !issymmetric) SETERRQ(PETSC_ERR_USER,"Matrix must be set as MAT_SYMMETRIC for CholeskyFactor()");
1151   ierr = MatFactorSymbolic_Plapack_Private(A,info,F);CHKERRQ(ierr);
1152   (*F)->factor = FACTOR_CHOLESKY;
1153   PetscFunctionReturn(0);
1154 }
1155 
/* Note: the PETSc r and c permutations are ignored */
1157 #undef __FUNCT__
1158 #define __FUNCT__ "MatLUFactorSymbolic_Plapack"
1159 PetscErrorCode MatLUFactorSymbolic_Plapack(Mat A,IS r,IS c,MatFactorInfo *info,Mat *F)
1160 {
1161   PetscErrorCode ierr;
1162   PetscInt       M = A->rmap.N;
1163   Mat_Plapack    *lu;
1164 
1165   PetscFunctionBegin;
1166   ierr = MatFactorSymbolic_Plapack_Private(A,info,F);CHKERRQ(ierr);
1167   lu = (Mat_Plapack*)(*F)->spptr;
1168   ierr = PLA_Mvector_create(MPI_INT,M,1,lu->templ,PLA_ALIGN_FIRST,&lu->pivots);CHKERRQ(ierr);
1169   (*F)->factor = FACTOR_LU;
1170   PetscFunctionReturn(0);
1171 }
1172 
1173 #undef __FUNCT__
1174 #define __FUNCT__ "MatGetFactor_mpidense_plapack"
PetscErrorCode MatGetFactor_mpidense_plapack(Mat A,MatFactorType ftype,Mat *F)
{
  PetscErrorCode ierr;
  Mat_Plapack    *lu;
  PetscMPIInt    size;
  PetscInt       M = A->rmap.N;

1179   PetscFunctionBegin;
1180   /* Create the factorization matrix */
1181   ierr = MatCreate(((PetscObject)A)->comm,F);CHKERRQ(ierr);
1182   ierr = MatSetSizes(*F,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);CHKERRQ(ierr);
1183   ierr = MatSetType(*F,((PetscObject)A)->type_name);CHKERRQ(ierr);
  ierr = PetscNewLog(A,Mat_Plapack,&lu);CHKERRQ(ierr);
  A->spptr = (void*)lu;
1188 
1189   /* Set default Plapack parameters */
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
1191   lu->nb     = M/size;
1192   if (M - lu->nb*size) lu->nb++; /* without cyclic distribution */
1193 
1194   /* Set runtime options */
1195   ierr = PetscOptionsBegin(((PetscObject)A)->comm,((PetscObject)A)->prefix,"PLAPACK Options","Mat");CHKERRQ(ierr);
1196     ierr = PetscOptionsInt("-mat_plapack_nb","block size of template vector","None",lu->nb,&lu->nb,PETSC_NULL);CHKERRQ(ierr);
1197   PetscOptionsEnd();
1198 
1199   /* Create object distribution template */
1200   lu->templ = NULL;
1201   ierr = PLA_Temp_create(lu->nb, 0, &lu->templ);CHKERRQ(ierr);
1202 
1203   /* Set the datatype */
1204 #if defined(PETSC_USE_COMPLEX)
1205   lu->datatype = MPI_DOUBLE_COMPLEX;
1206 #else
1207   lu->datatype = MPI_DOUBLE;
1208 #endif
1209 
1210   ierr = PLA_Matrix_create(lu->datatype,M,A->cmap.N,lu->templ,PLA_ALIGN_FIRST,PLA_ALIGN_FIRST,&lu->A);CHKERRQ(ierr);
1211 
1212 
  lu->pla_solved     = PETSC_FALSE; /* MatSolve_Plapack() has not been called yet */
1214 
1215   if (ftype == MAT_FACTOR_LU) {
1216     (*F)->ops->lufactorsymbolic = MatLUFactorSymbolic_MPIDense;
1217     (*F)->ops->lufactornumeric  = MatLUFactorNumeric_MPIDense;
1218     (*F)->ops->solve            = MatSolve_MPIDense;
1219   } else if (ftype == MAT_FACTOR_CHOLESKY) {
    (*F)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPIDense;
    (*F)->ops->choleskyfactornumeric  = MatCholeskyFactorNumeric_MPIDense;
1222     (*F)->ops->solve                  = MatSolve_MPIDense;
1223   } else SETERRQ(PETSC_ERR_SUP,"No incomplete factorizations for dense matrices");
1224 
1225   PetscFunctionReturn(0);
1226 }
1227 #endif
1228 
1229 /* -------------------------------------------------------------------*/
1230 static struct _MatOps MatOps_Values = {MatSetValues_MPIDense,
1231        MatGetRow_MPIDense,
1232        MatRestoreRow_MPIDense,
1233        MatMult_MPIDense,
1234 /* 4*/ MatMultAdd_MPIDense,
1235        MatMultTranspose_MPIDense,
1236        MatMultTransposeAdd_MPIDense,
1237 #if defined(PETSC_HAVE_PLAPACK)
1238        MatSolve_MPIDense,
1239 #else
1240        0,
1241 #endif
1242        0,
1243        0,
1244 /*10*/ 0,
1245        0,
1246        0,
1247        0,
1248        MatTranspose_MPIDense,
1249 /*15*/ MatGetInfo_MPIDense,
1250        MatEqual_MPIDense,
1251        MatGetDiagonal_MPIDense,
1252        MatDiagonalScale_MPIDense,
1253        MatNorm_MPIDense,
1254 /*20*/ MatAssemblyBegin_MPIDense,
1255        MatAssemblyEnd_MPIDense,
1256        0,
1257        MatSetOption_MPIDense,
1258        MatZeroEntries_MPIDense,
1259 /*25*/ MatZeroRows_MPIDense,
1260 #if defined(PETSC_HAVE_PLAPACK)
1261        MatLUFactorSymbolic_MPIDense,
1262        MatLUFactorNumeric_MPIDense,
1263        MatCholeskyFactorSymbolic_MPIDense,
1264        MatCholeskyFactorNumeric_MPIDense,
1265 #else
1266        0,
1267        0,
1268        0,
1269        0,
1270 #endif
1271 /*30*/ MatSetUpPreallocation_MPIDense,
1272        0,
1273        0,
1274        MatGetArray_MPIDense,
1275        MatRestoreArray_MPIDense,
1276 /*35*/ MatDuplicate_MPIDense,
1277        0,
1278        0,
1279        0,
1280        0,
1281 /*40*/ 0,
1282        MatGetSubMatrices_MPIDense,
1283        0,
1284        MatGetValues_MPIDense,
1285        0,
1286 /*45*/ 0,
1287        MatScale_MPIDense,
1288        0,
1289        0,
1290        0,
1291 /*50*/ 0,
1292        0,
1293        0,
1294        0,
1295        0,
1296 /*55*/ 0,
1297        0,
1298        0,
1299        0,
1300        0,
1301 /*60*/ MatGetSubMatrix_MPIDense,
1302        MatDestroy_MPIDense,
1303        MatView_MPIDense,
1304        0,
1305        0,
1306 /*65*/ 0,
1307        0,
1308        0,
1309        0,
1310        0,
1311 /*70*/ 0,
1312        0,
1313        0,
1314        0,
1315        0,
1316 /*75*/ 0,
1317        0,
1318        0,
1319        0,
1320        0,
1321 /*80*/ 0,
1322        0,
1323        0,
1324        0,
1325 /*84*/ MatLoad_MPIDense,
1326        0,
1327        0,
1328        0,
1329        0,
1330        0,
1331 /*90*/
1332 #if defined(PETSC_HAVE_PLAPACK)
1333        MatMatMult_MPIDense_MPIDense,
1334        MatMatMultSymbolic_MPIDense_MPIDense,
1335        MatMatMultNumeric_MPIDense_MPIDense,
1336 #else
1337        0,
1338        0,
1339        0,
1340 #endif
1341        0,
1342 /*95*/ 0,
1343        0,
1344        0,
1345        0};
1346 
1347 EXTERN_C_BEGIN
1348 #undef __FUNCT__
1349 #define __FUNCT__ "MatMPIDenseSetPreallocation_MPIDense"
1350 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1351 {
1352   Mat_MPIDense   *a;
1353   PetscErrorCode ierr;
1354 
1355   PetscFunctionBegin;
1356   mat->preallocated = PETSC_TRUE;
  /* Note:  For now, when data is specified, this assumes that the user correctly
   allocates the local dense storage space.  We should add error checking. */
1359 
1360   a    = (Mat_MPIDense*)mat->data;
1361   ierr = MatCreate(PETSC_COMM_SELF,&a->A);CHKERRQ(ierr);
1362   ierr = MatSetSizes(a->A,mat->rmap.n,mat->cmap.N,mat->rmap.n,mat->cmap.N);CHKERRQ(ierr);
1363   ierr = MatSetType(a->A,MATSEQDENSE);CHKERRQ(ierr);
1364   ierr = MatSeqDenseSetPreallocation(a->A,data);CHKERRQ(ierr);
1365   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
1366   PetscFunctionReturn(0);
1367 }
1368 EXTERN_C_END
1369 
1370 /*MC
1371    MATMPIDENSE - MATMPIDENSE = "mpidense" - A matrix type to be used for distributed dense matrices.
1372 
1373    Options Database Keys:
1374 . -mat_type mpidense - sets the matrix type to "mpidense" during a call to MatSetFromOptions()
1375 
1376   Level: beginner
1377 
  MATMPIDENSE matrices may use parallel direct solvers (LU and Cholesky)
  via the external package PLAPACK, if PLAPACK is installed
  (run config/configure.py with the option --download-plapack).

1383   Options Database Keys:
+ -mat_plapack_nprows <n> - number of rows in processor partition
. -mat_plapack_npcols <n> - number of columns in processor partition
. -mat_plapack_nb <n> - block size of template vector
. -mat_plapack_nb_alg <n> - algorithmic block size
- -mat_plapack_ckerror <n> - error checking flag
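
  For example, to select this matrix type at runtime for a (hypothetical) program
  ex1 run on four processes, with a template block size of 64:
.vb
     mpiexec -n 4 ./ex1 -mat_type mpidense -mat_plapack_nb 64
.ve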
1389 
1390 .seealso: MatCreateMPIDense(), MATDENSE, MATSEQDENSE
1391 M*/
1392 
1393 EXTERN_C_BEGIN
1394 #undef __FUNCT__
1395 #define __FUNCT__ "MatCreate_MPIDense"
1396 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIDense(Mat mat)
1397 {
1398   Mat_MPIDense   *a;
1399   PetscErrorCode ierr;
1400 
1401   PetscFunctionBegin;
1402   ierr              = PetscNewLog(mat,Mat_MPIDense,&a);CHKERRQ(ierr);
1403   mat->data         = (void*)a;
1404   ierr              = PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1405   mat->factor       = 0;
1406   mat->mapping      = 0;
1407 
1408   mat->insertmode = NOT_SET_VALUES;
1409   ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&a->rank);CHKERRQ(ierr);
1410   ierr = MPI_Comm_size(((PetscObject)mat)->comm,&a->size);CHKERRQ(ierr);
1411 
1412   mat->rmap.bs = mat->cmap.bs = 1;
1413   ierr = PetscMapSetUp(&mat->rmap);CHKERRQ(ierr);
1414   ierr = PetscMapSetUp(&mat->cmap);CHKERRQ(ierr);
1415   a->nvec = mat->cmap.n;
1416 
1417   /* build cache for off array entries formed */
1418   a->donotstash = PETSC_FALSE;
1419   ierr = MatStashCreate_Private(((PetscObject)mat)->comm,1,&mat->stash);CHKERRQ(ierr);
1420 
1421   /* stuff used for matrix vector multiply */
1422   a->lvec        = 0;
1423   a->Mvctx       = 0;
1424   a->roworiented = PETSC_TRUE;
1425 
1426   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C",
1427                                      "MatGetDiagonalBlock_MPIDense",
1428                                      MatGetDiagonalBlock_MPIDense);CHKERRQ(ierr);
1429   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C",
1430                                      "MatMPIDenseSetPreallocation_MPIDense",
1431                                      MatMPIDenseSetPreallocation_MPIDense);CHKERRQ(ierr);
1432   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",
1433                                      "MatMatMult_MPIAIJ_MPIDense",
1434                                       MatMatMult_MPIAIJ_MPIDense);CHKERRQ(ierr);
1435   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",
1436                                      "MatMatMultSymbolic_MPIAIJ_MPIDense",
1437                                       MatMatMultSymbolic_MPIAIJ_MPIDense);CHKERRQ(ierr);
1438   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",
1439                                      "MatMatMultNumeric_MPIAIJ_MPIDense",
1440                                       MatMatMultNumeric_MPIAIJ_MPIDense);CHKERRQ(ierr);
1441 #if defined(PETSC_HAVE_PLAPACK)
1442   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetFactor_mpidense_plapack_C",
1443                                      "MatGetFactor_mpidense_plapack",
1444                                       MatGetFactor_mpidense_plapack);CHKERRQ(ierr);
1445 #endif
1446   ierr = PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);CHKERRQ(ierr);
1447 
1448   PetscFunctionReturn(0);
1449 }
1450 EXTERN_C_END
1451 
1452 /*MC
1453    MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.
1454 
1455    This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1456    and MATMPIDENSE otherwise.
1457 
1458    Options Database Keys:
1459 . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()
1460 
1461   Level: beginner
1462 
1463 
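   Example usage, a minimal sketch (the global size 100 is illustrative; CHKERRQ error checking is omitted):
.vb
      Mat A;
      MatCreate(PETSC_COMM_WORLD,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
      MatSetType(A,MATDENSE);   /* becomes MATSEQDENSE on one process, MATMPIDENSE otherwise */
      MatSetFromOptions(A);
.ve
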
1464 .seealso: MatCreateMPIDense(), MATSEQDENSE, MATMPIDENSE
1465 M*/
1466 
1467 EXTERN_C_BEGIN
1468 #undef __FUNCT__
1469 #define __FUNCT__ "MatCreate_Dense"
1470 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_Dense(Mat A)
1471 {
1472   PetscErrorCode ierr;
1473   PetscMPIInt    size;
1474 
1475   PetscFunctionBegin;
1476   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
1477   if (size == 1) {
1478     ierr = MatSetType(A,MATSEQDENSE);CHKERRQ(ierr);
1479   } else {
1480     ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
1481   }
1482   PetscFunctionReturn(0);
1483 }
1484 EXTERN_C_END
1485 
1486 #undef __FUNCT__
1487 #define __FUNCT__ "MatMPIDenseSetPreallocation"
1488 /*@C
1489    MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries
1490 
1491    Not collective
1492 
1493    Input Parameters:
1494 +  A - the matrix
1495 -  data - optional location of matrix data.  Set data=PETSC_NULL for PETSc
1496    to control all matrix memory allocation.
1497 
1498    Notes:
1499    The dense format is fully compatible with standard Fortran 77
1500    storage by columns.
1501 
1502    The data input variable is intended primarily for Fortran programmers
1503    who wish to allocate their own matrix memory space.  Most users should
1504    set data=PETSC_NULL.
1505 
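   Example usage, a minimal sketch (comm and the global size 100 are illustrative; CHKERRQ error checking is omitted):
.vb
      Mat A;
      MatCreate(comm,&A);
      MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
      MatSetType(A,MATMPIDENSE);
      MatMPIDenseSetPreallocation(A,PETSC_NULL);  /* PETSc allocates the storage */
.ve
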
1506    Level: intermediate
1507 
1508 .keywords: matrix,dense, parallel
1509 
1510 .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1511 @*/
1512 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIDenseSetPreallocation(Mat mat,PetscScalar *data)
1513 {
1514   PetscErrorCode ierr,(*f)(Mat,PetscScalar *);
1515 
1516   PetscFunctionBegin;
1517   ierr = PetscObjectQueryFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
1518   if (f) {
1519     ierr = (*f)(mat,data);CHKERRQ(ierr);
1520   }
1521   PetscFunctionReturn(0);
1522 }
1523 
1524 #undef __FUNCT__
1525 #define __FUNCT__ "MatCreateMPIDense"
1526 /*@C
1527    MatCreateMPIDense - Creates a parallel matrix in dense format.
1528 
1529    Collective on MPI_Comm
1530 
1531    Input Parameters:
1532 +  comm - MPI communicator
1533 .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1534 .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1535 .  M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
1536 .  N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
1537 -  data - optional location of matrix data.  Set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
1538    to control all matrix memory allocation.
1539 
1540    Output Parameter:
1541 .  A - the matrix
1542 
1543    Notes:
1544    The dense format is fully compatible with standard Fortran 77
1545    storage by columns.
1546 
1547    The data input variable is intended primarily for Fortran programmers
1548    who wish to allocate their own matrix memory space.  Most users should
1549    set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users).
1550 
1551    The user MUST specify either the local or global matrix dimensions
1552    (possibly both).
1553 
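   Example usage, a minimal sketch (an 8 by 8 global matrix is chosen purely for illustration; CHKERRQ error checking is omitted):
.vb
      Mat A;
      MatCreateMPIDense(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,8,8,PETSC_NULL,&A);
      /* ... set values and assemble the matrix, then use it ... */
      MatDestroy(A);
.ve
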
1554    Level: intermediate
1555 
1556 .keywords: matrix,dense, parallel
1557 
1558 .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1559 @*/
1560 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
1561 {
1562   PetscErrorCode ierr;
1563   PetscMPIInt    size;
1564 
1565   PetscFunctionBegin;
1566   ierr = MatCreate(comm,A);CHKERRQ(ierr);
1567   ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
1568   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1569   if (size > 1) {
1570     ierr = MatSetType(*A,MATMPIDENSE);CHKERRQ(ierr);
1571     ierr = MatMPIDenseSetPreallocation(*A,data);CHKERRQ(ierr);
1572   } else {
1573     ierr = MatSetType(*A,MATSEQDENSE);CHKERRQ(ierr);
1574     ierr = MatSeqDenseSetPreallocation(*A,data);CHKERRQ(ierr);
1575   }
1576   PetscFunctionReturn(0);
1577 }
1578 
1579 #undef __FUNCT__
1580 #define __FUNCT__ "MatDuplicate_MPIDense"
1581 static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
1582 {
1583   Mat            mat;
1584   Mat_MPIDense   *a,*oldmat = (Mat_MPIDense*)A->data;
1585   PetscErrorCode ierr;
1586 
1587   PetscFunctionBegin;
1588   *newmat       = 0;
1589   ierr = MatCreate(((PetscObject)A)->comm,&mat);CHKERRQ(ierr);
1590   ierr = MatSetSizes(mat,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);CHKERRQ(ierr);
1591   ierr = MatSetType(mat,((PetscObject)A)->type_name);CHKERRQ(ierr);
1592   a                 = (Mat_MPIDense*)mat->data;
1593   ierr              = PetscMemcpy(mat->ops,A->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
1594   mat->factor       = A->factor;
1595   mat->assembled    = PETSC_TRUE;
1596   mat->preallocated = PETSC_TRUE;
1597 
1598   mat->rmap.rstart     = A->rmap.rstart;
1599   mat->rmap.rend       = A->rmap.rend;
1600   a->size              = oldmat->size;
1601   a->rank              = oldmat->rank;
1602   mat->insertmode      = NOT_SET_VALUES;
1603   a->nvec              = oldmat->nvec;
1604   a->donotstash        = oldmat->donotstash;
1605 
1606   ierr = PetscMemcpy(mat->rmap.range,A->rmap.range,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
1607   ierr = PetscMemcpy(mat->cmap.range,A->cmap.range,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
1608   ierr = MatStashCreate_Private(((PetscObject)A)->comm,1,&mat->stash);CHKERRQ(ierr);
1609 
1610   ierr = MatSetUpMultiply_MPIDense(mat);CHKERRQ(ierr);
1611   ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1612   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
1613 
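  /* copy the contents of the PLAPACK context; this is a shallow struct copy, so any
     pointers inside Mat_Plapack are shared between A and the duplicate */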
1614 #if defined(PETSC_HAVE_PLAPACK)
1615   ierr = PetscMemcpy(mat->spptr,A->spptr,sizeof(Mat_Plapack));CHKERRQ(ierr);
1616 #endif
1617   *newmat = mat;
1618   PetscFunctionReturn(0);
1619 }
1620 
1621 #include "petscsys.h"
1622 
1623 #undef __FUNCT__
1624 #define __FUNCT__ "MatLoad_MPIDense_DenseInFile"
1625 PetscErrorCode MatLoad_MPIDense_DenseInFile(MPI_Comm comm,PetscInt fd,PetscInt M,PetscInt N, MatType type,Mat *newmat)
1626 {
1627   PetscErrorCode ierr;
1628   PetscMPIInt    rank,size;
1629   PetscInt       *rowners,i,m,nz,j;
1630   PetscScalar    *array,*vals,*vals_ptr;
1631   MPI_Status     status;
1632 
1633   PetscFunctionBegin;
1634   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1635   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1636 
1637   /* determine ownership of all rows */
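  /* each process gets floor(M/size) rows, with one extra row on the first M%size ranks */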
1638   m          = M/size + ((M % size) > rank);
1639   ierr       = PetscMalloc((size+2)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
1640   ierr       = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
1641   rowners[0] = 0;
1642   for (i=2; i<=size; i++) {
1643     rowners[i] += rowners[i-1];
1644   }
1645 
1646   ierr = MatCreate(comm,newmat);CHKERRQ(ierr);
1647   ierr = MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);CHKERRQ(ierr);
1648   ierr = MatSetType(*newmat,type);CHKERRQ(ierr);
1649   ierr = MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);CHKERRQ(ierr);
1650   ierr = MatGetArray(*newmat,&array);CHKERRQ(ierr);
1651 
1652   if (!rank) {
1653     ierr = PetscMalloc(m*N*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1654 
1655     /* read in my part of the matrix numerical values  */
1656     ierr = PetscBinaryRead(fd,vals,m*N,PETSC_SCALAR);CHKERRQ(ierr);
1657 
1658     /* insert into the matrix row by row; the local array is stored by columns (Fortran order), which is why we cannot read the file directly into it */
1659     vals_ptr = vals;
1660     for (i=0; i<m; i++) {
1661       for (j=0; j<N; j++) {
1662         array[i + j*m] = *vals_ptr++;
1663       }
1664     }
1665 
1666     /* read in the other processes' parts and ship them out */
1667     for (i=1; i<size; i++) {
1668       nz   = (rowners[i+1] - rowners[i])*N;
1669       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1670       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)(*newmat))->tag,comm);CHKERRQ(ierr);
1671     }
1672   } else {
1673     /* receive numeric values */
1674     ierr = PetscMalloc(m*N*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1675 
1676     /* receive message of values */
1677     ierr = MPI_Recv(vals,m*N,MPIU_SCALAR,0,((PetscObject)(*newmat))->tag,comm,&status);CHKERRQ(ierr);
1678 
1679     /* insert into the matrix row by row; the local array is stored by columns (Fortran order), which is why we cannot read the file directly into it */
1680     vals_ptr = vals;
1681     for (i=0; i<m; i++) {
1682       for (j=0; j<N; j++) {
1683         array[i + j*m] = *vals_ptr++;
1684       }
1685     }
1686   }
1687   ierr = PetscFree(rowners);CHKERRQ(ierr);
1688   ierr = PetscFree(vals);CHKERRQ(ierr);
1689   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1690   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1691   PetscFunctionReturn(0);
1692 }
1693 
1694 #undef __FUNCT__
1695 #define __FUNCT__ "MatLoad_MPIDense"
1696 PetscErrorCode MatLoad_MPIDense(PetscViewer viewer, MatType type,Mat *newmat)
1697 {
1698   Mat            A;
1699   PetscScalar    *vals,*svals;
1700   MPI_Comm       comm = ((PetscObject)viewer)->comm;
1701   MPI_Status     status;
1702   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*rowners,*sndcounts,m,maxnz;
1703   PetscInt       header[4],*rowlengths = 0,M,N,*cols;
1704   PetscInt       *ourlens,*procsnz = 0,*offlens,jj,*mycols,*smycols;
1705   PetscInt       i,nz,j,rstart,rend;
1706   int            fd;
1707   PetscErrorCode ierr;
1708 
1709   PetscFunctionBegin;
1710   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1711   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1712   if (!rank) {
1713     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1714     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1715     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
1716   }
1717 
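  /* the binary header is [cookie, M, N, nz]; only rank 0 read it, so broadcast the sizes */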
1718   ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
1719   M = header[1]; N = header[2]; nz = header[3];
1720 
1721   /*
1722        Handle case where matrix is stored on disk as a dense matrix
1723   */
1724   if (nz == MATRIX_BINARY_FORMAT_DENSE) {
1725     ierr = MatLoad_MPIDense_DenseInFile(comm,fd,M,N,type,newmat);CHKERRQ(ierr);
1726     PetscFunctionReturn(0);
1727   }
1728 
1729   /* determine ownership of all rows */
1730   m          = PetscMPIIntCast(M/size + ((M % size) > rank));
1731   ierr       = PetscMalloc((size+2)*sizeof(PetscMPIInt),&rowners);CHKERRQ(ierr);
1732   ierr       = MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
1733   rowners[0] = 0;
1734   for (i=2; i<=size; i++) {
1735     rowners[i] += rowners[i-1];
1736   }
1737   rstart = rowners[rank];
1738   rend   = rowners[rank+1];
1739 
1740   /* distribute row lengths to all processors */
1741   ierr    = PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&ourlens);CHKERRQ(ierr);
1742   offlens = ourlens + (rend-rstart);
1743   if (!rank) {
1744     ierr = PetscMalloc(M*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
1745     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
1746     ierr = PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);CHKERRQ(ierr);
1747     for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1748     ierr = MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);CHKERRQ(ierr);
1749     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
1750   } else {
1751     ierr = MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);CHKERRQ(ierr);
1752   }
1753 
1754   if (!rank) {
1755     /* calculate the number of nonzeros on each processor */
1756     ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
1757     ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
1758     for (i=0; i<size; i++) {
1759       for (j=rowners[i]; j< rowners[i+1]; j++) {
1760         procsnz[i] += rowlengths[j];
1761       }
1762     }
1763     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
1764 
1765     /* determine max buffer needed and allocate it */
1766     maxnz = 0;
1767     for (i=0; i<size; i++) {
1768       maxnz = PetscMax(maxnz,procsnz[i]);
1769     }
1770     ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1771 
1772     /* read in my part of the matrix column indices  */
1773     nz = procsnz[0];
1774     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
1775     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
1776 
1777     /* read in everyone else's and ship them off */
1778     for (i=1; i<size; i++) {
1779       nz   = procsnz[i];
1780       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
1781       ierr = MPI_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
1782     }
1783     ierr = PetscFree(cols);CHKERRQ(ierr);
1784   } else {
1785     /* determine buffer space needed for message */
1786     nz = 0;
1787     for (i=0; i<m; i++) {
1788       nz += ourlens[i];
1789     }
1790     ierr = PetscMalloc((nz+1)*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
1791 
1792     /* receive message of column indices */
1793     ierr = MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
1794     ierr = MPI_Get_count(&status,MPIU_INT,&maxnz);CHKERRQ(ierr);
1795     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
1796   }
1797 
1798   /* loop over local rows, determining number of off diagonal entries */
1799   ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
1800   jj = 0;
1801   for (i=0; i<m; i++) {
1802     for (j=0; j<ourlens[i]; j++) {
1803       if (mycols[jj] < rstart || mycols[jj] >= rend) offlens[i]++;
1804       jj++;
1805     }
1806   }
1807 
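  /* ourlens is temporarily reduced to the diagonal-block row lengths for the matrix
     creation below, then restored to the full row lengths immediately afterwards */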
1808   /* create our matrix */
1809   for (i=0; i<m; i++) {
1810     ourlens[i] -= offlens[i];
1811   }
1812   ierr = MatCreate(comm,newmat);CHKERRQ(ierr);
1813   ierr = MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);CHKERRQ(ierr);
1814   ierr = MatSetType(*newmat,type);CHKERRQ(ierr);
1815   ierr = MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);CHKERRQ(ierr);
1816   A = *newmat;
1817   for (i=0; i<m; i++) {
1818     ourlens[i] += offlens[i];
1819   }
1820 
1821   if (!rank) {
1822     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1823 
1824     /* read in my part of the matrix numerical values  */
1825     nz = procsnz[0];
1826     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1827 
1828     /* insert into matrix */
1829     jj      = rstart;
1830     smycols = mycols;
1831     svals   = vals;
1832     for (i=0; i<m; i++) {
1833       ierr = MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
1834       smycols += ourlens[i];
1835       svals   += ourlens[i];
1836       jj++;
1837     }
1838 
1839     /* read in other processors and ship out */
1840     for (i=1; i<size; i++) {
1841       nz   = procsnz[i];
1842       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1843       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
1844     }
1845     ierr = PetscFree(procsnz);CHKERRQ(ierr);
1846   } else {
1847     /* receive numeric values */
1848     ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1849 
1850     /* receive message of values */
1851     ierr = MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);CHKERRQ(ierr);
1852     ierr = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
1853     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
1854 
1855     /* insert into matrix */
1856     jj      = rstart;
1857     smycols = mycols;
1858     svals   = vals;
1859     for (i=0; i<m; i++) {
1860       ierr = MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
1861       smycols += ourlens[i];
1862       svals   += ourlens[i];
1863       jj++;
1864     }
1865   }
1866   ierr = PetscFree(ourlens);CHKERRQ(ierr);
1867   ierr = PetscFree(vals);CHKERRQ(ierr);
1868   ierr = PetscFree(mycols);CHKERRQ(ierr);
1869   ierr = PetscFree(rowners);CHKERRQ(ierr);
1870 
1871   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1872   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1873   PetscFunctionReturn(0);
1874 }
1875 
1876 #undef __FUNCT__
1877 #define __FUNCT__ "MatEqual_MPIDense"
1878 PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscTruth *flag)
1879 {
1880   Mat_MPIDense   *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
1881   Mat            a,b;
1882   PetscTruth     flg;
1883   PetscErrorCode ierr;
1884 
1885   PetscFunctionBegin;
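  /* compare the local blocks, then AND the local results over the communicator so
     that every process returns the same answer */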
1886   a = matA->A;
1887   b = matB->A;
1888   ierr = MatEqual(a,b,&flg);CHKERRQ(ierr);
1889   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);CHKERRQ(ierr);
1890   PetscFunctionReturn(0);
1891 }
1892 
1893 #if defined(PETSC_HAVE_PLAPACK)
1894 
1895 #undef __FUNCT__
1896 #define __FUNCT__ "PetscPLAPACKFinalizePackage"
1897 /*@C
1898   PetscPLAPACKFinalizePackage - This function destroys everything in the PETSc interface to PLAPACK.

1899   Level: developer
1900 
1901 .keywords: Petsc, destroy, package, PLAPACK
1902 .seealso: PetscFinalize()
1903 @*/
1904 PetscErrorCode PETSC_DLLEXPORT PetscPLAPACKFinalizePackage(void)
1905 {
1906   PetscErrorCode ierr;
1907 
1908   PetscFunctionBegin;
1909   ierr = PLA_Finalize();CHKERRQ(ierr);
1910   PetscFunctionReturn(0);
1911 }
1912 
1913 #undef __FUNCT__
1914 #define __FUNCT__ "PetscPLAPACKInitializePackage"
1915 /*@C
1916   PetscPLAPACKInitializePackage - This function initializes everything in the PETSc interface to PLAPACK. It is
1917   called from PetscDLLibraryRegister() when using dynamic libraries, and on the call to PetscInitialize()
1918   when using static libraries.
1919 
1920   Input Parameter:
1921 .  path - The dynamic library path, or PETSC_NULL
1922 
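  Options Database Keys:
+ -plapack_nprows <n> - row dimension of the 2D processor mesh
. -plapack_npcols <n> - column dimension of the 2D processor mesh
. -plapack_ckerror <n> - error checking flag
- -plapack_nb_alg <n> - algorithmic block size
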
1923   Level: developer
1924 
1925 .keywords: Petsc, initialize, package, PLAPACK
1926 .seealso: PetscInitializePackage(), PetscInitialize()
1927 @*/
1928 PetscErrorCode PETSC_DLLEXPORT PetscPLAPACKInitializePackage(const char path[])
1929 {
1930   MPI_Comm       comm = PETSC_COMM_WORLD;
1931   PetscMPIInt    size;
1932   PetscErrorCode ierr;
1933 
1934   PetscFunctionBegin;
1935   if (!PLA_Initialized(PETSC_NULL)) {
1936 
1937     ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1938     Plapack_nprows = 1;
1939     Plapack_npcols = size;
1940 
1941     ierr = PetscOptionsBegin(comm,PETSC_NULL,"PLAPACK Options","Mat");CHKERRQ(ierr);
1942       ierr = PetscOptionsInt("-plapack_nprows","row dimension of 2D processor mesh","None",Plapack_nprows,&Plapack_nprows,PETSC_NULL);CHKERRQ(ierr);
1943       ierr = PetscOptionsInt("-plapack_npcols","column dimension of 2D processor mesh","None",Plapack_npcols,&Plapack_npcols,PETSC_NULL);CHKERRQ(ierr);
1944 #if defined(PETSC_USE_DEBUG)
1945       Plapack_ierror = 3;
1946 #else
1947       Plapack_ierror = 0;
1948 #endif
1949       ierr = PetscOptionsInt("-plapack_ckerror","error checking flag","None",Plapack_ierror,&Plapack_ierror,PETSC_NULL);CHKERRQ(ierr);
1950       if (Plapack_ierror) {
1951         ierr = PLA_Set_error_checking(Plapack_ierror,PETSC_TRUE,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr);
1952       } else {
1953         ierr = PLA_Set_error_checking(Plapack_ierror,PETSC_FALSE,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr);
1954       }
1955 
1956       Plapack_nb_alg = 0;
1957       ierr = PetscOptionsInt("-plapack_nb_alg","algorithmic block size","None",Plapack_nb_alg,&Plapack_nb_alg,PETSC_NULL);CHKERRQ(ierr);
1958       if (Plapack_nb_alg) {
1959         ierr = pla_Environ_set_nb_alg(PLA_OP_ALL_ALG,Plapack_nb_alg);CHKERRQ(ierr);
1960       }
1961     PetscOptionsEnd();
1962 
1963     ierr = PLA_Comm_1D_to_2D(comm,Plapack_nprows,Plapack_npcols,&Plapack_comm_2d);CHKERRQ(ierr);
1964     ierr = PLA_Init(Plapack_comm_2d);CHKERRQ(ierr);
1965     ierr = PetscRegisterFinalize(PetscPLAPACKFinalizePackage);CHKERRQ(ierr);
1966   }
1967   PetscFunctionReturn(0);
1968 }
1969 
1970 
1971 #endif
1972