#include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
#include <petsc/private/vecimpl.h>
#include <petsc/private/isimpl.h>
#include <petscblaslapack.h>
#include <petscsf.h>

/*MC
   MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
   and MATMPIAIJ otherwise. As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

   Developer Notes: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, and MATAIJCRL; the type also automatically switches over to use inodes when enough exist.

  Level: beginner

.seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
M*/

/*MC
   MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
   and MATMPIAIJCRL otherwise. As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
M*/
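/*
   A minimal usage sketch of the pattern recommended above (illustrative only,
   not part of this file; the sizes M, N and the nonzero counts are placeholders):

      Mat A;
      ierr = MatCreate(comm,&A);CHKERRQ(ierr);
      ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
      ierr = MatSetType(A,MATAIJ);CHKERRQ(ierr);
      ierr = MatSeqAIJSetPreallocation(A,5,NULL);CHKERRQ(ierr);        [used on a one-process communicator]
      ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr); [used on a multi-process communicator]
*/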
PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

  PetscFunctionBegin;
  if (mat->A) {
    ierr = MatSetBlockSizes(mat->A,rbs,cbs);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(mat->B,rbs,1);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
  Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
  Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
  const PetscInt  *ia,*ib;
  const MatScalar *aa,*bb;
  PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
  PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

  PetscFunctionBegin;
  *keptrows = 0;
  ia        = a->i;
  ib        = b->i;
  for (i=0; i<m; i++) {
    na = ia[i+1] - ia[i];
    nb = ib[i+1] - ib[i];
    if (!na && !nb) {
      cnt++;
      goto ok1;
    }
    aa = a->a + ia[i];
    for (j=0; j<na; j++) {
      if (aa[j] != 0.0) goto ok1;
    }
    bb = b->a + ib[i];
    for (j=0; j<nb; j++) {
      if (bb[j] != 0.0) goto ok1;
    }
    cnt++;
ok1:;
  }
  ierr = MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));CHKERRQ(ierr);
  if (!n0rows) PetscFunctionReturn(0);
  ierr = PetscMalloc1(M->rmap->n-cnt,&rows);CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<m; i++) {
    na = ia[i+1] - ia[i];
    nb = ib[i+1] - ib[i];
    if (!na && !nb) continue;
    aa = a->a + ia[i];
    for (j=0; j<na; j++) {
      if (aa[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
    bb = b->a + ib[i];
    for (j=0; j<nb; j++) {
      if (bb[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
ok2:;
  }
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*) Y->data;

  PetscFunctionBegin;
  if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
    ierr = MatDiagonalSet(aij->A,D,is);CHKERRQ(ierr);
  } else {
    ierr = MatDiagonalSet_Default(Y,D,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
  PetscErrorCode ierr;
  PetscInt       i,rstart,nrows,*rows;

  PetscFunctionBegin;
  *zrows = NULL;
  ierr   = MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);CHKERRQ(ierr);
  ierr   = MatGetOwnershipRange(M,&rstart,NULL);CHKERRQ(ierr);
  for (i=0; i<nrows; i++) rows[i] += rstart;
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
  PetscInt       i,n,*garray = aij->garray;
  Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
  Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
  PetscReal      *work;

  PetscFunctionBegin;
  ierr = MatGetSize(A,NULL,&n);CHKERRQ(ierr);
  ierr = PetscCalloc1(n,&work);CHKERRQ(ierr);
  if (type == NORM_2) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
    }
  } else if (type == NORM_1) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
    }
  } else if (type == NORM_INFINITY) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
    }
  } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
  if (type == NORM_INFINITY) {
    ierr = MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  } else {
    ierr = MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  ierr = PetscFree(work);CHKERRQ(ierr);
  if (type == NORM_2) {
    for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
  }
  PetscFunctionReturn(0);
}
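/*
   Illustrative call of the routine above (a sketch, not part of this file):
   norms must have room for every global column; for NORM_2 squares are
   accumulated locally in the work array and the square root is taken only
   after the MPI reduction.

      PetscReal *norms;
      ierr = PetscMalloc1(N,&norms);CHKERRQ(ierr);              [N = global number of columns]
      ierr = MatGetColumnNorms(A,NORM_2,norms);CHKERRQ(ierr);
      ierr = PetscFree(norms);CHKERRQ(ierr);
*/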
PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  IS             sis,gis;
  PetscErrorCode ierr;
  const PetscInt *isis,*igis;
  PetscInt       n,*iis,nsis,ngis,rstart,i;

  PetscFunctionBegin;
  ierr = MatFindOffBlockDiagonalEntries(a->A,&sis);CHKERRQ(ierr);
  ierr = MatFindNonzeroRows(a->B,&gis);CHKERRQ(ierr);
  ierr = ISGetSize(gis,&ngis);CHKERRQ(ierr);
  ierr = ISGetSize(sis,&nsis);CHKERRQ(ierr);
  ierr = ISGetIndices(sis,&isis);CHKERRQ(ierr);
  ierr = ISGetIndices(gis,&igis);CHKERRQ(ierr);

  ierr = PetscMalloc1(ngis+nsis,&iis);CHKERRQ(ierr);
  ierr = PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));CHKERRQ(ierr);
  n    = ngis + nsis;
  ierr = PetscSortRemoveDupsInt(&n,iis);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,NULL);CHKERRQ(ierr);
  for (i=0; i<n; i++) iis[i] += rstart;
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);CHKERRQ(ierr);

  ierr = ISRestoreIndices(sis,&isis);CHKERRQ(ierr);
  ierr = ISRestoreIndices(gis,&igis);CHKERRQ(ierr);
  ierr = ISDestroy(&sis);CHKERRQ(ierr);
  ierr = ISDestroy(&gis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/*
    Distributes a SeqAIJ matrix across a set of processes. Code stolen from
    MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

    Only for square matrices

    Used by a preconditioner, hence PETSC_EXTERN
*/
PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
{
  PetscMPIInt    rank,size;
  PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
  PetscErrorCode ierr;
  Mat            mat;
  Mat_SeqAIJ     *gmata;
  PetscMPIInt    tag;
  MPI_Status     status;
  PetscBool      aij;
  MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);CHKERRQ(ierr);
    if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
  }
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatCreate(comm,&mat);CHKERRQ(ierr);
    ierr = MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatGetBlockSizes(gmat,&bses[0],&bses[1]);CHKERRQ(ierr);
    ierr = MPI_Bcast(bses,2,MPIU_INT,0,comm);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(mat,bses[0],bses[1]);CHKERRQ(ierr);
    ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
    ierr = PetscMalloc1(size+1,&rowners);CHKERRQ(ierr);
    ierr = PetscMalloc2(m,&dlens,m,&olens);CHKERRQ(ierr);
    ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

    rowners[0] = 0;
    for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
    rstart = rowners[rank];
    rend   = rowners[rank+1];
    ierr   = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
    if (!rank) {
      gmata = (Mat_SeqAIJ*) gmat->data;
      /* send row lengths to all processors */
      for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
      for (i=1; i<size; i++) {
        ierr = MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
      }
      /* determine number diagonal and off-diagonal counts */
      ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscCalloc1(m,&ld);CHKERRQ(ierr);
      jj   = 0;
      for (i=0; i<m; i++) {
        for (j=0; j<dlens[i]; j++) {
          if (gmata->j[jj] < rstart) ld[i]++;
          if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
          jj++;
        }
      }
      /* send column indices to other processes */
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(&nz,1,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
        ierr = MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
      }

      /* send numerical values to other processes */
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
      }
      gmataa = gmata->a;
      gmataj = gmata->j;

    } else {
      /* receive row lengths */
      ierr = MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      /* receive column indices */
      ierr = MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      ierr = PetscMalloc2(nz,&gmataa,nz,&gmataj);CHKERRQ(ierr);
      ierr = MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      /* determine number diagonal and off-diagonal counts */
      ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscCalloc1(m,&ld);CHKERRQ(ierr);
      jj   = 0;
      for (i=0; i<m; i++) {
        for (j=0; j<dlens[i]; j++) {
          if (gmataj[jj] < rstart) ld[i]++;
          if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
          jj++;
        }
      }
      /* receive numerical values */
      ierr = PetscMemzero(gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
      ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
    }
    /* set preallocation */
    for (i=0; i<m; i++) {
      dlens[i] -= olens[i];
    }
    ierr = MatSeqAIJSetPreallocation(mat,0,dlens);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);CHKERRQ(ierr);

    for (i=0; i<m; i++) {
      dlens[i] += olens[i];
    }
    cnt = 0;
    for (i=0; i<m; i++) {
      row  = rstart + i;
      ierr = MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);CHKERRQ(ierr);
      cnt += dlens[i];
    }
    if (rank) {
      ierr = PetscFree2(gmataa,gmataj);CHKERRQ(ierr);
    }
    ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
    ierr = PetscFree(rowners);CHKERRQ(ierr);

    ((Mat_MPIAIJ*)(mat->data))->ld = ld;

    *inmat = mat;
  } else { /* column indices are already set; only need to move over numerical values from process 0 */
    Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
    Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
    mat  = *inmat;
    ierr = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
    if (!rank) {
      /* send numerical values to other processes */
      gmata  = (Mat_SeqAIJ*) gmat->data;
      ierr   = MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);CHKERRQ(ierr);
      gmataa = gmata->a;
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
      }
      nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
    } else {
      /* receive numerical values from process 0 */
      nz   = Ad->nz + Ao->nz;
      ierr = PetscMalloc1(nz,&gmataa);CHKERRQ(ierr); gmataarestore = gmataa;
      ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
    }
    /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
    ld = ((Mat_MPIAIJ*)(mat->data))->ld;
    ad = Ad->a;
    ao = Ao->a;
    if (mat->rmap->n) {
      i  = 0;
      nz = ld[i];                 ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
      nz = Ad->i[i+1] - Ad->i[i]; ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
    }
    for (i=1; i<mat->rmap->n; i++) {
      nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
      nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
    }
    i--;
    if (mat->rmap->n) {
      nz = Ao->i[i+1] - Ao->i[i] - ld[i]; ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
    }
    if (rank) {
      ierr = PetscFree(gmataarestore);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
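/*
   Illustrative call pattern for the routine above (a sketch under stated
   assumptions, not taken from this file): all processes of comm call it
   collectively, mlocal is the number of rows each process is to own, and only
   process 0 needs the assembled SeqAIJ matrix gmat.

      Mat dist;
      ierr = MatDistribute_MPIAIJ(comm,gmat,mlocal,MAT_INITIAL_MATRIX,&dist);CHKERRQ(ierr);
      ... change numerical values of gmat on process 0 ...
      ierr = MatDistribute_MPIAIJ(comm,gmat,mlocal,MAT_REUSE_MATRIX,&dist);CHKERRQ(ierr);
*/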
/*
  Local utility routine that creates a mapping from the global column
  number to the local number in the off-diagonal part of the local
  storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
  a slightly higher hash table cost; without it, it is not scalable (each
  process has an order-N integer array) but is fast to access.
*/
PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       n = aij->B->cmap->n,i;

  PetscFunctionBegin;
  if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    ierr = PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);CHKERRQ(ierr);
  }
#else
  ierr = PetscCalloc1(mat->cmap->N+1,&aij->colmap);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
#endif
  PetscFunctionReturn(0);
}
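/*
   Illustrative lookup using the colmap built above (a sketch; gcol and lcol
   are placeholder names): translate a global column gcol to a local column of
   the off-diagonal block B. The +1/-1 shifts keep 0 free as the "not present"
   marker.

   #if defined(PETSC_USE_CTABLE)
      ierr = PetscTableFind(aij->colmap,gcol+1,&lcol);CHKERRQ(ierr);
      lcol--;
   #else
      lcol = aij->colmap[gcol] - 1;
   #endif
      if (lcol < 0) { ... column gcol has no allocated entry in B ... }
*/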
#define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol) \
{ \
  if (col <= lastcol1) low1 = 0; \
  else high1 = nrow1; \
  lastcol1 = col; \
  while (high1-low1 > 5) { \
    t = (low1+high1)/2; \
    if (rp1[t] > col) high1 = t; \
    else low1 = t; \
  } \
  for (_i=low1; _i<high1; _i++) { \
    if (rp1[_i] > col) break; \
    if (rp1[_i] == col) { \
      if (addv == ADD_VALUES) ap1[_i] += value; \
      else ap1[_i] = value; \
      goto a_noinsert; \
    } \
  } \
  if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
  if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
  if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
  MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
  N = nrow1++ - 1; a->nz++; high1++; \
  /* shift up all the later entries in this row */ \
  for (ii=N; ii>=_i; ii--) { \
    rp1[ii+1] = rp1[ii]; \
    ap1[ii+1] = ap1[ii]; \
  } \
  rp1[_i] = col; \
  ap1[_i] = value; \
  A->nonzerostate++; \
a_noinsert: ; \
  ailen[row] = nrow1; \
}

#define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
{ \
  if (col <= lastcol2) low2 = 0; \
  else high2 = nrow2; \
  lastcol2 = col; \
  while (high2-low2 > 5) { \
    t = (low2+high2)/2; \
    if (rp2[t] > col) high2 = t; \
    else low2 = t; \
  } \
  for (_i=low2; _i<high2; _i++) { \
    if (rp2[_i] > col) break; \
    if (rp2[_i] == col) { \
      if (addv == ADD_VALUES) ap2[_i] += value; \
      else ap2[_i] = value; \
      goto b_noinsert; \
    } \
  } \
  if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
  if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
  if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
  MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
  N = nrow2++ - 1; b->nz++; high2++; \
  /* shift up all the later entries in this row */ \
  for (ii=N; ii>=_i; ii--) { \
    rp2[ii+1] = rp2[ii]; \
    ap2[ii+1] = ap2[ii]; \
  } \
  rp2[_i] = col; \
  ap2[_i] = value; \
  B->nonzerostate++; \
b_noinsert: ; \
  bilen[row] = nrow2; \
}

PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
{
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
  PetscErrorCode ierr;
  PetscInt       l,*garray = mat->garray,diag;

  PetscFunctionBegin;
  /* code only works for square matrices A */

  /* find size of row to the left of the diagonal part */
  ierr = MatGetOwnershipRange(A,&diag,0);CHKERRQ(ierr);
  row  = row - diag;
  for (l=0; l<b->i[row+1]-b->i[row]; l++) {
    if (garray[b->j[b->i[row]+l]] > diag) break;
  }
  ierr = PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));CHKERRQ(ierr);

  /* diagonal part */
  ierr = PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));CHKERRQ(ierr);

  /* right of diagonal part */
  ierr = PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
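/*
   Note on the layout v[] must have in MatSetValuesRow_MPIAIJ() above (derived
   from the three copies it performs): the values of the row are expected in
   global column order, i.e. [B entries left of the diagonal block | all A
   entries | B entries right of the diagonal block], matching the existing
   nonzero pattern exactly; no searching or insertion is done.
*/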
PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscScalar    value;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
  PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
  PetscBool      roworiented = aij->roworiented;

  /* Some Variables required in the macro */
  Mat        A                    = aij->A;
  Mat_SeqAIJ *a                   = (Mat_SeqAIJ*)A->data;
  PetscInt   *aimax               = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
  MatScalar  *aa                  = a->a;
  PetscBool  ignorezeroentries    = a->ignorezeroentries;
  Mat        B                    = aij->B;
  Mat_SeqAIJ *b                   = (Mat_SeqAIJ*)B->data;
  PetscInt   *bimax               = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
  MatScalar  *ba                  = b->a;

  PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
  PetscInt  nonew;
  MatScalar *ap1,*ap2;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row      = im[i] - rstart;
      lastcol1 = -1;
      rp1      = aj + ai[row];
      ap1      = aa + ai[row];
      rmax1    = aimax[row];
      nrow1    = ailen[row];
      low1     = 0;
      high1    = nrow1;
      lastcol2 = -1;
      rp2      = bj + bi[row];
      ap2      = ba + bi[row];
      rmax2    = bimax[row];
      nrow2    = bilen[row];
      low2     = 0;
      high2    = nrow2;

      for (j=0; j<n; j++) {
        if (roworiented) value = v[i*n+j];
        else             value = v[i+j*m];
        if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
        if (in[j] >= cstart && in[j] < cend) {
          col   = in[j] - cstart;
          nonew = a->nonew;
          MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
#endif
        else {
          if (mat->was_assembled) {
            if (!aij->colmap) {
              ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col--;
#else
            col = aij->colmap[in[j]] - 1;
#endif
            if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
              ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
              /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
              B     = aij->B;
              b     = (Mat_SeqAIJ*)B->data;
              bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
              rp2   = bj + bi[row];
              ap2   = ba + bi[row];
              rmax2 = bimax[row];
              nrow2 = bilen[row];
              low2  = 0;
              high2 = nrow2;
              bm    = aij->B->rmap->n;
              ba    = b->a;
            } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
          } else col = in[j];
          nonew = b->nonew;
          MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
        }
      }
    } else {
      if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
      if (!aij->donotstash) {
        mat->assembled = PETSC_FALSE;
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}
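/*
   Illustrative sketch (not from this file): entries whose row is owned by
   another process are stashed by the routine above and communicated only at
   assembly time, so the usual pattern is

      ierr = MatSetValues(mat,1,&grow,nc,gcols,vals,ADD_VALUES);CHKERRQ(ierr);   [grow may be off-process]
      ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   (grow, nc, gcols, vals are placeholders for caller data.)
*/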
PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
  PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
    if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
    if (idxm[i] >= rstart && idxm[i] < rend) {
      row = idxm[i] - rstart;
      for (j=0; j<n; j++) {
        if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
        if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
        if (idxn[j] >= cstart && idxn[j] < cend) {
          col  = idxn[j] - cstart;
          ierr = MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
        } else {
          if (!aij->colmap) {
            ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
          }
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(aij->colmap,idxn[j]+1,&col);CHKERRQ(ierr);
          col--;
#else
          col = aij->colmap[idxn[j]] - 1;
#endif
          if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
          else {
            ierr = MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
          }
        }
      }
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
  }
  PetscFunctionReturn(0);
}
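/*
   Illustrative sketch: as the error branch above shows, only locally owned
   rows can be retrieved, so callers restrict queries to the ownership range:

      PetscInt rstart,rend,row;
      ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
      for (row=rstart; row<rend; row++) {
        ierr = MatGetValues(mat,1,&row,nc,cols,vals);CHKERRQ(ierr);   [nc, cols, vals are placeholders]
      }
*/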
extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       nstash,reallocs;

  PetscFunctionBegin;
  if (aij->donotstash || mat->nooffprocentries) PetscFunctionReturn(0);

  ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
  PetscErrorCode ierr;
  PetscMPIInt    n;
  PetscInt       i,j,rstart,ncols,flg;
  PetscInt       *row,*col;
  PetscBool      other_disassembled;
  PetscScalar    *val;

  /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

  PetscFunctionBegin;
  if (!aij->donotstash && !mat->nooffprocentries) {
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n; ) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) {
          if (row[j] != rstart) break;
        }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        /* Now assemble all these values with a single function call */
        ierr = MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);CHKERRQ(ierr);

        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(aij->A,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(aij->A,mode);CHKERRQ(ierr);

  /* determine if any processor has disassembled; if so we must
     also disassemble ourselves, in order that we may reassemble. */
  /*
     if the nonzero structure of submatrix B cannot change then we know that
     no processor disassembled, thus we can skip this stuff
  */
  if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
    ierr = MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    if (mat->was_assembled && !other_disassembled) {
      ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
    }
  }
  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatSetUpMultiply_MPIAIJ(mat);CHKERRQ(ierr);
  }
  ierr = MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(aij->B,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(aij->B,mode);CHKERRQ(ierr);

  ierr = PetscFree2(aij->rowvalues,aij->rowindices);CHKERRQ(ierr);

  aij->rowvalues = 0;

  ierr = VecDestroy(&aij->diag);CHKERRQ(ierr);
  if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

  /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
  if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
    PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
  ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
{
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ *) A->data;
  PetscInt       *lrows;
  PetscInt       r, len;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* get locally owned rows */
  ierr = MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);CHKERRQ(ierr);
  /* fix right hand side if needed */
  if (x && b) {
    const PetscScalar *xx;
    PetscScalar       *bb;

    ierr = VecGetArrayRead(x, &xx);CHKERRQ(ierr);
    ierr = VecGetArray(b, &bb);CHKERRQ(ierr);
    for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
    ierr = VecRestoreArrayRead(x, &xx);CHKERRQ(ierr);
    ierr = VecRestoreArray(b, &bb);CHKERRQ(ierr);
  }
  /* Must zero l->B before l->A because the (diag) case below may put values into l->B */
  ierr = MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
  if (A->congruentlayouts == -1) { /* first time we compare rows and cols layouts */
    PetscBool cong;
    ierr = PetscLayoutCompare(A->rmap,A->cmap,&cong);CHKERRQ(ierr);
    if (cong) A->congruentlayouts = 1;
    else      A->congruentlayouts = 0;
  }
  if ((diag != 0.0) && A->congruentlayouts) {
    ierr = MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);CHKERRQ(ierr);
  } else if (diag != 0.0) {
    ierr = MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
    if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
    for (r = 0; r < len; ++r) {
      const PetscInt row = lrows[r] + A->rmap->rstart;
      ierr = MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  } else {
    ierr = MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
  }
  ierr = PetscFree(lrows);CHKERRQ(ierr);

  /* only change matrix nonzero state if pattern was allowed to be changed */
  if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
    PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
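/*
   Illustrative call (a sketch): with a nonzero diag and vectors x and b, the
   routine above also fixes the right-hand side as b_i = diag * x_i for every
   zeroed locally owned row i, so the modified system still enforces the
   values supplied in x:

      ierr = MatZeroRows(A,nr,rows,1.0,x,b);CHKERRQ(ierr);   [zeroed rows get a unit diagonal]
*/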
PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
{
  Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
  PetscErrorCode    ierr;
  PetscMPIInt       n = A->rmap->n;
  PetscInt          i,j,r,m,p = 0,len = 0;
  PetscInt          *lrows,*owners = A->rmap->range;
  PetscSFNode       *rrows;
  PetscSF           sf;
  const PetscScalar *xx;
  PetscScalar       *bb,*mask;
  Vec               xmask,lmask;
  Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
  const PetscInt    *aj, *ii,*ridx;
  PetscScalar       *aa;

  PetscFunctionBegin;
  /* Create SF where leaves are input rows and roots are owned rows */
  ierr = PetscMalloc1(n, &lrows);CHKERRQ(ierr);
  for (r = 0; r < n; ++r) lrows[r] = -1;
  ierr = PetscMalloc1(N, &rrows);CHKERRQ(ierr);
  for (r = 0; r < N; ++r) {
    const PetscInt idx = rows[r];
    if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
    if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
      ierr = PetscLayoutFindOwner(A->rmap,idx,&p);CHKERRQ(ierr);
    }
    rrows[r].rank  = p;
    rrows[r].index = rows[r] - owners[p];
  }
  ierr = PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* Collect flags for rows to be zeroed */
  ierr = PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  /* Compress and put in row numbers */
  for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
  /* zero diagonal part of matrix */
  ierr = MatZeroRowsColumns(l->A,len,lrows,diag,x,b);CHKERRQ(ierr);
  /* handle off diagonal part of matrix */
  ierr = MatCreateVecs(A,&xmask,NULL);CHKERRQ(ierr);
  ierr = VecDuplicate(l->lvec,&lmask);CHKERRQ(ierr);
  ierr = VecGetArray(xmask,&bb);CHKERRQ(ierr);
  for (i=0; i<len; i++) bb[lrows[i]] = 1;
  ierr = VecRestoreArray(xmask,&bb);CHKERRQ(ierr);
  ierr = VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecDestroy(&xmask);CHKERRQ(ierr);
  if (x) {
    ierr = VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecGetArrayRead(l->lvec,&xx);CHKERRQ(ierr);
    ierr = VecGetArray(b,&bb);CHKERRQ(ierr);
  }
  ierr = VecGetArray(lmask,&mask);CHKERRQ(ierr);
  /* remove zeroed rows of off diagonal matrix */
  ii = aij->i;
  for (i=0; i<len; i++) {
    ierr = PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  /* loop over all elements of off process part of matrix zeroing removed columns */
  if (aij->compressedrow.use) {
    m    = aij->compressedrow.nrows;
    ii   = aij->compressedrow.i;
    ridx = aij->compressedrow.rindex;
    for (i=0; i<m; i++) {
      n  = ii[i+1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij->a + ii[i];

      for (j=0; j<n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[*ridx] -= *aa*xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
      ridx++;
    }
  } else { /* do not use compressed row format */
    m = l->B->rmap->n;
    for (i=0; i<m; i++) {
      n  = ii[i+1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij->a + ii[i];
      for (j=0; j<n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[i] -= *aa*xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
    }
  }
  if (x) {
    ierr = VecRestoreArray(b,&bb);CHKERRQ(ierr);
    ierr = VecRestoreArrayRead(l->lvec,&xx);CHKERRQ(ierr);
  }
  ierr = VecRestoreArray(lmask,&mask);CHKERRQ(ierr);
  ierr = VecDestroy(&lmask);CHKERRQ(ierr);
  ierr = PetscFree(lrows);CHKERRQ(ierr);

  /* only change matrix nonzero state if pattern was allowed to be changed */
  if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
    PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       nt;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultDiagonalBlock(a->A,bb,xx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
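/*
   Note on the multiply kernels above: the ghost-value communication needed by
   the off-diagonal block B is overlapped with the local product, i.e.

      VecScatterBegin(Mvctx,xx,lvec,...)    [start the ghost update]
      yy  = A * xx                          [local (diagonal block) work]
      VecScatterEnd(Mvctx,xx,lvec,...)      [finish the ghost update]
      yy += B * lvec                        [off-process contribution]
*/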
PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscBool      merged;

  PetscFunctionBegin;
  ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  if (!merged) {
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* receive remote parts: note this assumes the values are not actually added in yy until the next line */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* values actually were received in the Begin() but we need to call this nop */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
{
  MPI_Comm       comm;
  Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
  Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
  IS             Me,Notme;
  PetscErrorCode ierr;
  PetscInt       M,N,first,last,*notme,i;
  PetscMPIInt    size;

  PetscFunctionBegin;
  /* Easy test: symmetric diagonal block */
  Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
  ierr = MatIsTranspose(Adia,Bdia,tol,f);CHKERRQ(ierr);
  if (!*f) PetscFunctionReturn(0);
  ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  /* Hard test: off-diagonal block. This takes a MatGetSubMatrix. */
  ierr = MatGetSize(Amat,&M,&N);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Amat,&first,&last);CHKERRQ(ierr);
  ierr = PetscMalloc1(N-last+first,&notme);CHKERRQ(ierr);
  for (i=0; i<first; i++) notme[i] = i;
  for (i=last; i<M; i++) notme[i-last+first] = i;
  ierr = ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);CHKERRQ(ierr);
  ierr = ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);CHKERRQ(ierr);
  ierr = MatGetSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);CHKERRQ(ierr);
  Aoff = Aoffs[0];
  ierr = MatGetSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);CHKERRQ(ierr);
  Boff = Boffs[0];
  ierr = MatIsTranspose(Aoff,Boff,tol,f);CHKERRQ(ierr);
  ierr = MatDestroyMatrices(1,&Aoffs);CHKERRQ(ierr);
  ierr = MatDestroyMatrices(1,&Boffs);CHKERRQ(ierr);
  ierr = ISDestroy(&Me);CHKERRQ(ierr);
  ierr = ISDestroy(&Notme);CHKERRQ(ierr);
  ierr = PetscFree(notme);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  /* send it on its way */
  ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  /* do local part */
  ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  /* receive remote parts */
  ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
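/*
   Note: the transpose kernels above reverse the MatMult pattern: B^T xx is
   computed into lvec first and scattered back with (ADD_VALUES, SCATTER_REVERSE)
   while A^T xx is formed locally, so yy = A_loc^T xx plus the sum of the
   remote B^T xx pieces.
*/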
/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

  PetscFunctionBegin;
  if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
  if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
  ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(a->A,aa);CHKERRQ(ierr);
  ierr = MatScale(a->B,aa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
#endif
  ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
  ierr = VecDestroy(&aij->diag);CHKERRQ(ierr);
  ierr = MatDestroy(&aij->A);CHKERRQ(ierr);
  ierr = MatDestroy(&aij->B);CHKERRQ(ierr);
#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr);
#else
  ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(aij->garray);CHKERRQ(ierr);
  ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr);
  ierr = PetscFree2(aij->rowvalues,aij->rowindices);CHKERRQ(ierr);
  ierr = PetscFree(aij->ld);CHKERRQ(ierr);
  ierr = PetscFree(mat->data);CHKERRQ(ierr);

  ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);CHKERRQ(ierr);
#if defined(PETSC_HAVE_ELEMENTAL)
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);CHKERRQ(ierr);
#endif
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
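/*
   File layout produced by MatView_MPIAIJ_Binary() below, as encoded in the
   writes it performs: a four-entry header (MAT_FILE_CLASSID, M, N, global nz),
   then all row lengths, then all global column indices, then all numerical
   values; each array is gathered to process 0 under flow control and written
   as one stream.
*/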
PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *A   = (Mat_SeqAIJ*)aij->A->data;
  Mat_SeqAIJ     *B   = (Mat_SeqAIJ*)aij->B->data;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
  int            fd;
  PetscInt       nz,header[4],*row_lengths,*range=0,rlen,i;
  PetscInt       nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
  PetscScalar    *column_values;
  PetscInt       message_count,flowcontrolcount;
  FILE           *file;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);CHKERRQ(ierr);
  nz   = A->nz + B->nz;
  ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
  if (!rank) {
    header[0] = MAT_FILE_CLASSID;
    header[1] = mat->rmap->N;
    header[2] = mat->cmap->N;

    ierr = MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    /* get largest number of rows any processor has */
    rlen  = mat->rmap->n;
    range = mat->rmap->range;
    for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
  } else {
    ierr = MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    rlen = mat->rmap->n;
  }

  /* load up the local row counts */
  ierr = PetscMalloc1(rlen+1,&row_lengths);CHKERRQ(ierr);
  for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];

  /* store the row lengths to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      rlen = range[i+1] - range[i];
      ierr = MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(row_lengths);CHKERRQ(ierr);

  /* load up the local column indices */
  nzmax = nz; /* process 0 needs as much space as the largest process needs */
  ierr  = MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  ierr  = PetscMalloc1(nzmax+1,&column_indices);CHKERRQ(ierr);
  cnt   = 0;
  for (i=0; i<mat->rmap->n; i++) {
    for (j=B->i[i]; j<B->i[i+1]; j++) {
      if ((col = garray[B->j[j]]) > cstart) break;
      column_indices[cnt++] = col;
    }
    for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
    for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
  }
  if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

  /* store the column indices to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    MPI_Status status;
    ierr = PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
      ierr = MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(column_indices);CHKERRQ(ierr);

  /* load up the local column values */
  ierr = PetscMalloc1(nzmax+1,&column_values);CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<mat->rmap->n; i++) {
    for (j=B->i[i]; j<B->i[i+1]; j++) {
      if (garray[B->j[j]] > cstart) break;
      column_values[cnt++] = B->a[j];
    }
    for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
    for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
  }
  if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);

  /* store the column values to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    MPI_Status status;
    ierr = PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
      ierr = MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(column_values);CHKERRQ(ierr);

  ierr = PetscViewerBinaryGetInfoPointer(viewer,&file);CHKERRQ(ierr);
  if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
  PetscFunctionReturn(0);
}
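/*
   Illustrative sketch of reaching the binary writer above ("A.dat" is a
   placeholder file name):

      PetscViewer viewer;
      ierr = PetscViewerBinaryOpen(PetscObjectComm((PetscObject)A),"A.dat",FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
      ierr = MatView(A,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
*/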
#include <petscdraw.h>
PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
{
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode    ierr;
  PetscMPIInt       rank = aij->rank,size = aij->size;
  PetscBool         isdraw,iascii,isbinary;
  PetscViewer       sviewer;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      MatInfo   info;
      PetscBool inodes;

      ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
      ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = MatInodeGetInodeSizes(aij->A,NULL,(PetscInt**)&inodes,NULL);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushSynchronized(viewer);CHKERRQ(ierr);
      if (!inodes) {
        ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
                                                  rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
      } else {
        ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
                                                  rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
      }
      ierr = MatGetInfo(aij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
      ierr = MatGetInfo(aij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopSynchronized(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
      ierr = VecScatterView(aij->Mvctx,viewer);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_INFO) {
      PetscInt inodecount,inodelimit,*inodes;
      ierr = MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);CHKERRQ(ierr);
      if (inodes) {
        ierr = PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);CHKERRQ(ierr);
      } else {
        ierr = PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");CHKERRQ(ierr);
      }
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
      PetscFunctionReturn(0);
    }
  } else if (isbinary) {
    if (size == 1) {
      ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
      ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
    } else {
      ierr = MatView_MPIAIJ_Binary(mat,viewer);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  } else if (isdraw) {
    PetscDraw draw;
    PetscBool isnull;
    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
    if (isnull) PetscFunctionReturn(0);
  }

  {
    /* assemble the entire matrix onto first processor */
    Mat        A;
    Mat_SeqAIJ *Aloc;
    PetscInt   M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
    MatScalar  *a;

    ierr = MatCreate(PetscObjectComm((PetscObject)mat),&A);CHKERRQ(ierr);
    if (!rank) {
      ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
    }
    /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);CHKERRQ(ierr);
    ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)A);CHKERRQ(ierr);

    /* copy over the A part */
    Aloc = (Mat_SeqAIJ*)aij->A->data;
    m    = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
    row  = mat->rmap->rstart;
    for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart;
    for (i=0; i<m; i++) {
      ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);CHKERRQ(ierr);
      row++;
      a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
    }
    aj = Aloc->j;
    for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart;

    /* copy over the B part */
    Aloc = (Mat_SeqAIJ*)aij->B->data;
    m    = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
    row  = mat->rmap->rstart;
    ierr = PetscMalloc1(ai[m]+1,&cols);CHKERRQ(ierr);
    ct   = cols;
    for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]];
    for (i=0; i<m; i++) {
      ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);CHKERRQ(ierr);
      row++;
      a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
    }
    ierr = PetscFree(ct);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    /*
       Everyone has to call to draw the matrix since the graphics waits are
       synchronized across all processors that share the PetscDraw object
    */
    ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
      ierr = MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
    }
    ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
    ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscBool      iascii,isdraw,issocket,isbinary;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);CHKERRQ(ierr);
  if (iascii || isdraw || isbinary || issocket) {
    ierr = MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
*/ 1332 Mat A; 1333 Mat_SeqAIJ *Aloc; 1334 PetscInt M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct; 1335 MatScalar *a; 1336 1337 ierr = MatCreate(PetscObjectComm((PetscObject)mat),&A);CHKERRQ(ierr); 1338 if (!rank) { 1339 ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr); 1340 } else { 1341 ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr); 1342 } 1343 /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */ 1344 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 1345 ierr = MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);CHKERRQ(ierr); 1346 ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr); 1347 ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)A);CHKERRQ(ierr); 1348 1349 /* copy over the A part */ 1350 Aloc = (Mat_SeqAIJ*)aij->A->data; 1351 m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a; 1352 row = mat->rmap->rstart; 1353 for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart; 1354 for (i=0; i<m; i++) { 1355 ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);CHKERRQ(ierr); 1356 row++; 1357 a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i]; 1358 } 1359 aj = Aloc->j; 1360 for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart; 1361 1362 /* copy over the B part */ 1363 Aloc = (Mat_SeqAIJ*)aij->B->data; 1364 m = aij->B->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a; 1365 row = mat->rmap->rstart; 1366 ierr = PetscMalloc1(ai[m]+1,&cols);CHKERRQ(ierr); 1367 ct = cols; 1368 for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]]; 1369 for (i=0; i<m; i++) { 1370 ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);CHKERRQ(ierr); 1371 row++; 1372 a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i]; 1373 } 1374 ierr = PetscFree(ct);CHKERRQ(ierr); 1375 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1376 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1377 /* 1378 Everyone has to call to draw the matrix since the graphics waits are 1379 synchronized across all processors that share the PetscDraw object 1380 */ 1381 ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); 1382 if (!rank) { 1383 ierr = PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr); 1384 ierr = MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr); 1385 } 1386 ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); 1387 ierr = PetscViewerFlush(viewer);CHKERRQ(ierr); 1388 ierr = MatDestroy(&A);CHKERRQ(ierr); 1389 } 1390 PetscFunctionReturn(0); 1391 } 1392 1393 PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer) 1394 { 1395 PetscErrorCode ierr; 1396 PetscBool iascii,isdraw,issocket,isbinary; 1397 1398 PetscFunctionBegin; 1399 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr); 1400 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr); 1401 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr); 1402 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);CHKERRQ(ierr); 1403 if (iascii || isdraw || isbinary || issocket) { 1404 ierr = MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr); 1405 } 1406 PetscFunctionReturn(0); 1407 } 1408 1409 PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx) 1410 { 1411 Mat_MPIAIJ *mat = (Mat_MPIAIJ*)matin->data; 1412 PetscErrorCode ierr; 1413 Vec bb1 = 0; 1414 PetscBool hasop; 1415 
  PetscFunctionBegin;
  if (flag == SOR_APPLY_UPPER) {
    ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
    ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
  }

  if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }

    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }
    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }
    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else if (flag & SOR_EISENSTAT) {
    Vec xx1;

    ierr = VecDuplicate(bb,&xx1);CHKERRQ(ierr);
    ierr = (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);CHKERRQ(ierr);

    ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    if (!mat->diag) {
      ierr = MatCreateVecs(matin,&mat->diag,NULL);CHKERRQ(ierr);
      ierr = MatGetDiagonal(matin,mat->diag);CHKERRQ(ierr);
    }
    ierr = MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);CHKERRQ(ierr);
    if (hasop) {
      ierr = MatMultDiagonalBlock(matin,xx,bb1);CHKERRQ(ierr);
    } else {
      ierr = VecPointwiseMult(bb1,mat->diag,xx);CHKERRQ(ierr);
    }
    ierr = VecAYPX(bb1,(omega-2.0)/omega,bb);CHKERRQ(ierr);

    ierr = MatMultAdd(mat->B,mat->lvec,bb1,bb1);CHKERRQ(ierr);

    /* local sweep */
    ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);CHKERRQ(ierr);
    ierr = VecAXPY(xx,1.0,xx1);CHKERRQ(ierr);
    ierr = VecDestroy(&xx1);CHKERRQ(ierr);
  } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

  ierr = VecDestroy(&bb1);CHKERRQ(ierr);

  matin->factorerrortype = mat->A->factorerrortype;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
{
  Mat            aA,aB,Aperm;
  const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
  PetscScalar    *aa,*ba;
  PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
  PetscSF        rowsf,sf;
  IS             parcolp = NULL;
  PetscBool      done;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);
  ierr = ISGetIndices(rowp,&rwant);CHKERRQ(ierr);
  ierr = ISGetIndices(colp,&cwant);CHKERRQ(ierr);
  ierr = PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);CHKERRQ(ierr);

  /* Invert row permutation to find out where my rows should go */
  ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);CHKERRQ(ierr);
  ierr = PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(rowsf);CHKERRQ(ierr);
  for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
  ierr = PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);CHKERRQ(ierr);

  /* Invert column permutation to find out where my columns should go */
  ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
  ierr = PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);

  ierr = ISRestoreIndices(rowp,&rwant);CHKERRQ(ierr);
  ierr = ISRestoreIndices(colp,&cwant);CHKERRQ(ierr);
  ierr = MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);CHKERRQ(ierr);

  /* Find out where my gcols should go */
  ierr = MatGetSize(aB,NULL,&ng);CHKERRQ(ierr);
  ierr = PetscMalloc1(ng,&gcdest);CHKERRQ(ierr);
  ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);

  ierr = PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);CHKERRQ(ierr);
  ierr = MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);CHKERRQ(ierr);
  ierr = MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    PetscInt row = rdest[i],rowner;
    ierr = PetscLayoutFindOwner(A->rmap,row,&rowner);CHKERRQ(ierr);
    for (j=ai[i]; j<ai[i+1]; j++) {
      PetscInt cowner,col = cdest[aj[j]];
      ierr = PetscLayoutFindOwner(A->cmap,col,&cowner);CHKERRQ(ierr); /* Could build an index for the columns to eliminate this search */
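      /* An entry counts toward the "diagonal" preallocation when the permuted row
         and the permuted column are owned by the same process, and toward the
         "off-diagonal" preallocation otherwise. */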
      if (rowner == cowner) dnnz[i]++;
      else onnz[i]++;
    }
    for (j=bi[i]; j<bi[i+1]; j++) {
      PetscInt cowner,col = gcdest[bj[j]];
      ierr = PetscLayoutFindOwner(A->cmap,col,&cowner);CHKERRQ(ierr);
      if (rowner == cowner) dnnz[i]++;
      else onnz[i]++;
    }
  }
  ierr = PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&rowsf);CHKERRQ(ierr);

  ierr = MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);CHKERRQ(ierr);
  ierr = MatSeqAIJGetArray(aA,&aa);CHKERRQ(ierr);
  ierr = MatSeqAIJGetArray(aB,&ba);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
    PetscInt j0,rowlen;
    rowlen = ai[i+1] - ai[i];
    for (j0=j=0; j<rowlen; j0=j) { /* rowlen could be larger than number of rows m, so sum in batches */
      for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
      ierr = MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);CHKERRQ(ierr);
    }
    rowlen = bi[i+1] - bi[i];
    for (j0=j=0; j<rowlen; j0=j) {
      for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
      ierr = MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);CHKERRQ(ierr);
  ierr = MatSeqAIJRestoreArray(aA,&aa);CHKERRQ(ierr);
  ierr = MatSeqAIJRestoreArray(aB,&ba);CHKERRQ(ierr);
  ierr = PetscFree4(dnnz,onnz,tdnnz,tonnz);CHKERRQ(ierr);
  ierr = PetscFree3(work,rdest,cdest);CHKERRQ(ierr);
  ierr = PetscFree(gcdest);CHKERRQ(ierr);
  if (parcolp) {ierr = ISDestroy(&colp);CHKERRQ(ierr);}
  *B = Aperm;
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetSize(aij->B,NULL,nghosts);CHKERRQ(ierr);
  if (ghosts) *ghosts = aij->garray;
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
{
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
  Mat            A = mat->A,B = mat->B;
  PetscErrorCode ierr;
  PetscReal      isend[5],irecv[5];

  PetscFunctionBegin;
  info->block_size = 1.0;
  ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);

  isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
  isend[3] = info->memory;  isend[4] = info->mallocs;

  ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);

  isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
  isend[3] += info->memory;  isend[4] += info->mallocs;
  if (flag == MAT_LOCAL) {
    info->nz_used      = isend[0];
    info->nz_allocated = isend[1];
    info->nz_unneeded  = isend[2];
    info->memory       = isend[3];
    info->mallocs      = isend[4];
  } else if (flag == MAT_GLOBAL_MAX) {
    ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr);

    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  } else if (flag == MAT_GLOBAL_SUM) {
    ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr);

    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  }
  info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
  info->fill_ratio_needed = 0;
  info->factor_mallocs    = 0;
  PetscFunctionReturn(0);
}

PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_NEW_NONZERO_LOCATIONS:
  case MAT_NEW_NONZERO_ALLOCATION_ERR:
  case MAT_UNUSED_NONZERO_LOCATION_ERR:
  case MAT_KEEP_NONZERO_PATTERN:
  case MAT_NEW_NONZERO_LOCATION_ERR:
  case MAT_USE_INODES:
  case MAT_IGNORE_ZERO_ENTRIES:
    MatCheckPreallocated(A,1);
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
    break;
  case MAT_ROW_ORIENTED:
    MatCheckPreallocated(A,1);
    a->roworiented = flg;

    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
    break;
  case MAT_NEW_DIAGONALS:
    ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
    break;
  case MAT_IGNORE_OFF_PROC_ENTRIES:
    a->donotstash = flg;
    break;
  case MAT_SPD:
    A->spd_set = PETSC_TRUE;
    A->spd     = flg;
    if (flg) {
      A->symmetric                  = PETSC_TRUE;
      A->structurally_symmetric     = PETSC_TRUE;
      A->symmetric_set              = PETSC_TRUE;
      A->structurally_symmetric_set = PETSC_TRUE;
    }
    break;
  case MAT_SYMMETRIC:
    MatCheckPreallocated(A,1);
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    break;
  case MAT_STRUCTURALLY_SYMMETRIC:
    MatCheckPreallocated(A,1);
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    break;
  case MAT_HERMITIAN:
    MatCheckPreallocated(A,1);
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    break;
  case MAT_SYMMETRY_ETERNAL:
    MatCheckPreallocated(A,1);
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    break;
  case MAT_SUBMAT_SINGLEIS:
    A->submat_singleis = flg;
    break;
  default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
  PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
  PetscErrorCode ierr;
  PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
  PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
  PetscInt       *cmap,*idx_p;

  PetscFunctionBegin;
  if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
  mat->getrowactive = PETSC_TRUE;

  if (!mat->rowvalues && (idx || v)) {
    /* allocate enough space to hold information from the longest row */
    Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
    PetscInt   max = 1,tmp;
    for (i=0; i<matin->rmap->n; i++) {
      tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
      if (max < tmp) max = tmp;
    }
    ierr = PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);CHKERRQ(ierr);
  }

  if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
  lrow = row - rstart;

  pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
  if (!v)   {pvA = 0; pvB = 0;}
  if (!idx) {pcA = 0; if (!v) pcB = 0;}
  ierr  = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr  = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  nztot = nzA + nzB;

  cmap = mat->garray;
  if (v || idx) {
    if (nztot) {
      /* Sort by increasing column numbers, assuming A and B already sorted */
      PetscInt imark = -1;
      if (v) {
        *v = v_p = mat->rowvalues;
        for (i=0; i<nzB; i++) {
          if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
          else break;
        }
        imark = i;
        for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
        for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
      }
      if (idx) {
        *idx = idx_p = mat->rowindices;
        if (imark > -1) {
          for (i=0; i<imark; i++) {
            idx_p[i] = cmap[cworkB[i]];
          }
        } else {
          for (i=0; i<nzB; i++) {
            if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
            else break;
          }
          imark = i;
        }
        for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
        for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
      }
    } else {
      if (idx) *idx = 0;
      if (v)   *v   = 0;
    }
  }
  *nz  = nztot;
  ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;

  PetscFunctionBegin;
  if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
  aij->getrowactive = PETSC_FALSE;
  PetscFunctionReturn(0);
}

PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
{
  Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
  PetscErrorCode ierr;
  PetscInt       i,j,cstart = mat->cmap->rstart;
  PetscReal      sum = 0.0;
  MatScalar      *v;

  PetscFunctionBegin;
  if (aij->size == 1) {
    ierr = MatNorm(aij->A,type,norm);CHKERRQ(ierr);
  } else {
    if (type == NORM_FROBENIUS) {
      v = amat->a;
      for (i=0; i<amat->nz; i++) {
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
      }
      v = bmat->a;
      for (i=0; i<bmat->nz; i++) {
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
      }
      ierr  = MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      *norm = PetscSqrtReal(*norm);
      ierr  = PetscLogFlops(2*amat->nz+2*bmat->nz);CHKERRQ(ierr);
    } else if (type == NORM_1) { /* max column norm */
      PetscReal *tmp,*tmp2;
      PetscInt  *jj,*garray = aij->garray;
      ierr  = PetscCalloc1(mat->cmap->N+1,&tmp);CHKERRQ(ierr);
      ierr  = PetscMalloc1(mat->cmap->N+1,&tmp2);CHKERRQ(ierr);
      *norm = 0.0;
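      /* NORM_1 is the maximum column sum: |a_ij| is accumulated into tmp[] indexed by
         global column (garray maps local off-diagonal columns back to global ones),
         the partial column sums are added across processes, and the maximum is taken. */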
      v = amat->a; jj = amat->j;
      for (j=0; j<amat->nz; j++) {
        tmp[cstart + *jj++] += PetscAbsScalar(*v); v++;
      }
      v = bmat->a; jj = bmat->j;
      for (j=0; j<bmat->nz; j++) {
        tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
      }
      ierr = MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      for (j=0; j<mat->cmap->N; j++) {
        if (tmp2[j] > *norm) *norm = tmp2[j];
      }
      ierr = PetscFree(tmp);CHKERRQ(ierr);
      ierr = PetscFree(tmp2);CHKERRQ(ierr);
      ierr = PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));CHKERRQ(ierr);
    } else if (type == NORM_INFINITY) { /* max row norm */
      PetscReal ntemp = 0.0;
      for (j=0; j<aij->A->rmap->n; j++) {
        v   = amat->a + amat->i[j];
        sum = 0.0;
        for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
          sum += PetscAbsScalar(*v); v++;
        }
        v = bmat->a + bmat->i[j];
        for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
          sum += PetscAbsScalar(*v); v++;
        }
        if (sum > ntemp) ntemp = sum;
      }
      ierr = MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));CHKERRQ(ierr);
    } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
{
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *Aloc = (Mat_SeqAIJ*)a->A->data,*Bloc = (Mat_SeqAIJ*)a->B->data;
  PetscErrorCode ierr;
  PetscInt       M = A->rmap->N,N = A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i;
  PetscInt       cstart = A->cmap->rstart,ncol;
  Mat            B;
  MatScalar      *array;

  PetscFunctionBegin;
  if (reuse == MAT_INPLACE_MATRIX && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");

  ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
  ai = Aloc->i; aj = Aloc->j;
  bi = Bloc->i; bj = Bloc->j;
  if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
    PetscInt             *d_nnz,*g_nnz,*o_nnz;
    PetscSFNode          *oloc;
    PETSC_UNUSED PetscSF sf;

    ierr = PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);CHKERRQ(ierr);
    /* compute d_nnz for preallocation */
    ierr = PetscMemzero(d_nnz,na*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=0; i<ai[ma]; i++) {
      d_nnz[aj[i]]++;
      aj[i] += cstart; /* global col index to be used by MatSetValues() */
    }
    /* compute local off-diagonal contributions */
    ierr = PetscMemzero(g_nnz,nb*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
    /* map those to global */
    ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
    ierr = PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);CHKERRQ(ierr);
    ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
    ierr = PetscMemzero(o_nnz,na*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);CHKERRQ(ierr);
    ierr = PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);CHKERRQ(ierr);
    ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);

    ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
    ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));CHKERRQ(ierr);
    ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
    ierr = PetscFree4(d_nnz,o_nnz,g_nnz,oloc);CHKERRQ(ierr);
  } else {
    B    = *matout;
    ierr = MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
    for (i=0; i<ai[ma]; i++) aj[i] += cstart; /* global col index to be used by MatSetValues() */
  }

  /* copy over the A part */
  array = Aloc->a;
  row   = A->rmap->rstart;
  for (i=0; i<ma; i++) {
    ncol = ai[i+1]-ai[i];
    ierr = MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
    row++;
    array += ncol; aj += ncol;
  }
  aj = Aloc->j;
  for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore the local col indices */

  /* copy over the B part */
  ierr  = PetscCalloc1(bi[mb],&cols);CHKERRQ(ierr);
  array = Bloc->a;
  row   = A->rmap->rstart;
  for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
  cols_tmp = cols;
  for (i=0; i<mb; i++) {
    ncol = bi[i+1]-bi[i];
    ierr = MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
    row++;
    array += ncol; cols_tmp += ncol;
  }
  ierr = PetscFree(cols);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
    *matout = B;
  } else {
    ierr = MatHeaderMerge(A,&B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  Mat            a = aij->A,b = aij->B;
  PetscErrorCode ierr;
  PetscInt       s1,s2,s3;

  PetscFunctionBegin;
  ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
  if (rr) {
    ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
    if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
    /* Overlap communication with computation. */
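    /* The forward scatter of rr started here completes only after the diagonal block
       has been scaled below, so the communication overlaps that local work. */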
    ierr = VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  }
  if (ll) {
    ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
    if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
    ierr = (*b->ops->diagonalscale)(b,ll,0);CHKERRQ(ierr);
  }
  /* scale the diagonal block */
  ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);

  if (rr) {
    /* Do a scatter end and then right scale the off-diagonal block */
    ierr = VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = (*b->ops->diagonalscale)(b,0,aij->lvec);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool *flag)
{
  Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
  Mat            a,b,c,d;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  a = matA->A; b = matA->B;
  c = matB->A; d = matB->B;

  ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
  }
  ierr = MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;

  PetscFunctionBegin;
  /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
  if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
    /* because of the column compression in the off-processor part of the matrix a->B,
       the number of columns in a->B and b->B may be different, hence we cannot call
       MatCopy() directly on the two parts. If need be, we can provide a more
       efficient copy than MatCopy_Basic() by first uncompressing the a->B matrices
       and then copying the submatrices */
    ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr);
  } else {
    ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr);
    ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatSetUp_MPIAIJ(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
   Computes the number of nonzeros per row needed for preallocation when X and Y
   have different nonzero structure.
*/
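/*
   For example (hypothetical row data, only to illustrate the merge): if row i of X has
   global columns {0,3,5} and row i of Y has {3,4}, the union is {0,3,4,5} and the loop
   below yields nnz[i] = 4, counting the shared column 3 only once.
*/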
PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
{
  PetscInt i,j,k,nzx,nzy;

  PetscFunctionBegin;
  /* Set the number of nonzeros in the new matrix */
  for (i=0; i<m; i++) {
    const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
    nzx    = xi[i+1] - xi[i];
    nzy    = yi[i+1] - yi[i];
    nnz[i] = 0;
    for (j=0,k=0; j<nzx; j++) {                                   /* Point in X */
      for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
      if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
      nnz[i]++;
    }
    for (; k<nzy; k++) nnz[i]++;
  }
  PetscFunctionReturn(0);
}

/* This is the same as MatAXPYGetPreallocation_SeqAIJ(), except that the local-to-global map is provided */
static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
{
  PetscErrorCode ierr;
  PetscInt       m = Y->rmap->N;
  Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
  Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;

  PetscFunctionBegin;
  ierr = MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
  PetscBLASInt   bnz,one = 1;
  Mat_SeqAIJ     *x,*y;

  PetscFunctionBegin;
  if (str == SAME_NONZERO_PATTERN) {
    PetscScalar alpha = a;
    x    = (Mat_SeqAIJ*)xx->A->data;
    ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
    y    = (Mat_SeqAIJ*)yy->A->data;
    PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
    x    = (Mat_SeqAIJ*)xx->B->data;
    y    = (Mat_SeqAIJ*)yy->B->data;
    ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
    PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
    ierr = PetscObjectStateIncrease((PetscObject)Y);CHKERRQ(ierr);
  } else if (str == SUBSET_NONZERO_PATTERN) { /* the nonzero pattern of X is a subset of Y's */
    ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr);
  } else {
    Mat      B;
    PetscInt *nnz_d,*nnz_o;
    ierr = PetscMalloc1(yy->A->rmap->N,&nnz_d);CHKERRQ(ierr);
    ierr = PetscMalloc1(yy->B->rmap->N,&nnz_o);CHKERRQ(ierr);
    ierr = MatCreate(PetscObjectComm((PetscObject)Y),&B);CHKERRQ(ierr);
    ierr = PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);CHKERRQ(ierr);
    ierr = MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);CHKERRQ(ierr);
    ierr = MatSetBlockSizesFromMats(B,Y,Y);CHKERRQ(ierr);
    ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);CHKERRQ(ierr);
    ierr = MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);CHKERRQ(ierr);
    ierr = MatAXPY_BasicWithPreallocation(B,Y,a,X,str);CHKERRQ(ierr);
    ierr = MatHeaderReplace(Y,&B);CHKERRQ(ierr);
    ierr = PetscFree(nnz_d);CHKERRQ(ierr);
    ierr = PetscFree(nnz_o);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatConjugate_SeqAIJ(Mat);

PetscErrorCode MatConjugate_MPIAIJ(Mat mat)
{
#if defined(PETSC_USE_COMPLEX)
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscFunctionBegin;
  ierr = MatConjugate_SeqAIJ(aij->A);CHKERRQ(ierr);
  ierr = MatConjugate_SeqAIJ(aij->B);CHKERRQ(ierr);
#else
  PetscFunctionBegin;
#endif
  PetscFunctionReturn(0);
}

PetscErrorCode MatRealPart_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatRealPart(a->A);CHKERRQ(ierr);
  ierr = MatRealPart(a->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatImaginaryPart(a->A);CHKERRQ(ierr);
  ierr = MatImaginaryPart(a->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       i,*idxb = 0;
  PetscScalar    *va,*vb;
  Vec            vtmp;

  PetscFunctionBegin;
  ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
  ierr = VecGetArray(v,&va);CHKERRQ(ierr);
  if (idx) {
    for (i=0; i<A->rmap->n; i++) {
      if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
    }
  }

  ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
  if (idx) {
    ierr = PetscMalloc1(A->rmap->n,&idxb);CHKERRQ(ierr);
  }
  ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
  ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);

  for (i=0; i<A->rmap->n; i++) {
    if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
      va[i] = vb[i];
      if (idx) idx[i] = a->garray[idxb[i]];
    }
  }

  ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
  ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
  ierr = PetscFree(idxb);CHKERRQ(ierr);
  ierr = VecDestroy(&vtmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       i,*idxb = 0;
  PetscScalar    *va,*vb;
  Vec            vtmp;

  PetscFunctionBegin;
  ierr = MatGetRowMinAbs(a->A,v,idx);CHKERRQ(ierr);
  ierr = VecGetArray(v,&va);CHKERRQ(ierr);
  if (idx) {
    for (i=0; i<A->rmap->n; i++) {
      if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
    }
  }

  ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
  if (idx) {
    ierr = PetscMalloc1(A->rmap->n,&idxb);CHKERRQ(ierr);
  }
  ierr = MatGetRowMinAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
  ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);

  for (i=0; i<A->rmap->n; i++) {
    if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
      va[i] = vb[i];
      if (idx) idx[i] = a->garray[idxb[i]];
    }
  }

  ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
  ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
  ierr = PetscFree(idxb);CHKERRQ(ierr);
  ierr = VecDestroy(&vtmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
{
  Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
  PetscInt       n      = A->rmap->n;
  PetscInt       cstart = A->cmap->rstart;
  PetscInt       *cmap  = mat->garray;
  PetscInt       *diagIdx, *offdiagIdx;
  Vec            diagV, offdiagV;
  PetscScalar    *a, *diagA, *offdiagA;
  PetscInt       r;
  PetscErrorCode ierr;

  PetscFunctionBegin;
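  /* Row minima are taken over the diagonal and off-diagonal blocks separately and the
     two candidates are then merged below by comparing magnitudes; indices coming from
     the off-diagonal block are mapped back to global columns through garray. */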
  ierr = PetscMalloc2(n,&diagIdx,n,&offdiagIdx);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
  ierr = MatGetRowMin(mat->A, diagV, diagIdx);CHKERRQ(ierr);
  ierr = MatGetRowMin(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
  ierr = VecGetArray(v, &a);CHKERRQ(ierr);
  ierr = VecGetArray(diagV, &diagA);CHKERRQ(ierr);
  ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
  for (r = 0; r < n; ++r) {
    if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
      a[r]   = diagA[r];
      idx[r] = cstart + diagIdx[r];
    } else {
      a[r]   = offdiagA[r];
      idx[r] = cmap[offdiagIdx[r]];
    }
  }
  ierr = VecRestoreArray(v, &a);CHKERRQ(ierr);
  ierr = VecRestoreArray(diagV, &diagA);CHKERRQ(ierr);
  ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
  ierr = VecDestroy(&diagV);CHKERRQ(ierr);
  ierr = VecDestroy(&offdiagV);CHKERRQ(ierr);
  ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
{
  Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
  PetscInt       n      = A->rmap->n;
  PetscInt       cstart = A->cmap->rstart;
  PetscInt       *cmap  = mat->garray;
  PetscInt       *diagIdx, *offdiagIdx;
  Vec            diagV, offdiagV;
  PetscScalar    *a, *diagA, *offdiagA;
  PetscInt       r;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscMalloc2(n,&diagIdx,n,&offdiagIdx);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
  ierr = MatGetRowMax(mat->A, diagV, diagIdx);CHKERRQ(ierr);
  ierr = MatGetRowMax(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
  ierr = VecGetArray(v, &a);CHKERRQ(ierr);
  ierr = VecGetArray(diagV, &diagA);CHKERRQ(ierr);
  ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
  for (r = 0; r < n; ++r) {
    if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
      a[r]   = diagA[r];
      idx[r] = cstart + diagIdx[r];
    } else {
      a[r]   = offdiagA[r];
      idx[r] = cmap[offdiagIdx[r]];
    }
  }
  ierr = VecRestoreArray(v, &a);CHKERRQ(ierr);
  ierr = VecRestoreArray(diagV, &diagA);CHKERRQ(ierr);
  ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
  ierr = VecDestroy(&diagV);CHKERRQ(ierr);
  ierr = VecDestroy(&offdiagV);CHKERRQ(ierr);
  ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            *dummy;

  PetscFunctionBegin;
  ierr    = MatGetSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);CHKERRQ(ierr);
  *newmat = *dummy;
  ierr    = PetscFree(dummy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatInvertBlockDiagonal(a->A,values);CHKERRQ(ierr);
  A->factorerrortype = a->A->factorerrortype;
  PetscFunctionReturn(0);
}

static PetscErrorCode MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;

  PetscFunctionBegin;
  ierr = MatSetRandom(aij->A,rctx);CHKERRQ(ierr);
  ierr = MatSetRandom(aij->B,rctx);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
{
  PetscFunctionBegin;
  if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
  else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
  PetscFunctionReturn(0);
}

/*@
   MatMPIAIJSetUseScalableIncreaseOverlap - Determine if the matrix uses a scalable algorithm to compute the overlap

   Collective on Mat

   Input Parameters:
+  A - the matrix
-  sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)

   Level: advanced

@*/
PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
{
  PetscErrorCode ierr;
  PetscBool      sc = PETSC_FALSE,flg;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");CHKERRQ(ierr);
  ierr = PetscObjectOptionsBegin((PetscObject)A);
  if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
  ierr = PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
  Mat_SeqAIJ     *aij  = (Mat_SeqAIJ*)maij->A->data;

  PetscFunctionBegin;
  if (!Y->preallocated) {
    ierr = MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);CHKERRQ(ierr);
  } else if (!aij->nz) {
    PetscInt nonew = aij->nonew;
    ierr = MatSeqAIJSetPreallocation(maij->A,1,NULL);CHKERRQ(ierr);
    aij->nonew = nonew;
  }
  ierr = MatShift_Basic(Y,a);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool *missing,PetscInt *d)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
  ierr = MatMissingDiagonal(a->A,missing,d);CHKERRQ(ierr);
  if (d) {
    PetscInt rstart;
    ierr = MatGetOwnershipRange(A,&rstart,NULL);CHKERRQ(ierr);
    *d  += rstart;
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------*/
static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
                                       MatGetRow_MPIAIJ,
                                       MatRestoreRow_MPIAIJ,
                                       MatMult_MPIAIJ,
                                /* 4*/ MatMultAdd_MPIAIJ,
                                       MatMultTranspose_MPIAIJ,
                                       MatMultTransposeAdd_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                /*10*/ 0,
                                       0,
                                       0,
                                       MatSOR_MPIAIJ,
                                       MatTranspose_MPIAIJ,
                                /*15*/ MatGetInfo_MPIAIJ,
                                       MatEqual_MPIAIJ,
                                       MatGetDiagonal_MPIAIJ,
                                       MatDiagonalScale_MPIAIJ,
                                       MatNorm_MPIAIJ,
                                /*20*/ MatAssemblyBegin_MPIAIJ,
                                       MatAssemblyEnd_MPIAIJ,
                                       MatSetOption_MPIAIJ,
                                       MatZeroEntries_MPIAIJ,
                                /*24*/ MatZeroRows_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*29*/ MatSetUp_MPIAIJ,
                                       0,
                                       0,
                                       MatGetDiagonalBlock_MPIAIJ,
                                       0,
                                /*34*/ MatDuplicate_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*39*/ MatAXPY_MPIAIJ,
                                       MatGetSubMatrices_MPIAIJ,
                                       MatIncreaseOverlap_MPIAIJ,
                                       MatGetValues_MPIAIJ,
                                       MatCopy_MPIAIJ,
                                /*44*/ MatGetRowMax_MPIAIJ,
                                       MatScale_MPIAIJ,
                                       MatShift_MPIAIJ,
                                       MatDiagonalSet_MPIAIJ,
                                       MatZeroRowsColumns_MPIAIJ,
                                /*49*/ MatSetRandom_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*54*/ MatFDColoringCreate_MPIXAIJ,
                                       0,
                                       MatSetUnfactored_MPIAIJ,
                                       MatPermute_MPIAIJ,
                                       0,
                                /*59*/ MatGetSubMatrix_MPIAIJ,
                                       MatDestroy_MPIAIJ,
                                       MatView_MPIAIJ,
                                       0,
                                       MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
                                /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
                                       MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                /*69*/ MatGetRowMaxAbs_MPIAIJ,
                                       MatGetRowMinAbs_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*75*/ MatFDColoringApply_AIJ,
                                       MatSetFromOptions_MPIAIJ,
                                       0,
                                       0,
                                       MatFindZeroDiagonals_MPIAIJ,
                                /*80*/ 0,
                                       0,
                                       0,
                                /*83*/ MatLoad_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
                                       MatMatMultSymbolic_MPIAIJ_MPIAIJ,
                                       MatMatMultNumeric_MPIAIJ_MPIAIJ,
                                       MatPtAP_MPIAIJ_MPIAIJ,
                                       MatPtAPSymbolic_MPIAIJ_MPIAIJ,
                                /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*99*/ 0,
                                       0,
                                       0,
                                       MatConjugate_MPIAIJ,
                                       0,
                                /*104*/MatSetValuesRow_MPIAIJ,
                                       MatRealPart_MPIAIJ,
                                       MatImaginaryPart_MPIAIJ,
                                       0,
                                       0,
                                /*109*/0,
                                       0,
                                       MatGetRowMin_MPIAIJ,
                                       0,
                                       MatMissingDiagonal_MPIAIJ,
                                /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
                                       0,
                                       MatGetGhosts_MPIAIJ,
                                       0,
                                       0,
                                /*119*/0,
                                       0,
                                       0,
                                       0,
                                       MatGetMultiProcBlock_MPIAIJ,
                                /*124*/MatFindNonzeroRows_MPIAIJ,
                                       MatGetColumnNorms_MPIAIJ,
                                       MatInvertBlockDiagonal_MPIAIJ,
                                       0,
                                       MatGetSubMatricesMPI_MPIAIJ,
                                /*129*/0,
                                       MatTransposeMatMult_MPIAIJ_MPIAIJ,
                                       MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
                                       MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
                                       0,
                                /*134*/0,
                                       0,
                                       0,
                                       0,
                                       0,
                                /*139*/MatSetBlockSizes_MPIAIJ,
                                       0,
                                       0,
                                       MatFDColoringSetUp_MPIXAIJ,
                                       MatFindOffBlockDiagonalEntries_MPIAIJ,
                                /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
};

/* ----------------------------------------------------------------------------------------*/

PetscErrorCode MatStoreValues_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
  ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatRetrieveValues_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
  ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  Mat_MPIAIJ     *b;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
  b    = (Mat_MPIAIJ*)B->data;

#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableDestroy(&b->colmap);CHKERRQ(ierr);
#else
  ierr = PetscFree(b->colmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(b->garray);CHKERRQ(ierr);
  ierr = VecDestroy(&b->lvec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&b->Mvctx);CHKERRQ(ierr);

  /* Because B will have been resized we simply destroy it and create a new one each time */
  ierr = MatDestroy(&b->B);CHKERRQ(ierr);
  ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
  ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(b->B,B,B);CHKERRQ(ierr);
  ierr = MatSetType(b->B,MATSEQAIJ);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);

  if (!B->preallocated) {
    ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
    ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
    ierr = MatSetBlockSizesFromMats(b->A,B,B);CHKERRQ(ierr);
    ierr = MatSetType(b->A,MATSEQAIJ);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
  }

  ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
  B->preallocated  = PETSC_TRUE;
  B->was_assembled = PETSC_FALSE;
  B->assembled     = PETSC_FALSE;
  PetscFunctionReturn(0);
}

PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
{
  Mat            mat;
  Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  *newmat = 0;
  ierr = MatCreate(PetscObjectComm((PetscObject)matin),&mat);CHKERRQ(ierr);
  ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(mat,matin,matin);CHKERRQ(ierr);
  ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
  ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
  a    = (Mat_MPIAIJ*)mat->data;

  mat->factortype   = matin->factortype;
  mat->assembled    = PETSC_TRUE;
  mat->insertmode   = NOT_SET_VALUES;
  mat->preallocated = PETSC_TRUE;

  a->size         = oldmat->size;
  a->rank         = oldmat->rank;
  a->donotstash   = oldmat->donotstash;
  a->roworiented  = oldmat->roworiented;
  a->rowindices   = 0;
  a->rowvalues    = 0;
  a->getrowactive = PETSC_FALSE;

  ierr = PetscLayoutReference(matin->rmap,&mat->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutReference(matin->cmap,&mat->cmap);CHKERRQ(ierr);

  if (oldmat->colmap) {
#if defined(PETSC_USE_CTABLE)
    ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
#else
    ierr = PetscMalloc1(mat->cmap->N,&a->colmap);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
#endif
  } else a->colmap = 0;
  if (oldmat->garray) {
    PetscInt len;
    len  = oldmat->B->cmap->n;
    ierr = PetscMalloc1(len+1,&a->garray);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));CHKERRQ(ierr);
    if (len) { ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr); }
  } else a->garray = 0;

  ierr = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);CHKERRQ(ierr);
  ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);CHKERRQ(ierr);
  ierr = PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
  *newmat = mat;
  PetscFunctionReturn(0);
}

PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
{
  PetscScalar    *vals,*svals;
  MPI_Comm       comm;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
  PetscInt       i,nz,j,rstart,rend,mmax,maxnz = 0;
  PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
  PetscInt       *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
  PetscInt       cend,cstart,n,*rowners;
  int            fd;
  PetscInt       bs = newMat->rmap->bs;

  PetscFunctionBegin;
  /* force binary viewer to load .info file if it has not yet done so */
  ierr = PetscViewerSetUp(viewer);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscBinaryRead(fd,(char*)header,4,PETSC_INT);CHKERRQ(ierr);
    if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
    if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
  }

  ierr = PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  if (bs < 0) bs = 1;

  ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
  M = header[1]; N = header[2];

  /* If global sizes are set, check that they are consistent with those given in the file */
  if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows: Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
  if (newMat->cmap->N >= 0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols: Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);

  /* determine ownership of all (block) rows */
  if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows (%d) and block size (%d)",M,bs);
  if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank)); /* PETSC_DECIDE */
  else m = newMat->rmap->n; /* Set by user */

  ierr = PetscMalloc1(size+1,&rowners);CHKERRQ(ierr);
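  /* Gather every process's local row count, then prefix-sum below so that process i
     owns global rows [rowners[i], rowners[i+1]). */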
  ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

  /* First process needs enough room for the process with the most rows */
  if (!rank) {
    mmax = rowners[1];
    for (i=2; i<=size; i++) {
      mmax = PetscMax(mmax, rowners[i]);
    }
  } else mmax = -1; /* unused, but compilers complain */

  rowners[0] = 0;
  for (i=2; i<=size; i++) {
    rowners[i] += rowners[i-1];
  }
  rstart = rowners[rank];
  rend   = rowners[rank+1];

  /* distribute row lengths to all processors */
  ierr = PetscMalloc2(m,&ourlens,m,&offlens);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscBinaryRead(fd,ourlens,m,PETSC_INT);CHKERRQ(ierr);
    ierr = PetscMalloc1(mmax,&rowlengths);CHKERRQ(ierr);
    ierr = PetscCalloc1(size,&procsnz);CHKERRQ(ierr);
    for (j=0; j<m; j++) {
      procsnz[0] += ourlens[j];
    }
    for (i=1; i<size; i++) {
      ierr = PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);CHKERRQ(ierr);
      /* calculate the number of nonzeros on each processor */
      for (j=0; j<rowners[i+1]-rowners[i]; j++) {
        procsnz[i] += rowlengths[j];
      }
      ierr = MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(rowlengths);CHKERRQ(ierr);
  } else {
    ierr = MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);CHKERRQ(ierr);
  }

  if (!rank) {
    /* determine max buffer needed and allocate it */
    maxnz = 0;
    for (i=0; i<size; i++) {
      maxnz = PetscMax(maxnz,procsnz[i]);
    }
    ierr = PetscMalloc1(maxnz,&cols);CHKERRQ(ierr);

    /* read in my part of the matrix column indices */
    nz   = procsnz[0];
    ierr = PetscMalloc1(nz,&mycols);CHKERRQ(ierr);
    ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);

    /* read in everyone else's part and ship it off */
    for (i=1; i<size; i++) {
      nz   = procsnz[i];
      ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
      ierr = MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(cols);CHKERRQ(ierr);
  } else {
    /* determine buffer space needed for message */
    nz = 0;
    for (i=0; i<m; i++) {
      nz += ourlens[i];
    }
    ierr = PetscMalloc1(nz,&mycols);CHKERRQ(ierr);

    /* receive message of column indices */
    ierr = MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);CHKERRQ(ierr);
  }

  /* determine column ownership if matrix is not square */
  if (N != M) {
    if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
    else n = newMat->cmap->n;
    ierr   = MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    cstart = cend - n;
  } else {
    cstart = rstart;
    cend   = rend;
    n      = cend - cstart;
  }

  /* loop over local rows, determining number of off-diagonal entries */
  ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
  jj   = 0;
  for (i=0; i<m; i++) {
    for (j=0; j<ourlens[i]; j++) {
      if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
      jj++;
    }
  }

  for (i=0; i<m; i++) {
    ourlens[i] -= offlens[i];
  }
  ierr = MatSetSizes(newMat,m,n,M,N);CHKERRQ(ierr);

  if (bs > 1) {ierr = MatSetBlockSize(newMat,bs);CHKERRQ(ierr);}

  ierr = MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);CHKERRQ(ierr);

  for (i=0; i<m; i++) {
    ourlens[i] += offlens[i];
  }

  if (!rank) {
    ierr = PetscMalloc1(maxnz+1,&vals);CHKERRQ(ierr);
    /* read in my part of the matrix numerical values */
    nz   = procsnz[0];
    ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);

    /* insert into matrix */
    jj      = rstart;
    smycols = mycols;
    svals   = vals;
    for (i=0; i<m; i++) {
      ierr     = MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
      smycols += ourlens[i];
      svals   += ourlens[i];
      jj++;
    }

    /* read in the other processors' parts and ship them out */
    for (i=1; i<size; i++) {
      nz   = procsnz[i];
      ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
      ierr = MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(procsnz);CHKERRQ(ierr);
  } else {
    /* receive numeric values */
    ierr = PetscMalloc1(nz+1,&vals);CHKERRQ(ierr);

    /* receive message of values */
    ierr = MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);CHKERRQ(ierr);

    /* insert into matrix */
    jj      = rstart;
    smycols = mycols;
    svals   = vals;
    for (i=0; i<m; i++) {
      ierr     = MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
      smycols += ourlens[i];
      svals   += ourlens[i];
      jj++;
    }
  }
  ierr = PetscFree2(ourlens,offlens);CHKERRQ(ierr);
  ierr = PetscFree(vals);CHKERRQ(ierr);
  ierr = PetscFree(mycols);CHKERRQ(ierr);
  ierr = PetscFree(rowners);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* TODO: Not scalable because of ISAllGather() unless getting all columns. */
PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
{
  PetscErrorCode ierr;
  IS             iscol_local;
  PetscInt       csize;

  PetscFunctionBegin;
  ierr = ISGetLocalSize(iscol,&csize);CHKERRQ(ierr);
  if (call == MAT_REUSE_MATRIX) {
    ierr = PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);CHKERRQ(ierr);
    if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
  } else {
    /* check if we are grabbing all columns */
    PetscBool   isstride;
    PetscMPIInt lisstride = 0,gisstride;
    ierr = PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);CHKERRQ(ierr);
    if (isstride) {
      PetscInt start,len,mstart,mlen;
      ierr = ISStrideGetInfo(iscol,&start,NULL);CHKERRQ(ierr);
      ierr = ISGetLocalSize(iscol,&len);CHKERRQ(ierr);
      ierr = MatGetOwnershipRangeColumn(mat,&mstart,&mlen);CHKERRQ(ierr);
      if (mstart == start && mlen-mstart == len) lisstride = 1;
    }
    ierr = MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    if (gisstride) {
      PetscInt N;
      ierr = MatGetSize(mat,NULL,&N);CHKERRQ(ierr);
      ierr = ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);CHKERRQ(ierr);
      ierr = ISSetIdentity(iscol_local);CHKERRQ(ierr);
      ierr = PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");CHKERRQ(ierr);
    } else {
      PetscInt cbs;
      ierr = ISGetBlockSize(iscol,&cbs);CHKERRQ(ierr);
      ierr = ISAllGather(iscol,&iscol_local);CHKERRQ(ierr);
      ierr = ISSetBlockSize(iscol_local,cbs);CHKERRQ(ierr);
    }
  }
  ierr = MatGetSubMatrix_MPIAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);CHKERRQ(ierr);
  if (call == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);CHKERRQ(ierr);
    ierr = ISDestroy(&iscol_local);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,Mat*);
/*
    Not great since it makes two copies of the submatrix: first a SeqAIJ one on each
    process, and then the final result assembled by concatenating the local matrices.
    Writing it directly would be much like MatGetSubMatrices_MPIAIJ().

    Note: This requires a sequential iscol with all indices.
*/
2995 */ 2996 PetscErrorCode MatGetSubMatrix_MPIAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat) 2997 { 2998 PetscErrorCode ierr; 2999 PetscMPIInt rank,size; 3000 PetscInt i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs; 3001 PetscInt *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal,ncol; 3002 PetscBool allcolumns, colflag; 3003 Mat M,Mreuse; 3004 MatScalar *vwork,*aa; 3005 MPI_Comm comm; 3006 Mat_SeqAIJ *aij; 3007 3008 PetscFunctionBegin; 3009 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 3010 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 3011 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 3012 3013 ierr = ISIdentity(iscol,&colflag);CHKERRQ(ierr); 3014 ierr = ISGetLocalSize(iscol,&ncol);CHKERRQ(ierr); 3015 if (colflag && ncol == mat->cmap->N) { 3016 allcolumns = PETSC_TRUE; 3017 ierr = PetscInfo(mat,"Optimizing for obtaining all columns of the matrix\n");CHKERRQ(ierr); 3018 } else { 3019 allcolumns = PETSC_FALSE; 3020 } 3021 if (call == MAT_REUSE_MATRIX) { 3022 ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);CHKERRQ(ierr); 3023 if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse"); 3024 ierr = MatGetSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&allcolumns,&Mreuse);CHKERRQ(ierr); 3025 } else { 3026 ierr = MatGetSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&allcolumns,&Mreuse);CHKERRQ(ierr); 3027 } 3028 3029 /* 3030 m - number of local rows 3031 n - number of columns (same on all processors) 3032 rstart - first row in new global matrix generated 3033 */ 3034 ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr); 3035 ierr = MatGetBlockSizes(Mreuse,&bs,&cbs);CHKERRQ(ierr); 3036 if (call == MAT_INITIAL_MATRIX) { 3037 aij = (Mat_SeqAIJ*)(Mreuse)->data; 3038 ii = aij->i; 3039 jj = aij->j; 3040 3041 /* 3042 Determine the number of non-zeros in the diagonal and off-diagonal 3043 portions of the matrix in order to do correct preallocation 3044 */ 3045 3046 /* first get start and end of "diagonal" columns */ 3047 if (csize == PETSC_DECIDE) { 3048 ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr); 3049 if (mglobal == n) { /* square matrix */ 3050 nlocal = m; 3051 } else { 3052 nlocal = n/size + ((n % size) > rank); 3053 } 3054 } else { 3055 nlocal = csize; 3056 } 3057 ierr = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr); 3058 rstart = rend - nlocal; 3059 if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n); 3060 3061 /* next, compute all the lengths */ 3062 ierr = PetscMalloc1(2*m+1,&dlens);CHKERRQ(ierr); 3063 olens = dlens + m; 3064 for (i=0; i<m; i++) { 3065 jend = ii[i+1] - ii[i]; 3066 olen = 0; 3067 dlen = 0; 3068 for (j=0; j<jend; j++) { 3069 if (*jj < rstart || *jj >= rend) olen++; 3070 else dlen++; 3071 jj++; 3072 } 3073 olens[i] = olen; 3074 dlens[i] = dlen; 3075 } 3076 ierr = MatCreate(comm,&M);CHKERRQ(ierr); 3077 ierr = MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);CHKERRQ(ierr); 3078 ierr = MatSetBlockSizes(M,bs,cbs);CHKERRQ(ierr); 3079 ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr); 3080 ierr = MatMPIAIJSetPreallocation(M,0,dlens,0,olens);CHKERRQ(ierr); 3081 ierr = PetscFree(dlens);CHKERRQ(ierr); 3082 } else { 3083 PetscInt ml,nl; 3084 3085 M = *newmat; 3086 ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr); 3087 if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same 
size/layout as request");
3088    ierr = MatZeroEntries(M);CHKERRQ(ierr);
3089    /*
3090      The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3091      rather than the slower MatSetValues().
3092    */
3093    M->was_assembled = PETSC_TRUE;
3094    M->assembled     = PETSC_FALSE;
3095  }
3096  ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
3097  aij = (Mat_SeqAIJ*)(Mreuse)->data;
3098  ii  = aij->i;
3099  jj  = aij->j;
3100  aa  = aij->a;
3101  for (i=0; i<m; i++) {
3102    row = rstart + i;
3103    nz  = ii[i+1] - ii[i];
3104    cwork = jj; jj += nz;
3105    vwork = aa; aa += nz;
3106    ierr = MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
3107  }
3108
3109  ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3110  ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3111  *newmat = M;
3112
3113  /* save submatrix used in processor for next request */
3114  if (call == MAT_INITIAL_MATRIX) {
3115    ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
3116    ierr = MatDestroy(&Mreuse);CHKERRQ(ierr);
3117  }
3118  PetscFunctionReturn(0);
3119  }
3120
3121  PetscErrorCode MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3122  {
3123    PetscInt       m,cstart,cend,j,nnz,i,d;
3124    PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3125    const PetscInt *JJ;
3126    PetscScalar    *values;
3127    PetscErrorCode ierr;
3128
3129    PetscFunctionBegin;
3130    if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);
3131
3132    ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
3133    ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
3134    m      = B->rmap->n;
3135    cstart = B->cmap->rstart;
3136    cend   = B->cmap->rend;
3137    rstart = B->rmap->rstart;
3138
3139    ierr = PetscMalloc2(m,&d_nnz,m,&o_nnz);CHKERRQ(ierr);
3140
3141  #if defined(PETSC_USE_DEBUG)
3142    for (i=0; i<m; i++) {
3143      nnz = Ii[i+1]- Ii[i];
3144      JJ  = J + Ii[i];
3145      if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3146      if (nnz && (JJ[0] < 0)) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index",i);
3147      if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3148    }
3149  #endif
3150
3151    for (i=0; i<m; i++) {
3152      nnz     = Ii[i+1]- Ii[i];
3153      JJ      = J + Ii[i];
3154      nnz_max = PetscMax(nnz_max,nnz);
3155      d       = 0;
3156      for (j=0; j<nnz; j++) {
3157        if (cstart <= JJ[j] && JJ[j] < cend) d++;
3158      }
3159      d_nnz[i] = d;
3160      o_nnz[i] = nnz - d;
3161    }
3162    ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
3163    ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);
3164
3165    if (v) values = (PetscScalar*)v;
3166    else {
3167      ierr = PetscCalloc1(nnz_max+1,&values);CHKERRQ(ierr);
3168    }
3169
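    /* Insert the CSR data row by row.  When v is NULL the calloc'ed zero buffer is inserted
       instead, so the nonzero pattern given by (Ii,J) is still set in the matrix. */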
3170    for (i=0; i<m; i++) {
3171      ii   = i + rstart;
3172      nnz  = Ii[i+1]- Ii[i];
3173      ierr = MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);CHKERRQ(ierr);
3174    }
3175    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3176    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3177
3178    if (!v) {
3179      ierr = PetscFree(values);CHKERRQ(ierr);
3180    }
3181    ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
3182    PetscFunctionReturn(0);
3183  }
3184
3185  /*@
3186     MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3187     (the default parallel PETSc format).
3188
3189     Collective on MPI_Comm
3190
3191     Input Parameters:
3192  +  B - the matrix
3193  .  i - the indices into j for the start of each local row (starts with zero)
3194  .  j - the column indices for each local row (starts with zero)
3195  -  v - optional values in the matrix
3196
3197     Level: developer
3198
3199     Notes:
3200     The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3201     thus you CANNOT change the matrix entries by changing the values of v[] after you have
3202     called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3203
3204     The i and j indices are 0 based, and the i indices are offsets into the local j array.
3205
3206     The format used for the sparse matrix input is equivalent to a
3207     row-major ordering, i.e., for the following matrix, the input data expected is
3208     as shown
3209
3210  $        1 0 0
3211  $        2 0 3   P0
3212  $       -------
3213  $        4 5 6   P1
3214  $
3215  $  Process0 [P0]: rows_owned=[0,1]
3216  $    i = {0,1,3}  [size = nrow+1 = 2+1]
3217  $    j = {0,0,2}  [size = 3]
3218  $    v = {1,2,3}  [size = 3]
3219  $
3220  $  Process1 [P1]: rows_owned=[2]
3221  $    i = {0,3}    [size = nrow+1 = 1+1]
3222  $    j = {0,1,2}  [size = 3]
3223  $    v = {4,5,6}  [size = 3]
3224
3225  .keywords: matrix, aij, compressed row, sparse, parallel
3226
3227  .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3228            MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3229  @*/
3230  PetscErrorCode MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3231  {
3232    PetscErrorCode ierr;
3233
3234    PetscFunctionBegin;
3235    ierr = PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));CHKERRQ(ierr);
3236    PetscFunctionReturn(0);
3237  }
3238
3239  /*@C
3240     MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3241     (the default parallel PETSc format). For good matrix assembly performance
3242     the user should preallocate the matrix storage by setting the parameters
3243     d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3244     performance can be increased by more than a factor of 50.
3245
3246     Collective on MPI_Comm
3247
3248     Input Parameters:
3249  +  B - the matrix
3250  .  d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3251            (same value is used for all local rows)
3252  .  d_nnz - array containing the number of nonzeros in the various rows of the
3253             DIAGONAL portion of the local submatrix (possibly different for each row)
3254             or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3255             The size of this array is equal to the number of local rows, i.e., 'm'.
3256             For matrices that will be factored, you must leave room for (and set)
3257             the diagonal entry even if it is zero.
3258  .  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3259            submatrix (same value is used for all local rows).
3260  -  o_nnz - array containing the number of nonzeros in the various rows of the
3261             OFF-DIAGONAL portion of the local submatrix (possibly different for
3262             each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3263             structure. The size of this array is equal to the number
3264             of local rows, i.e., 'm'.
3265
3266     If the *_nnz parameter is given then the *_nz parameter is ignored
3267
3268     The AIJ format (also called the Yale sparse matrix format or
3269     compressed row storage (CSR)), is fully compatible with standard Fortran 77
3270     storage. The stored row and column indices begin with zero.
3271     See Users-Manual: ch_mat for details.
3272
3273     The parallel matrix is partitioned such that the first m0 rows belong to
3274     process 0, the next m1 rows belong to process 1, the next m2 rows belong
3275     to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.
3276
3277     The DIAGONAL portion of the local submatrix of a processor can be defined
3278     as the submatrix which is obtained by extracting the part corresponding to
3279     the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3280     first row that belongs to the processor, r2 is the last row belonging to
3281     this processor, and c1-c2 is the range of indices of the local part of a
3282     vector suitable for applying the matrix to. This is an m x n matrix. In the
3283     common case of a square matrix, the row and column ranges are the same and
3284     the DIAGONAL part is also square. The remaining portion of the local
3285     submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.
3286
3287     If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3288
3289     You can call MatGetInfo() to get information on how effective the preallocation was;
3290     for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3291     You can also run with the option -info and look for messages with the string
3292     malloc in them to see if additional memory allocation was needed.
3293
3294     Example usage:
3295
3296     Consider the following 8x8 matrix with 34 non-zero values, that is
3297     assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3298     proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3299     as follows:
3300
3301  .vb
3302              1  2  0  |  0  3  0  |  0  4
3303     Proc0    0  5  6  |  7  0  0  |  8  0
3304              9  0 10  | 11  0  0  | 12  0
3305     -------------------------------------
3306             13  0 14  | 15 16 17  |  0  0
3307     Proc1    0 18  0  | 19 20 21  |  0  0
3308              0  0  0  | 22 23  0  | 24  0
3309     -------------------------------------
3310     Proc2   25 26 27  |  0  0 28  | 29  0
3311             30  0  0  | 31 32 33  |  0 34
3312  .ve
3313
3314     This can be represented as a collection of submatrices as:
3315
3316  .vb
3317        A B C
3318        D E F
3319        G H I
3320  .ve
3321
3322     Where the submatrices A,B,C are owned by proc0, D,E,F are
3323     owned by proc1, G,H,I are owned by proc2.
3324
3325     The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3326     The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3327     The 'M','N' parameters are 8,8, and have the same values on all procs.
3328
3329     The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3330     submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3331     corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3332     Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3333     part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
3334     matrix, and [DF] as another SeqAIJ matrix.
3335
3336     When d_nz, o_nz parameters are specified, d_nz storage elements are
3337     allocated for every row of the local diagonal submatrix, and o_nz
3338     storage locations are allocated for every row of the OFF-DIAGONAL submat.
3339     One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
3340     the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
3341     In this case, the values of d_nz,o_nz are:
3342  .vb
3343     proc0 : d_nz = 2, o_nz = 2
3344     proc1 : d_nz = 3, o_nz = 2
3345     proc2 : d_nz = 1, o_nz = 4
3346  .ve
3347     We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3348     translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3349     for proc2, i.e., we are using 12+15+10=37 storage locations to store
3350     34 values.
3351
3352     When d_nnz, o_nnz parameters are specified, the storage is specified
3353     for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3354     In the above case the values for d_nnz,o_nnz are:
3355  .vb
3356     proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3357     proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3358     proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3359  .ve
3360     Here the space allocated is the sum of all the above values, i.e., 34, and
3361     hence pre-allocation is perfect.
3362
3363     Level: intermediate
3364
3365  .keywords: matrix, aij, compressed row, sparse, parallel
3366
3367  .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
3368            MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
3369  @*/
3370  PetscErrorCode MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3371  {
3372    PetscErrorCode ierr;
3373
3374    PetscFunctionBegin;
3375    PetscValidHeaderSpecific(B,MAT_CLASSID,1);
3376    PetscValidType(B,1);
3377    ierr = PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));CHKERRQ(ierr);
3378    PetscFunctionReturn(0);
3379  }
3380
3381  /*@
3382     MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain
3383     the local rows in standard CSR format.
3384
3385     Collective on MPI_Comm
3386
3387     Input Parameters:
3388  +  comm - MPI communicator
3389  .  m - number of local rows (Cannot be PETSC_DECIDE)
3390  .  n - This value should be the same as the local size used in creating the
3391         x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3392         calculated if N is given) For square matrices n is almost always m.
3393  .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3394  .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3395  .  i - row indices
3396  .  j - column indices
3397  -  a - matrix values
3398
3399     Output Parameter:
3400  .  mat - the matrix
3401
3402     Level: intermediate
3403
3404     Notes:
3405     The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3406     thus you CANNOT change the matrix entries by changing the values of a[] after you have
3407     called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3408
3409     The i and j indices are 0 based, and the i indices are offsets into the local j array.
3410
3411     The format used for the sparse matrix input is equivalent to a
3412     row-major ordering, i.e., for the following matrix, the input data expected is
3413     as shown
3414
3415  $        1 0 0
3416  $        2 0 3   P0
3417  $       -------
3418  $        4 5 6   P1
3419  $
3420  $  Process0 [P0]: rows_owned=[0,1]
3421  $    i = {0,1,3}  [size = nrow+1 = 2+1]
3422  $    j = {0,0,2}  [size = 3]
3423  $    v = {1,2,3}  [size = 3]
3424  $
3425  $  Process1 [P1]: rows_owned=[2]
3426  $    i = {0,3}    [size = nrow+1 = 1+1]
3427  $    j = {0,1,2}  [size = 3]
3428  $    v = {4,5,6}  [size = 3]
3429
3430  .keywords: matrix, aij, compressed row, sparse, parallel
3431
3432  .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3433            MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
3434  @*/
3435  PetscErrorCode MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3436  {
3437    PetscErrorCode ierr;
3438
3439    PetscFunctionBegin;
3440    if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3441    if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3442    ierr = MatCreate(comm,mat);CHKERRQ(ierr);
3443    ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
3444    /* ierr = MatSetBlockSizes(M,bs,cbs);CHKERRQ(ierr); */
3445    ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
3446    ierr = MatMPIAIJSetPreallocationCSR(*mat,i,j,a);CHKERRQ(ierr);
3447    PetscFunctionReturn(0);
3448  }
3449
3450  /*@C
3451     MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
3452     (the default parallel PETSc format). For good matrix assembly performance
3453     the user should preallocate the matrix storage by setting the parameters
3454     d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
3455     performance can be increased by more than a factor of 50.
3456
3457     Collective on MPI_Comm
3458
3459     Input Parameters:
3460  +  comm - MPI communicator
3461  .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3462         This value should be the same as the local size used in creating the
3463         y vector for the matrix-vector product y = Ax.
3464  .  n - This value should be the same as the local size used in creating the
3465         x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3466         calculated if N is given) For square matrices n is almost always m.
3467  .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3468  .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3469  .  d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
3470            (same value is used for all local rows)
3471  .  d_nnz - array containing the number of nonzeros in the various rows of the
3472             DIAGONAL portion of the local submatrix (possibly different for each row)
3473             or NULL, if d_nz is used to specify the nonzero structure.
3474             The size of this array is equal to the number of local rows, i.e., 'm'.
3475  .  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
3476            submatrix (same value is used for all local rows).
3477  -  o_nnz - array containing the number of nonzeros in the various rows of the
3478             OFF-DIAGONAL portion of the local submatrix (possibly different for
3479             each row) or NULL, if o_nz is used to specify the nonzero
3480             structure. The size of this array is equal to the number
3481             of local rows, i.e., 'm'.
3482
3483     Output Parameter:
3484  .  A - the matrix
3485
3486     It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3487     MatXXXXSetPreallocation() paradigm instead of this routine directly.
3488     [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
3489
3490     Notes:
3491     If the *_nnz parameter is given then the *_nz parameter is ignored
3492
3493     m,n,M,N parameters specify the size of the matrix, and its partitioning across
3494     processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3495     storage requirements for this matrix.
3496
3497     If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
3498     processor then it must be used on all processors that share the object for
3499     that argument.
3500
3501     The user MUST specify either the local or global matrix dimensions
3502     (possibly both).
3503
3504     The parallel matrix is partitioned across processors such that the
3505     first m0 rows belong to process 0, the next m1 rows belong to
3506     process 1, the next m2 rows belong to process 2, etc., where
3507     m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
3508     values corresponding to an [m x N] submatrix.
3509
3510     The columns are logically partitioned with the n0 columns belonging
3511     to the 0th partition, the next n1 columns belonging to the next
3512     partition, etc., where n0,n1,n2... are the input parameter 'n'.
3513
3514     The DIAGONAL portion of the local submatrix on any given processor
3515     is the submatrix corresponding to the rows and columns m,n
3516     corresponding to the given processor, i.e., the diagonal matrix on
3517     process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
3518     etc. The remaining portion of the local submatrix [m x (N-n)]
3519     constitutes the OFF-DIAGONAL portion. The example below better
3520     illustrates this concept.
3521
3522     For a square global matrix we define each processor's diagonal portion
3523     to be its local rows and the corresponding columns (a square submatrix);
3524     each processor's off-diagonal portion encompasses the remainder of the
3525     local matrix (a rectangular submatrix).
3526
3527     If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3528
3529     When calling this routine with a single process communicator, a matrix of
3530     type SEQAIJ is returned. If a matrix of type MATMPIAIJ is desired for this
3531     type of communicator, use the construction mechanism:
3532     MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
3533
3534     By default, this format uses inodes (identical nodes) when possible.
3535     We search for consecutive rows with the same nonzero structure, thereby
3536     reusing matrix information to achieve increased efficiency.
3537
3538     Options Database Keys:
3539  +  -mat_no_inode - Do not use inodes
3540  .  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3541  -  -mat_aij_oneindex - Internally use indexing starting at 1
3542         rather than 0. Note that when calling MatSetValues(),
3543         the user still MUST index entries starting at 0!
3544
3545
3546     Example usage:
3547
3548     Consider the following 8x8 matrix with 34 non-zero values, that is
3549     assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3550     proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3551     as follows:
3552
3553  .vb
3554              1  2  0  |  0  3  0  |  0  4
3555     Proc0    0  5  6  |  7  0  0  |  8  0
3556              9  0 10  | 11  0  0  | 12  0
3557     -------------------------------------
3558             13  0 14  | 15 16 17  |  0  0
3559     Proc1    0 18  0  | 19 20 21  |  0  0
3560              0  0  0  | 22 23  0  | 24  0
3561     -------------------------------------
3562     Proc2   25 26 27  |  0  0 28  | 29  0
3563             30  0  0  | 31 32 33  |  0 34
3564  .ve
3565
3566     This can be represented as a collection of submatrices as:
3567
3568  .vb
3569        A B C
3570        D E F
3571        G H I
3572  .ve
3573
3574     Where the submatrices A,B,C are owned by proc0, D,E,F are
3575     owned by proc1, G,H,I are owned by proc2.
3576
3577     The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3578     The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3579     The 'M','N' parameters are 8,8, and have the same values on all procs.
3580
3581     The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3582     submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3583     corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3584     Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3585     part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
3586     matrix, and [DF] as another SeqAIJ matrix.
3587
3588     When d_nz, o_nz parameters are specified, d_nz storage elements are
3589     allocated for every row of the local diagonal submatrix, and o_nz
3590     storage locations are allocated for every row of the OFF-DIAGONAL submat.
3591     One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
3592     the local rows for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
3593     In this case, the values of d_nz,o_nz are:
3594  .vb
3595     proc0 : d_nz = 2, o_nz = 2
3596     proc1 : d_nz = 3, o_nz = 2
3597     proc2 : d_nz = 1, o_nz = 4
3598  .ve
3599     We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3600     translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3601     for proc2, i.e., we are using 12+15+10=37 storage locations to store
3602     34 values.
3603
3604     When d_nnz, o_nnz parameters are specified, the storage is specified
3605     for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3606     In the above case the values for d_nnz,o_nnz are:
3607  .vb
3608     proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3609     proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3610     proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3611  .ve
3612     Here the space allocated is the sum of all the above values, i.e., 34, and
3613     hence pre-allocation is perfect.
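
     A minimal creation sketch for the example above (a hypothetical snippet, assuming
     exactly 3 MPI processes; the arrays shown are the proc0 values from the table,
     and each process would pass its own d_nnz/o_nnz):

.vb
     Mat      A;
     PetscInt d_nnz[] = {2,2,2}, o_nnz[] = {2,2,2};  /* per-row counts on this process */
     ierr = MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);CHKERRQ(ierr);
     /* ... set the entries of the locally owned rows with MatSetValues() ... */
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
.ve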
3614 3615 Level: intermediate 3616 3617 .keywords: matrix, aij, compressed row, sparse, parallel 3618 3619 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(), 3620 MATMPIAIJ, MatCreateMPIAIJWithArrays() 3621 @*/ 3622 PetscErrorCode MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A) 3623 { 3624 PetscErrorCode ierr; 3625 PetscMPIInt size; 3626 3627 PetscFunctionBegin; 3628 ierr = MatCreate(comm,A);CHKERRQ(ierr); 3629 ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr); 3630 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 3631 if (size > 1) { 3632 ierr = MatSetType(*A,MATMPIAIJ);CHKERRQ(ierr); 3633 ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr); 3634 } else { 3635 ierr = MatSetType(*A,MATSEQAIJ);CHKERRQ(ierr); 3636 ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr); 3637 } 3638 PetscFunctionReturn(0); 3639 } 3640 3641 PetscErrorCode MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[]) 3642 { 3643 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 3644 PetscBool flg; 3645 PetscErrorCode ierr; 3646 3647 PetscFunctionBegin; 3648 ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr); 3649 if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input"); 3650 if (Ad) *Ad = a->A; 3651 if (Ao) *Ao = a->B; 3652 if (colmap) *colmap = a->garray; 3653 PetscFunctionReturn(0); 3654 } 3655 3656 PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat) 3657 { 3658 PetscErrorCode ierr; 3659 PetscInt m,N,i,rstart,nnz,Ii; 3660 PetscInt *indx; 3661 PetscScalar *values; 3662 3663 PetscFunctionBegin; 3664 ierr = MatGetSize(inmat,&m,&N);CHKERRQ(ierr); 3665 if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */ 3666 PetscInt *dnz,*onz,sum,bs,cbs; 3667 3668 if (n == PETSC_DECIDE) { 3669 ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr); 3670 } 3671 /* Check sum(n) = N */ 3672 ierr = MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr); 3673 if (sum != N) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns != global columns %d",N); 3674 3675 ierr = MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr); 3676 rstart -= m; 3677 3678 ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr); 3679 for (i=0; i<m; i++) { 3680 ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);CHKERRQ(ierr); 3681 ierr = MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);CHKERRQ(ierr); 3682 ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);CHKERRQ(ierr); 3683 } 3684 3685 ierr = MatCreate(comm,outmat);CHKERRQ(ierr); 3686 ierr = MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 3687 ierr = MatGetBlockSizes(inmat,&bs,&cbs);CHKERRQ(ierr); 3688 ierr = MatSetBlockSizes(*outmat,bs,cbs);CHKERRQ(ierr); 3689 ierr = MatSetType(*outmat,MATMPIAIJ);CHKERRQ(ierr); 3690 ierr = MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);CHKERRQ(ierr); 3691 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 3692 } 3693 3694 /* numeric phase */ 3695 ierr = MatGetOwnershipRange(*outmat,&rstart,NULL);CHKERRQ(ierr); 3696 for (i=0; i<m; i++) { 3697 ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr); 3698 Ii = i + rstart; 3699 ierr = MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr); 3700 ierr = 
MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr); 3701 } 3702 ierr = MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3703 ierr = MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3704 PetscFunctionReturn(0); 3705 } 3706 3707 PetscErrorCode MatFileSplit(Mat A,char *outfile) 3708 { 3709 PetscErrorCode ierr; 3710 PetscMPIInt rank; 3711 PetscInt m,N,i,rstart,nnz; 3712 size_t len; 3713 const PetscInt *indx; 3714 PetscViewer out; 3715 char *name; 3716 Mat B; 3717 const PetscScalar *values; 3718 3719 PetscFunctionBegin; 3720 ierr = MatGetLocalSize(A,&m,0);CHKERRQ(ierr); 3721 ierr = MatGetSize(A,0,&N);CHKERRQ(ierr); 3722 /* Should this be the type of the diagonal block of A? */ 3723 ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr); 3724 ierr = MatSetSizes(B,m,N,m,N);CHKERRQ(ierr); 3725 ierr = MatSetBlockSizesFromMats(B,A,A);CHKERRQ(ierr); 3726 ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr); 3727 ierr = MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr); 3728 ierr = MatGetOwnershipRange(A,&rstart,0);CHKERRQ(ierr); 3729 for (i=0; i<m; i++) { 3730 ierr = MatGetRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr); 3731 ierr = MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr); 3732 ierr = MatRestoreRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr); 3733 } 3734 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3735 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3736 3737 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr); 3738 ierr = PetscStrlen(outfile,&len);CHKERRQ(ierr); 3739 ierr = PetscMalloc1(len+5,&name);CHKERRQ(ierr); 3740 sprintf(name,"%s.%d",outfile,rank); 3741 ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);CHKERRQ(ierr); 3742 ierr = PetscFree(name);CHKERRQ(ierr); 3743 ierr = MatView(B,out);CHKERRQ(ierr); 3744 ierr = PetscViewerDestroy(&out);CHKERRQ(ierr); 3745 ierr = MatDestroy(&B);CHKERRQ(ierr); 3746 PetscFunctionReturn(0); 3747 } 3748 3749 extern PetscErrorCode MatDestroy_MPIAIJ(Mat); 3750 PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat A) 3751 { 3752 PetscErrorCode ierr; 3753 Mat_Merge_SeqsToMPI *merge; 3754 PetscContainer container; 3755 3756 PetscFunctionBegin; 3757 ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);CHKERRQ(ierr); 3758 if (container) { 3759 ierr = PetscContainerGetPointer(container,(void**)&merge);CHKERRQ(ierr); 3760 ierr = PetscFree(merge->id_r);CHKERRQ(ierr); 3761 ierr = PetscFree(merge->len_s);CHKERRQ(ierr); 3762 ierr = PetscFree(merge->len_r);CHKERRQ(ierr); 3763 ierr = PetscFree(merge->bi);CHKERRQ(ierr); 3764 ierr = PetscFree(merge->bj);CHKERRQ(ierr); 3765 ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr); 3766 ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr); 3767 ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr); 3768 ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr); 3769 ierr = PetscFree(merge->coi);CHKERRQ(ierr); 3770 ierr = PetscFree(merge->coj);CHKERRQ(ierr); 3771 ierr = PetscFree(merge->owners_co);CHKERRQ(ierr); 3772 ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr); 3773 ierr = PetscFree(merge);CHKERRQ(ierr); 3774 ierr = PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);CHKERRQ(ierr); 3775 } 3776 ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr); 3777 PetscFunctionReturn(0); 3778 } 3779 3780 #include <../src/mat/utils/freespace.h> 3781 #include <petscbt.h> 3782 3783 PetscErrorCode MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat) 3784 { 3785 PetscErrorCode ierr; 3786 MPI_Comm comm; 3787 Mat_SeqAIJ *a 
=(Mat_SeqAIJ*)seqmat->data;
3788    PetscMPIInt          size,rank,taga,*len_s;
3789    PetscInt             N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
3790    PetscInt             proc,m;
3791    PetscInt             **buf_ri,**buf_rj;
3792    PetscInt             k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
3793    PetscInt             nrows,**buf_ri_k,**nextrow,**nextai;
3794    MPI_Request          *s_waits,*r_waits;
3795    MPI_Status           *status;
3796    MatScalar            *aa=a->a;
3797    MatScalar            **abuf_r,*ba_i;
3798    Mat_Merge_SeqsToMPI  *merge;
3799    PetscContainer       container;
3800
3801    PetscFunctionBegin;
3802    ierr = PetscObjectGetComm((PetscObject)mpimat,&comm);CHKERRQ(ierr);
3803    ierr = PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
3804
3805    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3806    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3807
3808    ierr = PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);CHKERRQ(ierr);
3809    ierr = PetscContainerGetPointer(container,(void**)&merge);CHKERRQ(ierr);
3810
3811    bi     = merge->bi;
3812    bj     = merge->bj;
3813    buf_ri = merge->buf_ri;
3814    buf_rj = merge->buf_rj;
3815
3816    ierr   = PetscMalloc1(size,&status);CHKERRQ(ierr);
3817    owners = merge->rowmap->range;
3818    len_s  = merge->len_s;
3819
3820    /* send and recv matrix values */
3821    /*-----------------------------*/
3822    ierr = PetscObjectGetNewTag((PetscObject)mpimat,&taga);CHKERRQ(ierr);
3823    ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
3824
3825    ierr = PetscMalloc1(merge->nsend+1,&s_waits);CHKERRQ(ierr);
3826    for (proc=0,k=0; proc<size; proc++) {
3827      if (!len_s[proc]) continue;
3828      i    = owners[proc];
3829      ierr = MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
3830      k++;
3831    }
3832
3833    if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
3834    if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
3835    ierr = PetscFree(status);CHKERRQ(ierr);
3836
3837    ierr = PetscFree(s_waits);CHKERRQ(ierr);
3838    ierr = PetscFree(r_waits);CHKERRQ(ierr);
3839
3840    /* insert mat values of mpimat */
3841    /*----------------------------*/
3842    ierr = PetscMalloc1(N,&ba_i);CHKERRQ(ierr);
3843    ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);CHKERRQ(ierr);
3844
3845    for (k=0; k<merge->nrecv; k++) {
3846      buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
3847      nrows       = *(buf_ri_k[k]);
3848      nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
3849      nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of the k-th received message */
3850    }
3851
3852    /* set values of ba */
3853    m = merge->rowmap->n;
3854    for (i=0; i<m; i++) {
3855      arow = owners[rank] + i;
3856      bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
3857      bnzi = bi[i+1] - bi[i];
3858      ierr = PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));CHKERRQ(ierr);
3859
3860      /* add local non-zero vals of this proc's seqmat into ba */
3861      anzi   = ai[arow+1] - ai[arow];
3862      aj     = a->j + ai[arow];
3863      aa     = a->a + ai[arow];
3864      nextaj = 0;
3865      for (j=0; nextaj<anzi; j++) {
3866        if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
3867          ba_i[j] += aa[nextaj++];
3868        }
3869      }
3870
3871      /* add received vals into ba */
3872      for (k=0; k<merge->nrecv; k++) { /* k-th received message */
3873        /* i-th row */
3874        if (i == *nextrow[k]) {
3875          anzi   = *(nextai[k]+1) - *nextai[k];
3876          aj     = buf_rj[k] + *(nextai[k]);
3877          aa     = abuf_r[k] + *(nextai[k]);
3878          nextaj = 0;
3879          for (j=0; nextaj<anzi; j++) {
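            /* both aj (the received row) and bj_i (the merged row) are sorted, so
               advance nextaj only when its column matches the scan position in bj_i */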
3880 if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */ 3881 ba_i[j] += aa[nextaj++]; 3882 } 3883 } 3884 nextrow[k]++; nextai[k]++; 3885 } 3886 } 3887 ierr = MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr); 3888 } 3889 ierr = MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3890 ierr = MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 3891 3892 ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr); 3893 ierr = PetscFree(abuf_r);CHKERRQ(ierr); 3894 ierr = PetscFree(ba_i);CHKERRQ(ierr); 3895 ierr = PetscFree3(buf_ri_k,nextrow,nextai);CHKERRQ(ierr); 3896 ierr = PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr); 3897 PetscFunctionReturn(0); 3898 } 3899 3900 extern PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat); 3901 3902 PetscErrorCode MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat) 3903 { 3904 PetscErrorCode ierr; 3905 Mat B_mpi; 3906 Mat_SeqAIJ *a=(Mat_SeqAIJ*)seqmat->data; 3907 PetscMPIInt size,rank,tagi,tagj,*len_s,*len_si,*len_ri; 3908 PetscInt **buf_rj,**buf_ri,**buf_ri_k; 3909 PetscInt M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j; 3910 PetscInt len,proc,*dnz,*onz,bs,cbs; 3911 PetscInt k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0; 3912 PetscInt nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai; 3913 MPI_Request *si_waits,*sj_waits,*ri_waits,*rj_waits; 3914 MPI_Status *status; 3915 PetscFreeSpaceList free_space=NULL,current_space=NULL; 3916 PetscBT lnkbt; 3917 Mat_Merge_SeqsToMPI *merge; 3918 PetscContainer container; 3919 3920 PetscFunctionBegin; 3921 ierr = PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr); 3922 3923 /* make sure it is a PETSc comm */ 3924 ierr = PetscCommDuplicate(comm,&comm,NULL);CHKERRQ(ierr); 3925 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 3926 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 3927 3928 ierr = PetscNew(&merge);CHKERRQ(ierr); 3929 ierr = PetscMalloc1(size,&status);CHKERRQ(ierr); 3930 3931 /* determine row ownership */ 3932 /*---------------------------------------------------------*/ 3933 ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr); 3934 ierr = PetscLayoutSetLocalSize(merge->rowmap,m);CHKERRQ(ierr); 3935 ierr = PetscLayoutSetSize(merge->rowmap,M);CHKERRQ(ierr); 3936 ierr = PetscLayoutSetBlockSize(merge->rowmap,1);CHKERRQ(ierr); 3937 ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr); 3938 ierr = PetscMalloc1(size,&len_si);CHKERRQ(ierr); 3939 ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr); 3940 3941 m = merge->rowmap->n; 3942 owners = merge->rowmap->range; 3943 3944 /* determine the number of messages to send, their lengths */ 3945 /*---------------------------------------------------------*/ 3946 len_s = merge->len_s; 3947 3948 len = 0; /* length of buf_si[] */ 3949 merge->nsend = 0; 3950 for (proc=0; proc<size; proc++) { 3951 len_si[proc] = 0; 3952 if (proc == rank) { 3953 len_s[proc] = 0; 3954 } else { 3955 len_si[proc] = owners[proc+1] - owners[proc] + 1; 3956 len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */ 3957 } 3958 if (len_s[proc]) { 3959 merge->nsend++; 3960 nrows = 0; 3961 for (i=owners[proc]; i<owners[proc+1]; i++) { 3962 if (ai[i+1] > ai[i]) nrows++; 3963 } 3964 len_si[proc] = 2*(nrows+1); 3965 len += len_si[proc]; 3966 } 3967 } 3968 3969 /* determine the number and length of messages to receive for ij-structure */ 3970 /*-------------------------------------------------------------------------*/ 3971 ierr = 
PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr); 3972 ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr); 3973 3974 /* post the Irecv of j-structure */ 3975 /*-------------------------------*/ 3976 ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr); 3977 ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr); 3978 3979 /* post the Isend of j-structure */ 3980 /*--------------------------------*/ 3981 ierr = PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);CHKERRQ(ierr); 3982 3983 for (proc=0, k=0; proc<size; proc++) { 3984 if (!len_s[proc]) continue; 3985 i = owners[proc]; 3986 ierr = MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr); 3987 k++; 3988 } 3989 3990 /* receives and sends of j-structure are complete */ 3991 /*------------------------------------------------*/ 3992 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);} 3993 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);} 3994 3995 /* send and recv i-structure */ 3996 /*---------------------------*/ 3997 ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr); 3998 ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr); 3999 4000 ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr); 4001 buf_si = buf_s; /* points to the beginning of k-th msg to be sent */ 4002 for (proc=0,k=0; proc<size; proc++) { 4003 if (!len_s[proc]) continue; 4004 /* form outgoing message for i-structure: 4005 buf_si[0]: nrows to be sent 4006 [1:nrows]: row index (global) 4007 [nrows+1:2*nrows+1]: i-structure index 4008 */ 4009 /*-------------------------------------------*/ 4010 nrows = len_si[proc]/2 - 1; 4011 buf_si_i = buf_si + nrows+1; 4012 buf_si[0] = nrows; 4013 buf_si_i[0] = 0; 4014 nrows = 0; 4015 for (i=owners[proc]; i<owners[proc+1]; i++) { 4016 anzi = ai[i+1] - ai[i]; 4017 if (anzi) { 4018 buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */ 4019 buf_si[nrows+1] = i-owners[proc]; /* local row index */ 4020 nrows++; 4021 } 4022 } 4023 ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr); 4024 k++; 4025 buf_si += len_si[proc]; 4026 } 4027 4028 if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);} 4029 if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);} 4030 4031 ierr = PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);CHKERRQ(ierr); 4032 for (i=0; i<merge->nrecv; i++) { 4033 ierr = PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr); 4034 } 4035 4036 ierr = PetscFree(len_si);CHKERRQ(ierr); 4037 ierr = PetscFree(len_ri);CHKERRQ(ierr); 4038 ierr = PetscFree(rj_waits);CHKERRQ(ierr); 4039 ierr = PetscFree2(si_waits,sj_waits);CHKERRQ(ierr); 4040 ierr = PetscFree(ri_waits);CHKERRQ(ierr); 4041 ierr = PetscFree(buf_s);CHKERRQ(ierr); 4042 ierr = PetscFree(status);CHKERRQ(ierr); 4043 4044 /* compute a local seq matrix in each processor */ 4045 /*----------------------------------------------*/ 4046 /* allocate bi array and free space for accumulating nonzero column info */ 4047 ierr = PetscMalloc1(m+1,&bi);CHKERRQ(ierr); 4048 bi[0] = 0; 4049 4050 /* create and initialize a linked list */ 4051 nlnk = N+1; 4052 ierr = PetscLLCreate(N,N,nlnk,lnk,lnkbt);CHKERRQ(ierr); 4053 4054 /* initial 
FreeSpace size is 2*(num of local nnz(seqmat)) */
4055    len  = ai[owners[rank+1]] - ai[owners[rank]];
4056    ierr = PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);CHKERRQ(ierr);
4057
4058    current_space = free_space;
4059
4060    /* determine symbolic info for each local row */
4061    ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);CHKERRQ(ierr);
4062
4063    for (k=0; k<merge->nrecv; k++) {
4064      buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
4065      nrows       = *buf_ri_k[k];
4066      nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
4067      nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure of the k-th received message */
4068    }
4069
4070    ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
4071    len  = 0;
4072    for (i=0; i<m; i++) {
4073      bnzi = 0;
4074      /* add local non-zero cols of this proc's seqmat into lnk */
4075      arow  = owners[rank] + i;
4076      anzi  = ai[arow+1] - ai[arow];
4077      aj    = a->j + ai[arow];
4078      ierr  = PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4079      bnzi += nlnk;
4080      /* add received col data into lnk */
4081      for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4082        if (i == *nextrow[k]) { /* i-th row */
4083          anzi  = *(nextai[k]+1) - *nextai[k];
4084          aj    = buf_rj[k] + *nextai[k];
4085          ierr  = PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4086          bnzi += nlnk;
4087          nextrow[k]++; nextai[k]++;
4088        }
4089      }
4090      if (len < bnzi) len = bnzi; /* =max(bnzi) */
4091
4092      /* if free space is not available, make more free space */
4093      if (current_space->local_remaining<bnzi) {
4094        ierr = PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);CHKERRQ(ierr);
4095        nspacedouble++;
4096      }
4097      /* copy data into free space, then initialize lnk */
4098      ierr = PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
4099      ierr = MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);CHKERRQ(ierr);
4100
4101      current_space->array           += bnzi;
4102      current_space->local_used      += bnzi;
4103      current_space->local_remaining -= bnzi;
4104
4105      bi[i+1] = bi[i] + bnzi;
4106    }
4107
4108    ierr = PetscFree3(buf_ri_k,nextrow,nextai);CHKERRQ(ierr);
4109
4110    ierr = PetscMalloc1(bi[m]+1,&bj);CHKERRQ(ierr);
4111    ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
4112    ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
4113
4114    /* create symbolic parallel matrix B_mpi */
4115    /*---------------------------------------*/
4116    ierr = MatGetBlockSizes(seqmat,&bs,&cbs);CHKERRQ(ierr);
4117    ierr = MatCreate(comm,&B_mpi);CHKERRQ(ierr);
4118    if (n==PETSC_DECIDE) {
4119      ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);CHKERRQ(ierr);
4120    } else {
4121      ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4122    }
4123    ierr = MatSetBlockSizes(B_mpi,bs,cbs);CHKERRQ(ierr);
4124    ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
4125    ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
4126    ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
4127    ierr = MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
4128
4129    /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4130    B_mpi->assembled    = PETSC_FALSE;
4131    B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4132    merge->bi           = bi;
4133    merge->bj           = bj;
4134    merge->buf_ri       = buf_ri;
4135    merge->buf_rj       = buf_rj;
4136    merge->coi          = NULL;
4137    merge->coj          = NULL;
4138    merge->owners_co    = NULL;
4139
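    /* release the reference obtained by PetscCommDuplicate() at the start of this routine */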
4140    ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
4141
4142    /* attach the supporting struct to B_mpi for reuse */
4143    ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
4144    ierr = PetscContainerSetPointer(container,merge);CHKERRQ(ierr);
4145    ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
4146    ierr = PetscContainerDestroy(&container);CHKERRQ(ierr);
4147    *mpimat = B_mpi;
4148
4149    ierr = PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4150    PetscFunctionReturn(0);
4151  }
4152
4153  /*@C
4154     MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4155     matrices from each processor
4156
4157     Collective on MPI_Comm
4158
4159     Input Parameters:
4160  +  comm - the communicator the parallel matrix will live on
4161  .  seqmat - the input sequential matrices
4162  .  m - number of local rows (or PETSC_DECIDE)
4163  .  n - number of local columns (or PETSC_DECIDE)
4164  -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4165
4166     Output Parameter:
4167  .  mpimat - the parallel matrix generated
4168
4169     Level: advanced
4170
4171     Notes:
4172     The dimensions of the sequential matrix in each processor MUST be the same.
4173     The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4174     destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
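
     A sketch of the intended calling sequence (assumes Aseq is this process's
     sequential contribution; the MAT_REUSE_MATRIX call requires that the nonzero
     pattern of Aseq is unchanged since the MAT_INITIAL_MATRIX call):

.vb
     Mat C;
     ierr = MatCreateMPIAIJSumSeqAIJ(comm,Aseq,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     /* ... update the numerical values of Aseq ... */
     ierr = MatCreateMPIAIJSumSeqAIJ(comm,Aseq,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
.ve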
4175  @*/
4176  PetscErrorCode MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4177  {
4178    PetscErrorCode ierr;
4179    PetscMPIInt    size;
4180
4181    PetscFunctionBegin;
4182    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4183    if (size == 1) {
4184      ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4185      if (scall == MAT_INITIAL_MATRIX) {
4186        ierr = MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);CHKERRQ(ierr);
4187      } else {
4188        ierr = MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
4189      }
4190      ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4191      PetscFunctionReturn(0);
4192    }
4193    ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4194    if (scall == MAT_INITIAL_MATRIX) {
4195      ierr = MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);CHKERRQ(ierr);
4196    }
4197    ierr = MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);CHKERRQ(ierr);
4198    ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4199    PetscFunctionReturn(0);
4200  }
4201
4202  /*@
4203     MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4204     mlocal rows and n columns, where mlocal is the row count obtained with MatGetLocalSize() and n is the global column count obtained
4205     with MatGetSize()
4206
4207     Not Collective
4208
4209     Input Parameters:
4210  +  A - the matrix
4211  -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4212
4213     Output Parameter:
4214  .  A_loc - the local sequential matrix generated
4215
4216     Level: developer
4217
4218  .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()
4219
4220  @*/
4221  PetscErrorCode MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4222  {
4223    PetscErrorCode ierr;
4224    Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
4225    Mat_SeqAIJ     *mat,*a,*b;
4226    PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4227    MatScalar      *aa,*ba,*cam;
4228    PetscScalar    *ca;
4229    PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4230    PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
4231    PetscBool      match;
4232    MPI_Comm       comm;
4233    PetscMPIInt    size;
4234
4235    PetscFunctionBegin;
4236    ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);CHKERRQ(ierr);
4237    if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4238    ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
4239    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4240    if (size == 1 && scall == MAT_REUSE_MATRIX) PetscFunctionReturn(0);
4241
4242    ierr = PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4243    a = (Mat_SeqAIJ*)(mpimat->A)->data;
4244    b = (Mat_SeqAIJ*)(mpimat->B)->data;
4245    ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4246    aa = a->a; ba = b->a;
4247    if (scall == MAT_INITIAL_MATRIX) {
4248      if (size == 1) {
4249        ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);CHKERRQ(ierr);
4250        PetscFunctionReturn(0);
4251      }
4252
4253      ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
4254      ci[0] = 0;
4255      for (i=0; i<am; i++) {
4256        ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4257      }
4258      ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
4259      ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
4260      k    = 0;
4261      for (i=0; i<am; i++) {
4262        ncols_o = bi[i+1] - bi[i];
4263        ncols_d = ai[i+1] - ai[i];
4264        /* off-diagonal portion of A, columns left of the diagonal block */
4265        for (jo=0; jo<ncols_o; jo++) {
4266          col = cmap[*bj];
4267          if (col >= cstart) break;
4268          cj[k]   = col; bj++;
4269          ca[k++] = *ba++;
4270        }
4271        /* diagonal portion of A */
4272        for (j=0; j<ncols_d; j++) {
4273          cj[k]   = cstart + *aj++;
4274          ca[k++] = *aa++;
4275        }
4276        /* off-diagonal portion of A, columns right of the diagonal block */
4277        for (j=jo; j<ncols_o; j++) {
4278          cj[k]   = cmap[*bj++];
4279          ca[k++] = *ba++;
4280        }
4281      }
4282      /* put together the new matrix */
4283      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);CHKERRQ(ierr);
4284      /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4285      /* Since these are PETSc arrays, change flags to free them as necessary.
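         (With free_a and free_ij set, MatDestroy() on A_loc will PetscFree() the
         ci, cj, and ca arrays allocated above.)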
*/
4286      mat = (Mat_SeqAIJ*)(*A_loc)->data;
4287      mat->free_a  = PETSC_TRUE;
4288      mat->free_ij = PETSC_TRUE;
4289      mat->nonew   = 0;
4290    } else if (scall == MAT_REUSE_MATRIX) {
4291      mat=(Mat_SeqAIJ*)(*A_loc)->data;
4292      ci = mat->i; cj = mat->j; cam = mat->a;
4293      for (i=0; i<am; i++) {
4294        /* off-diagonal portion of A, columns left of the diagonal block */
4295        ncols_o = bi[i+1] - bi[i];
4296        for (jo=0; jo<ncols_o; jo++) {
4297          col = cmap[*bj];
4298          if (col >= cstart) break;
4299          *cam++ = *ba++; bj++;
4300        }
4301        /* diagonal portion of A */
4302        ncols_d = ai[i+1] - ai[i];
4303        for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4304        /* off-diagonal portion of A, columns right of the diagonal block */
4305        for (j=jo; j<ncols_o; j++) {
4306          *cam++ = *ba++; bj++;
4307        }
4308      }
4309    } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4310    ierr = PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4311    PetscFunctionReturn(0);
4312  }
4313
4314  /*@C
4315     MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns
4316
4317     Not Collective
4318
4319     Input Parameters:
4320  +  A - the matrix
4321  .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4322  -  row, col - index sets of rows and columns to extract (or NULL)
4323
4324     Output Parameter:
4325  .  A_loc - the local sequential matrix generated
4326
4327     Level: developer
4328
4329  .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
4330
4331  @*/
4332  PetscErrorCode MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4333  {
4334    Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
4335    PetscErrorCode ierr;
4336    PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4337    IS             isrowa,iscola;
4338    Mat            *aloc;
4339    PetscBool      match;
4340
4341    PetscFunctionBegin;
4342    ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);CHKERRQ(ierr);
4343    if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4344    ierr = PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4345    if (!row) {
4346      start = A->rmap->rstart; end = A->rmap->rend;
4347      ierr  = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);CHKERRQ(ierr);
4348    } else {
4349      isrowa = *row;
4350    }
4351    if (!col) {
4352      start = A->cmap->rstart;
4353      cmap  = a->garray;
4354      nzA   = a->A->cmap->n;
4355      nzB   = a->B->cmap->n;
4356      ierr  = PetscMalloc1(nzA+nzB, &idx);CHKERRQ(ierr);
4357      ncols = 0;
4358      for (i=0; i<nzB; i++) {
4359        if (cmap[i] < start) idx[ncols++] = cmap[i];
4360        else break;
4361      }
4362      imark = i;
4363      for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4364      for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4365      ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);CHKERRQ(ierr);
4366    } else {
4367      iscola = *col;
4368    }
4369    if (scall != MAT_INITIAL_MATRIX) {
4370      ierr    = PetscMalloc1(1,&aloc);CHKERRQ(ierr);
4371      aloc[0] = *A_loc;
4372    }
4373    ierr   = MatGetSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);CHKERRQ(ierr);
4374    *A_loc = aloc[0];
4375    ierr   = PetscFree(aloc);CHKERRQ(ierr);
4376    if (!row) {
4377      ierr = ISDestroy(&isrowa);CHKERRQ(ierr);
4378    }
4379    if (!col) {
4380      ierr = ISDestroy(&iscola);CHKERRQ(ierr);
4381    }
4382    ierr = PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4383    PetscFunctionReturn(0);
4384  }
4385
4386  /*@C
4387     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A
4388
4389     Collective on Mat
4390
4391     Input Parameters:
4392  +  A,B - the matrices in mpiaij format
4393  .  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4394  -  rowb, colb - index sets of rows and columns of B to extract (or NULL)
4395
4396     Output Parameter:
4397  +  rowb, colb - index sets of rows and columns of B to extract
4398  -  B_seq - the sequential matrix generated
4399
4400     Level: developer
4401
4402  @*/
4403  PetscErrorCode MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
4404  {
4405    Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
4406    PetscErrorCode ierr;
4407    PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4408    IS             isrowb,iscolb;
4409    Mat            *bseq=NULL;
4410
4411    PetscFunctionBegin;
4412    if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
4413      SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4414    }
4415    ierr = PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4416
4417    if (scall == MAT_INITIAL_MATRIX) {
4418      start = A->cmap->rstart;
4419      cmap  = a->garray;
4420      nzA   = a->A->cmap->n;
4421      nzB   = a->B->cmap->n;
4422      ierr  = PetscMalloc1(nzA+nzB, &idx);CHKERRQ(ierr);
4423      ncols = 0;
4424      for (i=0; i<nzB; i++) {  /* row < local row index */
4425        if (cmap[i] < start) idx[ncols++] = cmap[i];
4426        else break;
4427      }
4428      imark = i;
4429      for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
4430      for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4431      ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);CHKERRQ(ierr);
4432      ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);CHKERRQ(ierr);
4433    } else {
4434      if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4435      isrowb  = *rowb; iscolb = *colb;
4436      ierr    = PetscMalloc1(1,&bseq);CHKERRQ(ierr);
4437      bseq[0] = *B_seq;
4438    }
4439    ierr   = MatGetSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);CHKERRQ(ierr);
4440    *B_seq = bseq[0];
4441    ierr   = PetscFree(bseq);CHKERRQ(ierr);
4442    if (!rowb) {
4443      ierr = ISDestroy(&isrowb);CHKERRQ(ierr);
4444    } else {
4445      *rowb = isrowb;
4446    }
4447    if (!colb) {
4448      ierr = ISDestroy(&iscolb);CHKERRQ(ierr);
4449    } else {
4450      *colb = iscolb;
4451    }
4452    ierr = PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4453    PetscFunctionReturn(0);
4454  }
4455
4456  /*
4457     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
4458     of the OFF-DIAGONAL portion of local A
4459
4460     Collective on Mat
4461
4462     Input Parameters:
4463  +  A,B - the matrices in mpiaij format
4464  -  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4465
4466     Output Parameter:
4467  +  startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
4468  .  startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
4469  .
/*
    MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond
    to the nonzero columns of the OFF-DIAGONAL portion of local A

    Collective on Mat

   Input Parameters:
+    A,B - the matrices in mpiaij format
-    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX

   Output Parameters:
+    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
.    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
.    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
-    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N

    Level: developer

*/
PetscErrorCode MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
{
  VecScatter_MPI_General *gen_to,*gen_from;
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *a = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ             *b_oth;
  VecScatter             ctx = a->Mvctx;
  MPI_Comm               comm;
  PetscMPIInt            *rprocs,*sprocs,tag = ((PetscObject)ctx)->tag,rank;
  PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn = a->B->cmap->n,row,*b_othi,*b_othj;
  PetscInt               *rvalues,*svalues;
  MatScalar              *b_otha,*bufa,*bufA;
  PetscInt               i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = NULL,*sstarts,*sstartsj,len;
  MPI_Request            *rwaits = NULL,*swaits = NULL;
  MPI_Status             *sstatus,rstatus;
  PetscMPIInt            jj,size;
  PetscInt               *cols,sbs,rbs;
  PetscScalar            *vals;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
    SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
  }
  ierr = PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  gen_to   = (VecScatter_MPI_General*)ctx->todata;
  gen_from = (VecScatter_MPI_General*)ctx->fromdata;
  nrecvs   = gen_from->n;
  nsends   = gen_to->n;

  ierr    = PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);CHKERRQ(ierr);
  srow    = gen_to->indices; /* local row index to be sent */
  sstarts = gen_to->starts;
  sprocs  = gen_to->procs;
  sstatus = gen_to->sstatus;
  sbs     = gen_to->bs;
  rstarts = gen_from->starts;
  rprocs  = gen_from->procs;
  rbs     = gen_from->bs;

  if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
  if (scall == MAT_INITIAL_MATRIX) {
    /* i-array */
    /*---------*/
    /* post receives */
    ierr = PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);CHKERRQ(ierr);
    for (i=0; i<nrecvs; i++) {
      rowlen = rvalues + rstarts[i]*rbs;
      nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
      ierr   = MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
    }

    /* pack the outgoing message */
    ierr = PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);CHKERRQ(ierr);

    sstartsj[0] = 0;
    rstartsj[0] = 0;
    len         = 0; /* total length of j or a array to be sent */
    k           = 0;
    ierr = PetscMalloc1(sbs*(sstarts[nsends] - sstarts[0]),&svalues);CHKERRQ(ierr);
    for (i=0; i<nsends; i++) {
      rowlen = svalues + sstarts[i]*sbs;
      nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
      for (j=0; j<nrows; j++) {
        row = srow[k] + B->rmap->range[rank]; /* global row idx */
        for (l=0; l<sbs; l++) {
          ierr = MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);CHKERRQ(ierr); /* rowlength */

          rowlen[j*sbs+l] = ncols;

          len += ncols;
          ierr = MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);CHKERRQ(ierr);
        }
        k++;
      }
      ierr = MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);

      sstartsj[i+1] = len; /* starting point of (i+1)-th outgoing msg in bufj and bufa */
    }
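    /* The messages in flight above carry only the per-row lengths (the "i-array").
       Once they complete, each process knows exactly how large its j- and a-receive
       buffers must be; the column indices ("j-array") and the numerical values
       ("a-array") are then exchanged in two analogous sweeps below. */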
    /* recvs and sends of i-array are completed */
    i = nrecvs;
    while (i--) {
      ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
    }
    if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
    ierr = PetscFree(svalues);CHKERRQ(ierr);

    /* allocate buffers for sending j and a arrays */
    ierr = PetscMalloc1(len+1,&bufj);CHKERRQ(ierr);
    ierr = PetscMalloc1(len+1,&bufa);CHKERRQ(ierr);

    /* create i-array of B_oth */
    ierr = PetscMalloc1(aBn+2,&b_othi);CHKERRQ(ierr);

    b_othi[0] = 0;
    len       = 0; /* total length of j or a array to be received */
    k         = 0;
    for (i=0; i<nrecvs; i++) {
      rowlen = rvalues + rstarts[i]*rbs;
      nrows  = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
      for (j=0; j<nrows; j++) {
        b_othi[k+1] = b_othi[k] + rowlen[j];
        ierr = PetscIntSumError(rowlen[j],len,&len);CHKERRQ(ierr);
        k++;
      }
      rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
    }
    ierr = PetscFree(rvalues);CHKERRQ(ierr);

    /* allocate space for j and a arrays of B_oth */
    ierr = PetscMalloc1(b_othi[aBn]+1,&b_othj);CHKERRQ(ierr);
    ierr = PetscMalloc1(b_othi[aBn]+1,&b_otha);CHKERRQ(ierr);

    /* j-array */
    /*---------*/
    /* post receives of j-array */
    for (i=0; i<nrecvs; i++) {
      nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
      ierr  = MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
    }

    /* pack the outgoing message j-array */
    k = 0;
    for (i=0; i<nsends; i++) {
      nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
      bufJ  = bufj+sstartsj[i];
      for (j=0; j<nrows; j++) {
        row = srow[k++] + B->rmap->range[rank]; /* global row idx */
        for (ll=0; ll<sbs; ll++) {
          ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);CHKERRQ(ierr);
          for (l=0; l<ncols; l++) {
            *bufJ++ = cols[l];
          }
          ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);CHKERRQ(ierr);
        }
      }
      ierr = MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
    }

    /* recvs and sends of j-array are completed */
    i = nrecvs;
    while (i--) {
      ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
    }
    if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
  } else if (scall == MAT_REUSE_MATRIX) {
    sstartsj = *startsj_s;
    rstartsj = *startsj_r;
    bufa     = *bufa_ptr;
    b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
    b_otha   = b_oth->a;
  } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
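  /* From here on the code path is shared: for MAT_REUSE_MATRIX the sparsity pattern
     (i- and j-arrays) of B_oth is kept and only the numerical values below are
     recommunicated. */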
  /* a-array */
  /*---------*/
  /* post receives of a-array */
  for (i=0; i<nrecvs; i++) {
    nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
    ierr  = MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  /* pack the outgoing message a-array */
  k = 0;
  for (i=0; i<nsends; i++) {
    nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
    bufA  = bufa+sstartsj[i];
    for (j=0; j<nrows; j++) {
      row = srow[k++] + B->rmap->range[rank]; /* global row idx */
      for (ll=0; ll<sbs; ll++) {
        ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);CHKERRQ(ierr);
        for (l=0; l<ncols; l++) {
          *bufA++ = vals[l];
        }
        ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);CHKERRQ(ierr);
      }
    }
    ierr = MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }
  /* recvs and sends of a-array are completed */
  i = nrecvs;
  while (i--) {
    ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
  }
  if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
  ierr = PetscFree2(rwaits,swaits);CHKERRQ(ierr);

  if (scall == MAT_INITIAL_MATRIX) {
    /* put together the new matrix */
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
    b_oth->free_a  = PETSC_TRUE;
    b_oth->free_ij = PETSC_TRUE;
    b_oth->nonew   = 0;

    ierr = PetscFree(bufj);CHKERRQ(ierr);
    if (!startsj_s || !bufa_ptr) {
      ierr = PetscFree2(sstartsj,rstartsj);CHKERRQ(ierr);
      ierr = PetscFree(bufa);CHKERRQ(ierr);
    } else {
      *startsj_s = sstartsj;
      *startsj_r = rstartsj;
      *bufa_ptr  = bufa;
    }
  }
  ierr = PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
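/*
   Calling-pattern sketch for MatGetBrowsOfAoCols_MPIAIJ() (hedged; A and B are assumed
   to be assembled MATMPIAIJ matrices whose layouts pass the compatibility check above):

     PetscInt  *startsj_s = NULL,*startsj_r = NULL;
     MatScalar *bufa = NULL;
     Mat       B_oth;
     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);CHKERRQ(ierr);
     (after B's values change, reuse the saved communication metadata)
     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);CHKERRQ(ierr);
     ierr = PetscFree2(startsj_s,startsj_r);CHKERRQ(ierr);
     ierr = PetscFree(bufa);CHKERRQ(ierr);
     ierr = MatDestroy(&B_oth);CHKERRQ(ierr);
*/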
/*@C
  MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.

  Not Collective

  Input Parameter:
. A - The matrix in mpiaij format

  Output Parameters:
+ lvec - The local vector holding off-process values from the argument to a matrix-vector product
. colmap - A map from global column index to local index into lvec
- multScatter - A scatter from the argument of a matrix-vector product to lvec

  Level: developer

@*/
#if defined(PETSC_USE_CTABLE)
PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
#else
PetscErrorCode MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
#endif
{
  Mat_MPIAIJ *a;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
  PetscValidPointer(lvec, 2);
  PetscValidPointer(colmap, 3);
  PetscValidPointer(multScatter, 4);
  a = (Mat_MPIAIJ*) A->data;
  if (lvec) *lvec = a->lvec;
  if (colmap) *colmap = a->colmap;
  if (multScatter) *multScatter = a->Mvctx;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
#if defined(PETSC_HAVE_ELEMENTAL)
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
#endif
#if defined(PETSC_HAVE_HYPRE)
PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
#endif
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_IS(Mat,MatType,MatReuse,Mat*);

/*
    Computes (B'*A')' since computing A*B directly is untenable

               n                       p                           p
        (              )       (               )         (                  )
      m (      A       )  *  n (       B       )   =   m (         C        )
        (              )       (               )         (                  )

*/
PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat            At,Bt,Ct;

  PetscFunctionBegin;
  ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
  ierr = MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);CHKERRQ(ierr);
  ierr = MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);CHKERRQ(ierr);
  ierr = MatDestroy(&At);CHKERRQ(ierr);
  ierr = MatDestroy(&Bt);CHKERRQ(ierr);
  ierr = MatTranspose(Ct,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  ierr = MatDestroy(&Ct);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscInt       m=A->rmap->n,n=B->cmap->n;
  Mat            Cmat;

  PetscFunctionBegin;
  if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
  ierr = MatCreate(PetscObjectComm((PetscObject)A),&Cmat);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Cmat,A,B);CHKERRQ(ierr);
  ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
  ierr = MatMPIDenseSetPreallocation(Cmat,NULL);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;

  *C = Cmat;
  PetscFunctionReturn(0);
}
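/*
   A hedged usage sketch: the two routines above are normally reached through the
   generic interface (they are composed onto the AIJ matrix in MatCreate_MPIAIJ()
   below), e.g. with A an MPIDENSE and B an MPIAIJ matrix from the caller's context:

     Mat C;
     ierr = MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C);CHKERRQ(ierr);
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/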
/* ----------------------------------------------------------------*/
PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
    ierr = MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.

   Options Database Keys:
. -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateAIJ()
M*/
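/*
   A minimal creation sketch for this type (hedged; comm, M, N, and the preallocation
   counts are illustrative placeholders from the caller's context):

     Mat A;
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr);
     (set values with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd())
*/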
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
{
  Mat_MPIAIJ     *b;
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);CHKERRQ(ierr);

  ierr    = PetscNewLog(B,&b);CHKERRQ(ierr);
  B->data = (void*)b;
  ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);

  B->assembled  = PETSC_FALSE;
  B->insertmode = NOT_SET_VALUES;
  b->size       = size;

  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);CHKERRQ(ierr);

  /* build cache for off array entries formed */
  ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);CHKERRQ(ierr);

  b->donotstash  = PETSC_FALSE;
  b->colmap      = NULL;
  b->garray      = NULL;
  b->roworiented = PETSC_TRUE;

  /* stuff used for matrix vector multiply */
  b->lvec  = NULL;
  b->Mvctx = NULL;

  /* stuff for MatGetRow() */
  b->rowindices   = NULL;
  b->rowvalues    = NULL;
  b->getrowactive = PETSC_FALSE;

  /* flexible pointer used in CUSP/CUSPARSE classes */
  b->spptr = NULL;

  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);CHKERRQ(ierr);
#if defined(PETSC_HAVE_ELEMENTAL)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);CHKERRQ(ierr);
#endif
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr);
#endif
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_MPIAIJ_IS);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);CHKERRQ(ierr);
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);CHKERRQ(ierr);
#endif
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
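/*
   How the compositions above are consumed (a sketch; MatConvert() and MatMatMult()
   perform this lookup internally, and S here is a hypothetical output matrix):

     PetscErrorCode (*conv)(Mat,MatType,MatReuse,Mat*);
     ierr = PetscObjectQueryFunction((PetscObject)A,"MatConvert_mpiaij_mpisbaij_C",&conv);CHKERRQ(ierr);
     if (conv) {ierr = (*conv)(A,MATMPISBAIJ,MAT_INITIAL_MATRIX,&S);CHKERRQ(ierr);}
*/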
/*@C
     MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
         and "off-diagonal" part of the matrix in CSR format.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (Cannot be PETSC_DECIDE)
.  n - This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
       calculated if N is given) For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  i - row indices for "diagonal" portion of matrix
.  j - column indices
.  a - matrix values
.  oi - row indices for "off-diagonal" portion of matrix
.  oj - column indices
-  oa - matrix values

   Output Parameter:
.  mat - the matrix

   Level: advanced

   Notes:
       The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
       must free the arrays once the matrix has been destroyed and not before.

       The i and j indices are 0 based

       See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix

       This sets local rows and cannot be used to set off-processor values.

       Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
       legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
       not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
       the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
       keep track of the underlying arrays. Use MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
       communication if it is known that only local entries will be set.

.keywords: matrix, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
          MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
@*/
PetscErrorCode MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *maij;

  PetscFunctionBegin;
  if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
  if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
  if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
  ierr = MatCreate(comm,mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
  ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
  maij = (Mat_MPIAIJ*) (*mat)->data;

  (*mat)->preallocated = PETSC_TRUE;

  ierr = PetscLayoutSetUp((*mat)->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp((*mat)->cmap);CHKERRQ(ierr);

  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);CHKERRQ(ierr);
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
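/*
   A hedged sketch of the expected call (the six arrays are placeholders filled per the
   Notes above: i/oi are 0-based CSR row offsets, j/oj the matching 0-based column
   indices for the diagonal and off-diagonal blocks as described in MatCreateAIJ()):

     Mat A;
     ierr = MatCreateMPIAIJWithSplitArrays(comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,
                                           i,j,a,oi,oj,oa,&A);CHKERRQ(ierr);
     (use A)
     ierr = MatDestroy(&A);CHKERRQ(ierr);
     (only now may i, j, a, oi, oj, and oa be freed by the caller)
*/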
/*
    Special version for direct calls from Fortran
*/
#include <petsc/private/fortranimpl.h>

/* Change these macros so they can be used in a void function */
#undef CHKERRQ
#define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
#undef SETERRQ2
#define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
#undef SETERRQ3
#define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
#undef SETERRQ
#define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)

#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define matsetvaluesmpiaij_ matsetvaluesmpiaij
#endif
PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
{
  Mat            mat  = *mmat;
  PetscInt       m    = *mm, n = *mn;
  InsertMode     addv = *maddv;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscScalar    value;
  PetscErrorCode ierr;

  MatCheckPreallocated(mat,1);
  if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
#if defined(PETSC_USE_DEBUG)
  else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
#endif
  {
    PetscInt  i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
    PetscInt  cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
    PetscBool roworiented = aij->roworiented;

    /* Some Variables required in the macro */
    Mat        A     = aij->A;
    Mat_SeqAIJ *a    = (Mat_SeqAIJ*)A->data;
    PetscInt   *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
    MatScalar  *aa   = a->a;
    PetscBool  ignorezeroentries = (((a->ignorezeroentries) && (addv == ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
    Mat        B     = aij->B;
    Mat_SeqAIJ *b    = (Mat_SeqAIJ*)B->data;
    PetscInt   *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
    MatScalar  *ba   = b->a;

    PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
    PetscInt  nonew = a->nonew;
    MatScalar *ap1,*ap2;

    PetscFunctionBegin;
    for (i=0; i<m; i++) {
      if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
      if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
      if (im[i] >= rstart && im[i] < rend) {
        row      = im[i] - rstart;
        lastcol1 = -1;
        rp1      = aj + ai[row];
        ap1      = aa + ai[row];
        rmax1    = aimax[row];
        nrow1    = ailen[row];
        low1     = 0;
        high1    = nrow1;
        lastcol2 = -1;
        rp2      = bj + bi[row];
        ap2      = ba + bi[row];
        rmax2    = bimax[row];
        nrow2    = bilen[row];
        low2     = 0;
        high2    = nrow2;

        for (j=0; j<n; j++) {
          if (roworiented) value = v[i*n+j];
          else value = v[i+j*m];
          if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
          if (in[j] >= cstart && in[j] < cend) {
            col = in[j] - cstart;
            MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
          } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
          else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
#endif
          else {
            if (mat->was_assembled) {
              if (!aij->colmap) {
                ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
              }
#if defined(PETSC_USE_CTABLE)
              ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
              col--;
#else
              col = aij->colmap[in[j]] - 1;
#endif
              if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
                ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
                col  = in[j];
                /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
                B     = aij->B;
                b     = (Mat_SeqAIJ*)B->data;
                bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
                rp2   = bj + bi[row];
                ap2   = ba + bi[row];
                rmax2 = bimax[row];
                nrow2 = bilen[row];
                low2  = 0;
                high2 = nrow2;
                bm    = aij->B->rmap->n;
                ba    = b->a;
              }
            } else col = in[j];
            MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
          }
        }
      } else if (!aij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturnVoid();
}