1 /* 2 Defines projective product routines where A is a AIJ matrix 3 C = P^T * A * P 4 */ 5 6 #include "src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/ 7 #include "src/mat/utils/freespace.h" 8 #include "src/mat/impls/aij/mpi/mpiaij.h" 9 #include "petscbt.h" 10 11 #undef __FUNCT__ 12 #define __FUNCT__ "MatPtAP" 13 /*@ 14 MatPtAP - Creates the matrix projection C = P^T * A * P 15 16 Collective on Mat 17 18 Input Parameters: 19 + A - the matrix 20 . P - the projection matrix 21 . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX 22 - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)) 23 24 Output Parameters: 25 . C - the product matrix 26 27 Notes: 28 C will be created and must be destroyed by the user with MatDestroy(). 29 30 This routine is currently only implemented for pairs of SeqAIJ matrices and classes 31 which inherit from SeqAIJ. C will be of type MATSEQAIJ. 32 33 Level: intermediate 34 35 .seealso: MatPtAPSymbolic(),MatPtAPNumeric(),MatMatMult() 36 @*/ 37 PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 38 { 39 PetscErrorCode ierr; 40 PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat *); 41 PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat *); 42 43 PetscFunctionBegin; 44 PetscValidHeaderSpecific(A,MAT_COOKIE,1); 45 PetscValidType(A,1); 46 MatPreallocated(A); 47 if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 48 if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 49 PetscValidHeaderSpecific(P,MAT_COOKIE,2); 50 PetscValidType(P,2); 51 MatPreallocated(P); 52 if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 53 if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 54 PetscValidPointer(C,3); 55 56 if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N); 57 58 if (fill <=0.0) SETERRQ1(PETSC_ERR_ARG_SIZ,"fill=%g must be > 0.0",fill); 59 60 /* For now, we do not 
dispatch based on the type of A and P */ 61 /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. */ 62 fA = A->ops->ptap; 63 if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",A->type_name); 64 fP = P->ops->ptap; 65 if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAP not supported for P of type %s",P->type_name); 66 if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",A->type_name,P->type_name); 67 68 ierr = PetscLogEventBegin(MAT_PtAP,A,P,0,0);CHKERRQ(ierr); 69 ierr = (*fA)(A,P,scall,fill,C);CHKERRQ(ierr); 70 ierr = PetscLogEventEnd(MAT_PtAP,A,P,0,0);CHKERRQ(ierr); 71 PetscFunctionReturn(0); 72 } 73 74 EXTERN PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,MatReuse,PetscReal,int,int,Mat*); 75 EXTERN PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,PetscReal,int,int,Mat*); 76 EXTERN PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,int,int,Mat); 77 78 #undef __FUNCT__ 79 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ" 80 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 81 { 82 PetscErrorCode ierr; 83 84 PetscFunctionBegin; 85 if (scall == MAT_INITIAL_MATRIX){ 86 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 87 ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);/* numeric product is computed as well */ 88 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 89 } else if (scall == MAT_REUSE_MATRIX){ 90 ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 91 ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr); 92 ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 93 } else { 94 SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",scall); 95 } 96 PetscFunctionReturn(0); 97 } 98 99 #undef __FUNCT__ 100 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 101 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 102 { 103 
PetscErrorCode ierr; 104 Mat P_seq,A_loc,C_seq; 105 int prstart,prend,m=P->m; 106 IS isrowp,iscolp; 107 108 PetscFunctionBegin; 109 /* get P_seq = submatrix of P by taking rows of P that equal to nonzero col of A */ 110 ierr = MatGetBrowsOfAcols(A,P,MAT_INITIAL_MATRIX,&isrowp,&iscolp,&prstart,&P_seq);CHKERRQ(ierr); 111 ierr = ISDestroy(iscolp);CHKERRQ(ierr); 112 113 /* get A_loc = submatrix of A by taking all local rows of A */ 114 ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,PETSC_NULL,&isrowp,&A_loc);CHKERRQ(ierr); 115 ierr = ISDestroy(isrowp);CHKERRQ(ierr); 116 117 /* compute C_seq = P_loc^T * A_loc * P_seq */ 118 prend = prstart + m; 119 ierr = MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(A_loc,P_seq,MAT_INITIAL_MATRIX,fill,prstart,prend,&C_seq);CHKERRQ(ierr); 120 ierr = MatDestroy(P_seq);CHKERRQ(ierr); 121 ierr = MatDestroy(A_loc);CHKERRQ(ierr); 122 123 /* add C_seq into mpi C */ 124 ierr = MatMerge_SeqsToMPI(A->comm,C_seq,P->n,P->n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr); 125 126 PetscFunctionReturn(0); 127 } 128 129 #undef __FUNCT__ 130 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 131 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 132 { 133 PetscErrorCode ierr; 134 Mat P_seq,A_loc,C_seq; 135 int prstart,prend,m=P->m; 136 IS isrowp,iscolp; 137 Mat_Merge_SeqsToMPI *merge; 138 PetscObjectContainer container; 139 140 PetscFunctionBegin; 141 ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr); 142 if (container) { 143 ierr = PetscObjectContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr); 144 } else { 145 SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix C does not posses and object container"); 146 } 147 148 /* get P_seq = submatrix of P by taking rows of P that equal to nonzero col of A */ 149 ierr = MatGetBrowsOfAcols(A,P,MAT_INITIAL_MATRIX,&isrowp,&iscolp,&prstart,&P_seq);CHKERRQ(ierr); 150 ierr = ISDestroy(iscolp);CHKERRQ(ierr); 151 152 /* get A_loc = submatrix of A by taking all local rows of A */ 153 
ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,PETSC_NULL,&isrowp,&A_loc);CHKERRQ(ierr); 154 ierr = ISDestroy(isrowp);CHKERRQ(ierr); 155 156 /* compute C_seq = P_loc^T * A_loc * P_seq */ 157 prend = prstart + m; 158 C_seq = merge->C_seq; 159 ierr = MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(A_loc,P_seq,MAT_REUSE_MATRIX,1.0,prstart,prend,&C_seq);CHKERRQ(ierr); 160 ierr = MatDestroy(P_seq);CHKERRQ(ierr); 161 ierr = MatDestroy(A_loc);CHKERRQ(ierr); 162 163 /* add C_seq into mpi C */ 164 ierr = MatMerge_SeqsToMPI(A->comm,C_seq,P->n,P->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr); 165 166 PetscFunctionReturn(0); 167 } 168 169 #undef __FUNCT__ 170 #define __FUNCT__ "MatPtAP_SeqAIJ_SeqAIJ" 171 PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 172 { 173 PetscErrorCode ierr; 174 PetscFunctionBegin; 175 if (scall == MAT_INITIAL_MATRIX){ 176 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 177 ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ(A,P,fill,C);CHKERRQ(ierr); 178 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 179 } 180 ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 181 ierr = MatPtAPNumeric_SeqAIJ_SeqAIJ(A,P,*C);CHKERRQ(ierr); 182 ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 183 PetscFunctionReturn(0); 184 } 185 186 #undef __FUNCT__ 187 #define __FUNCT__ "MatPtAPSymbolic" 188 /* 189 MatPtAPSymbolic - Creates the (i,j) structure of the matrix projection C = P^T * A * P 190 191 Collective on Mat 192 193 Input Parameters: 194 + A - the matrix 195 - P - the projection matrix 196 197 Output Parameters: 198 . C - the (i,j) structure of the product matrix 199 200 Notes: 201 C will be created and must be destroyed by the user with MatDestroy(). 202 203 This routine is currently only implemented for pairs of SeqAIJ matrices and classes 204 which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using 205 this (i,j) structure by calling MatPtAPNumeric(). 
206 207 Level: intermediate 208 209 .seealso: MatPtAP(),MatPtAPNumeric(),MatMatMultSymbolic() 210 */ 211 PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C) 212 { 213 PetscErrorCode ierr; 214 PetscErrorCode (*fA)(Mat,Mat,PetscReal,Mat*); 215 PetscErrorCode (*fP)(Mat,Mat,PetscReal,Mat*); 216 217 PetscFunctionBegin; 218 219 PetscValidHeaderSpecific(A,MAT_COOKIE,1); 220 PetscValidType(A,1); 221 MatPreallocated(A); 222 if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 223 if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 224 225 PetscValidHeaderSpecific(P,MAT_COOKIE,2); 226 PetscValidType(P,2); 227 MatPreallocated(P); 228 if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 229 if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 230 231 PetscValidPointer(C,3); 232 233 if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N); 234 if (A->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %d != %d",A->M,A->N); 235 236 /* For now, we do not dispatch based on the type of A and P */ 237 /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. 
*/ 238 fA = A->ops->ptapsymbolic; 239 if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAPSymbolic not supported for A of type %s",A->type_name); 240 fP = P->ops->ptapsymbolic; 241 if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAPSymbolic not supported for P of type %s",P->type_name); 242 if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAPSymbolic requires A, %s, to be compatible with P, %s",A->type_name,P->type_name); 243 244 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 245 ierr = (*fA)(A,P,fill,C);CHKERRQ(ierr); 246 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 247 248 PetscFunctionReturn(0); 249 } 250 251 typedef struct { 252 Mat symAP; 253 } Mat_PtAPstruct; 254 255 EXTERN PetscErrorCode MatDestroy_SeqAIJ(Mat); 256 257 #undef __FUNCT__ 258 #define __FUNCT__ "MatDestroy_SeqAIJ_PtAP" 259 PetscErrorCode MatDestroy_SeqAIJ_PtAP(Mat A) 260 { 261 PetscErrorCode ierr; 262 Mat_PtAPstruct *ptap=(Mat_PtAPstruct*)A->spptr; 263 264 PetscFunctionBegin; 265 ierr = MatDestroy(ptap->symAP);CHKERRQ(ierr); 266 ierr = PetscFree(ptap);CHKERRQ(ierr); 267 ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); 268 PetscFunctionReturn(0); 269 } 270 271 #undef __FUNCT__ 272 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ" 273 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 274 { 275 PetscErrorCode ierr; 276 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 277 Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*p=(Mat_SeqAIJ*)P->data,*c; 278 PetscInt *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 279 PetscInt *ci,*cj,*ptadenserow,*ptasparserow,*ptaj,nlnk,*lnk; 280 PetscInt an=A->N,am=A->M,pn=P->N,pm=P->M; 281 PetscInt i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi; 282 MatScalar *ca; 283 PetscBT lnkbt; 284 285 PetscFunctionBegin; 286 /* Get ij structure of P^T */ 287 ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 288 ptJ=ptj; 289 290 /* Allocate ci array, arrays for fill computation and */ 291 /* free space for 
accumulating nonzero column info */ 292 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 293 ci[0] = 0; 294 295 ierr = PetscMalloc((2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr); 296 ierr = PetscMemzero(ptadenserow,(2*an+1)*sizeof(PetscInt));CHKERRQ(ierr); 297 ptasparserow = ptadenserow + an; 298 299 /* create and initialize a linked list */ 300 nlnk = pn+1; 301 ierr = PetscLLCreate(pn,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr); 302 303 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 304 /* This should be reasonable if sparsity of PtAP is similar to that of A. */ 305 ierr = GetMoreSpace((ai[am]/pm)*pn,&free_space); 306 current_space = free_space; 307 308 /* Determine symbolic info for each row of C: */ 309 for (i=0;i<pn;i++) { 310 ptnzi = pti[i+1] - pti[i]; 311 ptanzi = 0; 312 /* Determine symbolic row of PtA: */ 313 for (j=0;j<ptnzi;j++) { 314 arow = *ptJ++; 315 anzj = ai[arow+1] - ai[arow]; 316 ajj = aj + ai[arow]; 317 for (k=0;k<anzj;k++) { 318 if (!ptadenserow[ajj[k]]) { 319 ptadenserow[ajj[k]] = -1; 320 ptasparserow[ptanzi++] = ajj[k]; 321 } 322 } 323 } 324 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 325 ptaj = ptasparserow; 326 cnzi = 0; 327 for (j=0;j<ptanzi;j++) { 328 prow = *ptaj++; 329 pnzj = pi[prow+1] - pi[prow]; 330 pjj = pj + pi[prow]; 331 /* add non-zero cols of P into the sorted linked list lnk */ 332 ierr = PetscLLAdd(pnzj,pjj,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr); 333 cnzi += nlnk; 334 } 335 336 /* If free space is not available, make more free space */ 337 /* Double the amount of total space in the list */ 338 if (current_space->local_remaining<cnzi) { 339 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 340 } 341 342 /* Copy data into free space, and zero out denserows */ 343 ierr = PetscLLClean(pn,pn,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr); 344 current_space->array += cnzi; 345 current_space->local_used += cnzi; 346 
current_space->local_remaining -= cnzi; 347 348 for (j=0;j<ptanzi;j++) { 349 ptadenserow[ptasparserow[j]] = 0; 350 } 351 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 352 /* For now, we will recompute what is needed. */ 353 ci[i+1] = ci[i] + cnzi; 354 } 355 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 356 /* Allocate space for cj, initialize cj, and */ 357 /* destroy list of free space and other temporary array(s) */ 358 ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr); 359 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 360 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 361 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 362 363 /* Allocate space for ca */ 364 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 365 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 366 367 /* put together the new matrix */ 368 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 369 370 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 371 /* Since these are PETSc arrays, change flags to free them as necessary. */ 372 c = (Mat_SeqAIJ *)((*C)->data); 373 c->freedata = PETSC_TRUE; 374 c->nonew = 0; 375 376 /* Clean up. */ 377 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 378 379 PetscFunctionReturn(0); 380 } 381 382 #include "src/mat/impls/maij/maij.h" 383 EXTERN_C_BEGIN 384 #undef __FUNCT__ 385 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqMAIJ" 386 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqMAIJ(Mat A,Mat PP,Mat *C) 387 { 388 /* This routine requires testing -- I don't think it works. 
*/ 389 PetscErrorCode ierr; 390 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 391 Mat_SeqMAIJ *pp=(Mat_SeqMAIJ*)PP->data; 392 Mat P=pp->AIJ; 393 Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*p=(Mat_SeqAIJ*)P->data,*c; 394 int *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 395 int *ci,*cj,*denserow,*sparserow,*ptadenserow,*ptasparserow,*ptaj; 396 int an=A->N,am=A->M,pn=P->N,pm=P->M,ppdof=pp->dof; 397 int i,j,k,dof,pdof,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi; 398 MatScalar *ca; 399 400 PetscFunctionBegin; 401 /* Start timer */ 402 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,PP,0,0);CHKERRQ(ierr); 403 404 /* Get ij structure of P^T */ 405 ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 406 407 /* Allocate ci array, arrays for fill computation and */ 408 /* free space for accumulating nonzero column info */ 409 ierr = PetscMalloc((pn+1)*sizeof(int),&ci);CHKERRQ(ierr); 410 ci[0] = 0; 411 412 ierr = PetscMalloc((2*pn+2*an+1)*sizeof(int),&ptadenserow);CHKERRQ(ierr); 413 ierr = PetscMemzero(ptadenserow,(2*pn+2*an+1)*sizeof(int));CHKERRQ(ierr); 414 ptasparserow = ptadenserow + an; 415 denserow = ptasparserow + an; 416 sparserow = denserow + pn; 417 418 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 419 /* This should be reasonable if sparsity of PtAP is similar to that of A. 
*/ 420 ierr = GetMoreSpace((ai[am]/pm)*pn,&free_space); 421 current_space = free_space; 422 423 /* Determine symbolic info for each row of C: */ 424 for (i=0;i<pn/ppdof;i++) { 425 ptnzi = pti[i+1] - pti[i]; 426 ptanzi = 0; 427 ptJ = ptj + pti[i]; 428 for (dof=0;dof<ppdof;dof++) { 429 /* Determine symbolic row of PtA: */ 430 for (j=0;j<ptnzi;j++) { 431 arow = ptJ[j] + dof; 432 anzj = ai[arow+1] - ai[arow]; 433 ajj = aj + ai[arow]; 434 for (k=0;k<anzj;k++) { 435 if (!ptadenserow[ajj[k]]) { 436 ptadenserow[ajj[k]] = -1; 437 ptasparserow[ptanzi++] = ajj[k]; 438 } 439 } 440 } 441 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 442 ptaj = ptasparserow; 443 cnzi = 0; 444 for (j=0;j<ptanzi;j++) { 445 pdof = *ptaj%dof; 446 prow = (*ptaj++)/dof; 447 pnzj = pi[prow+1] - pi[prow]; 448 pjj = pj + pi[prow]; 449 for (k=0;k<pnzj;k++) { 450 if (!denserow[pjj[k]+pdof]) { 451 denserow[pjj[k]+pdof] = -1; 452 sparserow[cnzi++] = pjj[k]+pdof; 453 } 454 } 455 } 456 457 /* sort sparserow */ 458 ierr = PetscSortInt(cnzi,sparserow);CHKERRQ(ierr); 459 460 /* If free space is not available, make more free space */ 461 /* Double the amount of total space in the list */ 462 if (current_space->local_remaining<cnzi) { 463 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 464 } 465 466 /* Copy data into free space, and zero out denserows */ 467 ierr = PetscMemcpy(current_space->array,sparserow,cnzi*sizeof(int));CHKERRQ(ierr); 468 current_space->array += cnzi; 469 current_space->local_used += cnzi; 470 current_space->local_remaining -= cnzi; 471 472 for (j=0;j<ptanzi;j++) { 473 ptadenserow[ptasparserow[j]] = 0; 474 } 475 for (j=0;j<cnzi;j++) { 476 denserow[sparserow[j]] = 0; 477 } 478 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 479 /* For now, we will recompute what is needed. 
*/ 480 ci[i+1+dof] = ci[i+dof] + cnzi; 481 } 482 } 483 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 484 /* Allocate space for cj, initialize cj, and */ 485 /* destroy list of free space and other temporary array(s) */ 486 ierr = PetscMalloc((ci[pn]+1)*sizeof(int),&cj);CHKERRQ(ierr); 487 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 488 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 489 490 /* Allocate space for ca */ 491 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 492 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 493 494 /* put together the new matrix */ 495 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 496 497 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 498 /* Since these are PETSc arrays, change flags to free them as necessary. */ 499 c = (Mat_SeqAIJ *)((*C)->data); 500 c->freedata = PETSC_TRUE; 501 c->nonew = 0; 502 503 /* Clean up. */ 504 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 505 506 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,PP,0,0);CHKERRQ(ierr); 507 PetscFunctionReturn(0); 508 } 509 EXTERN_C_END 510 511 #undef __FUNCT__ 512 #define __FUNCT__ "MatPtAPNumeric" 513 /* 514 MatPtAPNumeric - Computes the matrix projection C = P^T * A * P 515 516 Collective on Mat 517 518 Input Parameters: 519 + A - the matrix 520 - P - the projection matrix 521 522 Output Parameters: 523 . C - the product matrix 524 525 Notes: 526 C must have been created by calling MatPtAPSymbolic and must be destroyed by 527 the user using MatDeatroy(). 528 529 This routine is currently only implemented for pairs of AIJ matrices and classes 530 which inherit from AIJ. C will be of type MATAIJ. 
   Level: intermediate

.seealso: MatPtAP(),MatPtAPSymbolic(),MatMatMultNumeric()
*/
PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  PetscErrorCode (*fA)(Mat,Mat,Mat);
  PetscErrorCode (*fP)(Mat,Mat,Mat);

  PetscFunctionBegin;

  /* A must be a valid, assembled, unfactored matrix */
  PetscValidHeaderSpecific(A,MAT_COOKIE,1);
  PetscValidType(A,1);
  MatPreallocated(A);
  if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* P must be a valid, assembled, unfactored matrix */
  PetscValidHeaderSpecific(P,MAT_COOKIE,2);
  PetscValidType(P,2);
  MatPreallocated(P);
  if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* C must already exist (created by MatPtAPSymbolic()) and be assembled */
  PetscValidHeaderSpecific(C,MAT_COOKIE,3);
  PetscValidType(C,3);
  MatPreallocated(C);
  if (!C->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (C->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* dimension checks: C is cols(P) x cols(P), rows(P) == cols(A), A square */
  if (P->N!=C->M) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->N,C->M);
  if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N);
  if (A->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %d != %d",A->M,A->N);
  if (P->N!=C->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->N,C->N);

  /* For now, we do not dispatch based on the type of A and P */
  /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. */
  fA = A->ops->ptapnumeric;
  if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAPNumeric not supported for A of type %s",A->type_name);
  fP = P->ops->ptapnumeric;
  if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAPNumeric not supported for P of type %s",P->type_name);
  /* both must resolve to the same implementation for the single-dispatch call below */
  if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAPNumeric requires A, %s, to be compatible with P, %s",A->type_name,P->type_name);

  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*fA)(A,P,C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ"
/* Computes the numeric values of C = P^T*A*P for SeqAIJ matrices.  C must already
   carry the nonzero structure produced by MatPtAPSymbolic_SeqAIJ_SeqAIJ().  For each
   row i of A, a dense scratch row of A*P is formed, then scattered into the rows of C
   selected by column entries of row i of P (i.e. rows of P^T). */
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  int        flops=0;
  Mat_SeqAIJ *a  = (Mat_SeqAIJ *) A->data;
  Mat_SeqAIJ *p  = (Mat_SeqAIJ *) P->data;
  Mat_SeqAIJ *c  = (Mat_SeqAIJ *) C->data;
  int        *ai=a->i,*aj=a->j,*apj,*apjdense,*pi=p->i,*pj=p->j,*pJ=p->j,*pjj;
  int        *ci=c->i,*cj=c->j,*cjj;
  int        am=A->M,cn=C->N,cm=C->M;
  int        i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
  MatScalar  *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj;

  PetscFunctionBegin;
  /* Allocate temporary array for storage of one row of A*P */
  /* single allocation: cn scalars (values) + cn ints (sparse col list) + cn ints (dense flags) */
  ierr = PetscMalloc(cn*(sizeof(MatScalar)+2*sizeof(int)),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+2*sizeof(int)));CHKERRQ(ierr);

  apj      = (int *)(apa + cn);
  apjdense = apj + cn;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0;i<am;i++) {
    /* Form sparse row of A*P */
    anzi  = ai[i+1] - ai[i];
    apnzj = 0;
    for (j=0;j<anzi;j++) {
      prow = *aj++;
      pnzj = pi[prow+1] - pi[prow];
      pjj  = pj + pi[prow];
      paj  = pa + pi[prow];
      for (k=0;k<pnzj;k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;
          apj[apnzj++]     = pjj[k];
        }
        apa[pjj[k]] += (*aa)*paj[k];
      }
      flops += 2*pnzj;
      aa++;
    }

    /* Sort the j index array for quick sparse axpy. */
    ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);

    /* Compute P^T*A*P using outer product (P^T)[:,j]*(A*P)[j,:]. */
    pnzi = pi[i+1] - pi[i];
    for (j=0;j<pnzi;j++) {
      nextap = 0;
      crow   = *pJ++;
      cjj    = cj + ci[crow];
      caj    = ca + ci[crow];
      /* Perform sparse axpy operation.  Note cjj includes apj (the symbolic phase
         guarantees it), so this merge terminates once all apnzj entries are matched. */
      for (k=0;nextap<apnzj;k++) {
        if (cjj[k]==apj[nextap]) {
          caj[k] += (*pA)*apa[apj[nextap++]];
        }
      }
      flops += 2*apnzj;
      pA++;
    }

    /* Zero the current row info for A*P */
    for (j=0;j<apnzj;j++) {
      apa[apj[j]]      = 0.;
      apjdense[apj[j]] = 0;
    }
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(apa);CHKERRQ(ierr);
  ierr = PetscLogFlops(flops);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* Compute C = P[rstart:rend,:]^T * A * P of seqaij matrices - used by MatPtAP_MPIAIJ_MPIAIJ() */

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt"
/*@C
   MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt - Computes C = P[prstart:prend,:]^T * A * P for seqaij
   matrices, used by MatPtAP_MPIAIJ_MPIAIJ().  C must already have the nonzero structure
   created by MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt().

   Collective on Mat

   Input Parameters:
+  A - the matrix in seqaij format
.  P - the projection matrix in seqaij format
.  prstart, prend - the starting and ending-1-th row of the matrix P to be used for transpose

   Output Parameters:
C - the product matrix in seqaij format 683 684 Level: developer 685 686 .seealso: MatPtAPSymbolic(),MatPtAPNumeric(),MatMatMult() 687 @*/ 688 PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,int prstart,int prend,Mat C) 689 { 690 PetscErrorCode ierr; 691 int flops=0; 692 Mat_SeqAIJ *a = (Mat_SeqAIJ *) A->data; 693 Mat_SeqAIJ *p = (Mat_SeqAIJ *) P->data; 694 Mat_SeqAIJ *c = (Mat_SeqAIJ *) C->data; 695 int *ai=a->i,*aj=a->j,*apj,*apjdense; 696 int *pi=p->i,*pj=p->j,*pJ=p->j+pi[prstart],*pjj; 697 int *ci=c->i,*cj=c->j,*cjj; 698 int am=A->M,cn=C->N,cm=C->M; 699 int i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow; 700 MatScalar *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj; 701 702 PetscFunctionBegin; 703 pA=p->a+pi[prstart]; 704 /* Allocate temporary array for storage of one row of A*P */ 705 ierr = PetscMalloc(cn*(sizeof(MatScalar)+2*sizeof(int)),&apa);CHKERRQ(ierr); 706 ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+2*sizeof(int)));CHKERRQ(ierr); 707 708 apj = (int *)(apa + cn); 709 apjdense = apj + cn; 710 711 /* Clear old values in C */ 712 ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr); 713 714 for (i=0;i<am;i++) { 715 /* Form sparse row of A*P */ 716 anzi = ai[i+1] - ai[i]; 717 apnzj = 0; 718 for (j=0;j<anzi;j++) { 719 prow = *aj++; 720 pnzj = pi[prow+1] - pi[prow]; 721 pjj = pj + pi[prow]; 722 paj = pa + pi[prow]; 723 for (k=0;k<pnzj;k++) { 724 if (!apjdense[pjj[k]]) { 725 apjdense[pjj[k]] = -1; 726 apj[apnzj++] = pjj[k]; 727 } 728 apa[pjj[k]] += (*aa)*paj[k]; 729 } 730 flops += 2*pnzj; 731 aa++; 732 } 733 734 /* Sort the j index array for quick sparse axpy. */ 735 ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr); 736 737 /* Compute P[[prstart:prend,:]^T*A*P using outer product (P^T)[:,j+prstart]*(A*P)[j,:]. */ 738 pnzi = pi[i+1+prstart] - pi[i+prstart]; 739 for (j=0;j<pnzi;j++) { 740 nextap = 0; 741 crow = *pJ++; 742 cjj = cj + ci[crow]; 743 caj = ca + ci[crow]; 744 /* Perform sparse axpy operation. Note cjj includes apj. 
*/ 745 for (k=0;nextap<apnzj;k++) { 746 if (cjj[k]==apj[nextap]) { 747 caj[k] += (*pA)*apa[apj[nextap++]]; 748 } 749 } 750 flops += 2*apnzj; 751 pA++; 752 } 753 754 /* Zero the current row info for A*P */ 755 for (j=0;j<apnzj;j++) { 756 apa[apj[j]] = 0.; 757 apjdense[apj[j]] = 0; 758 } 759 } 760 761 /* Assemble the final matrix and clean up */ 762 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 763 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 764 ierr = PetscFree(apa);CHKERRQ(ierr); 765 ierr = PetscLogFlops(flops);CHKERRQ(ierr); 766 767 PetscFunctionReturn(0); 768 } 769 770 #undef __FUNCT__ 771 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt" 772 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,PetscReal fill,int prstart,int prend,Mat *C) 773 { 774 PetscErrorCode ierr; 775 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 776 Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*p=(Mat_SeqAIJ*)P->data,*c; 777 PetscInt *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 778 PetscInt *ci,*cj,*ptadenserow,*ptasparserow,*ptaj,nlnk,*lnk; 779 PetscInt an=A->N,am=A->M,pn=P->N,pm=P->M; 780 PetscInt i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi,m=prend - prstart; 781 MatScalar *ca; 782 Mat *psub,P_sub; 783 IS isrow,iscol; 784 PetscBT lnkbt; 785 786 PetscFunctionBegin; 787 /* Get ij structure of P[rstart:rend,:]^T */ 788 ierr = ISCreateStride(PETSC_COMM_SELF,m,prstart,1,&isrow);CHKERRQ(ierr); 789 ierr = ISCreateStride(PETSC_COMM_SELF,P->n,0,1,&iscol);CHKERRQ(ierr); 790 ierr = MatGetSubMatrices(P,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&psub);CHKERRQ(ierr); 791 ierr = ISDestroy(isrow);CHKERRQ(ierr); 792 ierr = ISDestroy(iscol);CHKERRQ(ierr); 793 P_sub = psub[0]; 794 ierr = MatGetSymbolicTranspose_SeqAIJ(P_sub,&pti,&ptj);CHKERRQ(ierr); 795 ierr = MatDestroyMatrices(1,&psub);CHKERRQ(ierr); 796 ptJ=ptj; 797 798 /* Allocate ci array, arrays for fill computation and */ 799 /* free space for accumulating nonzero column info */ 
800 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 801 ci[0] = 0; 802 803 ierr = PetscMalloc((2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr); 804 ierr = PetscMemzero(ptadenserow,(2*an+1)*sizeof(PetscInt));CHKERRQ(ierr); 805 ptasparserow = ptadenserow + an; 806 807 /* create and initialize a linked list */ 808 nlnk = pn+1; 809 ierr = PetscLLCreate(pn,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr); 810 811 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 812 /* This should be reasonable if sparsity of PtAP is similar to that of A. */ 813 ierr = GetMoreSpace((PetscInt)(fill*ai[am]/pm)*pn,&free_space); 814 current_space = free_space; 815 816 /* Determine symbolic info for each row of C: */ 817 for (i=0;i<pn;i++) { 818 ptnzi = pti[i+1] - pti[i]; 819 ptanzi = 0; 820 /* Determine symbolic row of PtA_reduced: */ 821 for (j=0;j<ptnzi;j++) { 822 arow = *ptJ++; 823 anzj = ai[arow+1] - ai[arow]; 824 ajj = aj + ai[arow]; 825 for (k=0;k<anzj;k++) { 826 if (!ptadenserow[ajj[k]]) { 827 ptadenserow[ajj[k]] = -1; 828 ptasparserow[ptanzi++] = ajj[k]; 829 } 830 } 831 } 832 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 833 ptaj = ptasparserow; 834 cnzi = 0; 835 for (j=0;j<ptanzi;j++) { 836 prow = *ptaj++; 837 pnzj = pi[prow+1] - pi[prow]; 838 pjj = pj + pi[prow]; 839 /* add non-zero cols of P into the sorted linked list lnk */ 840 ierr = PetscLLAdd(pnzj,pjj,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr); 841 cnzi += nlnk; 842 } 843 844 /* If free space is not available, make more free space */ 845 /* Double the amount of total space in the list */ 846 if (current_space->local_remaining<cnzi) { 847 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 848 } 849 850 /* Copy data into free space, and zero out denserows */ 851 ierr = PetscLLClean(pn,pn,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr); 852 current_space->array += cnzi; 853 current_space->local_used += cnzi; 854 current_space->local_remaining -= cnzi; 
855 856 for (j=0;j<ptanzi;j++) { 857 ptadenserow[ptasparserow[j]] = 0; 858 } 859 860 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 861 /* For now, we will recompute what is needed. */ 862 ci[i+1] = ci[i] + cnzi; 863 } 864 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 865 /* Allocate space for cj, initialize cj, and */ 866 /* destroy list of free space and other temporary array(s) */ 867 ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr); 868 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 869 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 870 ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr); 871 872 /* Allocate space for ca */ 873 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 874 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 875 876 /* put together the new matrix */ 877 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 878 879 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 880 /* Since these are PETSc arrays, change flags to free them as necessary. */ 881 c = (Mat_SeqAIJ *)((*C)->data); 882 c->freedata = PETSC_TRUE; 883 c->nonew = 0; 884 885 /* Clean up. 
*/ 886 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 887 888 PetscFunctionReturn(0); 889 } 890 891 #undef __FUNCT__ 892 #define __FUNCT__ "MatPtAP_SeqAIJ_SeqAIJ_ReducedPt" 893 PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,MatReuse scall,PetscReal fill,PetscInt prstart,PetscInt prend,Mat *C) 894 { 895 PetscErrorCode ierr; 896 PetscFunctionBegin; 897 if (A->m != prend-prstart) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",A->m,prend-prstart); 898 if (prend-prstart > P->m) SETERRQ2(PETSC_ERR_ARG_SIZ," prend-prstart %d cannot be larger than P->m %d",prend-prstart,P->m); 899 if (scall == MAT_INITIAL_MATRIX){ 900 ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(A,P,fill,prstart,prend,C);CHKERRQ(ierr); 901 } 902 903 ierr = MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(A,P,prstart,prend,*C);CHKERRQ(ierr); 904 905 PetscFunctionReturn(0); 906 } 907 908