1 /* 2 Defines projective product routines where A is a AIJ matrix 3 C = P^T * A * P 4 */ 5 6 #include "src/mat/impls/aij/seq/aij.h" /*I "petscmat.h" I*/ 7 #include "src/mat/utils/freespace.h" 8 #include "src/mat/impls/aij/mpi/mpiaij.h" 9 10 #undef __FUNCT__ 11 #define __FUNCT__ "MatPtAP" 12 /*@ 13 MatPtAP - Creates the matrix projection C = P^T * A * P 14 15 Collective on Mat 16 17 Input Parameters: 18 + A - the matrix 19 . P - the projection matrix 20 . scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX 21 - fill - expected fill as ratio of nnz(C)/(nnz(A) + nnz(P)) 22 23 Output Parameters: 24 . C - the product matrix 25 26 Notes: 27 C will be created and must be destroyed by the user with MatDestroy(). 28 29 This routine is currently only implemented for pairs of SeqAIJ matrices and classes 30 which inherit from SeqAIJ. C will be of type MATSEQAIJ. 31 32 Level: intermediate 33 34 .seealso: MatPtAPSymbolic(),MatPtAPNumeric(),MatMatMult() 35 @*/ 36 PetscErrorCode MatPtAP(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 37 { 38 PetscErrorCode ierr; 39 PetscErrorCode (*fA)(Mat,Mat,MatReuse,PetscReal,Mat *); 40 PetscErrorCode (*fP)(Mat,Mat,MatReuse,PetscReal,Mat *); 41 42 PetscFunctionBegin; 43 PetscValidHeaderSpecific(A,MAT_COOKIE,1); 44 PetscValidType(A,1); 45 MatPreallocated(A); 46 if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 47 if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 48 PetscValidHeaderSpecific(P,MAT_COOKIE,2); 49 PetscValidType(P,2); 50 MatPreallocated(P); 51 if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 52 if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 53 PetscValidPointer(C,3); 54 55 if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N); 56 57 if (fill <=0.0) SETERRQ1(PETSC_ERR_ARG_SIZ,"fill=%g must be > 0.0",fill); 58 59 /* For now, we do not dispatch based on the type of 
A and P */ 60 /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. */ 61 fA = A->ops->ptap; 62 if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAP not supported for A of type %s",A->type_name); 63 fP = P->ops->ptap; 64 if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAP not supported for P of type %s",P->type_name); 65 if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAP requires A, %s, to be compatible with P, %s",A->type_name,P->type_name); 66 67 ierr = PetscLogEventBegin(MAT_PtAP,A,P,0,0);CHKERRQ(ierr); 68 ierr = (*fA)(A,P,scall,fill,C);CHKERRQ(ierr); 69 ierr = PetscLogEventEnd(MAT_PtAP,A,P,0,0);CHKERRQ(ierr); 70 PetscFunctionReturn(0); 71 } 72 73 EXTERN PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,MatReuse,PetscReal,PetscInt,PetscInt,Mat*); 74 EXTERN PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,PetscReal,PetscInt,PetscInt,Mat*); 75 EXTERN PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(Mat,Mat,PetscInt,PetscInt,Mat); 76 77 #undef __FUNCT__ 78 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ" 79 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 80 { 81 PetscErrorCode ierr; 82 83 PetscFunctionBegin; 84 if (scall == MAT_INITIAL_MATRIX){ 85 ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);/* numeric product is computed as well */ 86 } else if (scall == MAT_REUSE_MATRIX){ 87 ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr); 88 } else { 89 SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",scall); 90 } 91 PetscFunctionReturn(0); 92 } 93 94 #undef __FUNCT__ 95 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 96 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 97 { 98 PetscErrorCode ierr; 99 Mat P_seq,A_loc,C_seq; 100 PetscInt prstart,prend,m=P->m; 101 IS isrowp,iscolp; 102 103 PetscFunctionBegin; 104 /* get P_seq = submatrix of P by taking rows of P that equal to nonzero col of A */ 105 ierr = 
MatGetBrowsOfAcols(A,P,MAT_INITIAL_MATRIX,&isrowp,&iscolp,&prstart,&P_seq);CHKERRQ(ierr); 106 ierr = ISDestroy(iscolp);CHKERRQ(ierr); 107 108 /* get A_loc = submatrix of A by taking all local rows of A */ 109 ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,PETSC_NULL,&isrowp,&A_loc);CHKERRQ(ierr); 110 ierr = ISDestroy(isrowp);CHKERRQ(ierr); 111 112 /* compute C_seq = P_loc^T * A_loc * P_seq */ 113 prend = prstart + m; 114 ierr = MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(A_loc,P_seq,MAT_INITIAL_MATRIX,fill,prstart,prend,&C_seq);CHKERRQ(ierr); 115 ierr = MatDestroy(P_seq);CHKERRQ(ierr); 116 ierr = MatDestroy(A_loc);CHKERRQ(ierr); 117 118 /* add C_seq into mpi C */ 119 ierr = MatMerge_SeqsToMPI(A->comm,C_seq,P->n,P->n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr); 120 121 PetscFunctionReturn(0); 122 } 123 124 #undef __FUNCT__ 125 #define __FUNCT__ "MatMatMultSymbolic_MPIAIJ_MPIAIJ" 126 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C) 127 { 128 PetscErrorCode ierr; 129 Mat P_seq,A_loc,C_seq; 130 PetscInt prstart,prend,m=P->m; 131 IS isrowp,iscolp; 132 Mat_Merge_SeqsToMPI *merge; 133 PetscObjectContainer container; 134 135 PetscFunctionBegin; 136 ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr); 137 if (container) { 138 ierr = PetscObjectContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr); 139 } else { 140 SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Matrix C does not posses and object container"); 141 } 142 143 /* get P_seq = submatrix of P by taking rows of P that equal to nonzero col of A */ 144 ierr = MatGetBrowsOfAcols(A,P,MAT_INITIAL_MATRIX,&isrowp,&iscolp,&prstart,&P_seq);CHKERRQ(ierr); 145 ierr = ISDestroy(iscolp);CHKERRQ(ierr); 146 147 /* get A_loc = submatrix of A by taking all local rows of A */ 148 ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,PETSC_NULL,&isrowp,&A_loc);CHKERRQ(ierr); 149 ierr = ISDestroy(isrowp);CHKERRQ(ierr); 150 151 /* compute C_seq = P_loc^T * A_loc * P_seq */ 152 prend = prstart + m; 153 C_seq = 
merge->C_seq; 154 ierr = MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(A_loc,P_seq,MAT_REUSE_MATRIX,1.0,prstart,prend,&C_seq);CHKERRQ(ierr); 155 ierr = MatDestroy(P_seq);CHKERRQ(ierr); 156 ierr = MatDestroy(A_loc);CHKERRQ(ierr); 157 158 /* add C_seq into mpi C */ 159 ierr = MatMerge_SeqsToMPI(A->comm,C_seq,P->n,P->n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr); 160 161 PetscFunctionReturn(0); 162 } 163 164 #undef __FUNCT__ 165 #define __FUNCT__ "MatPtAP_SeqAIJ_SeqAIJ" 166 PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C) 167 { 168 PetscErrorCode ierr; 169 170 PetscFunctionBegin; 171 if (scall == MAT_INITIAL_MATRIX){ 172 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 173 ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ(A,P,fill,C);CHKERRQ(ierr); 174 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 175 } 176 ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 177 ierr = MatPtAPNumeric_SeqAIJ_SeqAIJ(A,P,*C);CHKERRQ(ierr); 178 ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr); 179 PetscFunctionReturn(0); 180 } 181 182 #undef __FUNCT__ 183 #define __FUNCT__ "MatPtAPSymbolic" 184 /* 185 MatPtAPSymbolic - Creates the (i,j) structure of the matrix projection C = P^T * A * P 186 187 Collective on Mat 188 189 Input Parameters: 190 + A - the matrix 191 - P - the projection matrix 192 193 Output Parameters: 194 . C - the (i,j) structure of the product matrix 195 196 Notes: 197 C will be created and must be destroyed by the user with MatDestroy(). 198 199 This routine is currently only implemented for pairs of SeqAIJ matrices and classes 200 which inherit from SeqAIJ. C will be of type MATSEQAIJ. The product is computed using 201 this (i,j) structure by calling MatPtAPNumeric(). 
202 203 Level: intermediate 204 205 .seealso: MatPtAP(),MatPtAPNumeric(),MatMatMultSymbolic() 206 */ 207 PetscErrorCode MatPtAPSymbolic(Mat A,Mat P,PetscReal fill,Mat *C) 208 { 209 PetscErrorCode ierr; 210 PetscErrorCode (*fA)(Mat,Mat,PetscReal,Mat*); 211 PetscErrorCode (*fP)(Mat,Mat,PetscReal,Mat*); 212 213 PetscFunctionBegin; 214 215 PetscValidHeaderSpecific(A,MAT_COOKIE,1); 216 PetscValidType(A,1); 217 MatPreallocated(A); 218 if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 219 if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 220 221 PetscValidHeaderSpecific(P,MAT_COOKIE,2); 222 PetscValidType(P,2); 223 MatPreallocated(P); 224 if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix"); 225 if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix"); 226 227 PetscValidPointer(C,3); 228 229 if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N); 230 if (A->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %d != %d",A->M,A->N); 231 232 /* For now, we do not dispatch based on the type of A and P */ 233 /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. 
*/ 234 fA = A->ops->ptapsymbolic; 235 if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAPSymbolic not supported for A of type %s",A->type_name); 236 fP = P->ops->ptapsymbolic; 237 if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAPSymbolic not supported for P of type %s",P->type_name); 238 if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAPSymbolic requires A, %s, to be compatible with P, %s",A->type_name,P->type_name); 239 240 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 241 ierr = (*fA)(A,P,fill,C);CHKERRQ(ierr); 242 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr); 243 244 PetscFunctionReturn(0); 245 } 246 247 typedef struct { 248 Mat symAP; 249 } Mat_PtAPstruct; 250 251 EXTERN PetscErrorCode MatDestroy_SeqAIJ(Mat); 252 253 #undef __FUNCT__ 254 #define __FUNCT__ "MatDestroy_SeqAIJ_PtAP" 255 PetscErrorCode MatDestroy_SeqAIJ_PtAP(Mat A) 256 { 257 PetscErrorCode ierr; 258 Mat_PtAPstruct *ptap=(Mat_PtAPstruct*)A->spptr; 259 260 PetscFunctionBegin; 261 ierr = MatDestroy(ptap->symAP);CHKERRQ(ierr); 262 ierr = PetscFree(ptap);CHKERRQ(ierr); 263 ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); 264 PetscFunctionReturn(0); 265 } 266 267 #undef __FUNCT__ 268 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ" 269 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C) 270 { 271 PetscErrorCode ierr; 272 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 273 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*p = (Mat_SeqAIJ*)P->data,*c; 274 PetscInt *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 275 PetscInt *ci,*cj,*denserow,*sparserow,*ptadenserow,*ptasparserow,*ptaj; 276 PetscInt an=A->N,am=A->M,pn=P->N,pm=P->M; 277 PetscInt i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi; 278 MatScalar *ca; 279 280 PetscFunctionBegin; 281 /* Get ij structure of P^T */ 282 ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 283 ptJ=ptj; 284 285 /* Allocate ci array, arrays for fill computation and */ 286 /* free space for accumulating 
nonzero column info */ 287 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 288 ci[0] = 0; 289 290 ierr = PetscMalloc((2*pn+2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr); 291 ierr = PetscMemzero(ptadenserow,(2*pn+2*an+1)*sizeof(PetscInt));CHKERRQ(ierr); 292 ptasparserow = ptadenserow + an; 293 denserow = ptasparserow + an; 294 sparserow = denserow + pn; 295 296 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 297 /* This should be reasonable if sparsity of PtAP is similar to that of A. */ 298 ierr = GetMoreSpace((ai[am]/pm)*pn,&free_space); 299 current_space = free_space; 300 301 /* Determine symbolic info for each row of C: */ 302 for (i=0;i<pn;i++) { 303 ptnzi = pti[i+1] - pti[i]; 304 ptanzi = 0; 305 /* Determine symbolic row of PtA: */ 306 for (j=0;j<ptnzi;j++) { 307 arow = *ptJ++; 308 anzj = ai[arow+1] - ai[arow]; 309 ajj = aj + ai[arow]; 310 for (k=0;k<anzj;k++) { 311 if (!ptadenserow[ajj[k]]) { 312 ptadenserow[ajj[k]] = -1; 313 ptasparserow[ptanzi++] = ajj[k]; 314 } 315 } 316 } 317 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 318 ptaj = ptasparserow; 319 cnzi = 0; 320 for (j=0;j<ptanzi;j++) { 321 prow = *ptaj++; 322 pnzj = pi[prow+1] - pi[prow]; 323 pjj = pj + pi[prow]; 324 for (k=0;k<pnzj;k++) { 325 if (!denserow[pjj[k]]) { 326 denserow[pjj[k]] = -1; 327 sparserow[cnzi++] = pjj[k]; 328 } 329 } 330 } 331 332 /* sort sparserow */ 333 ierr = PetscSortInt(cnzi,sparserow);CHKERRQ(ierr); 334 335 /* If free space is not available, make more free space */ 336 /* Double the amount of total space in the list */ 337 if (current_space->local_remaining<cnzi) { 338 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 339 } 340 341 /* Copy data into free space, and zero out denserows */ 342 ierr = PetscMemcpy(current_space->array,sparserow,cnzi*sizeof(PetscInt));CHKERRQ(ierr); 343 current_space->array += cnzi; 344 current_space->local_used += cnzi; 345 
current_space->local_remaining -= cnzi; 346 347 for (j=0;j<ptanzi;j++) { 348 ptadenserow[ptasparserow[j]] = 0; 349 } 350 for (j=0;j<cnzi;j++) { 351 denserow[sparserow[j]] = 0; 352 } 353 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 354 /* For now, we will recompute what is needed. */ 355 ci[i+1] = ci[i] + cnzi; 356 } 357 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 358 /* Allocate space for cj, initialize cj, and */ 359 /* destroy list of free space and other temporary array(s) */ 360 ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr); 361 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 362 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 363 364 /* Allocate space for ca */ 365 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 366 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 367 368 /* put together the new matrix */ 369 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 370 371 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 372 /* Since these are PETSc arrays, change flags to free them as necessary. */ 373 c = (Mat_SeqAIJ *)((*C)->data); 374 c->freedata = PETSC_TRUE; 375 c->nonew = 0; 376 377 /* Clean up. */ 378 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 379 380 PetscFunctionReturn(0); 381 } 382 383 #include "src/mat/impls/maij/maij.h" 384 EXTERN_C_BEGIN 385 #undef __FUNCT__ 386 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqMAIJ" 387 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqMAIJ(Mat A,Mat PP,Mat *C) 388 { 389 /* This routine requires testing -- I don't think it works. 
*/ 390 PetscErrorCode ierr; 391 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 392 Mat_SeqMAIJ *pp=(Mat_SeqMAIJ*)PP->data; 393 Mat P=pp->AIJ; 394 Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*p=(Mat_SeqAIJ*)P->data,*c; 395 PetscInt *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 396 PetscInt *ci,*cj,*denserow,*sparserow,*ptadenserow,*ptasparserow,*ptaj; 397 PetscInt an=A->N,am=A->M,pn=P->N,pm=P->M,ppdof=pp->dof; 398 PetscInt i,j,k,dof,pdof,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi; 399 MatScalar *ca; 400 401 PetscFunctionBegin; 402 /* Start timer */ 403 ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,PP,0,0);CHKERRQ(ierr); 404 405 /* Get ij structure of P^T */ 406 ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 407 408 /* Allocate ci array, arrays for fill computation and */ 409 /* free space for accumulating nonzero column info */ 410 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 411 ci[0] = 0; 412 413 ierr = PetscMalloc((2*pn+2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr); 414 ierr = PetscMemzero(ptadenserow,(2*pn+2*an+1)*sizeof(PetscInt));CHKERRQ(ierr); 415 ptasparserow = ptadenserow + an; 416 denserow = ptasparserow + an; 417 sparserow = denserow + pn; 418 419 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 420 /* This should be reasonable if sparsity of PtAP is similar to that of A. 
*/ 421 ierr = GetMoreSpace((ai[am]/pm)*pn,&free_space); 422 current_space = free_space; 423 424 /* Determine symbolic info for each row of C: */ 425 for (i=0;i<pn/ppdof;i++) { 426 ptnzi = pti[i+1] - pti[i]; 427 ptanzi = 0; 428 ptJ = ptj + pti[i]; 429 for (dof=0;dof<ppdof;dof++) { 430 /* Determine symbolic row of PtA: */ 431 for (j=0;j<ptnzi;j++) { 432 arow = ptJ[j] + dof; 433 anzj = ai[arow+1] - ai[arow]; 434 ajj = aj + ai[arow]; 435 for (k=0;k<anzj;k++) { 436 if (!ptadenserow[ajj[k]]) { 437 ptadenserow[ajj[k]] = -1; 438 ptasparserow[ptanzi++] = ajj[k]; 439 } 440 } 441 } 442 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 443 ptaj = ptasparserow; 444 cnzi = 0; 445 for (j=0;j<ptanzi;j++) { 446 pdof = *ptaj%dof; 447 prow = (*ptaj++)/dof; 448 pnzj = pi[prow+1] - pi[prow]; 449 pjj = pj + pi[prow]; 450 for (k=0;k<pnzj;k++) { 451 if (!denserow[pjj[k]+pdof]) { 452 denserow[pjj[k]+pdof] = -1; 453 sparserow[cnzi++] = pjj[k]+pdof; 454 } 455 } 456 } 457 458 /* sort sparserow */ 459 ierr = PetscSortInt(cnzi,sparserow);CHKERRQ(ierr); 460 461 /* If free space is not available, make more free space */ 462 /* Double the amount of total space in the list */ 463 if (current_space->local_remaining<cnzi) { 464 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 465 } 466 467 /* Copy data into free space, and zero out denserows */ 468 ierr = PetscMemcpy(current_space->array,sparserow,cnzi*sizeof(PetscInt));CHKERRQ(ierr); 469 current_space->array += cnzi; 470 current_space->local_used += cnzi; 471 current_space->local_remaining -= cnzi; 472 473 for (j=0;j<ptanzi;j++) { 474 ptadenserow[ptasparserow[j]] = 0; 475 } 476 for (j=0;j<cnzi;j++) { 477 denserow[sparserow[j]] = 0; 478 } 479 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 480 /* For now, we will recompute what is needed. 
*/ 481 ci[i+1+dof] = ci[i+dof] + cnzi; 482 } 483 } 484 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 485 /* Allocate space for cj, initialize cj, and */ 486 /* destroy list of free space and other temporary array(s) */ 487 ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr); 488 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 489 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 490 491 /* Allocate space for ca */ 492 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 493 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 494 495 /* put together the new matrix */ 496 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 497 498 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 499 /* Since these are PETSc arrays, change flags to free them as necessary. */ 500 c = (Mat_SeqAIJ *)((*C)->data); 501 c->freedata = PETSC_TRUE; 502 c->nonew = 0; 503 504 /* Clean up. */ 505 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 506 507 ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,PP,0,0);CHKERRQ(ierr); 508 PetscFunctionReturn(0); 509 } 510 EXTERN_C_END 511 512 #undef __FUNCT__ 513 #define __FUNCT__ "MatPtAPNumeric" 514 /* 515 MatPtAPNumeric - Computes the matrix projection C = P^T * A * P 516 517 Collective on Mat 518 519 Input Parameters: 520 + A - the matrix 521 - P - the projection matrix 522 523 Output Parameters: 524 . C - the product matrix 525 526 Notes: 527 C must have been created by calling MatPtAPSymbolic and must be destroyed by 528 the user using MatDeatroy(). 529 530 This routine is currently only implemented for pairs of AIJ matrices and classes 531 which inherit from AIJ. C will be of type MATAIJ. 
   Level: intermediate

.seealso: MatPtAP(),MatPtAPSymbolic(),MatMatMultNumeric()
*/
PetscErrorCode MatPtAPNumeric(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  PetscErrorCode (*fA)(Mat,Mat,Mat);
  PetscErrorCode (*fP)(Mat,Mat,Mat);

  PetscFunctionBegin;

  /* A must be an assembled, unfactored matrix */
  PetscValidHeaderSpecific(A,MAT_COOKIE,1);
  PetscValidType(A,1);
  MatPreallocated(A);
  if (!A->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (A->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* P must be an assembled, unfactored matrix */
  PetscValidHeaderSpecific(P,MAT_COOKIE,2);
  PetscValidType(P,2);
  MatPreallocated(P);
  if (!P->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (P->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* C comes from MatPtAPSymbolic(), hence must already be assembled */
  PetscValidHeaderSpecific(C,MAT_COOKIE,3);
  PetscValidType(C,3);
  MatPreallocated(C);
  if (!C->assembled) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for unassembled matrix");
  if (C->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");

  /* C must be P->N x P->N, A square, and inner dimensions of P^T*A must agree */
  if (P->N!=C->M) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->N,C->M);
  if (P->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->M,A->N);
  if (A->M!=A->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix 'A' must be square, %d != %d",A->M,A->N);
  if (P->N!=C->N) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",P->N,C->N);

  /* For now, we do not dispatch based on the type of A and P */
  /* When implementations like _SeqAIJ_MAIJ exist, attack the multiple dispatch problem. */
  fA = A->ops->ptapnumeric;
  if (!fA) SETERRQ1(PETSC_ERR_SUP,"MatPtAPNumeric not supported for A of type %s",A->type_name);
  fP = P->ops->ptapnumeric;
  if (!fP) SETERRQ1(PETSC_ERR_SUP,"MatPtAPNumeric not supported for P of type %s",P->type_name);
  if (fP!=fA) SETERRQ2(PETSC_ERR_ARG_INCOMP,"MatPtAPNumeric requires A, %s, to be compatible with P, %s",A->type_name,P->type_name);

  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*fA)(A,P,C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ"
/* Numeric phase of C = P^T*A*P for SeqAIJ matrices: for each row i of A,
   form the sparse row (A*P)[i,:] in a dense work array, then scatter it into
   the rows of C selected by column indices of P[i,:] (outer-product update).
   C's nonzero structure must come from MatPtAPSymbolic_SeqAIJ_SeqAIJ, so that
   every column produced here already exists in C (cjj includes apj). */
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  PetscInt       flops=0;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ *) A->data;
  Mat_SeqAIJ     *p = (Mat_SeqAIJ *) P->data;
  Mat_SeqAIJ     *c = (Mat_SeqAIJ *) C->data;
  /* aj/pJ/pA are walked sequentially across the whole i-loop; do not reorder */
  PetscInt       *ai=a->i,*aj=a->j,*apj,*apjdense,*pi=p->i,*pj=p->j,*pJ=p->j,*pjj;
  PetscInt       *ci=c->i,*cj=c->j,*cjj;
  PetscInt       am=A->M,cn=C->N,cm=C->M;
  PetscInt       i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
  MatScalar      *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj;

  PetscFunctionBegin;
  /* Allocate temporary array for storage of one row of A*P:
     cn scalars (dense values) + cn ints (sparse column list) + cn ints (dense marker) */
  ierr = PetscMalloc(cn*(sizeof(MatScalar)+2*sizeof(PetscInt)),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+2*sizeof(PetscInt)));CHKERRQ(ierr);

  apj      = (PetscInt *)(apa + cn);
  apjdense = apj + cn;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0;i<am;i++) {
    /* Form sparse row of A*P */
    anzi  = ai[i+1] - ai[i];
    apnzj = 0;
    for (j=0;j<anzi;j++) {
      prow = *aj++;
      pnzj = pi[prow+1] - pi[prow];
      pjj  = pj + pi[prow];
      paj  = pa + pi[prow];
      for (k=0;k<pnzj;k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;
          apj[apnzj++]     = pjj[k];
        }
        apa[pjj[k]] += (*aa)*paj[k];
      }
      flops += 2*pnzj;
      aa++;
    }

    /* Sort the j index array for quick sparse axpy. */
    ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);

    /* Compute P^T*A*P using outer product (P^T)[:,j]*(A*P)[j,:]. */
    pnzi = pi[i+1] - pi[i];
    for (j=0;j<pnzi;j++) {
      nextap = 0;
      crow   = *pJ++;
      cjj    = cj + ci[crow];
      caj    = ca + ci[crow];
      /* Perform sparse axpy operation.  Note cjj includes apj. */
      for (k=0;nextap<apnzj;k++) {
        if (cjj[k]==apj[nextap]) {
          caj[k] += (*pA)*apa[apj[nextap++]];
        }
      }
      flops += 2*apnzj;
      pA++;
    }

    /* Zero the current row info for A*P */
    for (j=0;j<apnzj;j++) {
      apa[apj[j]]      = 0.;
      apjdense[apj[j]] = 0;
    }
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(apa);CHKERRQ(ierr);
  ierr = PetscLogFlops(flops);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* Compute C = P[rstart:rend,:]^T * A * P of seqaij matrices - used by MatPtAP_MPIAIJ_MPIAIJ() */

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt"
/*@C
  MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt - Creates C = P[rstart:rend,:]^T * A * P of seqaij matrices,
  used by MatPtAP_MPIAIJ_MPIAIJ()

  Collective on Mat

  Input Parameters:
+ A - the matrix in seqaij format
. P - the projection matrix in seqaij format
. scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
. fill - expected fill (not being used when scall==MAT_REUSE_MATRIX)
. prstart, prend - the starting and ending-1-th row of the matrix P to be used for transpose

  Output Parameters:
.
C - the product matrix in seqaij format 684 685 Level: developer 686 687 .seealso: MatPtAPSymbolic(),MatPtAPNumeric(),MatMatMult() 688 @*/ 689 PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,PetscInt prstart,PetscInt prend,Mat C) 690 { 691 PetscErrorCode ierr; 692 PetscInt flops=0; 693 Mat_SeqAIJ *a = (Mat_SeqAIJ *) A->data; 694 Mat_SeqAIJ *p = (Mat_SeqAIJ *) P->data; 695 Mat_SeqAIJ *c = (Mat_SeqAIJ *) C->data; 696 PetscInt *ai=a->i,*aj=a->j,*apj,*apjdense; 697 PetscInt *pi=p->i,*pj=p->j,*pJ=p->j+pi[prstart],*pjj; 698 PetscInt *ci=c->i,*cj=c->j,*cjj; 699 PetscInt am=A->M,cn=C->N,cm=C->M; 700 PetscInt i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow; 701 MatScalar *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj; 702 703 PetscFunctionBegin; 704 pA=p->a+pi[prstart]; 705 /* Allocate temporary array for storage of one row of A*P */ 706 ierr = PetscMalloc(cn*(sizeof(MatScalar)+2*sizeof(PetscInt)),&apa);CHKERRQ(ierr); 707 ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+2*sizeof(PetscInt)));CHKERRQ(ierr); 708 709 apj = (PetscInt *)(apa + cn); 710 apjdense = apj + cn; 711 712 /* Clear old values in C */ 713 ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr); 714 715 for (i=0;i<am;i++) { 716 /* Form sparse row of A*P */ 717 anzi = ai[i+1] - ai[i]; 718 apnzj = 0; 719 for (j=0;j<anzi;j++) { 720 prow = *aj++; 721 pnzj = pi[prow+1] - pi[prow]; 722 pjj = pj + pi[prow]; 723 paj = pa + pi[prow]; 724 for (k=0;k<pnzj;k++) { 725 if (!apjdense[pjj[k]]) { 726 apjdense[pjj[k]] = -1; 727 apj[apnzj++] = pjj[k]; 728 } 729 apa[pjj[k]] += (*aa)*paj[k]; 730 } 731 flops += 2*pnzj; 732 aa++; 733 } 734 735 /* Sort the j index array for quick sparse axpy. */ 736 ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr); 737 738 /* Compute P[[prstart:prend,:]^T*A*P using outer product (P^T)[:,j+prstart]*(A*P)[j,:]. 
*/ 739 pnzi = pi[i+1+prstart] - pi[i+prstart]; 740 for (j=0;j<pnzi;j++) { 741 nextap = 0; 742 crow = *pJ++; 743 cjj = cj + ci[crow]; 744 caj = ca + ci[crow]; 745 /* Perform sparse axpy operation. Note cjj includes apj. */ 746 for (k=0;nextap<apnzj;k++) { 747 if (cjj[k]==apj[nextap]) { 748 caj[k] += (*pA)*apa[apj[nextap++]]; 749 } 750 } 751 flops += 2*apnzj; 752 pA++; 753 } 754 755 /* Zero the current row info for A*P */ 756 for (j=0;j<apnzj;j++) { 757 apa[apj[j]] = 0.; 758 apjdense[apj[j]] = 0; 759 } 760 } 761 762 /* Assemble the final matrix and clean up */ 763 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 764 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 765 ierr = PetscFree(apa);CHKERRQ(ierr); 766 ierr = PetscLogFlops(flops);CHKERRQ(ierr); 767 768 PetscFunctionReturn(0); 769 } 770 771 #undef __FUNCT__ 772 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt" 773 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,PetscReal fill,PetscInt prstart,PetscInt prend,Mat *C) 774 { 775 PetscErrorCode ierr; 776 FreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL; 777 Mat_SeqAIJ *a=(Mat_SeqAIJ*)A->data,*p=(Mat_SeqAIJ*)P->data,*c; 778 PetscInt *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj; 779 PetscInt *ci,*cj,*denserow,*sparserow,*ptadenserow,*ptasparserow,*ptaj; 780 PetscInt an=A->N,am=A->M,pn=P->N,pm=P->M; 781 PetscInt i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi; 782 PetscInt m = prend - prstart; 783 MatScalar *ca; 784 Mat *psub,P_sub; 785 IS isrow,iscol; 786 787 PetscFunctionBegin; 788 /* Get ij structure of P[rstart:rend,:]^T */ 789 ierr = ISCreateStride(PETSC_COMM_SELF,m,prstart,1,&isrow);CHKERRQ(ierr); 790 ierr = ISCreateStride(PETSC_COMM_SELF,P->n,0,1,&iscol);CHKERRQ(ierr); 791 ierr = MatGetSubMatrices(P,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&psub);CHKERRQ(ierr); 792 ierr = ISDestroy(isrow);CHKERRQ(ierr); 793 ierr = ISDestroy(iscol);CHKERRQ(ierr); 794 P_sub = psub[0]; 795 ierr = 
MatGetSymbolicTranspose_SeqAIJ(P_sub,&pti,&ptj);CHKERRQ(ierr); 796 ierr = MatDestroyMatrices(1,&psub);CHKERRQ(ierr); 797 ptJ=ptj; 798 799 /* Allocate ci array, arrays for fill computation and */ 800 /* free space for accumulating nonzero column info */ 801 ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 802 ci[0] = 0; 803 804 ierr = PetscMalloc((2*pn+2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr); 805 ierr = PetscMemzero(ptadenserow,(2*pn+2*an+1)*sizeof(PetscInt));CHKERRQ(ierr); 806 ptasparserow = ptadenserow + an; 807 denserow = ptasparserow + an; 808 sparserow = denserow + pn; 809 810 /* Set initial free space to be nnz(A) scaled by aspect ratio of P. */ 811 /* This should be reasonable if sparsity of PtAP is similar to that of A. */ 812 ierr = GetMoreSpace((PetscInt)(fill*ai[am]/pm)*pn,&free_space); 813 current_space = free_space; 814 815 /* Determine symbolic info for each row of C: */ 816 for (i=0;i<pn;i++) { 817 ptnzi = pti[i+1] - pti[i]; 818 ptanzi = 0; 819 /* Determine symbolic row of PtA_reduced: */ 820 for (j=0;j<ptnzi;j++) { 821 arow = *ptJ++; 822 anzj = ai[arow+1] - ai[arow]; 823 ajj = aj + ai[arow]; 824 for (k=0;k<anzj;k++) { 825 if (!ptadenserow[ajj[k]]) { 826 ptadenserow[ajj[k]] = -1; 827 ptasparserow[ptanzi++] = ajj[k]; 828 } 829 } 830 } 831 /* Using symbolic info for row of PtA, determine symbolic info for row of C: */ 832 ptaj = ptasparserow; 833 cnzi = 0; 834 for (j=0;j<ptanzi;j++) { 835 prow = *ptaj++; 836 pnzj = pi[prow+1] - pi[prow]; 837 pjj = pj + pi[prow]; 838 for (k=0;k<pnzj;k++) { 839 if (!denserow[pjj[k]]) { 840 denserow[pjj[k]] = -1; 841 sparserow[cnzi++] = pjj[k]; 842 } 843 } 844 } 845 846 /* sort sparserow */ 847 ierr = PetscSortInt(cnzi,sparserow);CHKERRQ(ierr); 848 849 /* If free space is not available, make more free space */ 850 /* Double the amount of total space in the list */ 851 if (current_space->local_remaining<cnzi) { 852 ierr = GetMoreSpace(current_space->total_array_size,¤t_space);CHKERRQ(ierr); 853 } 
854 855 /* Copy data into free space, and zero out denserows */ 856 ierr = PetscMemcpy(current_space->array,sparserow,cnzi*sizeof(PetscInt));CHKERRQ(ierr); 857 current_space->array += cnzi; 858 current_space->local_used += cnzi; 859 current_space->local_remaining -= cnzi; 860 861 for (j=0;j<ptanzi;j++) { 862 ptadenserow[ptasparserow[j]] = 0; 863 } 864 for (j=0;j<cnzi;j++) { 865 denserow[sparserow[j]] = 0; 866 } 867 /* Aside: Perhaps we should save the pta info for the numerical factorization. */ 868 /* For now, we will recompute what is needed. */ 869 ci[i+1] = ci[i] + cnzi; 870 } 871 /* nnz is now stored in ci[ptm], column indices are in the list of free space */ 872 /* Allocate space for cj, initialize cj, and */ 873 /* destroy list of free space and other temporary array(s) */ 874 ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr); 875 ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr); 876 ierr = PetscFree(ptadenserow);CHKERRQ(ierr); 877 878 /* Allocate space for ca */ 879 ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr); 880 ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr); 881 882 /* put together the new matrix */ 883 ierr = MatCreateSeqAIJWithArrays(A->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr); 884 885 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 886 /* Since these are PETSc arrays, change flags to free them as necessary. */ 887 c = (Mat_SeqAIJ *)((*C)->data); 888 c->freedata = PETSC_TRUE; 889 c->nonew = 0; 890 891 /* Clean up. 
*/ 892 ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr); 893 894 PetscFunctionReturn(0); 895 } 896 897 #undef __FUNCT__ 898 #define __FUNCT__ "MatPtAP_SeqAIJ_SeqAIJ_ReducedPt" 899 PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ_ReducedPt(Mat A,Mat P,MatReuse scall,PetscReal fill,PetscInt prstart,PetscInt prend,Mat *C) 900 { 901 PetscErrorCode ierr; 902 903 PetscFunctionBegin; 904 if (A->m != prend-prstart) SETERRQ2(PETSC_ERR_ARG_SIZ,"Matrix dimensions are incompatible, %d != %d",A->m,prend-prstart); 905 if (prend-prstart > P->m) SETERRQ2(PETSC_ERR_ARG_SIZ," prend-prstart %d cannot be larger than P->m %d",prend-prstart,P->m); 906 if (scall == MAT_INITIAL_MATRIX){ 907 ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ_ReducedPt(A,P,fill,prstart,prend,C);CHKERRQ(ierr); 908 } 909 910 ierr = MatPtAPNumeric_SeqAIJ_SeqAIJ_ReducedPt(A,P,prstart,prend,*C);CHKERRQ(ierr); 911 912 PetscFunctionReturn(0); 913 } 914 915