/* xref: /petsc/src/mat/impls/baij/seq/baijfact11.c (revision 6ba06ab77c48d2bbbcba7e829ffcd04868efd3d5) */
#define PETSCMAT_DLL

/*
    Factorization code for BAIJ format.
*/
#include "../src/mat/impls/baij/seq/baij.h"
#include "../src/mat/blockinvert.h"

/* ------------------------------------------------------------*/
/*
      Version for when blocks are 4 by 4
*/
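/*
   Each 4 by 4 block is stored as 16 consecutive MatScalars in column-major
   order: for a block at pointer p, entry (r,c) lives at p[4*c + r], so
   p[0..3] holds column 0, p[4..7] column 1, and so on.  The unrolled
   register code below depends on this layout.
*/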
#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_inplace"
PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_inplace(Mat C,Mat A,const MatFactorInfo *info)
{
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  IS             isrow = b->row,isicol = b->icol;
  PetscErrorCode ierr;
  const PetscInt *r,*ic;
  PetscInt       i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  PetscInt       *ajtmpold,*ajtmp,nz,row;
  PetscInt       *diag_offset = b->diag,idx,*ai=a->i,*aj=a->j,*pj;
  MatScalar      *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar      p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  MatScalar      p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  MatScalar      p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  MatScalar      m13,m14,m15,m16;
  MatScalar      *ba = b->a,*aa = a->a;
  PetscTruth     pivotinblocks = b->pivotinblocks;
  PetscReal      shift = info->shiftamount;

  PetscFunctionBegin;
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
  ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr);
  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      x = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6] = x[7] = x[8] = x[9] = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in the initial (unfactored) row */
    idx      = r[i];
    nz       = ai[idx+1] - ai[idx];
    ajtmpold = aj + ai[idx];
    v        = aa + 16*ai[idx];
    for (j=0; j<nz; j++) {
      x     = rtmp+16*ic[ajtmpold[j]];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8]  = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v    += 16;
    }
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0) {
        pv = ba + 16*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
        pc[0] = m1 = p1*x1 + p5*x2  + p9*x3  + p13*x4;
        pc[1] = m2 = p2*x1 + p6*x2  + p10*x3 + p14*x4;
        pc[2] = m3 = p3*x1 + p7*x2  + p11*x3 + p15*x4;
        pc[3] = m4 = p4*x1 + p8*x2  + p12*x3 + p16*x4;

        pc[4] = m5 = p1*x5 + p5*x6  + p9*x7  + p13*x8;
        pc[5] = m6 = p2*x5 + p6*x6  + p10*x7 + p14*x8;
        pc[6] = m7 = p3*x5 + p7*x6  + p11*x7 + p15*x8;
        pc[7] = m8 = p4*x5 + p8*x6  + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10  + p9*x11  + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10  + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10  + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10  + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14  + p9*x15  + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14  + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14  + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14  + p12*x15 + p16*x16;

        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          x1   = pv[0];  x2  = pv[1];   x3 = pv[2];  x4  = pv[3];
          x5   = pv[4];  x6  = pv[5];   x7 = pv[6];  x8  = pv[7]; x9 = pv[8];
          x10  = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14  = pv[13]; x15 = pv[14]; x16 = pv[15];
          x    = rtmp + 16*pj[j];
          x[0] -= m1*x1 + m5*x2  + m9*x3  + m13*x4;
          x[1] -= m2*x1 + m6*x2  + m10*x3 + m14*x4;
          x[2] -= m3*x1 + m7*x2  + m11*x3 + m15*x4;
          x[3] -= m4*x1 + m8*x2  + m12*x3 + m16*x4;

          x[4] -= m1*x5 + m5*x6  + m9*x7  + m13*x8;
          x[5] -= m2*x5 + m6*x6  + m10*x7 + m14*x8;
          x[6] -= m3*x5 + m7*x6  + m11*x7 + m15*x8;
          x[7] -= m4*x5 + m8*x6  + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11  + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14  + m9*x15  + m13*x16;
          x[13] -= m2*x13 + m6*x14  + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14  + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14  + m12*x15 + m16*x16;

          pv   += 16;
        }
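        /* 16 entries x (4 multiplies + 4 adds) = 128 flops per block update
           above, plus 16 x (4 multiplies + 3 adds) = 112 flops for forming
           the multiplier */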
        ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      x      = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7]; pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv   += 16;
    }
    /* invert diagonal block */
    w    = ba + 16*diag_offset[i];
    if (pivotinblocks) {
      ierr = Kernel_A_gets_inverse_A_4(w,shift);CHKERRQ(ierr);
    } else {
      ierr = Kernel_A_gets_inverse_A_4_nopivot(w);CHKERRQ(ierr);
    }
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
  C->ops->solve          = MatSolve_SeqBAIJ_4_inplace;
  C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_inplace;
  C->assembled = PETSC_TRUE;
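  /* inverting one 4x4 diagonal block by Gaussian elimination costs roughly (4/3)*4^3 flops, hence the factor below */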
  ierr = PetscLogFlops(1.333333333333*4*4*4*b->mbs);CHKERRQ(ierr); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}

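/*
   A minimal usage sketch (not part of this file): the numeric routine
   above is normally reached through PETSc's generic factorization
   interface rather than called directly.  Assuming an assembled bs=4
   MATSEQBAIJ matrix A, something like the following would exercise it:

     Mat            F;
     IS             row,col;
     MatFactorInfo  info;
     PetscErrorCode ierr;

     ierr = MatGetOrdering(A,"nd",&row,&col);CHKERRQ(ierr);
     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
     ierr = MatGetFactor(A,"petsc",MAT_FACTOR_LU,&F);CHKERRQ(ierr);
     ierr = MatLUFactorSymbolic(F,A,row,col,&info);CHKERRQ(ierr);
     ierr = MatLUFactorNumeric(F,A,&info);CHKERRQ(ierr);

   followed by MatSolve() calls on F and the usual ISDestroy()/MatDestroy()
   cleanup.
*/
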
/* MatLUFactorNumeric_SeqBAIJ_4 -
     copied from MatLUFactorNumeric_SeqBAIJ_N_inplace() and manually re-implemented
       Kernel_A_gets_A_times_B()
       Kernel_A_gets_A_minus_B_times_C()
       Kernel_A_gets_inverse_A()
*/
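/*
   For reference, on dense column-major 4x4 blocks the specialized kernels
   used below compute (see blockinvert.h for the actual definitions):
     Kernel_A_gets_A_times_B_4(a,b,w)          : A <- A*B   (w is a 16-entry work array)
     Kernel_A_gets_A_minus_B_times_C_4(a,b,c)  : A <- A - B*C
     Kernel_A_gets_inverse_A_4(a,shift)        : A <- inv(A), with `shift` guarding otherwise-zero pivots
*/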

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4"
PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat            C=B;
  Mat_SeqBAIJ    *a=(Mat_SeqBAIJ*)A->data,*b=(Mat_SeqBAIJ*)C->data;
  IS             isrow = b->row,isicol = b->icol;
  PetscErrorCode ierr;
  const PetscInt *r,*ic,*ics;
  PetscInt       i,j,k,nz,nzL,row;
  const PetscInt n=a->mbs,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  const PetscInt *ajtmp,*bjtmp,*bdiag=b->diag,*pj,bs2=a->bs2;
  MatScalar      *rtmp,*pc,*mwork,*v,*pv,*aa=a->a;
  PetscInt       flg;
  PetscReal      shift;

  PetscFunctionBegin;
  ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr);
  ierr = ISGetIndices(isicol,&ic);CHKERRQ(ierr);

  if (info->shifttype == MAT_SHIFT_NONE) {
    shift = 0;
  } else { /* info->shifttype == MAT_SHIFT_INBLOCKS */
    shift = info->shiftamount;
  }
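  /* the shift is forwarded to Kernel_A_gets_inverse_A_4() below, which uses
     it to perturb pivots that would otherwise be zero (MAT_SHIFT_INBLOCKS) */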

  /* generate work space needed by the factorization */
  ierr = PetscMalloc2(bs2*n,MatScalar,&rtmp,bs2,MatScalar,&mwork);CHKERRQ(ierr);
  ierr = PetscMemzero(rtmp,bs2*n*sizeof(MatScalar));CHKERRQ(ierr);
  ics  = ic;

  for (i=0; i<n; i++) {
    /* zero rtmp */
    /* L part */
    nz    = bi[i+1] - bi[i];
    bjtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      ierr = PetscMemzero(rtmp+bs2*bjtmp[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* U part */
    nz    = bdiag[i]-bdiag[i+1];
    bjtmp = bj + bdiag[i+1]+1;
    for (j=0; j<nz; j++) {
      ierr = PetscMemzero(rtmp+bs2*bjtmp[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* load in the initial (unfactored) row */
    nz    = ai[r[i]+1] - ai[r[i]];
    ajtmp = aj + ai[r[i]];
    v     = aa + bs2*ai[r[i]];
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(rtmp+bs2*ic[ajtmp[j]],v+bs2*j,bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* elimination */
    bjtmp = bj + bi[i];
    nzL   = bi[i+1] - bi[i];
    for (k=0; k<nzL; k++) {
      row = bjtmp[k];
      pc  = rtmp + bs2*row;
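      /* skip this elimination step when the whole 4x4 multiplier block is zero */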
      for (flg=0,j=0; j<bs2; j++) { if (pc[j]!=0.0) { flg = 1; break; }}
      if (flg) {
        pv = b->a + bs2*bdiag[row];
        /* Kernel_A_gets_A_times_B(bs,pc,pv,mwork); *pc = *pc * (*pv); */
        ierr = Kernel_A_gets_A_times_B_4(pc,pv,mwork);CHKERRQ(ierr);

        pj = b->j + bdiag[row+1]+1; /* beginning of U(row,:) */
        pv = b->a + bs2*(bdiag[row+1]+1);
        nz = bdiag[row] - bdiag[row+1] - 1; /* num of entries in U(row,:), excluding diag */
        for (j=0; j<nz; j++) {
          /* Kernel_A_gets_A_minus_B_times_C(bs,rtmp+bs2*pj[j],pc,pv+bs2*j); */
          /* rtmp+bs2*pj[j] = rtmp+bs2*pj[j] - (*pc)*(pv+bs2*j) */
          v    = rtmp + bs2*pj[j];
          ierr = Kernel_A_gets_A_minus_B_times_C_4(v,pc,pv);CHKERRQ(ierr);
          pv  += bs2;
        }
        ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr); /* flops = 2*bs^3*nz + 2*bs^3 - bs^2 */
      }
    }

    /* finished row so stick it into b->a */
    /* L part */
    pv = b->a + bs2*bi[i];
    pj = b->j + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(pv+bs2*j,rtmp+bs2*pj[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* Mark diagonal and invert diagonal for simpler triangular solves */
    pv   = b->a + bs2*bdiag[i];
    pj   = b->j + bdiag[i];
    ierr = PetscMemcpy(pv,rtmp+bs2*pj[0],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    ierr = Kernel_A_gets_inverse_A_4(pv,shift);CHKERRQ(ierr);

    /* U part */
    pv = b->a + bs2*(bdiag[i+1]+1);
    pj = b->j + bdiag[i+1]+1;
    nz = bdiag[i] - bdiag[i+1] - 1;
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(pv+bs2*j,rtmp+bs2*pj[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }
  }

  ierr = PetscFree2(rtmp,mwork);CHKERRQ(ierr);
  ierr = ISRestoreIndices(isicol,&ic);CHKERRQ(ierr);
  ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr);
  C->ops->solve          = MatSolve_SeqBAIJ_4;
  C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4;
  C->assembled = PETSC_TRUE;
  ierr = PetscLogFlops(1.333333333333*4*4*4*n);CHKERRQ(ierr); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_inplace"
PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_inplace(Mat C,Mat A,const MatFactorInfo *info)
{
/*
    Default version for when blocks are 4 by 4 using natural ordering
*/
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscInt       i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  PetscInt       *ajtmpold,*ajtmp,nz,row;
  PetscInt       *diag_offset = b->diag,*ai=a->i,*aj=a->j,*pj;
  MatScalar      *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar      p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  MatScalar      p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  MatScalar      p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  MatScalar      m13,m14,m15,m16;
  MatScalar      *ba = b->a,*aa = a->a;
  PetscTruth     pivotinblocks = b->pivotinblocks;
  PetscReal      shift = info->shiftamount;

  PetscFunctionBegin;
  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      x = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6] = x[7] = x[8] = x[9] = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in the initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      x     = rtmp+16*ajtmpold[j];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8]  = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v    += 16;
    }
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0) {
        pv = ba + 16*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
        pc[0] = m1 = p1*x1 + p5*x2  + p9*x3  + p13*x4;
        pc[1] = m2 = p2*x1 + p6*x2  + p10*x3 + p14*x4;
        pc[2] = m3 = p3*x1 + p7*x2  + p11*x3 + p15*x4;
        pc[3] = m4 = p4*x1 + p8*x2  + p12*x3 + p16*x4;

        pc[4] = m5 = p1*x5 + p5*x6  + p9*x7  + p13*x8;
        pc[5] = m6 = p2*x5 + p6*x6  + p10*x7 + p14*x8;
        pc[6] = m7 = p3*x5 + p7*x6  + p11*x7 + p15*x8;
        pc[7] = m8 = p4*x5 + p8*x6  + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10  + p9*x11  + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10  + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10  + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10  + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14  + p9*x15  + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14  + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14  + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14  + p12*x15 + p16*x16;
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          x1   = pv[0];  x2  = pv[1];   x3 = pv[2];  x4  = pv[3];
          x5   = pv[4];  x6  = pv[5];   x7 = pv[6];  x8  = pv[7]; x9 = pv[8];
          x10  = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14  = pv[13]; x15 = pv[14]; x16 = pv[15];
          x    = rtmp + 16*pj[j];
          x[0] -= m1*x1 + m5*x2  + m9*x3  + m13*x4;
          x[1] -= m2*x1 + m6*x2  + m10*x3 + m14*x4;
          x[2] -= m3*x1 + m7*x2  + m11*x3 + m15*x4;
          x[3] -= m4*x1 + m8*x2  + m12*x3 + m16*x4;

          x[4] -= m1*x5 + m5*x6  + m9*x7  + m13*x8;
          x[5] -= m2*x5 + m6*x6  + m10*x7 + m14*x8;
          x[6] -= m3*x5 + m7*x6  + m11*x7 + m15*x8;
          x[7] -= m4*x5 + m8*x6  + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11  + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14  + m9*x15  + m13*x16;
          x[13] -= m2*x13 + m6*x14  + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14  + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14  + m12*x15 + m16*x16;

          pv   += 16;
        }
        ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      x      = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7]; pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv   += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    if (pivotinblocks) {
      ierr = Kernel_A_gets_inverse_A_4(w,shift);CHKERRQ(ierr);
    } else {
      ierr = Kernel_A_gets_inverse_A_4_nopivot(w);CHKERRQ(ierr);
    }
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  C->ops->solve          = MatSolve_SeqBAIJ_4_NaturalOrdering_inplace;
  C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_NaturalOrdering_inplace;
  C->assembled = PETSC_TRUE;
  ierr = PetscLogFlops(1.333333333333*4*4*4*b->mbs);CHKERRQ(ierr); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}

/*
  MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering -
    copied from MatLUFactorNumeric_SeqBAIJ_3_NaturalOrdering_inplace()
*/
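/*
   Unlike MatLUFactorNumeric_SeqBAIJ_4() above, this version assumes the
   identity row/column permutation, so the isrow/isicol index sets and the
   ic[] indirection drop out.
*/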
#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering"
PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat            C=B;
  Mat_SeqBAIJ    *a=(Mat_SeqBAIJ*)A->data,*b=(Mat_SeqBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscInt       i,j,k,nz,nzL,row;
  const PetscInt n=a->mbs,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  const PetscInt *ajtmp,*bjtmp,*bdiag=b->diag,*pj,bs2=a->bs2;
  MatScalar      *rtmp,*pc,*mwork,*v,*pv,*aa=a->a;
  PetscInt       flg;
  PetscReal      shift;

  PetscFunctionBegin;
  /* generate work space needed by the factorization */
  ierr = PetscMalloc2(bs2*n,MatScalar,&rtmp,bs2,MatScalar,&mwork);CHKERRQ(ierr);
  ierr = PetscMemzero(rtmp,bs2*n*sizeof(MatScalar));CHKERRQ(ierr);

  if (info->shifttype == MAT_SHIFT_NONE) {
    shift = 0;
  } else { /* info->shifttype == MAT_SHIFT_INBLOCKS */
    shift = info->shiftamount;
  }

  for (i=0; i<n; i++) {
    /* zero rtmp */
    /* L part */
    nz    = bi[i+1] - bi[i];
    bjtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      ierr = PetscMemzero(rtmp+bs2*bjtmp[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* U part */
    nz    = bdiag[i] - bdiag[i+1];
    bjtmp = bj + bdiag[i+1]+1;
    for (j=0; j<nz; j++) {
      ierr = PetscMemzero(rtmp+bs2*bjtmp[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* load in the initial (unfactored) row */
    nz    = ai[i+1] - ai[i];
    ajtmp = aj + ai[i];
    v     = aa + bs2*ai[i];
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(rtmp+bs2*ajtmp[j],v+bs2*j,bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* elimination */
    bjtmp = bj + bi[i];
    nzL   = bi[i+1] - bi[i];
    for (k=0; k<nzL; k++) {
      row = bjtmp[k];
      pc  = rtmp + bs2*row;
      for (flg=0,j=0; j<bs2; j++) { if (pc[j]!=0.0) { flg = 1; break; }}
      if (flg) {
        pv = b->a + bs2*bdiag[row];
        /* Kernel_A_gets_A_times_B(bs,pc,pv,mwork); *pc = *pc * (*pv); */
        ierr = Kernel_A_gets_A_times_B_4(pc,pv,mwork);CHKERRQ(ierr);

        pj = b->j + bdiag[row+1]+1; /* beginning of U(row,:) */
        pv = b->a + bs2*(bdiag[row+1]+1);
        nz = bdiag[row] - bdiag[row+1] - 1; /* num of entries in U(row,:), excluding diag */
        for (j=0; j<nz; j++) {
          /* Kernel_A_gets_A_minus_B_times_C(bs,rtmp+bs2*pj[j],pc,pv+bs2*j); */
          /* rtmp+bs2*pj[j] = rtmp+bs2*pj[j] - (*pc)*(pv+bs2*j) */
          v    = rtmp + bs2*pj[j];
          ierr = Kernel_A_gets_A_minus_B_times_C_4(v,pc,pv);CHKERRQ(ierr);
          pv  += bs2;
        }
        ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr); /* flops = 2*bs^3*nz + 2*bs^3 - bs^2 */
      }
    }

    /* finished row so stick it into b->a */
    /* L part */
    pv = b->a + bs2*bi[i];
    pj = b->j + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(pv+bs2*j,rtmp+bs2*pj[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }

    /* Mark diagonal and invert diagonal for simpler triangular solves */
    pv   = b->a + bs2*bdiag[i];
    pj   = b->j + bdiag[i];
    ierr = PetscMemcpy(pv,rtmp+bs2*pj[0],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    ierr = Kernel_A_gets_inverse_A_4(pv,shift);CHKERRQ(ierr);

    /* U part */
    pv = b->a + bs2*(bdiag[i+1]+1);
    pj = b->j + bdiag[i+1]+1;
    nz = bdiag[i] - bdiag[i+1] - 1;
    for (j=0; j<nz; j++) {
      ierr = PetscMemcpy(pv+bs2*j,rtmp+bs2*pj[j],bs2*sizeof(MatScalar));CHKERRQ(ierr);
    }
  }
  ierr = PetscFree2(rtmp,mwork);CHKERRQ(ierr);
  C->ops->solve          = MatSolve_SeqBAIJ_4_NaturalOrdering;
  C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_NaturalOrdering;
  C->assembled = PETSC_TRUE;
  ierr = PetscLogFlops(1.333333333333*4*4*4*n);CHKERRQ(ierr); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}

#if defined(PETSC_HAVE_SSE)

#include PETSC_HAVE_SSE

/* SSE version for when blocks are 4 by 4 using natural ordering */
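/*
   Note: the packed-single (_PS) operations below assume a 4-byte
   single-precision MatScalar and, as checked explicitly at entry,
   16-byte aligned aa, ba, and rtmp.
*/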
#undef __FUNCT__
#define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE"
PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE(Mat B,Mat A,const MatFactorInfo *info)
{
  Mat            C=B;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  PetscErrorCode ierr;
  int            i,j,n = a->mbs;
  int            *bj = b->j,*bjtmp,*pj;
  int            row;
  int            *ajtmpold,nz,*bi=b->i;
  int            *diag_offset = b->diag,*ai=a->i,*aj=a->j;
  MatScalar      *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar      *ba = b->a,*aa = a->a;
  int            nonzero=0;
/*  int            nonzero=0,colscale = 16; */
  PetscTruth     pivotinblocks = b->pivotinblocks;
  PetscReal      shift = info->shiftamount;

  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;

  if ((unsigned long)aa%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer aa is not 16 byte aligned.  SSE will not work.");
  if ((unsigned long)ba%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer ba is not 16 byte aligned.  SSE will not work.");
  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);
  if ((unsigned long)rtmp%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer rtmp is not 16 byte aligned.  SSE will not work.");
/*  if ((unsigned long)bj==(unsigned long)aj) { */
/*    colscale = 4; */
/*  } */
  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    bjtmp = bj + bi[i];
    /* zero out the 4x4 block accumulators */
    /* zero out one register */
    XOR_PS(XMM7,XMM7);
    for (j=0; j<nz; j++) {
      x = rtmp+16*bjtmp[j];
/*    x = rtmp+4*bjtmp[j]; */
      SSE_INLINE_BEGIN_1(x)
        /* Copy zero register to memory locations */
        /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
        SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM7)
        SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM7)
        SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM7)
        SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM7)
        SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM7)
        SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM7)
        SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM7)
        SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM7)
      SSE_INLINE_END_1;
    }
    /* load in the initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      x = rtmp+16*ajtmpold[j];
/*    x = rtmp+colscale*ajtmpold[j]; */
      /* Copy v block into x block */
      SSE_INLINE_BEGIN_2(v,x)
        /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
        SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM0)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM1)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM1)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM2)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM2)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM3)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM3)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM4)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM4)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM5)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM5)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM6)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM6)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
      SSE_INLINE_END_2;

      v += 16;
    }
/*  row = (*bjtmp++)/4; */
    row = *bjtmp++;
    while (row < i) {
      pc = rtmp + 16*row;
      SSE_INLINE_BEGIN_1(pc)
        /* Load block from lower triangle */
        /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
        SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
        SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM0)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM1)
        SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM1)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM2)
        SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM2)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM3)
        SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM3)

        /* Compare block to zero block */

        SSE_COPY_PS(XMM4,XMM7)
        SSE_CMPNEQ_PS(XMM4,XMM0)

        SSE_COPY_PS(XMM5,XMM7)
        SSE_CMPNEQ_PS(XMM5,XMM1)

        SSE_COPY_PS(XMM6,XMM7)
        SSE_CMPNEQ_PS(XMM6,XMM2)

        SSE_CMPNEQ_PS(XMM7,XMM3)

        /* Reduce the comparisons to one SSE register */
        SSE_OR_PS(XMM6,XMM7)
        SSE_OR_PS(XMM5,XMM4)
        SSE_OR_PS(XMM5,XMM6)
      SSE_INLINE_END_1;

      /* Reduce the one SSE register to an integer register for branching */
      /* Note: Since nonzero is an int, there is no INLINE block version of this call */
      MOVEMASK(nonzero,XMM5);

      /* If block is nonzero ... */
      if (nonzero) {
        pv = ba + 16*diag_offset[row];
        PREFETCH_L1(&pv[16]);
        pj = bj + diag_offset[row] + 1;

        /* Form Multiplier, one column at a time (Matrix-Matrix Product) */
        /* L_ij^(k+1) = L_ij^(k)*inv(L_jj^(k)) */
        /* but the diagonal was inverted already */
        /* and, L_ij^(k) is already loaded into registers XMM0-XMM3 columnwise */
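        /* Each product column c is accumulated as
             sum_k pv[4*c+k] * (multiplier column k held in XMM0-XMM3),
           one broadcast (LOAD_SS + SHUFFLE 0x00) per scalar entry */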

        SSE_INLINE_BEGIN_2(pv,pc)
          /* Column 0, product is accumulated in XMM4 */
          SSE_LOAD_SS(SSE_ARG_1,FLOAT_0,XMM4)
          SSE_SHUFFLE(XMM4,XMM4,0x00)
          SSE_MULT_PS(XMM4,XMM0)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_1,XMM5)
          SSE_SHUFFLE(XMM5,XMM5,0x00)
          SSE_MULT_PS(XMM5,XMM1)
          SSE_ADD_PS(XMM4,XMM5)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_2,XMM6)
          SSE_SHUFFLE(XMM6,XMM6,0x00)
          SSE_MULT_PS(XMM6,XMM2)
          SSE_ADD_PS(XMM4,XMM6)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_3,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM7,XMM3)
          SSE_ADD_PS(XMM4,XMM7)

          SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM4)
          SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM4)

          /* Column 1, product is accumulated in XMM5 */
          SSE_LOAD_SS(SSE_ARG_1,FLOAT_4,XMM5)
          SSE_SHUFFLE(XMM5,XMM5,0x00)
          SSE_MULT_PS(XMM5,XMM0)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_5,XMM6)
          SSE_SHUFFLE(XMM6,XMM6,0x00)
          SSE_MULT_PS(XMM6,XMM1)
          SSE_ADD_PS(XMM5,XMM6)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_6,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM7,XMM2)
          SSE_ADD_PS(XMM5,XMM7)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_7,XMM6)
          SSE_SHUFFLE(XMM6,XMM6,0x00)
          SSE_MULT_PS(XMM6,XMM3)
          SSE_ADD_PS(XMM5,XMM6)

          SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM5)
          SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM5)

          SSE_PREFETCH_L1(SSE_ARG_1,FLOAT_24)

          /* Column 2, product is accumulated in XMM6 */
          SSE_LOAD_SS(SSE_ARG_1,FLOAT_8,XMM6)
          SSE_SHUFFLE(XMM6,XMM6,0x00)
          SSE_MULT_PS(XMM6,XMM0)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_9,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM7,XMM1)
          SSE_ADD_PS(XMM6,XMM7)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_10,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM7,XMM2)
          SSE_ADD_PS(XMM6,XMM7)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_11,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM7,XMM3)
          SSE_ADD_PS(XMM6,XMM7)

          SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM6)
          SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)

          /* Note: For the last column, we no longer need to preserve XMM0->XMM3 */
          /* Column 3, product is accumulated in XMM0 */
          SSE_LOAD_SS(SSE_ARG_1,FLOAT_12,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM0,XMM7)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_13,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM1,XMM7)
          SSE_ADD_PS(XMM0,XMM1)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_14,XMM1)
          SSE_SHUFFLE(XMM1,XMM1,0x00)
          SSE_MULT_PS(XMM1,XMM2)
          SSE_ADD_PS(XMM0,XMM1)

          SSE_LOAD_SS(SSE_ARG_1,FLOAT_15,XMM7)
          SSE_SHUFFLE(XMM7,XMM7,0x00)
          SSE_MULT_PS(XMM3,XMM7)
          SSE_ADD_PS(XMM0,XMM3)

          SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM0)
          SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)

          /* Simplify Bookkeeping -- Completely Unnecessary Instructions */
          /* This is code to be maintained and read by humans after all. */
          /* Copy Multiplier Col 3 into XMM3 */
          SSE_COPY_PS(XMM3,XMM0)
          /* Copy Multiplier Col 2 into XMM2 */
          SSE_COPY_PS(XMM2,XMM6)
          /* Copy Multiplier Col 1 into XMM1 */
          SSE_COPY_PS(XMM1,XMM5)
          /* Copy Multiplier Col 0 into XMM0 */
          SSE_COPY_PS(XMM0,XMM4)
        SSE_INLINE_END_2;

        /* Update the row: */
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          PREFETCH_L1(&pv[16]);
          x = rtmp + 16*pj[j];
/*        x = rtmp + 4*pj[j]; */

          /* X := X - M*PV, one column at a time */
          /* Note: M is already loaded columnwise into registers XMM0-XMM3 */
          SSE_INLINE_BEGIN_2(x,pv)
            /* Load First Column of X */
            SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM4)
            SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM4)

            /* Matrix-Vector Product: */
            SSE_LOAD_SS(SSE_ARG_2,FLOAT_0,XMM5)
            SSE_SHUFFLE(XMM5,XMM5,0x00)
            SSE_MULT_PS(XMM5,XMM0)
            SSE_SUB_PS(XMM4,XMM5)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_1,XMM6)
            SSE_SHUFFLE(XMM6,XMM6,0x00)
            SSE_MULT_PS(XMM6,XMM1)
            SSE_SUB_PS(XMM4,XMM6)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_2,XMM7)
            SSE_SHUFFLE(XMM7,XMM7,0x00)
            SSE_MULT_PS(XMM7,XMM2)
            SSE_SUB_PS(XMM4,XMM7)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_3,XMM5)
            SSE_SHUFFLE(XMM5,XMM5,0x00)
            SSE_MULT_PS(XMM5,XMM3)
            SSE_SUB_PS(XMM4,XMM5)

            SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM4)
            SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM4)

            /* Second Column */
            SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM5)
            SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM5)

            /* Matrix-Vector Product: */
            SSE_LOAD_SS(SSE_ARG_2,FLOAT_4,XMM6)
            SSE_SHUFFLE(XMM6,XMM6,0x00)
            SSE_MULT_PS(XMM6,XMM0)
            SSE_SUB_PS(XMM5,XMM6)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_5,XMM7)
            SSE_SHUFFLE(XMM7,XMM7,0x00)
            SSE_MULT_PS(XMM7,XMM1)
            SSE_SUB_PS(XMM5,XMM7)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_6,XMM4)
            SSE_SHUFFLE(XMM4,XMM4,0x00)
            SSE_MULT_PS(XMM4,XMM2)
            SSE_SUB_PS(XMM5,XMM4)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_7,XMM6)
            SSE_SHUFFLE(XMM6,XMM6,0x00)
            SSE_MULT_PS(XMM6,XMM3)
            SSE_SUB_PS(XMM5,XMM6)

            SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM5)
            SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM5)

            SSE_PREFETCH_L1(SSE_ARG_2,FLOAT_24)

            /* Third Column */
            SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM6)
            SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)

            /* Matrix-Vector Product: */
            SSE_LOAD_SS(SSE_ARG_2,FLOAT_8,XMM7)
            SSE_SHUFFLE(XMM7,XMM7,0x00)
            SSE_MULT_PS(XMM7,XMM0)
            SSE_SUB_PS(XMM6,XMM7)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_9,XMM4)
            SSE_SHUFFLE(XMM4,XMM4,0x00)
            SSE_MULT_PS(XMM4,XMM1)
            SSE_SUB_PS(XMM6,XMM4)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_10,XMM5)
            SSE_SHUFFLE(XMM5,XMM5,0x00)
            SSE_MULT_PS(XMM5,XMM2)
            SSE_SUB_PS(XMM6,XMM5)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_11,XMM7)
            SSE_SHUFFLE(XMM7,XMM7,0x00)
            SSE_MULT_PS(XMM7,XMM3)
            SSE_SUB_PS(XMM6,XMM7)

            SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM6)
            SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM6)

            /* Fourth Column */
            SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM4)
            SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM4)

            /* Matrix-Vector Product: */
            SSE_LOAD_SS(SSE_ARG_2,FLOAT_12,XMM5)
            SSE_SHUFFLE(XMM5,XMM5,0x00)
            SSE_MULT_PS(XMM5,XMM0)
            SSE_SUB_PS(XMM4,XMM5)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_13,XMM6)
            SSE_SHUFFLE(XMM6,XMM6,0x00)
            SSE_MULT_PS(XMM6,XMM1)
            SSE_SUB_PS(XMM4,XMM6)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_14,XMM7)
            SSE_SHUFFLE(XMM7,XMM7,0x00)
            SSE_MULT_PS(XMM7,XMM2)
            SSE_SUB_PS(XMM4,XMM7)

            SSE_LOAD_SS(SSE_ARG_2,FLOAT_15,XMM5)
            SSE_SHUFFLE(XMM5,XMM5,0x00)
            SSE_MULT_PS(XMM5,XMM3)
            SSE_SUB_PS(XMM4,XMM5)

            SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
            SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)
          SSE_INLINE_END_2;
          pv += 16;
        }
        ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr);
      }
      row = *bjtmp++;
/*    row = (*bjtmp++)/4; */
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];

    /* Copy x block back into pv block */
    for (j=0; j<nz; j++) {
      x = rtmp+16*pj[j];
/*    x = rtmp+4*pj[j]; */

      SSE_INLINE_BEGIN_2(x,pv)
        /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
        SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM1)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM1)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM2)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM2)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM3)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM3)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM4)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM4)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM5)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)

        SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM7)
        SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM7)

        SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
        SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
      SSE_INLINE_END_2;
      pv += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    if (pivotinblocks) {
      ierr = Kernel_A_gets_inverse_A_4(w,shift);CHKERRQ(ierr);
    } else {
964e6580ceeSShri Abhyankar       ierr = Kernel_A_gets_inverse_A_4_nopivot(w);CHKERRQ(ierr);
965e6580ceeSShri Abhyankar     }
966e6580ceeSShri Abhyankar /*      ierr = Kernel_A_gets_inverse_A_4_SSE(w);CHKERRQ(ierr); */
967e6580ceeSShri Abhyankar     /* Note: with Cramer's rule, the flop count below may be unfairly high or low */
968e6580ceeSShri Abhyankar   }
969e6580ceeSShri Abhyankar 
970e6580ceeSShri Abhyankar   ierr = PetscFree(rtmp);CHKERRQ(ierr);
971e6580ceeSShri Abhyankar   C->ops->solve          = MatSolve_SeqBAIJ_4_NaturalOrdering_SSE;
972e6580ceeSShri Abhyankar   C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_NaturalOrdering_SSE;
973e6580ceeSShri Abhyankar   C->assembled = PETSC_TRUE;
974766f9fbaSBarry Smith   ierr = PetscLogFlops(1.333333333333*4.0*16.0*b->mbs);CHKERRQ(ierr);
975e6580ceeSShri Abhyankar   /* flop count from inverting the diagonal blocks: (4/3)*bs^3 per block with bs = 4 */
976e6580ceeSShri Abhyankar   SSE_SCOPE_END;
977e6580ceeSShri Abhyankar   PetscFunctionReturn(0);
978e6580ceeSShri Abhyankar }
979e6580ceeSShri Abhyankar 
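/*
   Reference-only sketch (guarded out, never compiled): the plain-C equivalent of the
   inner-loop SSE update performed above, x := x - m*pv, where m is the 4x4 multiplier
   block held columnwise in XMM0-XMM3 and pv is one 4x4 block of the pivot row.  Blocks
   are assumed stored column-major, 16 MatScalars each, as throughout this file; the
   helper name below is illustrative only.
*/
#if 0
static void Sketch_BlockMultSub_4(MatScalar *x,const MatScalar *m,const MatScalar *pv)
{
  int r,c,k;
  for (c=0; c<4; c++) {            /* one column of x at a time, as the SSE code does */
    for (r=0; r<4; r++) {
      for (k=0; k<4; k++) x[4*c+r] -= m[4*k+r]*pv[4*c+k];
    }
  }
}
#endif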
980e6580ceeSShri Abhyankar #undef __FUNCT__
981e6580ceeSShri Abhyankar #define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj_Inplace"
982e6580ceeSShri Abhyankar PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj_Inplace(Mat C)
983e6580ceeSShri Abhyankar {
984e6580ceeSShri Abhyankar PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj_Inplace(Mat C,const MatFactorInfo *info)
985e6580ceeSShri Abhyankar   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
986e6580ceeSShri Abhyankar   PetscErrorCode ierr;
987e6580ceeSShri Abhyankar   int i,j,n = a->mbs;
988e6580ceeSShri Abhyankar   unsigned short *bj = (unsigned short *)(b->j),*bjtmp,*pj;
989e6580ceeSShri Abhyankar   unsigned short *aj = (unsigned short *)(a->j),*ajtmp;
990e6580ceeSShri Abhyankar   unsigned int   row;
991e6580ceeSShri Abhyankar   int            nz,*bi=b->i;
992e6580ceeSShri Abhyankar   int            *diag_offset = b->diag,*ai=a->i;
993e6580ceeSShri Abhyankar   MatScalar      *pv,*v,*rtmp,*pc,*w,*x;
994e6580ceeSShri Abhyankar   MatScalar      *ba = b->a,*aa = a->a;
995e6580ceeSShri Abhyankar   int            nonzero=0;
996e6580ceeSShri Abhyankar /*    int            nonzero=0,colscale = 16; */
997e6580ceeSShri Abhyankar   PetscTruth     pivotinblocks = b->pivotinblocks;
998182b8fbaSHong Zhang   PetscReal      shift = info->shiftamount;
999e6580ceeSShri Abhyankar 
1000e6580ceeSShri Abhyankar   PetscFunctionBegin;
1001e6580ceeSShri Abhyankar   SSE_SCOPE_BEGIN;
1002e6580ceeSShri Abhyankar 
1003e32f2f54SBarry Smith   if ((unsigned long)aa%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer aa is not 16 byte aligned.  SSE will not work.");
1004e32f2f54SBarry Smith   if ((unsigned long)ba%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer ba is not 16 byte aligned.  SSE will not work.");
1005e6580ceeSShri Abhyankar   ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);
1006e32f2f54SBarry Smith   if ((unsigned long)rtmp%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer rtmp is not 16 byte aligned.  SSE will not work.");
1007e6580ceeSShri Abhyankar /*    if ((unsigned long)bj==(unsigned long)aj) { */
1008e6580ceeSShri Abhyankar /*      colscale = 4; */
1009e6580ceeSShri Abhyankar /*    } */
1010e6580ceeSShri Abhyankar 
1011e6580ceeSShri Abhyankar   for (i=0; i<n; i++) {
1012e6580ceeSShri Abhyankar     nz    = bi[i+1] - bi[i];
1013e6580ceeSShri Abhyankar     bjtmp = bj + bi[i];
1014e6580ceeSShri Abhyankar     /* zero out the 4x4 block accumulators */
1015e6580ceeSShri Abhyankar     /* zero out one register */
1016e6580ceeSShri Abhyankar     XOR_PS(XMM7,XMM7);
1017e6580ceeSShri Abhyankar     for  (j=0; j<nz; j++) {
1018e6580ceeSShri Abhyankar       x = rtmp+16*((unsigned int)bjtmp[j]);
1019e6580ceeSShri Abhyankar /*        x = rtmp+4*bjtmp[j]; */
1020e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_1(x)
1021e6580ceeSShri Abhyankar         /* Copy zero register to memory locations */
1022e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1023e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM7)
1024e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM7)
1025e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM7)
1026e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM7)
1027e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM7)
1028e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM7)
1029e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM7)
1030e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM7)
1031e6580ceeSShri Abhyankar       SSE_INLINE_END_1;
1032e6580ceeSShri Abhyankar     }
1033e6580ceeSShri Abhyankar     /* load in initial (unfactored) row */
1034e6580ceeSShri Abhyankar     nz    = ai[i+1] - ai[i];
1035e6580ceeSShri Abhyankar     ajtmp = aj + ai[i];
1036e6580ceeSShri Abhyankar     v     = aa + 16*ai[i];
1037e6580ceeSShri Abhyankar     for (j=0; j<nz; j++) {
1038e6580ceeSShri Abhyankar       x = rtmp+16*((unsigned int)ajtmp[j]);
1039e6580ceeSShri Abhyankar /*        x = rtmp+colscale*ajtmp[j]; */
1040e6580ceeSShri Abhyankar       /* Copy v block into x block */
1041e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_2(v,x)
1042e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1043e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
1044e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM0)
1045e6580ceeSShri Abhyankar 
1046e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM1)
1047e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM1)
1048e6580ceeSShri Abhyankar 
1049e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM2)
1050e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM2)
1051e6580ceeSShri Abhyankar 
1052e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM3)
1053e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM3)
1054e6580ceeSShri Abhyankar 
1055e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM4)
1056e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM4)
1057e6580ceeSShri Abhyankar 
1058e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM5)
1059e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM5)
1060e6580ceeSShri Abhyankar 
1061e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM6)
1062e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM6)
1063e6580ceeSShri Abhyankar 
1064e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
1065e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1066e6580ceeSShri Abhyankar       SSE_INLINE_END_2;
1067e6580ceeSShri Abhyankar 
1068e6580ceeSShri Abhyankar       v += 16;
1069e6580ceeSShri Abhyankar     }
1070e6580ceeSShri Abhyankar /*      row = (*bjtmp++)/4; */
1071e6580ceeSShri Abhyankar     row = (unsigned int)(*bjtmp++);
1072e6580ceeSShri Abhyankar     while (row < i) {
1073e6580ceeSShri Abhyankar       pc  = rtmp + 16*row;
1074e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_1(pc)
1075e6580ceeSShri Abhyankar         /* Load block from lower triangle */
1076e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1077e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
1078e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM0)
1079e6580ceeSShri Abhyankar 
1080e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM1)
1081e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM1)
1082e6580ceeSShri Abhyankar 
1083e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM2)
1084e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM2)
1085e6580ceeSShri Abhyankar 
1086e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM3)
1087e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM3)
1088e6580ceeSShri Abhyankar 
1089e6580ceeSShri Abhyankar         /* Compare block to zero block */
1090e6580ceeSShri Abhyankar 
1091e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM4,XMM7)
1092e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM4,XMM0)
1093e6580ceeSShri Abhyankar 
1094e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM5,XMM7)
1095e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM5,XMM1)
1096e6580ceeSShri Abhyankar 
1097e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM6,XMM7)
1098e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM6,XMM2)
1099e6580ceeSShri Abhyankar 
1100e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM7,XMM3)
1101e6580ceeSShri Abhyankar 
1102e6580ceeSShri Abhyankar         /* Reduce the comparisons to one SSE register */
1103e6580ceeSShri Abhyankar         SSE_OR_PS(XMM6,XMM7)
1104e6580ceeSShri Abhyankar         SSE_OR_PS(XMM5,XMM4)
1105e6580ceeSShri Abhyankar         SSE_OR_PS(XMM5,XMM6)
1106e6580ceeSShri Abhyankar       SSE_INLINE_END_1;
1107e6580ceeSShri Abhyankar 
1108e6580ceeSShri Abhyankar       /* Reduce the one SSE register to an integer register for branching */
1109e6580ceeSShri Abhyankar       /* Note: Since nonzero is an int, there is no INLINE block version of this call */
1110e6580ceeSShri Abhyankar       MOVEMASK(nonzero,XMM5);
1111e6580ceeSShri Abhyankar 
1112e6580ceeSShri Abhyankar       /* If block is nonzero ... */
1113e6580ceeSShri Abhyankar       if (nonzero) {
1114e6580ceeSShri Abhyankar         pv = ba + 16*diag_offset[row];
1115e6580ceeSShri Abhyankar         PREFETCH_L1(&pv[16]);
1116e6580ceeSShri Abhyankar         pj = bj + diag_offset[row] + 1;
1117e6580ceeSShri Abhyankar 
1118e6580ceeSShri Abhyankar         /* Form Multiplier, one column at a time (Matrix-Matrix Product) */
1119e6580ceeSShri Abhyankar         /* L_ij^(k+1) = L_ij^(k)*inv(L_jj^(k)) */
1120e6580ceeSShri Abhyankar         /* but the diagonal was inverted already */
1121e6580ceeSShri Abhyankar         /* and, L_ij^(k) is already loaded into registers XMM0-XMM3 columnwise */
1122e6580ceeSShri Abhyankar 
1123e6580ceeSShri Abhyankar         SSE_INLINE_BEGIN_2(pv,pc)
1124e6580ceeSShri Abhyankar           /* Column 0, product is accumulated in XMM4 */
1125e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_0,XMM4)
1126e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM4,XMM4,0x00)
1127e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM4,XMM0)
1128e6580ceeSShri Abhyankar 
1129e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_1,XMM5)
1130e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM5,XMM5,0x00)
1131e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM5,XMM1)
1132e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM5)
1133e6580ceeSShri Abhyankar 
1134e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_2,XMM6)
1135e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1136e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM2)
1137e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM6)
1138e6580ceeSShri Abhyankar 
1139e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_3,XMM7)
1140e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1141e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM3)
1142e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM7)
1143e6580ceeSShri Abhyankar 
1144e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM4)
1145e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM4)
1146e6580ceeSShri Abhyankar 
1147e6580ceeSShri Abhyankar           /* Column 1, product is accumulated in XMM5 */
1148e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_4,XMM5)
1149e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM5,XMM5,0x00)
1150e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM5,XMM0)
1151e6580ceeSShri Abhyankar 
1152e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_5,XMM6)
1153e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1154e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM1)
1155e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM6)
1156e6580ceeSShri Abhyankar 
1157e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_6,XMM7)
1158e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1159e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM2)
1160e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM7)
1161e6580ceeSShri Abhyankar 
1162e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_7,XMM6)
1163e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1164e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM3)
1165e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM6)
1166e6580ceeSShri Abhyankar 
1167e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM5)
1168e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM5)
1169e6580ceeSShri Abhyankar 
1170e6580ceeSShri Abhyankar           SSE_PREFETCH_L1(SSE_ARG_1,FLOAT_24)
1171e6580ceeSShri Abhyankar 
1172e6580ceeSShri Abhyankar           /* Column 2, product is accumulated in XMM6 */
1173e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_8,XMM6)
1174e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1175e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM0)
1176e6580ceeSShri Abhyankar 
1177e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_9,XMM7)
1178e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1179e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM1)
1180e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1181e6580ceeSShri Abhyankar 
1182e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_10,XMM7)
1183e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1184e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM2)
1185e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1186e6580ceeSShri Abhyankar 
1187e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_11,XMM7)
1188e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1189e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM3)
1190e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1191e6580ceeSShri Abhyankar 
1192e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM6)
1193e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)
1194e6580ceeSShri Abhyankar 
1195e6580ceeSShri Abhyankar           /* Note: For the last column, we no longer need to preserve XMM0->XMM3 */
1196e6580ceeSShri Abhyankar           /* Column 3, product is accumulated in XMM0 */
1197e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_12,XMM7)
1198e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1199e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM0,XMM7)
1200e6580ceeSShri Abhyankar 
1201e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_13,XMM7)
1202e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1203e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM1,XMM7)
1204e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM1)
1205e6580ceeSShri Abhyankar 
1206e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_14,XMM1)
1207e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM1,XMM1,0x00)
1208e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM1,XMM2)
1209e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM1)
1210e6580ceeSShri Abhyankar 
1211e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_15,XMM7)
1212e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1213e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM3,XMM7)
1214e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM3)
1215e6580ceeSShri Abhyankar 
1216e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM0)
1217e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1218e6580ceeSShri Abhyankar 
1219e6580ceeSShri Abhyankar           /* Simplify Bookkeeping -- Completely Unnecessary Instructions */
1220e6580ceeSShri Abhyankar           /* This is code to be maintained and read by humans after all. */
1221e6580ceeSShri Abhyankar           /* Copy Multiplier Col 3 into XMM3 */
1222e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM3,XMM0)
1223e6580ceeSShri Abhyankar           /* Copy Multiplier Col 2 into XMM2 */
1224e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM2,XMM6)
1225e6580ceeSShri Abhyankar           /* Copy Multiplier Col 1 into XMM1 */
1226e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM1,XMM5)
1227e6580ceeSShri Abhyankar           /* Copy Multiplier Col 0 into XMM0 */
1228e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM0,XMM4)
1229e6580ceeSShri Abhyankar         SSE_INLINE_END_2;
1230e6580ceeSShri Abhyankar 
1231e6580ceeSShri Abhyankar         /* Update the row: */
1232e6580ceeSShri Abhyankar         nz = bi[row+1] - diag_offset[row] - 1;
1233e6580ceeSShri Abhyankar         pv += 16;
1234e6580ceeSShri Abhyankar         for (j=0; j<nz; j++) {
1235e6580ceeSShri Abhyankar           PREFETCH_L1(&pv[16]);
1236e6580ceeSShri Abhyankar           x = rtmp + 16*((unsigned int)pj[j]);
1237e6580ceeSShri Abhyankar /*            x = rtmp + 4*pj[j]; */
1238e6580ceeSShri Abhyankar 
1239e6580ceeSShri Abhyankar           /* X:=X-M*PV, One column at a time */
1240e6580ceeSShri Abhyankar           /* Note: M is already loaded columnwise into registers XMM0-XMM3 */
1241e6580ceeSShri Abhyankar           SSE_INLINE_BEGIN_2(x,pv)
1242e6580ceeSShri Abhyankar             /* Load First Column of X */
1243e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM4)
1244e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM4)
1245e6580ceeSShri Abhyankar 
1246e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1247e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_0,XMM5)
1248e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1249e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM0)
1250e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1251e6580ceeSShri Abhyankar 
1252e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_1,XMM6)
1253e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1254e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM1)
1255e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM6)
1256e6580ceeSShri Abhyankar 
1257e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_2,XMM7)
1258e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1259e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM2)
1260e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM7)
1261e6580ceeSShri Abhyankar 
1262e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_3,XMM5)
1263e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1264e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM3)
1265e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1266e6580ceeSShri Abhyankar 
1267e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM4)
1268e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM4)
1269e6580ceeSShri Abhyankar 
1270e6580ceeSShri Abhyankar             /* Second Column */
1271e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM5)
1272e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM5)
1273e6580ceeSShri Abhyankar 
1274e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1275e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_4,XMM6)
1276e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1277e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM0)
1278e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM6)
1279e6580ceeSShri Abhyankar 
1280e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_5,XMM7)
1281e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1282e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM1)
1283e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM7)
1284e6580ceeSShri Abhyankar 
1285e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_6,XMM4)
1286e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM4,XMM4,0x00)
1287e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM4,XMM2)
1288e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM4)
1289e6580ceeSShri Abhyankar 
1290e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_7,XMM6)
1291e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1292e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM3)
1293e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM6)
1294e6580ceeSShri Abhyankar 
1295e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM5)
1296e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM5)
1297e6580ceeSShri Abhyankar 
1298e6580ceeSShri Abhyankar             SSE_PREFETCH_L1(SSE_ARG_2,FLOAT_24)
1299e6580ceeSShri Abhyankar 
1300e6580ceeSShri Abhyankar             /* Third Column */
1301e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM6)
1302e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1303e6580ceeSShri Abhyankar 
1304e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1305e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_8,XMM7)
1306e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1307e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM0)
1308e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM7)
1309e6580ceeSShri Abhyankar 
1310e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_9,XMM4)
1311e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM4,XMM4,0x00)
1312e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM4,XMM1)
1313e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM4)
1314e6580ceeSShri Abhyankar 
1315e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_10,XMM5)
1316e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1317e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM2)
1318e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM5)
1319e6580ceeSShri Abhyankar 
1320e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_11,XMM7)
1321e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1322e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM3)
1323e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM7)
1324e6580ceeSShri Abhyankar 
1325e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM6)
1326e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1327e6580ceeSShri Abhyankar 
1328e6580ceeSShri Abhyankar             /* Fourth Column */
1329e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM4)
1330e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM4)
1331e6580ceeSShri Abhyankar 
1332e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1333e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_12,XMM5)
1334e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1335e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM0)
1336e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1337e6580ceeSShri Abhyankar 
1338e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_13,XMM6)
1339e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1340e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM1)
1341e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM6)
1342e6580ceeSShri Abhyankar 
1343e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_14,XMM7)
1344e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1345e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM2)
1346e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM7)
1347e6580ceeSShri Abhyankar 
1348e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_15,XMM5)
1349e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1350e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM3)
1351e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1352e6580ceeSShri Abhyankar 
1353e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
1354e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)
1355e6580ceeSShri Abhyankar           SSE_INLINE_END_2;
1356e6580ceeSShri Abhyankar           pv   += 16;
1357e6580ceeSShri Abhyankar         }
1358e6580ceeSShri Abhyankar         ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr);
1359e6580ceeSShri Abhyankar       }
1360e6580ceeSShri Abhyankar       row = (unsigned int)(*bjtmp++);
1361e6580ceeSShri Abhyankar /*        row = (*bjtmp++)/4; */
1362e6580ceeSShri Abhyankar /*        bjtmp++; */
1363e6580ceeSShri Abhyankar     }
1364e6580ceeSShri Abhyankar     /* finished row so stick it into b->a */
1365e6580ceeSShri Abhyankar     pv = ba + 16*bi[i];
1366e6580ceeSShri Abhyankar     pj = bj + bi[i];
1367e6580ceeSShri Abhyankar     nz = bi[i+1] - bi[i];
1368e6580ceeSShri Abhyankar 
1369e6580ceeSShri Abhyankar     /* Copy x block back into pv block */
1370e6580ceeSShri Abhyankar     for (j=0; j<nz; j++) {
1371e6580ceeSShri Abhyankar       x  = rtmp+16*((unsigned int)pj[j]);
1372e6580ceeSShri Abhyankar /*        x  = rtmp+4*pj[j]; */
1373e6580ceeSShri Abhyankar 
1374e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_2(x,pv)
1375e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1376e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM1)
1377e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM1)
1378e6580ceeSShri Abhyankar 
1379e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM2)
1380e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM2)
1381e6580ceeSShri Abhyankar 
1382e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM3)
1383e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM3)
1384e6580ceeSShri Abhyankar 
1385e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM4)
1386e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM4)
1387e6580ceeSShri Abhyankar 
1388e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
1389e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM5)
1390e6580ceeSShri Abhyankar 
1391e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1392e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)
1393e6580ceeSShri Abhyankar 
1394e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM7)
1395e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM7)
1396e6580ceeSShri Abhyankar 
1397e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
1398e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1399e6580ceeSShri Abhyankar       SSE_INLINE_END_2;
1400e6580ceeSShri Abhyankar       pv += 16;
1401e6580ceeSShri Abhyankar     }
1402e6580ceeSShri Abhyankar     /* invert diagonal block */
1403e6580ceeSShri Abhyankar     w = ba + 16*diag_offset[i];
1404e6580ceeSShri Abhyankar     if (pivotinblocks) {
1405e6580ceeSShri Abhyankar       ierr = Kernel_A_gets_inverse_A_4(w,shift);CHKERRQ(ierr);
1406e6580ceeSShri Abhyankar     } else {
1407e6580ceeSShri Abhyankar       ierr = Kernel_A_gets_inverse_A_4_nopivot(w);CHKERRQ(ierr);
1408e6580ceeSShri Abhyankar     }
1409e6580ceeSShri Abhyankar /*      ierr = Kernel_A_gets_inverse_A_4_SSE(w);CHKERRQ(ierr); */
1410e6580ceeSShri Abhyankar     /* Note: with Cramer's rule, the flop count below may be unfairly high or low */
1411e6580ceeSShri Abhyankar   }
1412e6580ceeSShri Abhyankar 
1413e6580ceeSShri Abhyankar   ierr = PetscFree(rtmp);CHKERRQ(ierr);
1414e6580ceeSShri Abhyankar   C->ops->solve          = MatSolve_SeqBAIJ_4_NaturalOrdering_SSE;
1415e6580ceeSShri Abhyankar   C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_NaturalOrdering_SSE;
1416e6580ceeSShri Abhyankar   C->assembled = PETSC_TRUE;
1417766f9fbaSBarry Smith   ierr = PetscLogFlops(1.333333333333*4.0*16.0*b->mbs);CHKERRQ(ierr);
1418e6580ceeSShri Abhyankar   /* flop count from inverting the diagonal blocks: (4/3)*bs^3 per block with bs = 4 */
1419e6580ceeSShri Abhyankar   SSE_SCOPE_END;
1420e6580ceeSShri Abhyankar   PetscFunctionReturn(0);
1421e6580ceeSShri Abhyankar }
1422e6580ceeSShri Abhyankar 
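/*
   Reference-only sketch (guarded out, never compiled) of the multiplier formation in
   the routine above: pc := pc*pv, where pv is the already-inverted diagonal block of
   the pivot row, so pc becomes L_ij^(k)*inv(U_jj^(k)).  Each scalar pv[4*c+k] is
   broadcast (SSE_SHUFFLE) and multiplied against column k of pc (held in XMM0-XMM3),
   accumulating product column c.  The helper name is illustrative only.
*/
#if 0
static void Sketch_BlockMult_4(MatScalar *pc,const MatScalar *pv)
{
  MatScalar m[16];
  int       r,c,k;
  for (c=0; c<4; c++) {
    for (r=0; r<4; r++) {
      m[4*c+r] = 0.0;
      for (k=0; k<4; k++) m[4*c+r] += pc[4*k+r]*pv[4*c+k];
    }
  }
  for (k=0; k<16; k++) pc[k] = m[k];   /* overwrite pc with the multiplier block */
}
#endif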
1423e6580ceeSShri Abhyankar #undef __FUNCT__
1424e6580ceeSShri Abhyankar #define __FUNCT__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj"
1425e6580ceeSShri Abhyankar PetscErrorCode MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj(Mat C,Mat A,const MatFactorInfo *info)
1426e6580ceeSShri Abhyankar {
1427e6580ceeSShri Abhyankar   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
1428e6580ceeSShri Abhyankar   PetscErrorCode ierr;
1429e6580ceeSShri Abhyankar   int  i,j,n = a->mbs;
1430e6580ceeSShri Abhyankar   unsigned short *bj = (unsigned short *)(b->j),*bjtmp,*pj;
1431e6580ceeSShri Abhyankar   unsigned int   row;
1432e6580ceeSShri Abhyankar   int            *ajtmpold,nz,*bi=b->i;
1433e6580ceeSShri Abhyankar   int            *diag_offset = b->diag,*ai=a->i,*aj=a->j;
1434e6580ceeSShri Abhyankar   MatScalar      *pv,*v,*rtmp,*pc,*w,*x;
1435e6580ceeSShri Abhyankar   MatScalar      *ba = b->a,*aa = a->a;
1436e6580ceeSShri Abhyankar   int            nonzero=0;
1437e6580ceeSShri Abhyankar /*    int            nonzero=0,colscale = 16; */
1438e6580ceeSShri Abhyankar   PetscTruth     pivotinblocks = b->pivotinblocks;
1439182b8fbaSHong Zhang   PetscReal      shift = info->shiftamount;
1440e6580ceeSShri Abhyankar 
1441e6580ceeSShri Abhyankar   PetscFunctionBegin;
1442e6580ceeSShri Abhyankar   SSE_SCOPE_BEGIN;
1443e6580ceeSShri Abhyankar 
1444e32f2f54SBarry Smith   if ((unsigned long)aa%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer aa is not 16 byte aligned.  SSE will not work.");
1445e32f2f54SBarry Smith   if ((unsigned long)ba%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer ba is not 16 byte aligned.  SSE will not work.");
1446e6580ceeSShri Abhyankar   ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);
1447e32f2f54SBarry Smith   if ((unsigned long)rtmp%16!=0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_BADPTR,"Pointer rtmp is not 16 byte aligned.  SSE will not work.");
1448e6580ceeSShri Abhyankar /*    if ((unsigned long)bj==(unsigned long)aj) { */
1449e6580ceeSShri Abhyankar /*      colscale = 4; */
1450e6580ceeSShri Abhyankar /*    } */
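  /* If the factor shares its column-index array with A (bj == aj), the factorization is being done in place; dispatch to the in-place variant */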
1451e6580ceeSShri Abhyankar   if ((unsigned long)bj==(unsigned long)aj) {
1452e6580ceeSShri Abhyankar     return(MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_SSE_usj_Inplace(C,info));
1453e6580ceeSShri Abhyankar   }
1454e6580ceeSShri Abhyankar 
1455e6580ceeSShri Abhyankar   for (i=0; i<n; i++) {
1456e6580ceeSShri Abhyankar     nz    = bi[i+1] - bi[i];
1457e6580ceeSShri Abhyankar     bjtmp = bj + bi[i];
1458e6580ceeSShri Abhyankar     /* zero out the 4x4 block accumulators */
1459e6580ceeSShri Abhyankar     /* zero out one register */
1460e6580ceeSShri Abhyankar     XOR_PS(XMM7,XMM7);
1461e6580ceeSShri Abhyankar     for  (j=0; j<nz; j++) {
1462e6580ceeSShri Abhyankar       x = rtmp+16*((unsigned int)bjtmp[j]);
1463e6580ceeSShri Abhyankar /*        x = rtmp+4*bjtmp[j]; */
1464e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_1(x)
1465e6580ceeSShri Abhyankar         /* Copy zero register to memory locations */
1466e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1467e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM7)
1468e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM7)
1469e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM7)
1470e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM7)
1471e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM7)
1472e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM7)
1473e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM7)
1474e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM7)
1475e6580ceeSShri Abhyankar       SSE_INLINE_END_1;
1476e6580ceeSShri Abhyankar     }
1477e6580ceeSShri Abhyankar     /* load in initial (unfactored) row */
1478e6580ceeSShri Abhyankar     nz       = ai[i+1] - ai[i];
1479e6580ceeSShri Abhyankar     ajtmpold = aj + ai[i];
1480e6580ceeSShri Abhyankar     v        = aa + 16*ai[i];
1481e6580ceeSShri Abhyankar     for (j=0; j<nz; j++) {
1482e6580ceeSShri Abhyankar       x = rtmp+16*ajtmpold[j];
1483e6580ceeSShri Abhyankar /*        x = rtmp+colscale*ajtmpold[j]; */
1484e6580ceeSShri Abhyankar       /* Copy v block into x block */
1485e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_2(v,x)
1486e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1487e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
1488e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM0)
1489e6580ceeSShri Abhyankar 
1490e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM1)
1491e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM1)
1492e6580ceeSShri Abhyankar 
1493e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM2)
1494e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM2)
1495e6580ceeSShri Abhyankar 
1496e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM3)
1497e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM3)
1498e6580ceeSShri Abhyankar 
1499e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM4)
1500e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM4)
1501e6580ceeSShri Abhyankar 
1502e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM5)
1503e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM5)
1504e6580ceeSShri Abhyankar 
1505e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM6)
1506e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM6)
1507e6580ceeSShri Abhyankar 
1508e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
1509e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1510e6580ceeSShri Abhyankar       SSE_INLINE_END_2;
1511e6580ceeSShri Abhyankar 
1512e6580ceeSShri Abhyankar       v += 16;
1513e6580ceeSShri Abhyankar     }
1514e6580ceeSShri Abhyankar /*      row = (*bjtmp++)/4; */
1515e6580ceeSShri Abhyankar     row = (unsigned int)(*bjtmp++);
1516e6580ceeSShri Abhyankar     while (row < i) {
1517e6580ceeSShri Abhyankar       pc  = rtmp + 16*row;
1518e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_1(pc)
1519e6580ceeSShri Abhyankar         /* Load block from lower triangle */
1520e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1521e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
1522e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM0)
1523e6580ceeSShri Abhyankar 
1524e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM1)
1525e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM1)
1526e6580ceeSShri Abhyankar 
1527e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM2)
1528e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM2)
1529e6580ceeSShri Abhyankar 
1530e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM3)
1531e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM3)
1532e6580ceeSShri Abhyankar 
1533e6580ceeSShri Abhyankar         /* Compare block to zero block */
1534e6580ceeSShri Abhyankar 
1535e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM4,XMM7)
1536e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM4,XMM0)
1537e6580ceeSShri Abhyankar 
1538e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM5,XMM7)
1539e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM5,XMM1)
1540e6580ceeSShri Abhyankar 
1541e6580ceeSShri Abhyankar         SSE_COPY_PS(XMM6,XMM7)
1542e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM6,XMM2)
1543e6580ceeSShri Abhyankar 
1544e6580ceeSShri Abhyankar         SSE_CMPNEQ_PS(XMM7,XMM3)
1545e6580ceeSShri Abhyankar 
1546e6580ceeSShri Abhyankar         /* Reduce the comparisons to one SSE register */
1547e6580ceeSShri Abhyankar         SSE_OR_PS(XMM6,XMM7)
1548e6580ceeSShri Abhyankar         SSE_OR_PS(XMM5,XMM4)
1549e6580ceeSShri Abhyankar         SSE_OR_PS(XMM5,XMM6)
1550e6580ceeSShri Abhyankar       SSE_INLINE_END_1;
1551e6580ceeSShri Abhyankar 
1552e6580ceeSShri Abhyankar       /* Reduce the one SSE register to an integer register for branching */
1553e6580ceeSShri Abhyankar       /* Note: Since nonzero is an int, there is no INLINE block version of this call */
1554e6580ceeSShri Abhyankar       MOVEMASK(nonzero,XMM5);
1555e6580ceeSShri Abhyankar 
1556e6580ceeSShri Abhyankar       /* If block is nonzero ... */
1557e6580ceeSShri Abhyankar       if (nonzero) {
1558e6580ceeSShri Abhyankar         pv = ba + 16*diag_offset[row];
1559e6580ceeSShri Abhyankar         PREFETCH_L1(&pv[16]);
1560e6580ceeSShri Abhyankar         pj = bj + diag_offset[row] + 1;
1561e6580ceeSShri Abhyankar 
1562e6580ceeSShri Abhyankar         /* Form Multiplier, one column at a time (Matrix-Matrix Product) */
1563e6580ceeSShri Abhyankar         /* L_ij^(k+1) = L_ij^(k)*inv(L_jj^(k)) */
1564e6580ceeSShri Abhyankar         /* but the diagonal was inverted already */
1565e6580ceeSShri Abhyankar         /* and, L_ij^(k) is already loaded into registers XMM0-XMM3 columnwise */
1566e6580ceeSShri Abhyankar 
1567e6580ceeSShri Abhyankar         SSE_INLINE_BEGIN_2(pv,pc)
1568e6580ceeSShri Abhyankar           /* Column 0, product is accumulated in XMM4 */
1569e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_0,XMM4)
1570e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM4,XMM4,0x00)
1571e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM4,XMM0)
1572e6580ceeSShri Abhyankar 
1573e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_1,XMM5)
1574e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM5,XMM5,0x00)
1575e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM5,XMM1)
1576e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM5)
1577e6580ceeSShri Abhyankar 
1578e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_2,XMM6)
1579e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1580e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM2)
1581e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM6)
1582e6580ceeSShri Abhyankar 
1583e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_3,XMM7)
1584e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1585e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM3)
1586e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM4,XMM7)
1587e6580ceeSShri Abhyankar 
1588e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM4)
1589e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM4)
1590e6580ceeSShri Abhyankar 
1591e6580ceeSShri Abhyankar           /* Column 1, product is accumulated in XMM5 */
1592e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_4,XMM5)
1593e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM5,XMM5,0x00)
1594e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM5,XMM0)
1595e6580ceeSShri Abhyankar 
1596e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_5,XMM6)
1597e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1598e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM1)
1599e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM6)
1600e6580ceeSShri Abhyankar 
1601e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_6,XMM7)
1602e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1603e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM2)
1604e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM7)
1605e6580ceeSShri Abhyankar 
1606e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_7,XMM6)
1607e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1608e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM3)
1609e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM5,XMM6)
1610e6580ceeSShri Abhyankar 
1611e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM5)
1612e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM5)
1613e6580ceeSShri Abhyankar 
1614e6580ceeSShri Abhyankar           SSE_PREFETCH_L1(SSE_ARG_1,FLOAT_24)
1615e6580ceeSShri Abhyankar 
1616e6580ceeSShri Abhyankar           /* Column 2, product is accumulated in XMM6 */
1617e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_8,XMM6)
1618e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM6,XMM6,0x00)
1619e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM6,XMM0)
1620e6580ceeSShri Abhyankar 
1621e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_9,XMM7)
1622e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1623e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM1)
1624e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1625e6580ceeSShri Abhyankar 
1626e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_10,XMM7)
1627e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1628e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM2)
1629e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1630e6580ceeSShri Abhyankar 
1631e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_11,XMM7)
1632e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1633e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM7,XMM3)
1634e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM6,XMM7)
1635e6580ceeSShri Abhyankar 
1636e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM6)
1637e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)
1638e6580ceeSShri Abhyankar 
1639e6580ceeSShri Abhyankar           /* Note: For the last column, we no longer need to preserve XMM0->XMM3 */
1640e6580ceeSShri Abhyankar           /* Column 3, product is accumulated in XMM0 */
1641e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_12,XMM7)
1642e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1643e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM0,XMM7)
1644e6580ceeSShri Abhyankar 
1645e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_13,XMM7)
1646e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1647e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM1,XMM7)
1648e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM1)
1649e6580ceeSShri Abhyankar 
1650e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_14,XMM1)
1651e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM1,XMM1,0x00)
1652e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM1,XMM2)
1653e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM1)
1654e6580ceeSShri Abhyankar 
1655e6580ceeSShri Abhyankar           SSE_LOAD_SS(SSE_ARG_1,FLOAT_15,XMM7)
1656e6580ceeSShri Abhyankar           SSE_SHUFFLE(XMM7,XMM7,0x00)
1657e6580ceeSShri Abhyankar           SSE_MULT_PS(XMM3,XMM7)
1658e6580ceeSShri Abhyankar           SSE_ADD_PS(XMM0,XMM3)
1659e6580ceeSShri Abhyankar 
1660e6580ceeSShri Abhyankar           SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM0)
1661e6580ceeSShri Abhyankar           SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1662e6580ceeSShri Abhyankar 
1663e6580ceeSShri Abhyankar           /* Simplify Bookkeeping -- Completely Unnecessary Instructions */
1664e6580ceeSShri Abhyankar           /* This is code to be maintained and read by humans after all. */
1665e6580ceeSShri Abhyankar           /* Copy Multiplier Col 3 into XMM3 */
1666e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM3,XMM0)
1667e6580ceeSShri Abhyankar           /* Copy Multiplier Col 2 into XMM2 */
1668e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM2,XMM6)
1669e6580ceeSShri Abhyankar           /* Copy Multiplier Col 1 into XMM1 */
1670e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM1,XMM5)
1671e6580ceeSShri Abhyankar           /* Copy Multiplier Col 0 into XMM0 */
1672e6580ceeSShri Abhyankar           SSE_COPY_PS(XMM0,XMM4)
1673e6580ceeSShri Abhyankar         SSE_INLINE_END_2;
1674e6580ceeSShri Abhyankar 
1675e6580ceeSShri Abhyankar         /* Update the row: */
1676e6580ceeSShri Abhyankar         nz = bi[row+1] - diag_offset[row] - 1;
1677e6580ceeSShri Abhyankar         pv += 16;
1678e6580ceeSShri Abhyankar         for (j=0; j<nz; j++) {
1679e6580ceeSShri Abhyankar           PREFETCH_L1(&pv[16]);
1680e6580ceeSShri Abhyankar           x = rtmp + 16*((unsigned int)pj[j]);
1681e6580ceeSShri Abhyankar /*            x = rtmp + 4*pj[j]; */
1682e6580ceeSShri Abhyankar 
1683e6580ceeSShri Abhyankar           /* X:=X-M*PV, One column at a time */
1684e6580ceeSShri Abhyankar           /* Note: M is already loaded columnwise into registers XMM0-XMM3 */
1685e6580ceeSShri Abhyankar           SSE_INLINE_BEGIN_2(x,pv)
1686e6580ceeSShri Abhyankar             /* Load First Column of X */
1687e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM4)
1688e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM4)
1689e6580ceeSShri Abhyankar 
1690e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1691e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_0,XMM5)
1692e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1693e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM0)
1694e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1695e6580ceeSShri Abhyankar 
1696e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_1,XMM6)
1697e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1698e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM1)
1699e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM6)
1700e6580ceeSShri Abhyankar 
1701e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_2,XMM7)
1702e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1703e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM2)
1704e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM7)
1705e6580ceeSShri Abhyankar 
1706e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_3,XMM5)
1707e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1708e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM3)
1709e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1710e6580ceeSShri Abhyankar 
1711e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM4)
1712e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM4)
1713e6580ceeSShri Abhyankar 
1714e6580ceeSShri Abhyankar             /* Second Column */
1715e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM5)
1716e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM5)
1717e6580ceeSShri Abhyankar 
1718e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1719e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_4,XMM6)
1720e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1721e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM0)
1722e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM6)
1723e6580ceeSShri Abhyankar 
1724e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_5,XMM7)
1725e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1726e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM1)
1727e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM7)
1728e6580ceeSShri Abhyankar 
1729e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_6,XMM4)
1730e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM4,XMM4,0x00)
1731e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM4,XMM2)
1732e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM4)
1733e6580ceeSShri Abhyankar 
1734e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_7,XMM6)
1735e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1736e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM3)
1737e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM5,XMM6)
1738e6580ceeSShri Abhyankar 
1739e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM5)
1740e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM5)
1741e6580ceeSShri Abhyankar 
1742e6580ceeSShri Abhyankar             SSE_PREFETCH_L1(SSE_ARG_2,FLOAT_24)
1743e6580ceeSShri Abhyankar 
1744e6580ceeSShri Abhyankar             /* Third Column */
1745e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM6)
1746e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1747e6580ceeSShri Abhyankar 
1748e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1749e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_8,XMM7)
1750e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1751e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM0)
1752e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM7)
1753e6580ceeSShri Abhyankar 
1754e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_9,XMM4)
1755e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM4,XMM4,0x00)
1756e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM4,XMM1)
1757e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM4)
1758e6580ceeSShri Abhyankar 
1759e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_10,XMM5)
1760e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1761e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM2)
1762e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM5)
1763e6580ceeSShri Abhyankar 
1764e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_11,XMM7)
1765e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1766e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM3)
1767e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM6,XMM7)
1768e6580ceeSShri Abhyankar 
1769e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM6)
1770e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1771e6580ceeSShri Abhyankar 
1772e6580ceeSShri Abhyankar             /* Fourth Column */
1773e6580ceeSShri Abhyankar             SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM4)
1774e6580ceeSShri Abhyankar             SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM4)
1775e6580ceeSShri Abhyankar 
1776e6580ceeSShri Abhyankar             /* Matrix-Vector Product: */
1777e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_12,XMM5)
1778e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1779e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM0)
1780e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1781e6580ceeSShri Abhyankar 
1782e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_13,XMM6)
1783e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM6,XMM6,0x00)
1784e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM6,XMM1)
1785e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM6)
1786e6580ceeSShri Abhyankar 
1787e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_14,XMM7)
1788e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM7,XMM7,0x00)
1789e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM7,XMM2)
1790e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM7)
1791e6580ceeSShri Abhyankar 
1792e6580ceeSShri Abhyankar             SSE_LOAD_SS(SSE_ARG_2,FLOAT_15,XMM5)
1793e6580ceeSShri Abhyankar             SSE_SHUFFLE(XMM5,XMM5,0x00)
1794e6580ceeSShri Abhyankar             SSE_MULT_PS(XMM5,XMM3)
1795e6580ceeSShri Abhyankar             SSE_SUB_PS(XMM4,XMM5)
1796e6580ceeSShri Abhyankar 
1797e6580ceeSShri Abhyankar             SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
1798e6580ceeSShri Abhyankar             SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)
1799e6580ceeSShri Abhyankar           SSE_INLINE_END_2;
1800e6580ceeSShri Abhyankar           pv   += 16;
1801e6580ceeSShri Abhyankar         }
1802e6580ceeSShri Abhyankar         ierr = PetscLogFlops(128.0*nz+112.0);CHKERRQ(ierr);
1803e6580ceeSShri Abhyankar       }
1804e6580ceeSShri Abhyankar       row = (unsigned int)(*bjtmp++);
1805e6580ceeSShri Abhyankar /*        row = (*bjtmp++)/4; */
1806e6580ceeSShri Abhyankar /*        bjtmp++; */
1807e6580ceeSShri Abhyankar     }
1808e6580ceeSShri Abhyankar     /* finished row so stick it into b->a */
1809e6580ceeSShri Abhyankar     pv = ba + 16*bi[i];
1810e6580ceeSShri Abhyankar     pj = bj + bi[i];
1811e6580ceeSShri Abhyankar     nz = bi[i+1] - bi[i];
1812e6580ceeSShri Abhyankar 
1813e6580ceeSShri Abhyankar     /* Copy x block back into pv block */
1814e6580ceeSShri Abhyankar     for (j=0; j<nz; j++) {
1815e6580ceeSShri Abhyankar       x  = rtmp+16*((unsigned int)pj[j]);
1816e6580ceeSShri Abhyankar /*        x  = rtmp+4*pj[j]; */
1817e6580ceeSShri Abhyankar 
1818e6580ceeSShri Abhyankar       SSE_INLINE_BEGIN_2(x,pv)
1819e6580ceeSShri Abhyankar         /* Note: on future SSE architectures, STORE might be more efficient than STOREL/H */
1820e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM1)
1821e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_0,XMM1)
1822e6580ceeSShri Abhyankar 
1823e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_2,XMM2)
1824e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_2,XMM2)
1825e6580ceeSShri Abhyankar 
1826e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_4,XMM3)
1827e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_4,XMM3)
1828e6580ceeSShri Abhyankar 
1829e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM4)
1830e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_6,XMM4)
1831e6580ceeSShri Abhyankar 
1832e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
1833e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_8,XMM5)
1834e6580ceeSShri Abhyankar 
1835e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_10,XMM6)
1836e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_10,XMM6)
1837e6580ceeSShri Abhyankar 
1838e6580ceeSShri Abhyankar         SSE_LOADL_PS(SSE_ARG_1,FLOAT_12,XMM7)
1839e6580ceeSShri Abhyankar         SSE_STOREL_PS(SSE_ARG_2,FLOAT_12,XMM7)
1840e6580ceeSShri Abhyankar 
1841e6580ceeSShri Abhyankar         SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM0)
1842e6580ceeSShri Abhyankar         SSE_STOREH_PS(SSE_ARG_2,FLOAT_14,XMM0)
1843e6580ceeSShri Abhyankar       SSE_INLINE_END_2;
1844e6580ceeSShri Abhyankar       pv += 16;
1845e6580ceeSShri Abhyankar     }
1846e6580ceeSShri Abhyankar     /* invert diagonal block */
1847e6580ceeSShri Abhyankar     w = ba + 16*diag_offset[i];
1848e6580ceeSShri Abhyankar     if (pivotinblocks) {
1849e6580ceeSShri Abhyankar       ierr = Kernel_A_gets_inverse_A_4(w,shift);CHKERRQ(ierr);
1850e6580ceeSShri Abhyankar     } else {
1851e6580ceeSShri Abhyankar       ierr = Kernel_A_gets_inverse_A_4_nopivot(w);CHKERRQ(ierr);
1852e6580ceeSShri Abhyankar     }
1853e6580ceeSShri Abhyankar /*      ierr = Kernel_A_gets_inverse_A_4_SSE(w);CHKERRQ(ierr); */
1854e6580ceeSShri Abhyankar     /* Note: with Cramer's rule, the flop count below may be unfairly high or low */
1855e6580ceeSShri Abhyankar   }
1856e6580ceeSShri Abhyankar 
1857e6580ceeSShri Abhyankar   ierr = PetscFree(rtmp);CHKERRQ(ierr);
1858e6580ceeSShri Abhyankar   C->ops->solve          = MatSolve_SeqBAIJ_4_NaturalOrdering_SSE;
1859e6580ceeSShri Abhyankar   C->ops->solvetranspose = MatSolveTranspose_SeqBAIJ_4_NaturalOrdering_SSE;
1860e6580ceeSShri Abhyankar   C->assembled = PETSC_TRUE;
1861766f9fbaSBarry Smith   ierr = PetscLogFlops(1.333333333333*4.0*16.0*b->mbs);CHKERRQ(ierr);
1862e6580ceeSShri Abhyankar   /* flop count from inverting the diagonal blocks: (4/3)*bs^3 per block with bs = 4 */
1863e6580ceeSShri Abhyankar   SSE_SCOPE_END;
1864e6580ceeSShri Abhyankar   PetscFunctionReturn(0);
1865e6580ceeSShri Abhyankar }
1866e6580ceeSShri Abhyankar 
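/*
   Reference-only sketch (guarded out, never compiled) of the zero-block test used in
   the routines above: the SSE version compares the four columns of the 4x4 block
   against a zeroed register with SSE_CMPNEQ_PS, ORs the resulting masks together, and
   reduces them to an int with MOVEMASK so the expensive block update can be skipped
   for structurally zero blocks.  The helper name is illustrative only.
*/
#if 0
static int Sketch_BlockIsNonzero_4(const MatScalar *pc)
{
  int k;
  for (k=0; k<16; k++) {
    if (pc[k] != 0.0) return 1;   /* exact floating-point comparison is intended here */
  }
  return 0;
}
#endif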
1867e6580ceeSShri Abhyankar #endif
1868