Actual source code: baijfact12.c

/*$Id: baijfact12.c,v 1.13 2001/04/13 18:42:37 buschelm Exp $*/
/*
    Factorization code for BAIJ format.
*/
 #include "src/mat/impls/baij/seq/baij.h"
 #include "src/vec/vecimpl.h"
 #include "src/inline/ilu.h"

int MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering(Mat A,Mat *B)
{
/*
    Default version for when blocks are 4 by 4, using the natural ordering
*/
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  int         ierr,i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  int         *ajtmpold,*ajtmp,nz,row;
  int         *diag_offset = b->diag,*ai=a->i,*aj=a->j,*pj;
  MatScalar   *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar   p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  MatScalar   p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  MatScalar   p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  MatScalar   m13,m14,m15,m16;
  MatScalar   *ba = b->a,*aa = a->a;
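  /*
     Blocked LU with the natural ordering (no row permutation):
       - rtmp is a dense working copy of block row i: 16 scalars
         (one column-major 4x4 block) per block column of the factor;
       - for every already-factored row `row` < i whose block in row i is
         nonzero, the multiplier M = rtmp(row)*inv(U(row,row)) overwrites
         rtmp(row), and M*U(row,k) is subtracted from the remaining blocks;
       - the finished row is then copied into b->a and its diagonal block
         is inverted in place.
  */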

  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      x = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6] = x[7] = x[8] = x[9] = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      x    = rtmp+16*ajtmpold[j];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8]  = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v    += 16;
    }
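    /* eliminate using every already-factored block row that appears to the left of the diagonal in row i */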
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0) {
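        /* row has already been factored and its diagonal block inverted, so pv points at inv(U(row,row)) */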
        pv = ba + 16*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
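        /* multiplier block: pc := pc * inv(U(row,row)), formed one column at a time (blocks are stored column-major) */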
        pc[0] = m1 = p1*x1 + p5*x2  + p9*x3  + p13*x4;
        pc[1] = m2 = p2*x1 + p6*x2  + p10*x3 + p14*x4;
        pc[2] = m3 = p3*x1 + p7*x2  + p11*x3 + p15*x4;
        pc[3] = m4 = p4*x1 + p8*x2  + p12*x3 + p16*x4;

        pc[4] = m5 = p1*x5 + p5*x6  + p9*x7  + p13*x8;
        pc[5] = m6 = p2*x5 + p6*x6  + p10*x7 + p14*x8;
        pc[6] = m7 = p3*x5 + p7*x6  + p11*x7 + p15*x8;
        pc[7] = m8 = p4*x5 + p8*x6  + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10  + p9*x11  + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10  + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10  + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10  + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14  + p9*x15  + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14  + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14  + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14  + p12*x15 + p16*x16;
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
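        /* subtract M * U(row,j) from the corresponding block of row i, for every block j to the right of the diagonal */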
        for (j=0; j<nz; j++) {
          x1   = pv[0];  x2  = pv[1];   x3 = pv[2];  x4  = pv[3];
          x5   = pv[4];  x6  = pv[5];   x7 = pv[6];  x8  = pv[7]; x9 = pv[8];
          x10  = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14  = pv[13]; x15 = pv[14]; x16 = pv[15];
          x    = rtmp + 16*pj[j];
          x[0] -= m1*x1 + m5*x2  + m9*x3  + m13*x4;
          x[1] -= m2*x1 + m6*x2  + m10*x3 + m14*x4;
          x[2] -= m3*x1 + m7*x2  + m11*x3 + m15*x4;
          x[3] -= m4*x1 + m8*x2  + m12*x3 + m16*x4;

          x[4] -= m1*x5 + m5*x6  + m9*x7  + m13*x8;
          x[5] -= m2*x5 + m6*x6  + m10*x7 + m14*x8;
          x[6] -= m3*x5 + m7*x6  + m11*x7 + m15*x8;
          x[7] -= m4*x5 + m8*x6  + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11  + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14  + m9*x15  + m13*x16;
          x[13] -= m2*x13 + m6*x14  + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14  + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14  + m12*x15 + m16*x16;

          pv   += 16;
        }
        PetscLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      x      = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7]; pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv   += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    Kernel_A_gets_inverse_A_4(w);
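    /* the inverse is stored in place of U(i,i); later rows multiply by it directly instead of solving a 4x4 system */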
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PetscLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  return(0);
}


#if defined(PETSC_HAVE_ICL_SSE)

#include "xmmintrin.h"
EXTERN int Kernel_A_gets_inverse_A_4_ICL_SSE(float*);


/*
    SSE version for when blocks are 4 by 4, using the natural ordering.
    Uses Intel compiler intrinsics to perform the SSE operations.
*/
int MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering_ICL_SSE(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  int         ierr,i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  int         *ajtmpold,*ajtmp,nz,row;
  int         *diag_offset = b->diag,*ai=a->i,*aj=a->j,*pj;
  MatScalar   *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar   *ba = b->a,*aa = a->a;
  __m128      X0,X1,X2,X3,M0,M1,M2,M3,P0,P1,P2,P3;
  __m128      COMP0,COMP1,COMP2,COMP3;
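  /*
     Single-precision SSE variant of the routine above: each __m128 holds one
     column of a 4x4 block, so the multiplier and the block updates are computed
     a whole column at a time. This path assumes MatScalar is a 4-byte float.
  */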

  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      /* zero out the accumulators */
      x = rtmp+16*ajtmp[j];
      _mm_storel_pi((__m64*)(x),   _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+2), _mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+4), _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+6), _mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+8), _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+10),_mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+12),_mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+14),_mm_setzero_ps());
    }
    /* load in initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      __m128 tmp;
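      /* tmp is only a staging register: each loadl/loadh pairs with a storel/storeh
         of the same half, so the half of tmp that is never written is never stored */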
      x = rtmp+16*ajtmpold[j];
      /* Copy v block into x block */
      _mm_storel_pi((__m64*)(x),   _mm_loadl_pi(tmp,(__m64*)(v)));
      _mm_storeh_pi((__m64*)(x+2), _mm_loadh_pi(tmp,(__m64*)(v+2)));
      _mm_storel_pi((__m64*)(x+4), _mm_loadl_pi(tmp,(__m64*)(v+4)));
      _mm_storeh_pi((__m64*)(x+6), _mm_loadh_pi(tmp,(__m64*)(v+6)));
      _mm_storel_pi((__m64*)(x+8), _mm_loadl_pi(tmp,(__m64*)(v+8)));
      _mm_storeh_pi((__m64*)(x+10),_mm_loadh_pi(tmp,(__m64*)(v+10)));
      _mm_storel_pi((__m64*)(x+12),_mm_loadl_pi(tmp,(__m64*)(v+12)));
      _mm_storeh_pi((__m64*)(x+14),_mm_loadh_pi(tmp,(__m64*)(v+14)));
      v += 16;
    }
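    /* eliminate using every already-factored block row to the left of the diagonal, as in the scalar version */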
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      /* Load block from lower triangle */
      P0 = _mm_loadh_pi(_mm_loadl_pi(P0,(__m64*)(pc)),   (__m64*)(pc+2));
      P1 = _mm_loadh_pi(_mm_loadl_pi(P1,(__m64*)(pc+4)), (__m64*)(pc+6));
      P2 = _mm_loadh_pi(_mm_loadl_pi(P2,(__m64*)(pc+8)), (__m64*)(pc+10));
      P3 = _mm_loadh_pi(_mm_loadl_pi(P3,(__m64*)(pc+12)),(__m64*)(pc+14));
      /* Compare block to zero block */
      COMP0 = _mm_cmpneq_ps(P0,_mm_setzero_ps());
      COMP1 = _mm_cmpneq_ps(P1,_mm_setzero_ps());
      COMP2 = _mm_cmpneq_ps(P2,_mm_setzero_ps());
      COMP3 = _mm_cmpneq_ps(P3,_mm_setzero_ps());
      /* If block is nonzero ... */
      if (_mm_movemask_ps(_mm_or_ps(_mm_or_ps(COMP0,COMP1),_mm_or_ps(COMP2,COMP3)))) {
        pv = ba + 16*diag_offset[row];
        _mm_prefetch((char*)(pv+16),_MM_HINT_T0);
        _mm_prefetch((char*)(pv+24),_MM_HINT_T0);
        pj = bj + diag_offset[row] + 1;
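        /* pv points at the already inverted diagonal block of the factored pivot row;
           pj indexes the block columns to its right */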

        /* Form Multiplier, one column at a time */
        M0 = _mm_mul_ps(P0,_mm_load_ps1(pv));
        M0 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+1)),M0);
        M0 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+2)),M0);
        M0 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+3)),M0);

        _mm_storel_pi((__m64*)(pc),  M0);
        _mm_storeh_pi((__m64*)(pc+2),M0);

        M1 = _mm_mul_ps(P0,_mm_load_ps1(pv+4));
        M1 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+5)),M1);
        M1 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+6)),M1);
        M1 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+7)),M1);

        _mm_storel_pi((__m64*)(pc+4),M1);
        _mm_storeh_pi((__m64*)(pc+6),M1);

        M2 = _mm_mul_ps(P0,_mm_load_ps1(pv+8));
        M2 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+9)),M2);
        M2 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+10)),M2);
        M2 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+11)),M2);

        _mm_storel_pi((__m64*)(pc+8), M2);
        _mm_storeh_pi((__m64*)(pc+10),M2);

        M3 = _mm_mul_ps(P0,_mm_load_ps1(pv+12));
        M3 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+13)),M3);
        M3 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+14)),M3);
        M3 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+15)),M3);

        _mm_storel_pi((__m64*)(pc+12),M3);
        _mm_storeh_pi((__m64*)(pc+14),M3);

        /* Update the row: */
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          _mm_prefetch((char*)(pv+16),_MM_HINT_T0);
          _mm_prefetch((char*)(pv+24),_MM_HINT_T0);
          x = rtmp + 16*pj[j];
          /* x:=x-m*pv, One column at a time */
          X0 = _mm_sub_ps(_mm_load_ps(x),_mm_mul_ps(M0,_mm_load_ps1(pv)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M1,_mm_load_ps1(pv+1)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M2,_mm_load_ps1(pv+2)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M3,_mm_load_ps1(pv+3)));

          _mm_storel_pi((__m64*)(x),  X0);
          _mm_storeh_pi((__m64*)(x+2),X0);

          X1 = _mm_sub_ps(_mm_load_ps(x+4),_mm_mul_ps(M0,_mm_load_ps1(pv+4)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M1,_mm_load_ps1(pv+5)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M2,_mm_load_ps1(pv+6)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M3,_mm_load_ps1(pv+7)));

          _mm_storel_pi((__m64*)(x+4),X1);
          _mm_storeh_pi((__m64*)(x+6),X1);

          X2 = _mm_sub_ps(_mm_load_ps(x+8),_mm_mul_ps(M0,_mm_load_ps1(pv+8)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M1,_mm_load_ps1(pv+9)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M2,_mm_load_ps1(pv+10)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M3,_mm_load_ps1(pv+11)));

          _mm_storel_pi((__m64*)(x+8), X2);
          _mm_storeh_pi((__m64*)(x+10),X2);

          X3 = _mm_sub_ps(_mm_load_ps(x+12),_mm_mul_ps(M0,_mm_load_ps1(pv+12)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M1,_mm_load_ps1(pv+13)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M2,_mm_load_ps1(pv+14)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M3,_mm_load_ps1(pv+15)));

          _mm_storel_pi((__m64*)(x+12),X3);
          _mm_storeh_pi((__m64*)(x+14),X3);

          pv   += 16;
        }
        PetscLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      __m128 tmp;
      x  = rtmp+16*pj[j];
      /* Copy x block back into pv block */
      _mm_storel_pi((__m64*)(pv),   _mm_loadl_pi(tmp,(__m64*)(x)));
      _mm_storeh_pi((__m64*)(pv+2), _mm_loadh_pi(tmp,(__m64*)(x+2)));
      _mm_storel_pi((__m64*)(pv+4), _mm_loadl_pi(tmp,(__m64*)(x+4)));
      _mm_storeh_pi((__m64*)(pv+6), _mm_loadh_pi(tmp,(__m64*)(x+6)));
      _mm_storel_pi((__m64*)(pv+8), _mm_loadl_pi(tmp,(__m64*)(x+8)));
      _mm_storeh_pi((__m64*)(pv+10),_mm_loadh_pi(tmp,(__m64*)(x+10)));
      _mm_storel_pi((__m64*)(pv+12),_mm_loadl_pi(tmp,(__m64*)(x+12)));
      _mm_storeh_pi((__m64*)(pv+14),_mm_loadh_pi(tmp,(__m64*)(x+14)));
      pv += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    Kernel_A_gets_inverse_A_4_ICL_SSE(w);
    /* Note: using Cramer's rule, the flop count below might be high */
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PetscLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  return(0);
}
#endif