Actual source code: baijfact12.c
/*$Id: baijfact12.c,v 1.12 2001/04/07 19:14:47 bsmith Exp $*/
/*
    Factorization code for BAIJ format.
*/
#include "src/mat/impls/baij/seq/baij.h"
#include "src/vec/vecimpl.h"
#include "src/inline/ilu.h"
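
/*
   Overview: numeric phase of the blocked LU factorization of a SeqBAIJ matrix with
   4x4 blocks and the natural ordering.  The symbolic factorization already stored in
   *B (b->i, b->j, b->diag) supplies the sparsity pattern; each block row i is expanded
   into the dense work array rtmp, eliminated against the previously factored rows,
   copied back into b->a, and its diagonal block is inverted in place.  Two variants
   are provided: the portable scalar version below and, when PETSC_HAVE_ICL_SSE is
   defined, a single-precision version using Intel SSE intrinsics.
*/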

#if !defined(PETSC_HAVE_ICL_SSE)

int MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering(Mat A,Mat *B)
{
/*
    Default version for 4-by-4 blocks using the natural ordering
*/
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  int         ierr,i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  int         *ajtmpold,*ajtmp,nz,row;
  int         *diag_offset = b->diag,*ai=a->i,*aj=a->j,*pj;
  MatScalar   *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar   p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  MatScalar   p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  MatScalar   p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  MatScalar   m13,m14,m15,m16;
  MatScalar   *ba = b->a,*aa = a->a;

  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);
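  /*
     rtmp is a dense work row: slot k (the 16 consecutive MatScalars starting at rtmp+16*k)
     holds the 4x4 block in block column k of the row currently being factored; the 16
     entries of each block are stored contiguously in column-major order, which is how the
     block products below index them.
  */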

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      x     = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4] = x[5] = x[6] = x[7] = x[8] = x[9] = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in the initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      x     = rtmp+16*ajtmpold[j];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8]  = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v    += 16;
    }
    row = *ajtmp++;
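    /*
       Eliminate row i against each previously factored block row `row` (< i) that appears
       in its sparsity pattern.
    */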
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0 ||
          p16 != 0.0) {
        pv = ba + 16*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
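        /*
           pv points to the diagonal block of row `row`, which was inverted in place at the
           end of its own outer iteration.  The products below overwrite pc with the
           multiplier M = A(i,row) * inv(U(row,row)), one column of the 4x4 block at a time.
        */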
        pc[0]  = m1  = p1*x1 + p5*x2 + p9*x3 + p13*x4;
        pc[1]  = m2  = p2*x1 + p6*x2 + p10*x3 + p14*x4;
        pc[2]  = m3  = p3*x1 + p7*x2 + p11*x3 + p15*x4;
        pc[3]  = m4  = p4*x1 + p8*x2 + p12*x3 + p16*x4;

        pc[4]  = m5  = p1*x5 + p5*x6 + p9*x7 + p13*x8;
        pc[5]  = m6  = p2*x5 + p6*x6 + p10*x7 + p14*x8;
        pc[6]  = m7  = p3*x5 + p7*x6 + p11*x7 + p15*x8;
        pc[7]  = m8  = p4*x5 + p8*x6 + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10 + p9*x11 + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10 + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10 + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10 + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14 + p9*x15 + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14 + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14 + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14 + p12*x15 + p16*x16;

        nz  = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
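        /*
           Update the remaining blocks of the work row: for every block U(row,k) stored to
           the right of the diagonal of row `row`, subtract M*U(row,k) from the work-row
           block in block column k.
        */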
        for (j=0; j<nz; j++) {
          x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
          x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9 = pv[8];
          x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14 = pv[13]; x15 = pv[14]; x16 = pv[15];
          x = rtmp + 16*pj[j];
          x[0]  -= m1*x1 + m5*x2 + m9*x3 + m13*x4;
          x[1]  -= m2*x1 + m6*x2 + m10*x3 + m14*x4;
          x[2]  -= m3*x1 + m7*x2 + m11*x3 + m15*x4;
          x[3]  -= m4*x1 + m8*x2 + m12*x3 + m16*x4;

          x[4]  -= m1*x5 + m5*x6 + m9*x7 + m13*x8;
          x[5]  -= m2*x5 + m6*x6 + m10*x7 + m14*x8;
          x[6]  -= m3*x5 + m7*x6 + m11*x7 + m15*x8;
          x[7]  -= m4*x5 + m8*x6 + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11 + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14 + m9*x15 + m13*x16;
          x[13] -= m2*x13 + m6*x14 + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14 + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14 + m12*x15 + m16*x16;

          pv += 16;
        }
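        /*
           Flop count: forming the multiplier costs 16*(4 multiplies + 3 adds) = 112 flops,
           and each of the nz updated blocks costs 16*(4 multiplies + 4 subtractions) = 128.
        */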
        PetscLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      x = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7];  pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv += 16;
    }
    /* invert diagonal block */
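    /*
       The diagonal block is inverted in place so that the elimination above (and the
       subsequent block triangular solves) can multiply by the inverse instead of solving
       a 4x4 system for every block.
    */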
    w = ba + 16*diag_offset[i];
    Kernel_A_gets_inverse_A_4(w);
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PetscLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  return(0);
}

#else

#include "xmmintrin.h"
EXTERN int Kernel_A_gets_inverse_A_4SSE(float*);

/*
    SSE version for 4-by-4 blocks using the natural ordering;
    uses Intel compiler intrinsics to perform the SSE operations.
*/
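
/*
   Note: this variant assumes MatScalar is single precision (each __m128 holds four floats,
   and Kernel_A_gets_inverse_A_4SSE takes a float*); the _mm_load_ps calls on the work row
   also assume rtmp is 16-byte aligned, which PetscMalloc is expected to provide.
*/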

int MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)C->data;
  int         ierr,i,j,n = a->mbs,*bi = b->i,*bj = b->j;
  int         *ajtmpold,*ajtmp,nz,row;
  int         *diag_offset = b->diag,*ai=a->i,*aj=a->j,*pj;
  MatScalar   *pv,*v,*rtmp,*pc,*w,*x;
  MatScalar   *ba = b->a,*aa = a->a;
  __m128      X0,X1,X2,X3,M0,M1,M2,M3,P0,P1,P2,P3;
  __m128      COMP0,COMP1,COMP2,COMP3;

  ierr = PetscMalloc(16*(n+1)*sizeof(MatScalar),&rtmp);CHKERRQ(ierr);

  for (i=0; i<n; i++) {
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for (j=0; j<nz; j++) {
      /* zero out the accumulators */
      x = rtmp+16*ajtmp[j];
      _mm_storel_pi((__m64*)(x),   _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+2), _mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+4), _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+6), _mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+8), _mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+10),_mm_setzero_ps());
      _mm_storel_pi((__m64*)(x+12),_mm_setzero_ps());
      _mm_storeh_pi((__m64*)(x+14),_mm_setzero_ps());
    }
    /* load in the initial (unfactored) row */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for (j=0; j<nz; j++) {
      __m128 tmp;
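      /*
         tmp only stages the 64-bit half loaded by _mm_loadl_pi/_mm_loadh_pi; the paired
         store writes back just that half, so tmp's uninitialized contents never reach
         memory (though some compilers warn about the uninitialized use).
      */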
      x = rtmp+16*ajtmpold[j];
      /* Copy v block into x block */
      _mm_storel_pi((__m64*)(x),   _mm_loadl_pi(tmp,(__m64*)(v)));
      _mm_storeh_pi((__m64*)(x+2), _mm_loadh_pi(tmp,(__m64*)(v+2)));
      _mm_storel_pi((__m64*)(x+4), _mm_loadl_pi(tmp,(__m64*)(v+4)));
      _mm_storeh_pi((__m64*)(x+6), _mm_loadh_pi(tmp,(__m64*)(v+6)));
      _mm_storel_pi((__m64*)(x+8), _mm_loadl_pi(tmp,(__m64*)(v+8)));
      _mm_storeh_pi((__m64*)(x+10),_mm_loadh_pi(tmp,(__m64*)(v+10)));
      _mm_storel_pi((__m64*)(x+12),_mm_loadl_pi(tmp,(__m64*)(v+12)));
      _mm_storeh_pi((__m64*)(x+14),_mm_loadh_pi(tmp,(__m64*)(v+14)));
      v += 16;
    }
    row = *ajtmp++;
    while (row < i) {
      pc = rtmp + 16*row;
      /* Load block from lower triangle */
      P0 = _mm_loadh_pi(_mm_loadl_pi(P0,(__m64*)(pc)),   (__m64*)(pc+2));
      P1 = _mm_loadh_pi(_mm_loadl_pi(P1,(__m64*)(pc+4)), (__m64*)(pc+6));
      P2 = _mm_loadh_pi(_mm_loadl_pi(P2,(__m64*)(pc+8)), (__m64*)(pc+10));
      P3 = _mm_loadh_pi(_mm_loadl_pi(P3,(__m64*)(pc+12)),(__m64*)(pc+14));
      /* Compare block to zero block */
      COMP0 = _mm_cmpneq_ps(P0,_mm_setzero_ps());
      COMP1 = _mm_cmpneq_ps(P1,_mm_setzero_ps());
      COMP2 = _mm_cmpneq_ps(P2,_mm_setzero_ps());
      COMP3 = _mm_cmpneq_ps(P3,_mm_setzero_ps());
      /* If block is nonzero ... */
      if (_mm_movemask_ps(_mm_or_ps(_mm_or_ps(COMP0,COMP1),_mm_or_ps(COMP2,COMP3)))) {
        pv = ba + 16*diag_offset[row];
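        /* Prefetch the next 4x4 block of this row (hint T0: into all cache levels) so the
           update loop below does not stall on memory */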
        _mm_prefetch((char*)(pv+16),_MM_HINT_T0);
        _mm_prefetch((char*)(pv+24),_MM_HINT_T0);
        pj = bj + diag_offset[row] + 1;

        /* Form the multiplier M = P*inv(D), one column at a time; _mm_load_ps1 broadcasts
           a single entry of the inverted diagonal block across all four lanes */
        M0 = _mm_mul_ps(P0,_mm_load_ps1(pv));
        M0 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+1)),M0);
        M0 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+2)),M0);
        M0 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+3)),M0);

        _mm_storel_pi((__m64*)(pc),  M0);
        _mm_storeh_pi((__m64*)(pc+2),M0);

        M1 = _mm_mul_ps(P0,_mm_load_ps1(pv+4));
        M1 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+5)),M1);
        M1 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+6)),M1);
        M1 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+7)),M1);

        _mm_storel_pi((__m64*)(pc+4),M1);
        _mm_storeh_pi((__m64*)(pc+6),M1);

        M2 = _mm_mul_ps(P0,_mm_load_ps1(pv+8));
        M2 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+9)),M2);
        M2 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+10)),M2);
        M2 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+11)),M2);

        _mm_storel_pi((__m64*)(pc+8), M2);
        _mm_storeh_pi((__m64*)(pc+10),M2);

        M3 = _mm_mul_ps(P0,_mm_load_ps1(pv+12));
        M3 = _mm_add_ps(_mm_mul_ps(P1,_mm_load_ps1(pv+13)),M3);
        M3 = _mm_add_ps(_mm_mul_ps(P2,_mm_load_ps1(pv+14)),M3);
        M3 = _mm_add_ps(_mm_mul_ps(P3,_mm_load_ps1(pv+15)),M3);

        _mm_storel_pi((__m64*)(pc+12),M3);
        _mm_storeh_pi((__m64*)(pc+14),M3);

        /* Update the row: */
        nz  = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          _mm_prefetch((char*)(pv+16),_MM_HINT_T0);
          _mm_prefetch((char*)(pv+24),_MM_HINT_T0);
          x = rtmp + 16*pj[j];
          /* x := x - M*pv, one column at a time */
          X0 = _mm_sub_ps(_mm_load_ps(x),_mm_mul_ps(M0,_mm_load_ps1(pv)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M1,_mm_load_ps1(pv+1)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M2,_mm_load_ps1(pv+2)));
          X0 = _mm_sub_ps(X0,_mm_mul_ps(M3,_mm_load_ps1(pv+3)));

          _mm_storel_pi((__m64*)(x),  X0);
          _mm_storeh_pi((__m64*)(x+2),X0);

          X1 = _mm_sub_ps(_mm_load_ps(x+4),_mm_mul_ps(M0,_mm_load_ps1(pv+4)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M1,_mm_load_ps1(pv+5)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M2,_mm_load_ps1(pv+6)));
          X1 = _mm_sub_ps(X1,_mm_mul_ps(M3,_mm_load_ps1(pv+7)));

          _mm_storel_pi((__m64*)(x+4),X1);
          _mm_storeh_pi((__m64*)(x+6),X1);

          X2 = _mm_sub_ps(_mm_load_ps(x+8),_mm_mul_ps(M0,_mm_load_ps1(pv+8)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M1,_mm_load_ps1(pv+9)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M2,_mm_load_ps1(pv+10)));
          X2 = _mm_sub_ps(X2,_mm_mul_ps(M3,_mm_load_ps1(pv+11)));

          _mm_storel_pi((__m64*)(x+8), X2);
          _mm_storeh_pi((__m64*)(x+10),X2);

          X3 = _mm_sub_ps(_mm_load_ps(x+12),_mm_mul_ps(M0,_mm_load_ps1(pv+12)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M1,_mm_load_ps1(pv+13)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M2,_mm_load_ps1(pv+14)));
          X3 = _mm_sub_ps(X3,_mm_mul_ps(M3,_mm_load_ps1(pv+15)));

          _mm_storel_pi((__m64*)(x+12),X3);
          _mm_storeh_pi((__m64*)(x+14),X3);

          pv += 16;
        }
        PetscLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for (j=0; j<nz; j++) {
      __m128 tmp;
      x = rtmp+16*pj[j];
      /* Copy x block back into pv block */
      _mm_storel_pi((__m64*)(pv),   _mm_loadl_pi(tmp,(__m64*)(x)));
      _mm_storeh_pi((__m64*)(pv+2), _mm_loadh_pi(tmp,(__m64*)(x+2)));
      _mm_storel_pi((__m64*)(pv+4), _mm_loadl_pi(tmp,(__m64*)(x+4)));
      _mm_storeh_pi((__m64*)(pv+6), _mm_loadh_pi(tmp,(__m64*)(x+6)));
      _mm_storel_pi((__m64*)(pv+8), _mm_loadl_pi(tmp,(__m64*)(x+8)));
      _mm_storeh_pi((__m64*)(pv+10),_mm_loadh_pi(tmp,(__m64*)(x+10)));
      _mm_storel_pi((__m64*)(pv+12),_mm_loadl_pi(tmp,(__m64*)(x+12)));
      _mm_storeh_pi((__m64*)(pv+14),_mm_loadh_pi(tmp,(__m64*)(x+14)));
      pv += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    Kernel_A_gets_inverse_A_4SSE(w);
    /* Note: since Cramer's rule is used to invert the block, the flop count logged below may be high */
  }

  ierr = PetscFree(rtmp);CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PetscLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  return(0);
}
#endif
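
/*
   Usage: this routine is not called directly; MatLUFactorNumeric(A,&B) dispatches to it
   when B was produced by a symbolic LU factorization of a SeqBAIJ matrix with block
   size 4 using the natural ordering.
*/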