Actual source code: dgefa4.c
/*$Id: dgefa4.c,v 1.19 2001/04/13 18:44:03 buschelm Exp $*/
/*
       Inverts 4 by 4 matrix using partial pivoting.

       Used by the sparse factorization routines in
       src/mat/impls/baij/seq and src/mat/impls/bdiag/seq

       See also src/inline/ilu.h

       This is a combination of the Linpack routines
       dgefa() and dgedi() specialized for a size of 4.

*/
#include "petsc.h"

int Kernel_A_gets_inverse_A_4(MatScalar *a)
{
  int       i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  int       k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* gaussian elimination with partial pivoting */

  /* Parameter adjustments */
  a -= 5;
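  /* (After this shift the routine indexes the matrix LINPACK/f2c style:
     with 1-based row index i and column index j, entry (i,j) of the
     column-major 4x4 block is addressed as a[i + 4*j].) */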

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;
    /* find l = pivot index */

    i__2 = 4 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1;}
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.) {
      SETERRQ(k,"Zero pivot");
    }

    /* interchange if necessary */

    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */

    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */

    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.) {
    SETERRQ(3,"Zero pivot,final row");
  }

  /*
     Now form the inverse
  */
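
  /* The loop above computes the LU factorization with partial pivoting
     (LINPACK dgefa): the negated multipliers of L are stored below the
     diagonal, U on and above it, and the row interchanges are recorded in
     ipvt[].  The inverse is now built in place in two stages, as in
     LINPACK dgedi: first inv(U), then inv(U)*inv(L), with the interchanges
     undone as column swaps at the end. */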

  /* compute inverse(u) */

  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */
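  /* Work backwards over the columns: for column k the stored (negated)
     multipliers of L below the diagonal are saved in work[] and zeroed in
     place; each later column j is then scaled by work[j-1] and added into
     column k, and the interchange recorded in ipvt[k-1] is undone as a
     column swap. */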
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp  = work[j-1];
      ax    = &a[4*j + 1];
      ay    = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax   = &a[k3 + 1];
      ay   = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  return(0);
}
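
/*
   Illustrative usage sketch (not part of the original source): the routine
   inverts a dense 4x4 block in place.  The block is passed as an array of
   16 MatScalar entries stored column by column, e.g.

      MatScalar block[16];                      -- filled column-major
      ierr = Kernel_A_gets_inverse_A_4(block);  -- block now holds the inverse;
                                                   a zero pivot raises SETERRQ
*/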

#if defined(PETSC_HAVE_ICL_SSE)
#include "xmmintrin.h"

int Kernel_A_gets_inverse_A_4_ICL_SSE(float *a)
{
  /*
     This routine is taken from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
          Order Number: 245043-001
          March 1999
          http://www.intel.com

     Note: Intel's SML uses row-wise storage for these small matrices,
     and PETSc uses column-wise storage. However since inv(A')=(inv(A))'
     the same code can be used here.

     Inverse of a 4x4 matrix via Cramer's Rule:
       bool Invert4x4(SMLXMatrix &);
  */
  __m128 minor0, minor1, minor2, minor3;
  __m128 row0, row1, row2, row3;
  __m128 det, tmp1;

  tmp1 = _mm_loadh_pi(_mm_loadl_pi(tmp1, (__m64*)(a)), (__m64*)(a+ 4));
  row1 = _mm_loadh_pi(_mm_loadl_pi(row1, (__m64*)(a+8)), (__m64*)(a+12));
  row0 = _mm_shuffle_ps(tmp1, row1, 0x88);
  row1 = _mm_shuffle_ps(row1, tmp1, 0xDD);
  tmp1 = _mm_loadh_pi(_mm_loadl_pi(tmp1, (__m64*)(a+ 2)), (__m64*)(a+ 6));
  row3 = _mm_loadh_pi(_mm_loadl_pi(row3, (__m64*)(a+10)), (__m64*)(a+14));
  row2 = _mm_shuffle_ps(tmp1, row3, 0x88);
  row3 = _mm_shuffle_ps(row3, tmp1, 0xDD);
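  /* The paired loads and shuffles above gather the 16 floats of the matrix
     into the four __m128 registers row0..row3, arranged the way the SML
     cofactor computation below expects them. */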
  /* ----------------------------------------------- */
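  /* Each of the blocks separated by the dashed comments forms pairwise
     products of two rows, shuffles them, and accumulates the results into
     minor0..minor3, which end up holding the cofactor (adjugate) terms. */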
  tmp1   = _mm_mul_ps(row2, row3);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  minor0 = _mm_mul_ps(row1, tmp1);
  minor1 = _mm_mul_ps(row0, tmp1);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor0 = _mm_sub_ps(_mm_mul_ps(row1, tmp1), minor0);
  minor1 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor1);
  minor1 = _mm_shuffle_ps(minor1, minor1, 0x4E);
  /* ----------------------------------------------- */
  tmp1   = _mm_mul_ps(row1, row2);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  minor0 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor0);
  minor3 = _mm_mul_ps(row0, tmp1);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row3, tmp1));
  minor3 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor3);
  minor3 = _mm_shuffle_ps(minor3, minor3, 0x4E);
  /* ----------------------------------------------- */
  tmp1   = _mm_mul_ps(_mm_shuffle_ps(row1, row1, 0x4E), row3);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  row2   = _mm_shuffle_ps(row2, row2, 0x4E);
  minor0 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor0);
  minor2 = _mm_mul_ps(row0, tmp1);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor0 = _mm_sub_ps(minor0, _mm_mul_ps(row2, tmp1));
  minor2 = _mm_sub_ps(_mm_mul_ps(row0, tmp1), minor2);
  minor2 = _mm_shuffle_ps(minor2, minor2, 0x4E);
  /* ----------------------------------------------- */
  tmp1   = _mm_mul_ps(row0, row1);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  minor2 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor2);
  minor3 = _mm_sub_ps(_mm_mul_ps(row2, tmp1), minor3);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor2 = _mm_sub_ps(_mm_mul_ps(row3, tmp1), minor2);
  minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row2, tmp1));
  /* ----------------------------------------------- */
  tmp1   = _mm_mul_ps(row0, row3);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row2, tmp1));
  minor2 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor2);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor1 = _mm_add_ps(_mm_mul_ps(row2, tmp1), minor1);
  minor2 = _mm_sub_ps(minor2, _mm_mul_ps(row1, tmp1));
  /* ----------------------------------------------- */
  tmp1   = _mm_mul_ps(row0, row2);
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0xB1);
  minor1 = _mm_add_ps(_mm_mul_ps(row3, tmp1), minor1);
  minor3 = _mm_sub_ps(minor3, _mm_mul_ps(row1, tmp1));
  tmp1   = _mm_shuffle_ps(tmp1, tmp1, 0x4E);
  minor1 = _mm_sub_ps(minor1, _mm_mul_ps(row3, tmp1));
  minor3 = _mm_add_ps(_mm_mul_ps(row1, tmp1), minor3);
  /* ----------------------------------------------- */
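  /* det is the dot product of row0 with its cofactors in minor0 (cofactor
     expansion of the determinant).  _mm_rcp_ss only gives an approximate
     reciprocal, so it is refined with one Newton-Raphson step
     (x <- 2*x - det*x*x) before being broadcast to all four lanes. */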
  det  = _mm_mul_ps(row0, minor0);
  det  = _mm_add_ps(_mm_shuffle_ps(det, det, 0x4E), det);
  det  = _mm_add_ss(_mm_shuffle_ps(det, det, 0xB1), det);
  tmp1 = _mm_rcp_ss(det);
  det  = _mm_sub_ss(_mm_add_ss(tmp1, tmp1), _mm_mul_ss(det, _mm_mul_ss(tmp1, tmp1)));
  det  = _mm_shuffle_ps(det, det, 0x00);
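  /* Scale the cofactors by 1/det and store the inverse back into a[],
     two floats at a time. */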
  minor0 = _mm_mul_ps(det, minor0);
  _mm_storel_pi((__m64*)(a),    minor0);
  _mm_storeh_pi((__m64*)(a+ 2), minor0);
  minor1 = _mm_mul_ps(det, minor1);
  _mm_storel_pi((__m64*)(a+ 4), minor1);
  _mm_storeh_pi((__m64*)(a+ 6), minor1);
  minor2 = _mm_mul_ps(det, minor2);
  _mm_storel_pi((__m64*)(a+ 8), minor2);
  _mm_storeh_pi((__m64*)(a+10), minor2);
  minor3 = _mm_mul_ps(det, minor3);
  _mm_storel_pi((__m64*)(a+12), minor3);
  _mm_storeh_pi((__m64*)(a+14), minor3);
  return(0);
}

#endif
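
/*
   Illustrative usage sketch (not part of the original source): the SSE
   variant has the same in-place, column-major calling convention as the
   portable routine above, but works on single-precision data and performs
   no pivoting or zero-determinant check, e.g.

      float block[16];                                  -- filled column-major
      ierr = Kernel_A_gets_inverse_A_4_ICL_SSE(block);  -- block now holds the
                                                           inverse
*/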