Actual source code: dgefa4.c


/*
   Inverts a 4 by 4 matrix using Gaussian elimination with partial pivoting.

   Used by the sparse factorization routines in
     src/mat/impls/baij/seq

   This is a combination of the Linpack routines
   dgefa() and dgedi() specialized for a size of 4.
*/
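/*
   Illustrative usage sketch (hypothetical call site; inside PETSc this
   kernel is reached through the BAIJ factorization routines rather than
   called directly):

     MatScalar blk[16];   // one dense 4x4 block
     PetscBool zeropivot;
     // ... fill blk ...
     PetscCall(PetscKernel_A_gets_inverse_A_4(blk, 0.0, PETSC_TRUE, &zeropivot));
     // blk now holds its own inverse; zeropivot reports whether a zero pivot was hit
*/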
#include <petscsys.h>

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a, PetscReal shift, PetscBool allowzeropivot, PetscBool *zeropivotdetected)
{
  PetscInt   i__2, i__3, kp1, j, k, l, ll, i, ipvt[4], kb, k3;
  PetscInt   k4, j3;
  MatScalar *aa, *ax, *ay, work[16], stmp;
  MatReal    tmp, max;

  PetscFunctionBegin;
  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
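  /* scale the caller's shift by (roughly) the mean magnitude of the diagonal
     entries; the 1.e-12 term guards against an all-zero diagonal */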
  shift = .25 * shift * (1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments */
  a -= 5;
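  /* after this shift, entry (i,j) of the block, in the Linpack convention of
     1-based, column-first indexing, is a[i + 4*j] */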

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4 * k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll = 1; ll < i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) {
        max = tmp;
        l   = ll + 1;
      }
    }
    l += k - 1;
    ipvt[k - 1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscCall(PetscInfo(NULL, "Zero pivot, row %" PetscInt_FMT "\n", k - 1));
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row %" PetscInt_FMT, k - 1);
      } else {
        /* the shift is applied to a SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
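    /* column k below the diagonal now holds the negated elimination
       multipliers, so the row update below is an addition */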

    /* row elimination with column indexing */
    ax = &a[k4 + 1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1 + k + j3];
      for (ll = 0; ll < i__3; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  ipvt[3] = 4;
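  /* check the final (4,4) pivot; note that no shift is applied to it */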
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscCall(PetscInfo(NULL, "Zero pivot, row 3\n"));
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row 3");
  }

  /* Now form the inverse */
  /* compute inverse(u) */
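  /* as in Linpack dgedi: overwrite U with inverse(U) column by column, fold in
     inverse(L), and apply the recorded row interchanges as column swaps at the end */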
  for (k = 1; k <= 4; ++k) {
    k3    = 4 * k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4 * j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll = 0; ll < k; ll++) ay[ll] += stmp * ax[ll];
    }
  }

  /* form inverse(u)*inverse(l) */
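  /* work[] temporarily saves the multipliers from column k of L while that
     column of the inverse is accumulated */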
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4 * k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i - 1] = aa[i];
      aa[i]       = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j - 1];
      ax   = &a[4 * j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp * ax[0];
      ay[1] += stmp * ax[1];
      ay[2] += stmp * ax[2];
      ay[3] += stmp * ax[3];
    }
    l = ipvt[k - 1];
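    /* a row interchange during factorization becomes a column interchange of the inverse */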
    if (l != k) {
      ax    = &a[k3 + 1];
      ay    = &a[4 * l + 1];
      stmp  = ax[0];
      ax[0] = ay[0];
      ay[0] = stmp;
      stmp  = ax[1];
      ax[1] = ay[1];
      ay[1] = stmp;
      stmp  = ax[2];
      ax[2] = ay[2];
      ay[2] = stmp;
      stmp  = ax[3];
      ax[3] = ay[3];
      ay[3] = stmp;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

#if defined(PETSC_HAVE_SSE)
  #include PETSC_HAVE_SSE

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's rule:
     bool Invert4x4(SMLXMatrix &);
  */
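  /*
     Cramer's rule computes inverse(A) = adjugate(A) / det(A). The blocks
     below compute the 16 cofactors in SIMD pairs, form det(A) as a dot
     product of a row of A with its cofactors, and scale by a refined
     reciprocal of the determinant.
  */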
  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */
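  /* load the 4x4 matrix and transpose it into XMM3, XMM5, XMM4, XMM6 */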

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_4, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_8, XMM5)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_12, XMM5)

  SSE_COPY_PS(XMM3, XMM0)
  SSE_SHUFFLE(XMM3, XMM5, 0x88)

  SSE_SHUFFLE(XMM5, XMM0, 0xDD)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_2, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_6, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_10, XMM6)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_14, XMM6)

  SSE_COPY_PS(XMM4, XMM0)
  SSE_SHUFFLE(XMM4, XMM6, 0x88)

  SSE_SHUFFLE(XMM6, XMM0, 0xDD)

  /* ----------------------------------------------- */
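  /* the following blocks accumulate the cofactors in SIMD pairs from
     shuffled products of the transposed rows */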

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM5)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM1, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_4, XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)

  SSE_SHUFFLE(XMM0, XMM0, 0xB1)

  SSE_COPY_PS(XMM2, XMM6)
  SSE_MULT_PS(XMM2, XMM0)
  SSE_ADD_PS(XMM2, XMM1)

  SSE_COPY_PS(XMM7, XMM3)
  SSE_MULT_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM0)
  SSE_SUB_PS(XMM2, XMM1)

  SSE_MULT_PS(XMM0, XMM3)
  SSE_SUB_PS(XMM0, XMM7)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM5)
  SSE_SHUFFLE(XMM7, XMM5, 0x4E)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_COPY_PS(XMM0, XMM4)
  SSE_MULT_PS(XMM0, XMM7)
  SSE_ADD_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM4)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM0, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_0, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM5)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM0, XMM6)
  SSE_MULT_PS(XMM0, XMM1)
  SSE_ADD_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_SUB_PS_M(XMM2, SSE_ARG_1, FLOAT_12)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM6)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_SUB_PS(XMM2, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM6)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_4, XMM0)
  SSE_SUB_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM5)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_ADD_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_ADD_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM5)
  SSE_SUB_PS(XMM2, XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4, XMM3)

  SSE_SHUFFLE(XMM4, XMM4, 0xB1)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM4)
  SSE_ADD_PS(XMM1, XMM7)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_12, XMM7)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_MULT_PS(XMM6, XMM4)
  SSE_SUB_PS(XMM1, XMM6)

  SSE_MULT_PS(XMM5, XMM4)
  SSE_ADD_PS(XMM5, XMM7)

  /* ----------------------------------------------- */
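  /* form det(A) as the dot product of the first row of A with its cofactors,
     then compute 1/det with a rcpss estimate refined by one Newton-Raphson
     step and broadcast it to all four lanes */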

  SSE_LOAD_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_MULT_PS(XMM3, XMM0)

  SSE_COPY_PS(XMM4, XMM3)
  SSE_SHUFFLE(XMM4, XMM3, 0x4E)
  SSE_ADD_PS(XMM4, XMM3)

  SSE_COPY_PS(XMM6, XMM4)
  SSE_SHUFFLE(XMM6, XMM4, 0xB1)
  SSE_ADD_SS(XMM6, XMM4)

  SSE_COPY_PS(XMM3, XMM6)
  SSE_RECIP_SS(XMM3, XMM6)
  SSE_COPY_SS(XMM4, XMM3)
  SSE_ADD_SS(XMM4, XMM3)
  SSE_MULT_SS(XMM3, XMM3)
  SSE_MULT_SS(XMM6, XMM3)
  SSE_SUB_SS(XMM4, XMM6)

  SSE_SHUFFLE(XMM4, XMM4, 0x00)
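  /* scale all four rows of cofactors by 1/det and store the inverse */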

  SSE_MULT_PS(XMM0, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_2, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_4, XMM1)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_6, XMM1)

  SSE_MULT_PS(XMM2, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_8, XMM2)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_10, XMM2)

  SSE_MULT_PS(XMM4, XMM5)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_12, XMM4)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_14, XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  PetscFunctionReturn(PETSC_SUCCESS);
}

#endif