Eigen 3.2.5
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2001 Intel Corporation
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2009 Benoit Jacob <jacob.benoit.1@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

// The SSE code for the 4x4 float and double matrix inverse in this file
// comes from the following Intel library:
// http://software.intel.com/en-us/articles/optimized-matrix-library-for-use-with-the-intel-pentiumr-4-processors-sse2-instructions/
//
// Here is the respective copyright and license statement:
//
// Copyright (c) 2001 Intel Corporation.
//
// Permission is granted to use, copy, distribute and prepare derivative works
// of this library for any purpose and without fee, provided that the above
// copyright notice and this statement appear in all copies.
// Intel makes no representations about the suitability of this software for
// any purpose, and specifically disclaims all warranties.
// See LEGAL.TXT for all the legal information.

#ifndef EIGEN_INVERSE_SSE_H
#define EIGEN_INVERSE_SSE_H

namespace Eigen {

namespace internal {

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, float, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };

  static void run(const MatrixType& matrix, ResultType& result)
  {
    EIGEN_ALIGN16 const unsigned int _Sign_PNNP[4] = { 0x00000000, 0x80000000, 0x80000000, 0x00000000 };

    // Load the full matrix into registers
    __m128 _L1 = matrix.template packet<MatrixAlignment>( 0);
    __m128 _L2 = matrix.template packet<MatrixAlignment>( 4);
    __m128 _L3 = matrix.template packet<MatrixAlignment>( 8);
    __m128 _L4 = matrix.template packet<MatrixAlignment>(12);

    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds four matrix elements, each sub-matrix is represented by
    // a single register. Hence we get better locality in the calculations.
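    // For reference, write M = [A B; C D] in 2x2 blocks, X# for the adjugate
    // of a 2x2 block X and |X| for its determinant. The identities implemented
    // below (and in the double-precision version further down) are:
    //   det(M) = |A|*|D| + |B|*|C| - trace((A#*B)*(D#*C))
    //   M^{-1} = 1/det(M) * [ (|D|*A - B*(D#*C))#   (|B|*C - D*(B#*A))#
    //                         (|C|*B - A*(C#*D))#   (|A|*D - C*(A#*B))# ]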
    __m128 A, B, C, D; // the four sub-matrices
    if(!StorageOrdersMatch)
    {
      A = _mm_unpacklo_ps(_L1, _L2);
      B = _mm_unpacklo_ps(_L3, _L4);
      C = _mm_unpackhi_ps(_L1, _L2);
      D = _mm_unpackhi_ps(_L3, _L4);
    }
    else
    {
      A = _mm_movelh_ps(_L1, _L2);
      B = _mm_movehl_ps(_L2, _L1);
      C = _mm_movelh_ps(_L3, _L4);
      D = _mm_movehl_ps(_L4, _L3);
    }

    __m128 iA, iB, iC, iD,  // partial inverses of the sub-matrices
           DC, AB;
    __m128 dA, dB, dC, dD;  // determinants of the sub-matrices
    __m128 det, d, d1, d2;
    __m128 rd;              // reciprocal of the determinant

    // AB = A# * B
    AB = _mm_mul_ps(_mm_shuffle_ps(A,A,0x0F), B);
    AB = _mm_sub_ps(AB,_mm_mul_ps(_mm_shuffle_ps(A,A,0xA5), _mm_shuffle_ps(B,B,0x4E)));
    // DC = D# * C
    DC = _mm_mul_ps(_mm_shuffle_ps(D,D,0x0F), C);
    DC = _mm_sub_ps(DC,_mm_mul_ps(_mm_shuffle_ps(D,D,0xA5), _mm_shuffle_ps(C,C,0x4E)));

    // dA = |A|
    dA = _mm_mul_ps(_mm_shuffle_ps(A, A, 0x5F),A);
    dA = _mm_sub_ss(dA, _mm_movehl_ps(dA,dA));
    // dB = |B|
    dB = _mm_mul_ps(_mm_shuffle_ps(B, B, 0x5F),B);
    dB = _mm_sub_ss(dB, _mm_movehl_ps(dB,dB));

    // dC = |C|
    dC = _mm_mul_ps(_mm_shuffle_ps(C, C, 0x5F),C);
    dC = _mm_sub_ss(dC, _mm_movehl_ps(dC,dC));
    // dD = |D|
    dD = _mm_mul_ps(_mm_shuffle_ps(D, D, 0x5F),D);
    dD = _mm_sub_ss(dD, _mm_movehl_ps(dD,dD));

    // d = trace(AB*DC) = trace(A#*B*D#*C)
    d = _mm_mul_ps(_mm_shuffle_ps(DC,DC,0xD8),AB);

    // iD = C*A#*B
    iD = _mm_mul_ps(_mm_shuffle_ps(C,C,0xA0), _mm_movelh_ps(AB,AB));
    iD = _mm_add_ps(iD,_mm_mul_ps(_mm_shuffle_ps(C,C,0xF5), _mm_movehl_ps(AB,AB)));
    // iA = B*D#*C
    iA = _mm_mul_ps(_mm_shuffle_ps(B,B,0xA0), _mm_movelh_ps(DC,DC));
    iA = _mm_add_ps(iA,_mm_mul_ps(_mm_shuffle_ps(B,B,0xF5), _mm_movehl_ps(DC,DC)));

    // d = trace(AB*DC) = trace(A#*B*D#*C) [continued]
    d  = _mm_add_ps(d, _mm_movehl_ps(d, d));
    d  = _mm_add_ss(d, _mm_shuffle_ps(d, d, 1));
    d1 = _mm_mul_ss(dA,dD);
    d2 = _mm_mul_ss(dB,dC);

    // iD = D*|A| - C*A#*B
    iD = _mm_sub_ps(_mm_mul_ps(D,_mm_shuffle_ps(dA,dA,0)), iD);

    // iA = A*|D| - B*D#*C;
    iA = _mm_sub_ps(_mm_mul_ps(A,_mm_shuffle_ps(dD,dD,0)), iA);

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_sub_ss(_mm_add_ss(d1,d2),d);
    rd  = _mm_div_ss(_mm_set_ss(1.0f), det);

//  #ifdef ZERO_SINGULAR
//  rd = _mm_and_ps(_mm_cmpneq_ss(det,_mm_setzero_ps()), rd);
//  #endif

    // iB = D * (A#B)# = D*B#*A
    iB = _mm_mul_ps(D, _mm_shuffle_ps(AB,AB,0x33));
    iB = _mm_sub_ps(iB, _mm_mul_ps(_mm_shuffle_ps(D,D,0xB1), _mm_shuffle_ps(AB,AB,0x66)));
    // iC = A * (D#C)# = A*C#*D
    iC = _mm_mul_ps(A, _mm_shuffle_ps(DC,DC,0x33));
    iC = _mm_sub_ps(iC, _mm_mul_ps(_mm_shuffle_ps(A,A,0xB1), _mm_shuffle_ps(DC,DC,0x66)));

    rd = _mm_shuffle_ps(rd,rd,0);
    rd = _mm_xor_ps(rd, _mm_load_ps((float*)_Sign_PNNP));

    // iB = C*|B| - D*B#*A
    iB = _mm_sub_ps(_mm_mul_ps(C,_mm_shuffle_ps(dB,dB,0)), iB);

    // iC = B*|C| - A*C#*D;
    iC = _mm_sub_ps(_mm_mul_ps(B,_mm_shuffle_ps(dC,dC,0)), iC);

    // iX = iX / det
    iA = _mm_mul_ps(rd,iA);
    iB = _mm_mul_ps(rd,iB);
    iC = _mm_mul_ps(rd,iC);
    iD = _mm_mul_ps(rd,iD);
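    // Write-back: for each 2x2 block iX, the shuffles below gather the entries
    // of the adjugate iX# (row 0 of the output block from elements 3 and 1,
    // row 1 from elements 2 and 0), while rd already carries the (+,-,-,+)
    // sign pattern of the adjugate together with the factor 1/det.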
    result.template writePacket<ResultAlignment>( 0, _mm_shuffle_ps(iA,iB,0x77));
    result.template writePacket<ResultAlignment>( 4, _mm_shuffle_ps(iA,iB,0x22));
    result.template writePacket<ResultAlignment>( 8, _mm_shuffle_ps(iC,iD,0x77));
    result.template writePacket<ResultAlignment>(12, _mm_shuffle_ps(iC,iD,0x22));
  }

};

template<typename MatrixType, typename ResultType>
struct compute_inverse_size4<Architecture::SSE, double, MatrixType, ResultType>
{
  enum {
    MatrixAlignment    = bool(MatrixType::Flags&AlignedBit),
    ResultAlignment    = bool(ResultType::Flags&AlignedBit),
    StorageOrdersMatch = (MatrixType::Flags&RowMajorBit) == (ResultType::Flags&RowMajorBit)
  };
  static void run(const MatrixType& matrix, ResultType& result)
  {
    const __m128d _Sign_NP = _mm_castsi128_pd(_mm_set_epi32(0x0,0x0,0x80000000,0x0));
    const __m128d _Sign_PN = _mm_castsi128_pd(_mm_set_epi32(0x80000000,0x0,0x0,0x0));

    // The inverse is calculated using the "Divide and Conquer" technique. The
    // original matrix is divided into four 2x2 sub-matrices. Since each
    // register holds two matrix elements, each sub-matrix consists of two
    // registers. Hence we get better locality in the calculations.
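    // Each 2x2 sub-matrix is held in a pair of __m128d registers (one per
    // row). When the storage orders of the input and the result differ, the
    // unpacklo/unpackhi pairs below transpose each sub-matrix on the fly while
    // loading; transposing the full matrix also exchanges the roles of the
    // off-diagonal blocks B and C, hence the swapped load targets.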
    // the four sub-matrices
    __m128d A1, A2, B1, B2, C1, C2, D1, D2;

    if(StorageOrdersMatch)
    {
      A1 = matrix.template packet<MatrixAlignment>( 0); B1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); B2 = matrix.template packet<MatrixAlignment>( 6);
      C1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      C2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
    }
    else
    {
      __m128d tmp;
      A1 = matrix.template packet<MatrixAlignment>( 0); C1 = matrix.template packet<MatrixAlignment>( 2);
      A2 = matrix.template packet<MatrixAlignment>( 4); C2 = matrix.template packet<MatrixAlignment>( 6);
      tmp = A1;
      A1 = _mm_unpacklo_pd(A1,A2);
      A2 = _mm_unpackhi_pd(tmp,A2);
      tmp = C1;
      C1 = _mm_unpacklo_pd(C1,C2);
      C2 = _mm_unpackhi_pd(tmp,C2);

      B1 = matrix.template packet<MatrixAlignment>( 8); D1 = matrix.template packet<MatrixAlignment>(10);
      B2 = matrix.template packet<MatrixAlignment>(12); D2 = matrix.template packet<MatrixAlignment>(14);
      tmp = B1;
      B1 = _mm_unpacklo_pd(B1,B2);
      B2 = _mm_unpackhi_pd(tmp,B2);
      tmp = D1;
      D1 = _mm_unpacklo_pd(D1,D2);
      D2 = _mm_unpackhi_pd(tmp,D2);
    }

    __m128d iA1, iA2, iB1, iB2, iC1, iC2, iD1, iD2, // partial inverses of the sub-matrices
            DC1, DC2, AB1, AB2;
    __m128d dA, dB, dC, dD;                         // determinants of the sub-matrices
    __m128d det, d1, d2, rd;

    // dA = |A|
    dA = _mm_shuffle_pd(A2, A2, 1);
    dA = _mm_mul_pd(A1, dA);
    dA = _mm_sub_sd(dA, _mm_shuffle_pd(dA,dA,3));
    // dB = |B|
    dB = _mm_shuffle_pd(B2, B2, 1);
    dB = _mm_mul_pd(B1, dB);
    dB = _mm_sub_sd(dB, _mm_shuffle_pd(dB,dB,3));

    // AB = A# * B
    AB1 = _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,3));
    AB2 = _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,0));
    AB1 = _mm_sub_pd(AB1, _mm_mul_pd(B2, _mm_shuffle_pd(A1,A1,3)));
    AB2 = _mm_sub_pd(AB2, _mm_mul_pd(B1, _mm_shuffle_pd(A2,A2,0)));

    // dC = |C|
    dC = _mm_shuffle_pd(C2, C2, 1);
    dC = _mm_mul_pd(C1, dC);
    dC = _mm_sub_sd(dC, _mm_shuffle_pd(dC,dC,3));
    // dD = |D|
    dD = _mm_shuffle_pd(D2, D2, 1);
    dD = _mm_mul_pd(D1, dD);
    dD = _mm_sub_sd(dD, _mm_shuffle_pd(dD,dD,3));

    // DC = D# * C
    DC1 = _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,3));
    DC2 = _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,0));
    DC1 = _mm_sub_pd(DC1, _mm_mul_pd(C2, _mm_shuffle_pd(D1,D1,3)));
    DC2 = _mm_sub_pd(DC2, _mm_mul_pd(C1, _mm_shuffle_pd(D2,D2,0)));

    // rd = trace(AB*DC) = trace(A#*B*D#*C)
    d1 = _mm_mul_pd(AB1, _mm_shuffle_pd(DC1, DC2, 0));
    d2 = _mm_mul_pd(AB2, _mm_shuffle_pd(DC1, DC2, 3));
    rd = _mm_add_pd(d1, d2);
    rd = _mm_add_sd(rd, _mm_shuffle_pd(rd, rd,3));

    // iD = C*A#*B
    iD1 = _mm_mul_pd(AB1, _mm_shuffle_pd(C1,C1,0));
    iD2 = _mm_mul_pd(AB1, _mm_shuffle_pd(C2,C2,0));
    iD1 = _mm_add_pd(iD1, _mm_mul_pd(AB2, _mm_shuffle_pd(C1,C1,3)));
    iD2 = _mm_add_pd(iD2, _mm_mul_pd(AB2, _mm_shuffle_pd(C2,C2,3)));

    // iA = B*D#*C
    iA1 = _mm_mul_pd(DC1, _mm_shuffle_pd(B1,B1,0));
    iA2 = _mm_mul_pd(DC1, _mm_shuffle_pd(B2,B2,0));
    iA1 = _mm_add_pd(iA1, _mm_mul_pd(DC2, _mm_shuffle_pd(B1,B1,3)));
    iA2 = _mm_add_pd(iA2, _mm_mul_pd(DC2, _mm_shuffle_pd(B2,B2,3)));

    // iD = D*|A| - C*A#*B
    dA  = _mm_shuffle_pd(dA,dA,0);
    iD1 = _mm_sub_pd(_mm_mul_pd(D1, dA), iD1);
    iD2 = _mm_sub_pd(_mm_mul_pd(D2, dA), iD2);

    // iA = A*|D| - B*D#*C;
    dD  = _mm_shuffle_pd(dD,dD,0);
    iA1 = _mm_sub_pd(_mm_mul_pd(A1, dD), iA1);
    iA2 = _mm_sub_pd(_mm_mul_pd(A2, dD), iA2);

    d1 = _mm_mul_sd(dA, dD);
    d2 = _mm_mul_sd(dB, dC);

    // iB = D * (A#B)# = D*B#*A
    iB1 = _mm_mul_pd(D1, _mm_shuffle_pd(AB2,AB1,1));
    iB2 = _mm_mul_pd(D2, _mm_shuffle_pd(AB2,AB1,1));
    iB1 = _mm_sub_pd(iB1, _mm_mul_pd(_mm_shuffle_pd(D1,D1,1), _mm_shuffle_pd(AB2,AB1,2)));
    iB2 = _mm_sub_pd(iB2, _mm_mul_pd(_mm_shuffle_pd(D2,D2,1), _mm_shuffle_pd(AB2,AB1,2)));

    // det = |A|*|D| + |B|*|C| - trace(A#*B*D#*C)
    det = _mm_add_sd(d1, d2);
    det = _mm_sub_sd(det, rd);

    // iC = A * (D#C)# = A*C#*D
    iC1 = _mm_mul_pd(A1, _mm_shuffle_pd(DC2,DC1,1));
    iC2 = _mm_mul_pd(A2, _mm_shuffle_pd(DC2,DC1,1));
    iC1 = _mm_sub_pd(iC1, _mm_mul_pd(_mm_shuffle_pd(A1,A1,1), _mm_shuffle_pd(DC2,DC1,2)));
    iC2 = _mm_sub_pd(iC2, _mm_mul_pd(_mm_shuffle_pd(A2,A2,1), _mm_shuffle_pd(DC2,DC1,2)));

    rd = _mm_div_sd(_mm_set_sd(1.0), det);
//  #ifdef ZERO_SINGULAR
//  rd = _mm_and_pd(_mm_cmpneq_sd(det,_mm_setzero_pd()), rd);
//  #endif
    rd = _mm_shuffle_pd(rd,rd,0);

    // iB = C*|B| - D*B#*A
    dB  = _mm_shuffle_pd(dB,dB,0);
    iB1 = _mm_sub_pd(_mm_mul_pd(C1, dB), iB1);
    iB2 = _mm_sub_pd(_mm_mul_pd(C2, dB), iB2);

    d1 = _mm_xor_pd(rd, _Sign_PN);
    d2 = _mm_xor_pd(rd, _Sign_NP);

    // iC = B*|C| - A*C#*D;
    dC  = _mm_shuffle_pd(dC,dC,0);
    iC1 = _mm_sub_pd(_mm_mul_pd(B1, dC), iC1);
    iC2 = _mm_sub_pd(_mm_mul_pd(B2, dC), iC2);
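    // Write-back: for each block, _mm_shuffle_pd gathers the adjugate entries
    // (row 0 of the output block from iX2[1] and iX1[1], row 1 from iX2[0] and
    // iX1[0]), while d1 = (+1/det, -1/det) and d2 = (-1/det, +1/det) apply
    // both the adjugate signs and the division by the determinant.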
    result.template writePacket<ResultAlignment>( 0, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 3), d1)); // iA# / det
    result.template writePacket<ResultAlignment>( 4, _mm_mul_pd(_mm_shuffle_pd(iA2, iA1, 0), d2));
    result.template writePacket<ResultAlignment>( 2, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 3), d1)); // iB# / det
    result.template writePacket<ResultAlignment>( 6, _mm_mul_pd(_mm_shuffle_pd(iB2, iB1, 0), d2));
    result.template writePacket<ResultAlignment>( 8, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 3), d1)); // iC# / det
    result.template writePacket<ResultAlignment>(12, _mm_mul_pd(_mm_shuffle_pd(iC2, iC1, 0), d2));
    result.template writePacket<ResultAlignment>(10, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 3), d1)); // iD# / det
    result.template writePacket<ResultAlignment>(14, _mm_mul_pd(_mm_shuffle_pd(iD2, iD1, 0), d2));
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_INVERSE_SSE_H
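For context, this kernel is not meant to be called directly: compute_inverse_size4 is an internal specialization that Eigen's inverse dispatch selects for fixed-size 4x4 matrices when SSE is enabled. A minimal usage sketch of the user-facing entry point (a standalone program, assuming it is built against this Eigen version with SSE support):

#include <iostream>
#include <Eigen/Dense>

int main()
{
  // Inverting a 4x4 float matrix; on an SSE build this dispatches internally
  // to the float specialization of compute_inverse_size4 above.
  Eigen::Matrix4f m   = Eigen::Matrix4f::Random();
  Eigen::Matrix4f inv = m.inverse();
  std::cout << (m * inv).isIdentity(1e-4f) << std::endl; // expect 1
  return 0;
}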