
plapack.h

// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: plapack.h,v 1.21 2004/06/29 13:21:20 tihocan Exp $
 * This file is part of the PLearn library.
 ******************************************************* */
#ifndef plapack_h
#define plapack_h

#include "Mat.h"
#include "TMat_maths.h"

#include "lapack_proto.h"

namespace PLearn {
using namespace std;

// Direct lapack calls, type independent
inline void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ dsyevx_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ ssyevx_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xgesdd_(char* JOBZ, int* M, int* N, double* A, int* LDA, double* S, double* U, int* LDU, double* VT, int* LDVT, double* WORK, int* LWORK, int* IWORK, int* INFO)
{ dgesdd_(JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, LWORK, IWORK, INFO); }

inline void lapack_Xgesdd_(char* JOBZ, int* M, int* N, float* A, int* LDA, float* S, float* U, int* LDU, float* VT, int* LDVT, float* WORK, int* LWORK, int* IWORK, int* INFO)
{ sgesdd_(JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, LWORK, IWORK, INFO); }

inline void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, int* ISUPPZ, float* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO)
{ ssyevr_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, IWORK, LIWORK, INFO); }

inline void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, int* ISUPPZ, double* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO)
{ dsyevr_(JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, IWORK, LIWORK, INFO); }

inline void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* B, int* LDB, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ dsygvx_(ITYPE, JOBZ, RANGE, UPLO, N, A, LDA, B, LDB, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }

inline void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* B, int* LDB, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO)
{ ssygvx_(ITYPE, JOBZ, RANGE, UPLO, N, A, LDA, B, LDB, VL, VU, IL, IU, ABSTOL, M, W, Z, LDZ, WORK, LWORK, IWORK, IFAIL, INFO); }


// Computes the eigenvalues, and optionally the eigenvectors, of the symmetric
// matrix A (whose contents are destroyed by the call).  RANGE selects all
// eigenvalues ('A'), those with 0-based index in [low, high] ('I'), or those
// in the interval (low, high] ('V').  Eigenvectors are computed only if
// eigenvecs is non-empty on entry; they are returned in its rows, and the
// eigenvalues come back in ascending order.
// (will work for float and double)
template<class num_t>
void lapackEIGEN(const TMat<num_t>& A, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
{
#ifdef BOUNDCHECK
    if(A.length()!=A.width())
        PLERROR("In lapackEIGEN length and width of A differ, it should be symmetric!");
#endif

    char JOBZ = eigenvecs.isEmpty() ? 'N' : 'V';
    char UPLO = 'U';
    int N = A.length();
    int LDA = A.mod();

    int IL=0, IU=0;
    num_t VL=0, VU=0;

    eigenvals.resize(N);
    int M; // The number of eigenvalues returned

    switch(RANGE)
    {
    case 'A':
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    case 'I':
        IL = int(low)+1; // +1 because fortran indexes start at 1
        IU = int(high)+1;
        if(JOBZ=='V')
            eigenvecs.resize(IU-IL+1, N);
        break;
    case 'V':
        VL = low;
        VU = high;
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    default:
        PLERROR("In lapackEIGEN: invalid RANGE character: %c",RANGE);
    }

    num_t* Z = 0;
    int LDZ = 1;
    if(eigenvecs.isNotEmpty())
    {
        Z = eigenvecs.data();
        LDZ = eigenvecs.mod();
    }

    // temporary work vectors
    static TVec<num_t> WORK;
    static TVec<int> IWORK;
    static TVec<int> IFAIL;

    WORK.resize(1);
    IWORK.resize(5*N);
    IFAIL.resize(N);

    int LWORK = -1;
    int INFO;

    // first call to find optimal work size
    lapack_Xsyevx_( &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );

    if(INFO!=0)
        PLERROR("In lapackEIGEN, problem in first call to [sd]syevx_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = (int) WORK[0]; // optimal size
    WORK.resize(LWORK);

    // second call to do the computation
    lapack_Xsyevx_( &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );

    if(INFO!=0)
        PLERROR("In lapackEIGEN, problem when calling [sd]syevx_ to perform computation, returned INFO = %d",INFO);

    eigenvals.resize(M);
    if(JOBZ=='V')
        eigenvecs.resize(M, N);
}
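// Usage sketch for lapackEIGEN (illustrative only, not part of the original
// header; the names `symm`, `evals`, `evecs` and the dimension `n` are
// hypothetical).  It assumes the conventions of the code above: the input
// matrix is overwritten, eigenvalues come back in ascending order, and
// eigenvectors are returned in the rows of `evecs` only if `evecs` is
// non-empty on entry.
//
//     int n = 10;
//     Mat symm(n, n);                    // fill with a symmetric matrix; contents will be destroyed
//     Vec evals;
//     Mat evecs(n, n);                   // non-empty => eigenvectors are requested as well
//     lapackEIGEN(symm, evals, evecs);   // RANGE='A': all n eigenpairs
//     // evals[i] is the i-th smallest eigenvalue, evecs(i) its eigenvector.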
// Same as lapackEIGEN above, but solves the generalized symmetric-definite
// eigenproblem defined by the matrix pencil (A,B), dispatching to [sd]sygvx_.
// ITYPE selects the problem type as in LAPACK's xSYGVX
// (1: A*x = lambda*B*x,  2: A*B*x = lambda*x,  3: B*A*x = lambda*x).
// (will work for float and double)
template<class num_t>
void lapackGeneralizedEIGEN(const TMat<num_t>& A, const TMat<num_t>& B, int ITYPE, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
{
#ifdef BOUNDCHECK
    if(A.length()!=A.width())
        PLERROR("In lapackGeneralizedEIGEN length and width of A differ, it should be symmetric!");
#endif

    char JOBZ = eigenvecs.isEmpty() ? 'N' : 'V';
    char UPLO = 'U';
    int N = A.length(); // The order of the matrix pencil (A,B)
    int LDA = A.mod();
    int LDB = B.mod();

    int IL=0, IU=0;
    num_t VL=0, VU=0;

    eigenvals.resize(N);
    int M; // The number of eigenvalues returned

    switch(RANGE)
    {
    case 'A':
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    case 'I':
        IL = int(low)+1; // +1 because fortran indexes start at 1
        IU = int(high)+1;
        if(JOBZ=='V')
            eigenvecs.resize(IU-IL+1, N);
        break;
    case 'V':
        VL = low;
        VU = high;
        if(JOBZ=='V')
            eigenvecs.resize(N, N);
        break;
    default:
        PLERROR("In lapackGeneralizedEIGEN: invalid RANGE character: %c",RANGE);
    }

    num_t* Z = 0;
    int LDZ = 1;
    if(eigenvecs.isNotEmpty())
    {
        Z = eigenvecs.data();
        LDZ = eigenvecs.mod();
    }

    // temporary work vectors
    static TVec<num_t> WORK;
    static TVec<int> IWORK;
    static TVec<int> IFAIL;

    WORK.resize(1);
    IWORK.resize(5*N);
    IFAIL.resize(N);

    int LWORK = -1;
    int INFO;

    // first call to find optimal work size
    lapack_Xsygvx_( &ITYPE, &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, B.data(), &LDB, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );

    if(INFO!=0)
        PLERROR("In lapackGeneralizedEIGEN, problem in first call to [sd]sygvx_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = (int) WORK[0]; // optimal size
    WORK.resize(LWORK);

    // second call to do the computation
    lapack_Xsygvx_( &ITYPE, &JOBZ, &RANGE, &UPLO, &N, A.data(), &LDA, B.data(), &LDB, &VL, &VU,
                    &IL, &IU, &ABSTOL, &M, eigenvals.data(), Z, &LDZ,
                    WORK.data(), &LWORK, IWORK.data(), IFAIL.data(), &INFO );

    if(INFO!=0)
        PLERROR("In lapackGeneralizedEIGEN, problem when calling [sd]sygvx_ to perform computation, returned INFO = %d",INFO);

    eigenvals.resize(M);
    if(JOBZ=='V')
        eigenvecs.resize(M, N);
}
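// Usage sketch for lapackGeneralizedEIGEN (illustrative only, not part of the
// original header; `A`, `B`, `evals`, `evecs` and `n` are hypothetical names).
// With ITYPE=1 this solves A*x = lambda*B*x for a symmetric A and a symmetric
// positive-definite B; both input matrices are modified by the underlying
// LAPACK routine.
//
//     int n = 5;
//     Mat A(n, n), B(n, n);      // fill A (symmetric) and B (symmetric positive definite)
//     Vec evals;
//     Mat evecs(n, n);           // non-empty => eigenvectors are requested
//     lapackGeneralizedEIGEN(A, B, 1, evals, evecs);
//     // evals and the rows of evecs hold the generalized eigenpairs,
//     // eigenvalues in ascending order.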
// Computes the k largest eigenvalues (in decreasing order) and the
// corresponding eigenvectors (in the rows of eigen_vectors) of the symmetric
// matrix m, whose contents are destroyed by the call.
// (will work for float and double)
template<class num_t>
void eigenVecOfSymmMat(TMat<num_t>& m, int k, TVec<num_t>& eigen_values, TMat<num_t>& eigen_vectors)
{
    eigen_vectors.resize(k, m.width());
    eigen_values.resize(k);
    // FASTER
    if(k >= m.width())
        lapackEIGEN(m, eigen_values, eigen_vectors, 'A', num_t(0), num_t(0));
    else
        lapackEIGEN(m, eigen_values, eigen_vectors, 'I', num_t(m.width()-k), num_t(m.width()-1));

    // put largest (rather than smallest) first!
    eigen_values.swap();
    eigen_vectors.swapUpsideDown();
}

// Same as eigenVecOfSymmMat, but for the generalized eigenproblem on the
// matrix pencil (m1, m2), with itype as in lapackGeneralizedEIGEN.
// (will work for float and double)
template<class num_t>
void generalizedEigenVecOfSymmMat(TMat<num_t>& m1, TMat<num_t>& m2, int itype, int k, TVec<num_t>& eigen_values, TMat<num_t>& eigen_vectors)
{
    if(m1.length() != m2.length() || m1.width() != m2.width())
        PLERROR("In generalizedEigenVecOfSymmMat, m1 and m2 must have the same size");

    eigen_vectors.resize(k, m1.width());
    eigen_values.resize(k);
    // FASTER
    if(k >= m1.width())
        lapackGeneralizedEIGEN(m1, m2, itype, eigen_values, eigen_vectors, 'A', num_t(0), num_t(0));
    else
        lapackGeneralizedEIGEN(m1, m2, itype, eigen_values, eigen_vectors, 'I', num_t(m1.width()-k), num_t(m1.width()-1));

    // put largest (rather than smallest) first!
    eigen_values.swap();
    eigen_vectors.swapUpsideDown();
}
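// Usage sketch for eigenVecOfSymmMat (illustrative only, not part of the
// original header; `covar`, `d` and `k` are hypothetical names).  A typical
// use is extracting the top-k principal directions of a covariance matrix;
// note that the input matrix is destroyed.
//
//     int d = 20, k = 3;
//     Mat covar(d, d);          // fill with a symmetric (e.g. covariance) matrix
//     Vec eigen_values;
//     Mat eigen_vectors;
//     eigenVecOfSymmMat(covar, k, eigen_values, eigen_vectors);
//     // eigen_values[0] >= eigen_values[1] >= ... ; eigen_vectors(i) is the
//     // eigenvector associated with eigen_values[i].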
// Computes the SVD A = U.S.Vt via [sd]gesdd_.  Because LAPACK expects
// column-major (fortran) storage while TMat is row-major, this routine takes
// At (the transpose of A) and returns Ut (the transpose of U) and V, so that
// At = V.S.Ut.  See the SVD() wrapper below for the natural row-major
// interface.
// (will work for float and double)
template<class num_t>
void lapackSVD(const TMat<num_t>& At, TMat<num_t>& Ut, TVec<num_t>& S, TMat<num_t>& V, char JOBZ='A', real safeguard = 1)
{
    int M = At.width();
    int N = At.length();
    int LDA = At.mod();
    int min_M_N = min(M,N);
    S.resize(min_M_N);

    switch(JOBZ)
    {
    case 'A':
        Ut.resize(M,M);
        V.resize(N,N);
        break;
    case 'S':
        Ut.resize(min_M_N, M);
        V.resize(N, min_M_N);
        break;
    case 'O':
        if(M<N)
            Ut.resize(M,M); // and V is not used
        else
            V.resize(N,N);  // and Ut is not used
        break;
    case 'N':
        break;
    default:
        PLERROR("In lapackSVD, bad JOBZ argument: %c",JOBZ);
    }

    int LDU = 1;
    int LDVT = 1;
    num_t* U = 0;
    num_t* VT = 0;

    if(V.isNotEmpty())
    {
        LDVT = V.mod();
        VT = V.data();
    }
    if(Ut.isNotEmpty())
    {
        LDU = Ut.mod();
        U = Ut.data();
    }

    static TVec<num_t> WORK;
    WORK.resize(1);
    int LWORK = -1;

    static TVec<int> IWORK;
    IWORK.resize(8*min_M_N);

    int INFO;

    // first call to find optimal work size
    lapack_Xgesdd_(&JOBZ, &M, &N, At.data(), &LDA, S.data(), U, &LDU, VT, &LDVT, WORK.data(), &LWORK, IWORK.data(), &INFO);

    if(INFO!=0)
        PLERROR("In lapackSVD, problem in first call to [sd]gesdd_ to get optimal work size, returned INFO = %d",INFO);

    // make sure we have enough space
    LWORK = int(WORK[0] * safeguard + 0.5); // optimal size (safeguard may be used to make sure it doesn't crash on some rare occasions)
    WORK.resize(LWORK);

    // second call to do the computation
    lapack_Xgesdd_(&JOBZ, &M, &N, At.data(), &LDA, S.data(), U, &LDU, VT, &LDVT, WORK.data(), &LWORK, IWORK.data(), &INFO );

    if(INFO!=0)
    {
        cerr << At << endl;
        PLERROR("In lapackSVD, problem when calling [sd]gesdd_ to perform computation, returned INFO = %d",INFO);
    }
}

// Performs the SVD decomposition A = U.S.Vt and returns U, the vector S of
// singular values (in decreasing order) and Vt.  The contents of A are
// destroyed by the call.
// (will work for float and double)
template<class num_t>
inline void SVD(const TMat<num_t>& A, TMat<num_t>& U, TVec<num_t>& S, TMat<num_t>& Vt, char JOBZ='A', real safeguard = 1)
{
    // A = U.S.Vt -> At = V.S.Ut
    lapackSVD(A, Vt, S, U, JOBZ, safeguard);
}


// DO NOT USE!
// eigen_SymmMat is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead.
int eigen_SymmMat(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                  bool compute_all, int nb_eigen, bool compute_vectors = true,
                  bool largest_evalues=true);
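// Usage sketch for SVD (illustrative only, not part of the original header;
// `data`, `l` and `w` are hypothetical names).  With JOBZ='A' (the default)
// the full factorization A = U.diag(S).Vt is returned; remember that the
// contents of `data` are destroyed.
//
//     int l = 100, w = 10;
//     Mat data(l, w);           // fill with the matrix to factorize
//     Mat U, Vt;
//     Vec S;
//     SVD(data, U, S, Vt);      // JOBZ='A': U is l x l, Vt is w x w, S has min(l,w) entries
//     // S[0] >= S[1] >= ... are the singular values of the original matrix.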
// DO NOT USE!
// eigen_SymmMat_decreasing is deprecated: use eigenVecOfSymmMat or lapackEIGEN instead.
int eigen_SymmMat_decreasing(Mat& in, Vec& e_value, Mat& e_vector, int& n_evalues_found,
                             bool compute_all, int nb_eigen, bool compute_vectors = true,
                             bool largest_evalues=true);

int matInvert(Mat& in, Mat& inverse);

Mat multivariate_normal(const Vec& mu, const Mat& A, int N);

Vec multivariate_normal(const Vec& mu, const Mat& A);

Vec multivariate_normal(const Vec& mu, const Vec& e_values, const Mat& e_vectors);

void multivariate_normal(Vec& x, const Vec& mu, const Vec& e_values, const Mat& e_vectors, Vec& z);

int lapackSolveLinearSystem(Mat& At, Mat& Bt, TVec<int>& pivots);

Mat solveLinearSystem(const Mat& A, const Mat& B);

Vec solveLinearSystem(const Mat& A, const Vec& b);

void solveLinearSystem(const Mat& A, const Mat& Y, Mat& X);

void solveTransposeLinearSystem(const Mat& A, const Mat& Y, Mat& X);

Vec constrainedLinearRegression(const Mat& Xt, const Vec& Y, real lambda=0.);

// Returns the affine transformation such that the transformed data has zero
// mean and identity covariance.  The input data matrix is (n x d).  The
// results are the matrix W and the bias vector such that
//     transformed_row = W * row + bias
// has the desired properties.  Note that W is obtained by doing an
// eigen-decomposition of the data covariance matrix.  In case this matrix is
// ill-conditioned, the user can add a regularization term on its diagonal
// before the inversion, by providing a regularizer > 0 as argument.
void affineNormalization(Mat data, Mat W, Vec bias, real regularizer=0);

inline Vec closestPointOnHyperplane(const Vec& x, const Mat& points, real weight_decay = 0.)
{ return transposeProduct(points, constrainedLinearRegression(points,x,weight_decay)); }

inline real hyperplaneDistance(const Vec& x, const Mat& points, real weight_decay = 0.)
{ return L2distance(x, closestPointOnHyperplane(x,points,weight_decay)); }
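// Usage sketch for affineNormalization (illustrative only, not part of the
// original header).  The names `data`, `n`, `d`, and in particular the
// assumption that W must be pre-sized to d x d and bias to length d, are my
// own reading of the comment above and are not confirmed by documentation.
//
//     int n = 1000, d = 5;
//     Mat data(n, d);            // one observation per row
//     Mat W(d, d);
//     Vec bias(d);
//     affineNormalization(data, W, bias, 1e-6 /* regularizer */);
//     // For each row r of data, W * r + bias should then have (approximately)
//     // zero mean and identity covariance over the dataset.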
// Given a symmetric operator A (any MatT providing a product(Vec,Vec) method),
// a set of candidate vectors in the rows of X, and a work vector Ax,
// diagonalizes A restricted to the subspace spanned by the rows of X
// (a Rayleigh-Ritz step) and writes the resulting approximate eigenvectors
// in the rows of `solutions`.
template<class MatT>
void diagonalizeSubspace(MatT& A, Mat& X, Vec& Ax,
                         Mat& solutions, Vec& evalues, Mat& evectors)
{
    int n_try=X.length();

    n_try=GramSchmidtOrthogonalization(X);
    X = X.subMatRows(0,n_try);

    int n_soln=solutions.length();

    // build the lower triangle of C = X A X'
    Mat C(n_try,n_try);
    for (int i=0;i<n_try;i++)
    {
        real* Ci = C[i];
        Vec x_i=X(i);
        A.product(x_i,Ax);
        for (int j=0;j<=i;j++)
            Ci[j] = dot(X(j),Ax);
    }

    // symmetrize C
    for (int i=0;i<n_try;i++)
    {
        real* Ci = C[i];
        for (int j=i+1;j<n_try;j++)
            Ci[j] = C(j,i);
    }

    // diagonalize C
    int n_evalues_found=0;
    Mat CC=C.copy();
    eigen_SymmMat(CC,evalues,evectors,n_evalues_found,true,n_try,true,true);

#if 0
    // optional sanity checks on the eigen-decomposition of C
    Vec Cv(n_try);
    for (int i=0;i<n_try;i++)
    {
        Vec vi=evectors(i);
        if (fabs(norm(vi)-1)>1e-5)
            cout << "norm v[" << i << "] = " << norm(vi) << endl;
        product(C, vi,Cv);
        real ncv=norm(Cv);
        if (fabs(ncv-evalues[i])>1e-5)
            cout << "C v[" << i << "] = " << ncv << " but evalue = " << evalues[i] << endl;
        real vcv = dot(vi,Cv);
        if (fabs(vcv-evalues[i])>1e-5)
            cout << "v' C v[" << i << "] = " << vcv << " but evalue = " << evalues[i] << endl;
        for (int j=0;j<i;j++)
        {
            Vec vj=evectors(j);
            real dij = dot(vi,vj);
            if (fabs(dij)>1e-5)
                cout << "v[" << i << "] . v[" << j << "] = " << dij << endl;
        }
    }
#endif

    // map the eigenvectors of C back into the original space:
    // solutions(i) = sum_j evectors(i,j) * X(j)
    for (int i=0;i<n_soln;i++)
    {
        Vec xi=solutions(i);
        xi.clear();
        for (int j=0;j<n_try;j++)
            multiplyAcc(xi, X(j),evectors(i,j));
    }

#if 0
    // optional sanity checks on the solutions in the original space
    for (int i=0;i<n_soln;i++)
    {
        Vec xi=solutions(i);
        real normxi=norm(xi);
        if (fabs(normxi-1)>1e-5)
            cout << "norm x[" << i << "]=" << normxi << endl;
        product(A, xi,Ax);
        cout << "Ax[" << i << "]=" << norm(Ax) << endl;
        real xAx = dot(Ax,xi);
        real err=fabs(xAx-evalues[i]);
        if (err>1e-5)
            cout << "xAx [" << i << "]=" << xAx << " but evalue = " << evalues[i] << endl;
        for (int j=0;j<i;j++)
        {
            Vec xj=solutions(j);
            err = fabs(dot(xi,xj));
            if (err>1e-5)
                cout << "|x[" << i << "] . x[" << j << "]| = " << err << endl;
        }
    }
#endif
}

} // end of namespace PLearn


#endif

Generated on Tue Aug 17 16:01:26 2004 for PLearn by doxygen 1.3.7