00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00049
#ifndef GenMat_INC
00050
#define GenMat_INC
00051
00052
namespace PLearn {
00053
using namespace std;
00054
00055
00066
//! Wraps a square symmetric matrix A so that matrix-vector products are
//! computed with A*A (i.e. this object stands for B = A^2) without ever
//! forming A^2 explicitly.
template<class MatT>
class SquaredSymmMatT
{
protected:
    MatT& A_;   // the wrapped matrix (held by reference; must outlive this object)
    Vec Ax;     // scratch buffer holding A*x between the two products
public:
    //! A must be square (and is assumed symmetric so that A^2 = A'A).
    SquaredSymmMatT(MatT& A) : A_(A), Ax(A.length())
    {
        if (A.length() != A.width())
            PLERROR("SquaredSymmMatT expects a square symmetric matrix");
    }
    int length() const { return A_.length(); }
    int width() const { return A_.width(); }

    //! y = A * (A * x), i.e. y = A^2 x.
    // NOTE(review): the 3-argument calls below are meant for the free
    // function product(MatT&,Vec,Vec); under standard unqualified lookup the
    // 2-argument member of the same name hides it, which would fail to
    // compile if this member is instantiated -- confirm against callers.
    void product(const Vec& x, Vec& y)
    {
        product(A_, x,Ax);
        product(A_, Ax,y);
    }

    //! d = diagonal of A^2, delegated to the free diagonalOfSquare(A_, d).
    // NOTE(review): same member-vs-free-function name-hiding concern as above.
    void diag(Vec& d) { diagonalOfSquare(A_, d); }

    //! Diagonal of (A^2)^2: intentionally not implemented.
    void diagonalOfSquare(Vec& d)
    { PLERROR("SquaredSymmMatT::diagonalOfSquare not implemented"); }
};
00092
00102
template<
class MatT>
00103
class ReverseMatT
00104 {
00105
protected:
00106 MatT& A_;
00107
real alpha_;
00108
public:
00109 ReverseMatT(MatT& A,
real alpha) : A_(A), alpha_(alpha)
00110 {
00111
if (A.length() != A.width())
00112
PLERROR(
"ReverseMatT expects a square symmetric matrix");
00113 }
00114
int length()
const {
return A_.length(); }
00115
int width()
const {
return A_.width(); }
00117
void product(
const Vec& x, Vec& y)
00118 {
00119
product(A_, x,y);
00120 y*=-1;
00121
multiplyAcc(y, x,alpha_);
00122 }
00123
00124
void diag(Vec& d)
00125 {
00126
diag(A_, d);
00127
diag*=-1;
00128
diag+=alpha_;
00129 }
00130
void diagonalOfSquare(Vec& d)
00131 {
00132
Vec diag_A(A_.length());
00133
diag(A_, diag_A);
00134
diagonalOfSquare(A_, d);
00135
multiplyAcc(d, diag_A,-2*alpha_);
00136 d+=alpha_*alpha_;
00137 }
00138
00139 };
00140
00152
template<
class MatT>
00153
class MatTPlusSumSquaredVec
00154 {
00155
public:
00156 MatT& A_;
00157
Mat X_;
00158
public:
00159 MatTPlusSumSquaredVec(MatT& A,
int alloc_n_vectors) : A_(A), X_(alloc_n_vectors,A.length())
00160 { X_.resize(0,A.length()); }
00161 MatTPlusSumSquaredVec(MatT& A, Mat& X) : A_(A), X_(X)
00162 {
00163
if (X.width()!=A.length() || A.length()!=A.width())
00164
PLERROR(
"MatTPlusSumSquaredVec(MatT,Mat) arguments don't match");
00165 }
00166
void squaredVecAcc(Vec& x)
00167 {
00168 X_.resize(X_.length()+1,X_.width());
00169 X_(X_.length()-1) <<
x;
00170 }
00171
int length()
const {
return A_.length(); }
00172
int width()
const {
return A_.width(); }
00174
void product(
const Vec& x, Vec& y)
00175 {
00176
product(A_, x,y);
00177
for (
int t=0;t<X_.length();t++)
00178 {
00179
Vec x_t = X_(t);
00180
multiplyAcc(y, x_t,
dot(x_t,x));
00181 }
00182 }
00183
00184
void diag(Vec& d)
00185 {
00186
diag(A_, d);
00187
for (
int t=0;t<X_.length();t++)
00188
squareAcc(d, X_(t));
00189 }
00190
void diagonalOfSquare(Vec& d)
00191 {
PLERROR(
"MatTPlusSumSquaredVec::diagonalOfSquare not implemented"); }
00192 };
00193
00194
00196
#if 0
00197
00198
#define MatT Mat
00199
inline bool SolveLinearSymmSystemByCG(MatT A, Vec x,
const Vec& b,
int& max_iter,
real& tol,
real lambda)
00200 {
00201
real resid;
00202
int n=A.nrows();
00203
Vec p(n), z(n), q(n), invdiag(n);
00204
real alpha, beta, rho, previous_rho;
00205
real normb =
norm(b);
00206
00207
00208
diag(A, invdiag);
00209
if (lambda>0)
00210 invdiag+=lambda;
00211
invertElements(invdiag);
00212
00213
Vec r(n);
00214
00215
product(A, x,r);
00216
if (lambda>0)
00217
multiplyAcc(r, x,lambda);
00218 r*=-1;
00219 r+=b;
00220
00221
if (normb == 0.0)
00222 normb = 1;
00223
00224 resid =
norm(r);
00225
00226
if ((resid / normb) <= tol) {
00227 tol = resid;
00228 max_iter = 0;
00229
return true;
00230 }
00231
00232
for (
int i = 1; i <= max_iter; i++) {
00233
00234
00235
multiply(r,invdiag,z);
00236
00237 rho =
dot(r, z);
00238
00239
if (i == 1)
00240 p << z;
00241
else {
00242 beta = rho / previous_rho;
00243
00244
multiplyAdd(z,p,beta,p);
00245 }
00246
00247
00248
product(A, p,q);
00249
multiplyAcc(q, p,lambda);
00250
00251 alpha = rho /
dot(p, q);
00252
00253
00254
multiplyAcc(x, p,alpha);
00255
00256
multiplyAcc(r, q,-alpha);
00257 resid =
norm(r);
00258
00259
if ((resid / normb) <= tol) {
00260 tol = resid;
00261 max_iter = i;
00262
return true;
00263 }
00264
00265 previous_rho = rho;
00266 }
00267
00268 tol = resid;
00269
return false;
00270 }
00271
#undef MatT
00272
#else
00273
00274
template <
class MatT>
00275
bool SolveLinearSymmSystemByCG(MatT A, Vec x,
const Vec& b,
int& max_iter,
real& tol,
real lambda)
00276 {
00277
real resid;
00278
int n=A.length();
00279
Vec p(n), z(n), q(n), invdiag(n);
00280
real alpha, beta, rho, previous_rho;
00281
real normb =
norm(b);
00282
00284
diag(A, invdiag);
00285
if (lambda>0)
00286 invdiag+=lambda;
00287
invertElements(invdiag);
00288
00289
Vec r(n);
00291
product(A, x,r);
00292
if (lambda>0)
00293
multiplyAcc(r, x,lambda);
00294 r*=-1;
00295 r+=b;
00296
00297
if (normb == 0.0)
00298 normb = 1;
00299
00300 resid =
norm(r);
00301
00302
if ((resid / normb) <= tol) {
00303 tol = resid;
00304 max_iter = 0;
00305
return true;
00306 }
00307
00308
for (
int i = 1; i <= max_iter; i++) {
00311
multiply(r,invdiag,z);
00312
00313 rho =
dot(r, z);
00314
00315
if (i == 1)
00316 p << z;
00317
else {
00318 beta = rho / previous_rho;
00320
multiplyAdd(z,p,beta,p);
00321 }
00322
00324
product(A, p,q);
00325
multiplyAcc(q, p,lambda);
00326
00327 alpha = rho /
dot(p, q);
00328
00330
multiplyAcc(x, p,alpha);
00332
multiplyAcc(r, q,-alpha);
00333 resid =
norm(r);
00335
if ((resid / normb) <= tol) {
00336 tol = resid;
00337 max_iter = i;
00338
return true;
00339 }
00340
00341 previous_rho = rho;
00342 }
00343
00344 tol = resid;
00345
return false;
00346 }
00347
#endif
00348
00362
// NOTE(review): the two obsolete PowerIteration implementations below are
// superseded by the 'verbose'-capable template that follows this region
// (its verbose parameter defaults to false, so 6-argument calls still
// resolve). The original preprocessor nesting here was unbalanced (two
// "#if 0" but a single "#endif"), which would have compiled away the rest
// of the file; the whole obsolete region is now disabled with one balanced
// pair. Also note both old versions could fall off the end of a function
// declared to return real.
#if 0
#define MatT Mat
inline real PowerIteration(MatT& A, Vec x0, int& n_iterations,
                           real RayleighQuotientTolerance, Mat final_vectors,
                           int& final_offset)
{
    int N=final_vectors.length();
    if (N<3) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 3",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    Vec next=final_vectors(2);
    previous << x0;
    product(A, previous,current);
    real current_norm=norm(current), next_norm, current_Rq, previous_Rq=0;
    current/=current_norm;
    for (int it=1;it<=n_iterations;it++)
    {
        product(A, current,next);
        current_Rq = dot(current,next);
        next_norm = norm(next);
        next/=next_norm;
        cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = "
             << next_norm << endl;
        if (current_Rq < previous_Rq)
            PLWARNING("PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
        {
            n_iterations = it;
            final_offset = it%N;
            return current_Rq;
        }
        previous_Rq = current_Rq;
        current_norm = next_norm;
        previous = current;
        current = next;
        next = final_vectors((it+2)%N);
    }
}
#undef MatT

template<class MatT>
real PowerIteration(MatT& A, Vec x0, int& n_iterations,
                    real RayleighQuotientTolerance, Mat final_vectors,
                    int& final_offset)
{
    int N=final_vectors.length();
    if (N<3) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 3",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    Vec next=final_vectors(2);
    previous << x0;
    product(A, previous,current);
    real current_norm=norm(current), next_norm, current_Rq, previous_Rq=0;
    current/=current_norm;
    for (int it=1;it<=n_iterations;it++)
    {
        product(A, current,next);
        current_Rq = dot(current,next);
        next_norm = norm(next);
        next/=next_norm;
        if (current_Rq < previous_Rq)
            PLWARNING("PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
        {
            n_iterations = it;
            final_offset = it%N;
            return next_norm;
        }
        previous_Rq = current_Rq;
        current_norm = next_norm;
        previous = current;
        current = next;
        next = final_vectors((it+2)%N);
    }
}
#endif
00454
00469
int GramSchmidtOrthogonalization(Mat A,
real tolerance=1e-6);
00470
00489
template <
class MatT>
00490
real PowerIteration(MatT A, Vec x0,
int& n_iterations,
00491
real RayleighQuotientTolerance, Mat final_vectors,
00492
int& final_offset,
bool verbose=
false)
00493 {
00494
int N=final_vectors.length();
00495
if (N<3)
PLERROR(
"PowerIteration: final_vectors.length_ = %d should be >= 3",N);
00496
Vec previous=final_vectors(0);
00497
Vec current=final_vectors(1);
00498
Vec next=final_vectors(2);
00499 previous << x0;
00500
real max_x =
max(previous);
00501
if (max_x<0) previous*=-1;
00502
product(A, previous,current);
00503
real current_norm=
norm(current), next_norm, current_Rq, previous_Rq=0;
00504 max_x =
max(current);
00505
if (max_x<0) current*=-1;
00506 current/=current_norm;
00507
int it=1;
00508
for (;it<=n_iterations;it++)
00509 {
00510
product(A, current,next);
00512 current_Rq =
dot(current,next);
00514 next_norm =
norm(next);
00515 next/=next_norm;
00516 max_x =
max(next);
00517
if (max_x<0) next*=-1;
00518
if (verbose)
00519 {
00520 cout <<
"at iteration " << it <<
", R(A,x) = " << current_Rq <<
", |Ax|/|x| = "
00521 << next_norm <<
endl;
00522 }
00523
if (current_Rq < previous_Rq)
00524
PLWARNING(
"PowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
00525 previous_Rq, current_Rq);
00526
if (it>=N && current_Rq / previous_Rq - 1 < RayleighQuotientTolerance)
00527 {
00529 n_iterations = it;
00530 final_offset = it%N;
00531
if (verbose)
00532 cout <<
"power iteration finishes with |Ax|/|x| = " << next_norm <<
endl;
00533
return next_norm;
00534 }
00536 previous_Rq = current_Rq;
00537 current_norm = next_norm;
00538 previous = current;
00539 current = next;
00540 next = final_vectors((it+2)%N);
00541 }
00542 final_offset = it%N;
00543
if (verbose)
00544 cout <<
"power iteration finishes FAILING with |Ax|/|x| = " << next_norm <<
endl;
00545
return next_norm;
00546 }
00547
00572
//! Inverse power iteration to approach the smallest eigenvalue/eigenvector
//! of symmetric A: each step solves (A + regularizer*I) current = previous
//! by conjugate gradients, then normalizes the iterate.
//!
//! @param x0            starting vector
//! @param n_iterations  in: iteration budget; out: iterations performed
//! @param max_n_cg_iter CG budget per step
//! @param RTolerance    relative-improvement stopping threshold (also the
//!                      CG residual target per step)
//! @param final_vectors storage for the last N iterates (N = length >= 2)
//! @param final_offset  out: index of the last iterate within final_vectors
//! @param regularizer   ridge added to A inside the CG solves
//! @return |A x|/|x| for the last iterate
template<class MatT>
real InversePowerIteration(MatT A, Vec x0, int& n_iterations,
                           int max_n_cg_iter,
                           real RTolerance, Mat final_vectors,
                           int& final_offset, real regularizer, bool verbose=false)
{
    int N=final_vectors.length();
    if (N<2) PLERROR("PowerIteration: final_vectors.length_ = %d should be >= 2",N);
    Vec previous=final_vectors(0);
    Vec current=final_vectors(1);
    previous << x0;
    real max_x = max(previous);
    if (max_x<0) previous*=-1;   // canonical sign
    real current_Rq, previous_Rq=0;
    // NOTE(review): 'current' has not been written yet at this point, so this
    // sign test reads whatever was already in final_vectors(1) -- confirm
    // this is intended.
    max_x = max(current);
    if (max_x<0) current*=-1;
    normalize(previous);
    int it=1;
    // NOTE(review): presumably Vec copies share storage, so Ax aliases x0 and
    // the products below overwrite the caller's x0 -- verify against callers.
    Vec Ax = x0;
    real current_error =0;
    for (;it<=n_iterations;it++)
    {
        // one inverse-iteration step: solve (A + regularizer*I) current = previous
        int CGniter = max_n_cg_iter;
        real residue = RTolerance;
        current << previous;   // warm-start CG from the previous iterate
        bool success=SolveLinearSymmSystemByCG(A, current, previous, CGniter, residue, regularizer);
        if (verbose)
        {
            if (success)
                cout << "done CG in " << CGniter << " iterations with residue = " << residue << endl;
            else
                cout << "done incomplete CG in " << CGniter << " iterations with residue = " << residue << endl;
        }
        max_x = max(current);
        if (max_x<0) current*=-1;
        normalize(current);
        // diagnostics: Rayleigh quotient and residual norm of the new iterate
        product(A, current,Ax);
        current_Rq = dot(current,Ax);
        current_error = norm(Ax);
        if (verbose)
            cout << "at iteration " << it << ", R(A,x) = " << current_Rq << ", |Ax|/|x| = "
                 << current_error << endl;
        if (verbose && current_Rq > (1+RTolerance)*previous_Rq)
            PLWARNING("InversePowerIteration: something strange, x'Ax/x'x is decreasing %g->%g",
                      previous_Rq, current_Rq);
        if (it>=N && 1 - current_Rq / previous_Rq < RTolerance)
        {
            // converged: the Rayleigh quotient has stopped decreasing significantly
            n_iterations = it;
            final_offset = it%N;
            if (verbose)
                cout << "inverse power iteration finishes with |Ax|/|x| = " << current_error << endl;
            return current_error;
        }
        previous_Rq = current_Rq;
        previous = current;
        current = final_vectors((it+1)%N);   // cycle through the N storage rows
    }
    // budget exhausted without convergence
    final_offset = it%N;
    if (verbose)
        cout << "power iteration finishes FAILING with |Ax|/|x| = " << current_error << endl;
    return current_error;
}
00637
00656
template<
class MatT>
00657
real findSmallestEigenPairOfSymmMat(MatT& A, Vec x,
00658
real error_tolerance=1e-3,
00659
real improvement_tolerance=1e-4,
00660
int max_n_cg_iter=0,
00661
int max_n_power_iter=0,
bool verbose=
false)
00662 {
00663
int n=A.length();
00664
int n_try=5;
00665
00666
if (max_n_cg_iter==0)
00667 max_n_cg_iter = 5+
int(
pow(
double(n),0.3));
00668
if (max_n_power_iter==0)
00669 max_n_power_iter = 5+int(
log(n));
00670
00671
Mat try_solutions(n_try,n);
00672
Mat kernel_solutions =
x.toMat(1,n);
00673
Vec Ax(n);
00674
00675
int max_iter = int(
sqrt(max_n_power_iter));
00676
real err=1e30, prev_err=1e30;
00677
int nrepeat=0;
00678
do {
00679
int n_iter=max_iter;
00680
int offs=0;
00681
real l0 = InversePowerIteration(A,x,n_iter,max_n_cg_iter,
00682 improvement_tolerance,
00683 try_solutions,offs,error_tolerance,verbose);
00684
if (verbose)
00685 cout <<
"got smallest eigenvalue = " << l0
00686 <<
" in " << n_iter <<
" iterations" <<
endl;
00687
00688
if (verbose)
00689 {
00692 n_try = try_solutions.length();
00693
for (
int i=0;i<n_try;i++)
00694 {
00695 cout <<
"for solution " << i <<
endl;
00696
Vec y = try_solutions(i);
00697
normalize(y);
00698
product(A, y,Ax);
00699 prev_err=err;
00700 err =
norm(Ax);
00701 cout <<
"|A y| = " << err <<
endl;
00702 cout <<
"y. A y = " <<
dot(y,Ax) <<
endl;
00703 }
00704 }
00706
00707
Vec evalues(n_try);
00708
Mat evectors(n_try,n_try);
00709
diagonalizeSubspace(A, try_solutions, Ax, kernel_solutions, evalues, evectors);
00710
product(A, x,Ax);
00711 err =
norm(Ax);
00712
if (verbose)
00713 cout <<
"after diagonalization, err = " << err <<
endl;
00714 nrepeat++;
00715 }
while (err > error_tolerance && n_try>=2 &&
00716 prev_err/err-1>improvement_tolerance && nrepeat<max_iter);
00717
if (verbose)
00718 cout <<
"return from findSmallestEigenPairOfSymmMat with err=" << err <<
endl;
00719 }
00720
00721
00741
//! Find up to normsAx.length() approximate null-space directions of the
//! symmetric matrix A, using repeated smallest-eigenpair searches with
//! deflation of already-found directions.
//!
//! @param solutions out: one found direction per row (resized to the number found)
//! @param normsAx   out: |A x_i| for each kept solution (its length sets how
//!                  many solutions are requested)
//! @param xAx       out: x_i' A x_i for each kept solution
//! @return the number of linearly independent solutions kept
template<class MatT>
int SymmMatNullSpaceByInversePowerIteration(MatT &A, Mat solutions,
                                            Vec normsAx, Vec xAx,
                                            real error_tolerance=1e-3,
                                            real improvement_tolerance=1e-4,
                                            int max_n_cg_iter=0,
                                            int max_n_power_iter=0, bool verbose=false)
{
    int n=A.length();
    int n_soln=normsAx.length();
    solutions.resize(n_soln,n);
    // heuristic default iteration budgets based on problem size
    if (max_n_cg_iter==0)
        max_n_cg_iter = 5+int(pow(double(n),0.3));
    if (max_n_power_iter==0)
        max_n_power_iter = 5+int(log(n));
    Vec Ax(n);
    // B = A + sum of rank-one terms built from already-found solutions:
    // deflation that pushes found directions away from the bottom of the spectrum
    MatTPlusSumSquaredVec<MatT> B(A,n_soln);
    Vec sy(n);
    fill_random_uniform(sy);
    Mat largest_evecs(3,n);
    int offs;
    int n_iter = MIN(max_n_power_iter,20);
    // estimate the largest eigenvalue of A, used to scale the deflation terms
    real largest_evalue = PowerIteration(A, sy, n_iter ,1e-3,
                                         largest_evecs, offs,verbose);
    if (verbose)
        cout << "largest evalue(B) = " << largest_evalue << endl;
    for (int i=0;i<n_soln;i++)
    {
        Vec y = solutions(i);
        // deterministic start for the first direction, random for the rest
        if (i==0)
            y.fill(1);
        else
            fill_random_uniform(y,0.1,0.5);
        real residue=findSmallestEigenPairOfSymmMat(B, y,
                                                    error_tolerance,
                                                    improvement_tolerance,
                                                    max_n_cg_iter,
                                                    max_n_power_iter,verbose);
        if (verbose)
        {
            cout << "found vector with |B y| = " << residue << endl;
            cout << "|y| = " << norm(y) << endl;
            product(A, y,Ax);
            cout << "****** |A y| = " << norm(Ax) << endl;
        }
        // deflate: add (largest_evalue*y)(largest_evalue*y)' to B so the next
        // search avoids this direction
        multiply(y,largest_evalue,sy);
        B.squaredVecAcc(sy);
    }
    // keep only the linearly independent solutions
    int n_s=GramSchmidtOrthogonalization(solutions);
    solutions = solutions.subMatRows(0,n_s);
    if (n_s<n_soln && verbose)
        cout << "found only " << n_s << " independent solutions out of "
             << n_soln << " requested" << endl;
    // fill per-solution quality measures
    for (int i=0;i<n_s;i++)
    {
        Vec xi = solutions(i);
        product(A, xi,Ax);
        real err = dot(xi,Ax);
        real normAx = norm(Ax);
        normsAx[i]=normAx;
        xAx[i]=err;
        if (verbose)
            cout << "for " << i << "-th solution x: x'Ax = " << err << ", |Ax|/|x|= "
                 << normAx << endl;
    }
    return n_s;
}
00812
00850
template<
class MatT>
00851
int GDFindSmallEigenPairs(MatT& A,Mat X,
00852
bool diagonalize_in_the_end=
true,
00853
real tolerance=1e-6,
00854
int n_epochs=0,
00855
real learning_rate=0,
00856
int normalize_every=0,
00857
real decrease_factor=0,
00858
bool verbose=
false)
00859 {
00860
int n=A.length();
00861
int m=X.length();
00862
if (n_epochs==0)
00863 n_epochs = 1000+10*int(
sqrt(n));
00864
Vec Ax(n);
00865
real err_tolerance, sum_norms, actual_err;
00866
if (learning_rate==0)
00867 {
00869
fill_random_uniform(Ax,-1,1);
00870
Mat large_vectors(3,n);
00871
int offs;
00872
int n_iter = 10+int(
sqrt(
log(
double(n))));
00873
real max_eigen_value =
00874 PowerIteration(A, Ax, n_iter,1e-3,large_vectors,offs,verbose);
00875 learning_rate = 2.0/max_eigen_value;
00876
if (verbose)
00877 {
00878 cout <<
"setting initial learning rate = 2/max_eigen_value = 2/"
00879 << max_eigen_value <<
" = " << learning_rate <<
endl;
00880 }
00881 err_tolerance = tolerance * max_eigen_value;
00882 }
00883
else err_tolerance = tolerance;
00884
real prev_err=1e30;
00885
int it=0;
00886
for (;it<n_epochs;it++)
00887 {
00888
real learning_rate = learning_rate / (1+it*decrease_factor);
00889
real err=0;
00890
for (
int i=0;i<n;i++)
00891 {
00892
real* xi = &X(0,i);
00893
for (
int d=0;d<m;d++, xi+=n)
00894 {
00896
real gradient =
matRowDotVec(A, i,X(d));
00897 *xi -= learning_rate * gradient;
00898 err += gradient * *xi;
00899 }
00900 }
00901
if (verbose)
00902 {
00903 cout <<
"at iteration " << it <<
" of gradient descent, est. err.= " << err <<
endl;
00904 cout <<
"lrate = " << learning_rate <<
endl;
00905 }
00906
if (tolerance>0)
00907 {
00908 sum_norms = 0;
00909
for (
int d=0;d<m;d++)
00910 sum_norms +=
norm(X(d));
00911 actual_err = err / (m*sum_norms);
00912
if (actual_err<err_tolerance)
break;
00913 }
00914
if (err>prev_err)
00915 cout <<
"at iteration " << it <<
" of gradient descent, est. err.= " << err
00916 <<
" > previous error = " << prev_err <<
" ; lrate = " << learning_rate <<
endl;
00918
if (verbose)
00919
for (
int d=0;d<m;d++)
00920 cout <<
"norm(x[" << d <<
"])=" <<
norm(X(d)) <<
endl;
00921
if (normalize_every!=0 && it%normalize_every==0)
00922 {
00923
int new_m=
GramSchmidtOrthogonalization(X,1e-9);
00924
for (
int e=new_m;e<m;e++)
00925
fill_random_uniform(X(e));
00926
if (verbose)
00927 {
00929
real C=0;
00930
for (
int d=0;d<m;d++)
00931 {
00932
Vec xd = X(d);
00933
product(A, xd,Ax);
00934 C +=
dot(xd,Ax);
00935 cout <<
"for " << d <<
", |Ax|/|x| = " <<
norm(Ax) <<
endl;
00936 }
00937 cout <<
"actual cost = " << 0.5*C <<
endl;
00938 }
00939 }
00940 }
00941
if (diagonalize_in_the_end)
00942 {
00943
Mat diagonalized_solutions(m,n);
00944
Mat subspace_evectors(m,m);
00945
Vec subspace_evalues(m);
00946
diagonalizeSubspace(A,X,Ax,diagonalized_solutions,subspace_evalues,subspace_evectors);
00947 X << diagonalized_solutions;
00948 }
00949
return it;
00950 }
00951
00960
template<
class MatT>
00961
int metricMultiDimensionalScaling(MatT& square_distances,Mat embedding,
int max_n_eigen_iter=300)
00962 {
00963
int n=square_distances.length();
00964
long int m=embedding.width();
00965
if (embedding.length()!=n)
00966
PLERROR(
"MetricMultiDimensionalScaling: expected embedding.length()==square_distances.length(), got %d!=%d",
00967 embedding.length(),n);
00968
if (square_distances.width()!=n)
00969
PLERROR(
"MetricMultiDimensionalScaling: expected square_distances a square matrix, got %d x %d",
00970 n,square_distances.width());
00971
if (square_distances.size()!=n*n)
00972
PLWARNING(
"MetricMultiDimensionalScaling: current implementation does not seem to work well for sparse distance matrices!");
00974
Vec avg_across_rows(n), avg_across_columns(n);
00976
averageAcrossRowsAndColumns(square_distances, avg_across_rows, avg_across_columns,
true);
00977
real overall_avg=
mean(avg_across_rows);
00978 avg_across_rows *= -1;
00979 avg_across_columns *= -1;
00980
addToRows(square_distances, avg_across_rows,
true);
00981
addToColumns(square_distances, avg_across_rows,
true);
00982 square_distances.add(overall_avg,
true);
00983 square_distances*=-0.5;
00985
00988
Vec e_values(m);
00989
Mat e_vectors(m,n);
00990
int err=
eigenSparseSymmMat(square_distances, e_values,
00991 e_vectors, m, max_n_eigen_iter);
00992
if (err!=0)
00993
return err;
00996
for (
int j=0;j<m;j++)
00997 {
00998
real eval_j = e_values[j];
00999
if (eval_j<0)
01000
PLERROR(
"metricMultiDimensionalScaling::the matrix of dot-products is not positive-definite!, evalue=%g",eval_j);
01001
real scale =
sqrt(eval_j);
01002
Vec feature_j = e_vectors(j);
01003 feature_j *= scale;
01004 embedding.column(j) << feature_j;
01005 }
01006
return 0;
01007 }
01008
01009
01010 }
01011
01012
#endif