#include "EntropyContrast.h"
#include <plearn/vmat/VMat_maths.h>
#include <plearn/math/plapack.h>
#include <plearn/math/random.h>

namespace PLearn {
using namespace std;
EntropyContrast::EntropyContrast()
    : nconstraints(4)
{
    learning_rate = 0.001;
    decay_factor = 0;
    weight_real = weight_gen = weight_extra = 1;
    nconstraints = 0; // note: overrides the default set in the initializer list
    n = 0;
    evaluate_every_n_epochs = 1;
    evaluate_first_epoch = true;
    evaluation_method = "no_evaluation";

    nhidden = 0;
    alpha = 0.0;
}
PLEARN_IMPLEMENT_OBJECT(EntropyContrast,
                        "Performs an EntropyContrast search",
                        "Detailed Description");
void EntropyContrast::declareOptions(OptionList& ol)
{
    declareOption(ol, "nconstraints", &EntropyContrast::nconstraints, OptionBase::buildoption,
                  "The number of constraints to create (that's also the outputsize)");
    declareOption(ol, "learning_rate", &EntropyContrast::learning_rate, OptionBase::buildoption,
                  "The learning rate of the algorithm");
    declareOption(ol, "decay_factor", &EntropyContrast::decay_factor, OptionBase::buildoption,
                  "The decay factor of the learning rate");
    declareOption(ol, "weight_decay_hidden", &EntropyContrast::weight_decay_hidden, OptionBase::buildoption,
                  "The weight decay factor for the hidden units");
    declareOption(ol, "weight_decay_output", &EntropyContrast::weight_decay_output, OptionBase::buildoption,
                  "The weight decay factor for the output units");
    declareOption(ol, "cost_real", &EntropyContrast::cost_real, OptionBase::buildoption,
                  "The method used to compute the cost on the real data");
    declareOption(ol, "cost_gen", &EntropyContrast::cost_gen, OptionBase::buildoption,
                  "The method used to compute the cost on the generated data");
    declareOption(ol, "cost_extra", &EntropyContrast::cost_extra, OptionBase::buildoption,
                  "The method used to compute the extra cost");
    declareOption(ol, "gen_method", &EntropyContrast::gen_method, OptionBase::buildoption,
                  "The method used to generate new points");
    declareOption(ol, "weight_real", &EntropyContrast::weight_real, OptionBase::buildoption,
                  "The relative weight of the cost on the real data (default 1)");
    declareOption(ol, "weight_gen", &EntropyContrast::weight_gen, OptionBase::buildoption,
                  "The relative weight of the cost on the generated data (default 1)");
    declareOption(ol, "weight_extra", &EntropyContrast::weight_extra, OptionBase::buildoption,
                  "The relative weight of the extra cost (default 1)");
    declareOption(ol, "evaluation_method", &EntropyContrast::evaluation_method, OptionBase::buildoption,
                  "The method used to evaluate the constraint learning");
    declareOption(ol, "evaluate_every_n_epochs", &EntropyContrast::evaluate_every_n_epochs, OptionBase::buildoption,
                  "Number of epochs between two evaluations of the constraints");
    declareOption(ol, "test_set", &EntropyContrast::test_set, OptionBase::buildoption,
                  "The VMat test set");
    declareOption(ol, "nhidden", &EntropyContrast::nhidden, OptionBase::buildoption,
                  "The number of hidden units");

    inherited::declareOptions(ol);
}
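/* A minimal usage sketch, not part of this file: the option values and the
   variable name some_vmat below are illustrative assumptions. Build options
   are ordinarily set through PLearn's option mechanism (e.g. a .plearn
   script), but set directly it would look roughly like:

       PP<EntropyContrast> learner = new EntropyContrast();
       learner->nconstraints = 2;
       learner->nhidden      = 10;
       learner->cost_real    = "constraint_variance";
       learner->cost_gen     = "constraint_variance";
       learner->gen_method   = "N(0,1)";
       learner->setTrainingSet(some_vmat); // some_vmat: a VMat of real data
       learner->build();
       learner->train();
*/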
//! Randomly initialize the network parameters and reset the running statistics.
void EntropyContrast::initialize_NNcontinuous()
{
    fill_random_uniform(w, -1.0, 1.0);
    fill_random_uniform(v, -1.0, 1.0);

    fill_random_uniform(bias_hidden, -1.0, 1.0);
    fill_random_uniform(bias_output, -1.0, 1.0);

    mu_f.fill(0.0);
    sigma_f.fill(1.0);

    mu_f_hat.fill(0.0);
    sigma_f_hat.fill(1.0);

    mu_g.fill(0.0);
    sigma_g.fill(1.0);

    mu_f_square.fill(0.0);
    sigma_f_square.fill(1.0);

    full = 1.0;
}
//! Compute the hidden-layer activations: hidden = tanh(v * input + bias_hidden).
void EntropyContrast::computeNNcontinuous_hidden(const Vec& input_units, Vec& hidden_units)
{
    for (int i = 0; i < nhidden; ++i)
    {
        hidden_units[i] = bias_hidden[i];
        for (int j = 0; j < n; ++j)
            hidden_units[i] += v(i,j) * input_units[j];
    }
    compute_tanh(hidden_units, hidden_units);
}
//! Compute the constraint outputs: output = w * hidden + bias_output.
void EntropyContrast::computeNNcontinuous_constraints(Vec& hidden_units, Vec& output_units)
{
    for (int i = 0; i < nconstraints; ++i)
    {
        output_units[i] = bias_output[i];
        for (int j = 0; j < nhidden; ++j)
            output_units[i] += w(i,j) * hidden_units[j];
    }
}
//! Forward pass: compute both the hidden units and the constraint outputs.
void EntropyContrast::get_NNcontinuous_output(const Vec& input_units, Vec& output_units, Vec& hidden_units)
{
    computeNNcontinuous_hidden(input_units, hidden_units);
    computeNNcontinuous_constraints(hidden_units, output_units);
}
//! Fill output with samples drawn from N(0,1).
void EntropyContrast::gen_normal_0_1(Vec& output)
{
    for (int i = 0; i < output.length(); ++i)
        output[i] = gaussian_01();
}
//! Update the running mean and variance of f with an exponential moving average.
void EntropyContrast::update_mu_sigma_f(const Vec& f_x, Vec& mu, Vec& sigma)
{
    mu = mu * alpha + f_x * (1 - alpha);
    sigma = alpha * sigma + (1 - alpha) * square(f_x - mu);
}
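/* A small worked instance of the update above (illustrative values only):
   with alpha = 0.9, mu = 0, sigma = 1 and f_x = 2,
       mu    <- 0.9 * 0 + 0.1 * 2             = 0.2
       sigma <- 0.9 * 1 + 0.1 * (2 - 0.2)^2   = 1.224
   Note that sigma is computed with the freshly updated mu, exactly as in the
   code above. */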
//! Update the moving-average coefficient alpha as training progresses.
void EntropyContrast::update_alpha(int stage, int current_input_index)
{
    if (stage == 0)
        alpha = 1.0 - 1.0 / (current_input_index + 2);
    else
        alpha = 1.0 - 1.0 / inputsize;
}
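/* Following the formulas above, alpha ramps up during the first stage as
   1 - 1/(t+2) = 1/2, 2/3, 3/4, ... over the example index t, so the running
   statistics start out close to plain averages; from the second stage on it
   is held at 1 - 1/inputsize, giving a long-memory exponential average. */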
//! Compute the diversity (extra) cost and its gradient w.r.t. f(x).
//! Note: 'cost' is an output; a PLearn Vec is a view, so its elements can be
//! written through a const reference.
void EntropyContrast::compute_diversity_cost(const Vec& f_x, const Vec& cost, Vec& grad_C_extra_cost_wrt_f_x)
{
    cost.fill(0.0);
    for (int i = 0; i < nconstraints; ++i)
    {
        for (int j = 0; j <= i; ++j)
            cost[i] += pow(f_x[j], 2);
        cost[i] /= i + 1;
    }

    Vec full_sum(nconstraints);
    full_sum[0] = pow(f_x[0],2) - (sigma_f[0] + pow(mu_f[0],2));
    // Only constraints 1..nconstraints-1 get a gradient: the first constraint
    // has no predecessors to be contrasted with, so grad_C_extra_cost_wrt_f_x[0]
    // is left untouched.
    for (int i = 1; i < nconstraints; ++i)
    {
        full_sum[i] = full_sum[i-1] + (pow(f_x[i],2) - (sigma_f[i] + pow(mu_f[i],2)));
        grad_C_extra_cost_wrt_f_x[i] = full_sum[i-1] * f_x[i] / train_set.length();
    }
}
//! Compute the Jacobian df/dx of the constraint outputs w.r.t. the input.
void EntropyContrast::compute_df_dx(Mat& df_dx, const Vec& input)
{
    Vec ones(nhidden);
    ones.fill(1);
    Vec hidden(nhidden);
    hidden = product(v, input);
    hidden = hidden + bias_hidden;
    Vec diag(nhidden);
    diag = ones - square(tanh(hidden));
    diagonalizedFactorsProduct(df_dx, w, diag, v);
}
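/* In matrix form, the computation above is the Jacobian of the one-hidden-layer
   tanh network:
       df/dx = w * diag(1 - tanh(v*x + bias_hidden)^2) * v
   with diagonalizedFactorsProduct evaluating the product w * diag(diag) * v
   without forming the diagonal matrix explicitly. */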
//! Gradient of the variance-based cost on the real data w.r.t. f(x).
void EntropyContrast::get_real_grad_variance_wrt_f(const Vec& f_x, Vec& grad)
{
    for (int i = 0; i < f_x.length(); ++i)
        grad[i] = (f_x[i] - mu_f[i]) / sigma_f[i];
}
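/* Up to constants, this is the gradient of the per-constraint cost
       C_i = (f_i(x) - mu_i)^2 / (2 * sigma_i)
   with mu_i and sigma_i treated as fixed:
       dC_i/df_i = (f_i(x) - mu_i) / sigma_i. */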
//! Gradient of the variance-based cost on the generated data w.r.t. f(x_hat).
void EntropyContrast::get_gen_grad_variance_wrt_f(const Vec& f_x_hat, Vec& grad)
{
    for (int i = 0; i < f_x_hat.length(); ++i)
        grad[i] = (f_x_hat[i] - mu_f_hat[i]) / sigma_f_hat[i];
}
//! Backpropagate grad_C_real_wrt_f_x through the network, filling the
//! gradients w.r.t. the weights (w, v) and the biases.
void EntropyContrast::set_NNcontinuous_gradient(Vec& grad_C_real_wrt_f_x, Mat& grad_H_f_x_wrt_w, Mat& grad_H_f_x_wrt_v,
                                                Vec& hidden_units, Vec& input_units,
                                                Vec& grad_H_f_x_wrt_bias_hidden, Vec& grad_H_f_x_wrt_bias_output)
{
    // Gradient w.r.t. the hidden-to-output weights w.
    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < nhidden; ++j)
            grad_H_f_x_wrt_w(i,j) = grad_C_real_wrt_f_x[i] * hidden_units[j];

    // Gradient w.r.t. the output biases.
    for (int i = 0; i < nconstraints; ++i)
        grad_H_f_x_wrt_bias_output[i] = grad_C_real_wrt_f_x[i];

    // Gradient w.r.t. the input-to-hidden weights v and the hidden biases,
    // going through the tanh nonlinearity.
    real sum;
    real grad_tmp;
    for (int i = 0; i < nhidden; ++i)
    {
        sum = 0;
        for (int k = 0; k < n; ++k)
            sum += v(i,k) * input_units[k];

        grad_tmp = 0;
        for (int l = 0; l < nconstraints; ++l)
            grad_tmp += grad_C_real_wrt_f_x[l] * w(l,i);

        for (int j = 0; j < n; ++j)
            grad_H_f_x_wrt_v(i,j) = grad_tmp * (1 - tanh(bias_hidden[i] + sum) * tanh(bias_hidden[i] + sum)) * input_units[j];

        grad_H_f_x_wrt_bias_hidden[i] = grad_tmp * (1 - tanh(bias_hidden[i] + sum) * tanh(bias_hidden[i] + sum));
    }
}
//! Apply the gradients coming from the extra cost to v, w and the hidden biases.
void EntropyContrast::update_NNcontinuous_from_extra_cost()
{
    for (int i = 0; i < nhidden; ++i)
        for (int j = 0; j < n; ++j)
            v(i,j) -= learning_rate * grad_extra_wrt_v(i,j);

    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < nhidden; ++j)
            w(i,j) -= learning_rate * grad_extra_wrt_w(i,j);

    for (int j = 0; j < nhidden; ++j)
        bias_hidden[j] -= learning_rate * grad_extra_wrt_bias_hidden[j];
}
//! Apply the contrastive update: move each parameter along the difference
//! between the gradient on the real data and the gradient on the generated
//! data, plus weight decay on the weights.
void EntropyContrast::update_NNcontinuous()
{
    for (int i = 0; i < nhidden; ++i)
        for (int j = 0; j < n; ++j)
            v(i,j) -= learning_rate * (grad_H_f_x_wrt_v(i,j) - grad_H_f_x_hat_wrt_v(i,j)) + weight_decay_hidden * v(i,j);

    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < nhidden; ++j)
            w(i,j) -= learning_rate * (grad_H_f_x_wrt_w(i,j) - grad_H_f_x_hat_wrt_w(i,j)) + weight_decay_output * w(i,j);

    for (int j = 0; j < nhidden; ++j)
        bias_hidden[j] -= learning_rate * (grad_H_f_x_wrt_bias_hidden[j] - grad_H_f_x_hat_wrt_bias_hidden[j]);

    for (int j = 0; j < nconstraints; ++j)
        bias_output[j] -= learning_rate * (grad_H_f_x_wrt_bias_output[j] - grad_H_f_x_hat_wrt_bias_output[j]);
}
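/* For each parameter theta, the step above reads
       theta <- theta - lr * (dC(f(x))/dtheta - dC(f(x_hat))/dtheta) - decay * theta
   i.e. the cost is decreased on the real example x and increased on the
   generated point x_hat, which is the contrastive part of the algorithm.
   Note that, as written, the weight-decay term is not scaled by the
   learning rate. */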
//! Gradient of the extra (derivative) cost w.r.t. the Jacobian df/dx.
void EntropyContrast::compute_extra_grad_wrt_df_dx(Mat& grad_C_wrt_df_dx)
{
    // The first constraint has no predecessors to be orthogonal to.
    for (int i = 0; i < n; i++)
        grad_C_wrt_df_dx(0,i) = 0.0;

    // Dot products between the Jacobian rows of the constraints.
    Mat dot_g(nconstraints, nconstraints);
    for (int i = 0; i < nconstraints; ++i)
        for (int j = 0; j < i; ++j)
            dot_g(i,j) = dot(df_dx(i), df_dx(j));

    for (int j = 1; j < nconstraints; ++j)
        for (int k = 0; k < n; ++k)
        {
            grad_C_wrt_df_dx(j,k) = 0;
            for (int i = 0; i < j; ++i)
                grad_C_wrt_df_dx(j,k) += 2 * dot_g(j,i) * df_dx(i,k);
        }
}
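/* This implements the gradient of an orthogonality penalty on the rows of the
   Jacobian,
       C = sum_j sum_{i<j} ( df_j/dx . df_i/dx )^2,
   for which
       dC/d(df_j/dx) = sum_{i<j} 2 * (df_j/dx . df_i/dx) * df_i/dx.
   Each constraint is only penalized against the earlier ones, which are
   treated as fixed (a Gram-Schmidt-like scheme). */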
//! Backpropagate grad_C_wrt_df_dx (the gradient w.r.t. the Jacobian) to the
//! network parameters w, v and bias_hidden.
void EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(Mat& grad_C_wrt_df_dx, const Vec& input)
{
    // Recompute the hidden-layer quantities that the Jacobian depends on.
    Vec ones(nhidden);
    Vec b(nhidden);
    ones.fill(1);
    Vec hidden(nhidden);
    hidden = product(v, input);
    hidden = hidden + bias_hidden;
    Vec diag(nhidden);
    diag = ones - square(tanh(hidden));

    b = ones - tanh(hidden);

    Mat a(nhidden, nhidden);
    a.fill(0.0);
    addToDiagonal(a, diag);

    // Gradient w.r.t. w: grad_extra_wrt_w = grad_C_wrt_df_dx * v' * a.
    Mat temp(nconstraints, nhidden);
    productTranspose(temp, grad_C_wrt_df_dx, v);
    product(grad_extra_wrt_w, temp, a);

    // Gradient w.r.t. v (direct term): grad_extra_wrt_v = a * w' * grad_C_wrt_df_dx.
    {
        Mat tmp(nhidden, nconstraints);
        product(tmp, a, transpose(w));
        product(grad_extra_wrt_v, tmp, grad_C_wrt_df_dx);
    }

    // Contribution through the tanh derivative, added to v and used for bias_hidden.
    {
        Mat tmp(nhidden, n);
        product(tmp, transpose(w), grad_C_wrt_df_dx);
        Mat tmp_a(nhidden, nhidden);
        product(tmp_a, tmp, transpose(v));

        Vec temp(nhidden);
        for (int i = 0; i < nhidden; ++i)
        {
            temp[i] = (-2) * tmp_a(i,i) * b[i] * a(i,i);
            for (int j = 0; j < n; ++j)
                grad_extra_wrt_v(i,j) += temp[i] * input[j];
        }
        grad_extra_wrt_bias_hidden = temp;
    }
}
void EntropyContrast::build_()
{
    if (!train_set.isNull())
    {
        n = train_set->width();
        inputsize = train_set->length(); // note: "inputsize" holds the number of training examples

        x.resize(n);
        f_x.resize(nconstraints);
        grad_C_real_wrt_f_x.resize(nconstraints);

        x_hat.resize(n);
        f_x_hat.resize(nconstraints);
        grad_C_generated_wrt_f_x_hat.resize(nconstraints);

        grad_C_extra_cost_wrt_f_x.resize(nconstraints);

        starting_learning_rate = learning_rate;
        n_seen_examples = 0;

        w.resize(nconstraints, nhidden);
        z_x.resize(nhidden);
        z_x_hat.resize(nhidden);
        v.resize(nhidden, n);

        mu_f.resize(nconstraints);
        mu_f_hat.resize(nconstraints);
        sigma_f.resize(nconstraints);
        sigma_f_hat.resize(nconstraints);
        mu_f_square.resize(nconstraints);
        sigma_f_square.resize(nconstraints);

        bias_hidden.resize(nhidden);
        bias_output.resize(nconstraints);

        grad_H_f_x_wrt_bias_output.resize(nconstraints);
        grad_H_f_x_wrt_bias_hidden.resize(nhidden);
        grad_H_f_x_hat_wrt_bias_output.resize(nconstraints);
        grad_H_f_x_hat_wrt_bias_hidden.resize(nhidden);

        grad_H_f_x_hat_wrt_w.resize(nconstraints, nhidden);
        grad_H_f_x_wrt_w.resize(nconstraints, nhidden);
        grad_H_g_wrt_w.resize(nconstraints, nhidden);

        grad_H_f_x_wrt_v.resize(nhidden, n);
        grad_H_f_x_hat_wrt_v.resize(nhidden, n);

        sigma_g.resize(nconstraints);
        mu_g.resize(nconstraints);
        g_x.resize(nconstraints);

        grad_C_wrt_df_dx.resize(nconstraints, n);
        df_dx.resize(nconstraints, n);

        grad_extra_wrt_w.resize(nconstraints, nhidden);
        grad_extra_wrt_v.resize(nhidden, n);

        full_sum.resize(nconstraints);
    }
}
void EntropyContrast::build()
{
    inherited::build();
    build_();
}

void EntropyContrast::makeDeepCopyFromShallowCopy(map<const void*, void*>& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
}

int EntropyContrast::outputsize() const
{
    return nconstraints;
}

void EntropyContrast::forget()
{
    initialize_NNcontinuous();
}
void EntropyContrast::train()
{
    int t;
    manual_seed(12345678);
    forget();
    for (; stage < nstages; stage++)
    {
        cout << getInfo() << endl;
        cout << "Stage = " << stage << endl;
        cout << "Learning rate = " << learning_rate << endl;

        for (t = 0; t < train_set.length(); ++t)
        {
            update_alpha(stage, t);

            train_set->getRow(t, x);

            // Forward pass on the real example, and update of its running statistics.
            get_NNcontinuous_output(x, f_x, z_x);
            update_mu_sigma_f(f_x, mu_f, sigma_f);
            if (cost_real == "constraint_variance")
                update_mu_sigma_f(square(f_x), mu_f_square, sigma_f_square);

            // Gradient of the real cost w.r.t. f(x).
            if (cost_real == "constraint_variance") {
                get_real_grad_variance_wrt_f(f_x, grad_C_real_wrt_f_x);
            }
            grad_C_real_wrt_f_x *= weight_real;

            // Extra cost and its gradient.
            if (cost_extra == "variance_sum_square_constraints") {
                compute_diversity_cost(f_x, g_x, grad_C_extra_cost_wrt_f_x);
                grad_C_extra_cost_wrt_f_x *= weight_extra;
            }
            if (cost_extra == "derivative")
            {
                compute_df_dx(df_dx, x);
                compute_extra_grad_wrt_df_dx(grad_C_wrt_df_dx);
                grad_C_wrt_df_dx *= weight_extra;
            }

            // Backpropagate the real-data gradient to the parameters.
            set_NNcontinuous_gradient(grad_C_real_wrt_f_x, grad_H_f_x_wrt_w, grad_H_f_x_wrt_v, z_x, x,
                                      grad_H_f_x_wrt_bias_hidden, grad_H_f_x_wrt_bias_output);

            if (cost_extra == "derivative") {
                set_NNcontinuous_gradient_from_extra_cost(grad_C_wrt_df_dx, x);
            }

            if (cost_extra == "variance_sum_square_constraints") {
                // Note: this runs after set_NNcontinuous_gradient has already
                // consumed grad_C_real_wrt_f_x for this example.
                for (int it = 0; it < grad_C_real_wrt_f_x.length(); it++)
                    grad_C_real_wrt_f_x[it] += grad_C_extra_cost_wrt_f_x[it];
            }

            // Generate a point and compute the gradient of the cost on it.
            if (gen_method == "N(0,1)") {
                gen_normal_0_1(x_hat);
            }

            get_NNcontinuous_output(x_hat, f_x_hat, z_x_hat);
            update_mu_sigma_f(f_x_hat, mu_f_hat, sigma_f_hat);

            if (cost_gen == "constraint_variance") {
                get_gen_grad_variance_wrt_f(f_x_hat, grad_C_generated_wrt_f_x_hat);
            }

            for (int it = 0; it < grad_C_generated_wrt_f_x_hat.length(); it++) {
                grad_C_generated_wrt_f_x_hat[it] *= weight_gen;
            }

            set_NNcontinuous_gradient(grad_C_generated_wrt_f_x_hat, grad_H_f_x_hat_wrt_w, grad_H_f_x_hat_wrt_v, z_x_hat, x_hat,
                                      grad_H_f_x_hat_wrt_bias_hidden, grad_H_f_x_hat_wrt_bias_output);

            // Parameter updates.
            update_NNcontinuous();
            if (cost_extra == "derivative") {
                update_NNcontinuous_from_extra_cost();
            }
            n_seen_examples++;

            // Running estimate of the covariance between the first two constraints.
            full = alpha * full
                 + (1 - alpha) * (f_x[0] * f_x[0] - (sigma_f[0] + mu_f[0]*mu_f[0]))
                               * (f_x[1] * f_x[1] - (sigma_f[1] + mu_f[1]*mu_f[1]));
        }

        learning_rate = starting_learning_rate / (1 + decay_factor * n_seen_examples);

        // Evaluation of the learned constraints.
        if (stage % evaluate_every_n_epochs == 0 && !(!evaluate_first_epoch && stage == 0))
        {
            if (evaluation_method == "dump_all")
            {
                if (n_seen_examples == 250000)
                {
                    // Dump the two Jacobian rows and the cosine of the angle
                    // between them on a 3-d grid (assumes nconstraints == 2 and
                    // n == 3; note that bias_hidden is not added to the hidden
                    // activations here, unlike in compute_df_dx).
                    FILE* f1 = fopen("gen1.dat", "wt");
                    FILE* f2 = fopen("gen2.dat", "wt");
                    FILE* f3 = fopen("gen3.dat", "wt");

                    for (int i = -10; i <= 10; i += 2)
                        for (int j = -1; j <= 9; j += 2)
                            for (int k = -1; k <= 9; k += 3)
                            {
                                Mat res(2,3);
                                Vec input(3);
                                Vec ones(nhidden);
                                ones.fill(1);
                                input[0] = (real)i / 10;
                                input[1] = (real)j / 10;
                                input[2] = (real)k / 100;
                                Vec hidden(nhidden);
                                hidden = product(v, input);
                                Vec diag(nhidden);
                                diag = ones - square(tanh(hidden));
                                diagonalizedFactorsProduct(res, w, diag, v);
                                fprintf(f1, "%f %f %f %f %f %f\n", (real)i/10, (real)j/10, (real)k/100, res(0,0), res(0,1), res(0,2));
                                fprintf(f2, "%f %f %f %f %f %f\n", (real)i/10, (real)j/10, (real)k/100, res(1,0), res(1,1), res(1,2));
                                real norm0 = sqrt(res(0,0)*res(0,0) + res(0,1)*res(0,1) + res(0,2)*res(0,2));
                                real norm1 = sqrt(res(1,0)*res(1,0) + res(1,1)*res(1,1) + res(1,2)*res(1,2));
                                real angle = res(0,0)/norm0 * res(1,0)/norm1 + res(0,1)/norm0 * res(1,1)/norm1 + res(0,2)/norm0 * res(1,2)/norm1;
                                fprintf(f3, "%f %f %f %f\n", (real)i/10, (real)j/10, (real)k/100, angle);
                            }
                    fclose(f1);
                    fclose(f2);
                    fclose(f3);
                    exit(0);
                }
                for (int i = 0; i < f_x.length(); ++i)
                    cout << f_x[i] << " ";
                cout << endl << "cov = " << full / train_set.length() << endl;
                cout << "var f_square: " << sigma_f_square[0] << " " << sigma_f_square[1] << endl;
                cout << "corr: " << full / sqrt(sigma_f_square[0] / sqrt(sigma_f_square[1])) << endl;
            }
        }
        cout << "--------------------------------" << endl;
    }
}
void EntropyContrast::computeOutput(const Vec& input, Vec& output) const
{
    // Not implemented.
}

void EntropyContrast::reconstruct(const Vec& output, Vec& input) const
{
    // Not implemented.
}

void EntropyContrast::computeCostsFromOutputs(const Vec& input, const Vec& output,
                                              const Vec& target, Vec& costs) const
{
    // Not implemented.
}

TVec<string> EntropyContrast::getTestCostNames() const
{
    return TVec<string>(1, "squared_reconstruction_error");
}

TVec<string> EntropyContrast::getTrainCostNames() const
{
    return TVec<string>();
}

} // end of namespace PLearn