00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00045
#include "GaussianDistribution.h"
00046
00047
#include <plearn/vmat/VMat_maths.h>
00048
#include <plearn/math/plapack.h>
00049
#include <plearn/math/distr_maths.h>
00050
#include <plearn/math/random.h>
00051
00052
namespace PLearn {
00053
using namespace std;
00054
00055 #define ZEROGAMMA
00056
00057
PLEARN_IMPLEMENT_OBJECT(GaussianDistribution,
"ONE LINE DESCR",
"NO HELP");
00058
// Deep-copies the learnt fields (mu, eigenvalues, eigenvectors) so that a
// deep copy of this object does not share storage with the original.
void GaussianDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(eigenvectors, copies);
}
00066
00067
// Default constructor: keep up to 1000 eigenvectors, add nothing (gamma=0)
// to the covariance diagonal, and ignore no samples by weight (threshold 0).
GaussianDistribution::GaussianDistribution()
    :k(1000), gamma(0), ignore_weights_below(0)
{
}
00072
00073
// Registers build options (k, gamma, ignore_weights_below) and learnt
// options (mu, eigenvalues, eigenvectors) for serialization/help.
void GaussianDistribution::declareOptions(OptionList& ol)
{
    // --- Build options ---
    declareOption(ol, "k", &GaussianDistribution::k, OptionBase::buildoption,
                  "number of eigenvectors to keep");
    declareOption(ol, "gamma", &GaussianDistribution::gamma, OptionBase::buildoption,
                  "Add this to diagonal of empirical covariance matrix.\n"
                  "The actual covariance matrix used will be VDV' + gamma.I \n"
                  "where V'=eigenvectors and D=diag(eigenvalues).");
    declareOption(ol, "ignore_weights_below", &GaussianDistribution::ignore_weights_below, OptionBase::buildoption,
                  "When doing a weighted fitting (weightsize==1), points with a weight below this value will be ignored");

    // --- Learnt options (set by train()) ---
    declareOption(ol, "mu", &GaussianDistribution::mu, OptionBase::learntoption, "");
    declareOption(ol, "eigenvalues", &GaussianDistribution::eigenvalues, OptionBase::learntoption, "");
    declareOption(ol, "eigenvectors", &GaussianDistribution::eigenvectors, OptionBase::learntoption, "");

    inherited::declareOptions(ol);
}
00093
// Intentionally empty: train() recomputes mu/eigenvalues/eigenvectors from
// scratch each call, so there is no incremental state to reset.
// NOTE(review): learnt fields are NOT cleared here — confirm that no caller
// relies on forget() emptying mu/eigenvalues/eigenvectors.
void GaussianDistribution::forget()
{ }
00096
00097 void GaussianDistribution::train()
00098 {
00099
VMat training_set =
getTrainingSet();
00100
int l = training_set.
length();
00101
int d = training_set.
width();
00102
int ws = training_set->weightsize();
00103
00104
if(d!=
inputsize()+
ws)
00105
PLERROR(
"In GaussianDistribution::train width of training_set should be equal to inputsize()+weightsize()");
00106
00107
00108
static Mat trainmat;
00109
static Mat U;
00110
00111
00112
int maxneigval =
min(
k+1,
min(l,d));
00113
00114
00115
00116
static Mat covarmat;
00117
00118
if(
ws==0)
00119
computeMeanAndCovar(training_set,
mu, covarmat);
00120
else if(
ws==1)
00121
computeWeightedMeanAndCovar(training_set,
mu, covarmat,
ignore_weights_below);
00122
else
00123
PLERROR(
"In GaussianDistribution, weightsize can only be 0 or 1");
00124
00125
00126
eigenVecOfSymmMat(covarmat, maxneigval,
eigenvalues,
eigenvectors);
00127
00128
00129 }
00130
// Log-density of x under the fitted compact Gaussian: mean mu, covariance
// represented by the kept eigenpairs plus gamma on the diagonal.
// The trailing 'true' flag is passed through to logOfCompactGaussian;
// NOTE(review): its meaning is defined in distr_maths.h — verify there.
real GaussianDistribution::log_density(const Vec& x) const
{
    return logOfCompactGaussian(x, mu, eigenvalues, eigenvectors, gamma, true);
}
00135
00136
// Re-seeds random generation for reproducible generate() calls.
// NOTE(review): manual_seed() seeds PLearn's GLOBAL random number generator,
// so this affects every consumer of that RNG, not just this object.
void GaussianDistribution::resetGenerator(long g_seed) const
{
    manual_seed(g_seed);
}
00141
// Draws one sample: x = mu + V'*(sqrt(D).z1) + noise(gamma).z2, with z1,z2
// standard-normal vectors, matching the modeled covariance VDV' + gamma.I.
// NOTE(review): 'static Vec r' makes this method non-reentrant/not
// thread-safe; confirm single-threaded use.
void GaussianDistribution::generate(Vec& x) const
{
    static Vec r;                    // scratch buffer, reused across calls
    int n = eigenvectors.length();   // number of kept eigenvectors
    int m = mu.length();             // dimensionality of the output
    // z1 ~ N(0,I) in eigen-space, scaled by sqrt of each eigenvalue.
    r.resize(n);
    fill_random_normal(r);
    for(int i=0; i<n; i++)
        r[i] *= sqrt(eigenvalues[i]);
    x.resize(m);
    // Map back to input space: x = eigenvectors' * r.
    transposeProduct(x, eigenvectors, r);
    // Add isotropic noise for the gamma.I part of the covariance.
    // NOTE(review): gamma is passed as the third argument of
    // fill_random_normal; if that parameter is a standard deviation, this
    // should arguably be sqrt(gamma) to realize covariance gamma.I — verify
    // against plearn/math/random.h before changing.
    r.resize(m);
    fill_random_normal(r,0,gamma);
    x += r;
    x += mu;
}
00158
00160
00162 int GaussianDistribution::inputsize()
const {
00163
if (train_set ||
mu.
length() == 0)
00164
return inherited::inputsize();
00165
return mu.
length();
00166 }
00167
00168 }