
GaussianDistribution.cc

// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 2002 Pascal Vincent
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: GaussianDistribution.cc,v 1.11 2004/08/12 16:15:25 tihocan Exp $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "GaussianDistribution.h"
//#include "fileutils.h"
#include <plearn/vmat/VMat_maths.h>
#include <plearn/math/plapack.h>
#include <plearn/math/distr_maths.h>
#include <plearn/math/random.h>

namespace PLearn {
using namespace std;

#define ZEROGAMMA

PLEARN_IMPLEMENT_OBJECT(GaussianDistribution, "ONE LINE DESCR", "NO HELP");

void GaussianDistribution::makeDeepCopyFromShallowCopy(CopiesMap& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(mu, copies);
    deepCopyField(eigenvalues, copies);
    deepCopyField(eigenvectors, copies);
}

GaussianDistribution::GaussianDistribution()
    :k(1000), gamma(0), ignore_weights_below(0)
{
}

void GaussianDistribution::declareOptions(OptionList& ol)
{
    // Build options
    declareOption(ol, "k", &GaussianDistribution::k, OptionBase::buildoption,
                  "number of eigenvectors to keep");
    declareOption(ol, "gamma", &GaussianDistribution::gamma, OptionBase::buildoption,
                  "Add this to diagonal of empirical covariance matrix.\n"
                  "The actual covariance matrix used will be VDV' + gamma.I \n"
                  "where V'=eigenvectors and D=diag(eigenvalues).");
    declareOption(ol, "ignore_weights_below", &GaussianDistribution::ignore_weights_below, OptionBase::buildoption,
                  "When doing a weighted fitting (weightsize==1), points with a weight below this value will be ignored");

    // Learnt options
    declareOption(ol, "mu", &GaussianDistribution::mu, OptionBase::learntoption, "");
    declareOption(ol, "eigenvalues", &GaussianDistribution::eigenvalues, OptionBase::learntoption, "");
    declareOption(ol, "eigenvectors", &GaussianDistribution::eigenvectors, OptionBase::learntoption, "");

    inherited::declareOptions(ol);
}

void GaussianDistribution::forget()
{ }

void GaussianDistribution::train()
{
    VMat training_set = getTrainingSet();
    int l = training_set.length();
    int d = training_set.width();
    int ws = training_set->weightsize();

    if(d!=inputsize()+ws)
        PLERROR("In GaussianDistribution::train width of training_set should be equal to inputsize()+weightsize()");

    // these are used in SVD
    static Mat trainmat;
    static Mat U;

    // The maximum number of eigenvalues we want.
    int maxneigval = min(k+1, min(l,d));

    // First get mean and covariance
    // (declared static to avoid repeated dynamic memory allocation)
    static Mat covarmat;

    if(ws==0)
        computeMeanAndCovar(training_set, mu, covarmat);
    else if(ws==1)
        computeWeightedMeanAndCovar(training_set, mu, covarmat, ignore_weights_below);
    else
        PLERROR("In GaussianDistribution, weightsize can only be 0 or 1");

    // cerr << "maxneigval: " << maxneigval << " ";
    eigenVecOfSymmMat(covarmat, maxneigval, eigenvalues, eigenvectors);
    // cerr << eigenvalues.length() << endl;
    // cerr << "eig V: \n" << V << endl;
}

real GaussianDistribution::log_density(const Vec& x) const
{
    return logOfCompactGaussian(x, mu, eigenvalues, eigenvectors, gamma, true);
}

void GaussianDistribution::resetGenerator(long g_seed) const
{
    manual_seed(g_seed);
}

void GaussianDistribution::generate(Vec& x) const
{
    static Vec r;
    int n = eigenvectors.length();
    int m = mu.length();
    r.resize(n);
    fill_random_normal(r);
    for(int i=0; i<n; i++)
        r[i] *= sqrt(eigenvalues[i]);
    x.resize(m);
    transposeProduct(x, eigenvectors, r);
    r.resize(m);
    fill_random_normal(r, 0, gamma);
    x += r;
    x += mu;
}

// inputsize //
int GaussianDistribution::inputsize() const {
    if (train_set || mu.length() == 0)
        return inherited::inputsize();
    return mu.length();
}

} // end of namespace PLearn
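For reference, the "gamma" option above documents the covariance model actually fitted: eigenvectors' * diag(eigenvalues) * eigenvectors + gamma*I, with one unit eigenvector per row of the eigenvectors matrix. The following is a minimal, self-contained sketch of how the log-density of such a low-rank-plus-isotropic Gaussian can be evaluated. It is illustrative only: it uses plain std::vector instead of PLearn's Vec/Mat, the name sketch_log_density is made up, it assumes gamma > 0, and it is not the library's logOfCompactGaussian implementation.

// Illustrative sketch (not PLearn code): log-density of a Gaussian whose
// covariance is E' * diag(lambda) * E + gamma * I, where E holds one unit
// eigenvector per row (like GaussianDistribution::eigenvectors).
#include <cmath>
#include <vector>

double sketch_log_density(const std::vector<double>& x,
                          const std::vector<double>& mu,
                          const std::vector<double>& eigenvalues,                 // n kept eigenvalues
                          const std::vector<std::vector<double> >& eigenvectors,  // n rows of length d
                          double gamma)                                           // must be > 0 here
{
    const int d = (int)x.size();
    const int n = (int)eigenvalues.size();
    const double log_2pi = std::log(2.0 * std::acos(-1.0));

    // Squared norm of (x - mu).
    double sqnorm = 0.0;
    for (int j = 0; j < d; ++j)
        sqnorm += (x[j] - mu[j]) * (x[j] - mu[j]);

    double logdet = (d - n) * std::log(gamma);  // the d-n discarded directions have variance gamma
    double quad = 0.0;                          // quadratic form (x-mu)' C^-1 (x-mu)
    double explained = 0.0;                     // part of sqnorm lying in the kept eigen-subspace

    for (int i = 0; i < n; ++i) {
        double proj = 0.0;                      // eigenvectors[i] . (x - mu)
        for (int j = 0; j < d; ++j)
            proj += eigenvectors[i][j] * (x[j] - mu[j]);
        const double var = eigenvalues[i] + gamma;  // variance along the i-th kept direction
        logdet += std::log(var);
        quad += proj * proj / var;
        explained += proj * proj;
    }
    quad += (sqnorm - explained) / gamma;       // leftover component, isotropic with variance gamma

    return -0.5 * (d * log_2pi + logdet + quad);
}

Working in the eigen-basis this way never forms or inverts the full d x d covariance matrix, which is why keeping only the top k eigenvectors (the "k" option) stays cheap even in high dimension.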
