
GaussianKernel.cc

// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio, Rejean Ducharme and University of Montreal
// Copyright (C) 2001-2002 Nicolas Chapados, Ichiro Takeuchi, Jean-Sebastien Senecal
// Copyright (C) 2002 Xiangdong Wang, Christian Dorion

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: GaussianKernel.cc,v 1.11 2004/07/23 14:39:45 tihocan Exp $
 * This file is part of the PLearn library.
 ******************************************************* */

#include "GaussianKernel.h"

//#define GK_DEBUG

namespace PLearn {
using namespace std;

// ** GaussianKernel **

PLEARN_IMPLEMENT_OBJECT(GaussianKernel,
                        "The good old Gaussian kernel.",
                        "");

////////////////////
// GaussianKernel //
////////////////////
GaussianKernel::GaussianKernel()
    : scale_by_sigma(false),
      sigma(1)
{}

GaussianKernel::GaussianKernel(real the_sigma)
    : scale_by_sigma(false),
      sigma(the_sigma)
{
    build_();
}

////////////////////
// declareOptions //
////////////////////
void GaussianKernel::declareOptions(OptionList& ol)
{
    declareOption(ol, "sigma", &GaussianKernel::sigma, OptionBase::buildoption,
                  "The width of the Gaussian.");

    declareOption(ol, "scale_by_sigma", &GaussianKernel::scale_by_sigma, OptionBase::buildoption,
                  "If set to 1, the kernel will be scaled by sigma^2 / 2");

    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void GaussianKernel::build()
{
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void GaussianKernel::build_()
{
    minus_one_over_sigmasquare = -1.0/square(sigma);
    sigmasquare_over_two = square(sigma) / 2.0;
}


void GaussianKernel::makeDeepCopyFromShallowCopy(map<const void*, void*>& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);
    deepCopyField(squarednorms, copies);
}


void GaussianKernel::addDataForKernelMatrix(const Vec& newRow)
{
    inherited::addDataForKernelMatrix(newRow);

    int dlen  = data.length();
    int sqlen = squarednorms.length();
    if(sqlen == dlen-1)
        squarednorms.resize(dlen);
    else if(sqlen == dlen)
        for(int s=1; s < sqlen; s++)
            squarednorms[s-1] = squarednorms[s];
    else
        PLERROR("Only two scenarios are managed:\n"
                "Either the data matrix was only appended the new row or, under the windowed settings,\n"
                "newRow is the new last row and other rows were moved backward.\n"
                "However, sqlen = %d and dlen = %d excludes both!", sqlen, dlen);

    squarednorms.lastElement() = pownorm(newRow, 2);
}

/////////////////////////////////////////
// evaluateFromSquaredNormOfDifference //
/////////////////////////////////////////
inline real GaussianKernel::evaluateFromSquaredNormOfDifference(real sqnorm_of_diff) const
{
    if (sqnorm_of_diff < 0) {
        // This could happen (especially in -opt) because of approximations, when
        // it should actually be 0.
        if (sqnorm_of_diff < -1e-3)
            // This should not happen.
            PLERROR("In GaussianKernel::evaluateFromSquaredNormOfDifference - The given 'sqnorm_of_diff' is (significantly) negative (%f)", sqnorm_of_diff);
        sqnorm_of_diff = 0;
    }
    if (scale_by_sigma) {
        return exp(sqnorm_of_diff*minus_one_over_sigmasquare) * sigmasquare_over_two;
    } else {
        return exp(sqnorm_of_diff*minus_one_over_sigmasquare);
    }
}


//////////////
// evaluate //
//////////////
real GaussianKernel::evaluate(const Vec& x1, const Vec& x2) const
{
#ifdef BOUNDCHECK
    if(x1.length()!=x2.length())
        PLERROR("IN GaussianKernel::evaluate x1 and x2 must have the same length");
#endif
    int l = x1.length();
    real* px1 = x1.data();
    real* px2 = x2.data();
    real sqnorm_of_diff = 0.;
    for(int i=0; i<l; i++)
    {
        real val = px1[i]-px2[i];
        sqnorm_of_diff += val*val;
    }
    return evaluateFromSquaredNormOfDifference(sqnorm_of_diff);
}


//////////////////
// evaluate_i_j //
//////////////////
// Uses the cached squared norms together with evaluateFromDotAndSquaredNorm
// (see GaussianKernel.h), which recovers ||a-b||^2 from ||a||^2, a.b and ||b||^2,
// so only the dot product needs to be computed here.
real GaussianKernel::evaluate_i_j(int i, int j) const
{
#ifdef GK_DEBUG
    if(i==0 && j==1){
        cout << "*** i==0 && j==1 ***" << endl;
        cout << "data(" << i << "): " << data(i) << endl << endl;
        cout << "data(" << j << "): " << data(j) << endl << endl;

        real sqnorm_i = pownorm((Vec)data(i), 2);
        if(sqnorm_i != squarednorms[i])
            PLERROR("%f = sqnorm_i != squarednorms[%d] = %f", sqnorm_i, i, squarednorms[i]);

        real sqnorm_j = pownorm((Vec)data(j), 2);
        if(sqnorm_j != squarednorms[j])
            PLERROR("%f = sqnorm_j != squarednorms[%d] = %f", sqnorm_j, j, squarednorms[j]);
    }
#endif
    return evaluateFromDotAndSquaredNorm(squarednorms[i], data->dot(i,j,data_inputsize), squarednorms[j]);
}

//////////////////
// evaluate_i_x //
//////////////////
real GaussianKernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const
{
    if(squared_norm_of_x<0.)
        squared_norm_of_x = pownorm(x);

#ifdef GK_DEBUG
//     real dot_x1_x2 = data->dot(i,x);
//     cout << "data.row(" << i << "): " << data.row(i) << endl
//          << "squarednorms[" << i << "]: " << squarednorms[i] << endl
//          << "data->dot(i,x): " << dot_x1_x2 << endl
//          << "x: " << x << endl
//          << "squared_norm_of_x: " << squared_norm_of_x << endl;
//     real sqnorm_of_diff = (squarednorms[i]+squared_norm_of_x)-(dot_x1_x2+dot_x1_x2);
//     cout << "a-> sqnorm_of_diff: " << sqnorm_of_diff << endl
//          << "b-> minus_one_over_sigmasquare: " << minus_one_over_sigmasquare << endl
//          << "a*b: " << sqnorm_of_diff*minus_one_over_sigmasquare << endl
//          << "res: " << exp(sqnorm_of_diff*minus_one_over_sigmasquare) << endl;
#endif

    return evaluateFromDotAndSquaredNorm(squarednorms[i], data->dot(i,x), squared_norm_of_x);
}


//////////////////
// evaluate_x_i //
//////////////////
real GaussianKernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const
{
    if(squared_norm_of_x<0.)
        squared_norm_of_x = pownorm(x);
    return evaluateFromDotAndSquaredNorm(squared_norm_of_x, data->dot(i,x), squarednorms[i]);
}

//////////////////////////
// setDataForKernelMatrix //
//////////////////////////
void GaussianKernel::setDataForKernelMatrix(VMat the_data)
{
    inherited::setDataForKernelMatrix(the_data);
    squarednorms.resize(data.length());
    for(int index=0; index<data.length(); index++)
        squarednorms[index] = data->dot(index, index, data_inputsize);
}

///////////////////
// setParameters //
///////////////////
void GaussianKernel::setParameters(Vec paramvec)
{
    PLWARNING("In GaussianKernel: setParameters is deprecated, use setOption instead");
    sigma = paramvec[0];
    minus_one_over_sigmasquare = -1.0/(sigma*sigma);
}


} // end of namespace PLearn
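The kernel computed by this class is K(x1, x2) = exp(-||x1 - x2||^2 / sigma^2), optionally multiplied by sigma^2 / 2 when scale_by_sigma is set (note that build_() divides by sigma^2, not 2*sigma^2). The short sketch below is not part of GaussianKernel.cc; it only illustrates a direct evaluation, and it assumes the usual PLearn Vec(int) constructor and operator[], so check your PLearn version's Kernel and TVec interfaces before relying on it.

// Hedged usage sketch (not part of the library): evaluate the kernel on two
// hand-built vectors. Vec(int) and operator[] are assumed from PLearn's TVec.
#include "GaussianKernel.h"
#include <iostream>

int main()
{
    using namespace PLearn;

    GaussianKernel k(2.0);   // sigma = 2, so K(x, y) = exp(-||x - y||^2 / 4)

    Vec x1(3), x2(3);
    x1[0] = 1; x1[1] = 0; x1[2] = 2;
    x2[0] = 1; x2[1] = 1; x2[2] = 2;

    // ||x1 - x2||^2 = 1, so this should print exp(-0.25), roughly 0.7788.
    std::cout << k.evaluate(x1, x2) << std::endl;
    return 0;
}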
