00001 
00002 
00003 
00004 
00005 
00006 
00007 
00008 
00009 
00010 
00011 
00012 
00013 
00014 
00015 
00016 
00017 
00018 
00019 
00020 
00021 
00022 
00023 
00024 
00025 
00026 
00027 
00028 
00029 
00030 
00031 
00032 
00033 
00034 
00035 
00036 
00037 
00038 
00039 
00040 
00041 
00042 
00043 
#include "GaussianKernel.h"
00044 
00045 
00046 
00047 
namespace PLearn {
00048 
using namespace std;
00049 
00050 
00051 
00052 
// Register GaussianKernel in the PLearn object system (enables serialization,
// option handling and factory instantiation by class name).
PLEARN_IMPLEMENT_OBJECT(GaussianKernel,
                        "The good old Gaussian kernel.",
                        "");
00055 
00057 
00059 GaussianKernel::GaussianKernel()
00060 : scale_by_sigma(false),
00061   sigma(1)
00062 {}
00063 
00064 GaussianKernel::GaussianKernel(
real the_sigma)
00065 : scale_by_sigma(false),
00066   sigma(the_sigma)
00067 {
00068   
build_();
00069 }
00070 
00072 
//! Declares the build options of this kernel for the PLearn option system.
//! @param ol  option list to append this class's options to.
void GaussianKernel::declareOptions(OptionList& ol)
{
  // Width of the Gaussian; build_() derives the cached constants from it.
  declareOption(ol, "sigma", &GaussianKernel::sigma, OptionBase::buildoption,
                "The width of the Gaussian.");

  // When true, evaluateFromSquaredNormOfDifference multiplies by sigma^2/2.
  declareOption(ol, "scale_by_sigma", &GaussianKernel::scale_by_sigma, OptionBase::buildoption,
                "If set to 1, the kernel will be scaled by sigma^2 / 2");

  // Let the parent class declare its own options as well.
  inherited::declareOptions(ol);
}
00084 
00086 
//! Standard PLearn build: runs the parent build, then this class's build_().
void GaussianKernel::build()
{
  inherited::build();
  build_();
}
00093 
00095 
00097 void GaussianKernel::build_()
00098 {
00099   
minus_one_over_sigmasquare = -1.0/
square(
sigma);
00100   
sigmasquare_over_two = 
square(
sigma) / 2.0;
00101 }
00102 
00103 
//! Deep-copies the fields of this kernel that hold shared storage.
//! @param copies  map from original pointers to their copies, as per the
//!                PLearn deep-copy protocol.
void GaussianKernel::makeDeepCopyFromShallowCopy(map<const void*, void*>& copies)
{
  inherited::makeDeepCopyFromShallowCopy(copies);
  // squarednorms caches per-row squared norms of 'data'; it must be
  // deep-copied so the copy does not alias the original's cache.
  deepCopyField(squarednorms,copies);
}
00109 
00110 
00111 void GaussianKernel::addDataForKernelMatrix(
const Vec& newRow)
00112 {
00113   inherited::addDataForKernelMatrix(newRow);
00114 
00115   
int dlen  = data.
length();
00116   
int sqlen = 
squarednorms.
length();
00117   
if(sqlen == dlen-1)
00118     
squarednorms.
resize(dlen);
00119   
else if(sqlen == dlen)
00120     
for(
int s=1; s < sqlen; s++)
00121       
squarednorms[s-1] = 
squarednorms[s];  
00122   
else
00123     
PLERROR(
"Only two scenarios are managed:\n"
00124             
"Either the data matrix was only appended the new row or, under the windowed settings,\n"
00125             
"newRow is the new last row and other rows were moved backward.\n"
00126             
"However, sqlen = %d and dlen = %d excludes both!", sqlen, dlen);
00127   
00128   squarednorms.lastElement() = 
pownorm(newRow, 2); 
00129 }
00130 
00132 
00134 inline real GaussianKernel::evaluateFromSquaredNormOfDifference(
real sqnorm_of_diff)
 const
00135 
{
00136   
if (sqnorm_of_diff < 0) {
00137     
00138     
00139     
if (sqnorm_of_diff < -1e-3)
00140       
00141       
PLERROR(
"In GaussianKernel::evaluateFromSquaredNormOfDifference - The given 'sqnorm_of_diff' is (significantly) negative (%f)", sqnorm_of_diff);
00142     sqnorm_of_diff = 0;
00143   }
00144   
if (
scale_by_sigma) {
00145     
return exp(sqnorm_of_diff*
minus_one_over_sigmasquare) * 
sigmasquare_over_two;
00146   } 
else {
00147     
return exp(sqnorm_of_diff*
minus_one_over_sigmasquare);
00148   }
00149 }
00150 
00151 
00153 
00155 real GaussianKernel::evaluate(
const Vec& x1, 
const Vec& x2)
 const
00156 
{
00157 
#ifdef BOUNDCHECK
00158 
  if(x1.
length()!=x2.
length())
00159     
PLERROR(
"IN GaussianKernel::evaluate x1 and x2 must have the same length");
00160 
#endif
00161 
  int l = x1.
length();
00162   
real* px1 = x1.
data();
00163   
real* px2 = x2.
data();
00164   
real sqnorm_of_diff = 0.;
00165   
for(
int i=0; i<l; i++)
00166     {
00167       
real val = px1[i]-px2[i];
00168       sqnorm_of_diff += 
val*
val;
00169     }
00170   
return evaluateFromSquaredNormOfDifference(sqnorm_of_diff);
00171 }
00172 
00173 
00175 
//! Evaluates the kernel between data rows i and j, using the cached squared
//! norms and a dot product restricted to the first data_inputsize columns.
//! @param i  index of the first data row.
//! @param j  index of the second data row.
real GaussianKernel::evaluate_i_j(int i, int j) const
{ 
#ifdef GK_DEBUG 
  // Debug-only sanity check (for the pair i==0, j==1): verify that the
  // cached squared norms match freshly recomputed ones.
  if(i==0 && j==1){
    cout << "*** i==0 && j==1 ***" << endl;
    cout << "data(" << i << "): " << data(i) << endl << endl;
    cout << "data(" << j << "): " << data(j) << endl << endl;  

    real sqnorm_i = pownorm((Vec)data(i), 2);
    if(sqnorm_i != squarednorms[i])
      PLERROR("%f = sqnorm_i != squarednorms[%d] = %f", sqnorm_i, i, squarednorms[i]);

    real sqnorm_j = pownorm((Vec)data(j), 2);
    if(sqnorm_j != squarednorms[j])
      PLERROR("%f = sqnorm_j != squarednorms[%d] = %f", sqnorm_j, j, squarednorms[j]);
  }
#endif
  // ||a-b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed from the cached norms
  // and the dot product over the input part of the rows.
  return evaluateFromDotAndSquaredNorm(squarednorms[i],data->dot(i,j,data_inputsize),squarednorms[j]); 
}
00196 
00198 
00200 real GaussianKernel::evaluate_i_x(
int i, 
const Vec& x, 
real squared_norm_of_x)
 const 
00201 
{ 
00202   
if(squared_norm_of_x<0.)
00203     squared_norm_of_x = 
pownorm(
x);
00204 
00205 
#ifdef GK_DEBUG 
00206 
00207 
00208 
00209 
00210 
00211 
00212 
00213 
00214 
00215 
00216 
00217 
#endif
00218 
00219   
return evaluateFromDotAndSquaredNorm(
squarednorms[i],data->dot(i,
x),squared_norm_of_x); 
00220 }
00221 
00222 
00224 
00226 real GaussianKernel::evaluate_x_i(
const Vec& x, 
int i, 
real squared_norm_of_x)
 const
00227 
{ 
00228   
if(squared_norm_of_x<0.)
00229     squared_norm_of_x = 
pownorm(
x);
00230   
return evaluateFromDotAndSquaredNorm(squared_norm_of_x,data->dot(i,
x),
squarednorms[i]); 
00231 }
00232 
00234 
00236 void GaussianKernel::setDataForKernelMatrix(
VMat the_data)
00237 { 
00238   inherited::setDataForKernelMatrix(the_data);
00239   
squarednorms.
resize(data.
length());
00240   
for(
int index=0; index<data.
length(); index++)
00241     
squarednorms[index] = data->dot(index,index, data_inputsize);
00242 }
00243 
00245 
00247 void GaussianKernel::setParameters(
Vec paramvec)
00248 { 
00249   
PLWARNING(
"In GaussianKernel: setParameters is deprecated, use setOption instead");
00250   
sigma = paramvec[0]; 
00251   
minus_one_over_sigmasquare = -1.0/(
sigma*
sigma);
00252 }
00253 
00254 
00255 } 
00256