
AdditiveNormalizationKernel.cc

// -*- C++ -*-

// AdditiveNormalizationKernel.cc
//
// Copyright (C) 2004 Olivier Delalleau
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
 * $Id: AdditiveNormalizationKernel.cc,v 1.9 2004/07/21 17:04:44 tihocan Exp $
 ******************************************************* */

// Authors: Olivier Delalleau

#include "AdditiveNormalizationKernel.h"

namespace PLearn {
using namespace std;

///////////////////////////////////
// AdditiveNormalizationKernel //
///////////////////////////////////
AdditiveNormalizationKernel::AdditiveNormalizationKernel()
    : data_will_change(false),
      double_centering(false),
      remove_bias(false),
      remove_bias_in_evaluate(false)
{}

AdditiveNormalizationKernel::AdditiveNormalizationKernel
    (Ker the_source, bool the_remove_bias, bool the_remove_bias_in_evaluate,
     bool the_double_centering)
    : data_will_change(false),
      double_centering(the_double_centering),
      remove_bias(the_remove_bias),
      remove_bias_in_evaluate(the_remove_bias_in_evaluate)
{
    source_kernel = the_source;
    build();
}

PLEARN_IMPLEMENT_OBJECT(AdditiveNormalizationKernel,
    "Normalizes additively an underlying kernel with respect to a training set.",
    "From a kernel K, defines a new kernel K' such that:\n"
    "    K'(x,y) = K(x,y) - E[K(x,x_i)] - E[K(x_i,y)] + E[K(x_i,x_j)]\n"
    "where the expectation is performed on the data set.\n"
    "If the 'remove_bias' option is set, then the expectation will not\n"
    "take into account terms of the form K(x_i,x_i).\n"
    "If the 'double_centering' option is set, this kernel K' will be\n"
    "multiplied by -1/2 (this turns a squared distance kernel into a\n"
    "centered dot product kernel).\n"
);
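// Usage sketch (illustrative only; 'GaussianKernel' and the variable names
// below are assumptions, not taken from this file):
//
//     Ker source = new GaussianKernel();
//     Ker normalized = new AdditiveNormalizationKernel(source, false, false, true);
//     normalized->setDataForKernelMatrix(train_set); // precomputes the averages
//     real k = normalized->evaluate(x1, x2);         // K'(x1, x2)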
////////////////////
// declareOptions //
////////////////////
void AdditiveNormalizationKernel::declareOptions(OptionList& ol)
{
    // Build options.

    declareOption(ol, "double_centering", &AdditiveNormalizationKernel::double_centering, OptionBase::buildoption,
        "If set to 1, then the resulting kernel will be multiplied by -1/2,\n"
        "which corresponds to the double-centering formula.");

    declareOption(ol, "data_will_change", &AdditiveNormalizationKernel::data_will_change, OptionBase::buildoption,
        "If set to 1, then the Gram matrix will always be recomputed, even if\n"
        "it is not certain that the data has changed.");

    declareOption(ol, "remove_bias", &AdditiveNormalizationKernel::remove_bias, OptionBase::buildoption,
        "If set to 1, then the bias induced by the K(x_i,x_i) terms will be removed.\n");

    declareOption(ol, "remove_bias_in_evaluate", &AdditiveNormalizationKernel::remove_bias_in_evaluate, OptionBase::buildoption,
        "If set to 1, then the bias induced by the K(x_i,x_i) terms will be removed,\n"
        "but only when evaluating K(x,y) on test points (you don't need to do this\n"
        "if 'remove_bias' == 1).");

    // Learnt options.

    declareOption(ol, "average_col", &AdditiveNormalizationKernel::average_col, OptionBase::learntoption,
        "The average of the underlying kernel over each column of the Gram matrix.");

    declareOption(ol, "average_row", &AdditiveNormalizationKernel::average_row, OptionBase::learntoption,
        "The average of the underlying kernel over each row of the Gram matrix.");

    declareOption(ol, "total_average_unbiased", &AdditiveNormalizationKernel::total_average_unbiased, OptionBase::learntoption,
        "The average of the underlying kernel over the whole Gram matrix, without\n"
        "the diagonal terms.");

    declareOption(ol, "total_average", &AdditiveNormalizationKernel::total_average, OptionBase::learntoption,
        "The average of the underlying kernel over the whole Gram matrix.");

    // Now call the parent class' declareOptions.
    inherited::declareOptions(ol);
}

///////////
// build //
///////////
void AdditiveNormalizationKernel::build()
{
    // ### Nothing to add here, simply calls build_.
    inherited::build();
    build_();
}

////////////
// build_ //
////////////
void AdditiveNormalizationKernel::build_()
{
    // ### This method should do the real building of the object,
    // ### according to set 'options', in *any* situation.
    // ### Typical situations include:
    // ###  - Initial building of an object from a few user-specified options
    // ###  - Building of a "reloaded" object: i.e. from the complete set of all serialised options.
    // ###  - Updating or "re-building" of an object after a few "tuning" options have been modified.
    // ### You should assume that the parent class' build_() has already been called.
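    // Double centering: when the source kernel K is a squared distance, the
    // quantity -1/2 * (K(x,y) - E[K(x,.)] - E[K(.,y)] + E[K(.,.)]) is a
    // centered dot product, the transformation used in classical MDS and
    // kernel PCA. Hence the -0.5 factor below.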
    if (double_centering)
        factor = -0.5;
    else
        factor = 1;
}

////////////////////
// computeAverage //
////////////////////
real AdditiveNormalizationKernel::computeAverage(const Vec& x, bool on_row, real squared_norm_of_x) const {
    all_k_x.resize(n_examples);
    if (is_symmetric || !on_row) {
        source_kernel->evaluate_all_i_x(x, all_k_x, squared_norm_of_x);
    } else {
        source_kernel->evaluate_all_x_i(x, all_k_x, squared_norm_of_x);
    }
    return sum(all_k_x) / real(n_examples);
}

///////////////////////
// computeGramMatrix //
///////////////////////
void AdditiveNormalizationKernel::computeGramMatrix(Mat K) const {
    // Uses default Kernel implementation.
    Kernel::computeGramMatrix(K);
}

//////////////
// evaluate //
//////////////
real AdditiveNormalizationKernel::evaluate(const Vec& x1, const Vec& x2) const {
    real avg_1 = computeAverage(x1, true);
    real avg_2 = computeAverage(x2, false);
    if (remove_bias || !remove_bias_in_evaluate) {
        // We can use the 'total_average'.
        return factor * (source_kernel->evaluate(x1, x2) - avg_1 - avg_2 + total_average);
    } else {
        // We need to use the 'total_average_unbiased'.
        return factor * (source_kernel->evaluate(x1, x2) - avg_1 - avg_2 + total_average_unbiased);
    }
}

//////////////////
// evaluate_i_j //
//////////////////
real AdditiveNormalizationKernel::evaluate_i_j(int i, int j) const {
    return factor * (source_kernel->evaluate_i_j(i,j) - average_row[i] - average_col[j] + total_average);
}

//////////////////
// evaluate_i_x //
//////////////////
real AdditiveNormalizationKernel::evaluate_i_x(int i, const Vec& x, real squared_norm_of_x) const {
    return factor * (source_kernel->evaluate_i_x(i, x, squared_norm_of_x)
                     - average_row[i] - computeAverage(x, false, squared_norm_of_x) + total_average);
}

////////////////////////
// evaluate_i_x_again //
////////////////////////
real AdditiveNormalizationKernel::evaluate_i_x_again(int i, const Vec& x, real squared_norm_of_x, bool first_time) const {
    if (first_time) {
        avg_evaluate_i_x_again = computeAverage(x, false, squared_norm_of_x);
    }
    return factor * (source_kernel->evaluate_i_x_again(i, x, squared_norm_of_x, first_time)
                     - average_row[i] - avg_evaluate_i_x_again + total_average);
}

//////////////////
// evaluate_x_i //
//////////////////
real AdditiveNormalizationKernel::evaluate_x_i(const Vec& x, int i, real squared_norm_of_x) const {
    return factor * (source_kernel->evaluate_x_i(x, i, squared_norm_of_x)
                     - average_col[i] - computeAverage(x, true, squared_norm_of_x) + total_average);
}

////////////////////////
// evaluate_x_i_again //
////////////////////////
real AdditiveNormalizationKernel::evaluate_x_i_again(const Vec& x, int i, real squared_norm_of_x, bool first_time) const {
    if (first_time) {
        avg_evaluate_x_i_again = computeAverage(x, true, squared_norm_of_x);
    }
    return factor * (source_kernel->evaluate_x_i_again(x, i, squared_norm_of_x, first_time)
                     - average_col[i] - avg_evaluate_x_i_again + total_average);
}

/////////////////////////////////
// makeDeepCopyFromShallowCopy //
/////////////////////////////////
void AdditiveNormalizationKernel::makeDeepCopyFromShallowCopy(map<const void*, void*>& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    // ### Call deepCopyField on all "pointer-like" fields
    // ### that you wish to be deepCopied rather than
    // ### shallow-copied.
    // ### ex:
    // deepCopyField(trainvec, copies);

    // ### Remove this line when you have fully implemented this method.
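    // If this method were completed, the pointer-like members used in this
    // file ('average_row', 'average_col' and 'all_k_x', plus the inherited
    // 'source_kernel') would presumably each need a deepCopyField call of
    // the form shown above; until then, the PLERROR below signals the
    // incomplete implementation.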
    PLERROR("AdditiveNormalizationKernel::makeDeepCopyFromShallowCopy not fully (correctly) implemented yet!");
}

//////////////////////////////
// setDataForKernelMatrix //
//////////////////////////////
void AdditiveNormalizationKernel::setDataForKernelMatrix(VMat the_data) {
    bool there_was_data_and_it_changed = data && !(data->looksTheSameAs(the_data));
    // Set the data for this kernel as well as for the underlying kernel.
    inherited::setDataForKernelMatrix(the_data);
    // Check whether we need to recompute the Gram matrix and its average.
    int n = the_data->length();
    if (   data_will_change
        || average_row.length() != n
        || there_was_data_and_it_changed) {
        // Compute the underlying Gram matrix.
        Mat gram(n, n);
        source_kernel->computeGramMatrix(gram);
        // Compute the row (and column) average.
        average_row.resize(n);
        average_row.fill(0);
        if (is_symmetric) {
            average_col = average_row;
        } else {
            average_col.resize(n);
            average_col.fill(0);
        }
        real k_x_x;
        total_average_unbiased = 0;
        for (int i = 0; i < n; i++) {
            if (is_symmetric) {
                real v;
                k_x_x = gram(i,i);
                if (!remove_bias) {
                    average_row[i] += k_x_x;
                    total_average_unbiased -= k_x_x;
                }
                for (int j = i + 1; j < n; j++) {
                    v = gram(i,j);
                    average_row[i] += v;
                    average_row[j] += v;
                }
            } else {
                for (int j = 0; j < n; j++) {
                    if (!remove_bias || j != i) {
                        average_row[i] += gram(i,j);
                        average_col[i] += gram(j,i);
                        if (j == i) {
                            total_average_unbiased -= gram(i,j);
                        }
                    }
                }
            }
        }
        total_average = sum(average_row);
        if (remove_bias) {
            // The sum is already unbiased.
            total_average_unbiased = total_average;
        } else {
            // At this point, 'total_average_unbiased' = - \sum K(x_i,x_i).
            total_average_unbiased += total_average;
        }
        real n_terms_in_sum; // The number of terms summed in average_row.
        if (remove_bias) {
            // The diagonal terms were not added.
            n_terms_in_sum = real(n - 1);
        } else {
            n_terms_in_sum = real(n);
        }
        total_average /= real(n * n_terms_in_sum);
        total_average_unbiased /= real(n * (n-1));
        average_row /= n_terms_in_sum;
        if (!is_symmetric) {
            average_col /= n_terms_in_sum;
        }
    }
}

} // end of namespace PLearn
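The arithmetic of the normalization can be checked in isolation. The sketch below (plain C++, independent of PLearn; the matrix values and names are made up for illustration) applies K'(i,j) = K(i,j) - average_row[i] - average_col[j] + total_average to a small symmetric Gram matrix. A quick sanity check on the output: every row and column of the normalized matrix sums to zero, which is exactly what additive normalization enforces.

    // additive_normalization_demo.cc
    // Standalone illustration of the formula implemented above (no PLearn).
    #include <cstdio>

    int main() {
        const int n = 3;
        // A small symmetric Gram matrix K(x_i, x_j), values chosen arbitrarily.
        double K[n][n] = { {4, 2, 0},
                           {2, 3, 1},
                           {0, 1, 2} };
        double row_avg[n] = {0, 0, 0};
        double total_avg = 0;
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++)
                row_avg[i] += K[i][j];
            total_avg += row_avg[i];
            row_avg[i] /= n;            // E[K(x_i, .)]
        }
        total_avg /= n * n;             // E[K(., .)]
        // Since K is symmetric, column averages equal row averages.
        for (int i = 0; i < n; i++) {
            double row_sum = 0;
            for (int j = 0; j < n; j++) {
                double Kp = K[i][j] - row_avg[i] - row_avg[j] + total_avg;
                row_sum += Kp;
                std::printf("K'(%d,%d) = % .4f\n", i, j, Kp);
            }
            std::printf("row %d sums to % .1e\n", i, row_sum);  // ~0 up to rounding
        }
        return 0;
    }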
