
pl_math.cc

// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// 3. The name of the authors may not be used to endorse or promote
//    products derived from this software without specific prior written
//    permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: pl_math.cc,v 1.10 2004/04/16 17:37:54 yoshua Exp $
 * This file is part of the PLearn library.
 ******************************************************* */


#include "pl_math.h"

namespace PLearn {
using namespace std;

// Bit pattern of a quiet NaN, laid out according to the machine's byte order.
#ifdef BIGENDIAN
_plearn_nan_type plearn_nan = {{ 0x7f, 0xc0, 0, 0 }};
#endif
#ifdef LITTLEENDIAN
// TODO: This line removes the compilation warning, but does it still work?
// _plearn_nan_type plearn_nan = {{ 0, 0, 0xc0, 0x7f }};
_plearn_nan_type plearn_nan = { 0, 0, 0xc0, 0x7f };
#endif

// Lookup table of tanh values, filled once at static initialization time.
float tanhtable[TANHTABLESIZE];

PLMathInitializer::PLMathInitializer()
{
    // Tabulate tanh over [0, MAXTANHX].
    real scaling = MAXTANHX/(TANHTABLESIZE-1);
    for(int i=0; i<TANHTABLESIZE; i++)
        tanhtable[i] = (float) tanh(i*scaling);
}

PLMathInitializer::~PLMathInitializer()
{}

PLMathInitializer pl_math_initializer;

// log() that raises an error on negative arguments and clamps near-zero
// arguments instead of returning -inf.
real safeflog(real a)
{
    if (a < 0.0)
        PLERROR("safeflog: negative argument (%f)", a);
    if (a < 1e-25)
        return -57.5;
    else return (real)log((double)a);
}

// exp() with the argument clipped so the result neither overflows nor underflows.
real safeexp(real a)
{
#if USEDOUBLE
    if (a < -300) return 0;
    if (a > 300) return 1e38;
#else
    if (a < -87) return 0;
    if (a > 43) return 5e18;
#endif
    return exp(a);
}

real log(real base, real a)
{
    return log(a) / log(base);
}

real logtwo(real a)
{
    return log(a) / LOG_2;
}

real safeflog(real base, real a)
{
    return safeflog(a) / safeflog(base);
}

real safeflog2(real a)
{
    return safeflog(a) / LOG_2;
}

// compute log(exp(log_a)+exp(log_b)) without losing too much precision
real logadd(real log_a, real log_b)
{
    if (log_a < log_b)
    { // swap them
        real tmp = log_a;
        log_a = log_b;
        log_b = tmp;
    }
    real negative_absolute_difference = log_b - log_a;
    if (negative_absolute_difference < MINUS_LOG_THRESHOLD)
        return log_a;
    return log_a + log1p(exp(negative_absolute_difference));
}

real square_f(real x)
{ return x*x; }

// compute log(exp(log_a)-exp(log_b)) without losing too much precision
real logsub(real log_a, real log_b)
{
    if (log_a < log_b)
        PLERROR("log_sub: log_a (%f) should be greater than log_b (%f)", log_a, log_b);

    real negative_absolute_difference = log_b - log_a;

    if (FEQUAL(log_a, log_b))
        return -FLT_MAX;
    else if (negative_absolute_difference < MINUS_LOG_THRESHOLD)
        return log_a;
    else
        return log_a + log1p(-exp(negative_absolute_difference));
}

// Series expansion of the dilogarithm, accurate for small |x|.
real small_dilogarithm(real x)
{
    real somme = x;
    real prod = x;
    int i=2;
    for (;i<=999;i++)
    {
        real coef = (i-1.0)/i;
        prod *= x*coef*coef;
        somme += prod;
        if (fabs(prod/somme)<1e-16) break; // tolerance
    }
    static bool warning_was_raised=false;
    if (i==1000 && !warning_was_raised)
    {
        warning_was_raised=true;
        PLWARNING("dilogarithm: insufficient precision");
    }
    return somme;
}

// Dilogarithm for x >= 0, using a different expansion on each sub-range.
real positive_dilogarithm(real x)
{
    if (x<0.5)
        return small_dilogarithm(x);
    else if (x<1.0)
        return Pi*Pi/6.0 - small_dilogarithm(1.0-x) - log(x)*log(1-x);
    else if (x==1.0)
        return Pi*Pi/6.0;
    else if (x<=1.01)
    {
        real delta=x-1.0;
        real log_delta=log(delta);
        return Pi*Pi/6.0 + delta*(1-log_delta+delta*
                                  ((2*log_delta-1)/4 + delta*
                                   ((1-3*log_delta)/9 + delta*
                                    ((4*log_delta-1)/16 + delta*
                                     ((1-5*log_delta)/25 + delta*
                                      ((6*log_delta-1)/36 + delta*
                                       ((1-7*log_delta)/49 + delta*
                                        (8*log_delta-1)/64)))))));
    }
    else if (x<=2.0)
    {
        real logx = log(x);
        return Pi*Pi/6.0 + small_dilogarithm(1.0-1.0/x) - logx*(0.5*logx+log(1-1/x));
    } else
    {
        real logx = log(x);
        return Pi*Pi/3.0 - small_dilogarithm(1.0/x) - 0.5*logx*logx;
    }
}

// Dilogarithm Li_2(x), extended to negative x; NaN input yields MISSING_VALUE.
real dilogarithm(real x)
{
    if (is_missing(x))
    {
#ifdef BOUNDCHECK
        PLWARNING("Dilogarithm taking NaN as input");
#endif
        return MISSING_VALUE;
    }
    if (x<0)
        return -positive_dilogarithm(-x) + 0.5*positive_dilogarithm(x*x);
    else
        if (x==0) return 0;
        else
            return positive_dilogarithm(x);
}

real hard_slope_integral(real l, real r, real a, real b)
{
    if (b<l) return 0;
    if (b<r)
    {
        if (a<l)
            return 0.5*(b-l)*(b-l)/(r-l);
        else // a>=l
            return 0.5*((b-l)*(b-l)-(a-l)*(a-l))/(r-l);
    }
    else // b>=r
    {
        if (a<l)
            return 0.5*(r-l)+(b-r);
        else if (a<r) // l<a<r
            return 0.5*((r-l) - (a-l)*(a-l)/(r-l)) + (b-r);
        else // a>r
            return b-a;
    }
}

real soft_slope_integral(real smoothness, real left, real right, real a, real b)
{
    if (smoothness==0)
        return 0.5*(b-a);
    if (smoothness<100)
        return
            (b - a) + (softplus_primitive(-smoothness*(b-right)) - softplus_primitive(-smoothness*(b-left))
                       -softplus_primitive(-smoothness*(a-right)) + softplus_primitive(-smoothness*(a-left)))/
            (smoothness*smoothness*(right-left));
    // else do the integral of the hard slope function
    return hard_slope_integral(left,right,a,b);
}

real tabulated_soft_slope_integral(real smoothness, real left, real right, real a, real b)
{
    if (smoothness==0)
        return 0.5*(b-a);
    if (smoothness<100)
        return
            (b - a) + (tabulated_softplus_primitive(-smoothness*(b-right)) - tabulated_softplus_primitive(-smoothness*(b-left))
                       -tabulated_softplus_primitive(-smoothness*(a-right)) + tabulated_softplus_primitive(-smoothness*(a-left)))/
            (smoothness*smoothness*(right-left));
    // else do the integral of the hard slope function
    return hard_slope_integral(left,right,a,b);
}

} // end of namespace PLearn
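
As an illustrative aside (not part of pl_math.cc), the log-domain helpers above are what one reaches for when probabilities are too small to represent directly. The sketch below sums three log-probabilities with PLearn::logadd, which the listing defines as log(exp(log_a)+exp(log_b)); it assumes pl_math.h is on the include path, and the main() driver and sample values are hypothetical.

// Illustrative sketch: summing probabilities stored in the log domain with
// PLearn::logadd, so the sum is computed without underflow.
#include <cstdio>
#include "pl_math.h"  // assumed to declare PLearn::logadd (see listing above)

int main()
{
    // Hypothetical log-probabilities of three disjoint events; exp() of any
    // of them underflows an IEEE double, so the probabilities cannot be
    // summed directly.
    double log_p[] = { -1050.0, -1051.0, -1060.0 };

    // Accumulate log(p0 + p1 + p2) one term at a time.
    double total = log_p[0];
    for (int i = 1; i < 3; i++)
        total = PLearn::logadd(total, log_p[i]);

    std::printf("log(p0 + p1 + p2) = %f\n", total);
    return 0;
}

Combining log-likelihoods this way is the standard alternative to a naive exp()/log() round trip, which would return -inf for values in this range.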
