
pl_math.h

// -*- C++ -*-

// PLearn (A C++ Machine Learning Library)
// Copyright (C) 1998 Pascal Vincent
// Copyright (C) 1999-2002 Pascal Vincent, Yoshua Bengio and University of Montreal
//

// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org


/* *******************************************************
 * $Id: pl_math.h,v 1.18 2004/07/21 16:30:53 chrish42 Exp $
 * This file is part of the PLearn library.
 ******************************************************* */

#ifndef pl_math_INC
#define pl_math_INC

#include <cmath>
#include <cfloat>
#include <climits>
#include <plearn/base/plerror.h>

namespace PLearn {
using namespace std;

#ifdef USEDOUBLE
#define real double
#endif

#ifdef USEFLOAT
#define real float
#endif

// It is not really the max, but it is large enough for most applications.
#ifdef USEFLOAT
#define REAL_MAX FLT_MAX
#else
#define REAL_MAX DBL_MAX
#endif

union _plearn_nan_type { unsigned char c[4]; float d; };
extern _plearn_nan_type plearn_nan;

#define MISSING_VALUE (plearn_nan.d)

using namespace std;

using std::log;
using std::sqrt;
using std::pow;
using std::exp;
using std::tanh;
using std::abs;

#define MIN(a,b) ((a)<(b)?(a):(b))
#define MAX(a,b) ((a)>(b)?(a):(b))

#define SIGN(a) ((a)>=0?1:-1)

inline real sign(real a)
{
    if (a>0) return 1;
    if (a<0) return -1;
    return 0;
}

inline real positive(real a) { if (a>0) return a; return 0; }
inline real negative(real a) { if (a<0) return a; return 0; }

#define ABS(x) ((x)>0 ?(x) :-(x))
#define random() rand()

#define Pi 3.141592653589793
#define Log2Pi 1.837877066409
#define LOG_2 0.693147180559945
#define LOG_INIT -FLT_MAX
#define MINUS_LOG_THRESHOLD -18.42

#if defined(_MSC_VER) || defined(_MINGW_)
#define drand48() ( rand()/ (double)(RAND_MAX+1) )
#define log1p(a) log((real)1.0+(real)a)
#define rint(a) (int)(a+(real)0.5)
#define isnan(x) _isnan(x)
#define finite(x) _finite(x)
#endif

#if defined(DARWIN)
#define isnan(x) __isnan(x)
#endif

template<class T>
inline T square(const T& x)
{ return x*x; }

// Wish I could get rid of this, but it is used as a function pointer
// in calls to some apply function in RandomVar. So I would need to change
// those calls first to something more STL-like.
real square_f(real x);

template<class T>
inline T two(const T& x)
{ return x+x; }

#define TANHTABLESIZE 5000
#define MAXTANHX 10.

class PLMathInitializer
{
public:
    PLMathInitializer();
    ~PLMathInitializer();
};

/*
#define DOUBLE2INT(i,d) {maniax = ((d)+6755399441055744.0); i=*((int *)(&maniax));}

inline int double2int(double d)
{
    double maniax = d+6755399441055744.0;
    return *((int *)&maniax);
}
*/

#if defined(LINUX) && !defined(__INTEL_COMPILER) // note: the Intel compiler on SGI does not like this
#define DOUBLE_TO_INT(in,out) __asm__ __volatile__ ("fistpl %0" : "=m" (out) : "t" (in) : "st")
#else
#define DOUBLE_TO_INT(in, out) out = int(in)
#endif

extern float tanhtable[TANHTABLESIZE];
extern PLMathInitializer pl_math_initializer;

inline real fasttanh(const real& x)
{
    if(x>0)
    {
        if(x>MAXTANHX)
            return real(tanhtable[TANHTABLESIZE-1]);
        else
        {
            int i;
            DOUBLE_TO_INT( double(x*((TANHTABLESIZE-1)/MAXTANHX)), i);
            return real(tanhtable[i]);
        }
    }
    else
    {
        real nx = -x;
        if(nx>MAXTANHX)
            return real(-tanhtable[TANHTABLESIZE-1]);
        else
        {
            int i;
            DOUBLE_TO_INT( double(nx*((TANHTABLESIZE-1)/MAXTANHX)), i);
            return real(-tanhtable[i]);
        }
    }
}

inline real fastsigmoid(const real& x)
{ return (real)0.5*(fasttanh(0.5*x)+1.); }
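
// ---------------------------------------------------------------------
// Added illustration (not part of the original header): fasttanh() above
// indexes tanhtable with i = x*(TANHTABLESIZE-1)/MAXTANHX, so the table is
// presumably filled on a uniform grid over [0, MAXTANHX], most likely in the
// PLMathInitializer constructor defined in pl_math.cc. A minimal sketch,
// assuming that layout:
//
//   PLMathInitializer::PLMathInitializer()
//   {
//       for (int i = 0; i < TANHTABLESIZE; i++)
//           tanhtable[i] = (float) tanh(i * MAXTANHX / (TANHTABLESIZE - 1));
//   }
// ---------------------------------------------------------------------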

// These are quadratic approximations to tanh and sigmoid.

/*
inline real ultrafasttanh(const real& x)
{
    if(x>1.92033) return 0.96016;
    else if (x>0) return 0.96016 - 0.26037 * square(x - 1.92033);
    else if (x<=-1.92033) return -0.96016;
    else return 0.26037 * square(x + 1.92033) - 0.96016;
}

inline real ultrafastsigmoid(const real& x)
{ return (real)0.5*(ultrafasttanh(0.5*x)+1.); }
*/

inline real ultrafasttanh(const real& x)
{
    if(x>=0)
        return (x<1.7 ? (1.5*x/(1+x)) : ( x<3 ? (0.935409070603099 + 0.0458812946797165*(x-1.7)) : 0.99505475368673));
    else
    {
        real xx = -x;
        return -(xx<1.7 ? (1.5*xx/(1+xx)) : ( xx<3 ? (0.935409070603099 + 0.0458812946797165*(xx-1.7)) : 0.99505475368673));
    }
}

/*
inline real ultrafasttanh(const real& x)
{
    return x/(1+fabs(x));
}
*/

inline real ultrafastsigmoid(const real& x)
{
    // return 0.5*x / (1. + fabs(x)) + 0.5;
    return (real)0.5*(ultrafasttanh(0.5*x)+1.);
    // return fastsigmoid(x);
}

template<class T>
inline bool is_missing(const T& x) { return false; }

inline bool is_missing(double x) { return isnan(x)!=0; }

inline bool is_missing(float x) { return isnan(x)!=0; }

inline bool is_integer(real x) { return real(int(x))==x; }

inline real FABS(real x)
{ return x>=0. ?x :-x; }

#define FSWAP(a,b) do {real _c; _c = *(a); *(a) = *(b); *(b) = _c;} while(0)
#define FLOAT_THRESHOLD 0.00001
#define FEQUAL(a,b) (FABS((a)-(b))<FLOAT_THRESHOLD)


inline real mypow(real x, real p)
{ return x==0 ?0 :pow(x,p); }

inline real ipow(real x, int p)
{
    real result = 1.0;
    while(p--)
        result *= x;
    return result;
}

inline real sigmoid(real x)
{ return (real)0.5*(tanh(0.5*x)+1.); }

inline real is_positive(real x) { return x>0 ? 1 : 0; }

inline real inverse_sigmoid(real x)
{
#ifdef BOUNDCHECK
    if (x < 0. || x > 1.)
        PLERROR("In inverse_sigmoid: x should be in [0,1]");
#endif
    if (FEQUAL(x,0.))
        return -88.;
    else if (FEQUAL(x,1.))
        return 14.5;
    else
        return real(-log(1./x - 1.));
}

inline real softplus(real x)
{
    if(x<=-30.)
        return 0.0;
    else if(x>=30.)
        return x;
    else
        return log1p(exp(x));
}

inline real tabulated_softplus(real x)
{
    static const int n_softplus_values = 1000000;
    static const real min_softplus_arg = -10;
    static const real max_softplus_arg = 10;
    static const real softplus_delta = (n_softplus_values-1)/(max_softplus_arg-min_softplus_arg);
    static real softplus_values[n_softplus_values];
    static bool computed_softplus_table = false;
    if (!computed_softplus_table)
    {
        real y=min_softplus_arg;
        real dy=1.0/softplus_delta;
        for (int i=0;i<n_softplus_values;i++,y+=dy)
            softplus_values[i] = softplus(y);
        computed_softplus_table=true;
    }
    if (x<min_softplus_arg) return 0;
    if (x>max_softplus_arg) return x;
    int bin = int(rint((x-min_softplus_arg)*softplus_delta));
    return softplus_values[bin];
}

inline real inverse_softplus(real y)
{
    if (y<0)
        return MISSING_VALUE;
    if (y>=30)
        return y;
    if (y==0)
        return -30;
    return log(exp(y)-1);
}
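
// ---------------------------------------------------------------------
// Added usage note (not part of the original header): softplus computes
// log(1+exp(x)), clipped for very small and very large arguments, and
// inverse_softplus inverts it inside that range. A small illustrative
// example:
//
//   real y = softplus(0.0);           // log(2) ~= 0.693147
//   real x = inverse_softplus(y);     // ~= 0.0
//   real t = tabulated_softplus(0.0); // table lookup, agrees with y to ~1e-5
// ---------------------------------------------------------------------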

inline real hard_slope(real x, real left=0, real right=1)
{
    if (x<left) return 0;
    if (x>right) return 1;
    return (x-left)/(right-left);
}

// As smoothness --> infty this becomes the piecewise-linear function that
// is 0 on (-infty,left], linear on [left,right], and 1 on [right,infty).
// For finite smoothness, it is a smoother function, always with values in the interval [0,1].
// It is always monotonically increasing in x (positive derivative in x).
inline real soft_slope(real x, real smoothness=1, real left=0, real right=1)
{
    if (smoothness==0)
        return 0.5;
    if (smoothness>1000)
        return hard_slope(x,left,right);
    return 1 + (softplus(-smoothness*(x-left))-softplus(-smoothness*(x-right)))/(smoothness*(right-left));
}

inline real tabulated_soft_slope(real x, real smoothness=1, real left=0, real right=1)
{
    if (smoothness==0)
        return 0.5;
    if (smoothness>1000)
        return hard_slope(x,left,right);
    return 1 + (tabulated_softplus(-smoothness*(x-left))-tabulated_softplus(-smoothness*(x-right)))/(smoothness*(right-left));
}

// This is the derivative of soft_slope with respect to x.
inline real d_soft_slope(real x, real smoothness=1, real left=0, real right=1)
{
    // note that d(softplus(z))/dz = sigmoid(z)
    return (-sigmoid(-smoothness*(x-left))+sigmoid(-smoothness*(x-right)))/(right-left);
}
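
// ---------------------------------------------------------------------
// Added derivation note (not part of the original header): since
// d(softplus(z))/dz = sigmoid(z), differentiating
//     soft_slope(x) = 1 + ( softplus(-s*(x-left)) - softplus(-s*(x-right)) )
//                         / ( s*(right-left) )
// with respect to x multiplies each softplus term by -s; the factor s then
// cancels, leaving exactly the expression returned by d_soft_slope:
//     ( -sigmoid(-s*(x-left)) + sigmoid(-s*(x-right)) ) / (right-left)
// where s denotes the smoothness parameter.
// ---------------------------------------------------------------------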

inline int n_choose(int M, int N)
{
    int k=M-N;
    float res=1;
    int i;
    for (i=1;i<=k;i++) {
        res *= (i+N)/(float)i;
    }
    return (int)(res+0.499);
}

real safeflog(real a);
real safeexp(real a);

real log(real base, real a);
real logtwo(real a);
real safeflog(real base, real a);
real safeflog2(real a);

typedef real (*tRealFunc)(real);
typedef real (*tRealReadFunc)(real,real);

real logadd(real log_a, real log_b);

real logsub(real log_a, real log_b);

real dilogarithm(real x);

inline real softplus_primitive(real x) {
    return -dilogarithm(-exp(x));
}

inline real tabulated_softplus_primitive(real x) {
    static const int n_softplus_primitive_values = 10000;
    static const real min_softplus_primitive_arg = -20;
    static const real max_softplus_primitive_arg = 10;
    static const real max_offset = max_softplus_primitive_arg*max_softplus_primitive_arg*0.5;
    static const real softplus_primitive_delta = (n_softplus_primitive_values-1)/(max_softplus_primitive_arg-min_softplus_primitive_arg);
    static real softplus_primitive_values[n_softplus_primitive_values];
    static bool computed_softplus_primitive_table = false;
    if (!computed_softplus_primitive_table)
    {
        real y=min_softplus_primitive_arg;
        real dy=1.0/softplus_primitive_delta;
        for (int i=0;i<n_softplus_primitive_values;i++,y+=dy)
            softplus_primitive_values[i] = softplus_primitive(y);
        computed_softplus_primitive_table=true;
    }
    if (x<min_softplus_primitive_arg) return 0;
    if (x>max_softplus_primitive_arg) return softplus_primitive_values[n_softplus_primitive_values-1]+x*x*0.5 - max_offset;
    int bin = int(rint((x-min_softplus_primitive_arg)*softplus_primitive_delta));
    return softplus_primitive_values[bin];
}

real hard_slope_integral(real left=0, real right=1, real a=0, real b=1);


// integral of the soft_slope function between a and b
real soft_slope_integral(real smoothness=1, real left=0, real right=1, real a=0, real b=1);
real tabulated_soft_slope_integral(real smoothness=1, real left=0, real right=1, real a=0, real b=1);

} // end of namespace PLearn


#endif
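
A minimal usage sketch of this header, added here for illustration only. It assumes a build where USEDOUBLE is defined so that real maps to double, that the header is included via its usual PLearn path, and that the program is linked against the PLearn library (which provides pl_math.cc, including the tanh table initialization); the file name example.cc is hypothetical.

// example.cc -- illustrative sketch; assumes -DUSEDOUBLE and linking with PLearn
#include <iostream>
#include <plearn/math/pl_math.h>

int main()
{
    using namespace PLearn;
    std::cout << sigmoid(0.0)              << std::endl; // 0.5
    std::cout << softplus(1.0)             << std::endl; // log(1+e) ~= 1.31326
    std::cout << fasttanh(1.0)             << std::endl; // close to tanh(1) ~= 0.76159
    std::cout << n_choose(5, 2)            << std::endl; // 10
    std::cout << is_missing(MISSING_VALUE) << std::endl; // 1 (NaN is the missing-value marker)
    return 0;
}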
