
PLearn Namespace Reference



Classes

class  PLearn::Array< T >
class  PLearn::TypeTraits< Array< T > >
class  PLearn::Array2ArrayMap< T >
class  PLearn::Option< T, Enclosing >
 Template class for option definitions. More...

class  PLearn::ArrayAllocatorOptions
class  PLearn::ArrayAllocator< T, SizeBits >
class  PLearn::ArrayAllocatorIndex< IndexBase, SizeBits >
 This type represents an index into the allocated memory, as a bit-field parameterized by the template argument SizeBits. More...

class  PLearn::ArrayAllocatorTrivial< T, SizeBits >
 This allocator solely performs allocation. More...

class  PLearn::PLearnInit
class  PLearn::Object
 The Object class. More...

class  PLearn::OptionBase
 Base class for option definitions. More...

class  PLearn::PDate
class  PLearn::PDateTime
class  PLearn::PLearnError
class  PLearn::PPointable
class  PLearn::PP< T >
class  PLearn::TypeTraits< PP< T > >
class  PLearn::MultiMap< A, B >
class  PLearn::ProgressBarPlugin
 Base class for pb plugins. More...

class  PLearn::TextProgressBarPlugin
 Simple plugin for displaying text progress bar. More...

struct  PLearn::NullProgressBarPlugin
 Simpler plugin that doesn't display a progress bar at all. More...

class  PLearn::ProgressBar
 This class will help you display progress of a calculation. More...

class  PLearn::Range
class  PLearn::RealRange
 represents a real range: i.e. one of ]low,high[ ; [low,high[; [low,high]; ]low,high] More...

class  PLearn::RealMapping
class  PLearn::TypeTraits< RealMapping >
class  PLearn::SetOption
class  PLearn::TypeTraits< SetOption >
class  PLearn::SmallVector< T, SizeBits, Allocator >
class  PLearn::StaticInitializer
 A StaticInitializer is typically declared as a static member of a class, and given a parameter that is a static initialization function for said class. More...

class  PLearn::Storage< T >
class  PLearn::StringTable
class  PLearn::TinyVector< T, N, TTrait >
class  PLearn::TinyVectorTrait< T >
class  PLearn::TinyVectorTrait< unsigned char >
class  PLearn::TinyVectorTrait< char >
class  PLearn::TinyVectorTrait< unsigned int >
class  PLearn::TinyVectorTrait< int >
class  PLearn::TypeMapEntry
class  PLearn::TypeFactory
class  PLearn::TypeTraits< T >
class  PLearn::TypeTraits< T * >
class  PLearn::TypeTraits< string >
class  PLearn::TypeTraits< vector< T > >
class  PLearn::TypeTraits< list< T > >
class  PLearn::TypeTraits< pair< T, U > >
class  PLearn::TypeTraits< map< T, U > >
class  PLearn::StringFieldMapping
class  PLearn::NumToStringMapping
class  PLearn::AutoSDBVMatrix
 A VMatrix view of a SimpleDB: columns whose type is string are removed from the view; all others are converted to real (characters to their ASCII code, and dates to the float date format: 990324). More...

class  PLearn::NistDB
class  PLearn::SDBVMOutputCoder
class  PLearn::SDBVMField
class  PLearn::SDBVMSource
 A SDBVMSource represents a source for a value that can be either directly a field from a SDB or an already processed SDBVMField. More...

class  PLearn::SDBVMatrix
class  PLearn::SDBVMFieldSource1
 A field that maps exactly 1 SDB field to a VMatrix segment (abstract). More...

class  PLearn::SDBVMFieldSource2
 A field that maps exactly 2 SDB fields to a VMatrix segment (abstract). More...

class  PLearn::SDBVMFieldAsIs
 Pass through the value within the SDB (after conversion to real of the underlying SDB type). More...

class  PLearn::SDBVMFieldNormalize
 Normalize the field (subtract the mean then divide by standard dev). More...

class  PLearn::SDBVMFieldDivSigma
 Just divide by standard deviation. More...

class  PLearn::SDBVMFieldAffine
 Apply an affine transformation to the field: y = a*x+b. More...

class  PLearn::SDBVMFieldPosAffine
 Take the positive part of the field, followed by affine transformation: y = a*max(x,0)+b. More...

class  PLearn::SDBVMFieldSignedPower
 Does the following: y = x^a. More...

class  PLearn::SDBVMFieldFunc1
class  PLearn::SDBVMFieldFunc2
class  PLearn::SDBVMFieldDate
 Convert a date to fill 3 columns in the VMat: YYYY, MM, DD. More...

class  PLearn::SDBVMFieldDay
class  PLearn::SDBVMFieldMonths
 Computed year*12+(month-1). More...

class  PLearn::SDBVMFieldDateDiff
 difference between two dates ("source1-source2" expressed as an integer number of days, months, or years) More...

class  PLearn::SDBVMFieldDiscrete
 A field that recodes its source field according to an OutputCoder object. More...

class  PLearn::SDBVMFieldDateGreater
 verifies if the date within the row is greater than a threshold date More...

class  PLearn::SDBVMFieldCodeAsIs
class  PLearn::SDBVMFieldRemapReals
class  PLearn::SDBVMFieldRemapStrings
class  PLearn::SDBVMFieldRemapIntervals
class  PLearn::SDBVMFieldMultiDiscrete
class  PLearn::SDBVMFieldICBCTargets
class  PLearn::SDBVMFieldHasClaim
class  PLearn::SDBVMFieldSumClaims
class  PLearn::SDBVMFieldICBCClassification
class  PLearn::FieldStat
class  PLearn::SDBWithStats
struct  PLearn::Field
class  PLearn::FieldPtr
class  PLearn::FieldValue
struct  PLearn::FieldValue::DateVal_t
class  PLearn::Schema
class  PLearn::SimpleDBIndexKey< KeyType >
class  PLearn::SimpleDB< KeyType, QueryResult >
class  PLearn::RowIterator
class  PLearn::FieldRowRef
class  PLearn::Row
class  PLearn::UCISpecification
class  PLearn::GhostScript
class  PLearn::Gnuplot
class  PLearn::MatlabInterface
class  PLearn::RGB
class  PLearn::HSV
class  PLearn::RGBImage
 uses top left coordinate system Pixel (i,j) is at row i, column j More...

class  PLearn::RGBImageDB
class  PLearn::RGBImageVMatrix
class  PLearn::RGBImagesVMatrix
class  PLearn::FilePStreamBuf
class  PLearn::IntStream
class  PLearn::IntStreamVMatrix
class  PLearn::FilesIntStream
class  PLearn::InMemoryIntStream
class  PLearn::IntVecFile
class  PLearn::MRUFileList
class  PLearn::pl_fdstreambuf
 pl_fdstreambuf: stream buffer that acts on a POSIX file descriptor More...

class  PLearn::pl_fdstream
class  PLearn::pl_nullstreambuf
class  PLearn::pl_streambuf
class  PLearn::pl_streammarker
class  PLearn::PStream
class  PLearn::PIFStream
class  PLearn::POFStream
class  PLearn::PIStringStream
class  PLearn::pl_stream_raw
class  PLearn::pl_stream_clear_flags
class  PLearn::pl_stream_initiate
class  PLearn::PStreamBuf
class  PLearn::StdPStreamBuf
class  PLearn::TmpFilenames
struct  PLearn::tRule
class  PLearn::AdditiveNormalizationKernel
class  PLearn::ClassDistanceProportionCostFunction
class  PLearn::ClassErrorCostFunction
class  PLearn::ClassMarginCostFunction
class  PLearn::CompactVMatrixGaussianKernel
class  PLearn::CompactVMatrixPolynomialKernel
class  PLearn::ConvexBasisKernel
 returns prod_i log(1+exp(c*(x1[i]-x2[i]))) NOTE: IT IS NOT SYMMETRIC! More...

class  PLearn::DifferenceKernel
 returns sum_i[x1_i-x2_i] More...

class  PLearn::DirectNegativeCostFunction
class  PLearn::DistanceKernel
 This class implements an Ln distance (defaults to L2 i.e. euclidean distance). More...

class  PLearn::DivisiveNormalizationKernel
class  PLearn::DotProductKernel
 returns <x1,x2> More...

class  PLearn::GaussianDensityKernel
class  PLearn::GaussianKernel
 returns exp(-norm_2(x1-x2)^2/sigma^2) More...

class  PLearn::GeneralizedDistanceRBFKernel
 returns exp(-phi*(sum_i[abs(x1_i^a - x2_i^a)^b])^c) More...

class  PLearn::GeodesicDistanceKernel
class  PLearn::Kernel
class  PLearn::Ker
class  PLearn::KernelVMatrix
class  PLearn::LaplacianKernel
 returns exp(-phi*(sum_i[abs(x1_i - x2_i)])) More...

class  PLearn::LiftBinaryCostFunction
class  PLearn::LLEKernel
class  PLearn::LogOfGaussianDensityKernel
class  PLearn::MulticlassErrorCostFunction
class  PLearn::NegKernel
class  PLearn::NegLogProbCostFunction
class  PLearn::NegOutputCostFunction
 This simply returns -output[0] (target should usually have a length of 0). This is used for density estimators whose use(x) method typically computes log(p(x)). More...

class  PLearn::NormalizedDotProductKernel
class  PLearn::PolynomialKernel
 returns (beta*dot(x1,x2)+1)^n More...

class  PLearn::PowDistanceKernel
class  PLearn::PrecomputedKernel
 A kernel that precomputes the kernel matrix as soon as setDataForKernelMatrix is called. More...

class  PLearn::PricingTransactionPairProfitFunction
class  PLearn::QuadraticUtilityCostFunction
class  PLearn::ReconstructionWeightsKernel
class  PLearn::ScaledGaussianKernel
 returns exp(-sum_i[(phi_i*(x1_i - x2_i))^2]/sigma^2) More...

class  PLearn::ScaledGeneralizedDistanceRBFKernel
 returns exp(-(sum_i phi_i*[abs(x1_i^a - x2_i^a)^b])^c) More...

class  PLearn::ScaledLaplacianKernel
 returns exp(-(sum_i[abs(x1_i - x2_i)*phi_i])) More...

class  PLearn::SelectedOutputCostFunction
 This allows applying a cost function to a single output element (and corresponding target element) of a larger output vector, rather than to the whole vector. More...

class  PLearn::SigmoidalKernel
 returns sigmoid(c*x1.x2) More...

class  PLearn::SigmoidPrimitiveKernel
 returns log(1+exp(c*x1.x2)) = primitive of sigmoidal kernel More...

class  PLearn::SourceKernel
class  PLearn::SquaredErrorCostFunction
 The following 'kernels' are rather used as cost functions. More...

class  PLearn::WeightedCostFunction
 A cost function that reweights another cost function (the weight being the last element of target). Returns target.lastElement() * costfunc(output, target.subVec(0, target.length()-1)). More...

class  PLearn::Binner
class  PLearn::BottomNI< T >
class  PLearn::ConditionalCDFSmoother
class  PLearn::ConditionalStatsCollector
class  PLearn::DoubleAccessSparseMatrix< T >
class  PLearn::SMat< T >
class  PLearn::HashKeyDataPair< KeyType, DataType >
class  PLearn::Hash< KeyType, DataType >
struct  PLearn::Symbol
class  PLearn::IntPair
 Example of class that can be used as key. More...

class  PLearn::LiftStatsCollector
class  PLearn::LimitedGaussianSmoother
class  PLearn::ManualBinner
union  PLearn::_plearn_nan_type
class  PLearn::PLMathInitializer
class  PLearn::ProbSparseMatrix
class  PLearn::PSMat
class  PLearn::RowMapSparseMatrix< T >
class  PLearn::RowMapSparseValueMatrix< T >
class  PLearn::ScaledConditionalCDFSmoother
class  PLearn::Smoother
class  PLearn::SparseMatrix
class  PLearn::StatsCollectorCounts
class  PLearn::StatsCollector
class  PLearn::StatsIterator
class  PLearn::MeanStatsIterator
class  PLearn::ExpMeanStatsIterator
class  PLearn::StddevStatsIterator
class  PLearn::StderrStatsIterator
class  PLearn::SharpeRatioStatsIterator
class  PLearn::MinStatsIterator
class  PLearn::MaxStatsIterator
class  PLearn::LiftStatsIterator
class  PLearn::QuantilesStatsIterator
class  PLearn::StatsItArray
class  PLearn::TTensorElementIterator< T >
class  PLearn::TTensorSubTensorIterator< T >
class  PLearn::TTensor< T >
class  PLearn::TMat< T >
class  PLearn::TypeTraits< TMat< T > >
class  PLearn::SelectedIndicesCmp< T >
class  PLearn::TMatColRowsIterator< T >
 Model of the Random Access Iterator concept for iterating through a single column of a TMat, one row at a time. More...

class  PLearn::TMatElementIterator< T >
class  PLearn::TMatRowsAsArraysIterator< T >
 Model of the Random Access Iterator concept for iterating through the ROWS of a TMat. More...

class  PLearn::TMatRowsIterator< T >
 Model of the Random Access Iterator concept for iterating through the ROWS of a TMat. More...

class  PLearn::TopNI< T >
class  PLearn::TVec< T >
class  PLearn::TypeTraits< TVec< T > >
class  PLearn::VecCompressor
class  PLearn::VecStatsCollector
class  PLearn::Measurer
class  PLearn::CallbackMeasurer
class  PLearn::NearestNeighborPredictionCost
class  PLearn::ObjectGenerator
class  PLearn::RunObject
class  PLearn::ShellScript
class  PLearn::AdaptGradientOptimizer
class  PLearn::ConjGradientOptimizer
class  PLearn::GradientOptimizer
class  PLearn::ScaledGradientOptimizer
class  PLearn::HyperOptimizer
class  PLearn::HSetVal
class  PLearn::HTryAll
class  PLearn::HCoordinateDescent
class  PLearn::HTryCombinations
class  PLearn::Optimizer
class  PLearn::RandomVar
 we follow the same pattern as Var & Variable More...

class  PLearn::RVArray
 An RVArray stores a table of RandomVar's. More...

class  PLearn::RVInstance
 RVInstance represents a RandomVariable V along with a "value" v. More...

class  PLearn::RVInstanceArray
class  PLearn::ConditionalExpression
class  PLearn::RandomVariable
class  PLearn::StochasticRandomVariable
class  PLearn::FunctionalRandomVariable
class  PLearn::NonRandomVariable
class  PLearn::JointRandomVariable
class  PLearn::RandomElementOfRandomVariable
 RandomVariable that is the element of the first parent RandomVariable indexed by the second parent RandomVariable. More...

class  PLearn::RVArrayRandomElementRandomVariable
class  PLearn::NegRandomVariable
class  PLearn::ExpRandomVariable
class  PLearn::LogRandomVariable
class  PLearn::DiagonalNormalRandomVariable
class  PLearn::MixtureRandomVariable
class  PLearn::PlusRandomVariable
class  PLearn::MinusRandomVariable
class  PLearn::ElementWiseDivisionRandomVariable
class  PLearn::ProductRandomVariable
class  PLearn::SubVecRandomVariable
 Y = sub-vector of X starting at position "start", of length "value->length()". More...

class  PLearn::MultinomialRandomVariable
class  PLearn::ExtendedRandomVariable
class  PLearn::ConcatColumnsRandomVariable
 concatenate the columns of the matrix arguments, just like the hconcat function (PLearn.h) on Vars. More...

class  PLearn::RandomVarVMatrix
 This is a convenient wrapping around the required data structures for efficient repeated sampling from a RandomVar. More...

class  PLearn::SourceSampleVariable
class  PLearn::UnarySampleVariable
class  PLearn::BinarySampleVariable
class  PLearn::UniformSampleVariable
class  PLearn::MultinomialSampleVariable
class  PLearn::DiagonalNormalSampleVariable
class  PLearn::IPServer
class  PLearn::IPopen
class  PLearn::PLMPI
 PLMPI is just a "namespace holder" (because we're not actually using namespaces) for a few MPI-related variables. All members are static. More...

class  PLearn::Popen
class  PLearn::Profiler
class  PLearn::Profiler::Stats
union  PLearn::semun
class  PLearn::SemId
 This class is defined in order to distinguish semaphore and shared memory id's from plain integers when constructing a Semaphore or a SharedMemory object. More...

class  PLearn::ResourceSemaphore
class  PLearn::CountEventsSemaphore
class  PLearn::SharedMemory< T >
class  PLearn::AbsVariable
class  PLearn::AffineTransformVariable
 Affine transformation of a vector variable. More...

class  PLearn::AffineTransformWeightPenalty
 Weight decay terms for affine transforms. More...

class  PLearn::ArgmaxVariable
class  PLearn::ArgminOfVariable
class  PLearn::ArgminVariable
class  PLearn::BinaryClassificationLossVariable
 For one-dimensional output: class is 0 if output < 0.5, and 1 if >= 0.5. More...

class  PLearn::BinaryVariable
class  PLearn::ClassificationLossVariable
 Indicator(classnum==argmax(netout)). More...

class  PLearn::ColumnIndexVariable
class  PLearn::ColumnSumVariable
 result is a single row that contains the sum of each column of the input More...

class  PLearn::ConcatColumnsVariable
 concatenation of the columns of several variables More...

class  PLearn::ConcatOfVariable
class  PLearn::ConcatRowsVariable
 concatenation of the rows of several variables More...

class  PLearn::ConvolveVariable
 A convolve var; equals convolve(input, mask). More...

class  PLearn::CrossEntropyVariable
 cost = - sum_i {target_i * log(output_i) + (1-target_i) * log(1-output_i)} More...

class  PLearn::CutAboveThresholdVariable
class  PLearn::CutBelowThresholdVariable
class  PLearn::DeterminantVariable
 The argument must be a square matrix Var and the result is its determinant. More...

class  PLearn::DiagonalizedFactorsProductVariable
class  PLearn::DilogarithmVariable
 This is the primitive of a sigmoid: log(1+exp(x)). More...

class  PLearn::DivVariable
 divides 2 matrix vars of same size elementwise More...

class  PLearn::DotProductVariable
 Dot product between 2 vectors (or possibly 2 matrices, which are then simply seen as vectors). More...

class  PLearn::DuplicateColumnVariable
class  PLearn::DuplicateRowVariable
class  PLearn::DuplicateScalarVariable
class  PLearn::ElementAtPositionVariable
class  PLearn::EqualConstantVariable
 A scalar var; equal 1 if input1==input2, 0 otherwise. More...

class  PLearn::EqualScalarVariable
 A scalar var; equal 1 if input1==input2, 0 otherwise. More...

class  PLearn::EqualVariable
 A scalar var; equal 1 if input1==input2, 0 otherwise. More...

class  PLearn::ErfVariable
class  PLearn::ExpVariable
class  PLearn::ExtendedVariable
class  PLearn::Func
class  PLearn::Function
class  PLearn::HardSlopeVariable
class  PLearn::IfThenElseVariable
class  PLearn::IndexAtPositionVariable
class  PLearn::InterValuesVariable
 if values = [x1,x2,...,x10], the resulting variable is [(x1+x2)/2,(x2+x3)/2, ... More...

class  PLearn::InvertElementsVariable
class  PLearn::IsAboveThresholdVariable
 Does elementwise newx_i = (x_i>=threshold ?truevalue :falsevalue);. More...

class  PLearn::IsLargerVariable

class  PLearn::IsMissingVariable
 A scalar var; equal 1 if input1!=c, 0 otherwise. More...

class  PLearn::IsSmallerVariable
class  PLearn::LeftPseudoInverseVariable
class  PLearn::LiftOutputVariable
class  PLearn::LogAddVariable
 output = log(exp(input1)+exp(input2)) but it is computed in such a way as to preserve precision More...

class  PLearn::LogSoftmaxVariable
class  PLearn::LogSumVariable
class  PLearn::LogVariable
class  PLearn::MarginPerceptronCostVariable
class  PLearn::MatrixAffineTransformFeedbackVariable
 Affine transformation of a MATRIX variable. More...

class  PLearn::MatrixAffineTransformVariable
 Affine transformation of a MATRIX variable. More...

class  PLearn::MatrixElementsVariable
class  PLearn::MatrixInverseVariable
class  PLearn::MatrixOneHotSquaredLoss
class  PLearn::MatrixSoftmaxLossVariable
class  PLearn::MatrixSoftmaxVariable
class  PLearn::MatrixSumOfVariable
class  PLearn::MatRowVariable
 Variable that is the row of matrix mat indexed by variable input. More...

class  PLearn::Max2Variable
class  PLearn::MaxVariable
class  PLearn::MiniBatchClassificationLossVariable
class  PLearn::MinusColumnVariable
class  PLearn::MinusRowVariable
class  PLearn::MinusScalarVariable
class  PLearn::MinusTransposedColumnVariable
class  PLearn::MinusVariable
class  PLearn::MinVariable
class  PLearn::MulticlassLossVariable
 cost = sum_i {cost_i}, with cost_i = 1 if (target_i == 1 && output_i < 1/2) cost_i = 1 if (target_i == 0 && output_i > 1/2) cost_i = 0 otherwise More...

class  PLearn::NaryVariable
class  PLearn::NegateElementsVariable
class  PLearn::NegCrossEntropySigmoidVariable
class  PLearn::NllSemisphericalGaussianVariable
 This class implements the negative log-likelihood cost of a Markov chain that uses semispherical gaussian transition probabilities. More...

class  PLearn::OneHotSquaredLoss
 Computes sum_i(square(netout[i] - (i==classnum ? hotval : coldval))). This is typically used in a classification setting where netout is a Var of network outputs, and classnum is the target class number. More...

class  PLearn::OneHotVariable
 Represents a vector of a given length that has value 1 at the index given by another variable and 0 everywhere else. More...

class  PLearn::PDistributionVariable
class  PLearn::PLogPVariable
 Returns the elementwise x*log(x) in a (hopefully!) numerically stable way. This can be used to compute the entropy, for instance. More...

class  PLearn::PlusColumnVariable
 adds a single-column var to each column of a matrix var More...

class  PLearn::PlusConstantVariable
 adds a scalar constant to a matrix var More...

class  PLearn::PlusRowVariable
 adds a single-row var to each row of a matrix var More...

class  PLearn::PlusScalarVariable
 adds a scalar var to a matrix var More...

class  PLearn::PlusVariable
 adds 2 matrix vars of same size More...

class  PLearn::PowVariable
 elementwise pow (returns 0 wherever input is negative) More...

class  PLearn::PowVariableVariable
class  PLearn::ProductTransposeVariable
 Matrix product between matrix1 and transpose of matrix2. More...

class  PLearn::ProductVariable
 Matrix product. More...

class  PLearn::ProjectionErrorVariable
 The first input is a set of n_dim vectors (possibly seen as a single vector of their concatenation) f_i, each in R^n. The second input is a set of T vectors (possibly seen as a single vector of their concatenation) t_j, each in R^n. The output is the following: sum_j min_{w_j} || t_j - sum_i w_{ji} f_i ||^2 where row w_j of w is optimized analytically and separately for each j. More...

class  PLearn::ReshapeVariable
 Variable that views another variable, but with a different length() and width() (the only restriction being that length()*width() remain the same). More...

class  PLearn::RightPseudoInverseVariable
class  PLearn::RowAtPositionVariable
class  PLearn::RowSumVariable
 result is a single column that contains the sum of each row of the input More...

class  PLearn::SemiSupervisedProbClassCostVariable
class  PLearn::SigmoidVariable
class  PLearn::SignVariable
 sign(x) = 1 if x>0, -1 if x<0, 0 if x=0, all done element by element. More...

class  PLearn::SoftmaxLossVariable
class  PLearn::SoftmaxVariable
class  PLearn::SoftplusVariable
 This is the primitive of a sigmoid: log(1+exp(x)). More...

class  PLearn::SoftSlopeIntegralVariable
class  PLearn::SoftSlopeVariable
class  PLearn::SourceVariable
class  PLearn::SquareRootVariable
class  PLearn::SquareVariable
class  PLearn::SubMatTransposeVariable
class  PLearn::SubMatVariable
class  PLearn::SubsampleVariable
 A subsample var; equals subsample(input, the_subsamplefactor). More...

class  PLearn::SumAbsVariable
class  PLearn::SumOfVariable
class  PLearn::SumOverBagsVariable
class  PLearn::SumSquareVariable
class  PLearn::SumVariable
class  PLearn::TanhVariable
class  PLearn::TimesColumnVariable
 multiplies each column of a matrix var elementwise with a single column variable More...

class  PLearn::TimesConstantVariable
 multiplies a matrix var by a scalar constant More...

class  PLearn::TimesRowVariable
 multiplies each row of a matrix var elementwise with a single row variable More...

class  PLearn::TimesScalarVariable
 multiplies a matrix var by a scalar var More...

class  PLearn::TimesVariable
 multiplies 2 matrix vars of same size elementwise More...

class  PLearn::TransposeProductVariable
 Matrix product between transpose of matrix1 and matrix2. More...

class  PLearn::UnaryHardSlopeVariable
class  PLearn::UnaryVariable
class  PLearn::UnequalConstantVariable
 A scalar var; equal 1 if input1!=c, 0 otherwise. More...

class  PLearn::UnfoldedFuncVariable
class  PLearn::UnfoldedSumOfVariable
class  PLearn::VarArray
class  PLearn::VarArrayElementVariable
 Variable that is the element of the input1 VarArray indexed by the input2 variable. More...

class  PLearn::VarColumnsVariable
class  PLearn::VarElementVariable
class  PLearn::Var
class  PLearn::Variable
class  PLearn::VarMeasurer
class  PLearn::VarRowsVariable
class  PLearn::VarRowVariable
 Variable that is the row of the input1 variable indexed by the input2 variable. More...

class  PLearn::VecElementVariable
 Variable that is the element of vector vec indexed by variable input. More...

class  PLearn::WeightedSumSquareVariable
class  PLearn::AsciiVMatrix
class  PLearn::AutoVMatrix
 This class is a simple wrapper to an underlying VMatrix of another type. All it does is forward the method calls. More...

class  PLearn::BatchVMatrix
 VMat class that replicates small parts of a matrix (mini-batches), so that each mini-batch appears twice (consecutively). More...

class  PLearn::BootstrapSplitter
class  PLearn::BootstrapVMatrix
class  PLearn::ByteMemoryVMatrix
class  PLearn::CenteredVMatrix
union  PLearn::short_and_twobytes
class  PLearn::CompactVMatrix
class  PLearn::CompressedVMatrix
class  PLearn::ConcatColumnsVMatrix
class  PLearn::ConcatRowsSubVMatrix
class  PLearn::ConcatRowsVMatrix
class  PLearn::CrossReferenceVMatrix
class  PLearn::CumVMatrix
class  PLearn::DatedJoinVMatrix
class  PLearn::DatedVMatrix
class  PLearn::DBSplitter
class  PLearn::DiskVMatrix
 A VMatrix whose (compressed) data resides in a directory and can span several files. More...

class  PLearn::ExplicitSplitter
class  PLearn::ExtendedVMatrix
class  PLearn::FileVMatrix
 A VMatrix that exists in a .pmat file (native plearn matrix format, same as for Mat). More...

class  PLearn::FilteredVMatrix
class  PLearn::FilterSplitter
class  PLearn::FinancePreprocVMatrix
class  PLearn::ForwardVMatrix
 This class is a simple wrapper to an underlying VMatrix of another type. All it does is forward the method calls. More...

class  PLearn::FractionSplitter
class  PLearn::GeneralizedOneHotVMatrix
 This VMat is a generalization of OneHotVMatrix where all columns (given by the Vec index) are mapped, instead of just the last one. More...

class  PLearn::GetInputVMatrix
class  PLearn::GramVMatrix
class  PLearn::IndexedVMatrix
 VMat class that sees a matrix as a collection of triplets (row, column, value) Thus it is a N x 3 matrix, with N = the number of elements in the original matrix. More...

class  PLearn::InterleaveVMatrix
struct  PLearn::JoinFieldStat
class  PLearn::JoinVMatrix
class  PLearn::JulianizeVMatrix
class  PLearn::KFoldSplitter
class  PLearn::KNNVMatrix
class  PLearn::LearnerProcessedVMatrix
class  PLearn::LocalNeighborsDifferencesVMatrix
class  PLearn::MemoryVMatrix
class  PLearn::MovingAverageVMatrix
class  PLearn::MultiInstanceVMatrix
class  PLearn::OneHotVMatrix
class  PLearn::PairsVMatrix
class  PLearn::PLearnerOutputVMatrix
class  PLearn::PrecomputedVMatrix
class  PLearn::ProcessingVMatrix
class  PLearn::RangeVMatrix
 Outputs scalar samples (length 1) starting at start, up to end (inclusive) with step. When end is reached it starts over again. More...

class  PLearn::RegularGridVMatrix
class  PLearn::RemapLastColumnVMatrix
class  PLearn::RemoveDuplicateVMatrix
class  PLearn::RemoveRowsVMatrix
 sees an underlying VMat with the specified rows excluded More...

class  PLearn::RepeatSplitter
class  PLearn::RowBufferedVMatrix
class  PLearn::RowsSubVMatrix
class  PLearn::SelectColumnsVMatrix
 selects variables (columns) from a source matrix according to given vector of indices. More...

class  PLearn::SelectRowsFileIndexVMatrix
class  PLearn::SelectRowsVMatrix
 selects samples from a source matrix according to given vector of indices More...

class  PLearn::SentencesBlocks
class  PLearn::SequentialSplitter
class  PLearn::ShiftAndRescaleVMatrix
class  PLearn::SortRowsVMatrix
 Sort the samples of a VMatrix according to one (or more) given columns. More...

class  PLearn::SourceVMatrix
class  PLearn::SourceVMatrixSplitter
class  PLearn::SparseVMatrixRow
class  PLearn::SparseVMatrix
class  PLearn::Splitter
class  PLearn::StrTableVMatrix
class  PLearn::SubInputVMatrix
class  PLearn::SubVMatrix
class  PLearn::TemporalHorizonVMatrix
 This VMat delays the last targetsize entries of an underlying VMat by a certain horizon. More...

class  PLearn::TestInTrainSplitter
class  PLearn::ThresholdVMatrix
class  PLearn::ToBagSplitter
class  PLearn::TrainTestBagsSplitter
class  PLearn::TrainTestSplitter
class  PLearn::TrainValidTestSplitter
class  PLearn::TransposeVMatrix
class  PLearn::UniformizeVMatrix
class  PLearn::UniformVMatrix
class  PLearn::UpsideDownVMatrix
class  PLearn::VecExtendedVMatrix
class  PLearn::VMat
class  PLearn::VMatLanguage
class  PLearn::PreprocessingVMatrix
class  PLearn::VMatrix
class  PLearn::VMatrixFromDistribution
class  PLearn::VMField
 a VMField contains a fieldname and a fieldtype More...

class  PLearn::VMFieldStat
 this class holds simple statistics about a field More...

class  PLearn::VVec
 A VVec is a reference to a row or part of a row (a subrow) of a VMatrix. More...

class  PLearn::VVMatrix
 this class is a wrapper for a .vmat VMatrix. More...

class  PLearn::YMDDatedVMatrix
class  PLearn::AdaBoost
class  PLearn::ClassifierFromDensity
class  PLearn::MultiInstanceNNet
class  PLearn::ConditionalDensityNet
class  PLearn::ConditionalDistribution
class  PLearn::ConditionalGaussianDistribution
class  PLearn::Distribution
class  PLearn::EmpiricalDistribution
class  PLearn::GaussianDistribution
class  PLearn::GaussianProcessRegressor
class  PLearn::GaussMix
class  PLearn::HistogramDistribution
class  PLearn::LocallyWeightedDistribution
class  PLearn::ManifoldParzen2
class  PLearn::PConditionalDistribution
class  PLearn::PDistribution
class  PLearn::SpiralDistribution
class  PLearn::UnconditionalDistribution
class  PLearn::UniformDistribution
class  PLearn::AddCostToLearner
class  PLearn::EmbeddedLearner
class  PLearn::Learner
class  PLearn::NeighborhoodSmoothnessNNet
class  PLearn::NeuralNet
class  PLearn::NNet
class  PLearn::PLearner
class  PLearn::SelectInputSubsetLearner
class  PLearn::StackedLearner
class  PLearn::StatefulLearner
class  PLearn::TestingLearner
class  PLearn::GraphicalBiText
class  PLearn::ShellProgressBar
class  PLearn::Dictionary
class  PLearn::ProbVector
class  PLearn::SmoothedProbSparseMatrix
class  PLearn::ComplementedProbSparseMatrix
class  PLearn::TextSenseSequenceVMatrix
 This class handles a sequence of words/sense tag/POS triplets to present it as target words and their context. More...

class  PLearn::ProbabilitySparseMatrix
class  PLearn::PPointableSet
class  PLearn::Set
struct  PLearn::Node
class  PLearn::WordNetOntology
class  PLearn::Experiment
class  PLearn::GenerateDecisionPlot
class  PLearn::Grapher
class  PLearn::StatSpec
 The specification of a statistic to compute (as can be specified as a string in PTester). More...

class  PLearn::PTester
 This code is deprecated, use PTester.h and PTester.cc instead. More...

class  PLearn::ConstantRegressor
class  PLearn::LinearRegressor
class  PLearn::PLS
class  PLearn::EmbeddedSequentialLearner
class  PLearn::MovingAverage
 This SequentialLearner only takes the n previous targets to predict the next one. More...

class  PLearn::SequentialLearner
class  PLearn::SequentialModelSelector
class  PLearn::SequentialValidation
class  PLearn::TestMethod
class  PLearn::Train
class  PLearn::EntropyContrast
class  PLearn::GaussianContinuum
class  PLearn::Isomap
class  PLearn::IsomapTangentLearner
class  PLearn::KernelPCA
class  PLearn::KernelProjection
class  PLearn::KPCATangentLearner
class  PLearn::LLE
class  PLearn::PCA
class  PLearn::SpectralClustering
class  PLearn::TangentLearner
class  PLearn::AutoRunCommand
class  PLearn::FieldConvertCommand
class  PLearn::HelpCommand
class  PLearn::JulianDateCommand
class  PLearn::KolmogorovSmirnovCommand
class  PLearn::LearnerCommand
class  PLearn::PLearnCommand
 This is the base class for all PLearn commands (those that can be issued in the plearn program). More...

class  PLearn::PLearnCommandRegistry
class  PLearn::ReadAndWriteCommand
class  PLearn::RunCommand
class  PLearn::TestDependenciesCommand
class  PLearn::TestDependencyCommand
class  PLearn::VMatCommand

Typedefs

typedef map< const void *, void * > CopiesMap
 Global typedef to make the map of copied objects (needed by the deep copy mechanism in Object) more palatable.

typedef map< string, float > map_string_float
 Some typedefs to use the NODEEPCOPY macro with.

typedef map< string, double > map_string_double
typedef map< double, string > map_double_string
typedef map< float, string > map_float_string
typedef int(* compare_function )(const void *, const void *)
typedef vector< PP< OptionBase > > OptionList
typedef void(* VOIDFUNC )()
typedef Object *(* NEW_OBJECT )()
 Typedef for the "new instance" function type, which returns a default-initialized Object.

typedef OptionList &(* GETOPTIONLIST_METHOD )()
typedef bool(* ISA_METHOD )(Object *o)
typedef map< string, TypeMapEntry > TypeMap
typedef PP< SDBVMOutputCoder > PSDBVMOutputCoder
typedef PP< SDBVMField > PSDBVMField
typedef PP< SDBVMFieldDiscrete > PSDBVMFieldDiscrete
typedef Array< PSDBVMFieldDiscrete > FieldArray
 In general, if there are N fields, x_1...x_N, and each can take y_i values, then the discrete value is:.

typedef SimpleDB SDB
 A utility typedef for the common case.

typedef Ker CostFunc
 a cost function maps (output,target) to a loss

typedef CostFunc ProfitFunc
 a profit function maps (output,target) to a profit

typedef real(* tRealFunc )(real)
typedef real(* tRealReadFunc )(real, real)
typedef pair< real, StatsCollectorCounts * > PairRealSCCType
typedef PP< StatsIterator > StatsIt
typedef TinyVector< int, 7 > IVec
typedef TTensor< real > Tensor
typedef TMat< real > Mat
typedef TVec< real > Vec
typedef bool(* MeasurerCallbackFunction )(int t, const Vec &costs)
typedef Array< PP< HyperOptimizer > > HStrategy
typedef map< string, string > HAliases
typedef RandomVar MatRandomVar
typedef int tFileHandle
typedef ofstream pofstream
 The stream classes.

typedef PP< Learner > PPLearner
typedef map< int, real > SparseVec
typedef const map< int, real > ConstSparseVec
typedef PPointableSet::iterator SetIterator

Enumerations

enum  SDBVMOutputCoding { SDBVMUnknownCoding = 0, SDBVMNumeric, SDBVMOneHot, SDBVMOneHotMinus1 }
enum  FieldType {
  Unknown = 0, StringType, CharacterType, SignedCharType,
  ShortType, IntType, FloatType, DoubleType,
  DateType
}
enum  eNumericType {
  NT_NOT_NUMERIC = 0x0000, NT_ORDINAL = 0x0001, NT_CARDINAL = 0x0002, NT_CURRENCY = 0x0004,
  NT_PREFIXED = 0x0008, NT_SUFFIXED = 0x0010, NT_RANGE = 0x0020, NT_TIME = 0x0040,
  NT_CODE = 0x0080, NT_PERCENT = 0x0100, NT_UNKNOWN_NUMERIC_TYPE = 0x8000
}

Functions

template<class T> void swap (Array< T > &a1, Array< T > &a2)
template<class T> PStream & operator>> (PStream &in, Array< T > &a)
template<class T> PStream & operator<< (PStream &out, const Array< T > &a)
template<class T> ostream & operator<< (ostream &out, const Array< T > &a)
template<class T> void deepCopyField (Array< T > &field, CopiesMap &copies)
template<class T> Array< T > operator & (const T &elem, const Array< T > &a)
template<class T> Array< T > & operator &= (Array< T > &a, const T &elem)
template<class T> Array< T > & operator &= (Array< T > &a, const Array< T > &ar)
template<class T> Array< T > & operator &= (Array< T > &a, const vector< T > &ar)
template<class T> Array< T > operator & (const Array< T > &a, const T &elem)
template<class T> Array< T > operator & (const Array< T > &a, const Array< T > &ar)
template<class T> Array< T > operator & (const Array< T > &a, const vector< T > &ar)
string join (const Array< string > &s, const string &separator)
template<class T> Array< TVec< T > > operator & (const TVec< T > &m1, const TVec< T > &m2)
 This will allow a convenient way of building arrays of Matrices by writing ex: m1&m2&m3.

template<class T> TVec< T > concat (const Array< TVec< T > > &varray)
template<class T> TMat< T > vconcat (const Array< TMat< T > > &ar)
template<class T> TMat< T > hconcat (const Array< TMat< T > > &ar)
template<class T> TMat< T > vconcat (const TMat< T > &m1, const TMat< T > &m2)
template<class T> TMat< T > hconcat (const TMat< T > &m1, const TMat< T > &m2)
template<class T> Array< TMat< T > > operator & (const TMat< T > &m1, const TMat< T > &m2)
 This will allow a convenient way of building arrays of Matrices by writing ex: m1&m2&m3.
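
A brief usage sketch of this idiom (hedged; it relies only on the operator&, vconcat and hconcat declarations listed here, with matrix dimensions chosen for illustration):

    TMat<real> m1(2, 3), m2(2, 3), m3(2, 3);    // three 2x3 matrices
    Array< TMat<real> > parts = m1 & m2 & m3;   // operator& builds the Array
    TMat<real> tall = vconcat(parts);           // 6 x 3: rows stacked vertically
    TMat<real> wide = hconcat(m1, m2);          // 2 x 6: columns appended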

void endianswap2 (void *ptr, int n)
 swaps endians for n 2-byte elements (such as short)

void endianswap4 (void *ptr, int n)
 swaps endians for n 4-byte elements (such as int or float)

void endianswap8 (void *ptr, int n)
 swaps endians for n 8-byte elements (such as double)
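
For instance (a minimal sketch; the functions operate in place on the given buffer):

    int values[4] = {1, 2, 3, 4};
    endianswap4(values, 4);   // reverses the byte order of each of the four 4-byte ints
    endianswap4(values, 4);   // swapping a second time restores the original values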

char byte_order ()
void endianswap (char *ptr, int n=1)
void endianswap (signed char *ptr, int n=1)
void endianswap (unsigned char *ptr, int n=1)
void endianswap (short *ptr, int n=1)
void endianswap (unsigned short *ptr, int n=1)
void endianswap (int *ptr, int n=1)
void endianswap (unsigned int *ptr, int n=1)
void endianswap (long *ptr, int n=1)
void endianswap (unsigned long *ptr, int n=1)
void endianswap (float *ptr, int n=1)
void endianswap (double *ptr, int n=1)
template<class T> void deepCopyField (T &, CopiesMap &)
 Types that do not require deep copy. Any type not handled below: do nothing.

template<class T> void deepCopyField (T *&field, CopiesMap &copies)
template<class T> T * deepCopy (const T *source, CopiesMap &copies)
 A simple template function that calls the method.

template<class T> T * deepCopy (const T *source)
 This function simply calls the previous one with an initially empty map.
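
A minimal sketch of the two deepCopy variants (MyObject is a hypothetical Object subclass):

    MyObject* original = new MyObject();
    CopiesMap copies;                              // map shared across related copies
    MyObject* copy1 = deepCopy(original, copies);  // sub-objects already in 'copies' are reused
    MyObject* copy2 = deepCopy(original);          // same call, starting from an empty map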

int file_size (const string &filename)
 Simple file info.

char * strcopy (char *s)
 make a copy of a C string and return it

void pretty_print_number (char *buffer, real number)
 print a number without unnecessary trailing zeros, into buffer

bool file_exists (const string &filename)
bool isMapKeysAreInt (map< real, int > &m)
 check that all keys of the map are int values

string hostname ()
string prgname (const string &setname)
template<class In, class Out> Out copy_cast (In first, In last, Out res)
 Like std::copy, but with an explicit cast to the destination type.

template<class T> void clear_1 (T &x)
 clearing an element (that's called by clear_n...) Default implementation for clearing any type

void clear_1 (char &x)
void clear_1 (unsigned char &x)
void clear_1 (signed char &x)
void clear_1 (short &x)
void clear_1 (unsigned short &x)
void clear_1 (int &x)
void clear_1 (unsigned int &x)
void clear_1 (long &x)
void clear_1 (unsigned long &x)
void clear_1 (float &x)
void clear_1 (double &x)
void clear_1 (bool &x)
template<class For> void clear_n (For begin, int n)
 clears n elements starting at iterator position begin

void clear_n (float *begin, int n)
 efficient specialisation for built-in types

void clear_n (double *begin, int n)
void clear_n (bool *begin, int n)
void clear_n (char *begin, int n)
void clear_n (unsigned char *begin, int n)
void clear_n (short *begin, int n)
void clear_n (unsigned short *begin, int n)
void clear_n (int *begin, int n)
void clear_n (unsigned int *begin, int n)
void clear_n (long *begin, int n)
void clear_n (unsigned long *begin, int n)
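
For example (sketch only):

    double buffer[8];
    clear_n(buffer, 8);   // sets all eight doubles to 0 (uses the double* specialisation above)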
template<class T> void swap (T &a, T &b)
Object * loadObject (const string &filename)
 Loads an object from the given file (no macro-preprocessing is performed).

Object * macroLoadObject (const string &filename, map< string, string > &vars)
 Same as loadObject, but first performs macro-processing on the file. vars may be initialised with the values of some variables, and upon return it will also contain newly $DEFINED variables.

Object * macroLoadObject (const string &filename)
 same as previous, but no need to pass a variables map

Object * readObject (PStream &in, unsigned int id)
PStream & operator>> (PStream &in, Object *&x)
template<class T> Object * toObjectPtr (const T *x)
 The toObjectPtr functions attempt to return a pointer to Object (or 0 if the passed argument cannot be considered an Object subclass).

template<class T> Object * toObjectPtr (const T &x)
template<> Object * toObjectPtr (const Object &x)
template<> Object * toObjectPtr (const Object *x)
template<class T> Object * toObjectPtr (const PP< T > &x)
template<class T> Object * toIndexedObjectPtr (const Array< T > &x, int i)
template<class T> Object * toIndexedObjectPtr (const TVec< T > &x, int i)
template<class T> Object * toIndexedObjectPtr (const T &, int)
Object * readObject (istream &in_)
Object * newObject (const string &representation)
 Creates a new object according to the given representation.
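
A hedged sketch of building an object from its textual representation (the "ClassName(option = value)" syntax and the NNet option shown are assumptions, not taken from this page):

    Object* obj = newObject("NNet(nhidden = 10)");  // assumed representation syntax
    PP<Object> holder = obj;                        // smart pointer manages the reference count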

ostream & operator<< (ostream &out, const Object &obj)
PStream & operator>> (PStream &in, Object &o)
PStream & operator<< (PStream &out, const Object &o)
template<class ObjectType, class OptionType> void declareOption (OptionList &ol, const string &optionname, OptionType ObjectType::*member_ptr, OptionBase::flag_t flags, const string &description, const string &defaultval="")
 For flags, you should specify one of OptionBase::buildoption, OptionBase::learntoption or OptionBase::tuningoption. If the option is not to be serialized, you can additionally specify OptionBase::nosave.

template<class ObjectType, class OptionType> void declareOption (OptionList &ol, const string &optionname, OptionType *ObjectType::*member_ptr, OptionBase::flag_t flags, const string &description, const string &defaultval="")
template<class ObjectType, class OptionType> void redeclareOption (OptionList &ol, const string &optionname, OptionType ObjectType::*member_ptr, OptionBase::flag_t flags, const string &description, const string &defaultval="")
 Allows one to redeclare an option differently (e.g.
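
A sketch of a typical declareOptions implementation using declareOption as declared above (MyLearner and its learning_rate member are hypothetical):

    void MyLearner::declareOptions(OptionList& ol)
    {
        declareOption(ol, "learning_rate", &MyLearner::learning_rate,
                      OptionBase::buildoption,
                      "Gradient step size used during training.");
        inherited::declareOptions(ol);   // 'inherited' typedef assumed, as is conventional in PLearn classes
    }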

float date_to_float (const PDate &t)
 converts date to float: e.g. September 29 1972: 720929; December 25 2002: 1021225. Also converts missing date to missing float value and vice-versa.

PDate float_to_date (float f)
int operator- (const PDate &to_date, const PDate &from_date)
 subtract two dates, the result being counted in days.

PDate operator+ (const PDate &pdate, int ndays)
 add a number of days

PDate operator- (const PDate &pdate, int ndays)
 subtract a number of days

ostream & operator<< (ostream &os, const PDate &date)
PDate float_to_date (double d)
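
A small sketch of the date helpers above (the PDate year/month/day constructor is assumed):

    PDate d(1972, 9, 29);                   // September 29, 1972 (assumed constructor)
    float f = date_to_float(d);             // 720929, per the encoding described above
    PDate back = float_to_date(720929.0f);  // recovers the same date
    int ndays = PDate(1972, 10, 1) - d;     // 2: difference counted in days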
double datetime_to_double (const PDateTime &t)
 converts date/time to double: for example: September 29 1972: 720929; December 25 2002: 1021225.

PDateTime double_to_datetime (double f)
double hhmmss_to_double (int hh, int mm, int ss)
 converts an hours/minutes/seconds to a day fraction

void double_to_hhmmss (double fraction, int &hh, int &mm, int &ss)
 convert a day fraction (< 1) to hours/minutes/seconds
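
For example (sketch only):

    double noon = hhmmss_to_double(12, 0, 0);  // 0.5: half a day
    int hh, mm, ss;
    double_to_hhmmss(0.75, hh, mm, ss);        // hh == 18, mm == 0, ss == 0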

double operator- (const PDateTime &to_date, const PDateTime &from_date)
 subtract two dates, the result being counted in days (+ fractions)

ostream & operator<< (ostream &os, const PDateTime &date)
size_t hashbytes (const char *byte_start, size_t byte_length)
 Hash tables support.

size_t hashval (const char *strng)
template<class T> size_t hashval (const T &x)
 default which will work in many cases but not all

void errormsg (const char *msg,...)
void warningmsg (const char *msg,...)
void exitmsg (const char *msg,...)
template<class T> void deepCopyField (PP< T > &field, CopiesMap &copies)
 Any pointer or smart pointer: call deepCopy().

template<class T> T * deepCopy (PP< T > source, CopiesMap &copies)
 A simple template function.

template<class T> T * deepCopy (PP< T > source)
 This function simply calls the previous one with an initially empty map.

ostream & operator<< (ostream &out, Range r)
PStream & operator<< (PStream &out, const RealRange &x)
PStream & operator>> (PStream &in, RealRange &x)
bool operator< (RealMapping::single_mapping_t a, RealMapping::single_mapping_t b)
void write (ostream &out, const RealRange &range)
ostream & operator<< (ostream &out, const RealRange &range)
void read (istream &in, RealRange &range)
Object * toObjectPtr (const RealMapping &o)
PStream & operator>> (PStream &in, RealMapping &o)
PStream & operator>> (PStream &in, RealMapping *&o)
PStream & operator<< (PStream &out, const RealMapping &o)
PStream & operator>> (PStream &in, PP< RealMapping > &o)
Object * toObjectPtr (const SetOption &o)
PStream & operator>> (PStream &in, SetOption &o)
PStream & operator>> (PStream &in, SetOption *&o)
PStream & operator<< (PStream &out, const SetOption &o)
PStream & operator>> (PStream &in, PP< SetOption > &o)
template<class T, unsigned SizeBits, class Allocator> unsigned int hashval (const SmallVector< T, SizeBits, Allocator > &v)
 hash function for hash tables

template<class T, unsigned SizeBits, class Allocator> bool operator== (const SmallVector< T, SizeBits, Allocator > &a, const SmallVector< T, SizeBits, Allocator > &b)
 Equality operator.

template<class T, unsigned SizeBits, class Allocator> bool operator!= (const SmallVector< T, SizeBits, Allocator > &x, const SmallVector< T, SizeBits, Allocator > &y)
 Inequality operator: returns !(x==y).

template<class T, unsigned SizeBits> bool operator< (const SmallVector< T, SizeBits > &, const SmallVector< T, SizeBits > &)

template<class T, unsigned SizeBits, class Allocator> bool operator< (const SmallVector< T, SizeBits, Allocator > &x, const SmallVector< T, SizeBits, Allocator > &y)
template<class T> PStream & operator<< (PStream &out, const Storage< T > &seq)
template<class T> PStream & operator>> (PStream &in, Storage< T > &seq)
ostream & operator<< (ostream &out, const StringTable &st)
string left (const string &s, size_t width, char padding)
 aligns the given string in a cell having the given width

string right (const string &s, size_t width, char padding)
string center (const string &s, size_t width, char padding)
bool pl_isnumber (const string &str, double *dbl)
bool pl_isnumber (const string &str, float *dbl)
long tolong (const string &s, int base)
 conversions from string to numerical types

bool tobool (const string &s)
double todouble (const string &s)
string extract_filename (const string &filepath)
 File path manipulation functions. Returns everything after the last '/' (if there's no '/', returns filepath).

string extract_directory (const string &filepath)
 Returns everything before the last '/' including the '/' (if there's no '/' it returns "./").

string extract_extension (const string &filepath)
 Returns everything after the last '.' of the filename (i.e.

string extract_filename_without_extension (const string &filepath)
 Returns everything before the last '.' of the filename, excluding the '.' (if there's no '.' in the filename it returns the whole filename).

string remove_extension (const string &filename)
 Return the filename without the extension (i.e. removing the last.
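
An illustrative sketch of these path helpers (return values follow the descriptions above; the exact extension handling is an assumption):

    string path = "/data/exp/model.psave";
    string file = extract_filename(path);   // "model.psave"
    string dir  = extract_directory(path);  // "/data/exp/"
    string ext  = extract_extension(path);  // the part after the last '.' (assumed "psave")
    string base = remove_extension(path);   // assumed: the name with ".psave" stripped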

string * data_filename_2_filenames (const string &filename, int &nb_files)
 take a filename containing the name of a file per line, and return these names as a string* of length nb_files

string removeblanks (const string &s)
 removes starting and ending blanks '\n', '\r', ' ', '\t'

string removeallblanks (const string &s)
 removes all blanks '\n', '\r', ' ', '\t'

string removenewline (const string &s)
 removes any trailing '\n' and/or '\r'

string remove_trailing_slash (const string &s)
 removes any trailing '/' from the path

string append_slash (const string &path)
 appends a trailing slash to path if there isn't already one

string lowerstring (const string &ss)
 convert a string to all lowercase

string upperstring (const string &ss)
 convert a string to all uppercase

string pgetline (istream &in)
 returns the next line read from the stream, after removing any trailing '\r' and/or '\n'

bool isBlank (const string &s)
 returns true if s is a blank line (containing only space, tab, until end of line or a # comment-character is reached)

bool isParagraphBlank (const string &s)
 returns true if s is a blank paragraph (containing only space, tab, until end of **string**)

string space_to_underscore (string str)
 replaces all characters <= ' ' (i.e. newline, tab, space, etc...) by '_'

string underscore_to_space (string str)
 replaces all underscores by a single space character

string backslash_to_slash (string str)
 replaces all backslashes with slash

int search_replace (string &text, const string &searchstr, const string &replacestr)
 replaces all occurrences of searchstr in the text by replacestr; returns the number of matches that got replaced
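
For instance:

    string text = "colour, colourful";
    int n = search_replace(text, "colour", "color");  // n == 2; text becomes "color, colorful"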

vector< string > split (const string &s, char delimiter)
 splits a string along occurrences of the delimiters.

vector< string > split (const string &s, const string &delimiters, bool keep_delimiters)
void split_on_first (const string &s, const string &delimiters, string &left, string &right)
pair< string, string > split_on_first (const string &s, const string &delimiters)
void remove_comments (string &text, const string &commentstart)
 In a multiline text, removes everything starting at commentstart pattern until the end of line.

string join (const vector< string > &s, const string &separator)
 makes a single string from a vector of strings
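
A small sketch combining split and join as declared above:

    vector<string> parts = split("2003-06-17", '-');  // {"2003", "06", "17"}
    string rejoined = join(parts, "/");               // "2003/06/17"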

vector< string > remove (const vector< string > &v, string element)
 return vector with all instances of element removed

int findit (const vector< string > &v, string element)
vector< string > addprepostfix (const string &prefix, const vector< string > &names, const string &postfix)
 returns the list of names, but with a prepended prefix and an appended postfix

string addprepostfix (const string &prefix, const string &text, const string &postfix)
 Returns a string with the prefix prepended and the postfix appended to each *line* of the text string.

vector< string > stringvector (int argc, char **argv)
 makes a C++ style vector of strings from a C style vector of strings. Note: this may be useful in conjunction with get_option.

string get_option (const vector< string > &command_line, const string &option, const string &default_value)
bool find (const vector< string > &command_line, const string &option)
vector< string > getNonBlankLines (const string &in)
 Returns a vector of string containing only non-empty lines, as you guessed it.

ostream & operator<< (ostream &out, const vector< string > &vs)
 formatted printing of vector<string> prints strings separated by a ", "

string tostring (const double &x)
string tostring (const float &x)
template<class T> string tostring (const T &x)

string tostring (const char *s)
 specialised version for char*

int toint (const string &s, int base=10)
float tofloat (const string &s)
 FLOAT.

vector< string > addprefix (const string &prefix, const vector< string > &names)
 returns the list of names, but with a prepended prefix

vector< string > addpostfix (const vector< string > &names, const string &postfix)
 returns the list of names, but with an appended postfix

string addprefix (const string &prefix, const string &text)
 Returns a string with the prefix prepended to each *line* of the text string.

string addpostfix (const string &text, const string &postfix)
 Returns a string with the postfix appended to each *line* of the text string.

int findpos (const vector< string > &v, string element)
 return index of element in v, or -1 if not found

template<class U, class V> ostream & operator<< (ostream &out, const pair< U, V > &p)
 Formatted printing of a pair<U,V> as U:V.

template<class T, unsigned N, class TTrait> bool operator== (const TinyVector< T, N, TTrait > &, const TinyVector< T, N, TTrait > &)
 Equality operator.

template<class T, unsigned N, class TTrait> bool operator< (const TinyVector< T, N, TTrait > &, const TinyVector< T, N, TTrait > &)
 Lexicographical Ordering.

template<class T, unsigned N, class TTrait> bool operator!= (const TinyVector< T, N, TTrait > &x, const TinyVector< T, N, TTrait > &y)
 Other operators (should be defined in std::rel_ops, but does not work properly with gcc yet).

template<class T, unsigned N, class TTrait> bool operator> (const TinyVector< T, N, TTrait > &x, const TinyVector< T, N, TTrait > &y)
template<class T, unsigned N, class TTrait> bool operator<= (const TinyVector< T, N, TTrait > &x, const TinyVector< T, N, TTrait > &y)
template<class T, unsigned N, class TTrait> bool operator>= (const TinyVector< T, N, TTrait > &x, const TinyVector< T, N, TTrait > &y)
void displayObjectHelp (ostream &out, const string &classname)
 Will display the help message for an object of the given classname.

 DECLARE_TYPE_TRAITS_FOR_BASETYPE (char, 0x01, 0x01)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (signed char, 0x01, 0x01)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (unsigned char, 0x02, 0x02)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (short, 0x03, 0x04)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (unsigned short, 0x05, 0x06)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (int, 0x07, 0x08)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (unsigned int, 0x0B, 0x0C)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (long, 0x07, 0x08)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (float, 0x0E, 0x0F)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (double, 0x10, 0x11)
 DECLARE_TYPE_TRAITS_FOR_BASETYPE (bool, 0x12, 0x12)
Mat input2dSet (const string &filename)
 This will input a 2d binary classification problem (launches a java applet).

void normalizeDataSets (Mat &training_set, Mat &validation_set, Mat &test_set)
void normalizeDataSets (VMat &training_set, VMat &validation_set, VMat &test_set)
void normalizeDataSets (Mat &training_set, Mat &test_set)
 normalize both training_set and test_set according to mean and stddev computed on training_set

void normalizeDataSet (Mat &m)
void splitTrainValidTest (VMat &data_set, VMat &train_set, VMat &valid_set, real valid_fraction, VMat &test_set, real test_fraction, bool normalize)
VMat reduceInputSize (real fraction, VMat data)
VMat reduceDataSetSize (real fraction, VMat data)
void remapClassnums (VMat &data, real remap_minval_to, real remap_maxval_to)
 remaps classnums to {0,1} or to {-1,+1}

VMat loadBreastCancerWisconsin (bool normalize, bool uniq)
int loadBreastCancer (VMat &training_set, VMat &validation_set, VMat &test_set, int ntrain, int nvalid, bool uniq)
 These calls return the number of classes...

VMat loadPimaIndians (bool normalize)
VMat loadHousing (bool normalize)
VMat loadSonar ()
VMat loadIonosphere ()
VMat loadDiabetes (bool normalize)
int loadDiabetes (VMat &training_set, VMat &validation_set, VMat &test_set, int ntrain, int nvalid)
int loadATT800 (VMat &training_set, VMat &test_set)
VMat loadLetters (bool normalize)
VMat loadLetters (const char *class0, const char *class1, bool normalize)
int loadLetters (VMat &training_set, VMat &validation_set, VMat &test_set, char *which_letters, real validation_fraction, real test_fraction, bool do_shuffle)
VMat loadLetters (int n_letters, bool do_shuffle)
int loadLetters (VMat &training_set, VMat &validation_set, VMat &test_set, int n_letters, real validation_fraction, real test_fraction, bool do_shuffle)
void loadCorelDatamat (int classnum, Mat &train, Mat &valid, Mat &test)
Mat smoothCorelHisto (Mat &data)
void loadCorel (Mat &training_set, Mat &validation_set, Mat &test_set, int negative_class, int positive_class)
void loadCallxx (int year, VMat &d)
void loadUSPS (VMat &trainset, VMat &testset, bool use_smooth)
VMat loadUSPS (bool use_smooth)
void loadLetters (int &inputsize, int &nclasses, VMat &trainset, VMat &testset)
void loadClassificationDataset (const string &datasetname, int &inputsize, int &nclasses, VMat &trainset, VMat &testset, bool normalizeinputs, VMat &allset)
void loadUCI (VMat &trainset, VMat &testset, VMat &allset, string db_spec, string id, bool &normalize, const string &type)
 Load the train, test and all datasets for a UCI database.

void loadUCIAMat (VMat &data, string file, PP< UCISpecification > uci_spec)
 Load an AMAT format UCI dataset in the given VMatrix.

void loadUCISet (VMat &data, string file, PP< UCISpecification > uci_spec)
 Load a specific UCI dataset in the given VMatrix.

string loadClassificationDatasetHelp ()
string getDataSetHelp ()
 returns a help describing the datasetstring parameter of getDataSet

time_t getDataSetDate (const string &datasetstring, const string &alias)
VMat getDataSet (const string &datasetstring, const string &alias)
string locateDatasetAliasesDir (const string &dir_or_file_path)
 Looks for 'dataset.aliases' file in specified directory and its parent directories; Returns the directory containing dataset.aliases (returned string will be terminated by a slash) or an empty string if not found.

map< string, string > getDatasetAliases (const string &dir_or_file_path)
 Looks for 'dataset.aliases' file in specified directory and its parent directories; loads it and returns the corresponding map.

void loadMNIST (VMat &training_set, VMat &test_set)
int ICBCpartition (const Vec &claims, real threshold)
ostream & operator<< (ostream &os, const FieldValue &ft)
double todouble (const RowIterator &it)
string tostring (const RowIterator &it)
void printFieldName (ostream &o, const Row::iterator &field)
 outputs the given field name in a cell of appropriate size

void printFieldNames (ostream &o, const Row &rowc)
 outputs all field names, separated by " | "

ostream & operator<< (ostream &o, const Row::iterator &field)
 outputs a single field flushed right in a cell of appropriate width (as given by field.char_width())

ostream & operator<< (ostream &o, const Row &rowc)
 outputs all fields in a row, separated by " | "

void halfShuffleRows (SDB &sdb)
 not quite a random shuffle (see implementation) but more efficient use of disk cache

void randomShuffleRows (SDB &sdb)
 Performs a random permutation of all the rows of the SDB (same algorithm as Mat::shuffle).

double todouble (const Row::iterator &it)
 Generic conversions from an iterator.

string tostring (const Row::iterator &it)
 PLEARN_IMPLEMENT_OBJECT (UCISpecification,"Describes the specifications of a UCI database.","This object specifies characteristics of a database from the UCI machine\n""learning repository, such as the input size, target size, etc...\n""It is intended to be used in a script put in the same directory as the\n""database, in order to be loaded by the getDataSet() method.\n")
 DECLARE_OBJECT_PTR (UCISpecification)
void displayHistogram (Gnuplot &gp, Mat dataColumn, int n_bins, Vec *pbins, bool regular_bins, bool normalized, string extra_args)
void displayVarGraph (const VarArray &outputs, bool display_values, real boxwidth, const char *the_filename, bool must_wait, VarArray display_only_these)
 VarGraph *.

void OldDisplayVarGraph (const VarArray &outputs, bool display_values, real boxwidth, const char *the_filename, bool must_wait, VarArray display_only_these)
void displayFunction (Func f, bool display_values, bool display_differentiation, real boxwidth, const char *the_filename, bool must_wait)
Mat compute2dGridOutputs (Learner &learner, real min_x, real max_x, real min_y, real max_y, int length, int width, real singleoutput_threshold)
void displayPoints (GhostScript &gs, Mat data, real radius, bool color)
 draws an 'x' or '+' with the given radius for every point in data (which is expected to have width 3: [x, y, classnum])

void displayDecisionSurface (GhostScript &gs, real destx, real desty, real destwidth, real destheight, Learner &learner, Mat trainset, Vec svindexes, Vec outlierindexes, int nextsvindex, real min_x, real max_x, real min_y, real max_y, real radius, int nx, int ny)
real rgb2real (real r, real g, real b)
void real2rgb (real colorval, real &r, real &g, real &b)
void matlabR11eigs (RowMapSparseMatrix< real > &A, Mat eigen_vectors, Vec eigen_values, string which_eigenvalues)
void matlabR11eigs (RowMapSparseMatrix< double > &A, Mat eigen_vectors, Vec eigen_values, string which_eigenvalues="LM")
 PLEARN_IMPLEMENT_OBJECT (FilePStreamBuf,"ONE LINE DESCRIPTION","MULTI LINE\nHELP")
 DECLARE_OBJECT_PTR (FilePStreamBuf)
string getcwd ()
 returns the absolute path to the current working directory as a string

int chdir (const string &path)
 change current directory

string abspath (const string &path)
 returns the absolute path of the (possibly relative) specified path.

bool pathexists (const string &path)
 returns true if the given path points to an existing regular file or directory

bool isdir (const string &path)
 returns true if the given path is an existing directory (or a symbolic link pointing to a directory)

bool isfile (const string &path)
 returns true if the given path is an existing regular file (or a symbolic link pointing to a file)

time_t mtime (const string &path)
 returns the time of last modification of file (or 0 if file does not exist).

vector< string > lsdir (const string &dirpath)
vector< string > lsdir_fullpath (const string &dirpath)
 Same as lsdir, except dirpath is prepended to the entries' names.

bool force_mkdir (const string &dirname)
void force_mkdir_for_file (const string &filepath)
 Extracts the directory part of the filepath and calls force_mkdir; calls PLERROR in case of failure.

bool force_rmdir (const string &dirname)
long filesize (const string &filename)
 Returns the length of a file, measured in bytes.

string loadFileAsString (const string &filepath)
 Returns the whole content of the file as a string.

void saveStringInFile (const string &filepath, const string &text)
 Writes the raw string into the given file; intermediate directories in filepath are created if necessary.

void cp (const string &srcpath, const string &destpath)
 calls system with cp -R to recursively copy source to destination

void rm (const string &file)
 calls the system rm command with the string file as parameter

void mv (const string &file)
 calls the system mv command with the string file as parameter

void mvforce (const string &file)
 calls the system mv command with the string file as parameter; will not prompt before overwriting

void readWhileMatches (istream &in, const string &s)
 Reads while the characters read exactly match those in s; throws a PLERROR exception as soon as they don't match.

void skipRestOfLine (istream &in)
 skips everything until the next '\n' (also consumes the '\n')

void skipBlanksAndComments (istream &in)
 will skip all blanks (white space, newlines and #-style comments); the next character read will be the first non-blank

void getNextNonBlankLine (istream &in, string &line)
 returns the next non-blank line (#-style comments are considered blank)

int countNonBlankLinesOfFile (const string &filename)
 Returns the number of non-blank lines in the file; #-style comments are considered blank.

int smartReadUntilNext (istream &in, string stoppingsymbols, string &characters_read, bool ignore_brackets)
 same as PStream's method smartReadUntilNext, but for istream

string newFilename (const string directory, const string prefix, bool is_directory)
 Returns a temporary file (or directory) name suitable for a unique (one time) use.

string makeFileNameValid (const string &path)
void touch (const string &file)
 trivial unix touch

string makeExplicitPath (const string &filename)
 returns "./"+filename if filename is relative to current dir

string readFileAndMacroProcess (const string &filepath, map< string, string > &variables)
 Same as readAndMacroProcess, but takes a filename instead of an istream. The following variables are automatically set from the filepath: FILEPATH, DIRPATH, FILENAME, FILEBASE, FILEEXT.

string readAndMacroProcess (istream &in, map< string, string > &variables)
 Will return the text, macro processed, with each instance of ${varname} in the text that corresponds to a key in the given map replaced by its associated value.

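A simplified sketch of the ${varname} substitution described above, using only the standard library; PLearn's readAndMacroProcess additionally supports nested definitions and other directives, and its handling of undefined variables may differ from this sketch (here they are simply dropped).

    #include <map>
    #include <string>

    // Illustrative only: replace each ${varname} occurrence by its value
    // from the given map.
    std::string substitute_macros_sketch(const std::string& text,
                                         const std::map<std::string, std::string>& vars)
    {
        std::string out;
        std::string::size_type pos = 0;
        while (pos < text.size()) {
            std::string::size_type start = text.find("${", pos);
            if (start == std::string::npos) { out += text.substr(pos); break; }
            std::string::size_type end = text.find('}', start + 2);
            if (end == std::string::npos)   { out += text.substr(pos); break; }
            out += text.substr(pos, start - pos);
            auto it = vars.find(text.substr(start + 2, end - start - 2));
            if (it != vars.end())
                out += it->second;          // replace ${varname} by its value
            pos = end + 1;
        }
        return out;
    }
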
char peekAfterSkipBlanks (istream &in)
 peeks the first char after removal of blanks

char peekAfterSkipBlanksAndComments (istream &in)
 peeks the first char after removal of blanks and comments

char getAfterSkipBlanks (istream &in)
 gets the first char after removal of blanks

char getAfterSkipBlanksAndComments (istream &in)
 gets the first char after removal of blanks and comments

string readFileAndMacroProcess (const string &filepath)
FilesIntStream * word_sequences2files_int_stream (const char *word_sequences_file)
void loadMat (const string &file_name, TMat< float > &mat)
 Tries to guess the format...

void loadMat (const string &file_name, TMat< double > &mat)
void loadVec (const string &file_name, TVec< float > &vec)
void loadVec (const string &file_name, TVec< double > &vec)
void savePVec (const string &filename, const TVec< float > &vec)
 Old native PLearn binary format (.pmat).

void savePVec (const string &filename, const TVec< double > &vec)
void loadPVec (const string &filename, TVec< float > &vec)
void loadPVec (const string &filename, TVec< double > &vec)
void savePMat (const string &filename, const TMat< float > &mat)
void savePMat (const string &filename, const TMat< double > &mat)
void loadPMat (const string &filename, TMat< float > &mat)
void loadPMat (const string &filename, TMat< double > &mat)
void saveGnuplot (const string &filename, const Vec &vec)
void saveGnuplot (const string &filename, const Mat &mat)
void loadGnuplot (const string &filename, Mat &mat)
 Format readable by gnuplot.

void matlabSave (const string &dir, const string &plot_title, const Vec &data, const Vec &add_col, const Vec &bounds, string legend, bool save_plot)
void matlabSave (const string &dir, const string &plot_title, const Vec &xValues, const Vec &yValues, const Vec &add_col, const Vec &bounds, string legend, bool save_plot)
void matlabSave (const string &dir, const string &plot_title, const Mat &data, const Vec &add_col, const Vec &bounds, TVec< string > legend, bool save_plot)
 Simply calls the following matlabSave function with an empty xValues Vec (see below).

void matlabSave (const string &dir, const string &plot_title, const Vec &xValues, const Mat &yValues, const Vec &add_col, const Vec &bounds, TVec< string > legend, bool save_plot)
void saveAsciiWithoutSize (const string &filename, const Vec &vec)
void loadAsciiWithoutSize (const string &filename, const Vec &vec)
 Reads and writes an ASCII file without the size header (assuming that the size, i.e. length() and width(), is already set).

void saveAsciiWithoutSize (const string &filename, const Mat &mat)
void loadAsciiWithoutSize (const string &filename, const Mat &mat)
void saveSNMat (const string &filename, const Mat &mat)
Mat loadSNMat (const string &filename)
 SN Format.

void saveSNVec (const string &filename, const Vec &vec)
Vec loadSNVec (const string &filename)
Mat loadADMat (const string &filename)
 Native AD format.

Vec loadADVec (const string &filename)
int compare_string_pointers (const void *ts1, const void *ts2)
Mat loadUCIMLDB (const string &filename, char ****to_symbols, int **to_n_symbols, TVec< int > *the_max_in_col, TVec< string > *header_columns)
Mat loadSTATLOG (const string &filename, char ****to_symbols, int **to_n_symbols)
void loadJPEGrgb (const string &jpeg_filename, Mat &rgbmat, int &row_size, int scale)
void parseSizeFromRemainingLines (const string &filename, ifstream &in, bool &could_be_old_amat, int &length, int &width)
Mat makeMat (int length, int width, const string &values)
 convenience construction from a string, allowing one to write things such as Mat m = makeMat(2, 2, "1 2 3 4")

Vec makeVec (int length, const string &values)
template<class T> void loadAscii (const string &filename, TMat< T > &mat, TVec< string > &fieldnames, TVec< map< string, real > > *map_sr=0)
 WARNING: use only for float, double, and int types; other types are not guaranteed to work. Intelligent function that will load a file in almost any ASCII format that ever existed in this lab.

template<class T> void loadAscii (const string &filename, TMat< T > &mat)
template<class T> void saveAscii (const string &filename, const TMat< T > &mat, const TVec< string > &fieldnames)
template<class T> void saveAscii (const string &filename, const TMat< T > &mat)
template<class T> void saveAscii (const string &filename, const TVec< T > &vec)
 first number in file is length

template<class T> void loadAscii (const string &filename, TVec< T > &vec)
template<class T> void loadAsciiSingleBinaryDescriptor (const string &filename, TMat< T > &mat)
 Load an ASCII matrix whose format is: (entry_name, long_binary_descriptor) with 'long_binary_descriptor' being of the form '001100101011', each character being an entry of the matrix.

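A rough sketch of a reader for the format described above (entry name followed by a string of '0'/'1' characters, one matrix entry per character); plain std:: containers stand in for TMat, and all error handling is omitted.

    #include <fstream>
    #include <string>
    #include <vector>

    // Illustrative only: each line "name 001100101011" becomes one row of
    // 0/1 values.
    std::vector<std::vector<int> > load_binary_descriptors_sketch(const char* filename)
    {
        std::ifstream in(filename);
        std::vector<std::vector<int> > rows;
        std::string name, descriptor;
        while (in >> name >> descriptor) {
            std::vector<int> row;
            for (std::string::size_type i = 0; i < descriptor.size(); ++i)
                row.push_back(descriptor[i] - '0');   // '0' -> 0, '1' -> 1
            rows.push_back(row);
        }
        return rows;
    }
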
void write_compr_mode_and_size (ostream &out, unsigned char mode, int size)
void read_compr_mode_and_size (istream &in, unsigned char &mode, int &size)
void binread_compressed (istream &in, double *data, int l)
void binwrite_compressed (ostream &out, const double *data, int l)
 version for compressed arrays (efficient for sparse data and small integer values); the format is detailed in the .cc file, see the write_compr_mode_and_size function in general.cc

void binread_compressed (istream &in, float *data, int l)
void binwrite_compressed (ostream &out, const float *data, int l)
void read_compr_mode_and_size (FILE *in, unsigned char &mode, int &size)
void binread_compressed (FILE *in, double *data, int l)
void binwrite_compressed (FILE *out, const double *data, int l)
void binread_compressed (FILE *in, float *data, int l)
void binwrite_compressed (FILE *out, const float *data, int l)
void write_compr_mode_and_size_ptr (char *&out, unsigned char mode, int size)
void read_compr_mode_and_size_ptr (char *&in, unsigned char &mode, int &size)
 DEPRECATED DO NOT USE! compressed vec to and from memory.

void uncompress_vec (char *comprbuf, double *data, int l, bool double_stored_as_float)
void compress_vec (char *comprbuf, const double *data, int l, bool double_stored_as_float)
size_t new_read_compressed (FILE *in, real *vec, int l, bool swap_endians)
 Reads the l doubles in the new compressed format from in; returns the number of bytes read.

unsigned char new_get_compr_data_type (double x, double tolerance)
unsigned char new_get_compr_data_type (float x)
size_t new_write_mode_and_size (FILE *out, bool insert_zeroes, unsigned int N, unsigned char data_type)
 returns number of bytes written

size_t new_write_raw_data_as (FILE *out, real *vec, int l, unsigned char data_type)
size_t new_write_compressed (FILE *out, real *vec, int l, double tolerance, bool swap_endians)
 Writes the l doubles in new compressed format to out.

template<class T> void binwrite (ostream &out, const T *x, int n)
 general purpose (but less efficient) version for pointers to things that have a binwrite/binread function

template<class T> void binread (istream &in, T *x, int n)
template<class A, class B> void binwrite (ostream &out, const pair< A, B > x)
template<class A, class B> void binread (istream &in, pair< A, B > &x)
void binwrite (ostream &out, char x)
 binwrite and binread for a few basic types

void binread (istream &in, char &x)
void binwrite (ostream &out, unsigned char x)
void binread (istream &in, unsigned char &x)
void binwrite (ostream &out, int x)
void binread (istream &in, int &x)
void binwrite (ostream &out, unsigned int x)
void binread (istream &in, unsigned int &x)
void binwrite (ostream &out, short x)
void binread (istream &in, short &x)
void binwrite (ostream &out, unsigned short x)
void binread (istream &in, unsigned short &x)
void binwrite (ostream &out, bool x)
 note that bools are saved as unsigned shorts

void binread (istream &in, bool &x)
void binwrite (ostream &out, float x)
void binread (istream &in, float &x)
void binwrite (ostream &out, double x)
void binread (istream &in, double &x)
void binwrite_double (ostream &out, double x)
void binread_double (istream &in, double &x)
void binwrite_double (ostream &out, float x)
void binread_double (istream &in, float &x)
void binwrite (ostream &out, const int *x, int n)
 multi-element versions, giving address and number of elements

void binread (istream &in, int *x, int n)
void binwrite (ostream &out, const unsigned int *x, int n)
void binread (istream &in, unsigned int *x, int n)
void binwrite (ostream &out, const short *x, int n)
void binread (istream &in, short *x, int n)
void binwrite (ostream &out, const unsigned short *x, int n)
void binread (istream &in, unsigned short *x, int n)
void binwrite (ostream &out, const float *x, int n)
void binread (istream &in, float *x, int n)
void binwrite (ostream &out, const double *x, int n)
void binread (istream &in, double *x, int n)
void binwrite_double (ostream &out, const double *x, int n)
void binread_double (istream &in, double *x, int n)
void binwrite_double (ostream &out, const float *x, int n)
void binread_double (istream &in, float *x, int n)
template<class T> void binwrite (FILE *out, const T *x, int n)
 general purpose (but less efficient) version for pointers to things that have a binwrite/binread function

template<class T> void binread (FILE *in, T *x, int n)
template<class A, class B> void binwrite (FILE *out, const pair< A, B > x)
template<class A, class B> void binread (FILE *in, pair< A, B > &x)
void binwrite (FILE *out, char x)
 binwrite and binread for a few basic types

void binread (FILE *in, char &x)
void binwrite (FILE *out, unsigned char x)
void binread (FILE *in, unsigned char &x)
void binwrite (FILE *out, int x)
void binread (FILE *in, int &x)
void binwrite (FILE *out, unsigned int x)
void binread (FILE *in, unsigned int &x)
void binwrite (FILE *out, short x)
void binread (FILE *in, short &x)
void binwrite (FILE *out, unsigned short x)
void binread (FILE *in, unsigned short &x)
void binwrite (FILE *out, bool x)
 note that bools are saved as unsigned shorts

void binread (FILE *in, bool &x)
void binwrite (FILE *out, float x)
void binread (FILE *in, float &x)
void binwrite (FILE *out, double x)
void binread (FILE *in, double &x)
void binwrite_double (FILE *out, double x)
void binread_double (FILE *in, double &x)
void binwrite_double (FILE *out, float x)
void binread_double (FILE *in, float &x)
void binwrite (FILE *out, const int *x, int n)
 multi-element versions, giving address and number of elements

void binread (FILE *in, int *x, int n)
void binwrite (FILE *out, const unsigned int *x, int n)
void binread (FILE *in, unsigned int *x, int n)
void binwrite (FILE *out, const short *x, int n)
void binread (FILE *in, short *x, int n)
void binwrite (FILE *out, const unsigned short *x, int n)
void binread (FILE *in, unsigned short *x, int n)
void binwrite (FILE *out, const float *x, int n)
void binread (FILE *in, float *x, int n)
void binwrite (FILE *out, const double *x, int n)
void binread (FILE *in, double *x, int n)
void binwrite_double (FILE *out, const double *x, int n)
void binread_double (FILE *in, double *x, int n)
void binwrite_double (FILE *out, const float *x, int n)
void binread_double (FILE *in, float *x, int n)
void writeHeader (ostream &out, const string &classname, int version)
 writes "<ClassName:version>\n"

void writeFooter (ostream &out, const string &classname)
 writes "</ClassName>\n"

int readHeader (istream &in, const string &classname)
 consumes "<ClassName:version>\n" and returns version

void readFooter (istream &in, const string &classname)
 consumes "</ClassName>\n"

void writeFieldName (ostream &out, const string &fieldname)
 writes "fieldname: "

bool readFieldName (istream &in, const string &fieldname, bool force)
 consumes "fieldname: " if possible, and returns true if it does; however, if force=true and the fieldname is not found, an error is raised.

void fwrite_int (FILE *f, const int *ptr, int n, bool is_file_bigendian)
 Writes binary data to the file in the specified representation (little or big endian), regardless of the endianness used on the current architecture.

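A standalone sketch of the behaviour documented for fwrite_int above, i.e. writing 32-bit integers in a fixed byte order regardless of the host's endianness; PLearn's implementation relies on its own endianswap utilities, so this is only an illustration of the idea.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative only: emit each int as 4 bytes in the requested order.
    void fwrite_int32_sketch(std::FILE* f, const std::int32_t* ptr, int n,
                             bool file_is_bigendian)
    {
        for (int i = 0; i < n; ++i) {
            std::uint32_t v;
            std::memcpy(&v, &ptr[i], sizeof v);
            unsigned char bytes[4];
            if (file_is_bigendian) {
                bytes[0] = (v >> 24) & 0xFF; bytes[1] = (v >> 16) & 0xFF;
                bytes[2] = (v >> 8)  & 0xFF; bytes[3] = v & 0xFF;
            } else {
                bytes[0] = v & 0xFF;         bytes[1] = (v >> 8)  & 0xFF;
                bytes[2] = (v >> 16) & 0xFF; bytes[3] = (v >> 24) & 0xFF;
            }
            std::fwrite(bytes, 1, 4, f);
        }
    }
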
void fwrite_float (FILE *f, const float *ptr, int n, bool is_file_bigendian)
void fwrite_float (FILE *f, const double *ptr, int n, bool is_file_bigendian)
 writes double array to float file

void fwrite_double (FILE *f, const double *ptr, int n, bool is_file_bigendian)
void fwrite_double (FILE *f, const float *ptr, int n, bool is_file_bigendian)
 writes float array to double file

void fread_int (FILE *f, int *ptr, int n, bool is_file_bigendian)
 Reads binary data from a file assuming it is in the specified representation (either little or big endian); if necessary, the representation is translated to the endianness of the current architecture.

void fread_float (FILE *f, float *ptr, int n, bool is_file_bigendian)
void fread_float (FILE *f, double *ptr, int n, bool is_file_bigendian)
 reads disk floats into double array

void fread_double (FILE *f, double *ptr, int n, bool is_file_bigendian)
void fread_double (FILE *f, float *ptr, int n, bool is_file_bigendian)
 reads disk doubles into float array

void fread_short (FILE *f, unsigned short *ptr, int n, bool is_file_bigendian)
void write_int (ostream &out, const int *ptr, int n, bool is_file_bigendian)
 Writes binary data to the file in the specified representation (little or big endian), regardless of the endianness used on the current architecture.

void write_short (ostream &out, const short *ptr, int n, bool is_file_bigendian)
void write_double (ostream &out, const double *ptr, int n, bool is_file_bigendian)
void write_float (ostream &out, const float *ptr, int n, bool is_file_bigendian)
void read_int (istream &in, int *ptr, int n, bool is_file_bigendian)
 Reads binary data from a file assuming it is in the specified representation (either little or big endian); if necessary, the representation is translated to the endianness of the current architecture.

void read_short (istream &in, short *ptr, int n, bool is_file_bigendian)
void read_float (istream &in, float *ptr, int n, bool is_file_bigendian)
void read_double (istream &in, double *ptr, int n, bool is_file_bigendian)
void reverse_uint (const unsigned int *ptr, int n)
 NOTE: these calls are deprecated; use endianswap from base/byte_order.h directly.

void reverse_int (const int *ptr, int n)
void reverse_float (const float *ptr, int n)
void reverse_double (const double *ptr, int n)
void reverse_ushort (const unsigned short *ptr, int n)
void reverse_short (const short *ptr, int n)
int fread_int (FILE *f, bool is_file_bigendian=true)
float fread_float (FILE *f, bool is_file_bigendian=true)
double fread_double (FILE *f, bool is_file_bigendian=true)
void fwrite_int (FILE *f, int value, bool is_file_bigendian=true)
 The following calls write a single value to the file in the specified representation, regardless of the endianness on the current architecture.

void fwrite_float (FILE *f, float value, bool is_file_bigendian=true)
void fwrite_double (FILE *f, double value, bool is_file_bigendian=true)
void write_uint (ostream &out, const unsigned int *ptr, int n, bool is_file_bigendian)
void write_ushort (ostream &out, const unsigned short *ptr, int n, bool is_file_bigendian)
void write_bool (ostream &out, const bool *ptr, int n, bool is_file_bigendian)
void write_int (ostream &out, int value, bool is_file_bigendian=true)
 The following calls write a single value to the file in the specified representation, regardless of the endianness on the current architecture.

void write_short (ostream &out, short value, bool is_file_bigendian=true)
void write_float (ostream &out, float value, bool is_file_bigendian=true)
void write_double (ostream &out, double value, bool is_file_bigendian=true)
void write_uint (ostream &out, unsigned int value, bool is_file_bigendian=true)
void write_ushort (ostream &out, unsigned short value, bool is_file_bigendian=true)
void write_sbyte (ostream &out, signed char x)
void write_ubyte (ostream &out, unsigned char x)
void read_uint (istream &in, unsigned int *ptr, int n, bool is_file_bigendian)
void read_ushort (istream &in, unsigned short *ptr, int n, bool is_file_bigendian)
void read_bool (istream &in, bool *ptr, int n, bool is_file_bigendian)
int read_int (istream &in, bool is_file_bigendian=true)
short read_short (istream &in, bool is_file_bigendian=true)
float read_float (istream &in, bool is_file_bigendian=true)
double read_double (istream &in, bool is_file_bigendian=true)
unsigned int read_uint (istream &in, bool is_file_bigendian=true)
unsigned short read_ushort (istream &in, bool is_file_bigendian=true)
signed char read_sbyte (istream &in)
unsigned char read_ubyte (istream &in)
void writeNewline (ostream &out)
 Writes a single newline character.

void readNewline (istream &in)
 Reads next character and issues an error if it's not a newline.

template<class T> void writeField (ostream &out, const string &fieldname, const T &x)
 generic field writing and reading

template<class T> void readField (istream &in, const string &fieldname, T &x)
template<class T> void binwriteField (ostream &out, const string &fieldname, const T &x)
 generic field BINARY writing and reading

template<class T> void binreadField (istream &in, const string &fieldname, T &x)
template<class T> void binwriteField_double (ostream &out, const string &fieldname, const T &x)
template<class T> void binreadField_double (istream &in, const string &fieldname, T &x)
template<class T> void readField (istream &in, const string &fieldname, T &x, T default_value)
 readField with a default value when the field is not found

PStream & flush (PStream &out)
PStream & endl (PStream &out)
PStream & ws (PStream &in)
void binread_ (PStream &in, bool *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, double *x, unsigned int n, unsigned char typecode)
 The binread_ versions for float and double are special.

void binread_ (PStream &in, float *x, unsigned int n, unsigned char typecode)
template<class T> PStream & operator>> (PStream &in, T *&x)
template<class T> PStream & operator<< (PStream &out, const T *&x)
template<class T> PStream & operator>> (PStream &in, PP< T > &o)
template<class T> PStream & operator<< (PStream &out, const PP< T > &o)
template<class T> PStream & operator<< (PStream &out, T *&ptr)
PStream & operator<< (PStream &out, bool x)
template<class A, class B> PStream & operator<< (PStream &out, const pair< A, B > &x)
template<typename S, typename T> PStream & operator>> (PStream &in, pair< S, T > &x)
template<class MapT> void writeMap (PStream &out, const MapT &m)
template<class MapT> void readMap (PStream &in, MapT &m)
template<class Key, class Value> PStream & operator<< (PStream &out, const map< Key, Value > &m)
template<class Key, class Value> PStream & operator>> (PStream &in, map< Key, Value > &m)
template<class Key, class Value> PStream & operator<< (PStream &out, const multimap< Key, Value > &m)
template<class Key, class Value> PStream & operator>> (PStream &in, multimap< Key, Value > &m)
template<class Key, class Value> PStream & operator<< (PStream &out, const hash_map< Key, Value > &m)
template<class Key, class Value> PStream & operator>> (PStream &in, hash_map< Key, Value > &m)
template<class Key, class Value> PStream & operator<< (PStream &out, const hash_multimap< Key, Value > &m)
template<class Key, class Value> PStream & operator>> (PStream &in, hash_multimap< Key, Value > &m)
template<class Iterator> void binwrite_ (PStream &out, Iterator &it, unsigned int n)
 Serialization of sequences *.

void binwrite_ (PStream &out, const bool *x, unsigned int n)
void binwrite_ (PStream &out, const char *x, unsigned int n)
void binwrite_ (PStream &out, char *x, unsigned int n)
void binwrite_ (PStream &out, const signed char *x, unsigned int n)
void binwrite_ (PStream &out, signed char *x, unsigned int n)
void binwrite_ (PStream &out, const unsigned char *x, unsigned int n)
void binwrite_ (PStream &out, unsigned char *x, unsigned int n)
void binwrite_ (PStream &out, const short *x, unsigned int n)
void binwrite_ (PStream &out, short *x, unsigned int n)
void binwrite_ (PStream &out, const unsigned short *x, unsigned int n)
void binwrite_ (PStream &out, unsigned short *x, unsigned int n)
void binwrite_ (PStream &out, const int *x, unsigned int n)
void binwrite_ (PStream &out, int *x, unsigned int n)
void binwrite_ (PStream &out, const unsigned int *x, unsigned int n)
void binwrite_ (PStream &out, unsigned int *x, unsigned int n)
void binwrite_ (PStream &out, const long *x, unsigned int n)
void binwrite_ (PStream &out, long *x, unsigned int n)
void binwrite_ (PStream &out, const unsigned long *x, unsigned int n)
void binwrite_ (PStream &out, unsigned long *x, unsigned int n)
void binwrite_ (PStream &out, const float *x, unsigned int n)
void binwrite_ (PStream &out, float *x, unsigned int n)
void binwrite_ (PStream &out, const double *x, unsigned int n)
void binwrite_ (PStream &out, double *x, unsigned int n)
template<class Iterator> void binread_ (PStream &in, Iterator it, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, char *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, signed char *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, unsigned char *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, short *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, unsigned short *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, int *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, unsigned int *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, long *x, unsigned int n, unsigned char typecode)
void binread_ (PStream &in, unsigned long *x, unsigned int n, unsigned char typecode)
template<class SequenceType> void writeSequence (PStream &out, const SequenceType &seq)
template<class SequenceType> void readSequence (PStream &in, SequenceType &seq)
 This reads into a sequence.

template<class T> void write (ostream &out_, const T &o)
template<class T> void read (istream &in_, T &o)
template<class T> void read (const string &stringval, T &x)
template<class T> PStream & operator>> (PStream &in, vector< T > &v)
template<class T> PStream & operator<< (PStream &out, const vector< T > &v)
template<class SetT> void writeSet (PStream &out, const SetT &s)
template<class SetT> void readSet (PStream &in, SetT &s)
template<class T> PStream & operator>> (PStream &in, set< T > &v)
template<class T> PStream & operator<< (PStream &out, const set< T > &v)
template<class T> void load (const string &filepath, T &x)
template<class T> void save (const string &filepath, const T &x)
 If necessary, missing directories along the filepath will be created.

istream nullin (&null_streambuf)
ostream nullout (&null_streambuf)
iostream nullinout (&null_streambuf)
const string wordseparators (" \t\n\r)]};,:|#")
 List of characters considered to mark a separation between "words". This is a fairly restricted list, meaning that many things can be part of a "word" in this sense (e.g. "this-is_a+single@wor'd"); this is to ensure a smooth transition for the new setOption, which calls readOptionVal ...

const char * eNumericTypeNames (int a)
 converts a code into the corresponding string

bool containsChar (const char *s, const char *symbols)
 true if string s contains any one of the characters in symbols.

char * stringPos (const char *s, const char *strings[])
bool looksNumeric (const char *s)
 tells whether this string looks like a numeric entity

bool elementOf (const char *s, const char t)
void compactRepresentationTranslate (char *t)
void compactRepresentationShrinkNum (char *t)
void compactRepresentationRangesAndOrdinals (char *t)
void compactRepresentation (char *t)
 gives an (intermediate) code for a numeric string (starting with #)

int numericType (const char *mot)
 assigns a code to a "word"

 PLEARN_IMPLEMENT_OBJECT (AdditiveNormalizationKernel,"Normalizes additively an underlying kernel with respect to a training set.","From a kernel K, defines a new kernel K' such that:\n"" K'(x,y) = K(x,y) - E[K(x,x_i)] - E[K(x_i,y)] + E[K(x_i,x_j)]\n""where the expectation is performed on the data set.\n""If the 'remove_bias' option is set, then the expectation will not\n""take into account terms of the form K(x_i,x_i).\n""If the 'double_centering' option is set, this kernel K' will be\n""multiplied by -1/2 (this turns a squared distance kernel into a\n""centered dot product kernel).\n")
 DECLARE_OBJECT_PTR (AdditiveNormalizationKernel)
 PLEARN_IMPLEMENT_OBJECT (ClassDistanceProportionCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ClassDistanceProportionCostFunction)
CostFunc class_distance_proportion ()
 if outputs are neg distances to each class: dist_to_correct_class/(dist_to_correct_class+dist_to_closest_other_class)

 PLEARN_IMPLEMENT_OBJECT (ClassErrorCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ClassErrorCostFunction)
CostFunc class_error (bool output_is_classnum=false)
 PLEARN_IMPLEMENT_OBJECT (ClassMarginCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ClassMarginCostFunction)
CostFunc class_margin (bool binary_target_is_01=false, bool output_is_positive=false)
 difference between the correct class's score and the max of the other classes' scores

 PLEARN_IMPLEMENT_OBJECT (CompactVMatrixGaussianKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (CompactVMatrixGaussianKernel)
 PLEARN_IMPLEMENT_OBJECT (CompactVMatrixPolynomialKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (CompactVMatrixPolynomialKernel)
 PLEARN_IMPLEMENT_OBJECT (ConvexBasisKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ConvexBasisKernel)
 PLEARN_IMPLEMENT_OBJECT (DifferenceKernel,"ONE LINE DESCR","NO HELP")
CostFunc output_minus_target (int singleoutputindex)
 DECLARE_OBJECT_PTR (DifferenceKernel)
 PLEARN_IMPLEMENT_OBJECT (DirectNegativeCostFunction,"ONE LINE DESCR","NO HELP")
CostFunc directnegative_costfunc ()
 PLEARN_IMPLEMENT_OBJECT (DistanceKernel,"ONE LINE DESCR","NO HELP")
CostFunc absolute_deviation (int singleoutputindex)
 DECLARE_OBJECT_PTR (DistanceKernel)
 PLEARN_IMPLEMENT_OBJECT (DivisiveNormalizationKernel,"Divisive normalization of an underlying kernel.","From a positive kernel K, defines a new kernel K' such that:\n"" K'(x,y) = K(x,y) / sqrt(E[K(x,x_i)] . E[K(x_i,y)])\n""where the expectation is performed on the data set.\n""If the 'remove_bias' option is set, then the expectation will not\n""take into account terms of the form K(x_i,x_i).\n")
 DECLARE_OBJECT_PTR (DivisiveNormalizationKernel)
 PLEARN_IMPLEMENT_OBJECT (DotProductKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (DotProductKernel)
 PLEARN_IMPLEMENT_OBJECT (GaussianDensityKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (GaussianDensityKernel)
 PLEARN_IMPLEMENT_OBJECT (GaussianKernel,"The good old Gaussian kernel.","")
 DECLARE_OBJECT_PTR (GaussianKernel)
 PLEARN_IMPLEMENT_OBJECT (GeneralizedDistanceRBFKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (GeneralizedDistanceRBFKernel)
 PLEARN_IMPLEMENT_OBJECT (GeodesicDistanceKernel,"Computes the geodesic distance based on k nearest neighbors.","")
 DECLARE_OBJECT_PTR (GeodesicDistanceKernel)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Kernel,"ONE LINE DESCR","NO HELP")
Mat findClosestPairsOfDifferentClass (int k, VMat data, Ker dist)
 DECLARE_OBJECT_PTR (Kernel)
 DECLARE_OBJECT_PP (Ker, Kernel)
template<> void deepCopyField (Ker &field, CopiesMap &copies)
Array< Ker > operator & (const Ker &k1, const Ker &k2)
 ******************** inline Ker operators

 PLEARN_IMPLEMENT_OBJECT (LaplacianKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (LaplacianKernel)
 PLEARN_IMPLEMENT_OBJECT (LiftBinaryCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (LiftBinaryCostFunction)
CostFunc class_lift (bool make_positive=false)
 PLEARN_IMPLEMENT_OBJECT (LLEKernel,"The kernel used in Locally Linear Embedding.","This kernel is the (weighted) sum of two kernels K' and K'', such that:\n"" - K'(x_i, x_j) = \\delta_{ij}\n"" - K'(x_i, x) = K'(x, x_i) = w(x, x_i), where w(x, x_i) is the weight of\n"" x_i in the reconstruction of x by its knn nearest neighbors in the\n"" training set\n"" - K'(x, y) = 0\n"" - K''(x_i, x_j) = W_{ij} + W_{ji} - \\sum_k W_{ki} W{kj}, where W is the\n"" matrix of weights w(x_i, x_j)\n"" - K''(x, x_i) = K''(x_i, x) = 0\n""The weight of K' is given by the 'reconstruct_coeff' option: when this\n""weight tends to infinity, the mapping obtained is the same as the\n""out-of-sample extension proposed in (Saul and Roweis, 2002). To obtain\n""such a behavior, one should set 'reconstruct_coeff' to -1. This is the\n""default behavior, and it is suggested to keep it.\n")
 DECLARE_OBJECT_PTR (LLEKernel)
 PLEARN_IMPLEMENT_OBJECT (LogOfGaussianDensityKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (LogOfGaussianDensityKernel)
 PLEARN_IMPLEMENT_OBJECT (MulticlassErrorCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (MulticlassErrorCostFunction)
 PLEARN_IMPLEMENT_OBJECT (NegKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (NegKernel)
Ker operator- (const Ker &k)
 PLEARN_IMPLEMENT_OBJECT (NegLogProbCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (NegLogProbCostFunction)
CostFunc condprob_cost (bool normalize=false, bool smooth_map_outputs=false)
 negative log conditional probability

 PLEARN_IMPLEMENT_OBJECT (NegOutputCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (NegOutputCostFunction)
CostFunc neg_output_costfunc ()
 returns -output[0]. This is for density estimators whose use(x) method typically computes log(p(x))

 PLEARN_IMPLEMENT_OBJECT (NormalizedDotProductKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (NormalizedDotProductKernel)
 PLEARN_IMPLEMENT_OBJECT (PolynomialKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (PolynomialKernel)
 PLEARN_IMPLEMENT_OBJECT (PowDistanceKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (PowDistanceKernel)
 PLEARN_IMPLEMENT_OBJECT (PrecomputedKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (PrecomputedKernel)
 PLEARN_IMPLEMENT_OBJECT (PricingTransactionPairProfitFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (PricingTransactionPairProfitFunction)
 PLEARN_IMPLEMENT_OBJECT (QuadraticUtilityCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (QuadraticUtilityCostFunction)
CostFunc quadratic_risk (real risk_aversion, CostFunc profit_function)
 PLEARN_IMPLEMENT_OBJECT (ReconstructionWeightsKernel,"Computes the reconstruction weights of a point given its neighbors.","K(x, x_i) = the weight of x_i in the reconstruction of x by its knn\n""nearest neighbors. More precisely, we compute weights W_i such that\n""|| x - \\sum_j W_i x_i ||^2 is minimized, and K(x,x_i) = W_i.\n""If the second argument is not in the training set, K(x,y) will be 0.\n""In order not to compute K(x_i, x_j) = delta_{ij} when applied on\n""training points, one can set the 'ignore_nearest' option to 1 (or more),\n""which will ensure we do not use x_i itself in its reconstruction by its\n""nearest neighbors (however, the total number of neighbors computed,\n""including x_i itself, will always stay equal to knn).\n""Note that this is NOT a symmetric kernel!\n")
 DECLARE_OBJECT_PTR (ReconstructionWeightsKernel)
 PLEARN_IMPLEMENT_OBJECT (ScaledGaussianKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ScaledGaussianKernel)
 PLEARN_IMPLEMENT_OBJECT (ScaledGeneralizedDistanceRBFKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ScaledGeneralizedDistanceRBFKernel)
 PLEARN_IMPLEMENT_OBJECT (ScaledLaplacianKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ScaledLaplacianKernel)
 PLEARN_IMPLEMENT_OBJECT (SelectedOutputCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (SelectedOutputCostFunction)
 PLEARN_IMPLEMENT_OBJECT (SigmoidalKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (SigmoidalKernel)
 PLEARN_IMPLEMENT_OBJECT (SigmoidPrimitiveKernel,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (SigmoidPrimitiveKernel)
 PLEARN_IMPLEMENT_OBJECT (SourceKernel,"A kernel built upon an underlying source kernel","The default behavior of a SourceKernel is to forward all calls to the underlying\n""kernel. However, subclasses will probably want to override the methods to perform\n""more complex operations.")
 DECLARE_OBJECT_PTR (SourceKernel)
 PLEARN_IMPLEMENT_OBJECT (SquaredErrorCostFunction,"ONE LINE DESCR","NO HELP")
CostFunc squared_error (int singleoutputindex)
 DECLARE_OBJECT_PTR (SquaredErrorCostFunction)
CostFunc squared_classification_error (real hot_value=0.8, real cold_value=0.2)
 PLEARN_IMPLEMENT_OBJECT (WeightedCostFunction,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (WeightedCostFunction)
CostFunc weighted_costfunc (CostFunc costfunc)
 reweighting

int dnaupd_ (long int *, const char *, long int *, const char *, long int *, double *, double *, long int *, double *, long int *, long int *, long int *, double *, double *, long int *, long int *, short, short)
int dneupd_ (long int *, const char *, long int *, double *, double *, double *, long int *, double *, double *, double *, const char *, long int *, const char *, long int *, double *, double *, long int *, double *, long int *, long int *, long int *, double *, double *, long int *, long int *, short, short, short)
int snaupd_ (long int *, const char *, long int *, const char *, long int *, float *, float *, long int *, float *, long int *, long int *, long int *, float *, float *, long int *, long int *, short, short)
int sneupd_ (long int *, const char *, long int *, float *, float *, float *, long int *, float *, float *, float *, const char *, long int *, const char *, long int *, float *, float *, long int *, float *, long int *, long int *, long int *, float *, float *, long int *, long int *, short, short, short)
int dsaupd_ (long int *, const char *, long int *, const char *, long int *, double *, double *, long int *, double *, long int *, long int *, long int *, double *, double *, long int *, long int *, short, short)
int dseupd_ (long int *, const char *, long int *, double *, double *, long int *, double *, const char *, long int *, const char *, long int *, double *, double *, long int *, double *, long int *, long int *, long int *, double *, double *, long int *, long int *, short, short, short)
int ssaupd_ (long int *, const char *, long int *, const char *, long int *, float *, float *, long int *, float *, long int *, long int *, long int *, float *, float *, long int *, long int *, short, short)
int sseupd_ (long int *, const char *, long int *, float *, float *, long int *, float *, const char *, long int *, const char *, long int *, float *, float *, long int *, float *, long int *, long int *, long int *, float *, float *, long int *, long int *, short, short, short)
 PLEARN_IMPLEMENT_OBJECT (Binner,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (Binner)
 PLEARN_IMPLEMENT_OBJECT (ConditionalCDFSmoother,"Smoother that combines a detailed prior curve with a rough input curve.\n","This smoother is meant to smooth conditional distribution functions, using\n""a high-resolution prior cdf provided as a HistogramDistribution. Its 'smooth'\n""function takes a lower-resolution curve and smooths it using the prior\n""to fill the gaps.")
 DECLARE_OBJECT_PTR (ConditionalCDFSmoother)
 PLEARN_IMPLEMENT_OBJECT (ConditionalStatsCollector,"ONE LINE DESCRIPTION","MULTI LINE\nHELP")
 DECLARE_OBJECT_PTR (ConditionalStatsCollector)
real logOfCompactGaussian (const Vec &x, const Vec &mu, const Vec &eigenvalues, const Mat &eigenvectors, real gamma, bool add_gamma_to_eigenval)
 returns log P(x|gaussian) with a gaussian represented compactly by the first few eigenvalues and eigenvectors of its covariance matrix.

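One common way to write such a low-rank log-density (a sketch of the idea only, not necessarily the exact expression used by logOfCompactGaussian): with the leading eigenpairs (lambda_i, v_i), i = 1..k, of the covariance matrix and a residual variance gamma for the remaining D-k directions,

    \log p(x) = -\tfrac{1}{2}\Big[ D\log(2\pi)
        + \sum_{i=1}^{k}\log\lambda_i + (D-k)\log\gamma
        + \sum_{i=1}^{k}\frac{\big(v_i^\top(x-\mu)\big)^2}{\lambda_i}
        + \frac{\|x-\mu\|^2 - \sum_{i=1}^{k}\big(v_i^\top(x-\mu)\big)^2}{\gamma} \Big].
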
real logOfNormal (const Vec &x, const Vec &mu, const Mat &C)
real logPFittedGaussian (const Vec &x, const Mat &X, real lambda)
 Fits a gaussian to the points in X (computing its mean and covariance matrix, and adding lambda to the diagonal of that covariance matrix), then calls logOfNormal to return log(p(x | the_gaussian)).

template<class T> PStream & operator<< (PStream &out, const ProbSparseMatrix &m)
template<class T> PStream & operator>> (PStream &in, DoubleAccessSparseMatrix< T > &m)
void ssyevx_ (char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, float *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void dsyevx_ (char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, double *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void ssyev_ (char *JOBZ, char *UPLO, int *N, float *A, int *LDA, float *W, float *WORK, int *LWORK, int *INFO)
void dsyev_ (char *JOBZ, char *UPLO, int *N, double *A, int *LDA, double *W, double *WORK, int *LWORK, int *INFO)
void sgetri_ (int *N, float *A, int *LDA, int *IPIV, float *WORK, int *LWORK, int *INFO)
void dgetri_ (int *N, double *A, int *LDA, int *IPIV, double *WORK, int *LWORK, int *INFO)
void sgetrf_ (int *M, int *N, float *A, int *LDA, int *IPIV, int *INFO)
void dgetrf_ (int *M, int *N, double *A, int *LDA, int *IPIV, int *INFO)
void sgesv_ (int *N, int *NRHS, float *A, int *LDA, int *IPIV, float *B, int *LDB, int *INFO)
void dgesv_ (int *N, int *NRHS, double *A, int *LDA, int *IPIV, double *B, int *LDB, int *INFO)
void sgesdd_ (char *JOBZ, int *M, int *N, float *A, int *LDA, float *S, float *U, int *LDU, float *VT, int *LDVT, float *WORK, int *LWORK, int *IWORK, int *INFO)
void dgesdd_ (char *JOBZ, int *M, int *N, double *A, int *LDA, double *S, double *U, int *LDU, double *VT, int *LDVT, double *WORK, int *LWORK, int *IWORK, int *INFO)
void ssyevr_ (char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, int *ISUPPZ, float *WORK, int *LWORK, int *IWORK, int *LIWORK, int *INFO)
void dsyevr_ (char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, int *ISUPPZ, double *WORK, int *LWORK, int *IWORK, int *LIWORK, int *INFO)
void ssygvx_ (int *ITYPE, char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *B, int *LDB, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, float *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void dsygvx_ (int *ITYPE, char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *B, int *LDB, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, double *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
 PLEARN_IMPLEMENT_OBJECT (LiftStatsCollector,"Computes the performance of a binary classifier","The following statistics can be requested out of getStat():\n""- LIFT = % of positive examples in the first n samples, divided by the % of positive examples in the whole database\n""- LIFT_MAX = best performance that could be achieved, if all positive examples were selected in the first n samples\n""(where n = lift_fraction * nsamples).\n""IMPORTANT: if you add more samples after you call finalize() (or get any of the statistics above), some samples may\n""be wrongly discarded and further statistics may be wrong\n\n""Here are the typical steps to follow to optimize the lift with a neural network:\n""- add a lift_output cost to cost_funcs (e.g. cost_funcs = [ \"stable_cross_entropy\" \"lift_output\"];)\n""- change the template_stats_collector of your PTester:\n"" template_stats_collector =\n"" LiftStatsCollector (\n"" output_column = \"lift_output\" ;\n"" opposite_lift = 1 ; # if you want to optimize the lift\n"" sign_trick = 1 ;\n"" )\n""- add the lift to its statnames:\n"" statnames = [ \"E[train.E[stable_cross_entropy]]\",\"E[test.E[stable_cross_entropy]]\",\n"" \"E[train.LIFT]\", \"E[test.LIFT]\" ]\n""- maybe also change which_cost in your HyperOptimize strategy.\n")
 DECLARE_OBJECT_PTR (LiftStatsCollector)
 PLEARN_IMPLEMENT_OBJECT (LimitedGaussianSmoother,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (LimitedGaussianSmoother)
 PLEARN_IMPLEMENT_OBJECT (ManualBinner,"Binner with predefined cut-points.","ManualBinner implements a Binner for which cutpoints are predefined. ""It's getBinning function doesn't have to look at the data; it simply ""builds a RealMapping from the supplied bin_positions.")
 DECLARE_OBJECT_PTR (ManualBinner)
Vec * newVecArray (int n)
Vec * newVecArray (int n, int the_length)
Mat * newMatArray (int n)
Mat * newMatArray (int n, int the_length, int the_width)
Mat * newIndexedMatArray (int n, Mat &m, int indexcolumn)
Mat operator^ (const Mat &m1, const Mat &m2)
Mat unitmatrix (int n)
ostream & operator<< (ostream &out, const Vec &v)
template<> void deepCopyField (Vec &field, CopiesMap &copies)
template<> void deepCopyField (Mat &field, CopiesMap &copies)
template<class MatT> int eigenSparseSymmMat (MatT &A, Vec e_values, Mat e_vectors, long int &n_evalues, int max_n_iter=300, bool compute_vectors=true, bool largest_evalues=true, bool according_to_magnitude=true, bool both_ends=false)
template<class MatT> int eigenSparseNonSymmMat (MatT &A, Vec e_values, Mat e_vectors, long int &n_evalues, int max_n_iter=300, bool compute_vectors=true, bool largest_evalues=true, bool according_to_magnitude=true, bool both_ends=false)
real pl_gammln (real z)
 returns log(Gamma(z)), where Gamma(z) = int_0^infty t^{z-1} e^{-t} dt

real pl_dgammlndz (real z)
 d(pl_gammln(z))/dz, the derivative of pl_gammln(z)

real pl_gser (real a, real x)
 returns the series value of the incomplete gamma function

real pl_gcf (real a, real x)
 returns the continued fraction representation of the incomplete gamma function

real pl_gammq (real a, real x)
 returns the incomplete gamma function Q(a,x) = 1 - P(a,x); it uses either the series or the continued fraction formula

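For reference (standard definition, stated here as background rather than as a claim about the implementation):

    Q(a,x) \;=\; 1 - P(a,x) \;=\; \frac{1}{\Gamma(a)}\int_x^{\infty} t^{a-1} e^{-t}\,dt ,

with the series expansion (pl_gser) conventionally used for x < a+1 and the continued fraction (pl_gcf) otherwise.
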
real pl_erf (real x)
 The error function.

real gauss_01_cum (real x)
 For X ~ Normal(0,1), cumulative probability function P(X<x).

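This cumulative can be expressed through the error function pl_erf above via the standard identity

    P(X < x) \;=\; \tfrac{1}{2}\Big(1 + \operatorname{erf}\big(x/\sqrt{2}\big)\Big),
    \qquad X \sim \mathcal{N}(0,1).
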
real gauss_01_quantile (real q)
real gauss_01_density (real x)
 for X ~ Normal(0,1), return density of X at x

real gauss_01_log_density (real x)
real gauss_log_density_var (real x, real mu, real var)
real gauss_density_var (real x, real mu, real var)
real gauss_log_density_stddev (real x, real mu, real sigma)
real p_value (real mu, real vn)
real normal_cdf (real x)
real gauss_cum (real x, real mu, real sigma)
real gauss_density_stddev (real x, real mu, real sigma)
real safeflog (real a)
real safeexp (real a)
real log (real base, real a)
real logtwo (real a)
real safeflog (real base, real a)
real safeflog2 (real a)
real logadd (real log_a, real log_b)
 compute log(exp(log_a)+exp(log_b)) without losing too much precision

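A minimal sketch of the standard trick behind such a computation: factor out the larger argument so that exp is only ever taken of a non-positive number (illustrative code, not PLearn's).

    #include <algorithm>
    #include <cmath>
    #include <limits>

    double logadd_sketch(double log_a, double log_b)
    {
        double m = std::max(log_a, log_b);
        if (m == -std::numeric_limits<double>::infinity())
            return m;                                    // log(0 + 0) = -inf
        // log(exp(a) + exp(b)) = m + log(1 + exp(min - m))
        return m + std::log1p(std::exp(std::min(log_a, log_b) - m));
    }
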
real square_f (real x)
real logsub (real log_a, real log_b)
 compute log(exp(log_a)-exp(log_b)) without losing too much precision

real small_dilogarithm (real x)
real positive_dilogarithm (real x)
real dilogarithm (real x)
 returns the dilogarithm function dilogarithm(x) = sum_{i=1}^infty x^i/i^2 = int_{z=x}^0 log(1-z)/z dz. It is also useful because -dilogarithm(-exp(x)) is the primitive of the softplus function log(1+exp(x)).

real hard_slope_integral (real l, real r, real a, real b)
real soft_slope_integral (real smoothness, real left, real right, real a, real b)
real tabulated_soft_slope_integral (real smoothness, real left, real right, real a, real b)
real sign (real a)
real positive (real a)
real negative (real a)
template<class T> T square (const T &x)
template<class T> T two (const T &x)
real fasttanh (const real &x)
real fastsigmoid (const real &x)
real ultrafasttanh (const real &x)
real ultrafastsigmoid (const real &x)
template<class T> bool is_missing (const T &x)
 Tells if the passed value means "missing" for its data-type.

bool is_missing (double x)
 Missing values for double and float are represented by NaN.

bool is_missing (float x)
 Missing values for double and float are represented by NaN.

bool is_integer (real x)
real FABS (real x)
real mypow (real x, real p)
real ipow (real x, int p)
real sigmoid (real x)
 numerically stable version of sigmoid(x) = 1.0/(1.0+exp(-x))

real is_positive (real x)
 "hard" version of the sigmoid, i.e.

real inverse_sigmoid (real x)
 numerically stable version of inverse_sigmoid(x) = log(x/(1-x))

real softplus (real x)
 numerically stable computation of log(1+exp(x))

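A sketch of the usual numerically stable formulations behind sigmoid and softplus above: branch on the sign of x so that exp is never taken of a large positive argument (illustrative, not the library's exact code).

    #include <cmath>

    double sigmoid_sketch(double x)              // 1 / (1 + exp(-x))
    {
        if (x >= 0) {
            double e = std::exp(-x);             // in (0, 1], cannot overflow
            return 1.0 / (1.0 + e);
        }
        double e = std::exp(x);
        return e / (1.0 + e);
    }

    double softplus_sketch(double x)             // log(1 + exp(x))
    {
        if (x > 0)
            return x + std::log1p(std::exp(-x)); // exp(-x) cannot overflow
        return std::log1p(std::exp(x));
    }
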
real tabulated_softplus (real x)
real inverse_softplus (real y)
 inverse of softplus function

real hard_slope (real x, real left=0, real right=1)
real soft_slope (real x, real smoothness=1, real left=0, real right=1)
real tabulated_soft_slope (real x, real smoothness=1, real left=0, real right=1)
real d_soft_slope (real x, real smoothness=1, real left=0, real right=1)
int n_choose (int M, int N)
 Return M choose N, i.e., M! / ( N! (M-N)! ).

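Computing M!/(N!(M-N)!) literally overflows quickly; a sketch of the usual incremental product/division approach (illustrative only):

    // Overflow-aware binomial coefficient sketch: multiply and divide
    // incrementally instead of forming the factorials.
    long long n_choose_sketch(int M, int N)
    {
        if (N < 0 || N > M) return 0;
        if (N > M - N) N = M - N;                // symmetry: C(M,N) = C(M,M-N)
        long long result = 1;
        for (int i = 1; i <= N; ++i)
            result = result * (M - N + i) / i;   // exact: result is C(M-N+i, i)
        return result;
    }
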
real softplus_primitive (real x)
real tabulated_softplus_primitive (real x)
int eigen_SymmMat (Mat &in, Vec &e_value, Mat &e_vector, int &n_evalues_found, bool compute_all, int nb_eigen, bool compute_vectors, bool largest_evalues)
int eigen_SymmMat_decreasing (Mat &in, Vec &e_value, Mat &e_vector, int &n_evalues_found, bool compute_all, int nb_eigen, bool compute_vectors, bool largest_evalues)
 same as the previous call, but eigenvalues/vectors are sorted largest first (in decreasing order)

int matInvert (Mat &in, Mat &inverse)
 This function computes the inverse of a matrix.

int lapackSolveLinearSystem (Mat &At, Mat &Bt, TVec< int > &pivots)
void solveLinearSystem (const Mat &A, const Mat &Y, Mat &X)
 for matrices A such that A.length() <= A.width(), find X s.t.

void solveTransposeLinearSystem (const Mat &A, const Mat &Y, Mat &X)
 for matrices A such that A.length() >= A.width(), find X s.t.

Mat solveLinearSystem (const Mat &A, const Mat &B)
Vec solveLinearSystem (const Mat &A, const Vec &b)
 Returns solution x of Ax = b (same as above, except b and x are vectors).

Vec constrainedLinearRegression (const Mat &Xt, const Vec &Y, real lambda)
Mat multivariate_normal (const Vec &mu, const Mat &A, int N)
 generate N vectors sampled from the normal with mean vector mu and covariance matrix A

Vec multivariate_normal (const Vec &mu, const Mat &A)
 generate a vector sampled from the normal with mean vector mu and covariance matrix A

Vec multivariate_normal (const Vec &mu, const Vec &e_values, const Mat &e_vectors)
 generate 1 vector sampled from the normal with mean mu and covariance matrix A = evectors * diagonal(e_values) * evectors'

void multivariate_normal (Vec &x, const Vec &mu, const Vec &e_values, const Mat &e_vectors, Vec &z)
 generate a vector x sampled from the normal with mean mu and covariance matrix A = evectors * diagonal(e_values) * evectors' (the normal(0,I) originally sampled to obtain x is stored in z).

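A standalone sketch of the sampling scheme described above: with A = V diag(e_values) V', draw z ~ Normal(0, I) and return x = mu + V diag(sqrt(e_values)) z. Plain std:: containers replace PLearn's Vec/Mat here.

    #include <cmath>
    #include <random>
    #include <vector>

    // Illustrative only: sample one vector from N(mu, V diag(e_values) V').
    std::vector<double> multivariate_normal_sketch(
        const std::vector<double>& mu,
        const std::vector<double>& e_values,
        const std::vector<std::vector<double> >& e_vectors,  // e_vectors[i] = i-th eigenvector
        std::mt19937& rng)
    {
        std::normal_distribution<double> gauss01(0.0, 1.0);
        std::vector<double> x(mu);
        for (std::size_t i = 0; i < e_values.size(); ++i) {
            double zi = gauss01(rng) * std::sqrt(e_values[i]);
            for (std::size_t d = 0; d < x.size(); ++d)
                x[d] += zi * e_vectors[i][d];
        }
        return x;
    }
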
void affineNormalization (Mat data, Mat W, Vec bias, real regularizer)
void lapack_Xsyevx_ (char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, double *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void lapack_Xsyevx_ (char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, float *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void lapack_Xgesdd_ (char *JOBZ, int *M, int *N, double *A, int *LDA, double *S, double *U, int *LDU, double *VT, int *LDVT, double *WORK, int *LWORK, int *IWORK, int *INFO)
void lapack_Xgesdd_ (char *JOBZ, int *M, int *N, float *A, int *LDA, float *S, float *U, int *LDU, float *VT, int *LDVT, float *WORK, int *LWORK, int *IWORK, int *INFO)
void lapack_Xsyevr_ (char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, int *ISUPPZ, float *WORK, int *LWORK, int *IWORK, int *LIWORK, int *INFO)
void lapack_Xsyevr_ (char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, int *ISUPPZ, double *WORK, int *LWORK, int *IWORK, int *LIWORK, int *INFO)
void lapack_Xsygvx_ (int *ITYPE, char *JOBZ, char *RANGE, char *UPLO, int *N, double *A, int *LDA, double *B, int *LDB, double *VL, double *VU, int *IL, int *IU, double *ABSTOL, int *M, double *W, double *Z, int *LDZ, double *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
void lapack_Xsygvx_ (int *ITYPE, char *JOBZ, char *RANGE, char *UPLO, int *N, float *A, int *LDA, float *B, int *LDB, float *VL, float *VU, int *IL, int *IU, float *ABSTOL, int *M, float *W, float *Z, int *LDZ, float *WORK, int *LWORK, int *IWORK, int *IFAIL, int *INFO)
template<class num_t> void lapackEIGEN (const TMat< num_t > &A, TVec< num_t > &eigenvals, TMat< num_t > &eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
 Computes the eigenvalues and eigenvectors of a symmetric (NxN) matrix A.

template<class num_t> void lapackGeneralizedEIGEN (const TMat< num_t > &A, const TMat< num_t > &B, int ITYPE, TVec< num_t > &eigenvals, TMat< num_t > &eigenvecs, char RANGE='A', num_t low=0, num_t high=0, num_t ABSTOL=0)
 Computes the eigenvalues and eigenvectors of a real generalized symmetric-definite eigenproblem, of the form A*x=(lambda)*B*x, A*B*x=(lambda)*x, or B*A*x=(lambda)*x. A and B are assumed to be symmetric, and B is also positive definite.

template<class num_t> void eigenVecOfSymmMat (TMat< num_t > &m, int k, TVec< num_t > &eigen_values, TMat< num_t > &eigen_vectors)
 Computes up to k largest eigen_values and corresponding eigen_vectors of symmetric matrix m.

template<class num_t> void generalizedEigenVecOfSymmMat (TMat< num_t > &m1, TMat< num_t > &m2, int itype, int k, TVec< num_t > &eigen_values, TMat< num_t > &eigen_vectors)
 Computes up to k largest eigen_values and corresponding eigen_vectors of a real generalized symmetric-definite eigenproblem, of the form m1*x=(lambda)*m2*x (itype = 1), m1*m2*x=(lambda)*x (itype = 2) or m2*m1*x=(lambda)*x (itype = 3). m1 and m2 are assumed to be symmetric, and m2 is also positive definite.

template<class num_t> void lapackSVD (const TMat< num_t > &At, TMat< num_t > &Ut, TVec< num_t > &S, TMat< num_t > &V, char JOBZ='A', real safeguard=1)
template<class num_t> void SVD (const TMat< num_t > &A, TMat< num_t > &U, TVec< num_t > &S, TMat< num_t > &Vt, char JOBZ='A', real safeguard=1)
 Performs the SVD decomposition A = U.S.Vt, where U and Vt are orthonormal matrices.

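As a reminder of the decomposition being computed (standard convention; the exact storage layout of U, S and Vt is defined by the functions above):

    A \;=\; U\,\mathrm{diag}(S)\,V^{\top},\qquad
    U^{\top}U = I,\quad V^{\top}V = I,\quad S_1 \ge S_2 \ge \dots \ge 0 .
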
Vec closestPointOnHyperplane (const Vec &x, const Mat &points, real weight_decay=0.)
 closest point to x on hyperplane that passes through all points (with weight decay)

real hyperplaneDistance (const Vec &x, const Mat &points, real weight_decay=0.)
 Distance between point x and closest point on hyperplane that passes through all points.

template<class MatT> void diagonalizeSubspace (MatT &A, Mat &X, Vec &Ax, Mat &solutions, Vec &evalues, Mat &evectors)
template<class T> PStream & operator<< (PStream &out, const ProbSparseMatrix &p)
template<class T> PStream & operator>> (PStream &in, ProbSparseMatrix &p)
real log_gamma (real xx)
real log_beta (real x, real y)
real incomplete_beta_continued_fraction (real z, real x, real y)
real incomplete_beta (real z, real x, real y)
real student_t_cdf (real t, int nb_degrees_of_freedom)
void manual_seed (long x)
void seed ()
long get_seed ()
real uniform_sample ()
real bounded_uniform (real a, real b)
real expdev ()
real gaussian_01 ()
real gaussian_mu_sigma (real mu, real sigma)
real gaussian_mixture_mu_sigma (Vec &w, const Vec &mu, const Vec &sigma)
real gamdev (int ia)
real poidev (real xm)
real bnldev (real pp, int n)
int multinomial_sample (const Vec &distribution)
int uniform_multinomial_sample (int N)
 return an integer between 0 and N-1 with equal probabilities

void fill_random_uniform (const Vec &dest, real minval, real maxval)
 sample each element from uniform distribution U[minval,maxval]

void fill_random_discrete (const Vec &dest, const Vec &set)
 sample each element from the given set

void fill_random_normal (const Vec &dest, real mean, real stdev)
 sample each element from Normal(mean,sdev^2) distribution

void fill_random_normal (const Vec &dest, const Vec &mean, const Vec &stdev)
 sample each element from multivariate Normal(mean,diag(sdev^2)) distribution

void fill_random_uniform (const Mat &dest, real minval, real maxval)
void fill_random_normal (const Mat &dest, real mean, real sdev)
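
The random-number helpers above combine naturally with the fill_* functions; a small hedged sketch (Vec/Mat assumptions as before):

    manual_seed(12345);                        // reproducible sequence
    Vec v(10);
    fill_random_uniform(v, 0.0, 1.0);          // iid U[0,1] entries
    Mat m(5, 5);
    fill_random_normal(m, 0.0, 1.0);           // iid N(0,1) entries
    real u = uniform_sample();                 // one more uniform draw
    int k = uniform_multinomial_sample(10);    // integer in {0,...,9}, equal probabilities
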
double incbcf (double a, double b, double x)
double incbd (double a, double b, double x)
real normal_sample ()
real binomial_sample (real prob1)
 alias

template<class T> void bootstrap_rows (const TMat< T > &source, TMat< T > destination)
 sample with replacement the rows of source and put them in destination.

template<class T> void shuffleElements (const TVec< T > &vec)
 randomly shuffle the entries of the TVector

template<class T> void shuffleRows (const TMat< T > &mat)
template<class T> void computeRanks (const TMat< T > &mat, TMat< T > &ranks)
 For each column of mat, sort the elements and put in the 'ranks' matrix (of the same dimensions) the rank of original elements.
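
A short sketch combining the resampling helpers above (Vec/Mat assumptions as before); whether computeRanks reorders its input is not documented here, so the comment hedges it.

    Mat data(100, 3), boot(100, 3), ranks(100, 3);
    fill_random_normal(data, 0.0, 1.0);
    bootstrap_rows(data, boot);        // rows of 'data' sampled with replacement into 'boot'
    computeRanks(data, ranks);         // per-column ranks (may reorder 'data' as a side effect)

    Vec v(5);
    fill_random_uniform(v, 0.0, 1.0);
    shuffleElements(v);                // in-place random permutation of the entries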

template<class T> void product (RowMapSparseMatrix< T > &M, const Vec &x, Vec &y)
 PLEARN_IMPLEMENT_OBJECT (ScaledConditionalCDFSmoother,"This smoothes a low-resolution histogram using as prior a high-resolution one.","This class takes as 'prior_cdf' a detailed histogram (usually derived from\n""an unconditional distribution) and uses it to smooth a given survival\n""function and provide extra detail (high resolution).\n""Two smoothing formula are provided, both of which guarantee that the smoothed\n""survival function takes the same value as the raw one at or near original bin\n""positions. In between the original bin positions, the smoothed survival\n""is obtained by applying one of two possible formula, according to the\n""preserve_relative_density option.\n")
 DECLARE_OBJECT_PTR (ScaledConditionalCDFSmoother)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Smoother,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (Smoother)
SparseMatrix operator+ (const SparseMatrix &A, const SparseMatrix &B)
 add two sparse matrices (of same dimensions but with values in possibly different places)

SparseMatrix add (Array< SparseMatrix > &matrices)
 add a bunch of sparse matrices and return result

void SpearmanRankCorrelation (const VMat &x, const VMat &y, Mat &r)
 Compute the Spearman Rank correlation statistic.

real testNoCorrelationAsymptotically (real r, int n)
 Return P(|R|>|r|) two-sided p-value for the null-hypothesis that there is no monotonic dependency, with r the observed correlation between two paired samples of length n.

void testSpearmanRankCorrelationPValues (const VMat &x, const VMat &y, Mat &pvalues)
 Compute P(|R|>|r|) two-sided p-value for the null-hypothesis that there is no monotonic dependency, with r the observed Spearman Rank correlation between two paired samples x and y of length n (column matrices).

void testSpearmanRankCorrelation (const VMat &x, const VMat &y, Mat &r, Mat &pvalues)
 same as above, but also returns the rank correlations in r

real max_cdf_diff (Vec &v1, Vec &v2)
 Returns the max of the difference between the empirical cdfs of 2 series of values. Side-effect: the call sorts v1 and v2.

real KS_test (real D, real N, int conv)
void KS_test (Vec &v1, Vec &v2, int conv, real &D, real &p_value)
 Kolmogorov-Smirnov test.

real KS_test (Vec &v1, Vec &v2, int conv)
 Returns the result of the Kolmogorov-Smirnov test between 2 samples. The call sorts v1 and v2.
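
A hedged sketch of the two-sample Kolmogorov-Smirnov helper above; the value passed for 'conv' is arbitrary here, since its exact meaning is not documented in this listing.

    Vec v1(200), v2(200);
    fill_random_normal(v1, 0.0, 1.0);
    fill_random_normal(v2, 0.2, 1.0);
    real D, p_value;
    KS_test(v1, v2, 10, D, p_value);   // note: sorts v1 and v2 in place
    // D is the max cdf difference, p_value the associated significance level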

real paired_t_test (Vec u, Vec v)
 Given two paired sets u and v of n measured values, the paired t-test determines whether they differ from each other in a significant way under the assumptions that the paired differences are independent and identically normally distributed.

 PLEARN_IMPLEMENT_OBJECT (StatsCollector,"Collects basic statistics","A StatsCollector allows to compute basic global statistics for a series of numbers,\n""as well as statistics within automatically determined ranges.\n""The first maxnvalues encountered values will be used as reference points to define\n""the ranges, so to get reasonable results, your sequence should be iid, and NOT sorted!\n""\n""The following statistics are available:"" - E Sample mean\n"" - V Sample variance\n"" - STDDEV Sample standard deviation\n"" - STDERROR Standard error of the mean\n"" - MIN Minimum value\n"" - MAX Maximum value\n"" - SUM Sum of observations \n"" - SUMSQ Sum of squares\n"" - FIRST First observation\n"" - LAST Last observation\n"" - N Total number of observations\n"" - NMISSING Number of missing observations\n"" - NNONMISSING Number of non-missing observations\n"" - SHARPERATIO Mean divided by standard deviation\n")
int sortIdComparator (const void *i1, const void *i2)
TVec< RealMapping > computeRanges (TVec< StatsCollector > stats, int discrete_mincount, int continuous_mincount)
PStream & operator>> (PStream &in, StatsCollectorCounts &c)
 this class holds simple statistics about a field

PStream & operator<< (PStream &out, const StatsCollectorCounts &c)
 DECLARE_OBJECT_PTR (StatsCollector)
template<> void deepCopyField (StatsCollector &field, CopiesMap &copies)
 This specialization is apparently needed; otherwise the generic deepCopyField from CopiesMap.h would be called when deep copying a TVec<StatsCollector>.

 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (StatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (MeanStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (ExpMeanStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (StddevStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (StderrStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (SharpeRatioStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (MinStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (MaxStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (LiftStatsIterator,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (QuantilesStatsIterator,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (StatsIterator)
 DECLARE_OBJECT_PTR (MeanStatsIterator)
 DECLARE_OBJECT_PTR (ExpMeanStatsIterator)
 DECLARE_OBJECT_PTR (StddevStatsIterator)
 DECLARE_OBJECT_PTR (StderrStatsIterator)
 DECLARE_OBJECT_PTR (SharpeRatioStatsIterator)
 DECLARE_OBJECT_PTR (MinStatsIterator)
 DECLARE_OBJECT_PTR (MaxStatsIterator)
 DECLARE_OBJECT_PTR (LiftStatsIterator)
 DECLARE_OBJECT_PTR (QuantilesStatsIterator)
 DECLARE_TYPE_TRAITS (StatsItArray)
PStream & operator>> (PStream &in, StatsItArray &o)
PStream & operator<< (PStream &out, const StatsItArray &o)
template<> void deepCopyField (StatsItArray &field, CopiesMap &copies)
StatsItArray operator & (const StatsIt &statsit1, const StatsIt &statsit2)
StatsIt mean_stats ()
StatsIt stddev_stats ()
StatsIt stderr_stats ()
StatsIt min_stats ()
StatsIt max_stats ()
StatsIt quantiles_stats (Vec quantiles, int n_data=1000)
StatsIt lift_stats (int the_index=0, real the_fraction=0.1)
StatsIt sharpe_ratio_stats ()
StatsIt exp_mean_stats ()
 exponential of the mean

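The StatsIt constructors above are meant to be combined with operator& declared earlier in this listing; a minimal sketch (assuming StatsItArray is the declared result type):

    StatsItArray stats = mean_stats() & stddev_stats();   // collect mean and stddev together
    StatsItArray more  = min_stats() & max_stats();
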
template<class T, class I> void selectElements (const TVec< T > &source, const TVec< I > &indices, TVec< T > &destination)
template<class T> void elementsEqualTo (const TVec< T > &source, const T &value, const TVec< T > &destination)
 put in destination 1's where source[i]==value, 0 otherwise

template<class T> TVec< T > concat (const TVec< T > &v1, const TVec< T > &v2)
template<class T> TVec< T > removeElement (const TVec< T > &v, int elemnum)
 if the element to remove is the first or the last one, then a subvector (a view) of v will be returned (for efficiency); otherwise, a fresh copy with the element removed is returned.

template<class T, class I> void selectRows (const TMat< T > &source, const TVec< I > &row_indices, TMat< T > &destination)
template<class T, class I> void selectColumns (const TMat< T > &source, const TVec< I > &column_indices, TMat< T > &destination)
template<class T, class I> void select (const TMat< T > &source, const TVec< I > &row_indices, const TVec< I > &column_indices, TMat< T > &destination)
template<class T> TMat< T > removeRow (const TMat< T > &m, int rownum)
 returns a new mat which is m with the given row removed; if the row to remove is the first or the last one, then a submatrix (a view) of m will be returned (for efficiency); otherwise, it is a fresh copy with the row removed.

template<class T> TMat< T > removeColumn (const TMat< T > &m, int colnum)
 returns a new mat which is m with the given column removed; if the column to remove is the first or the last one, then a submatrix (a view) of m will be returned (for efficiency); otherwise, it is a fresh copy with the column removed.

template<class T> TMat< T > diagonalmatrix (const TVec< T > &v)
template<class T> TMat< T > deepCopy (const TMat< T > source)
template<class T> TMat< T > deepCopy (const TMat< T > source, CopiesMap copies)
template<class T> void deepCopyField (TMat< T > &field, CopiesMap &copies)
template<class T> void clear (const TMat< T > &x)
template<class T> void swap (TMat< T > &a, TMat< T > &b)
template<class T> void operator<< (const TMat< T > &m1, const TMat< T > &m2)
 copy TMat << TMat

template<class T, class U> void operator<< (const TMat< T > &m1, const TMat< U > &m2)
 copy TMat << TMat (different types)

template<class T> void operator<< (const TMat< T > &m1, const TVec< T > &m2)
 copy TMat << TVec

template<class T, class U> void operator<< (const TMat< T > &m1, const TVec< U > &m2)
 copy TMat << TVec (different types)

template<class T> void operator<< (const TVec< T > &m1, const TMat< T > &m2)
 copy TVec << TMat

template<class T, class U> void operator<< (const TVec< T > &m1, const TMat< U > &m2)
 copy TVec << TMat (different types)

template<class T, class U> void operator>> (const TMat< T > &m1, const TMat< U > &m2)
 copy TMat >> TMat

template<class T, class U> void operator>> (const TVec< T > &m1, const TMat< U > &m2)
 copy TVec >> TMat

template<class T, class U> void operator>> (const TMat< T > &m1, const TVec< U > &m2)
 copy TMat >> TVec

template<class T> ostream & operator<< (ostream &out, const TMat< T > &m)
 printing a TMat

template<class T> istream & operator>> (istream &in, const TMat< T > &m)
 inputting a TMat

template<class T> TMat< T > rowmatrix (const TVec< T > &v)
 returns a view of this vector as a single row matrix

template<class T> TMat< T > columnmatrix (const TVec< T > &v)
 returns a view of this vector as a single column matrix

template<class T> void select (const TMat< T > &source, const TVec< T > &row_indices, const TVec< T > &column_indices, TMat< T > &destination)
template<class T> void savePMat (const string &filename, const TMat< T > &mat)
template<class T> PStream & operator<< (PStream &out, const TMat< T > &m)
 Read and Write from C++ stream: write saves length and read resizes accordingly (the raw modes don't write any size information).

template<class T> PStream & operator>> (PStream &in, TMat< T > &m)
string join (const TVec< string > &s, const string &separator)
template<class T> T max (const TVec< T > &vec)
template<class T> void softmax (const TVec< T > &x, const TVec< T > &y)
 y = softmax(x)

template<class T> void log_softmax (const TVec< T > &x, TVec< T > &y)
template<class T> void exp (const TVec< T > &x, TVec< T > &y)
 computes y <- exp(x)

template<class T> T sumsquare (const TVec< T > &x)
 returns the sum of squared elements

template<class T> T sumabs (const TVec< T > &x)
 returns the sum of absolute values of elements

template<class T> void squareElements (const TVec< T > &x)
 squares the elements of x in place

template<class T> void squareElements (const TMat< T > &m)
 squares the elements of m in place

template<class T> T sumsquare (const TMat< T > &m)
 returns the sum of squared elements

template<class T> T sumabs (const TMat< T > &m)
 returns the sum of absolute value of the elements

template<class T> void multiply (const TVec< T > &source1, T source2, TVec< T > &destination)
 destination = source1*source2

template<class T> T sum (const TVec< T > &vec, bool ignore_missing=false)
template<class T> T sum_of_log (const TVec< T > &vec)
 returns the sum of the log of the elements (this is also the log of the product of the elements but is more stable if you have very small elements).

template<class T> T product (const TVec< T > &vec)
template<class T> T mean (const TVec< T > &vec, bool ignore_missing=false)
 if ignore_missing==true, then the mean is computed by ignoring the possible MISSING_VALUE in the Vec.

template<class T> T harmonic_mean (const TVec< T > &vec, bool ignore_missing=false)
template<class T> T avgdev (const TVec< T > &vec, T meanval)
template<class T> T geometric_mean (const TVec< T > &vec)
template<class T> T weighted_mean (const TVec< T > &vec, const TVec< T > &weights, bool ignore_missing=false)
template<class T> T variance (const TVec< T > &vec, T meanval)
template<class T> T covariance (const TVec< T > &vec1, const TVec< T > &vec2, T mean1, T mean2)
template<class T> T weighted_variance (const TVec< T > &vec, const TVec< T > &weights, T no_weighted_mean, T weighted_mean)
template<class T> TVec< T > histogram (const TVec< T > &vec, T minval, T maxval, int nbins)
template<class T> T min (const TVec< T > &vec)
template<class T> T maxabs (const TVec< T > &vec)
template<class T> T minabs (const TVec< T > &vec, int index=int())
template<class T> int argmax (const TVec< T > &vec)
template<class T> int argmax (const TVec< T > &vec, bool ignore_missing)
template<class T> int argmin (const TVec< T > &vec)
template<class T> int argmin (const TVec< T > &vec, bool ignore_missing)
template<class T> T pownorm (const TVec< T > &vec, double n)
template<class T> T pownorm (const TVec< T > &vec)
template<class T> T norm (const TVec< T > &vec, double n)
template<class T> T norm (const TVec< T > &vec)
template<class T> void normalize (const TVec< T > &vec, double n)
template<class T> T powdistance (const TVec< T > &vec1, const TVec< T > &vec2, double n)
template<class T> T powdistance (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> T dist (const TVec< T > &vec1, const TVec< T > &vec2, double n)
template<class T> T L2distance (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> T L1distance (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> T weighted_powdistance (const TVec< T > &vec1, const TVec< T > &vec2, double n, const TVec< T > &weights)
template<class T> T weighted_distance (const TVec< T > &vec1, const TVec< T > &vec2, double n, const TVec< T > &weights)
template<class T> void operator-= (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> void operator *= (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> void operator/= (const TVec< T > &vec, T scalar)
template<class T> void operator/= (const TVec< T > &vec, int scalar)
template<class T> void compute_log (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > log (const TVec< T > &src)
template<class T> void compute_sqrt (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > sqrt (const TVec< T > &src)
template<class T> void compute_safelog (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > safelog (const TVec< T > &src)
template<class T> void operator/= (const TVec< T > &vec1, const TVec< T > &vec2)
template<class T> void compute_tanh (const TVec< T > &src, const TVec< T > &dest)
template<class T> void bprop_tanh (const TVec< T > &tanh_x, const TVec< T > &d_tanh_x, TVec< T > &d_x)
template<class T> TVec< T > tanh (const TVec< T > &src)
template<class T> void compute_fasttanh (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > fasttanh (const TVec< T > &src)
template<class T> void compute_sigmoid (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > sigmoid (const TVec< T > &src)
template<class T> void compute_fastsigmoid (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > fastsigmoid (const TVec< T > &src)
template<class T> void compute_inverse_sigmoid (const TVec< T > &src, const TVec< T > &dest)
template<class T> TVec< T > inverse_sigmoid (const TVec< T > &src)
template<class T> void negateElements (const TVec< T > &vec)
template<class T> void invertElements (const TVec< T > &vec)
template<class T> TVec< T > inverted (const TVec< T > &vec)
template<class T> void operator+= (const TVec< T > &vec, T scalar)
template<class T> void operator-= (const TVec< T > &vec, T scalar)
template<class T> TVec< T > operator- (TVec< T > vec)
template<class T> T dot (const TVec< T > &vec1, const TVec< T > &vec2)
template<class V, class T, class U> V dot (const TVec< T > &vec1, const TVec< U > &vec2)
 Special dot product that allows TVec's of different types, as long as operator*(T,U) is defined.

template<class T> T dot (const TMat< T > &m1, const TMat< T > &m2)
template<class T> TVec< T > operator- (const TVec< T > &v1, const TVec< T > &v2)
template<class T> TVec< T > operator- (T v1, const TVec< T > &v2)
template<class T> TVec< T > operator- (const TVec< T > &v1, T v2)
template<class T> TVec< T > operator+ (const TVec< T > &v1, const TVec< T > &v2)
template<class T> TVec< T > operator+ (T v1, const TVec< T > &v2)
template<class T> TVec< T > operator+ (const TVec< T > &v1, T v2)
template<class T> TVec< T > operator% (const TVec< T > &v1, const TVec< T > &v2)
template<class T> TVec< T > operator * (T scalar, const TVec< T > &v)
template<class T> TVec< T > operator * (const TVec< T > &v1, T v2)
template<class T> TVec< T > operator/ (const TVec< T > &v1, const TVec< T > &v2)
template<class T> TVec< T > operator/ (T v1, const TVec< T > &v2)
template<class T1, class T2> TVec< T1 > operator/ (const TVec< T1 > &v1, T2 scalar)
template<class T> T logadd (const TVec< T > &vec)
template<class T> T output_margin (const TVec< T > &class_scores, int correct_class)
template<class T> void fill_one_hot (const TVec< T > &vec, int hotpos, T coldvalue, T hotvalue)
template<class T> TVec< T > one_hot (int length, int hotpos, T coldvalue, T hotvalue)
template<class T> TVec< T > square (const TVec< T > &vec)
template<class T> TVec< T > squareroot (const TVec< T > &vec)
template<class T> TVec< T > remove_missing (const TVec< T > &vec)
template<class T, class U, class V> TVec< U > apply (const TVec< T > &vec, U(*func)(V))
 Transform a vector of T into a vector of U through a unary function.

template<class T, class U> void apply (const TVec< T > &source, TVec< U > &destination, U(*func)(T))
 Transform a vector of T into a vector of U through a unary function.

template<class T, class U, class V> void apply (const TVec< T > &src1, const TVec< U > &src2, TVec< V > &dest, V(*func)(T, U))
 Transform a vector of T and a vector of U into a vector of V, through a binary function.

template<class T> void multiply (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void multiplyAdd (const TVec< T > &source1, const TVec< T > &source2, T source3, TVec< T > &destination)
template<class T> void multiplyScaledAdd (const TVec< T > &source, T a, T b, TVec< T > &destination)
template<class T> void add (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void add (const TVec< T > &source1, T source2, TVec< T > &destination)
template<class T> void substract (const TVec< T > &source1, T source2, TVec< T > &destination)
template<class T> void substract (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void divide (const TVec< T > &source1, T source2, TVec< T > &destination)
template<class T> void divide (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void divide (T source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void max (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void max (const TVec< T > &source1, T source2, TVec< T > &destination)
template<class T> void min (const TVec< T > &source1, const TVec< T > &source2, TVec< T > &destination)
template<class T> void min (const TVec< T > &source1, T source2, TVec< T > &destination)
template<class T> TVec< T > softmax (const TVec< T > &x)
template<class T> void tanh (const TVec< T > &x, TVec< T > &y)
template<class T> TVec< T > exp (TVec< T > vec)
template<class T> TVec< T > nonZeroIndices (TVec< T > v)
template<class T> TVec< T > nonZeroIndices (TVec< bool > v)
template<class T> void complement_indices (TVec< T > &indices, int n, TVec< T > &complement_indices, TVec< T > &buffer)
template<class T> void equals (const TVec< T > &src, T v, TVec< T > &dest)
template<class T> void isLargerThan (const TVec< T > &first, const TVec< T > &second, TVec< T > &dest)
template<class T> void isLargerThanOrEqualTo (const TVec< T > &first, const TVec< T > &second, TVec< T > &dest)
template<class T> void isSmallerThan (const TVec< T > &first, const TVec< T > &second, TVec< T > &dest)
template<class T> void isSmallerThanOrEqualTo (const TVec< T > &first, const TVec< T > &second, TVec< T > &dest)
template<class T> void ifThenElse (const TVec< T > &if_vec, const TVec< T > &then_vec, const TVec< T > &else_vec, TVec< T > &dest)
template<class T> int vec_counts (const TVec< T > &src, T value)
template<class T> int vec_find (const TVec< T > &src, T f)
template<class T> T estimatedCumProb (T x, TVec< T > bins)
template<class T> int positionOfkthOrderedElement (const TVec< T > &vec, int k)
template<class T> T kthOrderedElement (const TVec< T > &vec, int k)
 returns the value of the kth ordered element of vec; k can take values 0 to vec.length()-1

template<class T> T median (const TVec< T > &vec)
 returns the median value of vec

template<class T> T selectAndOrder (const TVec< T > &vec, int pos)
 find the element at position pos that would result from a sort and put all elements (not in order!) lower than v[pos] in v[i<pos].
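
A small sketch of the order-statistics helpers above (Vec assumptions as before); whether these calls reorder the input vector is not stated here, so treat that as a possible side effect.

    Vec v(7);
    fill_random_uniform(v, 0.0, 1.0);
    real med   = median(v);                 // middle value of v
    real third = kthOrderedElement(v, 2);   // value of rank 2 (0-based), i.e. the third smallest
    int  pos   = positionOfkthOrderedElement(v, 2);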

template<class T> TVec< T > getQuantiles (const TVec< T > &vec, int q)
template<class T> TVec< T > nonZero (const TVec< T > &vec)
 returns a vector composed of the values of v that are different from 0;

template<class T> TVec< T > positiveValues (const TVec< T > &vec)
 returns a vector composed of the values of v that are greater than 0;

template<class T> int positionOfClosestElement (const TVec< T > &vec, const T &value, bool is_sorted_vec=false)
template<class T> void projectOnOrthogonalSubspace (const TVec< T > &vec, const TMat< T > &orthonormal_subspace)
template<class T> void operator *= (const TVec< T > &vec, T factor)
template<class T> void operator+= (const TVec< T > &vec1, const TVec< T > &vec2)
 element-wise +

template<class T> void multiplyAcc (const TVec< T > &vec, const TVec< T > &x, T scale)
 vec[i] += x[i]*scale;

template<class T> void exponentialMovingAverageUpdate (const TVec< T > &vec, const TVec< T > &x, T alpha)
 TVec[i] = (1-alpha)*TVec[i]+x[i]*alpha;.

template<class T> void exponentialMovingVarianceUpdate (const TVec< T > &vec, const TVec< T > &x, const TVec< T > &mu, T alpha)
 TVec[i] = (1-alpha)*TVec[i]+(x[i]-mu[i])^2*alpha;.

template<class T> void exponentialMovingSquareUpdate (const TVec< T > &vec, const TVec< T > &x, T alpha)
 TVec[i] = (1-alpha)*TVec[i]+x[i]^2*alpha;.
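
A hedged sketch of the exponential moving-average updaters above, tracking a running mean and variance of a stream of vectors; the zero-initialization trick reuses multiply(source, scalar, destination) declared earlier in this listing.

    Vec x(10);                     // current observation
    fill_random_normal(x, 0.0, 1.0);
    Vec mu(10), var(10);
    mu << x;                       // initialize the running mean with the first observation
    multiply(x, real(0), var);     // var <- x*0, i.e. all zeros
    real alpha = 0.05;
    // for each subsequent observation x:
    exponentialMovingAverageUpdate(mu, x, alpha);        // mu[i]  = (1-alpha)*mu[i]  + alpha*x[i]
    exponentialMovingVarianceUpdate(var, x, mu, alpha);  // var[i] = (1-alpha)*var[i] + alpha*(x[i]-mu[i])^2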

template<class T> void multiplyAcc (const TVec< T > &vec, const TVec< T > &x, const TVec< T > &y)
 vec[i] += x[i]*y[i];

template<class T> void squareMultiplyAcc (const TVec< T > &vec, const TVec< T > &x, T scale)
 TVec[i] += x[i]*x[i]*scale;.

template<class T> void squareAcc (const TVec< T > &vec, const TVec< T > &x)
 TVec[i] += x[i]*x[i];.

template<class T> void squareSubtract (const TVec< T > &vec, const TVec< T > &x)
 TVec[i] -= x[i]*x[i];.

template<class T> void diffSquareMultiplyAcc (const TVec< T > &vec, const TVec< T > &x, const TVec< T > &y, T scale)
 TVec[i] += (x[i]-y[i])^2*scale;.

template<class T> void diffSquareMultiplyScaledAcc (const TVec< T > &vec, const TVec< T > &x, const TVec< T > &y, T fact1, T fact2)
 TVec[i] = TVec[i]*fact1 + (x[i]-y[i])^2*fact2;.

template<class T> void product (const TVec< T > &result, const TMat< T > &m, const TVec< T > &v)
 result[i] = sum_j m[i,j] * v[j]

template<class T> void productAcc (const TVec< T > &vec, const TMat< T > &m, const TVec< T > &v)
 result[i] += sum_j m[i,j] * v[j]

template<class T> void transposeProduct (const TVec< T > &result, const TMat< T > &m, const TVec< T > &v)
 result[i] = sum_j m[j,i] * v[j]. Equivalently: rowvec(result) = rowvec(v) . m

template<class T> void transposeProductAcc (const TVec< T > &result, const TMat< T > &m, const TVec< T > &v)
 result[i] += sum_j m[j,i] * v[j]

template<class T> void transposeProductAcc (const TVec< T > &result, const TMat< T > &m, const TVec< T > &v, T alpha)
 result[i] += alpha * sum_j m[j,i] * v[j]
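
A brief sketch of the matrix-vector product helpers above (Vec/Mat assumptions as before); the result vectors are assumed to be pre-sized by the caller.

    Mat m(4, 3);
    fill_random_normal(m, 0.0, 1.0);
    Vec v(3), y(4), z(3);
    fill_random_uniform(v, -1.0, 1.0);
    product(y, m, v);              // y[i]  = sum_j m[i,j]*v[j]
    productAcc(y, m, v);           // y[i] += sum_j m[i,j]*v[j]
    transposeProduct(z, m, y);     // z[i]  = sum_j m[j,i]*y[j]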

template<class T> void compressedTransposeProductAcc (const TVec< T > &result, const TMat< T > &m, char *comprbufvec)
template<class T> void diagonalizedFactorsProduct (TMat< T > &result, const TMat< T > &U, const TVec< T > d, const TMat< T > V, bool accumulate=false)
 return the matrix with elements (i,j) = sum_k U_{ik} d_k V_{kj}

template<class T> void diagonalizedFactorsProductBprop (const TMat< T > &dCdresult, const TMat< T > &U, const TVec< T > d, const TMat< T > V, TMat< T > &dCdU, TVec< T > &dCdd, TMat< T > &dCdV)
 GIVEN that res(i,j) = sum_k U_{ik} d_k V_{kj}, and given dC/dres, U,d and V, accumulate gradients on dC/dU, dC/dd and dC/dV: dC/dU[i,k] += sum_j dC/dres[i,j] d_k V[k,j] dC/dd[k] += sum_{ij} dC/dres[i,j] U[i,k] V[k,j] dC/dV[k,j] += d_k * sum_i U[i,k] dC/dres[i,j].

template<class T> void diagonalizedFactorsProductTranspose (TMat< T > &result, const TMat< T > &U, const TVec< T > d, const TMat< T > V, bool accumulate=false)
 return the matrix with elements (i,j) = sum_k U_{ik} d_k V_{jk}

template<class T> void diagonalizedFactorsProductTransposeBprop (const TMat< T > &dCdresult, const TMat< T > &U, const TVec< T > d, const TMat< T > V, TMat< T > &dCdU, TVec< T > &dCdd, TMat< T > &dCdV)
template<class T> void diagonalizedFactorsTransposeProduct (TMat< T > &result, const TMat< T > &U, const TVec< T > d, const TMat< T > V, bool accumulate=false)
 return the matrix with elements (i,j) = sum_k U_{ki} d_k V_{kj}

template<class T> void diagonalizedFactorsTransposeProductBprop (const TMat< T > &dCdresult, const TMat< T > &U, const TVec< T > d, const TMat< T > V, TMat< T > &dCdU, TVec< T > &dCdd, TMat< T > &dCdV)
template<class T> void diagonalizedFactorsTransposeProductTranspose (TMat< T > &result, const TMat< T > &U, const TVec< T > d, const TMat< T > V, bool accumulate=false)
 return the matrix with elements (i,j) = sum_k U_{ki} d_k V_{jk}

template<class T> void diagonalizedFactorsTransposeProductTransposeBprop (const TMat< T > &dCdresult, const TMat< T > &U, const TVec< T > d, const TMat< T > V, TMat< T > &dCdU, TVec< T > &dCdd, TMat< T > &dCdV)
template<class T> T matRowDotVec (const TMat< T > &mat, int i, const TVec< T > v)
 return dot product of i-th row with vector v

template<class T> T matColumnDotVec (const TMat< T > &mat, int j, const TVec< T > v)
 return dot product of j-th column with vector v

template<class T> void matRowsDots (TVec< T > v, const TMat< T > &A, const TMat< T > &B)
 return dot products of i-th row of A with i-th row of B in vector v

template<class T> void matRowsDotsAcc (TVec< T > v, const TMat< T > &A, const TMat< T > &B)
 accumulate dot products of i-th row of A with i-th row of B into vector v

template<class T> void fillItSymmetric (const TMat< T > &mat)
 Fill the bottom left part of a matrix with its top right part, so that it becomes symmetric.

template<class T> void makeItSymmetric (const TMat< T > &mat, T max_dif)
template<class T> void product (const TMat< T > &mat, const TVec< T > &x, TVec< T > &y)
template<class T> void product (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void productAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void product2Acc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void squareProductAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void externalProduct (const TMat< T > &mat, const TVec< T > &v1, const TVec< T > &v2)
template<class T> void externalProductAcc (const TMat< T > &mat, const TVec< T > &v1, const TVec< T > &v2)
template<class T> void externalProductScaleAcc (const TMat< T > &mat, const TVec< T > &v1, const TVec< T > &v2, T gamma)
template<class T> void externalProductScaleAcc (const TMat< T > &mat, const TVec< T > &v1, const TVec< T > &v2, T gamma, T alpha)
template<class T> void productTranspose (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void squareProductTranspose (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void product2Transpose (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void productTransposeAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void product2TransposeAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void squareProductTransposeAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeProduct (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeProduct2 (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeProductAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeProduct2Acc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeTransposeProduct (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> void transposeTransposeProductAcc (const TMat< T > &mat, const TMat< T > &m1, const TMat< T > &m2)
template<class T> T trace (const TMat< T > &mat)
template<class T> void regularizeMatrix (const TMat< T > &mat, T tolerance)
 Applies a regularizer: diag(A) += (tolerance * trace(A)).

template<class T> void makeRowsSumTo1 (const TMat< T > &mat)
template<class T> void multiply (const TMat< T > &result, const TMat< T > &x, T scale)
template<class T> TMat< T > operator * (const TMat< T > &m, const T &scalar)
template<class T> TMat< T > operator * (const T &scalar, const TMat< T > &m)
template<class T> TMat< T > operator/ (const TMat< T > &m, const T &scalar)
template<class T> void multiplyAcc (const TMat< T > &mat, const TMat< T > &x, T scale)
template<class T> void multiplyAcc (const TMat< T > &mat, const TMat< T > &x, const TMat< T > &y)
template<class T> void squareMultiplyAcc (const TMat< T > &mat, const TMat< T > &x, T scale)
template<class T> void diffSquareMultiplyAcc (const TMat< T > &mat, const TMat< T > &x, const TMat< T > &y, T scale)
template<class T> TVec< T > selectAndOrder (const TMat< T > &mat, int pos, int colnum=0)
template<class T> void addToDiagonal (const TMat< T > &mat, T lambda)
template<class T> void addToDiagonal (const TMat< T > &mat, const TVec< T > &lambda)
template<class T> void diag (const TMat< T > &mat, const TVec< T > &d)
template<class T> TVec< T > diag (const TMat< T > &mat)
template<class T> void diagonalOfSquare (const TMat< T > &mat, const TVec< T > &d)
template<class T> void projectOnOrthogonalSubspace (const TMat< T > &mat, TMat< T > orthonormal_subspace)
template<class T> void averageAcrossRowsAndColumns (const TMat< T > &mat, TVec< T > &avg_across_rows, TVec< T > &avg_across_columns, bool ignored)
template<class T> void addToRows (const TMat< T > &mat, const TVec< T > row, bool ignored)
template<class T> void addToColumns (const TMat< T > &mat, const TVec< T > col, bool ignored)
template<class T> void substractFromRows (const TMat< T > &mat, const TVec< T > row, bool ignored)
template<class T> void substractFromColumns (const TMat< T > &mat, const TVec< T > col, bool ignored)
template<class T> void addToMat (const TMat< T > &mat, T scalar, bool ignored)
template<class T> T sum (const TMat< T > &mat, bool ignore_missing=false)
template<class T> T product (const TMat< T > &mat)
template<class T> T sum_of_squares (const TMat< T > &mat)
template<class T> T mean (const TMat< T > &mat)
template<class T> T geometric_mean (const TMat< T > &mat)
template<class T> T variance (const TMat< T > &mat, T meanval)
template<class T> T correlation (const TMat< T > &mat)
template<class T> T correlation (const TVec< T > &x, const TVec< T > &y)
template<class T> T min (const TMat< T > &mat)
template<class T> T max (const TMat< T > &mat)
template<class T> void argmin (const TMat< T > &mat, int &mini, int &minj)
 Stores the position of the min in the 'mini' & 'minj' args.

template<class T> void argmax (const TMat< T > &mat, int &maxi, int &maxj)
template<class T> int argmin (const TMat< T > &m)
 return mini*width+minj

template<class T> int argmax (const TMat< T > &m)
 return maxi*width+maxj
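
The single-argument argmin/argmax above return a flattened index; a hedged sketch of decoding it (assuming TMat::width() returns the number of columns, which is not shown in this listing):

    Mat m(5, 4);
    fill_random_uniform(m, 0.0, 1.0);
    int flat = argmax(m);              // maxi*width + maxj
    int maxi = flat / m.width();       // m.width() assumed to be the column count
    int maxj = flat % m.width();
    int i, j;
    argmax(m, i, j);                   // same position via the two-argument overload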

template<class T> void rowSum (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowSum (const TMat< T > &mat, const TVec< T > &colvec)
template<class T> void rowMean (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowVariance (const TMat< T > &mat, const TMat< T > &singlecolumn, const TMat< T > &rowmean)
template<class T> void rowSumOfSquares (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowMax (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowMax (const TMat< T > &mat, const TVec< T > &colvec)
template<class T> void rowMin (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowMin (const TMat< T > &mat, const TVec< T > &colvec)
template<class T> void rowArgmax (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void rowArgmin (const TMat< T > &mat, const TMat< T > &singlecolumn)
template<class T> void columnSum (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnSumOfSquares (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnMean (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnWeightedMean (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnVariance (const TMat< T > &mat, TVec< T > &result, const TVec< T > &columnmean)
template<class T> void columnWeightedVariance (const TMat< T > &mat, TVec< T > &result, const TVec< T > &column_weighted_mean)
template<class T> void columnMax (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnMin (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnArgmax (const TMat< T > &mat, TVec< T > &result)
template<class T> void columnArgmin (const TMat< T > &mat, TVec< T > &result)
template<class T> T mahalanobis_distance (const TVec< T > &input, const TVec< T > &meanvec, const TMat< T > &inversecovmat)
template<class T> void computeMean (const TMat< T > &m, TVec< T > &meanvec)
 compute the mean of the rows of m (looping over columns)

template<class T> void computeMeanAndVariance (const TMat< T > &m, TVec< T > &meanvec, TVec< T > &variancevec)
 compute the mean and variance of the rows of m (looping over columns)

template<class T> void computeCovar (const TMat< T > &m, const TVec< T > &meanvec, TMat< T > &covarmat)
template<class T> void computeMeanAndCovar (const TMat< T > &m, TVec< T > &meanvec, TMat< T > &covarmat)
template<class T> void computeMeanAndStddev (const TMat< T > &m, TVec< T > &meanvec, TVec< T > &stddevvec)
 compute the mean and standard deviations of the rows of m (looping over columns)

template<class T> void computeColumnsMeanAndStddev (const TMat< T > &m, TMat< T > &meanvec, TMat< T > &stddevvec)
 compute the mean and standard deviations of the columns of m (looping over rows) (the result is stored in column vectors meanvec and stddevvec)
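
A sketch of the column-statistics helpers above (Vec/Mat assumptions as before); the output vectors are assumed to be resized by the calls.

    Mat data(100, 5);
    fill_random_normal(data, 2.0, 3.0);
    Vec meanvec, stddevvec;
    computeMeanAndStddev(data, meanvec, stddevvec);   // one entry per column of 'data'
    Vec mu;
    Mat covar;
    computeMeanAndCovar(data, mu, covar);             // 5x5 covariance of the columns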

template<class T> void normalize (TMat< T > &m)
 subtract the mean, and divide by the stddev (these are estimated globally)

template<class T> void normalizeRows (const TMat< T > &m)
 Divides each row by the sum of its elements.

template<class T> void normalizeColumns (const TMat< T > &m)
 Divides each column by the sum of its elements.

template<class T> void normalize (TMat< T > &m, double n)
 divide each row by its n norm

template<class T> void operator+= (const TMat< T > &m, T scalar)
template<class T> void operator *= (const TMat< T > &m, T scalar)
template<class T> void operator-= (const TMat< T > &m, T scalar)
template<class T> void operator/= (const TMat< T > &m, T scalar)
template<class T> void operator/= (const TMat< T > &m, int scalar)
template<class T> void operator+= (const TMat< T > &m, const TVec< T > &v)
 adds v to every row

template<class T> void operator-= (const TMat< T > &m, const TVec< T > &v)
 subtracts v from every row

template<class T> void operator *= (const TMat< T > &m, const TVec< T > &v)
 does an elementwise multiplication of every row by v

template<class T> void operator *= (const TMat< T > &m1, const TMat< T > &m2)
 does an elementwise multiplication of m1 by m2

template<class T> void operator/= (const TMat< T > &m, const TVec< T > &v)
template<class T> void operator/= (const TMat< T > &m1, const TMat< T > &m2)
 does an elementwise division

template<class T> void operator+= (const TMat< T > &m1, const TMat< T > &m2)
template<class T> void operator-= (const TMat< T > &m1, const TMat< T > &m2)
template<class T> TMat< T > operator- (const TMat< T > &m1, const TMat< T > &m2)
template<class T> TMat< T > operator+ (const TMat< T > &m1, const TMat< T > &m2)
template<class T> void substract (const TMat< T > &m1, const TMat< T > &m2, TMat< T > &destination)
template<class T> void add (const TMat< T > &m1, const TMat< T > &m2, TMat< T > &destination)
template<class T> TMat< T > operator- (const TMat< T > &m)
 return a negated copy of m

template<class T> void negateElements (const TMat< T > &m)
 x'_ij = -x_ij;

template<class T> void invertElements (const TMat< T > &m)
 x'_ij = 1.0/x_ij;

template<class T> TMat< T > leftPseudoInverse (TMat< T > &m)
template<class T> void leftPseudoInverse (const TMat< T > &m, TMat< T > &inv)
template<class T> TMat< T > rightPseudoInverse (TMat< T > &m)
template<class T> void rightPseudoInverse (const TMat< T > &m, TMat< T > &inv)
template<class T> TMat< T > inverse (TMat< T > &m)
template<class T> void inverse (const TMat< T > &m, TMat< T > &inv)
template<class T> void solveLinearSystemByCholesky (const TMat< T > &A, const TMat< T > &B, TMat< T > &X, TMat< T > *pL=0, TVec< T > *py=0)
template<class T> void solveTransposeLinearSystemByCholesky (const TMat< T > &A, const TMat< T > &B, TMat< T > &X, TMat< T > *pL=0, TVec< T > *py=0)
template<class T> void choleskyDecomposition (const TMat< T > &A, TMat< T > &L)
template<class T> void bpropCholeskyDecomposition (const TMat< T > &A, const TMat< T > &L, TMat< T > &dC_dA, TMat< T > &dC_dL)
template<class T> void choleskySolve (const TMat< T > &L, TVec< T > b, TVec< T > x, TVec< T > &y)
template<class T> void choleskySolve (const TMat< T > &L, const TMat< T > &B, TMat< T > &X, TVec< T > &y)
template<class T> void bpropCholeskySolve (const TMat< T > &L, const TVec< T > &x, const TVec< T > &y, TMat< T > &dC_dL, TVec< T > &dC_db, TVec< T > &dC_dx)
template<class T> real choleskyInvert (const TMat< T > &A, TMat< T > &Ainv)
template<class T> TVec< T > choleskySolve (const TMat< T > &A, const TVec< T > &b)
template<class T> TMat< T > choleskyInvert (const TMat< T > &A)
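
A hedged sketch of solving a small symmetric positive-definite system with the Cholesky helpers above (Vec/Mat and element-indexing assumptions as before); the factorization routines may overwrite A, so re-fill it between calls if needed.

    Mat A(3, 3);
    A(0,0) = 4; A(0,1) = 1; A(0,2) = 0;
    A(1,0) = 1; A(1,1) = 3; A(1,2) = 1;
    A(2,0) = 0; A(2,1) = 1; A(2,2) = 2;
    Vec b(3);
    b[0] = 1; b[1] = 2; b[2] = 3;
    Vec x    = choleskySolve(A, b);      // solves A*x = b via the Cholesky factor of A
    Mat Ainv = choleskyInvert(A);        // inverse of A via its Cholesky factor
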
template<class T> void LU_decomposition (TMat< T > &A, TVec< T > &Trow, int &detsign, TVec< T > *p=0)
template<class T> T det (const TMat< T > &A)
template<class T> T det (const TMat< T > &LU, int detsign)
template<class T> void equals (const TMat< T > &src, T v, TMat< T > &dest)
template<class T> void transpose (const TMat< T > src, TMat< T > dest)
template<class T> TMat< T > transpose (const TMat< T > &src)
template<class T> void apply (T(*func)(const TVec< T > &), const TMat< T > &m, TMat< T > &dest)
template<class T> void apply (T(*func)(const TVec< T > &, const TVec< T > &), const TMat< T > &m1, const TMat< T > &m2, TMat< T > &dest)
template<class T> void linearRegressionNoBias (TMat< T > inputs, TMat< T > outputs, T weight_decay, TMat< T > weights)
template<class T> void linearRegression (TMat< T > inputs, TMat< T > outputs, T weight_decay, TMat< T > theta_t)
template<class T> void linearRegression (TVec< T > inputs, TVec< T > outputs, T weight_decay, TVec< T > theta_t)
template<class T> TMat< T > smooth (TMat< T > data, int windowsize)
template<class T> TMat< T > square (const TMat< T > &m)
template<class T> TMat< T > sqrt (const TMat< T > &m)
template<class T> void affineMatrixInitialize (TMat< T > W, bool output_on_columns=true, real scale=1.0)
template<class T> TMat< T > grep (TMat< T > data, int col, TVec< T > values, bool exclude=false)
template<class T> void convolve (TMat< T > m, TMat< T > mask, TMat< T > result)
template<class T> void subsample (TMat< T > m, int thesubsamplefactor, TMat< T > result)
template<class T> void classification_confusion_matrix (TMat< T > outputs, TMat< T > target_classes, TMat< T > confusion_matrix)
template<class T> int GramSchmidtOrthogonalization (TMat< T > A, T tolerance=1e-6)
 Orthonormalize in-place the rows of the given matrix, using successive projections on the orthogonal subspace of the previously found basis.

template<class T> TVec< T > product (const TMat< T > &m, const TVec< T > &v)
 return m x v

template<class T> TVec< T > transposeProduct (const TMat< T > &m, const TVec< T > &v)
 return m' x v

template<class T> TMat< T > product (const TMat< T > &m1, const TMat< T > &m2)
 return m1 x m2

template<class T> TMat< T > transposeProduct (const TMat< T > &m1, const TMat< T > &m2)
 return m1' x m2

template<class T> TMat< T > productTranspose (const TMat< T > &m1, const TMat< T > &m2)
 return m1 x m2'

template<class T> TMat< T > operator+ (const TMat< T > &m, const TVec< T > &v)
 return m + v (added to every ROW of m)

template<class T> TMat< T > operator- (const TMat< T > &m, const TVec< T > &v)
 return m - v (subtracted from every ROW of m)

template<class T> TMat< T > operator * (const TMat< T > &m, const TVec< T > &v)
 does an elementwise multiplication of every row by v

template<class T> TMat< T > operator/ (const TMat< T > &m, const TVec< T > &v)
 elementwise division of every row by v

template<class T> TMat< T > operator/ (const TMat< T > &m1, const TMat< T > &m2)
 elementwise division of m1 by m2

template<class T> void choleskySolve (const TMat< T > &L, TVec< T > b, TVec< T > x)
template<class T> TMat< T > grep (TMat< T > data, int col, T value, bool exclude=false)
 Same as above, but with a single value argument.

template<class T> void addIfNonMissing (const TVec< T > &source, const TVec< int > &nnonmissing, TVec< T > destination)
template<class T> void addXandX2IfNonMissing (const TVec< T > &source, const TVec< int > &nnonmissing, TVec< T > somme, TVec< T > somme2)
real dot_product (real s, real *x, real *y, int n)
void bprop_update_layer (real *dy, real *x, real *dx, real *w, int n_y, int n_x, real learning_rate, real weight_decay)
template<class T> void sortRows (TMat< T > &mat, const TVec< int > &key_columns, bool increasing_order=true)
template<class T> void sortElements (const TVec< T > &vec)
 Sorts the elements of vec in place.

template<class T> void partialSortRows (const TMat< T > &mat, int k, int sortk=1, int col=0)
 Uses partial_sort.

template<class T> void sortRows (const TMat< T > &mat, int col=0, bool increasing_order=true)
 This implementation should be very efficient, but it does two memory allocations: a first one of mat.length()*(sizeof(real)+sizeof(int)) and a second one of mat.length()*sizeof(int).

template<class T> void sortColumns (const TMat< T > &mat, int rownum)
template<class T> int binary_search (const TVec< T > &src, T x)
template<class T> int binary_search (const TMat< T > &src, int c, T x)
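
A short sketch of the sorting and searching helpers above; the exact return convention of binary_search is not documented in this listing, so the comment only states the precondition.

    Mat table(50, 3);
    fill_random_uniform(table, 0.0, 1.0);
    sortRows(table, 0, true);                // sort rows by column 0, increasing order

    Vec v(20);
    fill_random_uniform(v, 0.0, 1.0);
    sortElements(v);                         // in-place ascending sort
    int pos = binary_search(v, real(0.5));   // v must be sorted before calling this
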
template<class T> TMatColRowsIterator< T > operator+ (typename TMatColRowsIterator< T >::difference_type n, const TMatColRowsIterator< T > &y)
template<class T> TMatRowsAsArraysIterator< T > operator+ (typename TMatRowsAsArraysIterator< T >::difference_type n, const TMatRowsAsArraysIterator< T > &y)
template<class T> TMatRowsIterator< T > operator+ (typename TMatRowsIterator< T >::difference_type n, const TMatRowsIterator< T > &y)
void operator<< (const Vec &v, real f)
 Same as fill(f) (will only work with Vec, because of a potential conflict with T == string if we wanted to make it generic).

template<class T> TVec< T > deepCopy (const TVec< T > &source)
template<class T> TVec< T > deepCopy (const TVec< T > &source, CopiesMap &copies)
template<class T> void deepCopyField (TVec< T > &field, CopiesMap &copies)
template<class T> void swap (TVec< T > &a, TVec< T > &b)
template<class T> void operator<< (const TVec< T > &m1, const TVec< T > &m2)
 copy TVec << TVec

template<class T, class U> void operator<< (const TVec< T > &m1, const TVec< U > &m2)
 copy TVec << TVec (different types)

template<class T, class U> void operator>> (const TVec< T > &m1, const TVec< U > &m2)
 copy TVec >> TVec

template<class T> void savePVec (const string &filename, const TVec< T > &vec)
template<class T> PStream & operator<< (PStream &out, const TVec< T > &v)
 Read and Write from C++ stream: write saves length and read resizes accordingly (the raw modes don't write any size information).

template<class T> PStream & operator>> (PStream &in, TVec< T > &v)
template<class T> void binwrite (ostream &out, const TVec< T > &v)
template<class T> void binread (istream &in, TVec< T > &v)
template<class T> void binwrite_double (ostream &out, const TVec< T > &v)
template<class T> void binread_double (istream &in, TVec< T > &v)
template<class T> ostream & operator<< (ostream &out, const TVec< T > &v)
template<class T> istream & operator>> (istream &in, const TVec< T > &v)
template<class T> bool operator<= (const TVec< T > &left, const TVec< T > &right)
 A simple family of relational operators for TVec.

template<class T> bool operator>= (const TVec< T > &left, const TVec< T > &right)
template<class T> bool operator< (const TVec< T > &left, const TVec< T > &right)
template<class T> bool operator> (const TVec< T > &left, const TVec< T > &right)
 PLEARN_IMPLEMENT_OBJECT (VecStatsCollector,"Collects basic statistics on a vector","VecStatsCollector allows to collect statistics on a series of vectors.\n""Individual vectors x are presented by calling update(x), and this class will\n""collect both individual statistics for each element (as a Vec<StatsCollector>)\n""as well as (optionally) compute the covariance matrix.")
 DECLARE_OBJECT_PTR (VecStatsCollector)
 PLEARN_IMPLEMENT_OBJECT (NearestNeighborPredictionCost,"ONE LINE DESCRIPTION","MULTI LINE\nHELP")
 DECLARE_OBJECT_PTR (NearestNeighborPredictionCost)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (ObjectGenerator,"ObjectGenerator is the base class for implementing object-generation techniques.","The OptionGenerator takes a template Object, and from a list of options,\n""it will generate another Object (or a complete list).\n")
 DECLARE_OBJECT_PTR (ObjectGenerator)
 PLEARN_IMPLEMENT_OBJECT (RunObject,"Allows to build a non-runnable object in a PLearn script.","This Object implements a run() method so that it can be used in\n""a PLearn script, in order to build another Object given by the\n""'underlying_object' option without PLearn returning an error.\n")
 DECLARE_OBJECT_PTR (RunObject)
 PLEARN_IMPLEMENT_OBJECT (ShellScript,"Allows one to run shell commands (especially within a script).","This runnable object will execute the given shell commands when run.")
 DECLARE_OBJECT_PTR (ShellScript)
int print_diff (ostream &out, VMat m1, VMat m2, double tolerance)
 Prints where m1 and m2 differ by more than tolerance; returns the number of such differences, or -1 if the sizes differ.

void interactiveDisplayCDF (const Array< VMat > &vmats)
void displayBasicStats (VMat vm)
void printDistanceStatistics (VMat vm, int inputsize)
bool getList (char *str, int curj, const VMat &vm, Vec &outList, char *strReason)
void viewVMat (const VMat &vm)
void plotVMats (char *defs[], int ndefs)
int vmatmain (int argc, char **argv)
 PLEARN_IMPLEMENT_OBJECT (AdaptGradientOptimizer,"An optimizer that performs gradient descent with learning rate adaptation.","")
 PLEARN_IMPLEMENT_OBJECT (ConjGradientOptimizer,"Optimizer based on the conjugate gradient method.","The conjugate gradient algorithm is basically the following :\n""- 0: initialize the search direction d = -gradient\n""- 1: perform a line search along direction d for the minimum of the gradient\n""- 2: move to this minimum, update the search direction d and go to step 1\n""There are various methods available through the options for both steps 1 and 2.")
 PLEARN_IMPLEMENT_OBJECT (GradientOptimizer,"Optimization by gradient descent.","GradientOptimizer is the simple usual gradient descent algorithm \n"" (the number of samples on which to estimate gradients before an \n"" update, which determines whether we are performing 'batch' \n"" 'stochastic' or even 'minibatch', is currently specified outside \n"" this class, typically in the number of samples of the meanOf function \n"" to be optimized, as its 'nsamples' parameter). \n""Options for GradientOptimizer are [ option_name: <type> (default) ]: \n"" - start_learning_rate: <real> (0.01) \n"" the initial learning rate \n"" - decrease_constant: <real> (0) \n"" the learning rate decrease constant \n""\n""GradientOptimizer derives from Optimizer. \n")
 DECLARE_OBJECT_PTR (GradientOptimizer)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (HyperOptimizer,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (HSetVal,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (HTryAll,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (HCoordinateDescent,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (HTryCombinations,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (HyperOptimizer)
 DECLARE_OBJECT_PTR (HSetVal)
 DECLARE_OBJECT_PTR (HTryAll)
 DECLARE_OBJECT_PTR (HCoordinateDescent)
 DECLARE_OBJECT_PTR (HTryCombinations)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Optimizer,"ONE LINE DESCR","NO HELP")
void varDeepCopyField (Var &field, CopiesMap &copies)
 To use varDeepCopyField.

 DECLARE_OBJECT_PTR (Optimizer)
RandomVar operator * (RandomVar a, RandomVar b)
 ********************** GLOBAL FUNCTIONS **********************

RandomVar operator+ (RandomVar a, RandomVar b)
RandomVar operator- (RandomVar a, RandomVar b)
 Return a MatRandomVar that is the element-by-element difference of two RandomVar's.

RandomVar operator/ (RandomVar a, RandomVar b)
 Return a MatRandomVar that is the element-by-element ratio of two RandomVar's.

RandomVar exp (RandomVar x)
 exponential function applied element-by-element

RandomVar log (RandomVar x)
 natural logarithm function applied element-by-element

RandomVar extend (RandomVar v, real extension_value, int n_extend)
RandomVar hconcat (const RVArray &a)
real EM (ConditionalExpression conditional_expression, RVArray parameters_to_learn, VMat distr, int n_samples, int max_n_iterations, real relative_improvement_threshold, bool accept_worsening_likelihood, bool compute_final_train_NLL)
real EM (ConditionalExpression conditional_expression, RVArray parameters_to_learn, VMat distr, int n_samples, int max_n_iterations, real relative_improvement_threshold, bool compute_final_train_NLL)
Var P (ConditionalExpression conditional_expression, bool clearMarksUponReturn)
Var logP (ConditionalExpression conditional_expression, bool clearMarksUponReturn, RVInstanceArray *parameters_to_learn)
Var ElogP (ConditionalExpression conditional_expression, RVInstanceArray &parameters_to_learn, bool clearMarksUponReturn)
RandomVar marginalize (const RandomVar &RV, const RandomVar &hiddenRV)
Vec sample (ConditionalExpression conditional_expression)
void sample (ConditionalExpression conditional_expression, Mat &samples)
Var Sample (ConditionalExpression conditional_expression)
RandomVar normal (real mean, real standard_dev, int d, real minimum_standard_deviation)
 Functions to build a normal distribution.

RandomVar normal (RandomVar mean, RandomVar log_variance, real minimum_standard_deviation)
RandomVar mixture (RVArray components, RandomVar log_weights)
RandomVar multinomial (RandomVar log_probabilities)
real oEM (ConditionalExpression conditional_expression, RVArray parameters_to_learn, VMat distr, int n_samples, int max_n_iterations, real relative_improvement_threshold=0.001, bool compute_final_train_NLL=true)
real oEM (ConditionalExpression conditional_expression, RVArray parameters_to_learn, VMat distr, int n_samples, Optimizer &MStepOptimizer, int max_n_iterations, real relative_improvement_threshold=0.001, bool compute_final_train_NLL=true)
int establish_connection (int n_hosts, const char *hostnames[], int port_no)
int establish_connection (const char *hostname, int port_no)
int establish_connection (const int argc, const char *argv[])
void * MemoryMap (const char *filename, tFileHandle &handle, bool read_only, off_t &filesize)
 returns a pointer to the memory-mapped file or 0 if it fails for some reason.

void memoryUnmap (void *data, tFileHandle handle, int length)
vector< string > execute (const string &command)
int getSystemTotalMemory ()
 PLEARN_IMPLEMENT_OBJECT (AbsVariable,"ONE LINE DESCR","NO HELP")
 AbsVariable *.

 DECLARE_OBJECT_PTR (AbsVariable)
Var abs (Var v)
 PLEARN_IMPLEMENT_OBJECT (AffineTransformVariable,"Affine transformation of a vector variable.","NO HELP")
 DECLARE_OBJECT_PTR (AffineTransformVariable)
Var affine_transform (Var vec, Var transformation)
 first row of transformation is the bias.

 PLEARN_IMPLEMENT_OBJECT (AffineTransformWeightPenalty,"Affine transformation with Weight decay terms","NO HELP")
 DECLARE_OBJECT_PTR (AffineTransformWeightPenalty)
Var affine_transform_weight_penalty (Var transformation, real weight_decay, real bias_decay=0, bool L1_penalty=false)
 weight decay and bias decay terms. This has not been tested yet [Pascal: to be tested].
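
The Var-returning helpers in this group build nodes of PLearn's computation graph; a minimal hedged sketch (the Var(int) and Var(int,int) constructors used below are assumptions, not shown in this listing):

    Var input(3);                      // assumed: a length-3 vector variable
    Var transformation(4, 2);          // assumed: 4x2 matrix variable; first row is the bias
    Var out     = affine_transform(input, transformation);
    Var penalty = affine_transform_weight_penalty(transformation, 0.01);   // weight decay 0.01
    Var a       = abs(out);            // elementwise absolute value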

 PLEARN_IMPLEMENT_OBJECT (ArgmaxVariable,"Compute the index of the maximum value in the input","NO HELP")
 ArgmaxVariable *.

 DECLARE_OBJECT_PTR (ArgmaxVariable)
Var argmax (Var v)
 PLEARN_IMPLEMENT_OBJECT (ArgminOfVariable,"ONE LINE DESCR","NO HELP")
Var argminOf (Var v, Var expression, Var values_of_v, VarArray inputs)
 PLEARN_IMPLEMENT_OBJECT (ArgminVariable,"Compute the index of the minimum value in the input","NO HELP")
 ArgminVariable *.

 DECLARE_OBJECT_PTR (ArgminVariable)
Var argmin (Var v)
 PLEARN_IMPLEMENT_OBJECT (BinaryClassificationLossVariable,"For one-dimensional output: class is 0 if output < 0.5, and 1 if >= 0.5.","NO HELP")
 BinaryClassificationLossVariable *.

 DECLARE_OBJECT_PTR (BinaryClassificationLossVariable)
Var binary_classification_loss (Var network_output, Var classnum)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (BinaryVariable,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (ClassificationLossVariable,"Indicator(classnum==argmax(netout))","NO HELP")
 ClassificationLossVariable *.

 DECLARE_OBJECT_PTR (ClassificationLossVariable)
Var classification_loss (Var network_output, Var classnum)
 PLEARN_IMPLEMENT_OBJECT (ColumnIndexVariable,"Return a row vector with the elements indexed in each column","NO HELP")
 ColumnIndexVariable *.

 DECLARE_OBJECT_PTR (ColumnIndexVariable)
Var matrixIndex (Var mat, Var index)
 PLEARN_IMPLEMENT_OBJECT (ColumnSumVariable,"ONE LINE DESCR","NO HELP")
Var columnSum (Var v)
 PLEARN_IMPLEMENT_OBJECT (ConcatColumnsVariable,"Concatenation of the columns of several variables","NO HELP")
 ConcatColumnsVariable *.

 DECLARE_OBJECT_PTR (ConcatColumnsVariable)
Var hconcat (const VarArray &varray)
 PLEARN_IMPLEMENT_OBJECT (ConcatOfVariable,"Concatenates the results of each operation in the loop into the resulting variable","NO HELP")
 ConcatOfVariable *.

 DECLARE_OBJECT_PTR (ConcatOfVariable)
Var concatOf (VMat distr, Func f)
 concatOf

Var concatOf (Var output, const VarArray &inputs, VMat distr, int nsamples, VarArray parameters=VarArray())
 deprecated old version, do not use!

 PLEARN_IMPLEMENT_OBJECT (ConcatRowsVariable,"Concatenation of the rows of several variables","NO HELP")
 ConcatRowsVariable *.

 DECLARE_OBJECT_PTR (ConcatRowsVariable)
Var vconcat (const VarArray &varray)
 PLEARN_IMPLEMENT_OBJECT (ConvolveVariable,"A convolve var; equals convolve(input, mask)","NO HELP")
 ConvolveVariable *.

 DECLARE_OBJECT_PTR (ConvolveVariable)
Var convolve (Var input, Var mask)
 PLEARN_IMPLEMENT_OBJECT (CrossEntropyVariable,"cost = - sum_i {target_i * log(output_i) + (1-target_i) * log(1-output_i)}","NO HELP")
 CrossEntropyVariable *.

 DECLARE_OBJECT_PTR (CrossEntropyVariable)
Var cross_entropy (Var network_output, Var targets)
 PLEARN_IMPLEMENT_OBJECT (CutAboveThresholdVariable,"ONE LINE DESCR","NO HELP")
 CutAboveThresholdVariable *.

 DECLARE_OBJECT_PTR (CutAboveThresholdVariable)
Var cutAboveThreshold (Var v, real threshold)
Var negative (Var v)
 PLEARN_IMPLEMENT_OBJECT (CutBelowThresholdVariable,"ONE LINE DESCR","NO HELP")
 CutBelowThresholdVariable *.

 DECLARE_OBJECT_PTR (CutBelowThresholdVariable)
Var cutBelowThreshold (Var v, real threshold)
Var positive (Var v)
 PLEARN_IMPLEMENT_OBJECT (DeterminantVariable,"The argument must be a square matrix Var and the result is its determinant","NO HELP")
 DeterminantVariable *.

 DECLARE_OBJECT_PTR (DeterminantVariable)
Var det (Var m)
 PLEARN_IMPLEMENT_OBJECT (DiagonalizedFactorsProductVariable,"Variable that represents the leftmatrix*diag(vector)*rightmatrix product","The three parents are respectively the left matrix U, the center vector d,\n""and the right matrix V. Options allow to transpose the matrices.\n""The output value has elements (i,j) equal to sum_k U_{ik} d_k V_{kj}\n")
 DiagonalizedFactorsProductVariable *.

 DECLARE_OBJECT_PTR (DiagonalizedFactorsProductVariable)
Var diagonalized_factors_product (Var left_matrix, Var center_diagonal, Var right_matrix)
 PLEARN_IMPLEMENT_OBJECT (DilogarithmVariable,"This Var computes the dilogarithm function","The dilogarithm function is useful to compute the primitive of the softplus.\n"" dilogarithm(x) = sum_{k=1}^\\infty x^k/k^2\n""so dilogarithm'(x) = -(1/x)log(1-x), i.e. e^x dilogarithm'(-e^x)=log(1+e^x)=softplus(x)\n""and primitive(softplus)(x) = -dilogarithm(-e^x)\n")
 DilogarithmVariable *.

 DECLARE_OBJECT_PTR (DilogarithmVariable)
Var dilogarithm (Var v)
Var softplus_primitive (Var v)
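
As a small illustration of the identity quoted above, primitive(softplus)(x) = -dilogarithm(-e^x), the two expressions below should build the same quantity. This is only a sketch, assuming a Var(length) constructor; exp, negateElements and dilogarithm are all listed on this page:

    Var x(1);                                                      // assumed Var(length) constructor
    Var p1 = softplus_primitive(x);                                // direct form
    Var p2 = negateElements(dilogarithm(negateElements(exp(x))));  // -dilogarithm(-e^x)
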
 PLEARN_IMPLEMENT_OBJECT (DivVariable,"Divide 2 matrix vars of same size elementwise","NO HELP")
 DivVariable *.

 DECLARE_OBJECT_PTR (DivVariable)
 PLEARN_IMPLEMENT_OBJECT (DotProductVariable,"Dot product between 2 matrices (or vectors) with same number of elements","NO HELP")
 DotProductVariable *.

 DECLARE_OBJECT_PTR (DotProductVariable)
Var dot (Var v1, Var v2)
 dot product

 PLEARN_IMPLEMENT_OBJECT (DuplicateColumnVariable,"ONE LINE DESCR","NO HELP")
 DuplicateColumnVariable *.

 DECLARE_OBJECT_PTR (DuplicateColumnVariable)
Var duplicateColumn (Var v, int the_width)
 PLEARN_IMPLEMENT_OBJECT (DuplicateRowVariable,"ONE LINE DESCR","NO HELP")
 DuplicateRowVariable *.

 DECLARE_OBJECT_PTR (DuplicateRowVariable)
Var duplicateRow (Var v, int the_length)
 PLEARN_IMPLEMENT_OBJECT (DuplicateScalarVariable,"ONE LINE DESCR","NO HELP")
 DuplicateScalarVariable *.

 DECLARE_OBJECT_PTR (DuplicateScalarVariable)
Var duplicateScalar (Var v, int the_length, int the_width)
 PLEARN_IMPLEMENT_OBJECT (ElementAtPositionVariable,"A variable of size length() x width(), filled with zeros except for the ""single element indexed by input2 =(i,j) or input2 = (k).","NO HELP")
 ElementAtPositionVariable *.

 DECLARE_OBJECT_PTR (ElementAtPositionVariable)
 PLEARN_IMPLEMENT_OBJECT (EqualConstantVariable,"A scalar var; equal 1 if input1==input2, 0 otherwise","NO HELP")
 EqualConstantVariable *.

 DECLARE_OBJECT_PTR (EqualConstantVariable)
Var operator== (Var v1, real cte)
 result[i] = 1 if v1[i]==cte, 0 otherwise

Var operator== (real cte, Var v1)
 result[i] = 1 if v1[i]==cte, 0 otherwise

 PLEARN_IMPLEMENT_OBJECT (EqualScalarVariable,"A scalar var; equal 1 if input1==input2, 0 otherwise","NO HELP")
 EqualScalarVariable *.

 DECLARE_OBJECT_PTR (EqualScalarVariable)
 PLEARN_IMPLEMENT_OBJECT (EqualVariable,"A scalar var; equal 1 if input1==input2, 0 otherwise","NO HELP")
 EqualVariable *.

Var isequal (Var v1, Var v2)
 DECLARE_OBJECT_PTR (EqualVariable)
 PLEARN_IMPLEMENT_OBJECT (ErfVariable,"ONE LINE DESCR","NO HELP")
 ErfVariable *.

 DECLARE_OBJECT_PTR (ErfVariable)
Var erf (Var v)
 PLEARN_IMPLEMENT_OBJECT (ExpVariable,"ONE LINE DESCR","NO HELP")
 ExpVariable *.

 DECLARE_OBJECT_PTR (ExpVariable)
Var exp (Var v)
 PLEARN_IMPLEMENT_OBJECT (ExtendedVariable,"Variable that extends the input variable by appending rows at ""its top and bottom and columns at its left and right.","NO HELP")
 ExtendedVariable *.

 DECLARE_OBJECT_PTR (ExtendedVariable)
Var extend (Var v, int top_extent, int bottom_extent, int left_extent, int right_extent, real fill_value=0.0)
 general extension of a matrix in any direction

Var extend (Var v, real extension_value=1.0, int n_extend=1)
 simple extension of a vector (same semantics as the old extend, when we only had vectors)
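
A short sketch of both overloads (illustration only, assuming a Var(length) constructor); the vector form is handy for appending a constant 1 to an input, e.g. to fold a bias into a weight matrix:

    Var v(3);                                  // assumed Var(length) constructor
    Var with_one = extend(v, 1.0);             // vector form: appends one element equal to 1.0
    Var framed   = extend(v, 1, 1, 0, 0, 0.0); // matrix form: one row of zeros above and below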

Func operator/ (Func f, real value)
 PLEARN_IMPLEMENT_OBJECT (Function,"Implements a function defined as a var graph","NO HELP")
template<> void deepCopyField (Func &field, CopiesMap &copies)
 DECLARE_OBJECT_PTR (Function)
 DECLARE_OBJECT_PP (Func, Function)
 PLEARN_IMPLEMENT_OBJECT (HardSlopeVariable,"This Var computes the hard_slope function","The hard_slope function is linear by parts function:\n""0 in [-infty,left], linear in [left,right], and 1 in [right,infty], and continuous.\n""If the arguments are vectors than the operation is performed element by element on all of them.\n")
 HardSlopeVariable *.

 DECLARE_OBJECT_PTR (HardSlopeVariable)
Var hard_slope (Var x, Var left, Var right)
Var d_hard_slope (Var x, Var left, Var right)
 PLEARN_IMPLEMENT_OBJECT (IfThenElseVariable,"Variable that represents the element-wise IF-THEN-ELSE","NO HELP")
 IfThenElseVariable *.

 DECLARE_OBJECT_PTR (IfThenElseVariable)
Var ifThenElse (Var If, Var Then, Var Else)
 IT WOULD BE NICE IF WE COULD REDEFINE (?:).
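
A short sketch of the element-wise conditional (illustration only, assuming a Var(length) constructor; operator>= between a Var and a real, unary operator- and abs are listed elsewhere on this page):

    Var x(5);                              // assumed Var(length) constructor
    Var absx = ifThenElse(x >= 0, x, -x);  // element-wise |x|, equivalent to abs(x)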

 PLEARN_IMPLEMENT_OBJECT (IndexAtPositionVariable,"ONE LINE DESCR","NO HELP")
 IndexAtPositionVariable *.

 DECLARE_OBJECT_PTR (IndexAtPositionVariable)
 PLEARN_IMPLEMENT_OBJECT (InterValuesVariable,"if values = [x1,x2,...,x10], the resulting variable is [(x1+x2)/2,(x2+x3)/2, ... (x9+x10)/2]","NO HELP")
 InterValuesVariable *.

 DECLARE_OBJECT_PTR (InterValuesVariable)
Var interValues (Var values)
 if values = [x1,x2,...,x10], the resulting variable is [(x1+x2)/2,(x2+x3)/2, ... (x9+x10)/2]

 PLEARN_IMPLEMENT_OBJECT (InvertElementsVariable,"ONE LINE DESCR","NO HELP")
 InvertElementsVariable *.

 DECLARE_OBJECT_PTR (InvertElementsVariable)
Var invertElements (Var v)
 PLEARN_IMPLEMENT_OBJECT (IsAboveThresholdVariable,"ONE LINE DESCR","NO HELP")
 IsAboveThresholdVariable *.

 DECLARE_OBJECT_PTR (IsAboveThresholdVariable)
Var isAboveThreshold (Var v, real threshold=0, real truevalue=1, real falsevalue=0, bool strict=false)
Var operator>= (Var v, real threshold)
Var operator<= (Var v, real threshold)
 PLEARN_IMPLEMENT_OBJECT (IsLargerVariable,"ONE LINE DESCR","NO HELP")
 IsLargerVariable *.

 DECLARE_OBJECT_PTR (IsLargerVariable)
Var operator> (Var v1, Var v2)
Var operator<= (Var v1, Var v2)
 PLEARN_IMPLEMENT_OBJECT (IsMissingVariable,"ONE LINE DESCR","NO HELP")
 IsMissingVariable *.

 DECLARE_OBJECT_PTR (IsMissingVariable)
Var isMissing (Var x)
 PLEARN_IMPLEMENT_OBJECT (IsSmallerVariable,"ONE LINE DESCR","NO HELP")
 IsSmallerVariable *.

 DECLARE_OBJECT_PTR (IsSmallerVariable)
Var operator< (Var v1, Var v2)
Var operator>= (Var v1, Var v2)
 PLEARN_IMPLEMENT_OBJECT (LeftPseudoInverseVariable,"ONE LINE DESCR","NO HELP")
 LeftPseudoInverseVariable *.

 DECLARE_OBJECT_PTR (LeftPseudoInverseVariable)
Var leftPseudoInverse (Var v)
 PLEARN_IMPLEMENT_OBJECT (LiftOutputVariable,"The result is the output if the target is 1, and the opposite of the output ""otherwise. This variable is to be used with a LiftStatsCollector, in a""stochastic gradient descent.","NO HELP")
 LiftOutputVariable *.

 DECLARE_OBJECT_PTR (LiftOutputVariable)
Var lift_output (Var linear_output, Var target)
 PLEARN_IMPLEMENT_OBJECT (LogAddVariable,"output = log(exp(input1)+exp(input2)) but it is ""computed in such a way as to preserve precision","NO HELP")
 LogAddVariable *.

 DECLARE_OBJECT_PTR (LogAddVariable)
Var logadd (Var &input1, Var &input2)
 PLEARN_IMPLEMENT_OBJECT (LogSoftmaxVariable,"ONE LINE DESCR","NO HELP")
 LogSoftmaxVariable *.

 DECLARE_OBJECT_PTR (LogSoftmaxVariable)
Var log_softmax (Var v)
 PLEARN_IMPLEMENT_OBJECT (LogSumVariable,"ONE LINE DESCR","NO HELP")
Var logadd (Var input)
 PLEARN_IMPLEMENT_OBJECT (LogVariable,"ONE LINE DESCR","NO HELP")
 LogVariable *.

 DECLARE_OBJECT_PTR (LogVariable)
Var log (Var v)
 PLEARN_IMPLEMENT_OBJECT (MarginPerceptronCostVariable,"Compute sigmoid of its first input, and then computes the negative ""cross-entropy cost","NO HELP")
 MarginPerceptronCostVariable *.

 DECLARE_OBJECT_PTR (MarginPerceptronCostVariable)
Var margin_perceptron_cost (Var output, Var target, real margin)
 PLEARN_IMPLEMENT_OBJECT (MatrixAffineTransformFeedbackVariable,"Affine transformation of a MATRIX variable.","NO HELP")
 DECLARE_OBJECT_PTR (MatrixAffineTransformFeedbackVariable)
 PLEARN_IMPLEMENT_OBJECT (MatrixAffineTransformVariable,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (MatrixAffineTransformVariable)
 PLEARN_IMPLEMENT_OBJECT (MatrixElementsVariable,"Fills the elements of a matrix using the given scalar Variable ""expression, that depends on index variables i and j, that loop from ""0 to ni-1 and 0 to nj-1 respectively.","NO HELP")
 MatrixElementsVariable *.

 DECLARE_OBJECT_PTR (MatrixElementsVariable)
Var matrixElements (Var expression, const Var &i, const Var &j, int ni, int nj, const VarArray &parameters)
 PLEARN_IMPLEMENT_OBJECT (MatrixInverseVariable,"Matrix inversions... ","NO HELP")
 MatrixInverseVariable *.

 DECLARE_OBJECT_PTR (MatrixInverseVariable)
Var matrixInverse (Var v)
 PLEARN_IMPLEMENT_OBJECT (MatrixOneHotSquaredLoss,"ONE LINE DESCR","NO HELP")
 MatrixOneHotSquaredLoss *.

 DECLARE_OBJECT_PTR (MatrixOneHotSquaredLoss)
 PLEARN_IMPLEMENT_OBJECT (MatrixSoftmaxLossVariable,"ONE LINE DESCR","NO HELP")
 MatrixSoftmaxLossVariable *.

 DECLARE_OBJECT_PTR (MatrixSoftmaxLossVariable)
 PLEARN_IMPLEMENT_OBJECT (MatrixSoftmaxVariable,"ONE LINE DESCR","NO HELP")
 MatrixSoftmaxVariable *.

 DECLARE_OBJECT_PTR (MatrixSoftmaxVariable)
 PLEARN_IMPLEMENT_OBJECT (MatrixSumOfVariable,"ONE LINE DESCR","NO HELP")
 MatrixSumOfVariable *.

 DECLARE_OBJECT_PTR (MatrixSumOfVariable)
Var sumOf (VMat distr, Func f, int nsamples, int input_size)
Var meanOf (VMat distr, Func f, int nsamples, int input_size)
 PLEARN_IMPLEMENT_OBJECT (MatRowVariable,"Variable that is the row of matrix mat indexed by variable input","NO HELP")
 MatRowVariable *.

 DECLARE_OBJECT_PTR (MatRowVariable)
Var accessRow (const Mat &m, Var index)
 PLEARN_IMPLEMENT_OBJECT (Max2Variable,"Elementwise max over 2 elements: max(v1,v2)[i] = max(v1[i],v2[i]) ""with same dimensions as the input vectors","NO HELP")
 Max2Variable *.

 DECLARE_OBJECT_PTR (Max2Variable)
Var max (Var v1, Var v2)
 PLEARN_IMPLEMENT_OBJECT (MaxVariable,"ONE LINE DESCR","NO HELP")
 MaxVariable *.

 DECLARE_OBJECT_PTR (MaxVariable)
Var max (Var v)
 PLEARN_IMPLEMENT_OBJECT (MiniBatchClassificationLossVariable,"ONE LINE DESCR","NO HELP")
 MiniBatchClassificationLossVariable *.

 DECLARE_OBJECT_PTR (MiniBatchClassificationLossVariable)
 PLEARN_IMPLEMENT_OBJECT (MinusColumnVariable,"ONE LINE DESCR","NO HELP")
 MinusColumnVariable *.

 DECLARE_OBJECT_PTR (MinusColumnVariable)
 PLEARN_IMPLEMENT_OBJECT (MinusRowVariable,"ONE LINE DESCR","NO HELP")
 MinusRowVariable *.

 DECLARE_OBJECT_PTR (MinusRowVariable)
 PLEARN_IMPLEMENT_OBJECT (MinusScalarVariable,"ONE LINE DESCR","NO HELP")
 MinusScalarVariable *.

 PLEARN_IMPLEMENT_OBJECT (MinusTransposedColumnVariable,"ONE LINE DESCR","NO HELP")
 MinusTransposedColumnVariable *.

 DECLARE_OBJECT_PTR (MinusTransposedColumnVariable)
 PLEARN_IMPLEMENT_OBJECT (MinusVariable,"ONE LINE DESCR","NO HELP")
 MinusVariable *.

 DECLARE_OBJECT_PTR (MinusVariable)
Var minus (Var v, Var w)
 PLEARN_IMPLEMENT_OBJECT (MinVariable,"ONE LINE DESCR","NO HELP")
 MinVariable *.

 DECLARE_OBJECT_PTR (MinVariable)
Var min (Var v)
 PLEARN_IMPLEMENT_OBJECT (MulticlassLossVariable,"ONE LINE DESCR","NO HELP")
 MulticlassLossVariable *.

 DECLARE_OBJECT_PTR (MulticlassLossVariable)
Var multiclass_loss (Var network_output, Var targets)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (NaryVariable,"ONE LINE DESCR","NO HELP")
 NaryVariable *.

 PLEARN_IMPLEMENT_OBJECT (NegateElementsVariable,"Elementwise negation and inversion...","NO HELP")
 NegateElementsVariable *.

 DECLARE_OBJECT_PTR (NegateElementsVariable)
Var negateElements (Var v)
 PLEARN_IMPLEMENT_OBJECT (NegCrossEntropySigmoidVariable,"Compute sigmoid of its first input, and then computes the negative ""cross-entropy cost","NO HELP")
 NegCrossEntropySigmoidVariable *.

 DECLARE_OBJECT_PTR (NegCrossEntropySigmoidVariable)
Var stable_cross_entropy (Var linear_output, Var target)
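
A sketch contrasting the two ways of obtaining this cost (illustration only, assuming a Var(length) constructor; sigmoid and cross_entropy are listed elsewhere on this page):

    Var a(1);                               // pre-sigmoid (linear) output
    Var t(1);                               // binary target
    Var c1 = cross_entropy(sigmoid(a), t);  // straightforward, but can lose precision
    Var c2 = stable_cross_entropy(a, t);    // same cost, computed from the linear output
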
 PLEARN_IMPLEMENT_OBJECT (NllSemisphericalGaussianVariable,"Computes the negative log-likelihood of a Gaussian on some data point, depending on the nearest neighbors."," This class implements the negative log-likelihood cost of a Markov chain that\n"" uses semispherical gaussian transition probabilities. The parameters of the\n"" semispherical gaussians are a tangent plane, two variances,\n"" one mean and the distance of the point with its nearest neighbors.\n"" The two variances correspond to the shared variance of every manifold directions\n"" and of every noise directions. \n"" This variable is used to do gradient descent on the parameters, but\n"" not to estimate de likelihood of the Markov chain a some point, which is\n"" more complex to estimate.\n")
 NllSemisphericalGaussianVariable *.

 DECLARE_OBJECT_PTR (NllSemisphericalGaussianVariable)
Var nll_semispherical_gaussian (Var tangent_plane_var, Var mu_var, Var sm_var, Var sn_var, Var neighbors_dist_var, Var p_target_var, Var p_neighbors_var, Var noise, Var mu_noisy, bool use_noise=false, real epsilon=1e-6)
 PLEARN_IMPLEMENT_OBJECT (OneHotSquaredLoss,"Computes sum(square_i(netout[i]-(i==classnum ?hotval :coldval))","NO HELP")
 OneHotSquaredLoss *.

 DECLARE_OBJECT_PTR (OneHotSquaredLoss)
Var onehot_squared_loss (Var network_output, Var classnum, real coldval=0., real hotval=1.)
 PLEARN_IMPLEMENT_OBJECT (OneHotVariable,"Represents a vector of a given length, that has value 1 at the index ""given by another variable and 0 everywhere else","NO HELP")
 OneHotVariable *.

 DECLARE_OBJECT_PTR (OneHotVariable)
Var onehot (int the_length, Var hotindex, real coldvalue=0.0, real hotvalue=1.0)
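
A sketch combining onehot and onehot_squared_loss for a 10-class squared-error cost (illustration only; the sizes and the Var(length) constructor are assumptions):

    Var netout(10);                                    // 10 class scores
    Var classnum(1);                                   // holds the index of the correct class
    Var cost = onehot_squared_loss(netout, classnum);  // squared distance to the one-hot target
    Var target = onehot(10, classnum);                 // the explicit one-hot vector, if needed
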
 PLEARN_IMPLEMENT_OBJECT (PDistributionVariable,"Variable that represents a random variable according to some PDistribution object","")
 PDistributionVariable *.

 DECLARE_OBJECT_PTR (PDistributionVariable)
 PLEARN_IMPLEMENT_OBJECT (PLogPVariable,"Returns the elementwise x*log(x) in a (hopefully!) numerically stable way","NO HELP")
 PLogPVariable *.

 DECLARE_OBJECT_PTR (PLogPVariable)
Var plogp (Var v)
 PLEARN_IMPLEMENT_OBJECT (PlusColumnVariable,"Adds a single-column var to each column of a matrix var","NO HELP")
 PlusColumnVariable *.

 DECLARE_OBJECT_PTR (PlusColumnVariable)
 PLEARN_IMPLEMENT_OBJECT (PlusConstantVariable,"Adds a scalar constant to a matrix var","NO HELP")
 PlusConstantVariable *.

 DECLARE_OBJECT_PTR (PlusConstantVariable)
 PLEARN_IMPLEMENT_OBJECT (PlusRowVariable,"Adds a single-row var to each row of a matrix var","NO HELP")
 PlusRowVariable *.

 DECLARE_OBJECT_PTR (PlusRowVariable)
 PLEARN_IMPLEMENT_OBJECT (PlusScalarVariable,"Adds a scalar var to a matrix var","NO HELP")
 PlusScalarVariable *.

 DECLARE_OBJECT_PTR (PlusScalarVariable)
 PLEARN_IMPLEMENT_OBJECT (PlusVariable,"Adds 2 matrix vars of same size","NO HELP")
 PlusVariable *.

 DECLARE_OBJECT_PTR (PlusVariable)
 PLEARN_IMPLEMENT_OBJECT (PowVariable,"Elementwise pow (returns 0 wherever input is negative)","NO HELP")
 PowVariable *.

 DECLARE_OBJECT_PTR (PowVariable)
Var pow (Var v, real power)
Var sqrt (Var v)
 PLEARN_IMPLEMENT_OBJECT (PowVariableVariable,"x^y where x and y are variables but y is scalar ""or it has the same size as x","NO HELP")
 PowVariableVariable *.

Var pow (Var v, Var power)
 PLEARN_IMPLEMENT_OBJECT (ProductTransposeVariable,"Matrix product between matrix1 and transpose of matrix2","NO HELP")
 ProductTransposeVariable *.

 DECLARE_OBJECT_PTR (ProductTransposeVariable)
Var productTranspose (Var &m1, Var &m2)
 PLEARN_IMPLEMENT_OBJECT (ProductVariable,"Matrix product","NO HELP")
 ProductVariable *.

 DECLARE_OBJECT_PTR (ProductVariable)
Var product (Var v1, Var v2)
 general matrix product

 PLEARN_IMPLEMENT_OBJECT (ProjectionErrorVariable,"Computes the projection error of a set of vectors on a non-orthogonal basis.\n","The first input is a set of n_dim vectors (possibly seen as a single vector of their concatenation) f_i, each in R^n\n""The second input is a set of T vectors (possibly seen as a single vector of their concatenation) t_j, each in R^n\n""There are several options that control which kind of projection error is actually computed:\n""If !use_subspace_distance {the recommended setting}, the output is\n"" sum_j min_w || t_j - sum_i w_i f_i ||^2 / ||t_j||^2\n"" where the denominator can be eliminated (not recommended) by turning off the\n"" normalize_by_neighbor_distance option. In this expression, w is a local\n"" n_dim-vector that is optmized analytically.\n"" If the 'ordered_vectors' is set, the gradient is not computed truthfully\n"" but in such a way as to induce a natural ordering among the vectors f_i.\n"" For each f_i, the above criterion is applied using a projection that\n"" involves only the first i vectors f_1...f_i. In this way the first vector f_1\n"" tries to *explain* the vectors t_j as well as possible with a single dimension,\n"" and the vector f_2 learns to *explain* what f_2 did not already predict, etc...\n"" When this option is set, we also choose the w_i in the same greedy way, starting\n"" from w_1 chosen to minimize the projection error wrt f_1, w_2 chosen to minimize the\n"" residual projection error left on f_2, etc... Hence the cost minimized wrt f_k on neighbor j is\n"" ||t_j - sum_{i<=k} w_i f_i||^2 / ||t_j||^2\n"" (this cost is minimized to choose w_k, and to get a gradient on f_k as well).\n"" In that case no SVD is used, instead one obtains an analytic solution for w_k:\n"" w_k = (t_j . f_k - sum_{i<k} w_i f_i . f_k)/||f_k||^2.\n"" The output produced by fprop is sum_j || t_j - sum_i w_i f_i ||^2 / ||t_j||^2\n"" where the w_i are chosen as in the previous equation.\n""However, if use_subspace_distance (not recommended), the output is\n"" min_{w,u} || sum_i w_i f_i - sum_j u_j t_j ||^2 .\n""In both cases, if norm_penalization>0, an extra term is added:\n"" norm_penalization * sum_i (||f_i||^2 - 1)^2.\n""The 'epsilon' and 'regularization' options are used to regularize the SVD-based matrix\n""inversion involved in minimizing for w: only the singular values of F' that are\n""above 'epsilon' are inverted (and their singular vectors considered, and then they\n""are incremented by 'regularization' before inverting.\n")
 ProjectionErrorVariable *.

 DECLARE_OBJECT_PTR (ProjectionErrorVariable)
Var projection_error (Var f, Var t, real norm_penalization=0, int n=-1, bool normalize_by_neighbor_distance=true, bool use_subspace_distance=false, real epsilon=0, real regularization=0, bool ordered_vectors=true)
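
A minimal usage sketch (illustration only: the sizes, the flat layout of the concatenated vectors and the Var(length) constructor are assumptions):

    // 2 basis vectors f_i and 5 neighbour vectors t_j, all in R^4, stored concatenated
    Var f(2 * 4);
    Var t(5 * 4);
    Var err = projection_error(f, t, 0 /*norm_penalization*/, 4 /*n*/);
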
 PLEARN_IMPLEMENT_OBJECT (ReshapeVariable,"Variable that views another variable, but with a different length() and width() ""(the only restriction being that length()*width() remain the same)","NO HELP")
 ReshapeVariable *.

 DECLARE_OBJECT_PTR (ReshapeVariable)
Var reshape (Var v, int newlength, int newwidth)
 PLEARN_IMPLEMENT_OBJECT (RightPseudoInverseVariable,"ONE LINE DESCR","NO HELP")
 RightPseudoInverseVariable *.

 DECLARE_OBJECT_PTR (RightPseudoInverseVariable)
Var rightPseudoInverse (Var v)
 PLEARN_IMPLEMENT_OBJECT (RowAtPositionVariable,"Variables positionned inside a larger zero variable ...","NO HELP")
 RowAtPositionVariable *.

 DECLARE_OBJECT_PTR (RowAtPositionVariable)
 PLEARN_IMPLEMENT_OBJECT (RowSumVariable,"Result is a single column that contains the sum of each row of the input","NO HELP")
 RowSumVariable *.

 DECLARE_OBJECT_PTR (RowSumVariable)
Var rowSum (Var v)
 PLEARN_IMPLEMENT_OBJECT (SemiSupervisedProbClassCostVariable,"ONE LINE DESCR","NO HELP")
 SemiSupervisedProbClassVariable *.

 DECLARE_OBJECT_PTR (SemiSupervisedProbClassCostVariable)
 PLEARN_IMPLEMENT_OBJECT (SigmoidVariable,"ONE LINE DESCR","NO HELP")
 SigmoidVariable *.

Var softmax (Var x1, Var x2, Var hardness)
 DECLARE_OBJECT_PTR (SigmoidVariable)
Var sigmoid (Var v)
 PLEARN_IMPLEMENT_OBJECT (SignVariable,"sign(x) = 1 if x>0, -1 if x<0, 0 if x=0, all done element by element.","NO HELP")
 SignVariable *.

 DECLARE_OBJECT_PTR (SignVariable)
Var sign (Var input)
 PLEARN_IMPLEMENT_OBJECT (SoftmaxLossVariable,"ONE LINE DESCR","NO HELP")
 SoftmaxLossVariable *.

 DECLARE_OBJECT_PTR (SoftmaxLossVariable)
Var softmax (Var input, Var index)
 PLEARN_IMPLEMENT_OBJECT (SoftmaxVariable,"ONE LINE DESCR","NO HELP")
 SoftmaxVariable *.

 DECLARE_OBJECT_PTR (SoftmaxVariable)
Var softmax (Var v)
 PLEARN_IMPLEMENT_OBJECT (SoftplusVariable,"This is the primitive of a sigmoid: log(1+exp(x))","NO HELP")
 SoftplusVariable *.

 DECLARE_OBJECT_PTR (SoftplusVariable)
Var softplus (Var v)
 PLEARN_IMPLEMENT_OBJECT (SoftSlopeIntegralVariable,"This Var computes the integral of the soft_slope function in an interval.","Compute the integral of soft_slope(x,s,l,r) over x from a to b\n")
 SoftSlopeIntegralVariable *.

 DECLARE_OBJECT_PTR (SoftSlopeIntegralVariable)
Var soft_slope_integral (Var smoothness, Var left, Var right, real a=0, real b=1)
 PLEARN_IMPLEMENT_OBJECT (SoftSlopeVariable,"This Var computes the soft_slope function","The soft_slope function is a soft version of linear by parts function.\n""(as smoothness goes to infty). More precisely it converges to a function that is\n""0 in [-infty,left], linear in [left,right], and 1 in [right,infty], and continuous\n""It is always monotonically increasing wrt x (positive derivative in x).\n""If the arguments are vectors than the operation is performed element by element on all of them.\n")
 SoftSlopeVariable *.

 DECLARE_OBJECT_PTR (SoftSlopeVariable)
Var soft_slope (Var x, Var smoothness, Var left, Var right)
Var d_soft_slope (Var x, Var smoothness, Var left, Var right)
Var soft_slope_limit (Var x, Var smoothness, Var left, Var right)
 PLEARN_IMPLEMENT_OBJECT (SourceVariable,"ONE LINE DESCR","NO HELP")
 SourceVariable *.

 DECLARE_OBJECT_PTR (SourceVariable)
 PLEARN_IMPLEMENT_OBJECT (SquareRootVariable,"ONE LINE DESCR","NO HELP")
 SquareRootVariable *.

 DECLARE_OBJECT_PTR (SquareRootVariable)
Var squareroot (Var v)
 PLEARN_IMPLEMENT_OBJECT (SquareVariable,"ONE LINE DESCR","NO HELP")
 SquareVariable *.

 DECLARE_OBJECT_PTR (SquareVariable)
Var square (Var v)
 PLEARN_IMPLEMENT_OBJECT (SubMatTransposeVariable,"ONE LINE DESCR","NO HELP")
 SubMatTransposeVariable *.

 DECLARE_OBJECT_PTR (SubMatTransposeVariable)
Var transpose (Var v)
 PLEARN_IMPLEMENT_OBJECT (SubMatVariable,"ONE LINE DESCR","NO HELP")
 SubMatVariable *.

 DECLARE_OBJECT_PTR (SubMatVariable)
Var subMat (Var v, int i, int j, int l, int w)
 PLEARN_IMPLEMENT_OBJECT (SubsampleVariable,"A subsample var; equals subsample(input, the_subsamplefactor)","NO HELP")
 SubsampleVariable *.

 DECLARE_OBJECT_PTR (SubsampleVariable)
Var subsample (Var input, int subsample_factor)
 PLEARN_IMPLEMENT_OBJECT (SumAbsVariable,"ONE LINE DESCR","NO HELP")
 SumAbsVariable *.

 DECLARE_OBJECT_PTR (SumAbsVariable)
Var sumabs (Var v)
 PLEARN_IMPLEMENT_OBJECT (SumOfVariable,"Variable that sums the value of a Func evaluated on each row of a VMat","NO HELP")
 SumOfVariable *.

 DECLARE_OBJECT_PTR (SumOfVariable)
Var sumOf (VMat distr, Func f, int nsamples)
 sumOf

Var sumOf (Var output, const VarArray &inputs, VMat distr, int nsamples, VarArray parameters=VarArray())
 deprecated old version, do not use!

Var meanOf (VMat distr, Func f, int nsamples)
 meanOf

Var meanOf (Var output, const VarArray &inputs, VMat distr, int nsamples, VarArray parameters=VarArray())
 deprecated old version, do not use!
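
A hedged sketch of summing a per-example cost over a training VMat (illustration only: 'trainset' is a placeholder VMat, and the Var(length), Var(length,width) and Func(inputs, output) constructors as well as VMat::length() are assumptions; affine_transform, square, operator- and operator& come from this page):

    Var input(3), target(1);                                  // assumed Var(length) constructor
    Var W(4, 1);                                              // first row is the bias
    Var cost = square(affine_transform(input, W) - target);   // per-example squared error
    Func f(input & target, cost);                             // assumed Func(inputs, output) constructor
    Var total_cost = sumOf(trainset, f, trainset.length());   // summed over the whole VMat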

 PLEARN_IMPLEMENT_OBJECT (SumOverBagsVariable,"Variable that sums the value of a Func each time evaluated on a subsequence of a VMat\n","returns\n"" Sum_{bags in vmat} f(inputs and targets in bag)\n""(it can average this sum over the number of bags if the 'average' option is set).\n""By convention a bag is a sequence of rows of the vmat in which the last column of the target\n""indicates whether the row is the first one (and/or) the last one, with its two least significant bits:\n"" last_column_of_target == 1 ==> first row\n"" last_column_of_target == 2 ==> last row\n"" last_column_of_target == 0 ==> intermediate row\n"" last_column_of_target == 1+2==3 ==> single-row bag (both first and last).\n""The option n_samples controls how many terms in the sum are considered at a time:\n"" n_samples <= 0: sum over the whole vmat (e.g. for batch gradient computation)\n"" n_samples = 1: sum over a single bag at a time (e.g. for stochastic gradient)\n"" where each fprop or fbprop advances to the next bag\n"" otherwise: sum over n_samples bags at a time (e.g. for min-batch training)\n""The last column of the target is not given in the call to f, but a bag_size input is provided instead.\n""The inputs to f are: (matrix of bag inputs, the bag size, the bag target, [the bag weight])\n""(the bag weight is included only if there are weights in the original VMat).")
 SumOverBagsVariable *.

 DECLARE_OBJECT_PTR (SumOverBagsVariable)
Var sumOverBags (VMat vmat, Func f, int max_bag_size, int nsamples, bool average=false, bool transpose=false)
 sumOverBags
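
A one-line usage sketch (illustration only: 'vmat' and 'f' are placeholders, and f must follow the input convention described above, i.e. take the matrix of bag inputs, the bag size, the bag target and optionally the bag weight):

    Var bags_cost = sumOverBags(vmat, f, 10 /*max_bag_size*/, -1 /*nsamples <= 0: whole vmat*/);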

 PLEARN_IMPLEMENT_OBJECT (SumSquareVariable,"ONE LINE DESCR","NO HELP")
 SumSquareVariable *.

 DECLARE_OBJECT_PTR (SumSquareVariable)
Var sumsquare (Var v)
 PLEARN_IMPLEMENT_OBJECT (SumVariable,"ONE LINE DESCR","NO HELP")
 SumVariable *.

 DECLARE_OBJECT_PTR (SumVariable)
Var sum (Var v)
 PLEARN_IMPLEMENT_OBJECT (TanhVariable,"ONE LINE DESCR","NO HELP")
 TanhVariable *.

 DECLARE_OBJECT_PTR (TanhVariable)
Var tanh (Var v)
 PLEARN_IMPLEMENT_OBJECT (TimesColumnVariable,"Multiplies each column of a matrix var elementwise with a single column variable","NO HELP")
 TimesColumnVariable *.

 DECLARE_OBJECT_PTR (TimesColumnVariable)
 PLEARN_IMPLEMENT_OBJECT (TimesConstantVariable,"Multiplies a matrix var by a scalar constant","NO HELP")
 TimesConstantVariable *.

 DECLARE_OBJECT_PTR (TimesConstantVariable)
 PLEARN_IMPLEMENT_OBJECT (TimesRowVariable,"Multiplies each row of a matrix var elementwise with a single row variable","NO HELP")
 TimesRowVariable *.

 DECLARE_OBJECT_PTR (TimesRowVariable)
 PLEARN_IMPLEMENT_OBJECT (TimesScalarVariable,"Multiplies a matrix var by a scalar var","NO HELP")
 TimesScalarVariable *.

 DECLARE_OBJECT_PTR (TimesScalarVariable)
Var timesScalar (Var v, Var scalar)
 PLEARN_IMPLEMENT_OBJECT (TimesVariable,"Multiplies 2 matrix vars of same size elementwise","NO HELP")
 TimesVariable *.

 DECLARE_OBJECT_PTR (TimesVariable)
Var times (Var v, Var w)
 PLEARN_IMPLEMENT_OBJECT (TransposeProductVariable,"Matrix product between transpose of matrix1 and matrix2","NO HELP")
 TransposeProductVariable *.

 DECLARE_OBJECT_PTR (TransposeProductVariable)
Var transposeProduct (Var &m1, Var &m2)
 PLEARN_IMPLEMENT_OBJECT (UnaryHardSlopeVariable,"Hard slope function whose Var input is only the argument of the function.","Maps x (elementwise) to 0 if x<left, 1 if x>right, and linear in between otherwise.")
 UnaryHardSlopeVariable *.

 DECLARE_OBJECT_PTR (UnaryHardSlopeVariable)
Var unary_hard_slope (Var v, real l=-1, real r=1)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (UnaryVariable,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (UnequalConstantVariable,"A scalar var; equal 1 if input1!=c, 0 otherwise","NO HELP")
 UnequalConstantVariable *.

 DECLARE_OBJECT_PTR (UnequalConstantVariable)
Var operator!= (Var v1, real cte)
 result[i] = 1 if v1[i]!=cte, 0 otherwise

Var operator!= (real cte, Var v1)
 result[i] = 1 if v1[i]!=cte, 0 otherwise

 PLEARN_IMPLEMENT_OBJECT (UnfoldedFuncVariable,"Variable that puts in the rows of its output matrix the value\n""of a Func evaluated on each row of an input matrix.\n","The input_matrix and output matrix have n_unfold rows. A separate propagation path\n""is created that maps (using the Func as a template) each input row to each output row.\n""The parents of this variable include the non-input parents of the Func.\n")
 UnfoldedFuncVariable *.

 DECLARE_OBJECT_PTR (UnfoldedFuncVariable)
Var unfoldedFunc (Var input_matrix, Func f, bool transpose=false)
 PLEARN_IMPLEMENT_OBJECT (UnfoldedSumOfVariable,"Variable that sums the value of a Func evaluated on each row of a matrix.\n","However, unlike the SumOfVariable, it does so by unfolding the Func (up to given maximum number\n""of times 'max_bag_size'), and it allows that number to be variable. Each of the unfolded Func\n""is applied on a different row of the input matrix. The number of rows to sum is specified on the\n""fly by another input, the bag_size.\n")
 UnfoldedSumOfVariable *.

 DECLARE_OBJECT_PTR (UnfoldedSumOfVariable)
Var unfoldedSumOf (Var input_matrix, Var bag_size, Func f, int max_bag_size)
ostream & operator<< (ostream &out, const Var &v)
Var var (real init_value)
template<> void deepCopyField (Var &field, CopiesMap &copies)
 Specialized in order to display a warning message.

Var operator+ (Var v, real cte)
Var operator+ (real cte, Var v)
Var operator- (Var v, real cte)
Var operator+ (Var v1, Var v2)
void operator+= (Var &v1, const Var &v2)
Var operator- (Var v1, Var v2)
Var operator- (Var v)
void operator-= (Var &v1, const Var &v2)
Var operator- (real cte, Var v)
Var operator * (Var v, real cte)
Var operator * (real cte, Var v)
Var operator * (Var v1, Var v2)
 element-wise multiplication

Var operator/ (Var v, real cte)
Var operator/ (real cte, Var v)
Var operator/ (Var v1, Var v2)
Var operator== (Var v1, Var v2)
Var operator!= (Var v1, Var v2)
Var isdifferent (Var v1, Var v2)
Var mean (Var v)
Var neg_log_pi (Var p, Var index)
Var softmax (Var input, int index)
Var pownorm (Var input, real n)
Var norm (Var input, real n)
Var entropy (Var v, bool normalize)
Var distance (Var input1, Var input2, real n)
Var powdistance (Var input1, Var input2, real n)
VarArray propagationPath (const VarArray &inputs, const VarArray &outputs)
 The function that computes a propagation path *.

VarArray propagationPath (const VarArray &outputs)
 returns the propagation path going from all sources that influence the outputs to the outputs.

VarArray propagationPathToParentsOfPath (const VarArray &inputs, const VarArray &outputs)
 from all sources to all direct non-input parents of the path inputs-->outputs

VarArray nonInputParentsOfPath (VarArray inputs, VarArray outputs)
 Isn't this useless, since we have a constructor of VarArray from Var that should be called automatically? (Pascal)

VarArray allSources (const VarArray &v)
 returns all sources that influence the given vars

VarArray operator- (const VarArray &a, const VarArray &b)
 returns all variables of a that are not in b

VarArray nonInputSources (const VarArray &inputs, const VarArray &outputs)
 returns all sources that influence outputs except those that influence it only through inputs

void operator<< (VarArray &ar, const Array< Vec > &values)
void operator>> (VarArray &ar, const Array< Vec > &values)
void printInfo (VarArray &a)
void printInfo (VarArray inputs, const Var &output, bool show_gradients)
template<> void deepCopyField (VarArray &field, CopiesMap &copies)
void operator<< (VarArray &ar, const Vec &datavec)
void operator>> (VarArray &ar, const Vec &datavec)
VarArray operator & (Var v1, Var v2)
 DECLARE_TYPE_TRAITS (VarArray)
PStream & operator>> (PStream &in, VarArray &o)
PStream & operator<< (PStream &out, const VarArray &o)
 PLEARN_IMPLEMENT_OBJECT (VarArrayElementVariable,"Selects one element of a VarArray according to a Var index","NO HELP")
 VarArrayElementVariable *.

 DECLARE_OBJECT_PTR (VarArrayElementVariable)
 PLEARN_IMPLEMENT_OBJECT (VarColumnsVariable,"ONE LINE DESCR","NO HELP")
 VarColumnsVariable *.

 DECLARE_OBJECT_PTR (VarColumnsVariable)
 PLEARN_IMPLEMENT_OBJECT (VarElementVariable,"ONE LINE DESCR","NO HELP")
 VarElementVariable *.

 DECLARE_OBJECT_PTR (VarElementVariable)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Variable,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (Variable)
 DECLARE_OBJECT_PP (Var, Variable)
 PLEARN_IMPLEMENT_OBJECT (VarRowsVariable,"Variable that is a subset of a matrix's rows; ""input1 : matrix from which rows are selected; ""input2 : vector whose elements are row indices in input1","NO HELP")
 VarRowsVariable *.

 DECLARE_OBJECT_PTR (VarRowsVariable)
 PLEARN_IMPLEMENT_OBJECT (VarRowVariable,"Variable that is the row of the input1 variable indexed by the input2 variable","NO HELP")
 VarRowVariable *.

 DECLARE_OBJECT_PTR (VarRowVariable)
 PLEARN_IMPLEMENT_OBJECT (VecElementVariable,"Variable that is the element of vector vec indexed by variable input","NO HELP")
 VecElementVariable *.

 DECLARE_OBJECT_PTR (VecElementVariable)
Var accessElement (const Vec &v, Var index)
 PLEARN_IMPLEMENT_OBJECT (WeightedSumSquareVariable,"ONE LINE DESCR","NO HELP")
 WeightedSumSquareVariable *.

 DECLARE_OBJECT_PTR (WeightedSumSquareVariable)
Var weighted_sumsquare (Var v, Var w)
 PLEARN_IMPLEMENT_OBJECT (AsciiVMatrix,"ONE LINE DESCR","AsciiVMatrix implements a file in ascii format")
 DECLARE_OBJECT_PTR (AsciiVMatrix)
 PLEARN_IMPLEMENT_OBJECT (AutoVMatrix,"Automatically builds an appropriate VMat given its specification.","AutoVMatrix tries to interpret the given 'specification' (it will call getDataSet) and\n""will be a wrapper around the appropriate VMatrix type, simply forwarding calls to it.\n""AutoVMatrix can be used to access the UCI databases.\n")
 DECLARE_OBJECT_PTR (AutoVMatrix)
 PLEARN_IMPLEMENT_OBJECT (BatchVMatrix,"ONE LINE DESCR","VMat class that replicates small parts of a matrix (mini-batches), so that each mini-batch appears twice (consecutively).")
 DECLARE_OBJECT_PTR (BatchVMatrix)
 PLEARN_IMPLEMENT_OBJECT (BootstrapSplitter,"A splitter whose splits are bootstrap samples of the original dataset","BootstrapSplitter implements a ...")
 DECLARE_OBJECT_PTR (BootstrapSplitter)
 PLEARN_IMPLEMENT_OBJECT (BootstrapVMatrix,"A VMatrix that sees a bootstrap subset of its parent VMatrix.\n""This is not a real bootstrap since a sample can only appear once.","")
 BootstrapVMatrix *.

 DECLARE_OBJECT_PTR (BootstrapVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ByteMemoryVMatrix,"ONE_LINE_DESCR","ONE LINE HELP")
 DECLARE_OBJECT_PTR (ByteMemoryVMatrix)
 PLEARN_IMPLEMENT_OBJECT (CenteredVMatrix,"A VMatrix that centers a dataset.","The empirical mean is subtracted to each row of the underlying VMat.")
 DECLARE_OBJECT_PTR (CenteredVMatrix)
 PLEARN_IMPLEMENT_OBJECT (CompressedVMatrix,"ONE LINE DESCR","ONE LINE HELP")
 DECLARE_OBJECT_PTR (CompressedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ConcatColumnsVMatrix,"ONE LINE DESCR","NO HELP")
 ConcatColumnsVMatrix *.

 DECLARE_OBJECT_PTR (ConcatColumnsVMatrix)
VMat hconcat (VMat d1, VMat d2)
VMat hconcat (Array< VMat > ds)
 PLEARN_IMPLEMENT_OBJECT (ConcatRowsSubVMatrix,"ONE LINE DESC","ONE LINE HELP")
 ConcatRowsSubVMatrix *.

 DECLARE_OBJECT_PTR (ConcatRowsSubVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ConcatRowsVMatrix,"Concatenates the rows of a number of VMat.","It can also be used to select fields which are common to those VMat,\n""using the 'only_common_fields' option.\n""Otherwise, the fields are just assumed to be those of the first VMat.\n")
 ConcatRowsVMatrix *.

 DECLARE_OBJECT_PTR (ConcatRowsVMatrix)
VMat vconcat (VMat d1, VMat d2)
VMat vconcat (Array< VMat > ds)
 PLEARN_IMPLEMENT_OBJECT (CrossReferenceVMatrix,"ONE LINE DESC","ONE LINE HELP")
 CrossReferenceVMatrix *.

 DECLARE_OBJECT_PTR (CrossReferenceVMatrix)
 PLEARN_IMPLEMENT_OBJECT (CumVMatrix,"Add columns that a cumulated values of given columns","The user specifies one or more columns and for each such <column-name>\n""a cum-<column-name> column is created which will contain the sum from row 0\n""to the current row of <column-name>.\n")
 DECLARE_OBJECT_PTR (CumVMatrix)
 PLEARN_IMPLEMENT_OBJECT (DatedJoinVMatrix,"Join two vmatrices, taking into account a date field.","The two vmatrices play an asymmetric role. They are called\n""master and slave. The resulting vmatrix has one row for each row\n""of the master vmatrix (or optionally of the slave vmatrix). Its\n""columns are a concatenation of selected columns of the master vmatrix\n""and of selected columns of the slave which 'match' according to a rule\n""(always in the order: master fields, slave fields). Matchint is\n""obtained using shared 'key fields'. Optionally, for matching, a date field\n""in the master is forced to belong to a date interval in the slave,\n""as follows: slave_date_start < master_date <= slave_date_end.\n""If no match is found then the master (or slave) columns are left with missing values.\n""If more than one slave row matches, then the one with the latest\n""slave_date_start is used (and a warning is optionally issued). If\n""no slave_date_start field is provided then no date constraint is\n""enforced, and the last key-matching slave row is matched to a master row.\n""An option (output_the_slave) allows to output one row for each slave row\n""instead of the default which outputs one row for each master row.\n""Note that if (output_the_slave) then the non-matching master rows are 'lost'\n""whereas if (!output_the_slave) then the non-matching slave rows are 'lost'.\n""If output_the_slave and more than one master row matches with a given slave_row\n""then the SUM of the master fields is computed (i.e. be careful that their sum is meaningful)\n")
 DECLARE_OBJECT_PTR (DatedJoinVMatrix)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (DatedVMatrix,"ONE_LINE_DESC","ONE_LINE_HELP")
 DatedVMatrix *.

 DECLARE_OBJECT_PTR (DatedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (DBSplitter,"A Splitter that contains several databases.","The databases to be used can be specified with the 'databases' option. ")
 DECLARE_OBJECT_PTR (DBSplitter)
 PLEARN_IMPLEMENT_OBJECT (DiskVMatrix,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (DiskVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ExplicitSplitter,"ONE LINE DESCR","ExplicitSplitter allows you to define a 'splitter' by giving explicitly the datasets for each split\n""as a matrix VMatrices.\n""(This splitter in effect ignores the 'dataset' it is given with setDataSet) \n")
 DECLARE_OBJECT_PTR (ExplicitSplitter)
 PLEARN_IMPLEMENT_OBJECT (ExtendedVMatrix,"ONE_LINE_DESC","ONE_LINE_HELP")
 ExtendedVMatrix *.

 DECLARE_OBJECT_PTR (ExtendedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (FileVMatrix,"ONE LINE DESCR","NO HELP")
 FileVMatrix *.

int strlen (char *s)
 DECLARE_OBJECT_PTR (FileVMatrix)
 PLEARN_IMPLEMENT_OBJECT (FilteredVMatrix,"A filtered view of its source vmatrix","The filter is an exression in VPL language.\n""The filtered indexes are saved in the metadata directory, that NEEDS to\n""be provided.\n")
 DECLARE_OBJECT_PTR (FilteredVMatrix)
 PLEARN_IMPLEMENT_OBJECT (FilterSplitter,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (FilterSplitter)
 PLEARN_IMPLEMENT_OBJECT (FinancePreprocVMatrix,"ONE LINE DESCR","FinancePreprocVMatrix implements a VMatrix with extra preprocessing columns.")
 DECLARE_OBJECT_PTR (FinancePreprocVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ForwardVMatrix,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (ForwardVMatrix)
 PLEARN_IMPLEMENT_OBJECT (FractionSplitter,"A Splitter that can extract several subparts of a dataset in each split.","Ranges of the dataset are specified explicitly as start:end positions,\n""that can be absolute or relative to the number of samples in the training set.")
 DECLARE_OBJECT_PTR (FractionSplitter)
 PLEARN_IMPLEMENT_OBJECT (GeneralizedOneHotVMatrix,"ONE LINE DESC","ONE LINE HELP")
 GeneralizedOneHotVMatrix *.

 DECLARE_OBJECT_PTR (GeneralizedOneHotVMatrix)
 PLEARN_IMPLEMENT_OBJECT (GetInputVMatrix,"This VMatrix only sees the input part of its source VMatrix.","")
 DECLARE_OBJECT_PTR (GetInputVMatrix)
 PLEARN_IMPLEMENT_OBJECT (GramVMatrix,"Computes the Gram matrix of a given kernel.","Currently, this class inherits from a MemoryVMatrix, and the Gram matrix\n""is stored in memory.\n")
 DECLARE_OBJECT_PTR (GramVMatrix)
 PLEARN_IMPLEMENT_OBJECT (IndexedVMatrix,"ONE LINE DESCR"," VMat class that sees a matrix as a collection of triplets (row, column, value)\n""Thus it is a N x 3 matrix, with N = the number of elements in the original matrix.\n")
 DECLARE_OBJECT_PTR (IndexedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (InterleaveVMatrix,"ONE LINE DESC","ONE LINE HELP")
 InterleaveVMatrix *.

 DECLARE_OBJECT_PTR (InterleaveVMatrix)
 PLEARN_IMPLEMENT_OBJECT (JoinVMatrix,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (JoinVMatrix)
 PLEARN_IMPLEMENT_OBJECT (JulianizeVMatrix,"ONE LINE DESCR","JulianizeVMatrix provides a conversion from a VMat containing dates\n""in an explicit 3-column (YYYY,MM,DD) or 6-column (YYYY,MM,DD,HH,MM,SS)\n""format to a Julian day number format (including fractional part to\n""represent the hour within the day). The dates can be at any columns,\n""not only columns 0-2 (or 0-5). More than a single date can be\n""converted.\n")
 DECLARE_OBJECT_PTR (JulianizeVMatrix)
 PLEARN_IMPLEMENT_OBJECT (KernelVMatrix,"ONE LINE DESC","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (KFoldSplitter,"K-fold cross-validation splitter.","KFoldSplitter implements K splits of the dataset into a training-set and a test-set.\n""If the number of splits is higher than the number of examples, leave-one-out cross-validation\n""will be performed.")
 DECLARE_OBJECT_PTR (KFoldSplitter)
 PLEARN_IMPLEMENT_OBJECT (KNNVMatrix,"A VMatrix that sees the nearest neighbours of each sample in the source VMat.","Each sample is followed by its (knn-1) nearest neighbours.\n""To each row is appended an additional target, which is:\n"" - 1 if it is the first of a bag of neighbours,\n"" - 2 if it is the last of a bag,\n"" - 0 if it is none of these,\n"" - 3 if it is both (only for knn == 1).\n""In addition, if a kernel_pij kernel is provided,, in the input part of the VMatrix\n""is appended p_ij, where\n"" p_ij = K(x_i,x_j) / \\sum_{k \\in knn(i), k != i} K(x_i,x_k)\n""where K = kernel_pij, and j != i (for j == i, p_ij = -1).")
 DECLARE_OBJECT_PTR (KNNVMatrix)
 PLEARN_IMPLEMENT_OBJECT (LearnerProcessedVMatrix,"ONE LINE DESCR","LearnerProcessedVMatrix implements a VMatrix processed on the fly by a learner (which will optionally be first trained on the source vmatrix)")
 DECLARE_OBJECT_PTR (LearnerProcessedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (LocalNeighborsDifferencesVMatrix,"Computes the difference between each input row and its nearest neighbors.","For each row x of the source VMatrix, the resulting row will be the\n""concatenation of n_neighbors vectors, each of which is the difference\n""between one of the nearest neighbors of x in the source and x itself.\n")
 DECLARE_OBJECT_PTR (LocalNeighborsDifferencesVMatrix)
VMat local_neighbors_differences (VMat source, int n_neighbors, bool concat=false, bool append_indexes=false)
 PLEARN_IMPLEMENT_OBJECT (MemoryVMatrix,"A VMatrix whose data is stored in memory.","The data can either be given directly by a Mat, or by another VMat that\n""will be precomputed in memory at build time.\n")
 MemoryVMatrix *.

 DECLARE_OBJECT_PTR (MemoryVMatrix)
 PLEARN_IMPLEMENT_OBJECT (MovingAverageVMatrix,"Perform moving average of given columns","The user specifies one or more columns and for each such <column-name>\n""a moving average window size: a ma<windowsize>-<column-name> column is\n""created which will contain at row t the moving average from row t-<windowsize>+1\n""to t inclusively of <column-name>.\n")
 DECLARE_OBJECT_PTR (MovingAverageVMatrix)
 Declares a few other classes and functions related to this class.

 PLEARN_IMPLEMENT_OBJECT (MultiInstanceVMatrix,"Virtual Matrix for a multi instance dataset","In a multi-instance dataset examples come in 'bags' with only one target label\n""for each bag. This class is built upon a source text file that describes such\n""a dataset (see the help on the 'filename' option for format details).\n""The resulting VMatrix shows the following structure in its rows, with\n""all the rows of a bag being consecutive. Each row represents an instance and has:\n"" - the input features for the instance\n"" - the bag's source_targetsize target values (repeated over bag instances)\n"" - a bag signal integer that identifies the beginning and end of the bag:\n"" 1 means the first instance of the bag\n"" 2 means the last instance of the bag\n"" 3 is for a bag with a single row (= 1+2)\n"" 0 is for intermediate instances.\n""The targetsize of the VMatrix is automatically set to source_targetsize+1\n""since the bag_signal is included (appended) in the target vector\n")
 DECLARE_OBJECT_PTR (MultiInstanceVMatrix)
 PLEARN_IMPLEMENT_OBJECT (OneHotVMatrix,"ONE LINE DESC","NO HELP")
 OneHotVMatrix *.

VMat onehot (VMat d, int nclasses, real cold_value=0.0, real hot_value=1.0)
 DECLARE_OBJECT_PTR (OneHotVMatrix)
 PLEARN_IMPLEMENT_OBJECT (PairsVMatrix,"ONE LINE DESC","NO HELP")
 PairsVMatrix *.

 DECLARE_OBJECT_PTR (PairsVMatrix)
 PLEARN_IMPLEMENT_OBJECT (PLearnerOutputVMatrix,"Use a PLearner (or a set of them) to transform the input part of a data set into the learners outputs","The input part of this VMatrix is obtained from the input part an original data set on which\n""one or more PLearner's computeOutput method is applied. The other columns of the original data set\n""are copied as is. Optionally, the raw input can be copied as well\n""always in the input part of the new VMatrix. The order of the elements of a new row is as follows:\n"" - the outputs of the learners (concatenated) when applied on the input part of the original data,\n"" - optionally, the raw input part of the original data,\n"" - all the non-input columns of the original data.")
 DECLARE_OBJECT_PTR (PLearnerOutputVMatrix)
 PLEARN_IMPLEMENT_OBJECT (PrecomputedVMatrix,"VMatrix that caches (pre-computes on disk) the content of a source vmatrix","This sub-class of SourceVMatrix pre-computes the content of a source vmatrix\n""in a dmat or pmat file. The name of the disk file is obtained from the metadatadir option\n""followed by precomp.dmat or precomp.pmat")
 DECLARE_OBJECT_PTR (PrecomputedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (ProcessingVMatrix,"ONE LINE DESCRIPTION","MULTI-LINE \nHELP")
 DECLARE_OBJECT_PTR (ProcessingVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RangeVMatrix,"ONE LINE DESC","NO HELP")
 Range VMatrix *.

VMat vrange (real start, real end, real step=1.0)
 DECLARE_OBJECT_PTR (RangeVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RegularGridVMatrix,"ONE LINE DESCR","RegularGridVMatrix represents the list of coordinates along a regularly spaced grid.")
 DECLARE_OBJECT_PTR (RegularGridVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RemapLastColumnVMatrix,"ONE LINE DESC","NO HELP")
 RemapLastColumnVMatrix *.

VMat remapLastColumn (VMat d, Mat mapping)
VMat remapLastColumn (VMat d, real if_equals_value, real then_value=1.0, real else_value=-1.0)
 DECLARE_OBJECT_PTR (RemapLastColumnVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RemoveDuplicateVMatrix,"A VMatrix that removes any duplicated entry in its source VMat.","")
 DECLARE_OBJECT_PTR (RemoveDuplicateVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RemoveRowsVMatrix,"ONE LINE DESC","NO HELP")
 RemoveRowsVMatrix *.

VMat removeRows (VMat d, Vec rownums)
VMat removeRow (VMat d, int rownum)
 DECLARE_OBJECT_PTR (RemoveRowsVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RepeatSplitter,"Repeat a given splitter a certain amount of times, with the possibility to\n""shuffle randomly the dataset each time","NO HELP")
 DECLARE_OBJECT_PTR (RepeatSplitter)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (RowBufferedVMatrix,"A base class for VMatrices that keep the last row(s) in a buffer for faster access.","")
 RowBufferedVMatrix *.

 DECLARE_OBJECT_PTR (RowBufferedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (RowsSubVMatrix,"ONE LINE DESCRIPTION","MULTI-LINE \nHELP")
 DECLARE_OBJECT_PTR (RowsSubVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SelectColumnsVMatrix,"Selects variables from a source matrix according to given vector of indices.","Alternatively, the variables can be given by their names.")
 SelectColumnsVMatrix *.

 DECLARE_OBJECT_PTR (SelectColumnsVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SelectRowsFileIndexVMatrix,"ONE LINE DESC","NO HELP")
 SelectRowsFileIndexVMatrix *.

 DECLARE_OBJECT_PTR (SelectRowsFileIndexVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SelectRowsVMatrix,"VMat class that selects samples from a source matrix according to given vector of indices.","")
 SelectRowsVMatrix *.

 DECLARE_OBJECT_PTR (SelectRowsVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SequentialSplitter,"ONE LINE DESCR","SequentialSplitter implements several splits, TODO: Comments")
 DECLARE_OBJECT_PTR (SequentialSplitter)
 PLEARN_IMPLEMENT_OBJECT (ShiftAndRescaleVMatrix,"ONE LINE DESCR","ShiftAndRescaleVMatrix allows to shift and scale the first n_inputs columns of an underlying_vm.\n")
 ShiftAndRescaleVMatrix *.

 DECLARE_OBJECT_PTR (ShiftAndRescaleVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SortRowsVMatrix,"Sort the samples of a VMatrix according to one (or more) given columns.","The implementation is not efficient at all, feel free to improve it !")
 SortRowsVMatrix *.

 DECLARE_OBJECT_PTR (SortRowsVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SourceVMatrix,"Super-class for VMatrices that point to another one (the source vmatrix)","")
 DECLARE_OBJECT_PTR (SourceVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SourceVMatrixSplitter,"Returns the splits of an underlying splitter, seen by a SourceVMatrix.","")
 PLEARN_IMPLEMENT_OBJECT (SparseVMatrix,"ONE LINE DESC","NO HELP")
 SparseVMatrix *.

 DECLARE_OBJECT_PTR (SparseVMatrix)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Splitter,"ONE LINE DESCR","NO HELP")
void split (VMat d, real test_fraction, VMat &train, VMat &test, int i, bool use_all)
Vec randomSplit (VMat d, real test_fraction, VMat &train, VMat &test)
void split (VMat d, real validation_fraction, real test_fraction, VMat &train, VMat &valid, VMat &test, bool do_shuffle)
 Splits the dataset d into 3 subsets.

void randomSplit (VMat d, real validation_fraction, real test_fraction, VMat &train, VMat &valid, VMat &test)
 Splits the dataset d into 3 subsets (similar to above).
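
A short sketch of a three-way split (illustration only; 'd' is a placeholder VMat and the fractions are arbitrary):

    VMat train, valid, test;
    // hypothetical 70% / 15% / 15% split, with shuffling
    split(d, 0.15, 0.15, train, valid, test, true /*do_shuffle*/);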

 DECLARE_OBJECT_PTR (Splitter)
 PLEARN_IMPLEMENT_OBJECT (StrTableVMatrix,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (StrTableVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SubInputVMatrix,"A VMat that only takes part of the input of its source VMat.","This can be useful for instance to only take the first k components\n""after applying some dimensionality reduction method.")
 DECLARE_OBJECT_PTR (SubInputVMatrix)
 PLEARN_IMPLEMENT_OBJECT (SubVMatrix,"ONE LINE DESCR","NO HELP")
 SubVMatrix *.

 DECLARE_OBJECT_PTR (SubVMatrix)
 PLEARN_IMPLEMENT_OBJECT (TemporalHorizonVMatrix,"ONE LINE DESCR"," VMat class that delay the last entries of an underlying VMat by a certain horizon.\n")
 TemporalHorizonVMatrix *.

 DECLARE_OBJECT_PTR (TemporalHorizonVMatrix)
 PLEARN_IMPLEMENT_OBJECT (TestInTrainSplitter,"A splitter that adds the test points given by another splitter into the training set.","The underlying splitter should return train / test sets of constant size.\n""For instance, if the underlying splitter returns 3 splits of (train,test)\n""pairs with size 2000 and 500, this splitter will return:\n"" - for 'percentage_added' == 5%, 15 splits of size 2100 and 100, with each\n"" test point appearing once and only once in a train set and a test set\n"" - for 'percentage_added' == 20%, 6 splits of size 2400,400 and 2400,100, with\n"" each test point appearing once or more in a train set, and only once in a\n"" test set (note that the test points appearing more than once in a train set\n"" will be those at the beginning of the test sets returned by the underlying\n"" splitter)\n")
 DECLARE_OBJECT_PTR (TestInTrainSplitter)
VMat thresholdVMat (VMat d, real threshold, real cold_value=0.0, real hot_value=1.0, bool gt_threshold=true)
 PLEARN_IMPLEMENT_OBJECT (ToBagSplitter,"A Splitter that makes any existing splitter operate on bags only.","The dataset provided must contain bag information, as described in\n""SumOverBagsVariable")
 DECLARE_OBJECT_PTR (ToBagSplitter)
 PLEARN_IMPLEMENT_OBJECT (TrainTestBagsSplitter,"Splits a dataset in two parts","TrainTestBagsSplitter implements a single split of the dataset into\n""a training set and a test set (the test part being the last few samples of the dataset)\n""Optionally a third set is provided which is the training set itself (in order to test on it)\n")
 DECLARE_OBJECT_PTR (TrainTestBagsSplitter)
 PLEARN_IMPLEMENT_OBJECT (TrainTestSplitter,"ONE LINE DESCR","TrainTestSplitter implements a single split of the dataset into a training-set and a test-set (the test part being the last few samples of the dataset)")
 DECLARE_OBJECT_PTR (TrainTestSplitter)
 PLEARN_IMPLEMENT_OBJECT (TrainValidTestSplitter,"This splitter will basically return [Train+Valid, Test].","The train test returned by the splitter is formed from the first n_train+n_valid\n""samples in the dataset. The other samples are returned in the test set.\n""The validation and test sets (given by the samples after the n_train-th one) can\n""be shuffled in order to get a different validation and test sets at each split.\n""However, the train set (the first n_train samples) remains fixed.")
 DECLARE_OBJECT_PTR (TrainValidTestSplitter)
 PLEARN_IMPLEMENT_OBJECT (TransposeVMatrix,"A VMatrix that sees the transpose of another VMatrix.","")
 DECLARE_OBJECT_PTR (TransposeVMatrix)
 PLEARN_IMPLEMENT_OBJECT (UniformizeVMatrix,"ONE LINE DESC","NO HELP")
 UniformizeVMatrix *.

 DECLARE_OBJECT_PTR (UniformizeVMatrix)
 PLEARN_IMPLEMENT_OBJECT (UniformVMatrix,"ONE LINE DESC","NO HELP")
 Uniform VMatrix *.

 DECLARE_OBJECT_PTR (UniformVMatrix)
 PLEARN_IMPLEMENT_OBJECT (UpsideDownVMatrix,"ONE LINE DESCRIPTION","MULTI-LINE \nHELP")
 DECLARE_OBJECT_PTR (UpsideDownVMatrix)
 PLEARN_IMPLEMENT_OBJECT (VecExtendedVMatrix,"ONE LINE DESC","NO HELP")
 VecExtendedVMatrix *.

 DECLARE_OBJECT_PTR (VecExtendedVMatrix)
template<> void deepCopyField (VMat &field, CopiesMap &copies)
VMat loadAsciiAsVMat (const string &filename)
 Load an ASCII file and return the corresponding VMat (this will be a MemoryVMatrix, since the entire file is loaded in memory).
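
A small usage sketch (illustration only; the file name is hypothetical, and computeMean is one of the statistics functions listed below):

    VMat data = loadAsciiAsVMat("mydata.amat");  // hypothetical file name
    Vec meanvec;
    computeMean(data, meanvec);                  // column-wise mean of the loaded data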

 DECLARE_OBJECT_PP (VMat, VMatrix)
void operator<< (const Mat &dest, const VMatrix &src)
void operator>> (const VMatrix &src, const Mat &dest)
void operator<< (const Mat &dest, const VMat &src)
void operator>> (const VMat &src, const Mat &dest)
Array< VMat > operator & (const VMat &d1, const VMat &d2)
ostream & operator<< (ostream &out, const VMat &m)
void computeWeightedMean (Vec weights, VMat d, Vec &meanvec)
 Statistics functions *.

void computeRange (VMat d, Vec &minvec, Vec &maxvec)
void computeRowMean (VMat d, Vec &meanvec)
void computeMean (VMat d, Vec &meanvec)
Mat computeBasicStats (VMat m)
void computeStats (VMat m, VecStatsCollector &st, bool report_progress)
TVec< StatsCollector > computeStats (VMat m, int maxnvalues, bool report_progress)
 Returns the unconditional statistics of each field.

PP< ConditionalStatsCollector > computeConditionalStats (VMat m, int condfield, TVec< RealMapping > ranges)
 returns the co-occurrence statistics conditioned on the given field

Array< Mat > computeConditionalMeans (VMat trainset, int targetsize, Mat &basic_stats)
void computeMeanAndVariance (VMat d, Vec &meanvec, Vec &variancevec)
void computeInputMean (VMat d, Vec &meanvec)
 Computes the (possibly weighted) mean and covariance of the input part of the dataset.

void computeInputMeanAndCovar (VMat d, Vec &meanvec, Mat &covarmat)
void computeInputMeanAndVariance (VMat d, Vec &meanvec, Vec &var)
void computeWeightedMeanAndCovar (Vec weights, VMat d, Vec &meanvec, Mat &covarmat)
real computeWeightedMeanAndCovar (VMat d, Vec &meanvec, Mat &covarmat, real threshold)
 Last column of d is supposed to contain the weight for each sample. Samples with a weight less than or equal to threshold will be ignored (returns the sum of all the weights actually used).

void computeMeanAndCovar (VMat m, Vec &meanvec, Mat &covarmat, ostream &logstream)
 computes empirical mean and covariance in a single pass

void computeMeanAndStddev (VMat d, Vec &meanvec, Vec &stddevvec)
void autocorrelation_function (const VMat &data, Mat &acf)
VMat normalize (VMat d, Vec meanvec, Vec stddevvec)
VMat normalize (VMat d, int inputsize, int ntrain)
 Here, mean and stddev are estimated on d.subMat(0,0,ntrain,inputsize).
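
A one-line sketch (illustration only; 'd', the inputsize of 10 and the 1000 training rows are placeholders):

    // mean and stddev are estimated on d.subMat(0,0,1000,10), then used to normalize d
    VMat normalized = normalize(d, 10 /*inputsize*/, 1000 /*ntrain*/);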

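A sketch of how the statistics and normalization helpers above combine; d, inputsize and ntrain are assumed given:

    VMat standardize(VMat d)
    {
        Vec meanvec, stddevvec;
        computeMeanAndStddev(d, meanvec, stddevvec);  // column-wise mean and stddev of d
        return normalize(d, meanvec, stddevvec);      // view with standardized columns
    }

    // Or estimate the statistics only on d.subMat(0,0,ntrain,inputsize),
    // as the overload documented above does:
    // VMat n = normalize(d, inputsize, ntrain);
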
VMat grep (VMat d, int col, Vec values, bool exclude)
map< real, int > countOccurencesInColumn (VMat m, int col)
 returns a map mapping all different values appearing in column col to their number of occurrences

map< real, TVec< int > > indicesOfOccurencesInColumn (VMat m, int col)
 returns a map mapping all different values appearing in column col to a vector of the corresponding row indices in the VMat (this proceeds in 2 passes, first calling countOccurencesInColumn to allocate the exact memory)

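A sketch of the two occurrence-counting helpers above; m and col are assumed given:

    map<real, int> counts = countOccurencesInColumn(m, col);
    map<real, TVec<int> > rows = indicesOfOccurencesInColumn(m, col);
    for (map<real, int>::const_iterator it = counts.begin(); it != counts.end(); ++it)
        cout << "value " << it->first << " occurs " << it->second << " times" << endl;
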
VMat grep (VMat d, int col, Vec values, const string &indexfile, bool exclude)
VMat filter (VMat d, const string &indexfile)
VMat shuffle (VMat d)
 returns a SelectRowsVMatrix that has d's rows shuffled

VMat bootstrap (VMat d, bool reorder, bool norepeat)
 returns a SelectRowsVMatrix that has d's rows bootstrapped (sample with replacement and optionally re-ordered).

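Both helpers above return row-selection views rather than copies of the data. A short sketch, assuming a VMat d:

    VMat shuffled  = shuffle(d);                 // d's rows in a random order
    VMat resampled = bootstrap(d, true, false);  // sampled with replacement, re-ordered
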
Mat transposeProduct (VMat m)
 computes M'.M

Mat transposeProduct (VMat m1, VMat m2)
 computes M1'.M2

Vec transposeProduct (VMat m1, Vec v2)
 computes M1'.V2

Mat productTranspose (VMat m1, VMat m2)
 computes M1.M2'

Mat product (Mat m1, VMat m2)
 computes M1.M2

VMat transpose (VMat m1)
 returns M1'

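A sketch of the matrix helpers above; m, m1, m2 are assumed to be VMats and v2 a Vec of compatible sizes:

    Mat gram  = transposeProduct(m);        // M'M
    Mat cross = transposeProduct(m1, m2);   // M1'M2
    Vec xty   = transposeProduct(m1, v2);   // M1'v2
    Mat outer = productTranspose(m1, m2);   // M1 M2'
    VMat mt   = transpose(m1);              // a view presenting M1'
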
real linearRegression (VMat inputs, VMat outputs, real weight_decay, Mat theta_t, bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY, real &sum_squared_Y, bool return_squared_loss, int verbose_every, bool cholesky)
Mat linearRegression (VMat inputs, VMat outputs, real weight_decay)
 Version that does all the memory allocations of XtX, XtY and theta_t. Returns theta_t.

real weightedLinearRegression (VMat inputs, VMat outputs, VMat gammas, real weight_decay, Mat theta_t, bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY, real &sum_squared_Y, real &sum_gammas, bool return_squared_loss, int verbose_every, bool cholesky)
 Linear regression where each input point is given a different importance weight (the gammas); returns weighted average of squared loss.

Mat weightedLinearRegression (VMat inputs, VMat outputs, VMat gammas, real weight_decay)
 Version that does all the memory allocations of XtX, XtY and theta_t. Returns theta_t.

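A sketch of the regression helpers above, using the convenience overloads that allocate XtX, XtY and theta_t internally; inputs, outputs, gammas and weight_decay are assumed given:

    Mat theta_t  = linearRegression(inputs, outputs, weight_decay);
    Mat theta_tw = weightedLinearRegression(inputs, outputs, gammas, weight_decay);
    // A prediction is then obtained by applying theta_t to the extended
    // input (1, x), i.e. with a leading 1 for the bias term.
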
VMat rebalanceNClasses (VMat inputs, int nclasses, const string &filename)
void fullyRebalance2Classes (VMat inputs, const string &filename, bool save_indices)
 Rebalance a 2-class VMat so as to keep all the examples of the dominant class.

VMat temporalThreshold (VMat distr, int threshold_date, bool is_before, int yyyymmdd_col)
VMat temporalThreshold (VMat distr, int threshold_date, bool is_before, int yyyy_col, int mm_col, int dd_col)
void correlations (const VMat &x, const VMat &y, Mat &r, Mat &pvalues)
 Compute the correlations between each of the columns of x and each of the columns of y.

void computeNearestNeighbors (VMat dataset, Vec x, TVec< int > &neighbors, int ignore_row)
VMat normalize (VMat d, int inputsize)
time_t getDateOfCode (const string &codefile)
 PLEARN_IMPLEMENT_OBJECT (VMatLanguage,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_OBJECT (PreprocessingVMatrix,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (VMatLanguage)
 DECLARE_OBJECT_PTR (PreprocessingVMatrix)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (VMatrix,"ONE LINE DESCR","NO HELP")
string getHost ()
int getPid ()
string getUser ()
 DECLARE_OBJECT_PTR (VMatrix)
 PLEARN_IMPLEMENT_OBJECT (VMatrixFromDistribution,"A VMatrix built from sampling a distribution","VMatrixFromDistribution implements a VMatrix whose data rows are drawn from a distribution\n""or that contains the density or log density sampled on a grid (depending on \"mode\").\n""The matrix is computed in memory at build time\n")
 PLEARN_IMPLEMENT_OBJECT (VVec,"ONE LINE DESCR","NO HELP")
void operator>> (const VVec &vv, const Vec &v)
void operator<< (const VVec &vv, const Vec &v)
void operator<< (const Vec &v, const VVec &vv)
void operator>> (const Vec &v, const VVec &vv)
 PLEARN_IMPLEMENT_OBJECT (VVMatrix,"A VMat that reads a '.vmat' file.","")
 DECLARE_OBJECT_PTR (VVMatrix)
 PLEARN_IMPLEMENT_OBJECT (YMDDatedVMatrix,"ONE LINE DESC","NO HELP")
 DECLARE_OBJECT_PTR (YMDDatedVMatrix)
 PLEARN_IMPLEMENT_OBJECT (AdaBoost,"AdaBoost boosting algorithm for TWO-CLASS classification","Given a classification weak-learner, this algorithm \"boosts\" it in\n""order to obtain a much more powerful classification algorithm.\n""The classifier is two-class, returning 0 or 1, or a number between 0 and 1\n""(in that case the user can set the 'pseudo_loss_adaboost' option, which\n""computes a more precise notion of error taking into account the precise\n""value outputted by the soft classifier).\n""The nstages option from PLearner is used to specify the desired\n""number of boosting rounds (but the algorithm can stop earlier if\n""the next weak learner is unable to unable to make significant progress.\n")
 DECLARE_OBJECT_PTR (AdaBoost)
 PLEARN_IMPLEMENT_OBJECT (ClassifierFromDensity,"A classifier built from density estimators using Bayes' rule.","ClassifierFromDensity allows building a classifier\n""by building one density estimator for each class, \n""and using Bayes' rule to combine them. \n")
 DECLARE_OBJECT_PTR (ClassifierFromDensity)
 PLEARN_IMPLEMENT_OBJECT (MultiInstanceNNet,"Multi-instance feedforward neural network for probabilistic classification","The data has the form of a set of input vectors x_i associated with a single\n""label y. Each x_i is an instance and the overall set of instance is called a bag.\n""We don't know which of the inputs is responsible for the label, i.e.\n""there are hidden (not observed) labels y_i associated with each of the inputs x_i.\n""We also know that y=1 if at least one of the y_i is 1, otherwise y=0, i.e.\n"" y = y_1 or y_2 or ... y_m\n""In terms of probabilities, it means that\n"" P(Y=0|x_1..x_m) = \\prod_{i=1}^m P(y_i=0|x_i)\n""which determines the likelihood of the observation (x_1...x_m,y).\n""The neural network implements the computation of P(y_i=1|x_i). The same\n""model is assumed for all instances in the bag. The number of instances is variable but\n""bounded a-priori (max_n_instances). The gradient is computed for a whole bag\n""at a time. The architectural parameters and hyper-parameters of the model\n""are otherwise the same as for the generic NNet class.\n""The bags within each data set are specified with a 2nd target column\n""(the first column is 0, 1 or missing; it should not be missing for the\n""last column of the bag). The second target column should be 0,1,2, or 3:\n"" 1: first row of a bag\n"" 2: last row of a bag\n"" 3: simultaneously first and last, there is only one row in this bag\n"" 0: intermediate row of a bag\n""following the protocol expected by the SumOverBagsVariable.\n")
 DECLARE_OBJECT_PTR (MultiInstanceNNet)
 PLEARN_IMPLEMENT_OBJECT (ConditionalDensityNet,"Neural Network that Implements a Positive Random Variable Conditional Density","The input vector is used to compute parameters of an output density or output\n""cumulative distribution as well as output expected value. The ASSUMPTIONS\n""on the generating distribution P(Y|X) are the following:\n"" * Y is a single real value\n"" * 0 <= Y <= maxY, with maxY a known finite value\n"" * the density has a mass point at Y=0\n"" * the density is continuous for Y>0\n""The form of the conditional cumulative of Y is the following (separate_mass_points=false):\n"" P(Y<=y|theta) = (1/Z) (s(a) + sum_i u_i s(b_i) g(y,theta,i))\n""or for separate_mass_point=true:\n"" P(Y<=y|theta) = sigmoid(a) + (1-sigmoid(a))(sum_i u_i s(b_i) (g(y,theta,i)-g(0,theta,i))/Z\n""where s(z)=log(1+exp(z)) is the softplus function, and g is a monotonic function\n""in y whose first derivative and indefinite integral are known analytically.\n""The u_i are fixed from the unconditional distribution, such that s(b_i)=1 gives\n""approximately the right unconditional cumulative function (for infinite hardness):\n"" u_i = P(mu_{i-1}<Y<=mu_i) [unconditional].\n""The parameters theta of Y's distribution are (a,b_1,b_2,...,c_1,c_2,...,mu_1,mu_2,...),\n""which are obtained as the unconstrained outputs (no output transfer function) of a neural network.\n""The normalization constant Z is computed analytically easily: (separate_mass_point=false)\n"" Z = s(a) + sum_i u_i s(b_i) g(y,theta,i)\n""or for separate_mass_point=true:\n"" Z = sum_i s(b_i) (g(y,theta,i)-g(0,theta,i))\n""The current implementation considers two choices for g:\n"" - sigmoid_steps: g(y,theta,i) = sigmoid(h*s(c_i)*(y-mu_i)/(mu_{i+1}-mu_i))\n"" - sloped_steps: g(y,theta,i) = 1 + s(s(c_i)*(mu_i-y))-s(s(c_i)*(mu_{i+1}-y))/(s(c_i)*(mu_{i+1}-mu_i))\n""where h is the 'initial_hardness' option.\n""The density is analytically obtained using the derivative g' of g and\n""expected value is analytically obtained using the primitive G of g.\n""For the mass point at the origin,\n"" P(Y=0|theta) = P(Y<=0|theta).\n""(which is simply sigmoid(a) if separate_mass_point).\n""For positive values of Y: (separate_mass_point=false)\n"" p(y|theta) = (1/Z) sum_i s(b_i) g'(y,theta,i).\n""or for separate_mass_point=true:\n"" p(y|theta) = (1-sigmoid(a)) (1/Z) sum_i s(b_i) g'(y,theta,i).\n""And the expected value of Y is obtained using the primitive: (separate_mass_point=false)\n"" E[Y|theta] = (1/Z)*s(a)*M + sum_i u_i s(b_i)(G(M,theta,i)-G(0,theta,i)))\n""or for separate_mass_point=true:\n"" E[Y|theta] = M - ((sigmoid(a)-(1-sigmoid(a)*(1/Z)*sum_i u_i s(b_i)g(0,theta,i))*M + (1-sigmoid(a))*(1/Z)*sum_i u_i s(b_i)(G(M,theta,i)-G(0,theta,0)))\n""Training the model can be done by maximum likelihood (minimizing the log of the\n""density) or by minimizing the average of squared error (y-E[Y|theta])^2\n""or a combination of the two (with the max_likelihood_vs_squared_error_balance option).\n""The step 'centers' mu_i are initialized according to some rule, in the interval [0,maxY]:\n"" - uniform: at regular intervals in [0,maxY]\n"" - log-scale: as the exponential of values at regular intervals in [0,log(1+maxY)], minus 1.\n""The c_i and b_i are initialized to inverse_softplus(1), and a using the empirical unconditional P(Y=0).\n""For the output curve options (outputs_def='L',D','C', or 'S'), the lower_bound and upper_bound\n""options of PDistribution are automatically set to 0 and maxY respectively.\n")
 DECLARE_OBJECT_PTR (ConditionalDensityNet)
 PLEARN_IMPLEMENT_OBJECT (ConditionalDistribution,"ONE LINE DESCR","You must call setInput to set the condition before using the distribution")
 DECLARE_OBJECT_PTR (ConditionalDistribution)
 PLEARN_IMPLEMENT_OBJECT (ConditionalGaussianDistribution,"ConditionalGaussianDistribution is a gaussian distribution ""in which the parameters could be learned or specified manually.","")
 DECLARE_OBJECT_PTR (ConditionalGaussianDistribution)
 PLEARN_IMPLEMENT_OBJECT (Distribution,"This class is deprecated, use PDistribution instead.","NO HELP")
 DECLARE_OBJECT_PTR (Distribution)
 PLEARN_IMPLEMENT_OBJECT (EmpiricalDistribution,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (EmpiricalDistribution)
 PLEARN_IMPLEMENT_OBJECT (GaussianDistribution,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (GaussianDistribution)
 PLEARN_IMPLEMENT_OBJECT (GaussianProcessRegressor,"Basic version of Gaussian Process regression.","NO HELP")
 DECLARE_OBJECT_PTR (GaussianProcessRegressor)
 PLEARN_IMPLEMENT_OBJECT (GaussMix,"Gaussian mixture, either set non-parametrically or trained by EM.","GaussMix implements a mixture of L gaussians.\n""There are 4 possible parametrization types:\n"" - spherical : gaussians have covar matrix = diag(sigma). Parameter used : sigma.\n"" - diagonal : gaussians have covar matrix = diag(sigma_i). Parameters used : diags.\n"" - general : gaussians have an unconstrained covariance matrix.\n"" The user specifies the number 'n_eigen' of eigenvectors kept when\n"" decomposing the covariance matrix. The remaining eigenvectors are\n"" considered as having a fixed eigenvalue equal to the next highest\n"" eigenvalue in the decomposition.\n"" - factor : (not implemented!) as in the general case, the gaussians are defined\n"" with K<=D vectors (through KxD matrix 'V'), but these need not be\n"" orthogonal/orthonormal.\n"" The covariance matrix used will be V(t)V + psi with psi a D-vector\n"" (through parameter diags).\n""2 parameters are common to all 4 types :\n"" - alpha : the ponderation factor of the gaussians\n"" - mu : their centers\n")
 DECLARE_OBJECT_PTR (GaussMix)
 PLEARN_IMPLEMENT_OBJECT (HistogramDistribution,"Represents and possibly learns (using a smoother) a univariate distribution as a histogram.","This class represents a univariate distribution with a set of bins and their densities\n""The bins can be fixed or learned by a Binner object, and the densities\n""can be learned from a training set. The empirical densities in the bins can also\n""be smoothed with a Smoother (which is a general purpose univariate function\n""smoothing mechanism. If the data is not univariate, then only the LAST column\n""is considered. The smoother can either smooth the density or the survival fn.\n")
 DECLARE_OBJECT_PTR (HistogramDistribution)
 PLEARN_IMPLEMENT_OBJECT (LocallyWeightedDistribution,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (LocallyWeightedDistribution)
 PLEARN_IMPLEMENT_OBJECT (ManifoldParzen2,"ManifoldParzen implements a manifold Parzen.","")
void computeNearestNeighbors (Mat dataset, Vec x, Mat &neighbors, int ignore_row=-1)
void computePrincipalComponents (Mat dataset, Vec &eig_values, Mat &eig_vectors)
void computeLocalPrincipalComponents (Mat &dataset, int which_pattern, Mat &delta_neighbors, Vec &eig_values, Mat &eig_vectors, Vec &mean)
 DECLARE_OBJECT_PTR (ManifoldParzen2)
 PLEARN_IMPLEMENT_OBJECT (PConditionalDistribution,"(THIS CLASS IS DEPRECATED, use PDistribution instead). Conditional distribution or conditional density model P(Y|X)","Abstract superclass for conditional distribution classes.\n""It is a subclass of PDistribution, with the added method\n"" setInput(Vec& input)\n""to set X, that must be called before PDistribution methods such as\n""log_density,cdf,survival_fn,expectation,variance,generate.\n""The PDistribution option output_defs must be set to specify\n""what the PLearner method computeOutput will produce. If it is\n""set to 'l' (log_density), 'd' (density), 'c' (cdf), or 's' (survival_fn)\n""then the input part of the data should contain both the input X and\n""the 'target' Y values (targetsize()==0). Instead, if output_defs is set to\n"" 'e' (expectation) or 'v' (variance), then the input part of the data should\n""contain only X, while the target part should contain Y\n")
 PLEARN_IMPLEMENT_OBJECT (PDistribution,"PDistribution is the base class for distributions.\n","PDistributions derive from PLearner (as some of them may be fitted to data with train()),\n""but they have additional methods allowing for ex. to compute density or generate data points.\n""The default implementations of the learner-type methods for computing outputs and costs work as follows:\n"" - the outputs_def option allows to choose which outputs are produced\n"" - cost is a vector of size 1 containing only the negative log-likelihood (NLL), i.e. -log_density.\n""A PDistribution may be conditional P(Y|X), if the option 'conditional_flags' is set. If it is the case,\n""the input should always be made of both the 'input' part (X) and the 'target' part (Y), even if the\n""output may not need to use the Y part. The exception is when computeOutput() needs to be called\n""successively with the same value of X: in this case, after a first call with both X and Y, one may\n""only provide Y as input, and X will be assumed to be unchanged.\n")
 PLEARN_IMPLEMENT_OBJECT (SpiralDistribution,"Generates samples drawn from a 2D spiral","SpiralDistribution is a generative model that generates 2D (x,y) samples in the following manner:\n"" t ~ uniform([tmin, tmax])^uniformity \n"" x = lambda*t*sin(alpha*t) + N(0,sigma) \n"" y = lambda*t*cos(alpha*t) + N(0,sigma) \n")
 DECLARE_OBJECT_PTR (SpiralDistribution)
 PLEARN_IMPLEMENT_OBJECT (UnconditionalDistribution,"This class is a simplified version of PDistribution for unconditional distributions.","Its only goal is to hide the conditional side of PDistributions to make it simpler.")
 DECLARE_OBJECT_PTR (UnconditionalDistribution)
 PLEARN_IMPLEMENT_OBJECT (UniformDistribution,"Implements uniform distribution over intervals.","Currently, only very few methods are implemented.\n""For example, to sample points in 2D in [a,b] x [c,d], use\n"" min = [a c]\n"" max = [b d]\n")
 DECLARE_OBJECT_PTR (UniformDistribution)
 PLEARN_IMPLEMENT_OBJECT (AddCostToLearner,"A PLearner that just adds additional costs to another PLearner.","In addition, this learner can be used to compute costs on bags instead of\n""individual samples, using the option 'compute_costs_on_bags'.\n""\n""Feel free to make this class evolve by adding new costs, or rewriting it\n""in a better fashion, because this one is certainly not perfect.\n""To use the lift cost, do the following:\n"" (1) add a cost of type 1 to this object's option 'costs'\n"" (2) replace the template_stats_collector of your PTester with one like this:\n"" template_stats_collector =\n"" LiftStatsCollector (\n"" lift_fraction = 0.1 ;\n"" output_column = \"lift_output\" ;\n"" opposite_lift = 0 ; # to set to 1 if we want to optimize it\n"" sign_trick = 1 ;\n"" (3) ask for the lift in the stats:\n"" statnames = [\n"" \"E[test1.LIFT]\"\n"" \"E[test1.LIFT_MAX]\"\n"" ];")
 DECLARE_OBJECT_PTR (AddCostToLearner)
 PLEARN_IMPLEMENT_OBJECT (EmbeddedLearner,"Wraps an underlying learner","EmbeddedLearner implements nothing but forwarding \n""calls to an underlying learner. It is typically used as\n""baseclass for learners that are built on top of another learner")
 DECLARE_OBJECT_PTR (EmbeddedLearner)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (Learner,"DEPRECATED CLASS: Derive from PLearner instead","NO HELP")
void prettyprint_test_results (ostream &out, const Learner &learner, const Vec &results)
 PLEARN_IMPLEMENT_OBJECT (NeighborhoodSmoothnessNNet,"Feedforward neural network whose hidden units are smoothed according to input neighborhood\n","TODO")
 DECLARE_OBJECT_PTR (NeighborhoodSmoothnessNNet)
 PLEARN_IMPLEMENT_OBJECT (NeuralNet,"DEPRECATED: Use NNet instead","NO HELP")
 DECLARE_OBJECT_PTR (NeuralNet)
 PLEARN_IMPLEMENT_OBJECT (NNet,"Ordinary Feedforward Neural Network with 1 or 2 hidden layers","Neural network with many bells and whistles...")
 DECLARE_OBJECT_PTR (NNet)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (PLearner,"The base class for all PLearn learners.","")
 PLEARN_IMPLEMENT_OBJECT (SelectInputSubsetLearner,"PLearner which selects a subset of the inputs for an embedded learner.","This learner class contains an embedded learner for which it selects a subset of the inputs.\n""The subset can be either selected explicitly or chosen randomly (the user chooses what fraction\n""of the original inputs will be selected).")
 DECLARE_OBJECT_PTR (SelectInputSubsetLearner)
 PLEARN_IMPLEMENT_OBJECT (StackedLearner,"Implements stacking, that combines two levels of learner, the 2nd level using the 1st outputs as inputs","Stacking is a generic strategy in which two levels (or more, recursively) of learners\n""are combined. The lower level may have one or more learners, and they may be trained\n""on the same or different data from the upper level single learner. The outputs of the\n""1st level learners are concatenated and serve as inputs to the second level learner.\n""IT IS ASSUMED THAT ALL BASE LEARNERS HAVE THE SAME NUMBER OF INPUTS AND OUTPUTS\n""There is also the option to copy the input of the 1st level learner as additional\n"" inputs for the second level (put_raw_input).\n""A Splitter can optionally be provided to specify how to split the data into\n""the training /validation sets for the lower and upper levels respectively\n")
 DECLARE_OBJECT_PTR (StackedLearner)
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (StatefulLearner,"PLearner with an internal state","PLearner with an internal state.\n""It replaces, for efficiency and compatibility reasons, SequentialLearner.")
 DECLARE_OBJECT_PTR (StatefulLearner)
 PLEARN_IMPLEMENT_OBJECT (TestingLearner,"ONE LINE DESCRIPTION","MULTI-LINE \nHELP")
 DECLARE_OBJECT_PTR (TestingLearner)
 PLEARN_IMPLEMENT_OBJECT (GraphicalBiText,"Probabilistically tag a bitext (english-other language) with senses from WordNet","NO HELP")
VMat loadToVMat (string file, string name, int window, int n_examples)
bool lessPair (pair< int, float > &p1, pair< int, float > &p2)
 PLEARN_IMPLEMENT_OBJECT (Dictionary,"Mapping string->int and int->string","MULTI LINE\nHELP")
 DECLARE_OBJECT_PTR (Dictionary)
 PLEARN_IMPLEMENT_OBJECT (TextSenseSequenceVMatrix,"VMat class that takes another VMat which contains a sequence (rows) ""of words/sense/POS triplets extracted from a corpus and implements a ""representation of a target word and its context.","")
void samePos (ProbabilitySparseMatrix &m1, ProbabilitySparseMatrix &m2, string m1name, string m2name)
void check_prob (ProbabilitySparseMatrix &pYX, string Yname, string Xname)
void check_prob (Set Y, const map< int, real > &pYx)
void update (ProbabilitySparseMatrix &pYX, ProbabilitySparseMatrix &nYX)
void updateAndClearCounts (ProbabilitySparseMatrix &pYX, ProbabilitySparseMatrix &nYX)
ostream & operator<< (ostream &out, ProbabilitySparseMatrix &pyx)
void print (ostream &out, ProbabilitySparseMatrix &pyx, Set Y, Set X)
void print (ostream &out, RowMapSparseMatrix< real > &m)
void print (ostream &out, const map< int, real > &vec, int size)
void print (ostream &out, const map< int, real > &vec)
void print (ostream &out, const map< int, real > &vec, Set V)
PStream & operator<< (PStream &out, const PPointableSet &pp_set)
PStream & operator>> (PStream &in, PPointableSet &pp_set)
PPointableSet * newSet ()
void merge (Set a, Set b, Set res)
void difference (Set a, Set b, Set res)
void intersection (Set a, Set b, Set res)
ostream & operator<< (ostream &out, Set s)
string trimWord (string word)
bool isLetter (char c)
bool isDigit (char c)
bool isAlpha (char c)
bool isLegalPunct (char c)
string stemWord (string &word)
string stemWord (string &word, int wn_pos)
char * cstr (string &str)
void removeDelimiters (string &s, string delim, string replace)
bool startsWith (string &base, string s)
void replaceChars (string &str, string char_to_replace, string replacing_char)
 PLEARN_IMPLEMENT_OBJECT (Experiment,"DEPRECATED: use PTester instead","")
 DECLARE_OBJECT_PTR (Experiment)
void DX_write_2D_fields (ostream &out, const string &basename, TVec< Mat > fields, real x0, real y0, real deltax, real deltay, TVec< string > fieldnames=TVec< string >())
void DX_write_2D_fields (ostream &out, const string &basename, Vec X, Vec Y, TVec< Mat > fields)
TVec< Mat > computeOutputFields (PP< PLearner > learner, Vec X, Vec Y)
TVec< Mat > computeOutputFields (PP< PLearner > learner, int nx, int ny, real x0, real y0, real deltax, real deltay)
TVec< Mat > computeOutputFieldsAutoRange (PP< PLearner > learner, VMat dataset, int nx, int ny, real &x0, real &y0, real &deltax, real &deltay, real extraspace=.10)
void computeXYPositions (VMat dataset, int nx, int ny, Vec &X, Vec &Y, real extraspace=.10)
void DX_create_dataset_outputs_file (const string &filename, PP< PLearner > learner, VMat dataset)
 Writes a file containing a field with the dataset positions; the "dset" field will be input -> target, outputs.

void DX_create_grid_outputs_file (const string &filename, PP< PLearner > learner, VMat dataset, int nx, int ny, bool include_datapoint_grid=false, real xmin=MISSING_VALUE, real xmax=MISSING_VALUE, real ymin=MISSING_VALUE, real ymax=MISSING_VALUE, real extraspace=.10)
 The "outputs" field will contain sample-grid inputs -> outputs, where the sample grid is a regular grid of nx*ny points in the range [xmin, xmax] x [ymin, ymax]. xmin, xmax, ymin and ymax may be left to MISSING_VALUE, in which case an automatic range is determined from the range of the points in the given dataset, extended by extraspace (e.g. .10 == 10%).

 PLEARN_IMPLEMENT_OBJECT (GenerateDecisionPlot,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (GenerateDecisionPlot)
 PLEARN_IMPLEMENT_OBJECT (Grapher,"ONE LINE DESCR","NO HELP")
real color (int colornum, real lightness)
 DECLARE_OBJECT_PTR (Grapher)
Mat compute_learner_outputs (PP< PLearner > learner, VMat inputs)
void determine_grid_for_dataset (VMat dataset, int nx, int ny, real &x0, real &y0, real &deltax, real &deltay, real extraspace)
double determine_density_integral_from_log_densities_on_grid (Vec log_densities, real deltax, real deltay)
Mat compute_learner_outputs_on_grid (PP< PLearner > learner, int nx, int ny, real x0, real y0, real deltax, real deltay)
 Returns a nx*ny x learner->outputsize() matrix of outputs corresponding to the nx*ny grid points.

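A sketch combining the grid helpers above: determine a grid covering the dataset, then evaluate a trained learner on every grid point; learner, dataset, nx and ny are assumed given:

    real x0, y0, deltax, deltay;
    determine_grid_for_dataset(dataset, nx, ny, x0, y0, deltax, deltay, 0.10);
    Mat outputs = compute_learner_outputs_on_grid(learner, nx, ny, x0, y0, deltax, deltay);
    // 'outputs' has nx*ny rows (one per grid point) and learner->outputsize() columns.
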
void DX_write_2D_data (ostream &out, const string &basename, Mat data)
 considers data to have 2d input (first 2 columns of data)

void DX_write_2D_data_for_grid (ostream &out, const string &basename, int nx, int ny, real x0, real y0, real deltax, real deltay, Mat data)
 data must have nx*ny rows and must correspond to values associated with the 2D positions of the grid (typically learner outputs on that grid)

void DX_save_2D_data (const string &filename, const string &basename, Mat data)
 considers data to have 2d input (first 2 columns of data)

void DX_save_2D_data_for_grid (const string &filename, const string &basename, int nx, int ny, real x0, real y0, real deltax, real deltay, Mat data)
 data must have nx*ny rows and must correspond to values associated with the 2D positions of the grid (typically learner outputs on that grid)

TVec< string > addprepostfix (const string &prefix, const TVec< string > &names, const string &postfix)
template<class T> TVec< T > operator & (const T &x, const TVec< T > &v)
 PLEARN_IMPLEMENT_OBJECT (PTester,"Evaluates the performance of a PLearner","The PTester class allows you to describe a typical learning experiment that you wish to perform, \n""as a training/testing of a learning algorithm on a particular dataset.\n""The splitter is used to obtain one or several (such as for k-fold) splits of the dataset \n""and training/testing is performed on each split. \n""Requested statistics are computed, and all requested results are written in an appropriate \n""file inside the specified experiment directory. \n")
 DECLARE_OBJECT_PTR (PTester)
 PLEARN_IMPLEMENT_OBJECT (ConstantRegressor,"PLearner that outputs a constant (input-independent) vector.\n","ConstantRegressor is a PLearner that outputs a constant (input-independent\n""but training-data-dependent) vector. It is a regressor (i.e. during training\n""the constant vector is chosen to minimize the (possibly weighted) average\n""of the training set targets. Let\n"" N = number of training examples,\n"" M = target size (= output size),\n"" y_{ij} = the jth target value of the ith training example,\n"" w_i = weight associated to the ith training example,\n""then the j-th component of the learned vector is\n"" (sum_{i=1}^N w_i * y_ij) / (sum_{i=1}^N w_i)\n""The output can also be set manually with the 'constant_output' vector option\n")
 DECLARE_OBJECT_PTR (ConstantRegressor)
 PLEARN_IMPLEMENT_OBJECT (LinearRegressor,"Ordinary Least Squares and Ridge Regression, optionally weighted","This class performs OLS (Ordinary Least Squares) and Ridge Regression, optionally on weighted\n""data, by solving the linear equation (X'W X + weight_decay*n_examples*I) theta = X'W Y\n""where X is the (n_examples x (1+inputsize)) matrix of extended inputs (with a 1 in the first column),\n""Y is the (n_example x targetsize), W is a diagonal matrix of weights (one per example)\n""{the identity matrix if weightsize()==0 in the training set}, and theta is the resulting\n""set of parameters. W_{ii} is obtained from the weight column of the training set, if any.\n""This column must have width 0 (no weight) or 1.\n""A prediction (computeOutput) is obtained from an input vector as follows:\n"" output = theta * (1,input)\n""The criterion that is minimized by solving the above linear system is the squared loss""plus squared norm penalty (weight_decay*sum_{ij} theta_{ij}^2) PER EXAMPLE. This class also measures""the ordinary squared loss (||output-theta||^2). The two costs are named 'mse+penalty' and 'mse' respectively.\n""Training has two steps: (1) computing X'W X and X' W Y, (2) solving the linear system.\n""The first step takes time O(n_examples*inputsize^2 + n_examples*inputsize*outputsize).\n""The second step takes time O(inputsize^3).\n""If train() is called repeatedly with different values of weight_decay, without intervening\n""calls to forget(), then the first step will be done only once, and only the second step\n""is repeated.\n")
 DECLARE_OBJECT_PTR (LinearRegressor)
 PLEARN_IMPLEMENT_OBJECT (PLS,"Partial Least Squares Regression (PLSR).","You can use this learner to perform regression, and / or dimensionality\n""reduction.\n""PLS regression assumes the target Y and the data X are linked through:\n"" Y = T.Q' + E\n"" X = T.P' + F\n""The underlying coefficients T (the 'scores') and the loading matrices\n""Q and P are seeked. It is then possible to compute the prediction y for\n""a new input x, as well as its score vector t (its representation in\n""lower-dimensional coordinates).\n""The available algorithms to perform PLS (chosen by the 'method' option) are:\n""\n"" ==== PLS1 ====\n""The classical PLS algorithm, suitable only for a 1-dimensional target. The\n""following algorithm is taken from 'Factor Analysis in Chemistry', with an\n""additional loop that (I believe) was missing:\n"" (1) Let X (n x p) = the centered and normalized input data\n"" Let y (n x 1) = the centered and normalized target data\n"" Let k be the number of components extracted\n"" (2) s = y\n"" (3) lx' = s' X, s = X lx (normalized)\n"" (4) If s has changed by more than 'precision', loop to (3)\n"" (5) ly = s' y\n"" (6) lx' = s' X\n"" (7) Store s, lx and ly in the columns of respectively T, P and Q\n"" (8) X = X - s lx', y = y - s ly, loop to (2) k times\n"" (9) Set W = (T P')^(+) T, where the ^(+) is the right pseudoinverse\n""\n"" ==== Kernel ====\n""The code implements a NIPALS-PLS-like algorithm, which is a so-called\n""'kernel' algorithm (faster than more classical implementations).\n""The algorithm, inspired from 'Factor Analysis in Chemistry' and above all\n""www.statsoftinc.com/textbook/stpls.html, is the following:\n"" (1) Let X (n x p) = the centered and normalized input data\n"" Let Y (n x m) = the centered and normalized target data\n"" Let k be the number of components extracted\n"" (2) Initialize A_0 = X'Y, M_0 = X'X, C_0 = Identity(p), and h = 0\n"" (3) q_h = largest eigenvector of B_h = A_h' A_h, found by the NIPALS method:\n"" (3.a) q_h = a (normalized) randomn column of B_h\n"" (3.b) q_h = B_h q_h\n"" (3.c) normalize q_h\n"" (3.d) if q_h has changed by more than 'precision', go to (b)\n"" (4) w_h = C_h A_h q_h, normalize w_h and store it in a column of W (p x k)\n"" (5) p_h = M_h w_h, c_h = w_h' p_h, p_h = p_h / c_h and store it in a column\n"" of P (p x k)\n"" (6) q_h = A_h' w_h / c_h, and store it in a column of Q (m x k)\n"" (7) A_h+1 = A_h - c_h p_h q_h'\n"" M_h+1 = M_h - c_h p_h p_h',\n"" C_h+1 = C_h - w_h p_h\n"" (8) h = h+1, and if h < k, go to (3)\n""\n""The result is then given by:\n"" - Y = X B, with B (p x m) = W Q'\n"" - T = X W, where T is the score (reduced coordinates)\n""\n""You can choose to have the score (T) and / or the target (Y) in the output\n""of the learner (default is target only, i.e. regression).")
 DECLARE_OBJECT_PTR (PLS)
 PLEARN_IMPLEMENT_OBJECT (EmbeddedSequentialLearner,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (EmbeddedSequentialLearner)
 Declares a few other classes and functions related to this class.

 PLEARN_IMPLEMENT_OBJECT (MovingAverage,"ONE LINE DESCR","NO HELP")
 PLEARN_IMPLEMENT_ABSTRACT_OBJECT (SequentialLearner,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (SequentialLearner)
 Declares a few other classes and functions related to this class.

 PLEARN_IMPLEMENT_OBJECT (SequentialModelSelector,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (SequentialModelSelector)
 Declares a few other classes and functions related to this class.

 PLEARN_IMPLEMENT_OBJECT (SequentialValidation,"The SequentialValidation class allows you to describe a typical ""sequential validation experiment that you wish to perform.","NO HELP")
 DECLARE_OBJECT_PTR (SequentialValidation)
 Declares a few other classes and functions related to this class.

 PLEARN_IMPLEMENT_OBJECT (PTester,"Manages a learning experiment, with training and estimation of generalization error.","The PTester class allows you to describe a typical learning experiment that you wish to perform, \n""as a training/testing of a learning algorithm on a particular dataset.\n""The splitter is used to obtain one or several (such as for k-fold) splits of the dataset \n""and training/testing is performed on each split. \n""Requested statistics are computed, and all requested results are written in an appropriate \n""file inside the specified experiment directory. \n""Statistics can be either specified entirely from the 'statnames' option, or built from\n""'statnames' and 'statmask'. For instance, one may set:\n"" statnames = [ \"NLL\" \"mse\" ]\n"" statmask = [ [ \"E[*]\" ] [ \"test1.*\" \"test2.*\" ] [ \"E[*]\" \"STDERROR[*]\" ] ]\n""and this will compute:\n"" E[test1.E[NLL]], STDERROR[test1.E[NLL]], E[test2.E[NLL]], STDERROR[test2.E[NLL]]\n"" E[test1.E[mse]], STDERROR[test1.E[mse]], E[test2.E[mse]], STDERROR[test2.E[mse]]\n")
 PLEARN_IMPLEMENT_OBJECT (TestMethod,"ONE LINE DESCR","NO HELP")
 DECLARE_OBJECT_PTR (TestMethod)
 PLEARN_IMPLEMENT_OBJECT (Train,"An easy PTester","Sometimes, we want to train a learner easily.\n""But plearn learner train does not allow the use of macro variables.\n""Train is the equivalent of plearn learner train, but macro variables are usable.\n""Example: plearn myTrain.plearn knn=12")
 PLEARN_IMPLEMENT_OBJECT (EntropyContrast,"Performs a EntropyContrast search","Detailed Description ")
 DECLARE_OBJECT_PTR (EntropyContrast)
Mat smartInitialization (VMat v, int n, real c, real regularization)
 PLEARN_IMPLEMENT_OBJECT (GaussianContinuum,"Learns a continuous (uncountable) Gaussian mixture with non-local parametrization","This learner implicitly estimates the density of the data through\n""a generalization of the Gaussian mixture model and of the TangentLearner\n""algorithm (see help on that class). The density is the fixed point of\n""a random walk {z_t} that follows the following transition probabilities:\n"" z_{t+1} sampled from a Gaussian associated with z_t, centered\n"" at z_t + mu(z_t), with covariance matrix S(z_t).\n""The semantic of that random walk is the following (and that is how\n""it will be estimated). Given a point z_t, the sample z_{t+1} represents\n""a 'near neighbor' of z_t. We assume that the density is smooth enough\n""that the cloud of 'near neighbors' around z_t can be modeled by a Gaussian.\n""The functions mu(.) and S(.) have globally estimated parameters (for example\n""using neural nets or linear functions of x, or linear functions of a basis).\n""Here we suppose that the eigenvalues of S(.) come from two groups:\n""the first group should correspond to locally estimated principal\n""directions of variations and there are no constraints on these eigenvalues\n""(except that they are positive), while the second group should correspond\n""to 'noise' directions, that have all the same value sigma2_noise\n""i.e. it is not necessary to explicitly model the directions of variations\n""(the eigenvectors) associated with the second group. In general we expect\n""sigma2_noise to be small compared to the first group eigenvalues, which\n""means that the Gaussians are flat in the corresponding directions, and\n""that the first group variations correspond to modeling a manifold near\n""which most of the data lie. Optionally, an embedding corresponding\n""to variations associated with the first group of eigenvalues can be learnt\n""by choosing for the architecture_type option a value of the form embedding_*.\n""Although the density is not available in closed form, it is easy (but maybe slow)\n""to sample from it: pick one of the training examples at random and then\n""follow the random walk (ideally, a long time). It is also possible in\n""principle to obtain a numerical estimate of the density at a point x,\n""by sampling enough random walk points around x.\n")
 DECLARE_OBJECT_PTR (GaussianContinuum)
 PLEARN_IMPLEMENT_OBJECT (Isomap,"Performs ISOMAP dimensionality reduction.","Be careful that when looking for the 'knn' nearest neighbors of a point x,\n""we consider all points from the training data D, including x itself if it\n""belongs to D. Thus, to obtain the same result as with the classical ISOMAP\n""algorithm, one should use one more neighbor.\n""Note also that when used out-of-sample, this will result in a different output\n""than an algorithm applying the same formula, but considering one less neighbor.\n")
 DECLARE_OBJECT_PTR (Isomap)
 PLEARN_IMPLEMENT_OBJECT (IsomapTangentLearner,"Tangent learning based on Isomap Kernel","MULTI-LINE \nHELP")
 PLEARN_IMPLEMENT_OBJECT (KernelPCA,"Kernel Principal Component Analysis","Perform PCA in a feature space phi(x), defined by a kernel K such that\n"" K(x,y) = < phi(x), phi(y) >\n")
 PLEARN_IMPLEMENT_OBJECT (KernelProjection,"Performs dimensionality reduction by learning eigenfunctions of a kernel.","")
 PLEARN_IMPLEMENT_OBJECT (KPCATangentLearner,"Tangent learning based on KPCA Kernel","MULTI-LINE \nHELP")
 DECLARE_OBJECT_PTR (KPCATangentLearner)
 PLEARN_IMPLEMENT_OBJECT (LLE,"Performs Locally Linear Embedding.","")
 PLEARN_IMPLEMENT_OBJECT (PCA,"Performs a Principal Component Analysis preprocessing (projecting on the principal directions).","This learner finds the empirical covariance matrix of the input part of\n""the training data, and learns to project its input vectors along the\n""principal eigenvectors of that matrix, optionally scaling by the inverse\n""of the square root of the eigenvalues (to obtained 'sphered', i.e.\n""Normal(0,I) data).\n""Alternative EM algorithms are provided, that may be useful when there is\n""a lot of data or the dimension is very high.\n")
 DECLARE_OBJECT_PTR (PCA)
 PLEARN_IMPLEMENT_OBJECT (SpectralClustering,"Spectral Clustering dimensionality reduction.","The current code only performs dimensionality reduction, and does not do\n""clustering.")
 DECLARE_OBJECT_PTR (SpectralClustering)
 PLEARN_IMPLEMENT_OBJECT (TangentLearner,"Learns local tangent plane of the manifold near which the data lie.","This learner models a manifold near which the data are supposed to lie.\n""The manifold is represented by a function which predicts a basis for the\n""tangent planes at each point x, given x in R^n. Let f_i(x) be the predicted i-th tangent\n""vector (in R^n). Then we will optimize the parameters that define the d functions f_i by\n""pushing the f_i so that they span the local tangent directions. Three criteria are\n""possible, according to the 'training_targets', 'normalize_by_neighbor_distance' and\n""'use_subspace_distance' option. The default criterion is the recommanded one, with\n"" training_targets='local_neighbors', normalize_by_neighbor_distance=1,\n""and use_subspace_distance=0 (it really did not work well in our experiments with\n""use_subspace_distance=1). This corresponds to the following cost function:\n"" sum_x sum_j min_w ||t(x,j) - sum_i w_i f_i(x)||^2 / ||t(x,j)||^2\n""where x is an example, t(x,j) is the difference vector between x and its j-th neighbor,\n""and the w_i are chosen freely for each j and x and correspond to the weights given to\n""each basis vector f_i(x) to obtain the projection of t(x,j) on the tangent plane.\n""More generally, if use_subspace_distance,\n"" criterion = min_{w,u} || sum_i w_i f_i - sum_j u_j t(x,j) ||^2\n"" under the constraint that ||w||=1.\n"" else\n"" criterion = sum_x sum_j min_w ||t(x,j) - sum_i w_i f_i(x)||^2 / ||t(x,j)||^2\n"" where the first sum is over training examples and w is a free d-vector,\n"" t(x,j) estimates local tangent directions based on near neighbors, and the denominator\n"" ||t(x,j)||^2 is optional (normalize_by_neighbor_distance). t(x,j)\n"" is defined according to the training_targets option:\n"" 'local_evectors' : local principal components (based on n_neighbors of x)\n"" 'local_neighbors': difference between x and its n_neighbors.\n""An additional criterion option that applies only to use_subspace_criterion=0 is\n""the orderered_vectors option, which applies a separate cost to each of the f_i:\n""the f_1 vector tries to make the projection of t(x,j) on f_1 close to t(x,j), while\n""the f_2 vector tries to make the projection of t(x,j) on the (f_1,f_2) basis close to t(x,j),\n""etc... i.e. the gradient on f_i is computed based on a cost that involves only\n""the projection on the first i vectors. This is analogous to principal component analysis:\n""the first vector tries to capture as much as possible of the variance, the second as much\n""as possible of the remaining variance, etc...\n""Different architectures are possible for the f_i(x) (architecture_type option):\n"" - multi_neural_network: one neural net per basis function\n"" - single_neural_network: single neural network with matrix output (one row per basis vector)\n"" - linear: F_{ij}(x) = sum_k A_{ijk} x_k\n"" - embedding_neural_network: the embedding function e_k(x) (for k-th dimension)\n"" is an ordinary neural network, and F_{ki}(x) = d(e_k(x))/d(x_i). This allows to\n"" output the embedding, instead of, or as well as, the tangent plane (output_type option).\n"" - embedding_quadratic: the embedding function e_k(x) (for k-th dimension)\n"" is a 2nd order polynomial of x, and F_{ki}(x) = d(e_k(x))/d(x_i). This allows to\n"" output the embedding, instead of, or as well as, the tangent plane (output_type option).\n")
 DECLARE_OBJECT_PTR (TangentLearner)
map< string, string > getModelAliases (const string &filename)
 reads a modelalias -> object_representation map from a model.aliases file

void train_and_test (const string &modelalias, string trainalias, vector< string > testaliases)
vector< string > getMultipleModelAliases (const string &model)
void cross_valid (const string &modelalias, string trainalias, int kval)
void use (const string &modelfile, const string &datasetalias)
void usage ()
int old_plearn_main (int argc, char **argv)
int plearn_main (int argc, char **argv)

Variables

PLearnInit _plearn_init_
const size_t PL_HASH_NOMBRES_MAGIQUES [256]
ostream * error_stream = &cerr
const string dbdir_name = ""
const char MissingString = '\0'
 A few constants for representing missing values.

const unsigned char MissingCharacter = (unsigned char)SCHAR_MIN
const signed char MissingSignedChar = (signed char)SCHAR_MIN
const short MissingShort = SHRT_MIN
const int MissingInt = INT_MIN
const float MissingFloat = MISSING_VALUE
const double MissingDouble = MISSING_VALUE
const PDate MissingDate
const int pl_dftbuflen = 4096
pl_stream_raw raw
pl_stream_clear_flags clear_flags
pl_stream_initiate initiate
pl_nullstreambuf null_streambuf
ostream nullout
 a null ostream: writing to it does nothing

istream nullin
 a null instream: reading from it does nothing

iostream nullinout
 a null iostream: reading/writing from/to it does nothing

const char DIGITsymbols [] = "0123456789"
const char ALPHAsymbols [] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const char * ORDINALS [] = {"d","nd","th","st",0}
const tRule rules []
const unsigned int Hash_UNUSED_TAG = 0xffffffffu
 DataType must have new, delete and a copy constructor. Keys are unique.

const void * Hash_DELETED_SLOT = (void *)0x00000001
const unsigned int Hash_NOMBRES_MAGIQUES [256]
double pl_gammln_cof [7]
float tanhtable [TANHTABLESIZE]
PLMathInitializer pl_math_initializer
_plearn_nan_type plearn_nan
long the_seed = 0
int iset = 0
real gset
double MAXLOG = 7.09782712893383996732E2
double MINLOG = -7.451332191019412076235E2
double MACHEP = 1.11022302462515654042E-16
double big = 4.503599627370496e15
double biginv = 2.22044604925031308085e-16
string vmat_view_dataset
 The specification of the dataset viewed by the vmat program.

bool displayvg = false
const int STORAGE_UNUSED_HANDLE = -1


Detailed Description

< for swap

pl_fdstream.{h|cc} Defines a stream buffer that can be created from a POSIX file descriptor, along with a stream to use that buffer.


Typedef Documentation

typedef int(* PLearn::compare_function)(const void *, const void *)
 

Definition at line 236 of file general.h.

Referenced by PLearn::RVArray::sort(), and PLearn::RVInstanceArray::sort().

typedef const map<int, real> PLearn::ConstSparseVec
 

Definition at line 14 of file ProbabilitySparseMatrix.h.

Referenced by PLearn::ProbabilitySparseMatrix::removeExtra().

typedef map<const void*,void*> PLearn::CopiesMap
 

Global typedef to make the map of copied objects (needed by the deep copy mechanism in Object) more palatable.

Definition at line 28 of file CopiesMap.h.

Referenced by deepCopy(), PLearn::SequentialModelSelector::makeDeepCopyFromShallowCopy(), PLearn::SequentialLearner::makeDeepCopyFromShallowCopy(), PLearn::PLearner::makeDeepCopyFromShallowCopy(), PLearn::NNet::makeDeepCopyFromShallowCopy(), PLearn::NeuralNet::makeDeepCopyFromShallowCopy(), PLearn::NeighborhoodSmoothnessNNet::makeDeepCopyFromShallowCopy(), PLearn::MultiInstanceNNet::makeDeepCopyFromShallowCopy(), PLearn::Learner::makeDeepCopyFromShallowCopy(), PLearn::Kernel::makeDeepCopyFromShallowCopy(), PLearn::GradientOptimizer::makeDeepCopyFromShallowCopy(), PLearn::GaussianDistribution::makeDeepCopyFromShallowCopy(), PLearn::EmpiricalDistribution::makeDeepCopyFromShallowCopy(), PLearn::EmbeddedSequentialLearner::makeDeepCopyFromShallowCopy(), PLearn::ConjGradientOptimizer::makeDeepCopyFromShallowCopy(), PLearn::AdaptGradientOptimizer::makeDeepCopyFromShallowCopy(), and PLearn::Function::operator()().

typedef Ker PLearn::CostFunc
 

a cost function maps (output,target) to a loss

Definition at line 246 of file Kernel.h.

Referenced by PLearn::ClassifierFromDensity::computeCostsFromOutputs(), PLearn::Learner::computeLeaveOneOutCosts(), and PLearn::QuadraticUtilityCostFunction::QuadraticUtilityCostFunction().

typedef Array<PSDBVMFieldDiscrete> PLearn::FieldArray
 

In general, if there are N fields, x_1...x_N, and each can take y_i values, then the discrete value is:

 value = sum_{i=1}^N x_i * prod_{j=i+1}^N y_j

where prod_{j=N+1}^N is defined to be 1. For example, with N=2, y_1=3 and y_2=4, the field values (x_1,x_2) = (2,1) map to the discrete value 2*4 + 1 = 9.

 For convenience, this class inherits from SDBVMFieldDiscrete, but does not use the inherited source_ member.

Definition at line 864 of file SDBVMat.h.

Referenced by PLearn::SDBVMFieldMultiDiscrete::SDBVMFieldMultiDiscrete().

typedef OptionList&(* PLearn::GETOPTIONLIST_METHOD)()
 

Definition at line 54 of file TypeFactory.h.

Referenced by PLearn::TypeFactory::register_type().

typedef map<string, string> PLearn::HAliases
 

Definition at line 60 of file HyperOptimizer.h.

Referenced by PLearn::HTryCombinations::optimize(), PLearn::HCoordinateDescent::optimize(), PLearn::HTryAll::optimize(), and PLearn::HSetVal::optimize().

typedef Array<PP<HyperOptimizer> > PLearn::HStrategy
 

Definition at line 59 of file HyperOptimizer.h.

typedef bool(* PLearn::ISA_METHOD)(Object *o)
 

Definition at line 55 of file TypeFactory.h.

Referenced by PLearn::TypeFactory::register_type().

typedef TinyVector<int, 7> PLearn::IVec
 

Definition at line 49 of file Tensor.h.

Referenced by PLearn::TTensor< T >::begin(), and PLearn::TTensorSubTensorIterator< T >::TTensorSubTensorIterator().

typedef map<double, string> PLearn::map_double_string
 

Definition at line 33 of file CopiesMap.h.

typedef map<float, string> PLearn::map_float_string
 

Definition at line 34 of file CopiesMap.h.

typedef map<string, double> PLearn::map_string_double
 

Definition at line 32 of file CopiesMap.h.

typedef map<string, float> PLearn::map_string_float
 

Some typedefs to use the NODEEPCOPY macro with.

Definition at line 31 of file CopiesMap.h.

typedef TMat<real> PLearn::Mat
 

Definition at line 802 of file TMat_decl.h.

Referenced by PLearn::ToBagSplitter::build_(), PLearn::RemoveDuplicateVMatrix::build_(), PLearn::StatsCollector::cdf(), computeConditionalMeans(), PLearn::SourceKernel::computeGramMatrix(), PLearn::LLEKernel::computeGramMatrix(), PLearn::DivisiveNormalizationKernel::computeGramMatrix(), PLearn::AdditiveNormalizationKernel::computeGramMatrix(), PLearn::ReconstructionWeightsKernel::computeLLEMatrix(), PLearn::GeodesicDistanceKernel::computeNearestGeodesicNeighbour(), PLearn::Kernel::computeNearestNeighbors(), PLearn::PConditionalDistribution::computeOutput(), PLearn::KPCATangentLearner::computeOutput(), PLearn::IsomapTangentLearner::computeOutput(), PLearn::HistogramDistribution::computeOutput(), PLearn::ReconstructionWeightsKernel::computeWeights(), PLearn::MatlabInterface::eigs_r11(), PLearn::PLMPI::exchangeColumnBlocks(), PLearn::Function::fbbprop(), PLearn::QuantilesStatsIterator::finish(), PLearn::PLS::forget(), PLearn::KernelProjection::forget(), PLearn::MatrixAffineTransformVariable::fprop(), PLearn::MatrixAffineTransformFeedbackVariable::fprop(), PLearn::AffineTransformVariable::fprop(), PLearn::FunctionalRandomVariable::FunctionalRandomVariable(), PLearn::VMatrix::getMat(), PLearn::SubVMatrix::getMat(), PLearn::ForwardVMatrix::getMat(), PLearn::LocalNeighborsDifferencesVMatrix::getNewRow(), loadLetters(), PLearn::SequentialModelSelector::matlabSave(), PLearn::MatRowVariable::MatRowVariable(), PLearn::MemoryVMatrix::MemoryVMatrix(), PLearn::MultiInstanceVMatrix::MultiInstanceVMatrix(), PLearn::PairsVMatrix::PairsVMatrix(), PLearn::Gnuplot::plot(), PLearn::Grapher::plot_1D_regression(), PLearn::VMat::precompute(), PLearn::ConcatRowsVMatrix::putMat(), PLearn::RandomVar::RandomVar(), PLearn::RandomVariable::RandomVariable(), PLearn::RemapLastColumnVMatrix::RemapLastColumnVMatrix(), PLearn::RGBImage::RGBImage(), PLearn::RowMapSparseMatrix< real >::RowMapSparseMatrix(), PLearn::RowMapSparseValueMatrix< T >::RowMapSparseValueMatrix(), PLearn::TestDependencyCommand::run(), PLearn::TestDependenciesCommand::run(), PLearn::SDBVMFieldICBCTargets::SDBVMFieldICBCTargets(), PLearn::SourceVariable::SourceVariable(), PLearn::StrTableVMatrix::StrTableVMatrix(), PLearn::YMDDatedVMatrix::subDistrRelativeDates(), PLearn::Learner::test(), PLearn::SparseMatrix::toMat(), PLearn::MovingAverage::train(), PLearn::ManifoldParzen2::train(), PLearn::AdaBoost::train(), PLearn::UniformizeVMatrix::UniformizeVMatrix(), PLearn::StatsIterator::update(), PLearn::StatsItArray::update(), PLearn::PLearner::useOnTrain(), PLearn::Var::Var(), PLearn::UniformDistribution::variance(), PLearn::SpiralDistribution::variance(), PLearn::EmpiricalDistribution::variance(), PLearn::Distribution::variance(), PLearn::ConditionalDensityNet::variance(), PLearn::VMat::VMat(), PLearn::VMatLanguage::VMatLanguage(), PLearn::GhostScript::writeBitmapHexString1Bit(), and PLearn::YMDDatedVMatrix::YMDDatedVMatrix().

typedef RandomVar PLearn::MatRandomVar
 

Definition at line 418 of file RandomVar.h.

typedef bool(* PLearn::MeasurerCallbackFunction)(int t, const Vec &costs)
 

Definition at line 77 of file Measurer.h.

typedef Object*(* PLearn::NEW_OBJECT)()
 

Typedef for the "new instance" function type, which returns a default-initialized Object.

Definition at line 53 of file TypeFactory.h.

Referenced by PLearn::TypeFactory::register_type().

typedef vector< PP<OptionBase> > PLearn::OptionList
 

Definition at line 133 of file OptionBase.h.

Referenced by PLearn::YMDDatedVMatrix::declareOptions(), PLearn::WeightedCostFunction::declareOptions(), PLearn::VVMatrix::declareOptions(), PLearn::VVec::declareOptions(), PLearn::VMatrixFromDistribution::declareOptions(), PLearn::VMatrix::declareOptions(), PLearn::PreprocessingVMatrix::declareOptions(), PLearn::VMatLanguage::declareOptions(), PLearn::VecStatsCollector::declareOptions(), PLearn::VecExtendedVMatrix::declareOptions(), PLearn::VecElementVariable::declareOptions(), PLearn::Variable::declareOptions(), PLearn::UpsideDownVMatrix::declareOptions(), PLearn::UniformVMatrix::declareOptions(), PLearn::UniformizeVMatrix::declareOptions(), PLearn::UniformDistribution::declareOptions(), PLearn::UnfoldedSumOfVariable::declareOptions(), PLearn::UnfoldedFuncVariable::declareOptions(), PLearn::UnequalConstantVariable::declareOptions(), PLearn::UnconditionalDistribution::declareOptions(), PLearn::UnaryVariable::declareOptions(), PLearn::UnaryHardSlopeVariable::declareOptions(), PLearn::UCISpecification::declareOptions(), PLearn::TransposeVMatrix::declareOptions(), PLearn::TrainValidTestSplitter::declareOptions(), PLearn::TrainTestSplitter::declareOptions(), PLearn::TrainTestBagsSplitter::declareOptions(), PLearn::Train::declareOptions(), PLearn::ToBagSplitter::declareOptions(), PLearn::TextSenseSequenceVMatrix::declareOptions(), PLearn::TestMethod::declareOptions(), PLearn::TestInTrainSplitter::declareOptions(), PLearn::TestingLearner::declareOptions(), PLearn::TemporalHorizonVMatrix::declareOptions(), PLearn::TangentLearner::declareOptions(), PLearn::SumOverBagsVariable::declareOptions(), PLearn::SumOfVariable::declareOptions(), PLearn::SubVMatrix::declareOptions(), PLearn::SubsampleVariable::declareOptions(), PLearn::SubMatVariable::declareOptions(), PLearn::SubMatTransposeVariable::declareOptions(), PLearn::SubInputVMatrix::declareOptions(), PLearn::QuantilesStatsIterator::declareOptions(), PLearn::LiftStatsIterator::declareOptions(), PLearn::MaxStatsIterator::declareOptions(), PLearn::MinStatsIterator::declareOptions(), PLearn::SharpeRatioStatsIterator::declareOptions(), PLearn::StderrStatsIterator::declareOptions(), PLearn::StddevStatsIterator::declareOptions(), PLearn::ExpMeanStatsIterator::declareOptions(), PLearn::MeanStatsIterator::declareOptions(), PLearn::StatsIterator::declareOptions(), PLearn::StatsCollector::declareOptions(), PLearn::StatefulLearner::declareOptions(), PLearn::StackedLearner::declareOptions(), PLearn::SquaredErrorCostFunction::declareOptions(), PLearn::SpiralDistribution::declareOptions(), PLearn::SpectralClustering::declareOptions(), PLearn::SparseVMatrix::declareOptions(), PLearn::SourceVMatrixSplitter::declareOptions(), PLearn::SourceVMatrix::declareOptions(), PLearn::SourceKernel::declareOptions(), PLearn::SortRowsVMatrix::declareOptions(), PLearn::SoftSlopeVariable::declareOptions(), PLearn::SoftSlopeIntegralVariable::declareOptions(), PLearn::Smoother::declareOptions(), PLearn::SigmoidPrimitiveKernel::declareOptions(), PLearn::SigmoidalKernel::declareOptions(), PLearn::ShiftAndRescaleVMatrix::declareOptions(), PLearn::ShellScript::declareOptions(), PLearn::SetOption::declareOptions(), PLearn::SequentialValidation::declareOptions(), PLearn::SequentialSplitter::declareOptions(), PLearn::SequentialModelSelector::declareOptions(), PLearn::SequentialLearner::declareOptions(), PLearn::SemiSupervisedProbClassCostVariable::declareOptions(), PLearn::SelectRowsVMatrix::declareOptions(), PLearn::SelectRowsFileIndexVMatrix::declareOptions(), 
PLearn::SelectInputSubsetLearner::declareOptions(), PLearn::SelectedOutputCostFunction::declareOptions(), PLearn::SelectColumnsVMatrix::declareOptions(), PLearn::ScaledGeneralizedDistanceRBFKernel::declareOptions(), PLearn::ScaledGaussianKernel::declareOptions(), PLearn::ScaledConditionalCDFSmoother::declareOptions(), PLearn::RunObject::declareOptions(), PLearn::RowsSubVMatrix::declareOptions(), PLearn::RowAtPositionVariable::declareOptions(), PLearn::ReshapeVariable::declareOptions(), PLearn::RepeatSplitter::declareOptions(), PLearn::RemoveRowsVMatrix::declareOptions(), PLearn::RemoveDuplicateVMatrix::declareOptions(), PLearn::RemapLastColumnVMatrix::declareOptions(), PLearn::RegularGridVMatrix::declareOptions(), PLearn::ReconstructionWeightsKernel::declareOptions(), PLearn::RealMapping::declareOptions(), PLearn::RangeVMatrix::declareOptions(), PLearn::QuadraticUtilityCostFunction::declareOptions(), PLearn::ProcessingVMatrix::declareOptions(), PLearn::PricingTransactionPairProfitFunction::declareOptions(), PLearn::PrecomputedVMatrix::declareOptions(), PLearn::PrecomputedKernel::declareOptions(), PLearn::PowVariable::declareOptions(), PLearn::PowDistanceKernel::declareOptions(), PLearn::PolynomialKernel::declareOptions(), PLearn::PlusConstantVariable::declareOptions(), PLearn::PLS::declareOptions(), PLearn::PLearnerOutputVMatrix::declareOptions(), PLearn::PLearner::declareOptions(), PLearn::PTester::declareOptions(), PLearn::PDistributionVariable::declareOptions(), PLearn::PDistribution::declareOptions(), PLearn::PConditionalDistribution::declareOptions(), PLearn::PCA::declareOptions(), PLearn::PairsVMatrix::declareOptions(), PLearn::Optimizer::declareOptions(), PLearn::OneHotVMatrix::declareOptions(), PLearn::OneHotVariable::declareOptions(), PLearn::OneHotSquaredLoss::declareOptions(), PLearn::ObjectGenerator::declareOptions(), PLearn::NormalizedDotProductKernel::declareOptions(), PLearn::NNet::declareOptions(), PLearn::NeuralNet::declareOptions(), PLearn::NeighborhoodSmoothnessNNet::declareOptions(), PLearn::NegLogProbCostFunction::declareOptions(), PLearn::NegKernel::declareOptions(), PLearn::NearestNeighborPredictionCost::declareOptions(), PLearn::NaryVariable::declareOptions(), PLearn::MultiInstanceVMatrix::declareOptions(), PLearn::MultiInstanceNNet::declareOptions(), PLearn::MovingAverageVMatrix::declareOptions(), PLearn::MovingAverage::declareOptions(), PLearn::MemoryVMatrix::declareOptions(), PLearn::MatRowVariable::declareOptions(), PLearn::MatrixSumOfVariable::declareOptions(), PLearn::MatrixOneHotSquaredLoss::declareOptions(), PLearn::MatrixElementsVariable::declareOptions(), PLearn::MarginPerceptronCostVariable::declareOptions(), PLearn::ManualBinner::declareOptions(), PLearn::LogOfGaussianDensityKernel::declareOptions(), PLearn::LocalNeighborsDifferencesVMatrix::declareOptions(), PLearn::LocallyWeightedDistribution::declareOptions(), PLearn::LLEKernel::declareOptions(), PLearn::LLE::declareOptions(), PLearn::LinearRegressor::declareOptions(), PLearn::LimitedGaussianSmoother::declareOptions(), PLearn::LiftStatsCollector::declareOptions(), PLearn::LiftBinaryCostFunction::declareOptions(), PLearn::LearnerProcessedVMatrix::declareOptions(), PLearn::Learner::declareOptions(), PLearn::LaplacianKernel::declareOptions(), PLearn::KPCATangentLearner::declareOptions(), PLearn::KNNVMatrix::declareOptions(), PLearn::KFoldSplitter::declareOptions(), PLearn::KernelVMatrix::declareOptions(), PLearn::KernelProjection::declareOptions(), PLearn::KernelPCA::declareOptions(), 
PLearn::Kernel::declareOptions(), PLearn::JulianizeVMatrix::declareOptions(), PLearn::JoinVMatrix::declareOptions(), PLearn::IsomapTangentLearner::declareOptions(), PLearn::Isomap::declareOptions(), PLearn::IsMissingVariable::declareOptions(), PLearn::IsAboveThresholdVariable::declareOptions(), PLearn::InterleaveVMatrix::declareOptions(), PLearn::IndexedVMatrix::declareOptions(), PLearn::IndexAtPositionVariable::declareOptions(), PLearn::HTryCombinations::declareOptions(), PLearn::HCoordinateDescent::declareOptions(), PLearn::HTryAll::declareOptions(), PLearn::HSetVal::declareOptions(), PLearn::HyperOptimizer::declareOptions(), PLearn::HistogramDistribution::declareOptions(), PLearn::GraphicalBiText::declareOptions(), PLearn::Grapher::declareOptions(), PLearn::GramVMatrix::declareOptions(), PLearn::GradientOptimizer::declareOptions(), PLearn::GetInputVMatrix::declareOptions(), PLearn::GeodesicDistanceKernel::declareOptions(), PLearn::GenerateDecisionPlot::declareOptions(), PLearn::GeneralizedOneHotVMatrix::declareOptions(), PLearn::GeneralizedDistanceRBFKernel::declareOptions(), PLearn::GaussMix::declareOptions(), PLearn::GaussianProcessRegressor::declareOptions(), PLearn::GaussianKernel::declareOptions(), PLearn::GaussianDistribution::declareOptions(), PLearn::GaussianDensityKernel::declareOptions(), PLearn::GaussianContinuum::declareOptions(), PLearn::Function::declareOptions(), PLearn::FractionSplitter::declareOptions(), PLearn::ForwardVMatrix::declareOptions(), PLearn::FinancePreprocVMatrix::declareOptions(), PLearn::FilterSplitter::declareOptions(), PLearn::FilteredVMatrix::declareOptions(), PLearn::FileVMatrix::declareOptions(), PLearn::FilePStreamBuf::declareOptions(), PLearn::ExtendedVMatrix::declareOptions(), PLearn::ExtendedVariable::declareOptions(), PLearn::ExplicitSplitter::declareOptions(), PLearn::Experiment::declareOptions(), PLearn::EqualConstantVariable::declareOptions(), PLearn::EntropyContrast::declareOptions(), PLearn::EmpiricalDistribution::declareOptions(), PLearn::EmbeddedSequentialLearner::declareOptions(), PLearn::EmbeddedLearner::declareOptions(), PLearn::ElementAtPositionVariable::declareOptions(), PLearn::DuplicateScalarVariable::declareOptions(), PLearn::DuplicateRowVariable::declareOptions(), PLearn::DuplicateColumnVariable::declareOptions(), PLearn::DivisiveNormalizationKernel::declareOptions(), PLearn::Distribution::declareOptions(), PLearn::DistanceKernel::declareOptions(), PLearn::DiskVMatrix::declareOptions(), PLearn::Dictionary::declareOptions(), PLearn::DBSplitter::declareOptions(), PLearn::DatedVMatrix::declareOptions(), PLearn::DatedJoinVMatrix::declareOptions(), PLearn::CutBelowThresholdVariable::declareOptions(), PLearn::CutAboveThresholdVariable::declareOptions(), PLearn::CumVMatrix::declareOptions(), PLearn::CrossReferenceVMatrix::declareOptions(), PLearn::ConvexBasisKernel::declareOptions(), PLearn::ConstantRegressor::declareOptions(), PLearn::ConjGradientOptimizer::declareOptions(), PLearn::ConditionalStatsCollector::declareOptions(), PLearn::ConditionalGaussianDistribution::declareOptions(), PLearn::ConditionalDensityNet::declareOptions(), PLearn::ConditionalCDFSmoother::declareOptions(), PLearn::ConcatRowsVMatrix::declareOptions(), PLearn::ConcatRowsSubVMatrix::declareOptions(), PLearn::ConcatOfVariable::declareOptions(), PLearn::ConcatColumnsVMatrix::declareOptions(), PLearn::CompactVMatrixPolynomialKernel::declareOptions(), PLearn::CompactVMatrixGaussianKernel::declareOptions(), PLearn::ClassMarginCostFunction::declareOptions(), 
PLearn::ClassifierFromDensity::declareOptions(), PLearn::ClassErrorCostFunction::declareOptions(), PLearn::CenteredVMatrix::declareOptions(), PLearn::BootstrapVMatrix::declareOptions(), PLearn::BootstrapSplitter::declareOptions(), PLearn::Binner::declareOptions(), PLearn::BinaryVariable::declareOptions(), PLearn::BatchVMatrix::declareOptions(), PLearn::AutoVMatrix::declareOptions(), PLearn::AsciiVMatrix::declareOptions(), PLearn::AffineTransformWeightPenalty::declareOptions(), PLearn::AdditiveNormalizationKernel::declareOptions(), PLearn::AddCostToLearner::declareOptions(), PLearn::AdaptGradientOptimizer::declareOptions(), PLearn::AdaBoost::declareOptions(), and PLearn::Object::readOptionVal().

typedef pair<real,StatsCollectorCounts*> PLearn::PairRealSCCType
 

Definition at line 65 of file StatsCollector.h.

Referenced by sortIdComparator(), and PLearn::StatsCollector::sortIds().

typedef ofstream PLearn::pofstream
 

The stream classes.

Definition at line 322 of file PLMPI.h.

typedef PP<Learner> PLearn::PPLearner
 

Definition at line 567 of file Learner.h.

typedef CostFunc PLearn::ProfitFunc
 

a profit function maps (output, target) to a profit

FINANCIAL STUFF

Definition at line 254 of file Kernel.h.

typedef PP<SDBVMField> PLearn::PSDBVMField
 

Definition at line 196 of file SDBVMat.h.

Referenced by PLearn::SDBVMatrix::appendField().

typedef PP<SDBVMFieldDiscrete> PLearn::PSDBVMFieldDiscrete
 

Definition at line 661 of file SDBVMat.h.

typedef PP<SDBVMOutputCoder> PLearn::PSDBVMOutputCoder
 

Definition at line 139 of file SDBVMat.h.

typedef SimpleDB PLearn::SDB
 

A utility typedef for the common case.

Definition at line 1178 of file SimpleDB.h.

Referenced by PLearn::AutoSDBVMatrix::AutoSDBVMatrix(), halfShuffleRows(), PLearn::SDBVMatrix::SDBVMatrix(), and PLearn::SDBWithStats::SDBWithStats().

typedef PPointableSet::iterator PLearn::SetIterator
 

Definition at line 22 of file Set.h.

Referenced by PLearn::Set::begin(), PLearn::TextSenseSequenceVMatrix::build_(), PLearn::GraphicalBiText::build_(), check_prob(), PLearn::GraphicalBiText::check_set_pA(), PLearn::GraphicalBiText::compute_BN_likelihood(), PLearn::GraphicalBiText::compute_efs_likelihood(), PLearn::GraphicalBiText::compute_likelihood(), PLearn::GraphicalBiText::compute_node_level(), PLearn::GraphicalBiText::compute_nodemap(), PLearn::GraphicalBiText::compute_pMC(), PLearn::GraphicalBiText::compute_pTC(), PLearn::GraphicalBiText::computeKL(), PLearn::WordNetOntology::computeWordSenseUniqueIds(), PLearn::GraphicalBiText::distribute_pS_on_ancestors(), PLearn::Set::end(), PLearn::WordNetOntology::extractAncestors(), PLearn::WordNetOntology::extractDescendants(), PLearn::WordNetOntology::extractStrictDescendants(), PLearn::WordNetOntology::extractWordHighLevelSenses(), PLearn::WordNetOntology::extractWordNounAndVerbHighLevelSenses(), PLearn::WordNetOntology::fillTempWordToHighLevelSensesTVecMap(), PLearn::WordNetOntology::fillTempWordToSensesTVecMap(), PLearn::Set::find(), PLearn::WordNetOntology::getCategoriesAtLevel(), PLearn::WordNetOntology::getCategoriesUnderLevel(), PLearn::GraphicalBiText::getDeepestCommonAncestor(), PLearn::WordNetOntology::getDescendantCategoriesAtLevel(), PLearn::WordNetOntology::getDownToUpParentCategoriesAtLevel(), PLearn::WordNetOntology::getPredominentSyntacticClassForWord(), PLearn::WordNetOntology::getSecondLevelSensesForWord(), PLearn::WordNetOntology::getSyntacticClassesForWord(), PLearn::WordNetOntology::getThirdLevelSensesForWord(), PLearn::WordNetOntology::getWordAncestors(), PLearn::WordNetOntology::getWordSenseIdForSenseKey(), PLearn::GraphicalBiText::init(), PLearn::WordNetOntology::intersectAncestorsAndSenses(), PLearn::WordNetOntology::isWordUnknown(), PLearn::WordNetOntology::load(), PLearn::Set::merge(), operator<<(), PLearn::WordNetOntology::overlappingSynsets(), PLearn::WordNetOntology::print(), print(), PLearn::GraphicalBiText::print(), PLearn::GraphicalBiText::print_sensemap(), PLearn::WordNetOntology::printInvertedSynsetOntology(), PLearn::GraphicalBiText::printNode(), PLearn::WordNetOntology::printNodes(), PLearn::WordNetOntology::printOntology(), PLearn::WordNetOntology::printSynsetAncestors(), PLearn::WordNetOntology::printWordAncestors(), PLearn::WordNetOntology::printWordOntology(), PLearn::WordNetOntology::propagatePOSTypes(), PLearn::WordNetOntology::reduceWordPolysemy(), PLearn::WordNetOntology::reduceWordPolysemy_preserveSenseOverlapping(), PLearn::ProbabilitySparseMatrix::removeExtra(), PLearn::WordNetOntology::removeNonReachableSynsets(), PLearn::ProbabilitySparseMatrix::removeRow(), samePos(), PLearn::WordNetOntology::save(), PLearn::GraphicalBiText::set_nodemap(), PLearn::GraphicalBiText::test_WSD(), PLearn::WordNetOntology::unvisitDownward(), update(), updateAndClearCounts(), and PLearn::WordNetOntology::visitUpward().

typedef map<int, real> PLearn::SparseVec
 

Definition at line 13 of file ProbabilitySparseMatrix.h.

typedef PP<StatsIterator> PLearn::StatsIt
 

Definition at line 354 of file StatsIterator.h.

Referenced by PLearn::StatsItArray::StatsItArray().

typedef TTensor<real> PLearn::Tensor
 

Definition at line 402 of file Tensor.h.

typedef int PLearn::tFileHandle
 

Definition at line 47 of file MemoryMap.h.

Referenced by MemoryMap().

typedef real(* PLearn::tRealFunc)(real)
 

Definition at line 394 of file pl_math.h.

Referenced by PLearn::MixtureRandomVariable::EMBprop(), and PLearn::Function::verifyGradient().

typedef real(* PLearn::tRealReadFunc)(real, real)
 

Definition at line 395 of file pl_math.h.

typedef map<string,TypeMapEntry> PLearn::TypeMap
 

Definition at line 85 of file TypeFactory.h.

Referenced by displayObjectHelp(), and PLearn::TypeFactory::getTypeMap().

typedef TVec<real> PLearn::Vec
 

Definition at line 774 of file TVec_decl.h.

Referenced by PLearn::SourceKernel::addDataForKernelMatrix(), PLearn::RowMapSparseValueMatrix< T >::averageAcrossRowsAndColumns(), bootstrap(), PLearn::MatrixAffineTransformVariable::bprop(), PLearn::VMatrixFromDistribution::build_(), PLearn::GaussianContinuum::build_(), PLearn::ByteMemoryVMatrix::ByteMemoryVMatrix(), PLearn::CompactVMatrix::CompactVMatrix(), PLearn::CompressedVMatrix::CompressedVMatrix(), PLearn::VecCompressor::compressVec(), PLearn::Grapher::computeAutoGridrange(), PLearn::DivisiveNormalizationKernel::computeAverage(), PLearn::AdditiveNormalizationKernel::computeAverage(), PLearn::PCA::computeCostsFromOutputs(), PLearn::TestingLearner::computeOutput(), PLearn::TangentLearner::computeOutput(), PLearn::StatefulLearner::computeOutput(), PLearn::StackedLearner::computeOutput(), PLearn::PDistribution::computeOutput(), PLearn::KPCATangentLearner::computeOutput(), PLearn::IsomapTangentLearner::computeOutput(), PLearn::EmbeddedLearner::computeOutput(), PLearn::SequentialLearner::computeOutputAndCosts(), PLearn::ReconstructionWeightsKernel::computeWeights(), PLearn::SDBVMFieldICBCClassification::convertField(), PLearn::SDBVMFieldSumClaims::convertField(), PLearn::SDBVMFieldHasClaim::convertField(), PLearn::SDBVMFieldICBCTargets::convertField(), PLearn::SDBVMFieldDateGreater::convertField(), PLearn::SDBVMFieldDiscrete::convertField(), PLearn::SDBVMFieldDateDiff::convertField(), PLearn::SDBVMFieldMonths::convertField(), PLearn::SDBVMFieldDay::convertField(), PLearn::SDBVMFieldDate::convertField(), PLearn::SDBVMFieldFunc2::convertField(), PLearn::SDBVMFieldFunc1::convertField(), PLearn::SDBVMFieldSignedPower::convertField(), PLearn::SDBVMFieldPosAffine::convertField(), PLearn::SDBVMFieldAffine::convertField(), PLearn::SDBVMFieldDivSigma::convertField(), PLearn::SDBVMFieldNormalize::convertField(), PLearn::SDBVMFieldAsIs::convertField(), PLearn::SDBVMField::convertMissing(), PLearn::VarArray::copyFrom(), PLearn::TemporalHorizonVMatrix::dot(), PLearn::ConcatRowsVMatrix::dot(), PLearn::MatlabInterface::eigs_r11(), PLearn::NonRandomVariable::EMBprop(), PLearn::ConcatColumnsRandomVariable::EMBprop(), PLearn::ExtendedRandomVariable::EMBprop(), PLearn::SubVecRandomVariable::EMBprop(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::ElementWiseDivisionRandomVariable::EMBprop(), PLearn::LogRandomVariable::EMBprop(), PLearn::ExpRandomVariable::EMBprop(), PLearn::NegRandomVariable::EMBprop(), PLearn::RVArrayRandomElementRandomVariable::EMBprop(), PLearn::RandomElementOfRandomVariable::EMBprop(), PLearn::JointRandomVariable::EMBprop(), PLearn::WeightedCostFunction::evaluate(), PLearn::SquaredErrorCostFunction::evaluate(), PLearn::SigmoidPrimitiveKernel::evaluate(), PLearn::SigmoidalKernel::evaluate(), PLearn::SelectedOutputCostFunction::evaluate(), PLearn::QuadraticUtilityCostFunction::evaluate(), PLearn::PricingTransactionPairProfitFunction::evaluate(), PLearn::PrecomputedKernel::evaluate(), PLearn::PowDistanceKernel::evaluate(), PLearn::PolynomialKernel::evaluate(), PLearn::NormalizedDotProductKernel::evaluate(), PLearn::NegOutputCostFunction::evaluate(), PLearn::NegLogProbCostFunction::evaluate(), PLearn::NegKernel::evaluate(), PLearn::MulticlassErrorCostFunction::evaluate(), PLearn::LogOfGaussianDensityKernel::evaluate(), PLearn::LLEKernel::evaluate(), PLearn::LiftBinaryCostFunction::evaluate(), PLearn::LaplacianKernel::evaluate(), PLearn::GeodesicDistanceKernel::evaluate(), PLearn::GeneralizedDistanceRBFKernel::evaluate(), PLearn::GaussianDensityKernel::evaluate(), 
PLearn::DotProductKernel::evaluate(), PLearn::DistanceKernel::evaluate(), PLearn::DirectNegativeCostFunction::evaluate(), PLearn::DifferenceKernel::evaluate(), PLearn::ConvexBasisKernel::evaluate(), PLearn::CompactVMatrixPolynomialKernel::evaluate(), PLearn::CompactVMatrixGaussianKernel::evaluate(), PLearn::ClassMarginCostFunction::evaluate(), PLearn::ClassErrorCostFunction::evaluate(), PLearn::ClassDistanceProportionCostFunction::evaluate(), PLearn::Distribution::expectation(), PLearn::KernelProjection::forget(), PLearn::UnfoldedFuncVariable::fprop(), PLearn::MatrixSoftmaxVariable::fprop(), PLearn::MatrixAffineTransformFeedbackVariable::fprop(), PLearn::Function::fprop(), PLearn::FunctionalRandomVariable::FunctionalRandomVariable(), PLearn::GeneralizedOneHotVMatrix::GeneralizedOneHotVMatrix(), PLearn::VVMatrix::generateFilterIndexFile(), PLearn::MemoryVMatrix::getColumn(), PLearn::SDBVMFieldRemapIntervals::getIntervals(), PLearn::PreprocessingVMatrix::getNewRow(), PLearn::UpsideDownVMatrix::getNewRow(), PLearn::TransposeVMatrix::getNewRow(), PLearn::ThresholdVMatrix::getNewRow(), PLearn::TextSenseSequenceVMatrix::getNewRow(), PLearn::SubInputVMatrix::getNewRow(), PLearn::RowsSubVMatrix::getNewRow(), PLearn::RemapLastColumnVMatrix::getNewRow(), PLearn::RegularGridVMatrix::getNewRow(), PLearn::ProcessingVMatrix::getNewRow(), PLearn::PrecomputedVMatrix::getNewRow(), PLearn::PLearnerOutputVMatrix::getNewRow(), PLearn::PairsVMatrix::getNewRow(), PLearn::OneHotVMatrix::getNewRow(), PLearn::MultiInstanceVMatrix::getNewRow(), PLearn::MovingAverageVMatrix::getNewRow(), PLearn::LocalNeighborsDifferencesVMatrix::getNewRow(), PLearn::LearnerProcessedVMatrix::getNewRow(), PLearn::JulianizeVMatrix::getNewRow(), PLearn::FinancePreprocVMatrix::getNewRow(), PLearn::FileVMatrix::getNewRow(), PLearn::ExtendedVMatrix::getNewRow(), PLearn::DatedJoinVMatrix::getNewRow(), PLearn::CumVMatrix::getNewRow(), PLearn::ConcatColumnsVMatrix::getNewRow(), PLearn::AutoSDBVMatrix::getNewRow(), PLearn::AsciiVMatrix::getNewRow(), PLearn::Kernel::getParameters(), PLearn::CrossReferenceVMatrix::getRow(), PLearn::TrainTestBagsSplitter::getSplit(), PLearn::UniformVMatrix::getSubRow(), PLearn::SubVMatrix::getSubRow(), PLearn::SelectRowsFileIndexVMatrix::getSubRow(), PLearn::NistDB::getSubRow(), PLearn::InterleaveVMatrix::getSubRow(), PLearn::ForwardVMatrix::getSubRow(), PLearn::ConcatRowsSubVMatrix::getSubRow(), PLearn::Gnuplot::Gnuplot(), PLearn::QuantilesStatsIterator::init(), PLearn::LiftStatsIterator::init(), PLearn::MaxStatsIterator::init(), PLearn::MinStatsIterator::init(), PLearn::SharpeRatioStatsIterator::init(), PLearn::StderrStatsIterator::init(), PLearn::StddevStatsIterator::init(), PLearn::ExpMeanStatsIterator::init(), PLearn::MeanStatsIterator::init(), PLearn::SpiralDistribution::log_density(), PLearn::LocallyWeightedDistribution::log_density(), PLearn::EmpiricalDistribution::log_density(), PLearn::ManualBinner::ManualBinner(), PLearn::VarMeasurer::measure(), PLearn::Measurer::measure(), PLearn::CallbackMeasurer::measure(), PLearn::FilteredVMatrix::openIndex(), PLearn::Ker::operator()(), PLearn::Func::operator()(), PLearn::HTryAll::optimize(), PLearn::GradientOptimizer::optimize(), printvec(), PLearn::RandomVar::RandomVar(), PLearn::RandomVariable::RandomVariable(), PLearn::HTryCombinations::recursive_optimize(), removeRow(), PLearn::RemoveRowsVMatrix::RemoveRowsVMatrix(), PLearn::Variable::resize(), PLearn::RGBImagesVMatrix::RGBImagesVMatrix(), PLearn::RGBImageVMatrix::RGBImageVMatrix(), PLearn::VMat::rows(), 
PLearn::VMatLanguage::run(), PLearn::TestDependenciesCommand::run(), PLearn::SequentialValidation::run(), PLearn::KolmogorovSmirnovCommand::run(), PLearn::Experiment::run(), PLearn::RandomVarVMatrix::sample(), PLearn::SelectColumnsVMatrix::SelectColumnsVMatrix(), PLearn::SelectRowsVMatrix::SelectRowsVMatrix(), PLearn::SentencesBlocks::SentencesBlocks(), PLearn::UnconditionalDistribution::setInput(), PLearn::PConditionalDistribution::setInput(), PLearn::ConditionalDistribution::setInput(), PLearn::SDBVMOutputCoder::setOutput(), PLearn::ShiftAndRescaleVMatrix::ShiftAndRescaleVMatrix(), PLearn::GhostScript::show(), PLearn::ScaledConditionalCDFSmoother::smooth(), PLearn::LimitedGaussianSmoother::smooth(), PLearn::ConditionalCDFSmoother::smooth(), PLearn::SourceVariable::SourceVariable(), PLearn::SparseVMatrix::SparseVMatrix(), PLearn::CompactVMatrix::squareDifference(), PLearn::StrTableVMatrix::StrTableVMatrix(), PLearn::EmbeddedSequentialLearner::test(), PLearn::MovingAverage::train(), PLearn::ManifoldParzen2::train(), PLearn::HistogramDistribution::train(), PLearn::ConstantRegressor::train(), PLearn::RealMapping::transform(), PLearn::UniformizeVMatrix::UniformizeVMatrix(), PLearn::VecStatsCollector::update(), PLearn::StatsItArray::update(), PLearn::LiftStatsCollector::update(), PLearn::ConditionalStatsCollector::update(), PLearn::Distribution::use(), PLearn::Var::Var(), PLearn::VecElementVariable::VecElementVariable(), PLearn::VecExtendedVMatrix::VecExtendedVMatrix(), and PLearn::VVec::VVec().

typedef void(* PLearn::VOIDFUNC)()
 

Definition at line 53 of file StaticInitializer.h.

Referenced by PLearn::StaticInitializer::StaticInitializer().


Enumeration Type Documentation

enum PLearn::eNumericType
 

Enumeration values:
NT_NOT_NUMERIC 
NT_ORDINAL 
NT_CARDINAL 
NT_CURRENCY 
NT_PREFIXED 
NT_SUFFIXED 
NT_RANGE 
NT_TIME 
NT_CODE 
NT_PERCENT 
NT_UNKNOWN_NUMERIC_TYPE  looks numeric, but matches none of the above

Definition at line 66 of file TypesNumeriques.h.

enum PLearn::FieldType
 

A schema is simply a vector of field definitions. A field definition is a structure containing a field type and a precision; the precision is always the length in bytes of the type.

Enumeration values:
Unknown 
StringType 
CharacterType 
SignedCharType 
ShortType 
IntType 
FloatType 
DoubleType 
DateType 

Definition at line 108 of file SimpleDB.h.

Referenced by PLearn::RowIterator::getFieldType(), PLearn::FieldValue::operator *(), PLearn::FieldValue::operator+(), PLearn::FieldValue::operator-(), PLearn::FieldValue::operator/(), PLearn::FieldValue::operator<(), and PLearn::FieldValue::operator==().
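
To make the schema idea concrete, here is a purely illustrative sketch (not taken from SimpleDB.h) of how field definitions pair a FieldType with a precision:

    // Illustrative only -- a schema is conceptually a vector of
    // (name, FieldType, precision) entries, for example:
    //   "age"     IntType      4    // precision: size of the type in bytes
    //   "name"    StringType  32    // precision: length of the field in bytes
    //   "salary"  DoubleType   8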

enum PLearn::SDBVMOutputCoding
 

Code a real number into a vector. Possible codings are one-hot-like variations, and, yes, identity. At the moment, the coding is specified by a simple enum, but later could be upgraded to support derived classes as well.

This class supports remapping MISSING_VALUEs that are passed to setOutput onto some arbitrary real number (including MISSING_VALUE).

One-hot coding supports a special treatment regarding missing values: if a MISSING_VALUE is passed to setOutput, and the missing_values_mapping leaves it as-is, and one-hot coding is in effect, all the elements of the one-hot vector are set to MISSING_VALUE.

Enumeration values:
SDBVMUnknownCoding 
SDBVMNumeric  straight output
SDBVMOneHot  classic one-hot
SDBVMOneHotMinus1  all but first element (which is skipped)

Definition at line 65 of file SDBVMat.h.

Referenced by PLearn::SDBVMFieldDiscrete::getOutputCoding(), and PLearn::SDBVMOutputCoder::getOutputCoding().
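
As a purely illustrative sketch of the codings above, assume the coded value is 2 and there are 4 possible values:

    // SDBVMNumeric      -> [ 2 ]            straight output
    // SDBVMOneHot       -> [ 0, 0, 1, 0 ]   classic one-hot
    // SDBVMOneHotMinus1 -> [ 0, 1, 0 ]      first element skipped
    // A MISSING_VALUE left as-is by missing_values_mapping yields, under one-hot
    // coding, a vector whose elements are all MISSING_VALUE.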


Function Documentation

Var abs Var  v  )  [inline]
 

Definition at line 73 of file AbsVariable.h.

Referenced by PLearn::RepeatSplitter::build_(), PLearn::KernelProjection::computeCostsFromOutputs(), PLearn::AddCostToLearner::computeCostsFromOutputs(), entropy(), PLearn::ConjGradientOptimizer::findDirection(), PLearn::ConjGradientOptimizer::fletcherSearchMain(), PLearn::ProductRandomVariable::invertible(), PLearn::Learner::measure(), PLearn::ConjGradientOptimizer::minCubic(), PLearn::ConjGradientOptimizer::minQuadratic(), PLearn::ConjGradientOptimizer::newtonSearch(), norm(), pownorm(), PLearn::SequentialModelSelector::sequenceCost(), PLearn::RealRange::span(), PLearn::SumAbsVariable::symbolicBprop(), PLearn::PLS::train(), PLearn::PCA::train(), and PLearn::HistogramDistribution::variance().

CostFunc PLearn::absolute_deviation int  singleoutputindex = -1  )  [inline]
 

Definition at line 133 of file DistanceKernel.cc.

string PLearn::abspath const string path  ) 
 

returns the absolute path of the (possibly relative) specified path.

if it's a directory, then there will be a trailing slash.

Definition at line 108 of file fileutils.cc.

References append_slash(), chdir(), extract_directory(), extract_filename(), getcwd(), and isdir().

Referenced by PLearn::PLearner::build_(), PLearn::PTester::build_(), PLearn::GaussianProcessRegressor::build_(), PLearn::FileVMatrix::FileVMatrix(), locateDatasetAliasesDir(), matlabSave(), readFileAndMacroProcess(), PLearn::PLearner::setExperimentDirectory(), PLearn::PTester::setExperimentDirectory(), PLearn::Learner::setExperimentDirectory(), and PLearn::VMatrix::setMetaDataDir().
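
A minimal usage sketch, assuming the current working directory is /home/user/exp (the paths are only illustrative):

    string f = abspath("data/train.amat");  // e.g. "/home/user/exp/data/train.amat"
    string d = abspath("data");             // e.g. "/home/user/exp/data/" (trailing slash: it is a directory)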

Var accessElement const Vec &  v,
Var  index
[inline]
 

Definition at line 82 of file VecElementVariable.h.

Referenced by PLearn::AddCostToLearner::build_().

Var accessRow const Mat &  m,
Var  index
[inline]
 

Definition at line 84 of file MatRowVariable.h.

template<class T>
void add const TMat< T > &  m1,
const TMat< T > &  m2,
TMat< T > &  destination
 

Definition at line 4596 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void add const TVec< T > &  source1,
T  source2,
TVec< T > &  destination
 

Definition at line 1484 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

template<class T>
void add const TVec< T > &  source1,
const TVec< T > &  source2,
TVec< T > &  destination
 

Definition at line 1467 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

SparseMatrix PLearn::add Array< SparseMatrix > &  matrices  ) 
 

add a bunch of sparse matrices and return result

Definition at line 297 of file SparseMatrix.cc.

References PLearn::SparseMatrix::beginRow, PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLearn::SparseMatrix::endRow, k, PLERROR, PLearn::TVec< T >::resize(), PLearn::SparseMatrix::row, PLearn::TVec< T >::size(), and PLearn::SparseMatrix::values.

Referenced by PLearn::Hash< KeyType, DataType >::addAndResize(), PLearn::MinusRandomVariable::EMBprop(), PLearn::FieldConvertCommand::FieldConvertCommand(), PLearn::SequentialModelSelector::matlabSave(), PLearn::SequentialLearner::matlabSave(), matlabSave(), operator+(), PLearn::ScaledGradientOptimizer::optimize(), and substract().
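
A small sketch of the element-wise add() overloads on vectors (a hypothetical snippet; it assumes the usual TVec<real> constructor and fill(), and that the second overload above takes a scalar second argument):

    TVec<real> a(3), b(3), c(3);
    a[0] = 1; a[1] = 2; a[2] = 3;
    b.fill(10);
    add(a, b, c);         // c == [11, 12, 13]
    add(a, real(5), c);   // scalar overload: c == [6, 7, 8]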

template<class T>
void addIfNonMissing const TVec< T > &  source,
const TVec< int > &  nnonmissing,
TVec< T >  destination
 

Definition at line 5742 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::GradientOptimizer::optimize(), PLearn::ExpMeanStatsIterator::update(), and PLearn::MeanStatsIterator::update().

string addpostfix const string text,
const string postfix
[inline]
 

Returns a string with the postfix appended to each *line* of the text string.

Definition at line 192 of file stringutils.h.

References addprepostfix().

vector<string> addpostfix const vector< string > &  names,
const string postfix
[inline]
 

returns the list of names, but with an appended postfix

Definition at line 178 of file stringutils.h.

References addprepostfix().

string addprefix const string prefix,
const string text
[inline]
 

Returns a string with the prefix prepended to each *line* of the text string.

Definition at line 187 of file stringutils.h.

References addprepostfix().

vector<string> addprefix const string prefix,
const vector< string > &  names
[inline]
 

returns the list of names, but with a prepended prefix

Definition at line 174 of file stringutils.h.

References addprepostfix().

Referenced by displayObjectHelp(), and lsdir_fullpath().

TVec< string > PLearn::addprepostfix const string prefix,
const TVec< string > &  names,
const string postfix
 

Definition at line 49 of file PExperiment.cc.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), and PLearn::TVec< T >::size().

string PLearn::addprepostfix const string prefix,
const string text,
const string postfix
 

Returns a string with the prefix prepended and the postfix appended to each *line* of the text string.

Definition at line 546 of file stringutils.cc.

References removenewline().

vector< string > PLearn::addprepostfix const string prefix,
const vector< string > &  names,
const string postfix
 

returns the list of names, but with a prepended prefix and an appended postfix

Definition at line 532 of file stringutils.cc.

Referenced by addpostfix(), and addprefix().
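
A rough sketch of what the prefix/postfix helpers produce (illustrative only; the exact handling of a trailing newline follows removenewline() and is not shown here):

    string s = "alpha\nbeta";
    string t = addprepostfix("<", s, ">");  // "<alpha>\n<beta>"
    string p = addprefix("x_", s);          // "x_alpha\nx_beta"
    string q = addpostfix(s, ";");          // "alpha;\nbeta;"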

template<class T>
void addToColumns const TMat< T > &  mat,
const TVec< T >  col,
bool  ignored
 

Definition at line 3720 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void addToDiagonal const TMat< T > &  mat,
const TVec< T > &  lambda
 

Definition at line 3634 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), and PLERROR.

template<class T>
void addToDiagonal const TMat< T > &  mat,
T  lambda
 

Definition at line 3623 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), and PLearn::TMat< T >::mod().

Referenced by logPFittedGaussian(), and PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost().

template<class T>
void addToMat const TMat< T > &  mat,
T  scalar,
bool  ignored
 

Definition at line 3760 of file TMat_maths_impl.h.

template<class T>
void addToRows const TMat< T > &  mat,
const TVec< T >  row,
bool  ignored
 

Definition at line 3709 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length().

template<class T>
void addXandX2IfNonMissing const TVec< T > &  source,
const TVec< int > &  nnonmissing,
TVec< T >  somme,
TVec< T >  somme2
 

Definition at line 5762 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::StderrStatsIterator::update(), and PLearn::StddevStatsIterator::update().

Var affine_transform Var  vec,
Var  transformation
[inline]
 

first row of transformation is the bias.

Definition at line 84 of file AffineTransformVariable.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), and PLearn::ConditionalDensityNet::build_().
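
In other words, a sketch of the convention stated above (row 0 of the transformation holds the bias):

    // output[j] = transformation(0, j) + sum_i vec[i] * transformation(i+1, j)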

Var affine_transform_weight_penalty Var  transformation,
real  weight_decay,
real  bias_decay = 0,
bool  L1_penalty = false
[inline]
 

weight decay and bias decay terms. This has not been tested yet [Pascal: to test].

Definition at line 82 of file AffineTransformWeightPenalty.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), and PLearn::ConditionalDensityNet::build_().
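
A sketch of the penalty presumably being built here, with w the transformation matrix whose first row is the bias:

    // penalty = weight_decay * sum over i>0, j of w(i,j)^2
    //         + bias_decay   * sum over j      of w(0,j)^2
    // (sums of absolute values instead of squares when L1_penalty is true)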

template<class T>
void affineMatrixInitialize TMat< T >  W,
bool  output_on_columns = true,
real  scale = 1.0
[inline]
 

Definition at line 5554 of file TMat_maths_impl.h.

References PLearn::TMat< T >::clear(), fill_random_uniform(), PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

void PLearn::affineNormalization Mat  data,
Mat  W,
Vec  bias,
real  regularizer = 0
 

Definition at line 460 of file plapack.cc.

References computeMeanAndCovar(), eigen_SymmMat(), sqrt(), and PLearn::TMat< T >::width().

VarArray PLearn::allSources const VarArray &  v  ) 
 

returns all sources that influence the given vars

Definition at line 1094 of file VarArray.cc.

References PLearn::VarArray::sources(), and PLearn::VarArray::unmarkAncestors().

string PLearn::append_slash const string path  ) 
 

appends a trailing slash to path if there isn't already one

Definition at line 254 of file stringutils.cc.

References slash, and slash_char.

Referenced by abspath(), PLearn::DiskVMatrix::build_(), PLearn::VMatrix::getFieldInfos(), PLearn::VMatrix::loadFieldInfos(), PLearn::VMatrix::lockMetaDataDir(), PLearn::SequentialModelSelector::matlabSave(), PLearn::SequentialLearner::matlabSave(), matlabSave(), PLearn::PTester::perform(), PLearn::SequentialValidation::run(), PLearn::Experiment::run(), PLearn::VMatrix::saveFieldInfos(), PLearn::SequentialValidation::setExperimentDirectory(), PLearn::SequentialModelSelector::setExperimentDirectory(), PLearn::SequentialModelSelector::test(), PLearn::SequentialModelSelector::train(), PLearn::AdaBoost::train(), PLearn::VMatrix::unlockMetaDataDir(), and PLearn::PrecomputedVMatrix::usePrecomputed().

template<class T>
void apply T(*  func)(const TVec< T > &, const TVec< T > &),
const TMat< T > &  m1,
const TMat< T > &  m2,
TMat< T > &  dest
 

Definition at line 5337 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and PLERROR.

template<class T>
void apply T(*  func)(const TVec< T > &),
const TMat< T > &  m,
TMat< T > &  dest
 

Definition at line 5327 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and PLERROR.

template<class T, class U, class V>
void apply const TVec< T > &  src1,
const TVec< U > &  src2,
TVec< V > &  dest,
V(*  func)(T, U)
 

Transform a vector of T and a vector of U into a vector of V, through a binary function.

Definition at line 1400 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

template<class T, class U>
void apply const TVec< T > &  source,
TVec< U > &  destination,
U(*  func)(T)
 

Transform a vector of T into a vector of U through a unary function.

Definition at line 1385 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

template<class T, class U, class V>
TVec<U> apply const TVec< T > &  vec,
U(*  func)(V)
 

Transform a vector of T into a vector of U through a unary function.

Note: output type need not be specified in this case

Definition at line 1376 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length().

Referenced by PLearn::Kernel::apply(), PLearn::LogAddVariable::bprop(), PLearn::MixtureRandomVariable::EMBprop(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::MultinomialRandomVariable::EMUpdate(), PLearn::MixtureRandomVariable::EMUpdate(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), exp(), PLearn::LogAddVariable::fprop(), and PLearn::Function::verifyGradient().
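
A minimal sketch of the unary apply() overloads; square_fn is a hypothetical helper introduced only for the example:

    static real square_fn(real x) { return x * x; }

    TVec<real> v(3);
    v[0] = 1; v[1] = 4; v[2] = 9;
    TVec<real> r(3);
    apply(v, r, square_fn);              // r == [1, 16, 81]
    TVec<real> r2 = apply(v, square_fn); // overload that allocates and returns the result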

Var argmax Var  v  )  [inline]
 

Definition at line 76 of file ArgmaxVariable.h.

template<class T>
int argmax const TMat< T > &  m  ) 
 

return maxi*width+maxj

Definition at line 4000 of file TMat_maths_impl.h.

References argmax(), and PLearn::TMat< T >::width().

template<class T>
void argmax const TMat< T > &  mat,
int maxi,
int maxj
 

Definition at line 3969 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
int argmax const TVec< T > &  vec,
bool  ignore_missing
 

Definition at line 575 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), MISSING_VALUE, and PLERROR.

template<class T>
int argmax const TVec< T > &  vec  ) 
 

Definition at line 556 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by argmax(), classification_confusion_matrix(), columnArgmax(), PLearn::ClassMarginCostFunction::evaluate(), PLearn::ClassErrorCostFunction::evaluate(), PLearn::ClassDistanceProportionCostFunction::evaluate(), findClosestPairsOfDifferentClass(), PLearn::MiniBatchClassificationLossVariable::fprop(), PLearn::ClassificationLossVariable::fprop(), PLearn::ArgmaxVariable::fprop(), PLearn::GraphicalBiText::init(), loadUSPS(), PLearn::Grapher::plot_2D_classification(), rowArgmax(), PLearn::MaxVariable::symbolicBprop(), and PLearn::SequentialModelSelector::train().
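
A small sketch of argmax() on a vector and on a matrix (hypothetical values; the TMat version fills maxi/maxj, and the int-returning TMat version gives maxi*width+maxj):

    TVec<real> v(4);
    v[0] = 3; v[1] = 7; v[2] = 2; v[3] = 5;
    int i = argmax(v);            // 1

    TMat<real> m(2, 3);
    m.fill(0);
    m(1, 2) = 8;
    int maxi, maxj;
    argmax(m, maxi, maxj);        // maxi == 1, maxj == 2
    int flat = argmax(m);         // 1 * 3 + 2 == 5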

Var argmin Var  v  )  [inline]
 

Definition at line 76 of file ArgminVariable.h.

template<class T>
int argmin const TMat< T > &  m  ) 
 

return mini*width+minj

Definition at line 3991 of file TMat_maths_impl.h.

References argmin(), and PLearn::TMat< T >::width().

template<class T>
void argmin const TMat< T > &  mat,
int mini,
int minj
 

Stores the position of the min in the 'mini' & 'minj' arg.

Definition at line 3947 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
int argmin const TVec< T > &  vec,
bool  ignore_missing
 

Definition at line 625 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), MISSING_VALUE, and PLERROR.

template<class T>
int argmin const TVec< T > &  vec  ) 
 

Definition at line 606 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by argmin(), columnArgmin(), PLearn::ArgminVariable::fprop(), PLearn::HTryAll::optimize(), PLearn::HTryCombinations::recursive_optimize(), rowArgmin(), PLearn::MinVariable::symbolicBprop(), and PLearn::SequentialModelSelector::train().

Var argminOf Var  v,
Var  expression,
Var  values_of_v,
VarArray  inputs
[inline]
 

returns the value of v within the_values_of_v that gives the lowest value of expression (which may depend on inputs).

Definition at line 86 of file ArgminOfVariable.h.

void PLearn::autocorrelation_function const VMat &  data,
Mat &  acf
 

Definition at line 611 of file VMat_maths.cc.

References count, k, PLearn::VMat::length(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), sqrt(), square(), and PLearn::VMat::width().

template<class T>
void averageAcrossRowsAndColumns const TMat< T > &  mat,
TVec< T > &  avg_across_rows,
TVec< T > &  avg_across_columns,
bool  ignored
 

Definition at line 3684 of file TMat_maths_impl.h.

References PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLearn::TVec< T >::resize(), and PLearn::TMat< T >::width().

template<class T>
T avgdev const TVec< T > &  vec,
T  meanval
 

Definition at line 349 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.
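
Presumably this is the mean absolute deviation around the supplied mean value (a sketch, not taken verbatim from the source):

    // avgdev(vec, m) = (1/n) * sum_i |vec[i] - m|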

string PLearn::backslash_to_slash string  str  ) 
 

replaces all backslashes with slash

Definition at line 355 of file stringutils.cc.

Var binary_classification_loss Var  network_output,
Var  classnum
[inline]
 

Definition at line 78 of file BinaryClassificationLossVariable.h.

References PLERROR.

Referenced by PLearn::NNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), and PLearn::MultiInstanceNNet::build_().

template<class T>
int binary_search const TMat< T > &  src,
int  c,
T  x
 

Definition at line 313 of file TMat_sort.h.

References k, PLearn::TMat< T >::length(), and x.

template<class T>
int binary_search const TVec< T > &  src,
T  x
 

Definition at line 281 of file TMat_sort.h.

References k, PLearn::TVec< T >::length(), and x.

Referenced by estimatedCumProb(), PLearn::SDBVMFieldRemapIntervals::getDiscreteValue(), positionOfClosestElement(), and rebalanceNClasses().
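
A minimal call sketch; the vector must be sorted, and the exact boundary convention of the returned index should be checked in TMat_sort.h:

    TVec<real> sorted(4);
    sorted[0] = 1; sorted[1] = 3; sorted[2] = 5; sorted[3] = 9;
    int pos = binary_search(sorted, real(4));  // position of 4 relative to the sorted values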

real binomial_sample real  prob1  )  [inline]
 

alias

Definition at line 106 of file random.h.

References bnldev().

Referenced by PLearn::CompactVMatrix::perturb().

template<class T>
void binread istream &  in,
TVec< T > &  v
 

Definition at line 172 of file TVec_impl.h.

References binread(), PLearn::TVec< T >::data(), and PLearn::TVec< T >::resize().

void binread FILE *  in,
double *  x,
int  n
[inline]
 

Definition at line 226 of file pl_io.h.

References binread(), and x.

void binread FILE *  in,
float *  x,
int  n
[inline]
 

Definition at line 224 of file pl_io.h.

References x.

void binread FILE *  in,
unsigned short *  x,
int  n
[inline]
 

Definition at line 220 of file pl_io.h.

References x.

void binread FILE *  in,
short *  x,
int  n
[inline]
 

Definition at line 218 of file pl_io.h.

References x.

void binread FILE *  in,
unsigned int x,
int  n
[inline]
 

Definition at line 216 of file pl_io.h.

References x.

void binread FILE *  in,
int x,
int  n
[inline]
 

Definition at line 214 of file pl_io.h.

References x.

void binread FILE *  in,
double &  x
[inline]
 

Definition at line 203 of file pl_io.h.

References binread(), and x.

void binread FILE *  in,
float &  x
[inline]
 

Definition at line 201 of file pl_io.h.

References x.

void binread FILE *  in,
bool x
[inline]
 

Definition at line 194 of file pl_io.h.

References binread(), and x.

void binread FILE *  in,
unsigned short &  x
[inline]
 

Definition at line 189 of file pl_io.h.

References x.

void binread FILE *  in,
short &  x
[inline]
 

Definition at line 187 of file pl_io.h.

References x.

void binread FILE *  in,
unsigned int x
[inline]
 

Definition at line 185 of file pl_io.h.

References x.

void binread FILE *  in,
int x
[inline]
 

Definition at line 183 of file pl_io.h.

References x.

void binread FILE *  in,
unsigned char &  x
[inline]
 

Definition at line 181 of file pl_io.h.

References x.

void binread FILE *  in,
char &  x
[inline]
 

Definition at line 179 of file pl_io.h.

References x.

template<class A, class B>
void binread FILE *  in,
pair< A, B > &  x
[inline]
 

Definition at line 173 of file pl_io.h.

References binread(), and x.

template<class T>
void binread FILE *  in,
T *  x,
int  n
[inline]
 

Definition at line 165 of file pl_io.h.

References binread(), and x.

void binread istream &  in,
double *  x,
int  n
[inline]
 

Definition at line 140 of file pl_io.h.

References binread(), and x.

void binread istream &  in,
float *  x,
int  n
[inline]
 

Definition at line 138 of file pl_io.h.

References x.

void binread istream &  in,
unsigned short *  x,
int  n
[inline]
 

Definition at line 134 of file pl_io.h.

References x.

void binread istream &  in,
short *  x,
int  n
[inline]
 

Definition at line 132 of file pl_io.h.

References x.

void binread istream &  in,
unsigned int x,
int  n
[inline]
 

Definition at line 130 of file pl_io.h.

References x.

void binread istream &  in,
int x,
int  n
[inline]
 

Definition at line 128 of file pl_io.h.

References x.

void binread istream &  in,
double &  x
[inline]
 

Definition at line 117 of file pl_io.h.

References binread(), and x.

void binread istream &  in,
float &  x
[inline]
 

Definition at line 115 of file pl_io.h.

References x.

void binread istream &  in,
bool x
[inline]
 

Definition at line 108 of file pl_io.h.

References binread(), and x.

void binread istream &  in,
unsigned short &  x
[inline]
 

Definition at line 102 of file pl_io.h.

References x.

void binread istream &  in,
short &  x
[inline]
 

Definition at line 100 of file pl_io.h.

References x.

void binread istream &  in,
unsigned int x
[inline]
 

Definition at line 98 of file pl_io.h.

References x.

void binread istream &  in,
int x
[inline]
 

Definition at line 96 of file pl_io.h.

References x.

void binread istream &  in,
unsigned char &  x
[inline]
 

Definition at line 94 of file pl_io.h.

References x.

void binread istream &  in,
char &  x
[inline]
 

Definition at line 92 of file pl_io.h.

References x.

template<class A, class B>
void binread istream &  in,
pair< A, B > &  x
[inline]
 

Definition at line 86 of file pl_io.h.

References binread(), and x.

template<class T>
void binread istream &  in,
T *  x,
int  n
[inline]
 

Definition at line 78 of file pl_io.h.

References x.

Referenced by binread(), binread_compressed(), binread_double(), binreadField(), PLearn::RowMapSparseMatrix< real >::read(), read_compr_mode_and_size(), and PLearn::Learner::test().

void binread_ PStream in,
unsigned long *  x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
long *  x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
unsigned int x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
int x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
unsigned short *  x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
short *  x,
unsigned int  n,
unsigned char  typecode
 

void binread_ PStream in,
unsigned char *  x,
unsigned int  n,
unsigned char  typecode
[inline]
 

Definition at line 877 of file PStream.h.

References binread_(), and x.

void binread_ PStream in,
signed char *  x,
unsigned int  n,
unsigned char  typecode
[inline]
 

Definition at line 874 of file PStream.h.

References binread_(), and x.

void binread_ PStream in,
char *  x,
unsigned int  n,
unsigned char  typecode
[inline]
 

Definition at line 861 of file PStream.h.

References PLERROR, PLearn::PStream::read(), and x.

template<class Iterator>
void binread_ PStream in,
Iterator  it,
unsigned int  n,
unsigned char  typecode
 

Definition at line 847 of file PStream.h.

References PLERROR.

void PLearn::binread_ PStream in,
float *  x,
unsigned int  n,
unsigned char  typecode
 

Definition at line 1457 of file PStream.cc.

References endianswap(), PLERROR, PLearn::PStream::read(), val, and x.

void PLearn::binread_ PStream in,
double *  x,
unsigned int  n,
unsigned char  typecode
 

The binread_ functions for float and double are special.

Definition at line 1411 of file PStream.cc.

References endianswap(), PLERROR, val, and x.

void PLearn::binread_ PStream in,
bool x,
unsigned int  n,
unsigned char  typecode
 

Definition at line 1360 of file PStream.cc.

References PLearn::PStream::get(), PLERROR, and x.

Referenced by binread_(), PLearn::TMat< pair< real, real > >::read(), and readSequence().

void PLearn::binread_compressed FILE *  in,
float *  data,
int  l
 

Definition at line 380 of file pl_io.cc.

References binread(), mode, PLERROR, read_compr_mode_and_size(), and val.

void PLearn::binread_compressed FILE *  in,
double *  data,
int  l
 

Definition at line 332 of file pl_io.cc.

References binread(), mode, PLERROR, and read_compr_mode_and_size().

void PLearn::binread_compressed istream &  in,
float *  data,
int  l
 

Definition at line 217 of file pl_io.cc.

References binread(), mode, PLERROR, read_compr_mode_and_size(), and val.

void PLearn::binread_compressed istream &  in,
double *  data,
int  l
 

Definition at line 129 of file pl_io.cc.

References binread(), mode, PLERROR, and read_compr_mode_and_size().

Referenced by PLearn::DiskVMatrix::getNewRow().

template<class T>
void binread_double istream &  in,
TVec< T > &  v
 

Definition at line 195 of file TVec_impl.h.

References binread(), binread_double(), PLearn::TVec< T >::data(), and PLearn::TVec< T >::resize().

void binread_double FILE *  in,
float *  x,
int  n
[inline]
 

Definition at line 232 of file pl_io.h.

References binread(), and x.

void binread_double FILE *  in,
double *  x,
int  n
[inline]
 

Definition at line 230 of file pl_io.h.

References x.

void binread_double FILE *  in,
float &  x
[inline]
 

Definition at line 209 of file pl_io.h.

References binread_double(), and x.

void binread_double FILE *  in,
double &  x
[inline]
 

Definition at line 207 of file pl_io.h.

References x.

void binread_double istream &  in,
float *  x,
int  n
[inline]
 

Definition at line 146 of file pl_io.h.

References binread(), and x.

void binread_double istream &  in,
double *  x,
int  n
[inline]
 

Definition at line 144 of file pl_io.h.

References x.

void binread_double istream &  in,
float &  x
[inline]
 

Definition at line 123 of file pl_io.h.

References binread_double(), and x.

void binread_double istream &  in,
double &  x
[inline]
 

Definition at line 121 of file pl_io.h.

References x.

Referenced by binread_double(), and binreadField_double().

template<class T>
void binreadField istream &  in,
const string fieldname,
T &  x
 

Definition at line 244 of file pl_io_deprecated.h.

References binread(), readFieldName(), and x.

template<class T>
void binreadField_double istream &  in,
const string fieldname,
T &  x
 

Definition at line 252 of file pl_io_deprecated.h.

References binread_double(), readFieldName(), and x.

template<class T>
void binwrite ostream &  out,
const TVec< T > &  v
 

Definition at line 161 of file TVec_impl.h.

References binwrite(), PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

void binwrite FILE *  out,
const double *  x,
int  n
[inline]
 

Definition at line 225 of file pl_io.h.

References binwrite(), and x.

void binwrite FILE *  out,
const float *  x,
int  n
[inline]
 

Definition at line 223 of file pl_io.h.

References x.

void binwrite FILE *  out,
const unsigned short *  x,
int  n
[inline]
 

Definition at line 219 of file pl_io.h.

References x.

void binwrite FILE *  out,
const short *  x,
int  n
[inline]
 

Definition at line 217 of file pl_io.h.

References x.

void binwrite FILE *  out,
const unsigned int x,
int  n
[inline]
 

Definition at line 215 of file pl_io.h.

References x.

void binwrite FILE *  out,
const int x,
int  n
[inline]
 

multi-element versions, giving address and number of elements

Definition at line 213 of file pl_io.h.

References x.

void binwrite FILE *  out,
double  x
[inline]
 

Definition at line 202 of file pl_io.h.

References binwrite(), and x.

void binwrite FILE *  out,
float  x
[inline]
 

Definition at line 200 of file pl_io.h.

References x.

void binwrite FILE *  out,
bool  x
[inline]
 

note that bools are saved as unsigned short

Definition at line 191 of file pl_io.h.

References binwrite(), and x.

void binwrite FILE *  out,
unsigned short  x
[inline]
 

Definition at line 188 of file pl_io.h.

References x.

void binwrite FILE *  out,
short  x
[inline]
 

Definition at line 186 of file pl_io.h.

References x.

void binwrite FILE *  out,
unsigned int  x
[inline]
 

Definition at line 184 of file pl_io.h.

References x.

void binwrite FILE *  out,
int  x
[inline]
 

Definition at line 182 of file pl_io.h.

References x.

void binwrite FILE *  out,
unsigned char  x
[inline]
 

Definition at line 180 of file pl_io.h.

References x.

void binwrite FILE *  out,
char  x
[inline]
 

binwrite and binread for a few basic types

Definition at line 178 of file pl_io.h.

References x.

template<class A, class B>
void binwrite FILE *  out,
const pair< A, B >  x
[inline]
 

Definition at line 169 of file pl_io.h.

References binwrite(), and x.

template<class T>
void binwrite FILE *  out,
const T *  x,
int  n
[inline]
 

general purpose (but less efficient) version for pointers to things that have a binwrite/binread function

Definition at line 161 of file pl_io.h.

References binwrite(), and x.

void binwrite ostream &  out,
const double *  x,
int  n
[inline]
 

Definition at line 139 of file pl_io.h.

References binwrite(), and x.

void binwrite ostream &  out,
const float *  x,
int  n
[inline]
 

Definition at line 137 of file pl_io.h.

References x.

void binwrite ostream &  out,
const unsigned short *  x,
int  n
[inline]
 

Definition at line 133 of file pl_io.h.

References x.

void binwrite ostream &  out,
const short *  x,
int  n
[inline]
 

Definition at line 131 of file pl_io.h.

References x.

void binwrite ostream &  out,
const unsigned int x,
int  n
[inline]
 

Definition at line 129 of file pl_io.h.

References x.

void binwrite ostream &  out,
const int x,
int  n
[inline]
 

multi-element versions, giving address and number of elements

Definition at line 127 of file pl_io.h.

References x.

void binwrite ostream &  out,
double  x
[inline]
 

Definition at line 116 of file pl_io.h.

References binwrite(), and x.

void binwrite ostream &  out,
float  x
[inline]
 

Definition at line 114 of file pl_io.h.

References x.

void binwrite ostream &  out,
bool  x
[inline]
 

note that bools are saved as unsigned short

Definition at line 104 of file pl_io.h.

References binwrite(), and x.

void binwrite ostream &  out,
unsigned short  x
[inline]
 

Definition at line 101 of file pl_io.h.

References x.

void binwrite ostream &  out,
short  x
[inline]
 

Definition at line 99 of file pl_io.h.

References x.

void binwrite ostream &  out,
unsigned int  x
[inline]
 

Definition at line 97 of file pl_io.h.

References x.

void binwrite ostream &  out,
int  x
[inline]
 

Definition at line 95 of file pl_io.h.

References x.

void binwrite ostream &  out,
unsigned char  x
[inline]
 

Definition at line 93 of file pl_io.h.

References x.

void binwrite ostream &  out,
char  x
[inline]
 

binwrite and binread for a few basic types

Definition at line 91 of file pl_io.h.

References x.

template<class A, class B>
void binwrite ostream &  out,
const pair< A, B >  x
[inline]
 

Definition at line 82 of file pl_io.h.

References binwrite(), and x.

template<class T>
void binwrite ostream &  out,
const T *  x,
int  n
[inline]
 

general purpose (but less efficient) version for pointers to things that have a binwrite/binread function

Definition at line 74 of file pl_io.h.

References x.

Referenced by binwrite(), binwrite_compressed(), binwrite_double(), binwriteField(), main(), PLearn::Learner::test(), PLearn::RowMapSparseMatrix< real >::write(), and write_compr_mode_and_size().
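
A minimal round-trip sketch using the ostream/istream overloads above (a hypothetical snippet; error handling omitted, and roundtrip_example is just an illustrative name):

    #include <fstream>
    using namespace PLearn;

    void roundtrip_example()
    {
        std::ofstream out("tmp.bin", std::ios::binary);
        binwrite(out, int(42));
        binwrite(out, 3.14);
        out.close();

        std::ifstream in("tmp.bin", std::ios::binary);
        int n; double d;
        binread(in, n);    // n == 42
        binread(in, d);    // d == 3.14
        in.close();
    }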

void binwrite_ PStream out,
double *  x,
unsigned int  n
[inline]
 

Definition at line 841 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const double *  x,
unsigned int  n
[inline]
 

Definition at line 839 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
float *  x,
unsigned int  n
[inline]
 

Definition at line 836 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const float *  x,
unsigned int  n
[inline]
 

Definition at line 834 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
unsigned long *  x,
unsigned int  n
[inline]
 

Definition at line 831 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const unsigned long *  x,
unsigned int  n
[inline]
 

Definition at line 829 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
long *  x,
unsigned int  n
[inline]
 

Definition at line 826 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const long *  x,
unsigned int  n
[inline]
 

Definition at line 824 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
unsigned int x,
unsigned int  n
[inline]
 

Definition at line 821 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const unsigned int x,
unsigned int  n
[inline]
 

Definition at line 819 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
int x,
unsigned int  n
[inline]
 

Definition at line 816 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const int x,
unsigned int  n
[inline]
 

Definition at line 814 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
unsigned short *  x,
unsigned int  n
[inline]
 

Definition at line 811 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const unsigned short *  x,
unsigned int  n
[inline]
 

Definition at line 809 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
short *  x,
unsigned int  n
[inline]
 

Definition at line 806 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const short *  x,
unsigned int  n
[inline]
 

Definition at line 804 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
unsigned char *  x,
unsigned int  n
[inline]
 

Definition at line 801 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const unsigned char *  x,
unsigned int  n
[inline]
 

Definition at line 799 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
signed char *  x,
unsigned int  n
[inline]
 

Definition at line 796 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const signed char *  x,
unsigned int  n
[inline]
 

Definition at line 794 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
char *  x,
unsigned int  n
[inline]
 

Definition at line 791 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const char *  x,
unsigned int  n
[inline]
 

Definition at line 789 of file PStream.h.

References PLearn::PStream::write(), and x.

void binwrite_ PStream out,
const bool x,
unsigned int  n
[inline]
 

Definition at line 778 of file PStream.h.

References PLearn::PStream::put(), and x.

template<class Iterator>
void binwrite_ PStream out,
Iterator &  it,
unsigned int  n
 

Serialization of sequences.

Definition at line 765 of file PStream.h.

References PLearn::PStream::outmode.

Referenced by PLearn::TMat< pair< real, real > >::write(), and writeSequence().

void PLearn::binwrite_compressed FILE *  out,
const float *  data,
int  l
 

Definition at line 420 of file pl_io.cc.

References PLERROR.

void PLearn::binwrite_compressed FILE *  out,
const double *  data,
int  l
 

Definition at line 375 of file pl_io.cc.

References PLERROR.

void PLearn::binwrite_compressed ostream &  out,
const float *  data,
int  l
 

Definition at line 257 of file pl_io.cc.

References binwrite(), val, and write_compr_mode_and_size().

void PLearn::binwrite_compressed ostream &  out,
const double *  data,
int  l
 

version for compressed arrays (efficient for sparse data and small integer values); the format is detailed in the .cc file (see the write_compr_mode_and_size function in general.cc)

Definition at line 172 of file pl_io.cc.

References binwrite(), val, and write_compr_mode_and_size().

Referenced by PLearn::DiskVMatrix::appendRow().

template<class T>
void binwrite_double ostream &  out,
const TVec< T > &  v
 

Definition at line 184 of file TVec_impl.h.

References binwrite(), binwrite_double(), PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

void binwrite_double FILE *  out,
const float *  x,
int  n
[inline]
 

Definition at line 231 of file pl_io.h.

References binwrite(), and x.

void binwrite_double FILE *  out,
const double *  x,
int  n
[inline]
 

Definition at line 229 of file pl_io.h.

References x.

void binwrite_double FILE *  out,
float  x
[inline]
 

Definition at line 208 of file pl_io.h.

References binwrite_double(), and x.

void binwrite_double FILE *  out,
double  x
[inline]
 

Definition at line 206 of file pl_io.h.

References x.

void binwrite_double ostream &  out,
const float *  x,
int  n
[inline]
 

Definition at line 145 of file pl_io.h.

References binwrite(), and x.

void binwrite_double ostream &  out,
const double *  x,
int  n
[inline]
 

Definition at line 143 of file pl_io.h.

References x.

void binwrite_double ostream &  out,
float  x
[inline]
 

Definition at line 122 of file pl_io.h.

References binwrite_double(), and x.

void binwrite_double ostream &  out,
double  x
[inline]
 

Definition at line 120 of file pl_io.h.

References x.

Referenced by binwrite_double(), and binwriteField_double().

template<class T>
void binwriteField ostream &  out,
const string fieldname,
const T &  x
 

generic field BINARY writing and reading

Definition at line 240 of file pl_io_deprecated.h.

References binwrite(), writeFieldName(), and x.

template<class T>
void binwriteField_double ostream &  out,
const string fieldname,
const T &  x
 

Definition at line 248 of file pl_io_deprecated.h.

References binwrite_double(), writeFieldName(), and x.

real PLearn::bnldev(real pp, int n = 1)


Returns a binomial random number with success probability 'pp' and number of trials 'n'.

Definition at line 468 of file random.cc.

References exp(), log(), log_gamma(), Pi, sqrt(), and uniform_sample().

Referenced by binomial_sample().
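
A small usage sketch (an illustration, not from the source; it assumes random.h as the header location and that the generator has been seeded elsewhere, e.g. with manual_seed()):

    #include <plearn/math/random.h>   // assumed header path for bnldev

    double average_successes()
    {
        double sum = 0;
        for (int i = 0; i < 1000; ++i)
            sum += PLearn::bnldev(0.3, 10);   // successes out of n = 10 trials, p = 0.3
        return sum / 1000;                    // expected to be close to n*p = 3
    }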

VMat PLearn::bootstrap(VMat d, bool reorder = true, bool norepeat = true)


Returns a SelectRowsVMatrix whose rows are a bootstrap sample of d's rows (sampled with replacement and optionally re-ordered).

Optionally the repeated rows are eliminated (this is actually done by shuffling and keeping the first 2/3 of the rows, so the length() will always be the same). Note that the default values are fine for "on-line" learning algorithms but do not correspond to the usual "bootstrap".

Definition at line 782 of file VMat_maths.cc.

References PLearn::VMat::length(), PLearn::TVec< T >::resize(), PLearn::VMat::rows(), shuffleElements(), sortElements(), PLearn::TVec< T >::subVec(), uniform_multinomial_sample(), and Vec.
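
For illustration, a hedged sketch of calling it (the function name below is hypothetical; 'trainset' is assumed to be an existing VMat):

    PLearn::VMat make_bootstrap_view(PLearn::VMat trainset)
    {
        // Default flags: re-ordered and with repeated rows removed
        // (fine for "on-line" learners, per the note above).
        PLearn::VMat online_boot = PLearn::bootstrap(trainset);

        // Keeping repeated rows (norepeat = false) is closer to the usual bootstrap.
        PLearn::VMat usual_boot = PLearn::bootstrap(trainset, false /*reorder*/, false /*norepeat*/);
        return usual_boot;
    }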

template<class T>
void bootstrap_rows(const TMat<T>& source, TMat<T> destination)


Samples the rows of source with replacement and puts them in destination.

Definition at line 116 of file random.h.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::resize(), uniform_multinomial_sample(), and PLearn::TMat< T >::width().

real PLearn::bounded_uniform(real a, real b)


Returns a random number uniformly distributed between a and b.

Definition at line 287 of file random.cc.

References RNMX, and uniform_sample().

Referenced by PLearn::UniformSampleVariable::fprop(), PLearn::UniformDistribution::generate(), and PLearn::SpiralDistribution::generate().

template<class T>
void bprop_tanh const TVec< T > &  tanh_x,
const TVec< T > &  d_tanh_x,
TVec< T > &  d_x
 

Definition at line 939 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

void bprop_update_layer real dy,
real x,
real dx,
real w,
int  n_y,
int  n_x,
real  learning_rate,
real  weight_decay
[inline]
 

Definition at line 398 of file TMat_maths_specialisation.h.

References prefetchnta, and x.

template<class T>
void bpropCholeskyDecomposition const TMat< T > &  A,
const TMat< T > &  L,
TMat< T > &  dC_dA,
TMat< T > &  dC_dL
 

Definition at line 4862 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), and PLearn::TMat< T >::resize().

template<class T>
void bpropCholeskySolve const TMat< T > &  L,
const TVec< T > &  x,
const TVec< T > &  y,
TMat< T > &  dC_dL,
TVec< T > &  dC_db,
TVec< T > &  dC_dx
 

Definition at line 5027 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), k, PLearn::TMat< T >::length(), and x.

char byte_order() [inline]
 

Definition at line 53 of file byte_order.h.

References BIG_ENDIAN_ORDER, and LITTLE_ENDIAN_ORDER.

Referenced by PLearn::IntVecFile::append(), PLearn::DiskVMatrix::build_(), PLearn::IntVecFile::getVec(), PLearn::IntVecFile::IntVecFile(), PLearn::PStream::operator>>(), PLearn::TMat< pair< real, real > >::read(), readSequence(), PLearn::TMat< pair< real, real > >::write(), PLearn::IntVecFile::writeFileSignature(), and writeSequence().

string PLearn::center const string s,
size_t  width,
char  padding = ' '
 

Definition at line 72 of file stringutils.cc.

Referenced by PLearn::TextProgressBarPlugin::addProgressBar(), computeLocalPrincipalComponents(), PLearn::GaussMix::computeMeansAndCovariances(), displayVarGraph(), PLearn::ConditionalDensityNet::initialize_mu(), OldDisplayVarGraph(), operator<<(), PLearn::ManifoldParzen2::train(), and PLearn::TextProgressBarPlugin::update().

int PLearn::chdir(const string path)


Changes the current directory.

Definition at line 100 of file fileutils.cc.

References PLERROR.

Referenced by abspath(), and readAndMacroProcess().

void check_prob Set  Y,
const map< int, real > &  pYx
[inline]
 

Definition at line 487 of file ProbabilitySparseMatrix.h.

References PLearn::Set::contains(), and PLERROR.

void check_prob ProbabilitySparseMatrix &  pYX,
string  Yname,
string  Xname
[inline]
 

Definition at line 463 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::Set::end(), PLearn::ProbabilitySparseMatrix::getPYx(), PLERROR, SetIterator, x, and PLearn::ProbabilitySparseMatrix::X.

template<class T>
void choleskyDecomposition const TMat< T > &  A,
TMat< T > &  L
 

Definition at line 4794 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, PLWARNING, PLearn::TMat< T >::resize(), sqrt(), sum(), and PLearn::TMat< T >::width().

Referenced by choleskyInvert(), choleskySolve(), logOfNormal(), solveLinearSystemByCholesky(), and solveTransposeLinearSystemByCholesky().

template<class T>
TMat<T> choleskyInvert const TMat< T > &  A  ) 
 

Definition at line 5164 of file TMat_maths_impl.h.

References choleskyInvert(), and PLearn::TMat< T >::length().

template<class T>
real choleskyInvert const TMat< T > &  A,
TMat< T > &  Ainv
 

Definition at line 5094 of file TMat_maths_impl.h.

References choleskyDecomposition(), PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), log(), PLearn::TMat< T >::resize(), and sum().

Referenced by choleskyInvert().

template<class T>
void choleskySolve(const TMat<T>& L, TVec<T> b, TVec<T> x) [inline]


Parameters:
x  result vector (this overload allocates the intermediate vector y itself, so that passing y is optional)

Definition at line 5733 of file TMat_maths_impl.h.

References choleskySolve(), and x.

template<class T>
TVec<T> choleskySolve const TMat< T > &  A,
const TVec< T > &  b
 

Definition at line 5151 of file TMat_maths_impl.h.

References choleskyDecomposition(), choleskySolve(), PLearn::TMat< T >::length(), and x.

template<class T>
void choleskySolve const TMat< T > &  L,
const TMat< T > &  B,
TMat< T > &  X,
TVec< T > &  y
 

Definition at line 4953 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, sum(), and PLearn::TMat< T >::width().

template<class T>
void choleskySolve const TMat< T > &  L,
TVec< T >  b,
TVec< T >  x,
TVec< T > &  y
 

Definition at line 4915 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, sum(), PLearn::TMat< T >::width(), and x.

Referenced by choleskySolve(), logOfNormal(), solveLinearSystemByCholesky(), and solveTransposeLinearSystemByCholesky().
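
A sketch of the typical call sequence for solving a symmetric positive-definite system with the two routines above (illustration only; the caller is assumed to supply a filled A and b):

    PLearn::Vec solve_spd(const PLearn::Mat& A, const PLearn::Vec& b)
    {
        PLearn::Mat L;                         // choleskyDecomposition resizes L as needed
        PLearn::Vec x(b.length());
        PLearn::choleskyDecomposition(A, L);   // A = L L'
        PLearn::choleskySolve(L, b, x);        // forward/back substitution: solves A x = b
        return x;
    }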

CostFunc class_distance_proportion  )  [inline]
 

if outputs are neg distances to each class: dist_to_correct_class/(dist_to_correct_class+dist_to_closest_other_class)

Definition at line 79 of file ClassDistanceProportionCostFunction.h.

CostFunc class_error bool  output_is_classnum = false  )  [inline]
 

Definition at line 98 of file ClassErrorCostFunction.h.

Referenced by PLearn::ClassifierFromDensity::computeCostsFromOutputs().

CostFunc class_lift bool  make_positive = false  )  [inline]
 

Definition at line 87 of file LiftBinaryCostFunction.h.

CostFunc class_margin(bool binary_target_is_01 = false, bool output_is_positive = false) [inline]


Difference between the correct class score and the maximum of the other classes' scores.

Definition at line 96 of file ClassMarginCostFunction.h.

template<class T>
void classification_confusion_matrix TMat< T >  outputs,
TMat< T >  target_classes,
TMat< T >  confusion_matrix
 

Definition at line 5618 of file TMat_maths_impl.h.

References argmax(), and PLearn::TMat< T >::length().

Var classification_loss Var  network_output,
Var  classnum
[inline]
 

Definition at line 79 of file ClassificationLossVariable.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), and PLearn::NeighborhoodSmoothnessNNet::build_().

template<class T>
void clear const TMat< T > &  x  ) 
 

Definition at line 609 of file TMat_impl.h.

References x.

void clear_1 bool x  )  [inline]
 

Definition at line 189 of file general.h.

References x.

void clear_1 double &  x  )  [inline]
 

Definition at line 188 of file general.h.

References x.

void clear_1 float &  x  )  [inline]
 

Definition at line 187 of file general.h.

References x.

void clear_1 unsigned long &  x  )  [inline]
 

Definition at line 186 of file general.h.

References x.

void clear_1 long &  x  )  [inline]
 

Definition at line 185 of file general.h.

References x.

void clear_1 unsigned int x  )  [inline]
 

Definition at line 184 of file general.h.

References x.

void clear_1 int x  )  [inline]
 

Definition at line 183 of file general.h.

References x.

void clear_1 unsigned short &  x  )  [inline]
 

Definition at line 182 of file general.h.

References x.

void clear_1 short &  x  )  [inline]
 

Definition at line 181 of file general.h.

References x.

void clear_1 signed char &  x  )  [inline]
 

Definition at line 180 of file general.h.

References x.

void clear_1 unsigned char &  x  )  [inline]
 

Definition at line 179 of file general.h.

References x.

void clear_1 char &  x  )  [inline]
 

Definition at line 178 of file general.h.

References x.

template<class T>
void clear_1 T &  x  )  [inline]
 

Clearing an element (called by clear_n...). Default implementation for clearing any type.

(This will work for objects, but not for base types like int, because the default "constructor" for int leaves it uninitialised; hence the specialisations below.)

Definition at line 175 of file general.h.

References x.

Referenced by clear_n().

void clear_n unsigned long *  begin,
int  n
[inline]
 

Definition at line 233 of file general.h.

void clear_n long *  begin,
int  n
[inline]
 

Definition at line 230 of file general.h.

void clear_n unsigned int begin,
int  n
[inline]
 

Definition at line 227 of file general.h.

void clear_n int begin,
int  n
[inline]
 

Definition at line 224 of file general.h.

void clear_n unsigned short *  begin,
int  n
[inline]
 

Definition at line 221 of file general.h.

void clear_n short *  begin,
int  n
[inline]
 

Definition at line 218 of file general.h.

void clear_n unsigned char *  begin,
int  n
[inline]
 

Definition at line 215 of file general.h.

void clear_n char *  begin,
int  n
[inline]
 

Definition at line 212 of file general.h.

void clear_n bool begin,
int  n
[inline]
 

Definition at line 209 of file general.h.

void clear_n double *  begin,
int  n
[inline]
 

Definition at line 206 of file general.h.

void clear_n float *  begin,
int  n
[inline]
 

efficient specialisation for built-in types

Definition at line 203 of file general.h.

template<class For>
void clear_n For  begin,
int  n
[inline]
 

clears n elements starting at iterator position begin

Definition at line 193 of file general.h.

References clear_1().

Referenced by PLearn::TVec< pair< real, real > >::clear(), PLearn::TMat< pair< real, real > >::clear(), PLearn::Storage< pair< real, real > >::mem_alloc(), and PLearn::Storage< pair< real, real > >::resize().
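
A brief sketch of both code paths (illustration only; the header path is an assumption):

    #include <string>
    #include <plearn/base/general.h>   // assumed header path for clear_n / clear_1

    void demo_clear_n()
    {
        double buffer[8];
        PLearn::clear_n(buffer, 8);    // built-in specialisation: sets the 8 doubles to 0

        std::string labels[4];
        PLearn::clear_n(labels, 4);    // generic version: clear_1 assigns a default value to each
    }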

Vec closestPointOnHyperplane const Vec &  x,
const Mat &  points,
real  weight_decay = 0.
[inline]
 

closest point to x on hyperplane that passes through all points (with weight decay)

Definition at line 615 of file plapack.h.

References constrainedLinearRegression(), transposeProduct(), and x.

Referenced by hyperplaneDistance().

real color int  colornum,
real  lightness
 

Definition at line 559 of file Grapher.cc.

References PLERROR, and rgb2real().

Referenced by displayPoints(), PLearn::RGBImage::fill(), PLearn::Grapher::plot_2D_classification(), and PLearn::GraphicalBiText::printNode().

template<class T>
void columnArgmax const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4252 of file TMat_maths_impl.h.

References argmax(), PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void columnArgmin const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4267 of file TMat_maths_impl.h.

References argmin(), PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
TMat<T> columnmatrix const TVec< T > &  v  )  [inline]
 

returns a view of this vector as a single column matrix

Definition at line 739 of file TMat_impl.h.

References PLearn::TVec< T >::length(), and PLearn::TVec< T >::toMat().

Referenced by PLearn::GaussMix::computeMeansAndCovariances(), PLearn::LocallyWeightedDistribution::log_density(), PLearn::GaussMix::updateFromConditionalSorting(), and PLearn::GaussMix::updateSampleWeights().

template<class T>
void columnMax const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4230 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), max(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::MatrixSoftmaxVariable::fprop().

template<class T>
void columnMean const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4170 of file TMat_maths_impl.h.

References columnSum(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by computeLocalPrincipalComponents(), computeMean(), computeMeanAndCovar(), computeMeanAndStddev(), computeMeanAndVariance(), PLearn::StackedLearner::computeOutput(), PLearn::MovingAverage::test(), PLearn::MovingAverage::train(), and PLearn::GaussianProcessRegressor::train().

template<class T>
void columnMin const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4241 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), min(), PLERROR, and PLearn::TMat< T >::width().

Var columnSum Var  v  )  [inline]
 

Definition at line 73 of file ColumnSumVariable.h.

template<class T>
void columnSum const TMat< T > &  mat,
TVec< T > &  result
 

all the operations below result in a row vector and are obtained by iterating (e.g. summing) over the row index, e.g. yielding the sum of each column in the result.

Definition at line 4146 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by columnMean(), PLearn::GaussMix::computeMeansAndCovariances(), PLearn::TimesRowVariable::symbolicBprop(), PLearn::PlusRowVariable::symbolicBprop(), PLearn::MinusTransposedColumnVariable::symbolicBprop(), PLearn::MinusRowVariable::symbolicBprop(), and PLearn::DuplicateRowVariable::symbolicBprop().
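
A small sketch of these column-wise reductions (illustration only):

    void demo_column_reductions()
    {
        PLearn::Mat m(3, 4);
        m.fill(2.0);
        PLearn::Vec sums(4), means(4);
        PLearn::columnSum(m, sums);    // each entry becomes 6 (sum over the 3 rows)
        PLearn::columnMean(m, means);  // each entry becomes 2
    }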

template<class T>
void columnSumOfSquares const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4159 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TVec< T >::length(), PLERROR, sum_of_squares(), and PLearn::TMat< T >::width().

template<class T>
void columnVariance const TMat< T > &  mat,
TVec< T > &  result,
const TVec< T > &  columnmean
 

Definition at line 4199 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, variance(), and PLearn::TMat< T >::width().

Referenced by computeMeanAndStddev(), and computeMeanAndVariance().

template<class T>
void columnWeightedMean const TMat< T > &  mat,
TVec< T > &  result
 

Definition at line 4181 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, PLearn::TMat< T >::toVecCopy(), weighted_mean(), and PLearn::TMat< T >::width().

template<class T>
void columnWeightedVariance const TMat< T > &  mat,
TVec< T > &  result,
const TVec< T > &  column_weighted_mean
 

Definition at line 4210 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), mean(), PLERROR, PLearn::TMat< T >::toVecCopy(), weighted_variance(), and PLearn::TMat< T >::width().

void PLearn::compactRepresentation char *  t  ) 
 

Gives an (intermediate) code for a numeric string (starting with #).

Definition at line 202 of file TypesNumeriques.cc.

References compactRepresentationRangesAndOrdinals(), compactRepresentationShrinkNum(), and compactRepresentationTranslate().

Referenced by numericType().

void compactRepresentationRangesAndOrdinals char *  t  ) 
 

Definition at line 178 of file TypesNumeriques.cc.

Referenced by compactRepresentation().

void compactRepresentationShrinkNum char *  t  ) 
 

Definition at line 148 of file TypesNumeriques.cc.

Referenced by compactRepresentation().

void compactRepresentationTranslate char *  t  ) 
 

Definition at line 119 of file TypesNumeriques.cc.

References ALPHAsymbols, DIGITsymbols, elementOf(), ORDINALS, and stringPos().

Referenced by compactRepresentation().

int compare_string_pointers const void *  ts1,
const void *  ts2
[static]
 

Definition at line 1050 of file MatIO.cc.

Referenced by loadSTATLOG(), and loadUCIMLDB().

template<class T>
void complement_indices TVec< T > &  indices,
int  n,
TVec< T > &  complement_indices,
TVec< T > &  buffer
 

Definition at line 1676 of file TMat_maths_impl.h.

References complement_indices(), PLearn::TVec< T >::data(), PLearn::TVec< T >::fill(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

Referenced by complement_indices().

void PLearn::compress_vec char *  comprbuf,
const double *  data,
int  l,
bool  double_stored_as_float = false
 

Definition at line 535 of file pl_io.cc.

References val, and write_compr_mode_and_size_ptr().

template<class T>
void compressedTransposeProductAcc const TVec< T > &  result,
const TMat< T > &  m,
char *  comprbufvec
 

Definition at line 2366 of file TMat_maths_impl.h.

References endl(), PLearn::TMat< T >::length(), mode, PLERROR, and read_compr_mode_and_size_ptr().

Mat PLearn::compute2dGridOutputs(Learner& learner, real min_x = -1, real max_x = +1, real min_y = -1, real max_y = +1, int length = 200, int width = 200, real singleoutput_threshold = 0.)


This will return a length*width matrix containing the computed outputs for a learner with 2-dimensional input, where the inputs are taken on a regular grid spanning [min_x,max_x] x [min_y,max_y]. The mapping to the matrix m is m(i,j) = f(min_x + i*(max_x-min_x)/(length-1), min_y + j*(max_y-min_y)/(width-1)). If the output is of length 1 (the class depends on which side of the threshold we are), the value put in m is output[0] - singleoutput_threshold. If the output is of length 2 (a score for each class), the value put in m is output[0] - output[1].

Definition at line 622 of file DisplayUtils.cc.

References PLearn::Learner::inputsize(), PLearn::Learner::outputsize(), PLERROR, and PLearn::Learner::use().

Referenced by displayDecisionSurface().
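
To make the index-to-input mapping concrete, here is a hedged re-derivation of the description above (a sketch under the stated assumptions, not the actual implementation; it assumes Learner::use(input, output) fills 'output'):

    PLearn::Mat grid_outputs_sketch(PLearn::Learner& learner,
                                    PLearn::real min_x, PLearn::real max_x,
                                    PLearn::real min_y, PLearn::real max_y,
                                    int length, int width, PLearn::real threshold)
    {
        PLearn::Mat m(length, width);
        PLearn::Vec input(2), output(learner.outputsize());
        for (int i = 0; i < length; ++i)
            for (int j = 0; j < width; ++j)
            {
                input[0] = min_x + i * (max_x - min_x) / (length - 1);
                input[1] = min_y + j * (max_y - min_y) / (width - 1);
                learner.use(input, output);
                m(i, j) = output.length() == 1 ? output[0] - threshold
                                               : output[0] - output[1];
            }
        return m;
    }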

template<class T>
void compute_fastsigmoid const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 1000 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), fastsigmoid(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by fastsigmoid().

template<class T>
void compute_fasttanh const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 963 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), fasttanh(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by fasttanh().

template<class T>
void compute_inverse_sigmoid const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 1018 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), inverse_sigmoid(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by inverse_sigmoid().

Mat PLearn::compute_learner_outputs PP< PLearner >  learner,
VMat  dataset
 

Definition at line 48 of file learner_utils.cc.

References PLearn::VMat::length().

Mat PLearn::compute_learner_outputs_on_grid PP< PLearner >  learner,
int  nx,
int  ny,
real  x0,
real  y0,
real  deltax,
real  deltay
 

Returns a nx*ny x learner->outputsize() matrix of outputs corresponding to the nx*ny grid points.

Definition at line 88 of file learner_utils.cc.

References PLERROR, tostring(), PLearn::ProgressBar::update(), and x.

template<class T>
void compute_log const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 862 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), log(), and PLERROR.

Referenced by log().

template<class T>
void compute_safelog const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 898 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and safelog().

Referenced by safelog().

template<class T>
void compute_sigmoid const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 981 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and sigmoid().

Referenced by sigmoid().

template<class T>
void compute_sqrt const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 880 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and sqrt().

Referenced by sqrt().

template<class T>
void compute_tanh const TVec< T > &  src,
const TVec< T > &  dest
 

Definition at line 925 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and tanh().

Referenced by PLearn::EntropyContrast::computeNNcontinuous_hidden(), and tanh().

Mat PLearn::computeBasicStats VMat  m  ) 
 

The returned Mat is structured as follows:
row 0: mean
row 1: stddev
row 2: min
row 3: max
row 4: nmissing
row 5: nzero (==0)
row 6: npositive (>0)
row 7: nnegative (<0)
row 8: mean of positive
row 9: stddev of positive

Definition at line 128 of file VMat_maths.cc.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::fill(), is_missing(), PLearn::TVec< T >::length(), PLearn::VMat::length(), MAX_ROW, MEAN_ROW, MEANPOS_ROW, MIN_ROW, NMISSING_ROW, NNEGATIVE_ROW, NPOSITIVE_ROW, NZERO_ROW, sqrt(), square(), STDDEV_ROW, STDDEVPOS_ROW, val, and PLearn::VMat::width().

Referenced by computeConditionalMeans().
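
A usage sketch (illustration only; 'dataset' is assumed to be an existing VMat, and the row indices follow the layout listed above):

    #include <iostream>

    void print_basic_stats(PLearn::VMat dataset)
    {
        PLearn::Mat stats = PLearn::computeBasicStats(dataset);
        for (int j = 0; j < dataset.width(); ++j)
            std::cout << "column " << j
                      << ": mean="   << stats(0, j)   // row 0: mean
                      << " stddev="  << stats(1, j)   // row 1: stddev
                      << " max="     << stats(3, j)   // row 3: max
                      << std::endl;
    }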

template<class T>
void computeColumnsMeanAndStddev const TMat< T > &  m,
TMat< T > &  meanvec,
TMat< T > &  stddevvec
 

Compute the mean and standard deviations of the columns of m (looping over rows); the result is stored in the column vectors meanvec and stddevvec.

Definition at line 4345 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), rowMean(), rowVariance(), and sqrt().

Array< Mat > PLearn::computeConditionalMeans VMat  trainset,
int  targetsize,
Mat &  basic_stats
 

Computes the conditional mean and variance of each target, conditioned on the values of a categorical integer input feature. The basic_stats matrix may be passed if previously computed (see computeBasicStats), or an empty matrix may be passed otherwise, in which case the basic statistics will be computed.

An input feature #i is considered a categorical integer input if its min and max (as found in basic_stats) are integers and are not too far apart. For these, the corresponding returned array[i] matrix will contain max-min+1 rows (one for each integer value between min and max inclusive), each row containing the corresponding input value, the number of times it occurred, and the mean and variance of each target. The returned matrix array[i] is empty for input features that are not considered categorical integers.

Definition at line 258 of file VMat_maths.cc.

References computeBasicStats(), is_integer(), PLearn::TMat< T >::isNotEmpty(), k, PLearn::TMat< T >::length(), PLearn::VMat::length(), PLearn::TVec< T >::length(), Mat, MAX_ROW, mean(), MIN_ROW, sqrt(), square(), PLearn::TVec< T >::subVec(), sum(), sumsquare(), variance(), and PLearn::VMat::width().

PP< ConditionalStatsCollector > PLearn::computeConditionalStats VMat  m,
int  condfield,
TVec< RealMapping ranges
 

returns the cooccurence statistics conditioned on the given field

Definition at line 235 of file VMat_maths.cc.

References endl(), PLearn::VMat::length(), and PLearn::VMat::width().

template<class T>
void computeCovar const TMat< T > &  m,
const TVec< T > &  meanvec,
TMat< T > &  covarmat
 

Definition at line 4302 of file TMat_maths_impl.h.

References externalProductScaleAcc(), PLearn::TMat< T >::length(), PLearn::TMat< T >::resize(), transposeProduct(), and PLearn::TMat< T >::width().

void PLearn::computeInputMean VMat  d,
Vec &  meanvec
 

Computes the (possibly weighted) mean and covariance of the input part of the dataset.

This will only call d->getExample.

Definition at line 358 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), PLearn::VMat::getExample(), PLearn::VMat::length(), multiplyAcc(), and PLearn::TVec< T >::resize().

void PLearn::computeInputMeanAndCovar VMat  d,
Vec &  meanvec,
Mat &  covarmat
 

Definition at line 377 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), PLearn::TVec< T >::clear(), externalProductScaleAcc(), PLearn::VMat::getExample(), PLearn::VMat::length(), multiplyAcc(), PLearn::TMat< T >::resize(), and PLearn::TVec< T >::resize().

Referenced by PLearn::GaussMix::computeMeansAndCovariances(), and PLearn::PCA::train().

void PLearn::computeInputMeanAndVariance VMat  d,
Vec &  meanvec,
Vec &  var
 

Definition at line 406 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), PLearn::VMat::getExample(), PLearn::VMat::length(), multiplyAcc(), PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), and var().

Referenced by PLearn::GaussMix::computeMeansAndCovariances().

void computeLocalPrincipalComponents Mat &  dataset,
int  which_pattern,
Mat &  delta_neighbors,
Vec &  eig_values,
Mat &  eig_vectors,
Vec &  mean
 

Definition at line 139 of file ManifoldParzen2.cc.

References center(), columnMean(), computeNearestNeighbors(), computePrincipalComponents(), mean(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ManifoldParzen2::train().

void PLearn::computeMean VMat  d,
Vec &  meanvec
 

Definition at line 115 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), PLearn::VMat::length(), PLearn::TVec< T >::resize(), and PLearn::VMat::width().

template<class T>
void computeMean const TMat< T > &  m,
TVec< T > &  meanvec
[inline]
 

compute the mean of the rows of m (looping over columns)

Definition at line 4291 of file TMat_maths_impl.h.

References columnMean().

Referenced by PLearn::CenteredVMatrix::build_(), computeMeanAndVariance(), and PLearn::EmpiricalDistribution::expectation().

void PLearn::computeMeanAndCovar VMat  d,
Vec &  meanvec,
Mat &  covarmat,
ostream &  logstream = cerr
 

Computes the empirical mean and covariance in a single pass (there are both a parallel and a default sequential implementation).

Definition at line 518 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), PLearn::TVec< T >::clear(), computeMeanAndCovar(), PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), externalProductAcc(), externalProductScaleAcc(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::VMat::length(), PLERROR, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::size(), PLearn::VMat::toMat(), PLearn::TMat< T >::width(), and PLearn::VMat::width().

template<class T>
void computeMeanAndCovar const TMat< T > &  m,
TVec< T > &  meanvec,
TMat< T > &  covarmat
 

Definition at line 4312 of file TMat_maths_impl.h.

References columnMean(), externalProductScaleAcc(), PLearn::TMat< T >::length(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), transposeProduct(), and PLearn::TMat< T >::width().

Referenced by affineNormalization(), computeMeanAndCovar(), logPFittedGaussian(), PLearn::GaussianDistribution::train(), PLearn::ConditionalGaussianDistribution::train(), and PLearn::EmpiricalDistribution::variance().
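
A minimal sketch of the VMat version (illustration only; 'd' is assumed to be an existing VMat):

    void mean_and_covariance(PLearn::VMat d)
    {
        PLearn::Vec mu;
        PLearn::Mat sigma;
        // Resizes mu to width(d) and sigma to width(d) x width(d).
        PLearn::computeMeanAndCovar(d, mu, sigma);
    }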

void PLearn::computeMeanAndStddev VMat  d,
Vec &  meanvec,
Vec &  stddevvec
 

Definition at line 604 of file VMat_maths.cc.

References computeMeanAndVariance(), PLearn::TVec< T >::length(), and sqrt().

template<class T>
void computeMeanAndStddev const TMat< T > &  m,
TVec< T > &  meanvec,
TVec< T > &  stddevvec
 

compute the mean and standard deviations of the rows of m (looping over columns)

Definition at line 4333 of file TMat_maths_impl.h.

References columnMean(), columnVariance(), PLearn::TVec< T >::length(), and sqrt().

Referenced by PLearn::ShiftAndRescaleVMatrix::build_(), loadClassificationDataset(), normalize(), normalizeDataSet(), normalizeDataSets(), and PLearn::TestDependenciesCommand::run().

void PLearn::computeMeanAndVariance VMat  d,
Vec &  meanvec,
Vec &  variancevec
 

Definition at line 336 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), computeMean(), PLearn::VMat::length(), multiply(), PLearn::TVec< T >::resize(), substract(), and PLearn::VMat::width().

template<class T>
void computeMeanAndVariance const TMat< T > &  m,
TVec< T > &  meanvec,
TVec< T > &  variancevec
 

compute the mean and variance of the rows of m (looping over columns)

Definition at line 4295 of file TMat_maths_impl.h.

References columnMean(), and columnVariance().

Referenced by computeMeanAndStddev(), and PLearn::PCA::train().

void computeNearestNeighbors Mat  dataset,
Vec  x,
Mat &  neighbors,
int  ignore_row = -1
 

Definition at line 96 of file ManifoldParzen2.cc.

References PLearn::TVec< T >::first(), PLearn::BottomNI< T >::getBottomN(), k, PLearn::TMat< T >::length(), PLearn::BottomNI< T >::nZeros(), PLERROR, powdistance(), PLearn::BottomNI< T >::sort(), PLearn::BottomNI< T >::update(), and x.

void PLearn::computeNearestNeighbors VMat  dataset,
Vec  x,
TVec< int > &  neighbors,
int  ignore_row = -1
 

Definition at line 1282 of file VMat_maths.cc.

References PLearn::TVec< T >::first(), PLearn::BottomNI< T >::getBottomN(), k, PLearn::VMat::length(), PLearn::TVec< T >::length(), PLearn::BottomNI< T >::nZeros(), PLERROR, powdistance(), PLearn::BottomNI< T >::sort(), PLearn::BottomNI< T >::update(), PLearn::VMat::width(), and x.

Referenced by PLearn::LocalNeighborsDifferencesVMatrix::build_(), computeLocalPrincipalComponents(), PLearn::GaussianContinuum::get_image_matrix(), and PLearn::GaussianContinuum::train().

TVec< Mat > PLearn::computeOutputFields PP< PLearner >  learner,
int  nx,
int  ny,
real  x0,
real  y0,
real  deltax,
real  deltay
 

Definition at line 184 of file GenerateDecisionPlot.cc.

References k, PLearn::TVec< T >::resize(), tostring(), PLearn::ProgressBar::update(), and x.

TVec< Mat > PLearn::computeOutputFields PP< PLearner >  learner,
Vec  X,
Vec  Y
 

Definition at line 151 of file GenerateDecisionPlot.cc.

References k, PLearn::TVec< T >::length(), PLearn::TVec< T >::resize(), tostring(), and PLearn::ProgressBar::update().

Referenced by computeOutputFieldsAutoRange().

TVec< Mat > PLearn::computeOutputFieldsAutoRange PP< PLearner >  learner,
VMat  dataset,
int  nx,
int  ny,
real x0,
real y0,
real deltax,
real deltay,
real  extraspace = .10
 

Definition at line 217 of file GenerateDecisionPlot.cc.

References computeOutputFields(), computeRange(), and PLearn::VMat::subMatColumns().

void computePrincipalComponents Mat  dataset,
Vec &  eig_values,
Mat &  eig_vectors
 

Definition at line 120 of file ManifoldParzen2.cc.

References eigenVecOfSymmMat(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, PLearn::TMat< T >::resize(), transposeProduct(), and PLearn::TMat< T >::width().

Referenced by computeLocalPrincipalComponents().

void PLearn::computeRange VMat  d,
Vec &  minvec,
Vec &  maxvec
 

Definition at line 83 of file VMat_maths.cc.

References PLearn::TVec< T >::fill(), PLearn::VMat::length(), max(), min(), PLearn::TVec< T >::resize(), and PLearn::VMat::width().

Referenced by computeOutputFieldsAutoRange(), computeXYPositions(), determine_grid_for_dataset(), and DX_create_grid_outputs_file().

TVec< RealMapping > PLearn::computeRanges TVec< StatsCollector >  stats,
int  discrete_mincount,
int  continuous_mincount
 

Definition at line 592 of file StatsCollector.cc.

References k, PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

Referenced by PLearn::VMatrix::getRanges().

template<class T>
void computeRanks const TMat< T > &  mat,
TMat< T > &  ranks
 

For each column of mat, sort the elements and put in the 'ranks' matrix (of the same dimensions) the rank of the original elements.

More precisely, let mat(i,j) be the k-th largest element of column j; then ranks(i,j) will be k.

Definition at line 177 of file random.h.

References PLearn::TMat< T >::length(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), shuffleRows(), sortRows(), and PLearn::TMat< T >::width().

Referenced by SpearmanRankCorrelation().
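
A tiny sketch of the rank semantics described above (illustration only; tie-breaking follows the implementation):

    void demo_compute_ranks()
    {
        PLearn::Mat m(4, 1), ranks;
        m(0, 0) = 0.5;  m(1, 0) = -2;  m(2, 0) = 10;  m(3, 0) = 3;
        PLearn::computeRanks(m, ranks);   // ranks is resized to the same dimensions
        // ranks(i, 0) now holds the rank of m(i, 0) within column 0
    }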

void PLearn::computeRowMean VMat  d,
Vec &  meanvec
 

Definition at line 104 of file VMat_maths.cc.

References PLearn::VMat::length(), mean(), PLearn::TVec< T >::resize(), and PLearn::VMat::width().

TVec< StatsCollector > PLearn::computeStats VMat  m,
int  maxnvalues,
bool  report_progress = true
 

Returns the unconditional statistics of each field.

Definition at line 212 of file VMat_maths.cc.

References PLearn::VMat::length(), PLearn::ProgressBar::update(), and PLearn::VMat::width().

void PLearn::computeStats VMat  m,
VecStatsCollector &  st,
bool  report_progress = true
 

Definition at line 191 of file VMat_maths.cc.

References PLearn::VecStatsCollector::finalize(), PLearn::VecStatsCollector::forget(), PLearn::VMat::length(), PLearn::VecStatsCollector::setFieldNames(), PLearn::ProgressBar::update(), PLearn::VecStatsCollector::update(), and PLearn::VMat::width().

Referenced by PLearn::ShiftAndRescaleVMatrix::build_(), and PLearn::VMatrix::getStats().
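
A sketch of the per-field variant (illustration only; 'm' is assumed to be an existing VMat, and StatsCollector is assumed to expose mean()/stddev() accessors):

    #include <iostream>

    void print_field_stats(PLearn::VMat m)
    {
        PLearn::TVec<PLearn::StatsCollector> st = PLearn::computeStats(m, 255 /* maxnvalues */);
        for (int j = 0; j < st.length(); ++j)
            std::cout << "field " << j << ": mean=" << st[j].mean()
                      << " stddev=" << st[j].stddev() << std::endl;
    }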

void PLearn::computeWeightedMean Vec  weights,
VMat  d,
Vec &  meanvec
 

Statistics functions.

Definition at line 64 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), PLearn::TVec< T >::length(), PLearn::VMat::length(), PLERROR, PLearn::TVec< T >::resize(), sum(), and PLearn::VMat::width().

Referenced by computeWeightedMeanAndCovar().

real PLearn::computeWeightedMeanAndCovar VMat  d,
Vec &  meanvec,
Mat &  covarmat,
real  threshold = 0
 

The last column of d is supposed to contain the weight for each sample. Samples with a weight less than or equal to threshold will be ignored (returns the sum of all the weights actually used).

Definition at line 470 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), PLearn::TVec< T >::clear(), externalProductScaleAcc(), PLearn::VMat::length(), multiplyAcc(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), substract(), PLearn::TVec< T >::subVec(), and PLearn::VMat::width().

void PLearn::computeWeightedMeanAndCovar Vec  weights,
VMat  d,
Vec &  meanvec,
Mat &  covarmat
 

Definition at line 446 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), computeWeightedMean(), externalProductScaleAcc(), PLearn::VMat::length(), PLearn::TMat< T >::resize(), substract(), and PLearn::VMat::width().

Referenced by PLearn::GaussianDistribution::train().

void PLearn::computeXYPositions VMat  dataset,
int  nx,
int  ny,
Vec &  X,
Vec &  Y,
real  extraspace = .10
 

Definition at line 233 of file GenerateDecisionPlot.cc.

References computeRange(), PLearn::TVec< T >::data(), PLearn::VMat::length(), PLearn::TVec< T >::resize(), PLearn::VMat::subMatColumns(), and x.

template<class T>
TVec< T > PLearn::concat const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 202 of file TMat_impl.h.

References PLearn::TVec< T >::length().

template<class T>
TVec<T> concat const Array< TVec< T > > &  varray  ) 
 

Definition at line 149 of file Array_impl.h.

References PLearn::TVec< T >::data(), k, and PLearn::TVec< T >::length().

Referenced by PLearn::Learner::computeTestStatistics(), local_neighbors_differences(), PLearn::SequentialModelSelector::matlabSave(), removeElement(), and PLearn::Learner::test().

Var concatOf Var  output,
const VarArray &  inputs,
VMat  distr,
int  nsamples,
VarArray  parameters = VarArray()
[inline]
 

deprecated old version, do not use!

Definition at line 92 of file ConcatOfVariable.h.

References concatOf().

Var concatOf VMat  distr,
Func  f
[inline]
 

concatOf

Definition at line 88 of file ConcatOfVariable.h.

Referenced by concatOf().

CostFunc condprob_cost bool  normalize = false,
bool  smooth_map_outputs = false
[inline]
 

negative log conditional probability

Definition at line 103 of file NegLogProbCostFunction.h.

References normalize().

Referenced by PLearn::ClassifierFromDensity::computeCostsFromOutputs().

Vec PLearn::constrainedLinearRegression(const Mat& Xt, const Vec& Y, real lambda = 0.)


Returns the w that minimizes ||X.w - Y||^2 + lambda.||w||^2 under the constraint sum_i w_i = 1. Xt is the transpose of the input matrix X; Y is the target vector. This doesn't include any bias term.

Definition at line 375 of file plapack.cc.

References dot(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, solveLinearSystem(), PLearn::TVec< T >::subVec(), and PLearn::TMat< T >::width().

Referenced by closestPointOnHyperplane().
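
A hedged usage sketch (illustration only; the sum-to-one reading of the constraint follows the description above, and the function name below is hypothetical):

    PLearn::Vec fit_constrained_weights(const PLearn::Mat& Xt, const PLearn::Vec& Y)
    {
        // Xt is the transposed input matrix: one row per input dimension.
        PLearn::Vec w = PLearn::constrainedLinearRegression(Xt, Y, 1e-3 /* lambda */);
        return w;   // length Xt.length(); entries satisfy the documented constraint
    }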

bool PLearn::containsChar const char *  s,
const char *  symbols
 

true if string s contains any one of the characters in symbols.

Definition at line 79 of file TypesNumeriques.cc.

Referenced by looksNumeric().

Var convolve Var  input,
Var  mask
[inline]
 

Definition at line 74 of file ConvolveVariable.h.

template<class T>
void convolve TMat< T >  m,
TMat< T >  mask,
TMat< T >  result
 

Definition at line 5581 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, sum(), and PLearn::TMat< T >::width().

Referenced by PLearn::ConvolveVariable::fprop().

template<class In, class Out>
Out copy_cast In  first,
In  last,
Out  res
[inline]
 

Like std::copy, but with an explicit cast to the destination type.

Definition at line 160 of file general.h.

Referenced by operator<<().
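
A small sketch (illustration only): copying doubles into a float buffer with an explicit per-element cast.

    #include <vector>

    void demo_copy_cast()
    {
        std::vector<double> src(3, 1.5);
        std::vector<float> dst(src.size());
        PLearn::copy_cast(src.begin(), src.end(), dst.begin());   // casts each element to float
    }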

template<class T>
T correlation const TVec< T > &  x,
const TVec< T > &  y
 

Definition at line 3888 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), PLERROR, sqrt(), and x.

template<class T>
T correlation const TMat< T > &  mat  ) 
 

Definition at line 3862 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, sqrt(), PLearn::TMat< T >::width(), and x.

Referenced by PLearn::FieldConvertCommand::FieldConvertCommand(), PLearn::TestDependenciesCommand::TestDependenciesCommand(), and PLearn::TestDependencyCommand::TestDependencyCommand().

void PLearn::correlations const VMat &  x,
const VMat &  y,
Mat &  r,
Mat &  pvalues
 

Compute the correlations between each of the columns of x and each of the columns of y.

The results are in the x.width() by y.width() matrix r. The p-values of the corresponding test (no correlation) are stored in the same-sized matrix pvalues.

Definition at line 1228 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::length(), PLearn::VMat::length(), PLERROR, PLWARNING, PLearn::TMat< T >::resize(), sqrt(), testNoCorrelationAsymptotically(), PLearn::TMat< T >::width(), PLearn::VMat::width(), and x.

Referenced by PLearn::TestDependencyCommand::run(), and PLearn::TestDependenciesCommand::run().

int PLearn::countNonBlankLinesOfFile const string filename  ) 
 

Will return the number of non-blank lines of a file. #-style comments are considered blank.

Definition at line 471 of file fileutils.cc.

References count, and PLERROR.

Referenced by PLearn::AsciiVMatrix::build_(), parseSizeFromRemainingLines(), and PLearn::StringTable::StringTable().

map< real, int > PLearn::countOccurencesInColumn VMat  m,
int  col
 

Returns a map mapping each distinct value appearing in column col to its number of occurrences.

Definition at line 704 of file VMat_maths.cc.

References is_missing(), PLearn::VMat::length(), PLERROR, and val.

Referenced by indicesOfOccurencesInColumn().
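
A usage sketch (illustration only; 'm' is assumed to be an existing VMat):

    #include <iostream>
    #include <map>

    void print_value_counts(PLearn::VMat m, int col)
    {
        std::map<PLearn::real, int> counts = PLearn::countOccurencesInColumn(m, col);
        for (std::map<PLearn::real, int>::const_iterator it = counts.begin(); it != counts.end(); ++it)
            std::cout << it->first << " appears " << it->second << " time(s)" << std::endl;
    }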

template<class T>
T covariance const TVec< T > &  vec1,
const TVec< T > &  vec2,
mean1,
mean2
 

Definition at line 425 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::VecStatsCollector::getCovariance().

void PLearn::cp const string srcpath,
const string destpath
 

Calls the system command 'cp -R' to recursively copy source to destination.

Definition at line 358 of file fileutils.cc.

Referenced by loadSTATLOG(), and loadUCIMLDB().

Var cross_entropy Var  network_output,
Var  targets
[inline]
 

Definition at line 76 of file CrossEntropyVariable.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), and PLearn::AddCostToLearner::build_().

void cross_valid const string modelalias,
string  trainalias,
int  kval
 

Definition at line 279 of file old_plearn_main.cc.

References endl(), exitmsg(), getDataSet(), getDatasetAliases(), getModelAliases(), isfile(), PLearn::TMat< T >::length(), PLearn::VMat::length(), loadAscii(), PLWARNING, read(), PLearn::TMat< T >::resize(), save(), PLearn::TVec< T >::size(), split(), tostring(), PLearn::TMat< T >::width(), and PLearn::VMat::width().

Referenced by old_plearn_main().

char * PLearn::cstr string s  ) 
 

Definition at line 2880 of file WordNetOntology.cc.

References cstr().

Referenced by cstr(), PLearn::WordNetOntology::extractSenses(), PLearn::WordNetOntology::extractTaggedWordFrequencies(), PLearn::WordNetOntology::getWordSenseIdForSenseKey(), PLearn::WordNetOntology::hasSenseInWordNet(), and stemWord().

Var cutAboveThreshold Var  v,
real  threshold
[inline]
 

Definition at line 76 of file CutAboveThresholdVariable.h.

Referenced by negative().

Var cutBelowThreshold Var  v,
real  threshold
[inline]
 

Definition at line 76 of file CutBelowThresholdVariable.h.

Referenced by positive().

Var d_hard_slope Var  x,
Var  left,
Var  right
[inline]
 

Definition at line 79 of file HardSlopeVariable.h.

References ifThenElse(), invertElements(), left(), right(), var(), and x.

Var d_soft_slope Var  x,
Var  smoothness,
Var  left,
Var  right
[inline]
 

Definition at line 85 of file SoftSlopeVariable.h.

References left(), right(), sigmoid(), and x.

real d_soft_slope real  x,
real  smoothness = 1,
real  left = 0,
real  right = 1
[inline]
 

Definition at line 368 of file pl_math.h.

References left(), right(), sigmoid(), and x.

Referenced by PLearn::ConditionalDensityNet::build_().

string * PLearn::data_filename_2_filenames const string filename,
int nb_files
 

Takes a filename containing one file name per line, and returns these names as a string* of length nb_files.

Definition at line 187 of file stringutils.cc.

References fname, and PLERROR.

float PLearn::date_to_float const PDate &  t  ) 
 

Converts a date to float, e.g. September 29 1972: 720929; December 25 2002: 1021225. Also converts a missing date to the missing float value and vice-versa.

Definition at line 162 of file PDate.cc.

References PLearn::PDate::day, PLearn::PDate::isMissing(), MISSING_VALUE, PLearn::PDate::month, and PLearn::PDate::year.

Referenced by PLearn::VMatLanguage::run(), PLearn::RowIterator::toDouble(), and PLearn::FieldValue::toDouble().
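
A worked example of the documented encoding (illustration only):

    void demo_date_to_float()
    {
        PLearn::PDate d(1972, 9, 29);
        float f = PLearn::date_to_float(d);
        // f == 720929, i.e. (1972 - 1900) * 10000 + 9 * 100 + 29,
        // matching the September 29 1972 example given above.
    }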

double PLearn::datetime_to_double const PDateTime &  t  ) 
 

converts date/time to double: for example: September 29 1972: 720929; December 25 2002: 1021225.

Hours/minutes/seconds are represented as FRACTIONS of days. Also converts missing date to missing double value and vice-versa.

Definition at line 137 of file PDateTime.cc.

References PLearn::PDateTime::day, hhmmss_to_double(), PLearn::PDateTime::hour, PLearn::PDateTime::isMissing(), PLearn::PDateTime::min, MISSING_VALUE, PLearn::PDateTime::month, PLearn::PDateTime::sec, and PLearn::PDateTime::year.

Referenced by PLearn::PDateTime::operator<().

DECLARE_OBJECT_PP VMat  ,
VMatrix 
 

DECLARE_OBJECT_PP Var  ,
Variable 
 

DECLARE_OBJECT_PP Func  ,
Function 
 

DECLARE_OBJECT_PP Ker  ,
Kernel 
 

DECLARE_OBJECT_PTR TangentLearner   ) 
 

DECLARE_OBJECT_PTR SpectralClustering   ) 
 

DECLARE_OBJECT_PTR PCA   ) 
 

DECLARE_OBJECT_PTR KPCATangentLearner   ) 
 

PLearn::DECLARE_OBJECT_PTR Isomap   ) 
 

DECLARE_OBJECT_PTR GaussianContinuum   ) 
 

DECLARE_OBJECT_PTR EntropyContrast   ) 
 

DECLARE_OBJECT_PTR TestMethod   ) 
 

DECLARE_OBJECT_PTR SequentialValidation   ) 
 

Declares a few other classes and functions related to this class.

DECLARE_OBJECT_PTR SequentialModelSelector   ) 
 

Declares a few other classes and functions related to this class.

DECLARE_OBJECT_PTR SequentialLearner   ) 
 

Declares a few other classes and functions related to this class.

DECLARE_OBJECT_PTR EmbeddedSequentialLearner   ) 
 

Declares a few other classes and functions related to this class.

DECLARE_OBJECT_PTR PLS   ) 
 

DECLARE_OBJECT_PTR LinearRegressor   ) 
 

DECLARE_OBJECT_PTR ConstantRegressor   ) 
 

PLearn::DECLARE_OBJECT_PTR PTester   ) 
 

DECLARE_OBJECT_PTR Grapher   ) 
 

DECLARE_OBJECT_PTR GenerateDecisionPlot   ) 
 

DECLARE_OBJECT_PTR Experiment   ) 
 

DECLARE_OBJECT_PTR Dictionary   ) 
 

DECLARE_OBJECT_PTR TestingLearner   ) 
 

DECLARE_OBJECT_PTR StatefulLearner   ) 
 

DECLARE_OBJECT_PTR StackedLearner   ) 
 

DECLARE_OBJECT_PTR SelectInputSubsetLearner   ) 
 

DECLARE_OBJECT_PTR NNet   ) 
 

DECLARE_OBJECT_PTR NeuralNet   ) 
 

DECLARE_OBJECT_PTR NeighborhoodSmoothnessNNet   ) 
 

DECLARE_OBJECT_PTR EmbeddedLearner   ) 
 

DECLARE_OBJECT_PTR AddCostToLearner   ) 
 

DECLARE_OBJECT_PTR UniformDistribution   ) 
 

DECLARE_OBJECT_PTR UnconditionalDistribution   ) 
 

DECLARE_OBJECT_PTR SpiralDistribution   ) 
 

DECLARE_OBJECT_PTR ManifoldParzen2   ) 
 

DECLARE_OBJECT_PTR LocallyWeightedDistribution   ) 
 

DECLARE_OBJECT_PTR HistogramDistribution   ) 
 

DECLARE_OBJECT_PTR GaussMix   ) 
 

DECLARE_OBJECT_PTR GaussianProcessRegressor   ) 
 

DECLARE_OBJECT_PTR GaussianDistribution   ) 
 

DECLARE_OBJECT_PTR EmpiricalDistribution   ) 
 

DECLARE_OBJECT_PTR Distribution   ) 
 

DECLARE_OBJECT_PTR ConditionalGaussianDistribution   ) 
 

DECLARE_OBJECT_PTR ConditionalDistribution   ) 
 

DECLARE_OBJECT_PTR ConditionalDensityNet   ) 
 

DECLARE_OBJECT_PTR MultiInstanceNNet   ) 
 

DECLARE_OBJECT_PTR ClassifierFromDensity   ) 
 

DECLARE_OBJECT_PTR AdaBoost   ) 
 

DECLARE_OBJECT_PTR YMDDatedVMatrix   ) 
 

DECLARE_OBJECT_PTR VVMatrix   ) 
 

PLearn::DECLARE_OBJECT_PTR VMatrix   ) 
 

DECLARE_OBJECT_PTR PreprocessingVMatrix   ) 
 

DECLARE_OBJECT_PTR VMatLanguage   ) 
 

DECLARE_OBJECT_PTR VecExtendedVMatrix   ) 
 

DECLARE_OBJECT_PTR UpsideDownVMatrix   ) 
 

DECLARE_OBJECT_PTR UniformVMatrix   ) 
 

DECLARE_OBJECT_PTR UniformizeVMatrix   ) 
 

DECLARE_OBJECT_PTR TransposeVMatrix   ) 
 

DECLARE_OBJECT_PTR TrainValidTestSplitter   ) 
 

DECLARE_OBJECT_PTR TrainTestSplitter   ) 
 

PLearn::DECLARE_OBJECT_PTR TrainTestBagsSplitter   ) 
 

DECLARE_OBJECT_PTR ToBagSplitter   ) 
 

DECLARE_OBJECT_PTR TestInTrainSplitter   ) 
 

DECLARE_OBJECT_PTR TemporalHorizonVMatrix   ) 
 

DECLARE_OBJECT_PTR SubVMatrix   ) 
 

DECLARE_OBJECT_PTR SubInputVMatrix   ) 
 

DECLARE_OBJECT_PTR StrTableVMatrix   ) 
 

DECLARE_OBJECT_PTR Splitter   ) 
 

DECLARE_OBJECT_PTR SparseVMatrix   ) 
 

PLearn::DECLARE_OBJECT_PTR SourceVMatrix   ) 
 

DECLARE_OBJECT_PTR SortRowsVMatrix   ) 
 

DECLARE_OBJECT_PTR ShiftAndRescaleVMatrix   ) 
 

DECLARE_OBJECT_PTR SequentialSplitter   ) 
 

DECLARE_OBJECT_PTR SelectRowsVMatrix   ) 
 

DECLARE_OBJECT_PTR SelectRowsFileIndexVMatrix   ) 
 

DECLARE_OBJECT_PTR SelectColumnsVMatrix   ) 
 

DECLARE_OBJECT_PTR RowsSubVMatrix   ) 
 

DECLARE_OBJECT_PTR RowBufferedVMatrix   ) 
 

DECLARE_OBJECT_PTR RepeatSplitter   ) 
 

DECLARE_OBJECT_PTR RemoveRowsVMatrix   ) 
 

DECLARE_OBJECT_PTR RemoveDuplicateVMatrix   ) 
 

DECLARE_OBJECT_PTR RemapLastColumnVMatrix   ) 
 

DECLARE_OBJECT_PTR RegularGridVMatrix   ) 
 

DECLARE_OBJECT_PTR RangeVMatrix   ) 
 

DECLARE_OBJECT_PTR ProcessingVMatrix   ) 
 

DECLARE_OBJECT_PTR PrecomputedVMatrix   ) 
 

PLearn::DECLARE_OBJECT_PTR PLearnerOutputVMatrix   ) 
 

DECLARE_OBJECT_PTR PairsVMatrix   ) 
 

DECLARE_OBJECT_PTR OneHotVMatrix   ) 
 

DECLARE_OBJECT_PTR MultiInstanceVMatrix   ) 
 

PLearn::DECLARE_OBJECT_PTR MovingAverageVMatrix   ) 
 

Declares a few other classes and functions related to this class.

DECLARE_OBJECT_PTR MemoryVMatrix   ) 
 

DECLARE_OBJECT_PTR LocalNeighborsDifferencesVMatrix   ) 
 

PLearn::DECLARE_OBJECT_PTR LearnerProcessedVMatrix   ) 
 

DECLARE_OBJECT_PTR KNNVMatrix   ) 
 

DECLARE_OBJECT_PTR KFoldSplitter   ) 
 

DECLARE_OBJECT_PTR JulianizeVMatrix   ) 
 

DECLARE_OBJECT_PTR JoinVMatrix   ) 
 

DECLARE_OBJECT_PTR InterleaveVMatrix   ) 
 

DECLARE_OBJECT_PTR IndexedVMatrix   ) 
 

DECLARE_OBJECT_PTR GramVMatrix   ) 
 

DECLARE_OBJECT_PTR GetInputVMatrix   ) 
 

DECLARE_OBJECT_PTR GeneralizedOneHotVMatrix   ) 
 

DECLARE_OBJECT_PTR FractionSplitter   ) 
 

DECLARE_OBJECT_PTR ForwardVMatrix   ) 
 

DECLARE_OBJECT_PTR FinancePreprocVMatrix   ) 
 

DECLARE_OBJECT_PTR FilterSplitter   ) 
 

DECLARE_OBJECT_PTR FilteredVMatrix   ) 
 

DECLARE_OBJECT_PTR FileVMatrix   ) 
 

DECLARE_OBJECT_PTR ExtendedVMatrix   ) 
 

DECLARE_OBJECT_PTR ExplicitSplitter   ) 
 

DECLARE_OBJECT_PTR DiskVMatrix   ) 
 

DECLARE_OBJECT_PTR DBSplitter   ) 
 

DECLARE_OBJECT_PTR DatedVMatrix   ) 
 

DECLARE_OBJECT_PTR DatedJoinVMatrix   ) 
 

DECLARE_OBJECT_PTR CumVMatrix   ) 
 

DECLARE_OBJECT_PTR CrossReferenceVMatrix   ) 
 

DECLARE_OBJECT_PTR ConcatRowsVMatrix   ) 
 

DECLARE_OBJECT_PTR ConcatRowsSubVMatrix   ) 
 

DECLARE_OBJECT_PTR ConcatColumnsVMatrix   ) 
 

DECLARE_OBJECT_PTR CompressedVMatrix   ) 
 

DECLARE_OBJECT_PTR CenteredVMatrix   ) 
 

DECLARE_OBJECT_PTR ByteMemoryVMatrix   ) 
 

DECLARE_OBJECT_PTR BootstrapVMatrix   ) 
 

DECLARE_OBJECT_PTR BootstrapSplitter   ) 
 

DECLARE_OBJECT_PTR BatchVMatrix   ) 
 

DECLARE_OBJECT_PTR AutoVMatrix   ) 
 

DECLARE_OBJECT_PTR AsciiVMatrix   ) 
 

DECLARE_OBJECT_PTR WeightedSumSquareVariable   ) 
 

DECLARE_OBJECT_PTR VecElementVariable   ) 
 

DECLARE_OBJECT_PTR VarRowVariable   ) 
 

DECLARE_OBJECT_PTR VarRowsVariable   ) 
 

DECLARE_OBJECT_PTR Variable   ) 
 

DECLARE_OBJECT_PTR VarElementVariable   ) 
 

DECLARE_OBJECT_PTR VarColumnsVariable   ) 
 

DECLARE_OBJECT_PTR VarArrayElementVariable   ) 
 

DECLARE_OBJECT_PTR UnfoldedSumOfVariable   ) 
 

DECLARE_OBJECT_PTR UnfoldedFuncVariable   ) 
 

DECLARE_OBJECT_PTR UnequalConstantVariable   ) 
 

DECLARE_OBJECT_PTR UnaryHardSlopeVariable   ) 
 

DECLARE_OBJECT_PTR TransposeProductVariable   ) 
 

DECLARE_OBJECT_PTR TimesVariable   ) 
 

DECLARE_OBJECT_PTR TimesScalarVariable   ) 
 

DECLARE_OBJECT_PTR TimesRowVariable   ) 
 

DECLARE_OBJECT_PTR TimesConstantVariable   ) 
 

DECLARE_OBJECT_PTR TimesColumnVariable   ) 
 

DECLARE_OBJECT_PTR TanhVariable   ) 
 

DECLARE_OBJECT_PTR SumVariable   ) 
 

DECLARE_OBJECT_PTR SumSquareVariable   ) 
 

DECLARE_OBJECT_PTR SumOverBagsVariable   ) 
 

DECLARE_OBJECT_PTR SumOfVariable   ) 
 

DECLARE_OBJECT_PTR SumAbsVariable   ) 
 

DECLARE_OBJECT_PTR SubsampleVariable   ) 
 

DECLARE_OBJECT_PTR SubMatVariable   ) 
 

DECLARE_OBJECT_PTR SubMatTransposeVariable   ) 
 

DECLARE_OBJECT_PTR SquareVariable   ) 
 

DECLARE_OBJECT_PTR SquareRootVariable   ) 
 

DECLARE_OBJECT_PTR SourceVariable   ) 
 

DECLARE_OBJECT_PTR SoftSlopeVariable   ) 
 

DECLARE_OBJECT_PTR SoftSlopeIntegralVariable   ) 
 

DECLARE_OBJECT_PTR SoftplusVariable   ) 
 

DECLARE_OBJECT_PTR SoftmaxVariable   ) 
 

DECLARE_OBJECT_PTR SoftmaxLossVariable   ) 
 

DECLARE_OBJECT_PTR SignVariable   ) 
 

DECLARE_OBJECT_PTR SigmoidVariable   ) 
 

DECLARE_OBJECT_PTR SemiSupervisedProbClassCostVariable   ) 
 

DECLARE_OBJECT_PTR RowSumVariable   ) 
 

DECLARE_OBJECT_PTR RowAtPositionVariable   ) 
 

DECLARE_OBJECT_PTR RightPseudoInverseVariable   ) 
 

DECLARE_OBJECT_PTR ReshapeVariable   ) 
 

DECLARE_OBJECT_PTR ProjectionErrorVariable   ) 
 

DECLARE_OBJECT_PTR ProductVariable   ) 
 

DECLARE_OBJECT_PTR ProductTransposeVariable   ) 
 

PLearn::DECLARE_OBJECT_PTR PowVariable   ) 
 

DECLARE_OBJECT_PTR PlusVariable   ) 
 

DECLARE_OBJECT_PTR PlusScalarVariable   ) 
 

DECLARE_OBJECT_PTR PlusRowVariable   ) 
 

DECLARE_OBJECT_PTR PlusConstantVariable   ) 
 

DECLARE_OBJECT_PTR PlusColumnVariable   ) 
 

DECLARE_OBJECT_PTR PLogPVariable   ) 
 

PLearn::DECLARE_OBJECT_PTR PDistributionVariable   ) 
 

DECLARE_OBJECT_PTR OneHotVariable   ) 
 

DECLARE_OBJECT_PTR OneHotSquaredLoss   ) 
 

DECLARE_OBJECT_PTR NllSemisphericalGaussianVariable   ) 
 

DECLARE_OBJECT_PTR NegCrossEntropySigmoidVariable   ) 
 

DECLARE_OBJECT_PTR NegateElementsVariable   ) 
 

DECLARE_OBJECT_PTR MulticlassLossVariable   ) 
 

DECLARE_OBJECT_PTR MinVariable   ) 
 

DECLARE_OBJECT_PTR MinusVariable   ) 
 

DECLARE_OBJECT_PTR MinusTransposedColumnVariable   ) 
 

DECLARE_OBJECT_PTR MinusRowVariable   ) 
 

DECLARE_OBJECT_PTR MinusColumnVariable   ) 
 

DECLARE_OBJECT_PTR MiniBatchClassificationLossVariable   ) 
 

DECLARE_OBJECT_PTR MaxVariable   ) 
 

DECLARE_OBJECT_PTR Max2Variable   ) 
 

DECLARE_OBJECT_PTR MatRowVariable   ) 
 

DECLARE_OBJECT_PTR MatrixSumOfVariable   ) 
 

DECLARE_OBJECT_PTR(MatrixSoftmaxVariable)
DECLARE_OBJECT_PTR(MatrixSoftmaxLossVariable)
DECLARE_OBJECT_PTR(MatrixOneHotSquaredLoss)
DECLARE_OBJECT_PTR(MatrixInverseVariable)
DECLARE_OBJECT_PTR(MatrixElementsVariable)
DECLARE_OBJECT_PTR(MatrixAffineTransformVariable)
DECLARE_OBJECT_PTR(MatrixAffineTransformFeedbackVariable)
DECLARE_OBJECT_PTR(MarginPerceptronCostVariable)
DECLARE_OBJECT_PTR(LogVariable)
DECLARE_OBJECT_PTR(LogSoftmaxVariable)
DECLARE_OBJECT_PTR(LogAddVariable)
DECLARE_OBJECT_PTR(LiftOutputVariable)
DECLARE_OBJECT_PTR(LeftPseudoInverseVariable)
DECLARE_OBJECT_PTR(IsSmallerVariable)
DECLARE_OBJECT_PTR(IsMissingVariable)
DECLARE_OBJECT_PTR(IsLargerVariable)
DECLARE_OBJECT_PTR(IsAboveThresholdVariable)
DECLARE_OBJECT_PTR(InvertElementsVariable)
DECLARE_OBJECT_PTR(InterValuesVariable)
DECLARE_OBJECT_PTR(IndexAtPositionVariable)
DECLARE_OBJECT_PTR(IfThenElseVariable)
DECLARE_OBJECT_PTR(HardSlopeVariable)
DECLARE_OBJECT_PTR(Function)
DECLARE_OBJECT_PTR(ExtendedVariable)
DECLARE_OBJECT_PTR(ExpVariable)
DECLARE_OBJECT_PTR(ErfVariable)
DECLARE_OBJECT_PTR(EqualVariable)
DECLARE_OBJECT_PTR(EqualScalarVariable)
DECLARE_OBJECT_PTR(EqualConstantVariable)
DECLARE_OBJECT_PTR(ElementAtPositionVariable)
DECLARE_OBJECT_PTR(DuplicateScalarVariable)
DECLARE_OBJECT_PTR(DuplicateRowVariable)
DECLARE_OBJECT_PTR(DuplicateColumnVariable)
DECLARE_OBJECT_PTR(DotProductVariable)
DECLARE_OBJECT_PTR(DivVariable)
DECLARE_OBJECT_PTR(DilogarithmVariable)
DECLARE_OBJECT_PTR(DiagonalizedFactorsProductVariable)
DECLARE_OBJECT_PTR(DeterminantVariable)
DECLARE_OBJECT_PTR(CutBelowThresholdVariable)
DECLARE_OBJECT_PTR(CutAboveThresholdVariable)
DECLARE_OBJECT_PTR(CrossEntropyVariable)
DECLARE_OBJECT_PTR(ConvolveVariable)
DECLARE_OBJECT_PTR(ConcatRowsVariable)
DECLARE_OBJECT_PTR(ConcatOfVariable)
DECLARE_OBJECT_PTR(ConcatColumnsVariable)
DECLARE_OBJECT_PTR(ColumnIndexVariable)
DECLARE_OBJECT_PTR(ClassificationLossVariable)
DECLARE_OBJECT_PTR(BinaryClassificationLossVariable)
DECLARE_OBJECT_PTR(ArgminVariable)
DECLARE_OBJECT_PTR(ArgmaxVariable)
DECLARE_OBJECT_PTR(AffineTransformWeightPenalty)
DECLARE_OBJECT_PTR(AffineTransformVariable)
DECLARE_OBJECT_PTR(AbsVariable)
DECLARE_OBJECT_PTR(Optimizer)
DECLARE_OBJECT_PTR(HTryCombinations)
DECLARE_OBJECT_PTR(HCoordinateDescent)
DECLARE_OBJECT_PTR(HTryAll)
DECLARE_OBJECT_PTR(HSetVal)
DECLARE_OBJECT_PTR(HyperOptimizer)
DECLARE_OBJECT_PTR(GradientOptimizer)
DECLARE_OBJECT_PTR(ShellScript)
DECLARE_OBJECT_PTR(RunObject)
DECLARE_OBJECT_PTR(ObjectGenerator)
DECLARE_OBJECT_PTR(NearestNeighborPredictionCost)
DECLARE_OBJECT_PTR(VecStatsCollector)
DECLARE_OBJECT_PTR(QuantilesStatsIterator)
DECLARE_OBJECT_PTR(LiftStatsIterator)
DECLARE_OBJECT_PTR(MaxStatsIterator)
DECLARE_OBJECT_PTR(MinStatsIterator)
DECLARE_OBJECT_PTR(SharpeRatioStatsIterator)
DECLARE_OBJECT_PTR(StderrStatsIterator)
DECLARE_OBJECT_PTR(StddevStatsIterator)
DECLARE_OBJECT_PTR(ExpMeanStatsIterator)
DECLARE_OBJECT_PTR(MeanStatsIterator)
DECLARE_OBJECT_PTR(StatsIterator)
DECLARE_OBJECT_PTR(StatsCollector)
DECLARE_OBJECT_PTR(Smoother)
DECLARE_OBJECT_PTR(ScaledConditionalCDFSmoother)
DECLARE_OBJECT_PTR(ManualBinner)
DECLARE_OBJECT_PTR(LimitedGaussianSmoother)
DECLARE_OBJECT_PTR(LiftStatsCollector)
DECLARE_OBJECT_PTR(ConditionalStatsCollector)
DECLARE_OBJECT_PTR(ConditionalCDFSmoother)
DECLARE_OBJECT_PTR(Binner)
DECLARE_OBJECT_PTR(WeightedCostFunction)
DECLARE_OBJECT_PTR(SquaredErrorCostFunction)
DECLARE_OBJECT_PTR(SourceKernel)
DECLARE_OBJECT_PTR(SigmoidPrimitiveKernel)
DECLARE_OBJECT_PTR(SigmoidalKernel)
DECLARE_OBJECT_PTR(SelectedOutputCostFunction)
DECLARE_OBJECT_PTR(ScaledLaplacianKernel)
DECLARE_OBJECT_PTR(ScaledGeneralizedDistanceRBFKernel)
DECLARE_OBJECT_PTR(ScaledGaussianKernel)
DECLARE_OBJECT_PTR(ReconstructionWeightsKernel)
DECLARE_OBJECT_PTR(QuadraticUtilityCostFunction)
DECLARE_OBJECT_PTR(PricingTransactionPairProfitFunction)
DECLARE_OBJECT_PTR(PrecomputedKernel)
DECLARE_OBJECT_PTR(PowDistanceKernel)
DECLARE_OBJECT_PTR(PolynomialKernel)
DECLARE_OBJECT_PTR(NormalizedDotProductKernel)
DECLARE_OBJECT_PTR(NegOutputCostFunction)
DECLARE_OBJECT_PTR(NegLogProbCostFunction)
DECLARE_OBJECT_PTR(NegKernel)
DECLARE_OBJECT_PTR(MulticlassErrorCostFunction)
DECLARE_OBJECT_PTR(LogOfGaussianDensityKernel)
DECLARE_OBJECT_PTR(LLEKernel)
DECLARE_OBJECT_PTR(LiftBinaryCostFunction)
DECLARE_OBJECT_PTR(LaplacianKernel)
DECLARE_OBJECT_PTR(Kernel)
DECLARE_OBJECT_PTR(GeodesicDistanceKernel)
DECLARE_OBJECT_PTR(GeneralizedDistanceRBFKernel)
DECLARE_OBJECT_PTR(GaussianKernel)
DECLARE_OBJECT_PTR(GaussianDensityKernel)
DECLARE_OBJECT_PTR(DotProductKernel)
DECLARE_OBJECT_PTR(DivisiveNormalizationKernel)
DECLARE_OBJECT_PTR(DistanceKernel)
DECLARE_OBJECT_PTR(DifferenceKernel)
DECLARE_OBJECT_PTR(ConvexBasisKernel)
DECLARE_OBJECT_PTR(CompactVMatrixPolynomialKernel)
DECLARE_OBJECT_PTR(CompactVMatrixGaussianKernel)
DECLARE_OBJECT_PTR(ClassMarginCostFunction)
DECLARE_OBJECT_PTR(ClassErrorCostFunction)
DECLARE_OBJECT_PTR(ClassDistanceProportionCostFunction)
DECLARE_OBJECT_PTR(AdditiveNormalizationKernel)
DECLARE_OBJECT_PTR(FilePStreamBuf)
DECLARE_OBJECT_PTR(UCISpecification)
 

DECLARE_TYPE_TRAITS(VarArray)
DECLARE_TYPE_TRAITS(StatsItArray)
 

DECLARE_TYPE_TRAITS_FOR_BASETYPE(bool, 0x12, 0x12)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(double, 0x10, 0x11)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(float, 0x0E, 0x0F)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(long, 0x07, 0x08)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(unsigned int, 0x0B, 0x0C)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(int, 0x07, 0x08)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(unsigned short, 0x05, 0x06)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(short, 0x03, 0x04)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(unsigned char, 0x02, 0x02)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(signed char, 0x01, 0x01)
DECLARE_TYPE_TRAITS_FOR_BASETYPE(char, 0x01, 0x01)
 

template<class ObjectType, class OptionType>
void declareOption(OptionList& ol, const string optionname, OptionType* ObjectType::* member_ptr, OptionBase::flag_t flags, const string description, const string defaultval = "") [inline]
 

Definition at line 121 of file Option.h.

template<class ObjectType, class OptionType>
void declareOption(OptionList& ol, const string optionname, OptionType ObjectType::* member_ptr, OptionBase::flag_t flags, const string description, const string defaultval = "") [inline]
 

For flags, you should specify one of OptionBase::buildoption, OptionBase::learntoption or OptionBase::tuningoption. If the option is not to be serialized, you can additionally specify OptionBase::nosave.

The "type" printed in the help is given by TypeTraits<OptionType>::name(). The "default value" printed in optionHelp() will be a serialization of the value of the field in a default-constructed instance (which should be OK in most cases), unless you explicitly specify it as the last argument here. (It is recommended that you *don't* specify it explicitly, unless you really must.)

Parameters:
ol  the list to which this option should be appended
optionname  the name of this option
member_ptr  &YourClass::your_field
flags  see the flags in OptionBase
description  a description of the option
defaultval  the default value for this option, as set by the default constructor

Definition at line 111 of file Option.h.

References PLearn::OptionBase::flag_t.

Referenced by PLearn::YMDDatedVMatrix::declareOptions(), PLearn::WeightedCostFunction::declareOptions(), PLearn::VVMatrix::declareOptions(), PLearn::VVec::declareOptions(), PLearn::VMatrixFromDistribution::declareOptions(), PLearn::VMatrix::declareOptions(), PLearn::PreprocessingVMatrix::declareOptions(), PLearn::VMatLanguage::declareOptions(), PLearn::VecStatsCollector::declareOptions(), PLearn::VecExtendedVMatrix::declareOptions(), PLearn::VecElementVariable::declareOptions(), PLearn::Variable::declareOptions(), PLearn::UniformVMatrix::declareOptions(), PLearn::UniformizeVMatrix::declareOptions(), PLearn::UniformDistribution::declareOptions(), PLearn::UnfoldedSumOfVariable::declareOptions(), PLearn::UnfoldedFuncVariable::declareOptions(), PLearn::UnequalConstantVariable::declareOptions(), PLearn::UnaryVariable::declareOptions(), PLearn::UnaryHardSlopeVariable::declareOptions(), PLearn::UCISpecification::declareOptions(), PLearn::TrainValidTestSplitter::declareOptions(), PLearn::TrainTestSplitter::declareOptions(), PLearn::TrainTestBagsSplitter::declareOptions(), PLearn::Train::declareOptions(), PLearn::ToBagSplitter::declareOptions(), PLearn::TextSenseSequenceVMatrix::declareOptions(), PLearn::TestMethod::declareOptions(), PLearn::TestInTrainSplitter::declareOptions(), PLearn::TestingLearner::declareOptions(), PLearn::TemporalHorizonVMatrix::declareOptions(), PLearn::TangentLearner::declareOptions(), PLearn::SumOverBagsVariable::declareOptions(), PLearn::SumOfVariable::declareOptions(), PLearn::SubVMatrix::declareOptions(), PLearn::SubsampleVariable::declareOptions(), PLearn::SubMatVariable::declareOptions(), PLearn::SubMatTransposeVariable::declareOptions(), PLearn::SubInputVMatrix::declareOptions(), PLearn::QuantilesStatsIterator::declareOptions(), PLearn::LiftStatsIterator::declareOptions(), PLearn::SharpeRatioStatsIterator::declareOptions(), PLearn::StderrStatsIterator::declareOptions(), PLearn::StddevStatsIterator::declareOptions(), PLearn::ExpMeanStatsIterator::declareOptions(), PLearn::MeanStatsIterator::declareOptions(), PLearn::StatsIterator::declareOptions(), PLearn::StatsCollector::declareOptions(), PLearn::StackedLearner::declareOptions(), PLearn::SquaredErrorCostFunction::declareOptions(), PLearn::SpiralDistribution::declareOptions(), PLearn::SpectralClustering::declareOptions(), PLearn::SourceVMatrixSplitter::declareOptions(), PLearn::SourceVMatrix::declareOptions(), PLearn::SourceKernel::declareOptions(), PLearn::SortRowsVMatrix::declareOptions(), PLearn::SoftSlopeVariable::declareOptions(), PLearn::SoftSlopeIntegralVariable::declareOptions(), PLearn::SigmoidPrimitiveKernel::declareOptions(), PLearn::SigmoidalKernel::declareOptions(), PLearn::ShiftAndRescaleVMatrix::declareOptions(), PLearn::ShellScript::declareOptions(), PLearn::SetOption::declareOptions(), PLearn::SequentialValidation::declareOptions(), PLearn::SequentialSplitter::declareOptions(), PLearn::SequentialModelSelector::declareOptions(), PLearn::SequentialLearner::declareOptions(), PLearn::SemiSupervisedProbClassCostVariable::declareOptions(), PLearn::SelectRowsVMatrix::declareOptions(), PLearn::SelectRowsFileIndexVMatrix::declareOptions(), PLearn::SelectInputSubsetLearner::declareOptions(), PLearn::SelectedOutputCostFunction::declareOptions(), PLearn::SelectColumnsVMatrix::declareOptions(), PLearn::ScaledGeneralizedDistanceRBFKernel::declareOptions(), PLearn::ScaledGaussianKernel::declareOptions(), PLearn::ScaledConditionalCDFSmoother::declareOptions(), PLearn::RunObject::declareOptions(), 
PLearn::RowsSubVMatrix::declareOptions(), PLearn::RowAtPositionVariable::declareOptions(), PLearn::ReshapeVariable::declareOptions(), PLearn::RepeatSplitter::declareOptions(), PLearn::RemoveRowsVMatrix::declareOptions(), PLearn::RemoveDuplicateVMatrix::declareOptions(), PLearn::RemapLastColumnVMatrix::declareOptions(), PLearn::RegularGridVMatrix::declareOptions(), PLearn::ReconstructionWeightsKernel::declareOptions(), PLearn::RealMapping::declareOptions(), PLearn::RangeVMatrix::declareOptions(), PLearn::QuadraticUtilityCostFunction::declareOptions(), PLearn::ProcessingVMatrix::declareOptions(), PLearn::PricingTransactionPairProfitFunction::declareOptions(), PLearn::PrecomputedVMatrix::declareOptions(), PLearn::PrecomputedKernel::declareOptions(), PLearn::PowVariable::declareOptions(), PLearn::PowDistanceKernel::declareOptions(), PLearn::PolynomialKernel::declareOptions(), PLearn::PlusConstantVariable::declareOptions(), PLearn::PLS::declareOptions(), PLearn::PLearnerOutputVMatrix::declareOptions(), PLearn::PLearner::declareOptions(), PLearn::PTester::declareOptions(), PLearn::PDistributionVariable::declareOptions(), PLearn::PDistribution::declareOptions(), PLearn::PConditionalDistribution::declareOptions(), PLearn::PCA::declareOptions(), PLearn::PairsVMatrix::declareOptions(), PLearn::Optimizer::declareOptions(), PLearn::OneHotVMatrix::declareOptions(), PLearn::OneHotVariable::declareOptions(), PLearn::OneHotSquaredLoss::declareOptions(), PLearn::ObjectGenerator::declareOptions(), PLearn::NormalizedDotProductKernel::declareOptions(), PLearn::NNet::declareOptions(), PLearn::NeuralNet::declareOptions(), PLearn::NeighborhoodSmoothnessNNet::declareOptions(), PLearn::NegLogProbCostFunction::declareOptions(), PLearn::NegKernel::declareOptions(), PLearn::NearestNeighborPredictionCost::declareOptions(), PLearn::NaryVariable::declareOptions(), PLearn::MultiInstanceVMatrix::declareOptions(), PLearn::MultiInstanceNNet::declareOptions(), PLearn::MovingAverageVMatrix::declareOptions(), PLearn::MovingAverage::declareOptions(), PLearn::MemoryVMatrix::declareOptions(), PLearn::MatRowVariable::declareOptions(), PLearn::MatrixSumOfVariable::declareOptions(), PLearn::MatrixOneHotSquaredLoss::declareOptions(), PLearn::MatrixElementsVariable::declareOptions(), PLearn::MarginPerceptronCostVariable::declareOptions(), PLearn::ManualBinner::declareOptions(), PLearn::LogOfGaussianDensityKernel::declareOptions(), PLearn::LocalNeighborsDifferencesVMatrix::declareOptions(), PLearn::LocallyWeightedDistribution::declareOptions(), PLearn::LLEKernel::declareOptions(), PLearn::LLE::declareOptions(), PLearn::LinearRegressor::declareOptions(), PLearn::LiftStatsCollector::declareOptions(), PLearn::LiftBinaryCostFunction::declareOptions(), PLearn::LearnerProcessedVMatrix::declareOptions(), PLearn::Learner::declareOptions(), PLearn::LaplacianKernel::declareOptions(), PLearn::KPCATangentLearner::declareOptions(), PLearn::KNNVMatrix::declareOptions(), PLearn::KFoldSplitter::declareOptions(), PLearn::KernelVMatrix::declareOptions(), PLearn::KernelProjection::declareOptions(), PLearn::KernelPCA::declareOptions(), PLearn::Kernel::declareOptions(), PLearn::JoinVMatrix::declareOptions(), PLearn::IsomapTangentLearner::declareOptions(), PLearn::Isomap::declareOptions(), PLearn::IsMissingVariable::declareOptions(), PLearn::IsAboveThresholdVariable::declareOptions(), PLearn::InterleaveVMatrix::declareOptions(), PLearn::IndexedVMatrix::declareOptions(), PLearn::IndexAtPositionVariable::declareOptions(), 
PLearn::HTryCombinations::declareOptions(), PLearn::HCoordinateDescent::declareOptions(), PLearn::HTryAll::declareOptions(), PLearn::HSetVal::declareOptions(), PLearn::HyperOptimizer::declareOptions(), PLearn::HistogramDistribution::declareOptions(), PLearn::GraphicalBiText::declareOptions(), PLearn::Grapher::declareOptions(), PLearn::GramVMatrix::declareOptions(), PLearn::GradientOptimizer::declareOptions(), PLearn::GeodesicDistanceKernel::declareOptions(), PLearn::GenerateDecisionPlot::declareOptions(), PLearn::GeneralizedOneHotVMatrix::declareOptions(), PLearn::GeneralizedDistanceRBFKernel::declareOptions(), PLearn::GaussMix::declareOptions(), PLearn::GaussianProcessRegressor::declareOptions(), PLearn::GaussianKernel::declareOptions(), PLearn::GaussianDistribution::declareOptions(), PLearn::GaussianDensityKernel::declareOptions(), PLearn::GaussianContinuum::declareOptions(), PLearn::Function::declareOptions(), PLearn::FractionSplitter::declareOptions(), PLearn::ForwardVMatrix::declareOptions(), PLearn::FinancePreprocVMatrix::declareOptions(), PLearn::FilterSplitter::declareOptions(), PLearn::FilteredVMatrix::declareOptions(), PLearn::FileVMatrix::declareOptions(), PLearn::FilePStreamBuf::declareOptions(), PLearn::ExtendedVMatrix::declareOptions(), PLearn::ExtendedVariable::declareOptions(), PLearn::ExplicitSplitter::declareOptions(), PLearn::Experiment::declareOptions(), PLearn::EqualConstantVariable::declareOptions(), PLearn::EntropyContrast::declareOptions(), PLearn::EmbeddedSequentialLearner::declareOptions(), PLearn::EmbeddedLearner::declareOptions(), PLearn::ElementAtPositionVariable::declareOptions(), PLearn::DuplicateScalarVariable::declareOptions(), PLearn::DuplicateRowVariable::declareOptions(), PLearn::DuplicateColumnVariable::declareOptions(), PLearn::DivisiveNormalizationKernel::declareOptions(), PLearn::Distribution::declareOptions(), PLearn::DistanceKernel::declareOptions(), PLearn::DiskVMatrix::declareOptions(), PLearn::Dictionary::declareOptions(), PLearn::DBSplitter::declareOptions(), PLearn::DatedJoinVMatrix::declareOptions(), PLearn::CutBelowThresholdVariable::declareOptions(), PLearn::CutAboveThresholdVariable::declareOptions(), PLearn::CumVMatrix::declareOptions(), PLearn::CrossReferenceVMatrix::declareOptions(), PLearn::ConvexBasisKernel::declareOptions(), PLearn::ConstantRegressor::declareOptions(), PLearn::ConjGradientOptimizer::declareOptions(), PLearn::ConditionalStatsCollector::declareOptions(), PLearn::ConditionalGaussianDistribution::declareOptions(), PLearn::ConditionalDensityNet::declareOptions(), PLearn::ConditionalCDFSmoother::declareOptions(), PLearn::ConcatRowsVMatrix::declareOptions(), PLearn::ConcatRowsSubVMatrix::declareOptions(), PLearn::ConcatOfVariable::declareOptions(), PLearn::ConcatColumnsVMatrix::declareOptions(), PLearn::CompactVMatrixPolynomialKernel::declareOptions(), PLearn::CompactVMatrixGaussianKernel::declareOptions(), PLearn::ClassMarginCostFunction::declareOptions(), PLearn::ClassifierFromDensity::declareOptions(), PLearn::ClassErrorCostFunction::declareOptions(), PLearn::CenteredVMatrix::declareOptions(), PLearn::BootstrapVMatrix::declareOptions(), PLearn::BootstrapSplitter::declareOptions(), PLearn::BinaryVariable::declareOptions(), PLearn::BatchVMatrix::declareOptions(), PLearn::AutoVMatrix::declareOptions(), PLearn::AsciiVMatrix::declareOptions(), PLearn::AffineTransformWeightPenalty::declareOptions(), PLearn::AdditiveNormalizationKernel::declareOptions(), PLearn::AddCostToLearner::declareOptions(), 
PLearn::AdaptGradientOptimizer::declareOptions(), and PLearn::AdaBoost::declareOptions().
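As an illustration of the calls documented above, here is a hedged sketch of a typical declareOptions() implementation. The class MyLearner and its fields are hypothetical, and the example assumes the usual PLearn convention of an 'inherited' typedef in the class:

    // Hypothetical example; only declareOption and the OptionBase flags come from the entry above.
    void MyLearner::declareOptions(OptionList& ol)
    {
        // A build option, set by the user before build():
        declareOption(ol, "n_hidden", &MyLearner::n_hidden,
                      OptionBase::buildoption,
                      "Number of hidden units.");

        // A learnt option, serialized with the object but filled by train():
        declareOption(ol, "weights", &MyLearner::weights,
                      OptionBase::learntoption,
                      "Learnt weight matrix.");

        // Also declare the parent class' options.
        inherited::declareOptions(ol);
    }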

template<class T>
TVec<T> deepCopy(const TVec<T>& source, CopiesMap& copies) [inline]
 

Definition at line 88 of file TVec_impl.h.

References PLearn::TVec< T >::deepCopy().

template<class T>
TVec<T> deepCopy(const TVec<T>& source) [inline]
 

< create empty map

Definition at line 81 of file TVec_impl.h.

References CopiesMap, and deepCopy().

template<class T>
TMat<T> deepCopy(const TMat<T> source, CopiesMap copies) [inline]
 

Definition at line 599 of file TMat_impl.h.

References PLearn::TMat< T >::deepCopy().

template<class T>
TMat<T> deepCopy(const TMat<T> source) [inline]
 

< create empty map

Definition at line 592 of file TMat_impl.h.

References CopiesMap, and deepCopy().

template<class T>
T* deepCopy(PP<T> source) [inline]
 

This function simply calls the previous one with an initially empty map.

Makes a copy of the PP with all its fields, respecting the dependency scheme between elements and without allowing double copies of equal elements.

< create empty map

Definition at line 224 of file PP.h.

References CopiesMap, and deepCopy().

template<class T>
T* deepCopy(PP<T> source, CopiesMap& copies)
 

A simple template function.

Definition at line 215 of file PP.h.

References deepCopy().

template<class T>
T* deepCopy(const T* source) [inline]
 

This function simply calls the previous one with an initially empty map.

< create empty map

Definition at line 84 of file CopiesMap.h.

References CopiesMap, and deepCopy().

template<class T>
T* deepCopy(const T* source, CopiesMap& copies)
 

A simple template function that calls the method.

Definition at line 79 of file CopiesMap.h.

Referenced by PLearn::ClassifierFromDensity::build_(), and deepCopy().

template<>
void PLearn::deepCopyField(VMat& field, CopiesMap& copies)
 

Definition at line 115 of file VMat.cc.

template<>
void deepCopyField(VarArray& field, CopiesMap& copies) [inline]
 

Definition at line 235 of file VarArray.h.

References PLearn::VarArray::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(Var& field, CopiesMap& copies) [inline]
 

Specialized in order to display a warning message.

Definition at line 74 of file Var.h.

References PLWARNING.

template<>
void PLearn::deepCopyField(Func& field, CopiesMap& copies)
 

Definition at line 638 of file Func.cc.

template<class T>
void deepCopyField(TVec<T>& field, CopiesMap& copies) [inline]
 

Definition at line 94 of file TVec_impl.h.

References PLearn::TVec< T >::makeDeepCopyFromShallowCopy().

template<class T>
void deepCopyField(TMat<T>& field, CopiesMap& copies) [inline]
 

Definition at line 603 of file TMat_impl.h.

References PLearn::TMat< T >::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(StatsItArray& field, CopiesMap& copies) [inline]
 

Definition at line 402 of file StatsIterator.h.

References PLearn::Array< StatsIt >::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(StatsCollector& field, CopiesMap& copies) [inline]
 

This specialization is apparently needed; otherwise the generic deepCopyField from CopiesMap.h was called when deep copying a TVec<StatsCollector>.

Definition at line 216 of file StatsCollector.h.

References PLearn::Object::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(Mat& field, CopiesMap& copies) [inline]
 

Definition at line 103 of file Mat.h.

References PLearn::TMat< T >::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(Vec& field, CopiesMap& copies) [inline]
 

Definition at line 72 of file Mat.h.

References PLearn::TVec< T >::makeDeepCopyFromShallowCopy().

template<>
void deepCopyField(Ker& field, CopiesMap& copies) [inline]
 

Definition at line 224 of file Kernel.h.

template<class T>
void deepCopyField(PP<T>& field, CopiesMap& copies) [inline]
 

Any pointer or smart pointer: call deepCopy().

Definition at line 198 of file PP.h.

References PLWARNING.

template<class T>
void deepCopyField(T*& field, CopiesMap& copies) [inline]
 

Definition at line 71 of file CopiesMap.h.

template<class T>
void deepCopyField(T&, CopiesMap&) [inline]
 

Types that do not require deep copy (any type not handled below): do nothing.

Support for generic deep copying.

Deep copying is defined for objects in the following manner:
+ copy constructors should always do a shallow copy;
+ a public method OBJTYPE* deepCopy(map<const void*, void*>& copies) const should be defined to allow deep copying;
+ the deepCopy method should be virtual for classes that are designed to be subclassed.
Take a close look at the Object class in Object.h to see how this is done.

no op

Definition at line 61 of file CopiesMap.h.

References PLWARNING.
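A minimal sketch of this convention, for a hypothetical Object subclass (the class and field names are invented; the real mechanics live in Object.h and CopiesMap.h):

    // Hypothetical class following the deep-copy convention described above.
    class MyVariable : public Object
    {
    public:
        Vec weights;        // field that must be deep-copied
        PP<Kernel> kernel;  // smart-pointer field, deep-copied through deepCopy()

        // The copy constructor stays a shallow copy (the compiler-generated one is fine).

        // Deep copy of the fields; deepCopyField() records each copied object in
        // 'copies' so that shared sub-objects are duplicated only once.
        virtual void makeDeepCopyFromShallowCopy(CopiesMap& copies)
        {
            Object::makeDeepCopyFromShallowCopy(copies);
            deepCopyField(weights, copies);
            deepCopyField(kernel, copies);
        }
    };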

template<class T>
void deepCopyField(Array<T>& field, CopiesMap& copies) [inline]
 

Definition at line 86 of file Array_impl.h.

References PLearn::Array< T >::makeDeepCopyFromShallowCopy().

Referenced by PLearn::Storage< pair< real, real > >::deepCopy(), PLearn::WeightedCostFunction::makeDeepCopyFromShallowCopy(), PLearn::VVMatrix::makeDeepCopyFromShallowCopy(), PLearn::VMatrix::makeDeepCopyFromShallowCopy(), PLearn::VecStatsCollector::makeDeepCopyFromShallowCopy(), PLearn::VecElementVariable::makeDeepCopyFromShallowCopy(), PLearn::Variable::makeDeepCopyFromShallowCopy(), PLearn::UnfoldedSumOfVariable::makeDeepCopyFromShallowCopy(), PLearn::UnfoldedFuncVariable::makeDeepCopyFromShallowCopy(), PLearn::TVec< T >::makeDeepCopyFromShallowCopy(), PLearn::TMat< T >::makeDeepCopyFromShallowCopy(), PLearn::TextSenseSequenceVMatrix::makeDeepCopyFromShallowCopy(), PLearn::TemporalHorizonVMatrix::makeDeepCopyFromShallowCopy(), PLearn::TangentLearner::makeDeepCopyFromShallowCopy(), PLearn::SumOverBagsVariable::makeDeepCopyFromShallowCopy(), PLearn::SumOfVariable::makeDeepCopyFromShallowCopy(), PLearn::SubVMatrix::makeDeepCopyFromShallowCopy(), PLearn::QuantilesStatsIterator::makeDeepCopyFromShallowCopy(), PLearn::LiftStatsIterator::makeDeepCopyFromShallowCopy(), PLearn::SharpeRatioStatsIterator::makeDeepCopyFromShallowCopy(), PLearn::StderrStatsIterator::makeDeepCopyFromShallowCopy(), PLearn::StddevStatsIterator::makeDeepCopyFromShallowCopy(), PLearn::StatsIterator::makeDeepCopyFromShallowCopy(), PLearn::StackedLearner::makeDeepCopyFromShallowCopy(), PLearn::Splitter::makeDeepCopyFromShallowCopy(), PLearn::SourceVMatrix::makeDeepCopyFromShallowCopy(), PLearn::SourceVariable::makeDeepCopyFromShallowCopy(), PLearn::SourceKernel::makeDeepCopyFromShallowCopy(), PLearn::SortRowsVMatrix::makeDeepCopyFromShallowCopy(), PLearn::ShellScript::makeDeepCopyFromShallowCopy(), PLearn::SequentialModelSelector::makeDeepCopyFromShallowCopy(), PLearn::SequentialLearner::makeDeepCopyFromShallowCopy(), PLearn::SelectRowsVMatrix::makeDeepCopyFromShallowCopy(), PLearn::SelectInputSubsetLearner::makeDeepCopyFromShallowCopy(), PLearn::SelectedOutputCostFunction::makeDeepCopyFromShallowCopy(), PLearn::SelectColumnsVMatrix::makeDeepCopyFromShallowCopy(), PLearn::ScaledLaplacianKernel::makeDeepCopyFromShallowCopy(), PLearn::ScaledGeneralizedDistanceRBFKernel::makeDeepCopyFromShallowCopy(), PLearn::ScaledGaussianKernel::makeDeepCopyFromShallowCopy(), PLearn::RowBufferedVMatrix::makeDeepCopyFromShallowCopy(), PLearn::RepeatSplitter::makeDeepCopyFromShallowCopy(), PLearn::RegularGridVMatrix::makeDeepCopyFromShallowCopy(), PLearn::PTester::makeDeepCopyFromShallowCopy(), PLearn::PrecomputedVMatrix::makeDeepCopyFromShallowCopy(), PLearn::PrecomputedKernel::makeDeepCopyFromShallowCopy(), PLearn::PLS::makeDeepCopyFromShallowCopy(), PLearn::PLearnerOutputVMatrix::makeDeepCopyFromShallowCopy(), PLearn::PLearner::makeDeepCopyFromShallowCopy(), PLearn::PDistributionVariable::makeDeepCopyFromShallowCopy(), PLearn::PDistribution::makeDeepCopyFromShallowCopy(), PLearn::PCA::makeDeepCopyFromShallowCopy(), PLearn::Optimizer::makeDeepCopyFromShallowCopy(), PLearn::NNet::makeDeepCopyFromShallowCopy(), PLearn::NeuralNet::makeDeepCopyFromShallowCopy(), PLearn::NeighborhoodSmoothnessNNet::makeDeepCopyFromShallowCopy(), PLearn::NegKernel::makeDeepCopyFromShallowCopy(), PLearn::NaryVariable::makeDeepCopyFromShallowCopy(), PLearn::MultiInstanceVMatrix::makeDeepCopyFromShallowCopy(), PLearn::MultiInstanceNNet::makeDeepCopyFromShallowCopy(), PLearn::MovingAverageVMatrix::makeDeepCopyFromShallowCopy(), PLearn::MemoryVMatrix::makeDeepCopyFromShallowCopy(), PLearn::MatRowVariable::makeDeepCopyFromShallowCopy(), 
PLearn::MatrixSumOfVariable::makeDeepCopyFromShallowCopy(), PLearn::MatrixElementsVariable::makeDeepCopyFromShallowCopy(), PLearn::LogSumVariable::makeDeepCopyFromShallowCopy(), PLearn::LocalNeighborsDifferencesVMatrix::makeDeepCopyFromShallowCopy(), PLearn::LinearRegressor::makeDeepCopyFromShallowCopy(), PLearn::LiftStatsCollector::makeDeepCopyFromShallowCopy(), PLearn::LearnerProcessedVMatrix::makeDeepCopyFromShallowCopy(), PLearn::Learner::makeDeepCopyFromShallowCopy(), PLearn::KNNVMatrix::makeDeepCopyFromShallowCopy(), PLearn::Kernel::makeDeepCopyFromShallowCopy(), PLearn::JulianizeVMatrix::makeDeepCopyFromShallowCopy(), PLearn::IndexedVMatrix::makeDeepCopyFromShallowCopy(), PLearn::HistogramDistribution::makeDeepCopyFromShallowCopy(), PLearn::GaussMix::makeDeepCopyFromShallowCopy(), PLearn::GaussianProcessRegressor::makeDeepCopyFromShallowCopy(), PLearn::GaussianKernel::makeDeepCopyFromShallowCopy(), PLearn::GaussianDistribution::makeDeepCopyFromShallowCopy(), PLearn::GaussianContinuum::makeDeepCopyFromShallowCopy(), PLearn::Function::makeDeepCopyFromShallowCopy(), PLearn::ForwardVMatrix::makeDeepCopyFromShallowCopy(), PLearn::FinancePreprocVMatrix::makeDeepCopyFromShallowCopy(), PLearn::FilterSplitter::makeDeepCopyFromShallowCopy(), PLearn::ExplicitSplitter::makeDeepCopyFromShallowCopy(), PLearn::EmpiricalDistribution::makeDeepCopyFromShallowCopy(), PLearn::EmbeddedSequentialLearner::makeDeepCopyFromShallowCopy(), PLearn::EmbeddedLearner::makeDeepCopyFromShallowCopy(), PLearn::DatedJoinVMatrix::makeDeepCopyFromShallowCopy(), PLearn::CumVMatrix::makeDeepCopyFromShallowCopy(), PLearn::ConditionalStatsCollector::makeDeepCopyFromShallowCopy(), PLearn::ConditionalDensityNet::makeDeepCopyFromShallowCopy(), PLearn::ConditionalCDFSmoother::makeDeepCopyFromShallowCopy(), PLearn::ConcatOfVariable::makeDeepCopyFromShallowCopy(), PLearn::CompactVMatrix::makeDeepCopyFromShallowCopy(), PLearn::ClassifierFromDensity::makeDeepCopyFromShallowCopy(), PLearn::BatchVMatrix::makeDeepCopyFromShallowCopy(), PLearn::Array< T >::makeDeepCopyFromShallowCopy(), PLearn::ArgminOfVariable::makeDeepCopyFromShallowCopy(), PLearn::AddCostToLearner::makeDeepCopyFromShallowCopy(), and PLearn::AdaBoost::makeDeepCopyFromShallowCopy().

Var det(Var m) [inline]
 

Definition at line 78 of file DeterminantVariable.h.

template<class T>
T det(const TMat<T>& LU, int detsign)
 

Definition at line 5262 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
T det(const TMat<T>& A)
 

Definition at line 5240 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy(), PLearn::TMat< T >::length(), LU_decomposition(), maxabs(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DeterminantVariable::fprop(), and PLearn::ProductRandomVariable::invertible().

double PLearn::determine_density_integral_from_log_densities_on_grid(Vec log_densities, real deltax, real deltay)
 

Definition at line 80 of file learner_utils.cc.

References exp(), and logadd().

void PLearn::determine_grid_for_dataset(VMat dataset, int nx, int ny, real x0, real y0, real deltax, real deltay, real extraspace = .10)
 

Definition at line 65 of file learner_utils.cc.

References computeRange(), and PLearn::VMat::subMatColumns().

void dgesdd_(char* JOBZ, int M, int N, double* A, int LDA, double* S, double* U, int LDU, double* VT, int LDVT, double* WORK, int LWORK, int IWORK, int INFO)
 

Referenced by lapack_Xgesdd_().

void dgesv_(int N, int NRHS, double* A, int LDA, int IPIV, double* B, int LDB, int INFO)
 

Referenced by lapackSolveLinearSystem().

void dgetrf_(int M, int N, double* A, int LDA, int IPIV, int INFO)
 

Referenced by matInvert().

void dgetri_(int N, double* A, int LDA, int IPIV, double* WORK, int LWORK, int INFO)
 

Referenced by matInvert().

template<class T>
TVec<T> diag(const TMat<T>& mat)
 

Definition at line 3656 of file TMat_maths_impl.h.

References diag(), and PLearn::TMat< T >::length().

template<class T>
void diag(const TMat<T>& mat, const TVec<T>& d)
 

Definition at line 3648 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TMat< T >::length().

Referenced by PLearn::EntropyContrast::compute_df_dx(), diag(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), and PLearn::EntropyContrast::train().

Var diagonalized_factors_product(Var left_matrix, Var center_diagonal, Var right_matrix) [inline]
 

Definition at line 87 of file DiagonalizedFactorsProductVariable.h.

Referenced by PLearn::TangentLearner::build_(), and PLearn::GaussianContinuum::build_().

template<class T>
void diagonalizedFactorsProduct(TMat<T>& result, const TMat<T>& U, const TVec<T> d, const TMat<T> V, bool accumulate = false)
 

return the matrix with elements (i,j) = sum_k U_{ik} d_k V_{kj}

Definition at line 2421 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::EntropyContrast::compute_df_dx(), PLearn::DiagonalizedFactorsProductVariable::fprop(), and PLearn::EntropyContrast::train().
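To make the formula above concrete, here is a small plain-C++ reference loop (illustration only, not the PLearn implementation in TMat_maths_impl.h) computing result(i,j) = sum_k U(i,k) d(k) V(k,j), i.e. U * diag(d) * V:

    #include <cstddef>
    #include <vector>

    // Naive reference computation of result[i][j] = sum_k U[i][k] * d[k] * V[k][j].
    std::vector< std::vector<double> >
    diagonalized_factors_product_ref(const std::vector< std::vector<double> >& U,
                                     const std::vector<double>& d,
                                     const std::vector< std::vector<double> >& V)
    {
        std::size_t n = U.size(), nk = d.size(), m = V[0].size();
        std::vector< std::vector<double> > result(n, std::vector<double>(m, 0.0));
        for (std::size_t i = 0; i < n; ++i)
            for (std::size_t k = 0; k < nk; ++k)
                for (std::size_t j = 0; j < m; ++j)
                    result[i][j] += U[i][k] * d[k] * V[k][j];
        return result;
    }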

template<class T>
void diagonalizedFactorsProductBprop(const TMat<T>& dCdresult, const TMat<T>& U, const TVec<T> d, const TMat<T> V, TMat<T>& dCdU, TVec<T>& dCdd, TMat<T>& dCdV)
 

Given that res(i,j) = sum_k U_{ik} d_k V_{kj}, and given dC/dres, U, d and V, accumulate gradients on dC/dU, dC/dd and dC/dV:
  dC/dU[i,k] += sum_j dC/dres[i,j] d_k V[k,j]
  dC/dd[k]   += sum_{i,j} dC/dres[i,j] U[i,k] V[k,j]
  dC/dV[k,j] += d_k * sum_i U[i,k] dC/dres[i,j]

Definition at line 2455 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::bprop().

template<class T>
void diagonalizedFactorsProductTranspose(TMat<T>& result, const TMat<T>& U, const TVec<T> d, const TMat<T> V, bool accumulate = false)
 

return the matrix with elements (i,j) = sum_k U_{ik} d_k V_{jk}

Definition at line 2492 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::fprop().

template<class T>
void diagonalizedFactorsProductTransposeBprop(const TMat<T>& dCdresult, const TMat<T>& U, const TVec<T> d, const TMat<T> V, TMat<T>& dCdU, TVec<T>& dCdd, TMat<T>& dCdV)
 

Definition at line 2527 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::bprop().

template<class T>
void diagonalizedFactorsTransposeProduct(TMat<T>& result, const TMat<T>& U, const TVec<T> d, const TMat<T> V, bool accumulate = false)
 

return the matrix with elements (i,j) = sum_k U_{ki} d_k V_{kj}

Definition at line 2567 of file TMat_maths_impl.h.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::fprop().

template<class T>
void diagonalizedFactorsTransposeProductBprop(const TMat<T>& dCdresult, const TMat<T>& U, const TVec<T> d, const TMat<T> V, TMat<T>& dCdU, TVec<T>& dCdd, TMat<T>& dCdV)
 

Definition at line 2600 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::bprop().

template<class T>
void diagonalizedFactorsTransposeProductTranspose(TMat<T>& result, const TMat<T>& U, const TVec<T> d, const TMat<T> V, bool accumulate = false)
 

return the matrix with elements (i,j) = sum_k U_{ki} d_k V_{jk}

Definition at line 2641 of file TMat_maths_impl.h.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::fprop().

template<class T>
void diagonalizedFactorsTransposeProductTransposeBprop(const TMat<T>& dCdresult, const TMat<T>& U, const TVec<T> d, const TMat<T> V, TMat<T>& dCdU, TVec<T>& dCdd, TMat<T>& dCdV)
 

Definition at line 2673 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::DiagonalizedFactorsProductVariable::bprop().

template<class MatT>
void diagonalizeSubspace(MatT& A, Mat& X, Vec& Ax, Mat& solutions, Vec& evalues, Mat& evectors)
 

Diagonalize the sub-space spanned by the rows of X(mxn) with respect to symmetric matrix A(nxn), m<=n. The eigenpairs will be put in the evalues/evectors arguments (expressed in the basis of X), and the corresponding basis in R^n will be put in the solutions(kxn) matrix.

The function proceeds as follows:

Gram-Schmidt orthonormalize X, so that X X' = I(mxm)
C(mxm) = X A X'
solve the small eigensystem C = V' S V (V = evectors, S = evalues)
solutions = V X

Thus in the end we have

solutions solutions' = V X X' V' = I (if X was orthonormal to start with)
solutions A solutions' = V X A X' V' = V C V' = S

Implementation notes (comments collected from the code): first collect C = X A X' (symmetric part); then diagonalize C = evectors' * diag(evalues) * evectors; the eigenvalues should be in increasing order; the eigenvectors corresponding to the smallest eigenvalues are converted.

Definition at line 641 of file plapack.h.

References PLearn::TVec< T >::clear(), PLearn::TMat< T >::copy(), dot(), eigen_SymmMat(), endl(), GramSchmidtOrthogonalization(), PLearn::TMat< T >::length(), multiplyAcc(), norm(), product(), and PLearn::TMat< T >::subMatRows().
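To make the "collect C = X A X'" step above concrete, here is a plain-C++ reference loop (illustration only; the actual implementation is in plapack.h and works on PLearn Mat objects):

    #include <cstddef>
    #include <vector>

    typedef std::vector< std::vector<double> > Matrix;

    // C (m x m) = X A X', where X is m x n and A is n x n (symmetric).
    Matrix collect_C(const Matrix& X, const Matrix& A)
    {
        std::size_t m = X.size(), n = A.size();
        Matrix C(m, std::vector<double>(m, 0.0));
        for (std::size_t i = 0; i < m; ++i)
            for (std::size_t j = 0; j < m; ++j)
                for (std::size_t a = 0; a < n; ++a)
                    for (std::size_t b = 0; b < n; ++b)
                        C[i][j] += X[i][a] * A[a][b] * X[j][b];  // (X A X')_{ij}
        return C;
    }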

template<class T>
TMat<T> PLearn::diagonalmatrix(const TVec<T>& v)
 

Definition at line 578 of file TMat_impl.h.

References PLearn::TVec< T >::length().

template<class T>
void diagonalOfSquare(const TMat<T>& mat, const TVec<T>& d)
 

Definition at line 3664 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), and pownorm().

void difference(Set a, Set b, Set res) [inline]
 

Definition at line 93 of file Set.h.

References PLearn::Set::begin(), and PLearn::Set::end().

Referenced by PLearn::FieldConvertCommand::FieldConvertCommand(), PLearn::MinusRandomVariable::MinusRandomVariable(), and PLearn::PlusRandomVariable::PlusRandomVariable().

template<class T>
void diffSquareMultiplyAcc(const TMat<T>& mat, const TMat<T>& x, const TMat<T>& y, T scale)
 

Definition at line 3565 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void diffSquareMultiplyAcc(const TVec<T>& vec, const TVec<T>& x, const TVec<T>& y, T scale)
 

TVec[i] += (x[i]-y[i])^2*scale;.

Definition at line 2173 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::DiagonalNormalRandomVariable::EMBprop().

template<class T>
void diffSquareMultiplyScaledAcc(const TVec<T>& vec, const TVec<T>& x, const TVec<T>& y, T fact1, T fact2)
 

TVec[i] = TVec[i]*fact1 + (x[i]-y[i])^2*fact2;.

Definition at line 2191 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Var dilogarithm(Var v) [inline]
 

Definition at line 75 of file DilogarithmVariable.h.

real PLearn::dilogarithm(real x)
 

Return the dilogarithm function dilogarithm(x) = sum_{i=1}^{infinity} x^i/i^2 = int_{z=x}^0 log(1-z)/z dz. It is also useful because -dilogarithm(-exp(x)) is the primitive of the softplus function log(1+exp(x)).

Definition at line 203 of file pl_math.cc.

References is_missing(), MISSING_VALUE, PLWARNING, positive_dilogarithm(), and x.

Referenced by PLearn::DilogarithmVariable::fprop(), and softplus_primitive().
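A stand-alone sketch of the series above, valid for |x| <= 1 (purely illustrative; this is not PLearn's implementation, which lives in pl_math.cc and also handles other ranges of x):

    // Partial-sum approximation of dilogarithm(x) = sum_{i=1}^{infinity} x^i / i^2.
    // Only meaningful for |x| <= 1; more terms give a better approximation.
    double dilogarithm_series(double x, int n_terms)
    {
        double sum = 0.0, xi = 1.0;
        for (int i = 1; i <= n_terms; ++i)
        {
            xi *= x;                          // x^i
            sum += xi / double(i) / double(i);
        }
        return sum;
    }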

CostFunc directnegative_costfunc() [inline]
 

Definition at line 67 of file DirectNegativeCostFunction.h.

void PLearn::displayBasicStats(VMat vm)
 

Definition at line 174 of file vmatmain.cc.

References endl(), PLearn::VMat::fieldName(), k, and PLearn::VMat::width().

Referenced by vmatmain().

void PLearn::displayDecisionSurface(GhostScript& gs, real destx, real desty, real destwidth, real destheight, Learner& learner, Mat trainset, Vec svindexes = Vec(), Vec outlierindexes = Vec(), int nextsvindex = -1, real min_x = -1, real max_x = +1, real min_y = -1, real max_y = +1, real radius = 0.05, int nx = 200, int ny = 200)
 

This will display a rectangle (0,0,nx,ny) containing a 2D image of the decision surface for the given learner with the points of each class displayed with + and x, and optionally, the points in svindexes circled in black and the points in outlierindexes circled in gray.

Definition at line 696 of file DisplayUtils.cc.

References PLearn::TMat< T >::column(), compute2dGridOutputs(), PLearn::GhostScript::displayGray(), displayPoints(), PLearn::GhostScript::drawCircle(), PLearn::GhostScript::grestore(), PLearn::GhostScript::gsave(), PLearn::Learner::inputsize(), k, PLearn::TVec< T >::length(), PLearn::GhostScript::mapping(), max(), min(), PLearn::Learner::outputsize(), PLearn::GhostScript::setdash(), PLearn::GhostScript::setlinewidth(), and x.

void PLearn::displayFunction(Func f, bool display_values = false, bool display_differentiation = false, real boxwidth = 100, const char* the_filename = 0, bool must_wait = true)
 

Definition at line 614 of file DisplayUtils.cc.

References displayVarGraph().

Referenced by PLearn::SumOfVariable::fbprop(), PLearn::ConditionalDensityNet::train(), and PLearn::Function::verifySymbolicGradient().

void PLearn::displayHistogram(Gnuplot& gs, Mat dataColumn, int n_bins = 0, Vec* bins = 0, bool regular_bins = false, bool normalized = false, string extra_args = "")
 

Display a histogram of the density of the data column. By default, n_bins is set according to the number of data points: n_bins = min(5+dataColumn.length()/10, 1000), but the user can override this. By default (n_bins=0) the bins are equally spaced such that each bin receives approximately the same number of points. The user can override that by providing a vector of bin boundaries (min to bins[0], bins[0] to bins[1], ... bins[n_bins-2] to max), in which case n_bins will be bins.length()+1, or the user can request regularly spaced bins with the bool argument. If normalized is true, relative frequencies rather than actual frequencies are plotted.

Definition at line 57 of file DisplayUtils.cc.

References PLearn::TVec< T >::data(), histogram(), left(), PLearn::TVec< T >::length(), MIN, PLearn::Gnuplot::plot(), PLWARNING, PLearn::TVec< T >::resize(), sortElements(), and PLearn::TMat< T >::toVecCopy().

void PLearn::displayObjectHelp(ostream& out, const string classname)
 

Will display the help message for an object of the given classname.

Definition at line 105 of file TypeFactory.cc.

References addprefix(), PLearn::TypeMapEntry::constructor, endl(), PLearn::TypeMapEntry::getoptionlist_method, PLearn::TypeFactory::getTypeMap(), PLearn::TypeMapEntry::isa_method, PLearn::TypeMapEntry::multi_line_help, PLearn::TypeMapEntry::one_line_descr, PLERROR, and TypeMap.

Referenced by old_plearn_main(), and PLearn::HelpCommand::run().

void PLearn::displayPoints(GhostScript& gs, Mat data, real radius, bool color = false)
 

This draws x and + with the given radius for all the points in data (supposed to have width 3: [x, y, classnum]).

Definition at line 649 of file DisplayUtils.cc.

References color(), PLearn::GhostScript::drawCross(), PLearn::TMat< T >::length(), and PLearn::GhostScript::setcolor().

Referenced by displayDecisionSurface().

void PLearn::displayVarGraph(const VarArray& outputs, bool display_values = false, real boxwidth = 100, const char* the_filename = 0, bool must_wait = true, VarArray display0_only_these = VarArray())
 


Definition at line 177 of file DisplayUtils.cc.

References PLearn::TmpFilenames::addFilename(), PLearn::VarArray::ancestors(), PLearn::TVec< Var >::append(), center(), PLearn::GhostScript::centerShow(), PLearn::VarArray::clearMark(), PLearn::TVec< Var >::contains(), distance(), PLearn::GhostScript::drawArrow(), PLearn::GhostScript::drawBox(), PLearn::Var::length(), PLearn::VarArray::setMark(), PLearn::TVec< Var >::size(), PLearn::VarArray::sources(), PLearn::VarArray::unmarkAncestors(), PLearn::GhostScript::usefont(), PLearn::Var::width(), and x.

Referenced by displayFunction(), PLearn::Function::fbprop(), PLearn::LogVariable::fprop(), PLearn::GradientOptimizer::optimize(), and PLearn::GradientOptimizer::optimizeN().

template<class T>
T dist(const TVec<T>& vec1, const TVec<T>& vec2, double n)
 

Definition at line 760 of file TMat_maths_impl.h.

References mypow(), powdistance(), and sqrt().

Referenced by PLearn::GeodesicDistanceKernel::computeNearestGeodesicNeighbour(), PLearn::Kernel::estimateHistograms(), PLearn::GeodesicDistanceKernel::evaluate(), PLearn::DistanceKernel::evaluate(), findClosestPairsOfDifferentClass(), PLearn::GaussMix::kmeans(), L1distance(), L2distance(), PLearn::PDistributionVariable::PDistributionVariable(), positionOfClosestElement(), PLearn::GeodesicDistanceKernel::setDataForKernelMatrix(), PLearn::PLS::train(), and PLearn::ManifoldParzen2::train().

Var PLearn::distance(Var input1, Var input2, real n)
 

Definition at line 103 of file Var_utils.cc.

References norm().

Referenced by displayVarGraph().

template<class T>
void divide(T source1, const TVec<T>& source2, TVec<T>& destination)
 

Definition at line 1539 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

template<class T>
void divide(const TVec<T>& source1, const TVec<T>& source2, TVec<T>& destination)
 

Definition at line 1522 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

template<class T>
void divide(const TVec<T>& source1, T source2, TVec<T>& destination) [inline]
 

Definition at line 1517 of file TMat_maths_impl.h.

References multiply().

Referenced by makeRowsSumTo1(), and PLearn::TestDependenciesCommand::TestDependenciesCommand().

int dnaupd_(long int, const char*, long int, const char*, long int, double*, double*, long int, double*, long int, long int, long int, double*, double*, long int, long int, short, short)
 

Referenced by eigenSparseNonSymmMat().

int dneupd_(long int, const char*, long int, double*, double*, double*, long int, double*, double*, double*, const char*, long int, const char*, long int, double*, double*, long int, double*, long int, long int, long int, double*, double*, long int, long int, short, short, short)
 

Referenced by eigenSparseNonSymmMat().

Var dot(Var v1, Var v2) [inline]
 

dot product

Definition at line 82 of file DotProductVariable.h.

template<class T>
T dot(const TMat<T>& m1, const TMat<T>& m2)
 

Definition at line 1124 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::data(), PLearn::TMat< T >::isCompact(), PLERROR, and PLearn::TMat< T >::size().

template<class V, class T, class U>
V dot(const TVec<T>& vec1, const TVec<U>& vec2)
 

Special dot product that allows TVec's of different types, as long as operator*(T,U) is defined.

The return type V must be specified in all circumstances, e.g.:

    TVec<int> v1;
    TVec<float> v2;
    double result = dot<double>(v1, v2);

Definition at line 1108 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

template<class T>
T dot(const TVec<T>& vec1, const TVec<T>& vec2)
 

Definition at line 1087 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::EntropyContrast::compute_extra_grad_wrt_df_dx(), PLearn::ConjGradientOptimizer::computeCostAndDerivative(), PLearn::KernelProjection::computeCostsFromOutputs(), PLearn::ConjGradientOptimizer::computeDerivative(), PLearn::GaussMix::computeLogLikelihood(), PLearn::PCA::computeOutput(), PLearn::ConjGradientOptimizer::conjpomdp(), constrainedLinearRegression(), PLearn::ConjGradientOptimizer::daiYuan(), diagonalizeSubspace(), PLearn::VMatrix::dot(), PLearn::RowBufferedVMatrix::dot(), PLearn::CompactVMatrix::dot(), PLearn::CompactVMatrix::dotProduct(), PLearn::SigmoidPrimitiveKernel::evaluate(), PLearn::SigmoidalKernel::evaluate(), PLearn::PolynomialKernel::evaluate(), PLearn::NormalizedDotProductKernel::evaluate(), PLearn::DotProductKernel::evaluate(), PLearn::CompactVMatrixPolynomialKernel::evaluate(), PLearn::GaussianProcessRegressor::expectation(), extract_directory(), PLearn::ConjGradientOptimizer::findDirection(), PLearn::ProjectionErrorVariable::fprop(), PLearn::ConjGradientOptimizer::gSearch(), PLearn::ConjGradientOptimizer::hestenesStiefel(), PLearn::GaussianProcessRegressor::inverseCovTimesVec(), linearRegression(), locateDatasetAliasesDir(), logOfCompactGaussian(), logOfNormal(), mahalanobis_distance(), makeExplicitPath(), PLearn::ConjGradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimizeN(), PLearn::ConjGradientOptimizer::polakRibiere(), productTranspose(), projectOnOrthogonalSubspace(), PLearn::GaussianProcessRegressor::QFormInverse(), PLearn::TimesScalarVariable::symbolicBprop(), PLearn::PowVariableVariable::symbolicBprop(), PLearn::PLS::train(), PLearn::PCA::train(), PLearn::LinearRegressor::train(), PLearn::Function::verifyGradient(), and weightedLinearRegression().

real dot_product(real s, real* x, real* y, int n) [inline]
 

Definition at line 368 of file TMat_maths_specialisation.h.

References x.

Referenced by PLearn::CompactVMatrix::dot(), PLearn::CompactVMatrixPolynomialKernel::evaluate(), and PLearn::PolynomialKernel::evaluateFromDot().

PDateTime PLearn::double_to_datetime(double f)
 

Definition at line 146 of file PDateTime.cc.

References PLearn::PDateTime::day, double_to_hhmmss(), PLearn::PDateTime::hour, is_missing(), PLearn::PDateTime::min, PLearn::PDateTime::month, PLearn::PDateTime::sec, and PLearn::PDateTime::year.

void PLearn::double_to_hhmmss(double fraction, int& hh, int& mm, int& ss)
 

convert a day fraction (< 1) to hours/minutes/seconds

Definition at line 173 of file PDateTime.cc.

Referenced by double_to_datetime(), and PLearn::PDateTime::PDateTime().
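One plausible way to perform this conversion (an illustrative sketch only; the exact rounding behaviour of PLearn::double_to_hhmmss is defined in PDateTime.cc):

    #include <cmath>

    // Illustrative conversion of a day fraction to hours/minutes/seconds,
    // e.g. 0.75 -> 18:00:00.  Not necessarily identical to the PLearn code.
    void day_fraction_to_hhmmss(double fraction, int& hh, int& mm, int& ss)
    {
        double hours = fraction * 24.0;
        hh = int(std::floor(hours));
        double minutes = (hours - hh) * 60.0;
        mm = int(std::floor(minutes));
        ss = int(std::floor((minutes - mm) * 60.0));
    }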

int dsaupd_(long int, const char*, long int, const char*, long int, double*, double*, long int, double*, long int, long int, long int, double*, double*, long int, long int, short, short)
 

Referenced by eigenSparseSymmMat().

int dseupd_(long int, const char*, long int, double*, double*, long int, double*, const char*, long int, const char*, long int, double*, double*, long int, double*, long int, long int, long int, double*, double*, long int, long int, short, short, short)
 

Referenced by eigenSparseSymmMat().

void dsyev_(char* JOBZ, char* UPLO, int N, double* A, int LDA, double* W, double* WORK, int LWORK, int INFO)
 

Referenced by eigen_SymmMat().

void dsyevr_(char* JOBZ, char* RANGE, char* UPLO, int N, double* A, int LDA, double* VL, double* VU, int IL, int IU, double* ABSTOL, int M, double* W, double* Z, int LDZ, int ISUPPZ, double* WORK, int LWORK, int IWORK, int LIWORK, int INFO)
 

Referenced by lapack_Xsyevr_().

void dsyevx_(char* JOBZ, char* RANGE, char* UPLO, int N, double* A, int LDA, double* VL, double* VU, int IL, int IU, double* ABSTOL, int M, double* W, double* Z, int LDZ, double* WORK, int LWORK, int IWORK, int IFAIL, int INFO)
 

Referenced by lapack_Xsyevx_().

void dsygvx_(int ITYPE, char* JOBZ, char* RANGE, char* UPLO, int N, double* A, int LDA, double* B, int LDB, double* VL, double* VU, int IL, int IU, double* ABSTOL, int M, double* W, double* Z, int LDZ, double* WORK, int LWORK, int IWORK, int IFAIL, int INFO)
 

Referenced by lapack_Xsygvx_().

Var duplicateColumn(Var v, int the_width) [inline]
 

Definition at line 81 of file DuplicateColumnVariable.h.

References PLERROR.

Var duplicateRow(Var v, int the_length) [inline]
 

Definition at line 80 of file DuplicateRowVariable.h.

References PLERROR.

Var duplicateScalar(Var v, int the_length, int the_width) [inline]
 

Definition at line 81 of file DuplicateScalarVariable.h.

References PLERROR.

void PLearn::DX_create_dataset_outputs_file(const string filename, PP<PLearner> learner, VMat dataset)
 

Will write a file containing a field with the dataset positions; the "dset" field will be input -> target, outputs.

Definition at line 278 of file GenerateDecisionPlot.cc.

References endl(), PLearn::VMat::getExample(), PLearn::VMat::length(), and PLearn::ProgressBar::update().

Referenced by PLearn::GenerateDecisionPlot::run().

void PLearn::DX_create_grid_outputs_file(const string filename, PP<PLearner> learner, VMat dataset, int nx, int ny, bool include_datapoint_grid = false, real xmin = MISSING_VALUE, real xmax = MISSING_VALUE, real ymin = MISSING_VALUE, real ymax = MISSING_VALUE, real extraspace = .10)
 

The "outputs" field will contain sample-grid inputs -> outputs Where the sample grid is made of a regular grid of nx.ny points (in the range [xmin, xmax] x [ymin, ymax]) xmin, xmax, ymin and ymax may be left to MISSING_VALUE, in which case an automatic range will be determined from the range of the points in the given dataset extended by extraspace (ex: .10 == 10%).

This regular grid is possibly complemented (if include_datapoint_grid) with an irregular grid made of the x and y coordinates of the dataset that fall within the [xmin, xmax] x [ymin, ymax] range.

Definition at line 343 of file GenerateDecisionPlot.cc.

References PLearn::ProgressBar::close(), computeRange(), endl(), exp(), PLearn::VMat::getExample(), is_missing(), PLearn::VMat::length(), logadd(), PLearn::VMat::subMatColumns(), tostring(), PLearn::ProgressBar::update(), and x.

Referenced by PLearn::GenerateDecisionPlot::run().

void PLearn::DX_save_2D_data(const string filename, const string basename, Mat data)
 

considers data to have 2d input (first 2 columns of data)

Definition at line 207 of file learner_utils.cc.

References DX_write_2D_data(), and PLERROR.

void PLearn::DX_save_2D_data_for_grid(const string filename, const string basename, int nx, int ny, real x0, real y0, real deltax, real deltay, Mat data)
 

data must have nx*ny rows and must correspond to the values associated with the 2D positions of the grid (typically learner outputs on that grid)

Definition at line 215 of file learner_utils.cc.

References DX_write_2D_data_for_grid(), and PLERROR.

void PLearn::DX_write_2D_data(ostream& out, const string basename, Mat data)
 

considers data to have 2d input (first 2 columns of data)

Definition at line 117 of file learner_utils.cc.

References PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

Referenced by DX_save_2D_data().

void PLearn::DX_write_2D_data_for_grid(ostream& out, const string basename, int nx, int ny, real x0, real y0, real deltax, real deltay, Mat data)
 

data must have nx*ny rows and must correspond to the values associated with the 2D positions of the grid (typically learner outputs on that grid)

Definition at line 158 of file learner_utils.cc.

References PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

Referenced by DX_save_2D_data_for_grid().

void PLearn::DX_write_2D_fields(ostream& out, const string basename, Vec X, Vec Y, TVec<Mat> fields)
 

Definition at line 99 of file GenerateDecisionPlot.cc.

References k, PLearn::TVec< T >::length(), and tostring().

void PLearn::DX_write_2D_fields(ostream& out, const string basename, TVec<Mat> fields, real x0, real y0, real deltax, real deltay, TVec<string> fieldnames = TVec<string>())
 

If fieldnames is omitted then the fields will be named basename_0 basename_1 ... Otherwise they are named basename_ followed by the corresponding field name.

Definition at line 53 of file GenerateDecisionPlot.cc.

References k, PLearn::TVec< T >::length(), and tostring().

int PLearn::eigen_SymmMat(Mat& in, Vec& e_value, Mat& e_vector, int n_evalues_found, bool compute_all, int nb_eigen, bool compute_vectors = true, bool largest_evalues = true)
 

This function computes some or all eigenvalues (and optionally the corresponding eigenvectors) of a symmetric matrix. The eigenvectors are returned in the ROWS of e_vector.

Note1: If compute_all==true, then the field nb_eigen will not be used.

Note2: Your input matrix 'in' will be over-written.

Note3: If compute_all==false, only some eigenvalues (and optionally eigenvectors) are computed. This flag allows you to select whether those largest in magnitude (the default) or smallest in magnitude are selected.

Note4: This function is slightly modified: you no longer have to check whether your input matrix is symmetric or not.

Note5: The eigenvectors and eigenvalues seem to be sorted in increasing order.

Definition at line 49 of file plapack.cc.

References PLearn::TMat< T >::data(), dsyev_(), endl(), PLearn::TMat< T >::isSymmetric(), lapack_Xsyevx_(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, PLWARNING, PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), ssyev_(), and PLearn::TMat< T >::width().

Referenced by affineNormalization(), diagonalizeSubspace(), eigen_SymmMat_decreasing(), and multivariate_normal().
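A hedged usage sketch based only on the signature and notes above (whether n_evalues_found is passed by reference is not shown on this page; the call below compiles either way):

    // Compute all eigenpairs of a 3x3 symmetric matrix.  Remember that 'in' is
    // over-written and that the eigenvectors end up in the ROWS of e_vector.
    Mat in(3, 3);               // ... fill with a symmetric matrix before the call
    Vec e_value;
    Mat e_vector;
    int n_found = 0;
    int status = eigen_SymmMat(in, e_value, e_vector, n_found,
                               true /* compute_all */, 3 /* nb_eigen, unused here */);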

int PLearn::eigen_SymmMat_decreasing(Mat& in, Vec& e_value, Mat& e_vector, int n_evalues_found, bool compute_all, int nb_eigen, bool compute_vectors = true, bool largest_evalues = true)
 

Same as the previous call, but the eigenvalues/vectors are sorted with the largest first (in decreasing order).

Definition at line 207 of file plapack.cc.

References eigen_SymmMat(), PLWARNING, PLearn::TVec< T >::swap(), and PLearn::TMat< T >::swapUpsideDown().

template<class MatT>
int eigenSparseNonSymmMat(MatT& A, Vec e_values, Mat e_vectors, long int n_evalues, int max_n_iter = 300, bool compute_vectors = true, bool largest_evalues = true, bool according_to_magnitude = true, bool both_ends = false)
 

Same arguments as eigenSparseSymmMat except that A is not symmetric. We ignore the imaginary part if there is one. See ARPACK/SRC files for more details. To get the eigen pairs in the same order as in plapack's eigenVecOfSymmMat, do the same thing as above, but you don't have to swap the eigen vectors and eigen values.

< half of the e-values from each end of the spectrum

< we need some extra space...

Definition at line 185 of file parpack.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), dnaupd_(), dneupd_(), PLearn::TMat< T >::length(), MIN, PLERROR, PLWARNING, product(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), select(), snaupd_(), sneupd_(), PLearn::TVec< T >::subVec(), PLearn::TMat< T >::width(), and x.

template<class MatT>
int eigenSparseSymmMat(MatT& A, Vec e_values, Mat e_vectors, long int n_evalues, int max_n_iter = 300, bool compute_vectors = true, bool largest_evalues = true, bool according_to_magnitude = true, bool both_ends = false)
 

Compute some eigenvalues, and optionally eigenvectors, of a symmetric, possibly sparse, generalized matrix A. The only operation that will be performed on A (repetitively) is the matrix-vector product, i.e. A.product(Vec x, Vec y), yielding y = A x.

This uses the ARPACK library.

Returns 0 if all went well. Otherwise, see the INFO values set by [ds][eu]pd in the ARPACK/SRC files.

It is possible that only a subset of the eigenvalues are found (as given by n_evalues upon return, and the new size of e_vectors/e_values). Note also that e_vectors might be internally and temporarily re-allocated to a larger size, with at most 1.5 times more rows.

If you want the eigen values and eigen vectors to be returned in the same order as in plapack's eigenVecOfSymmMat, "according_to_magnitude" must be set to false and you must swap the eigen values and the eigen vectors, i.e. do something like:

    Mat evectors(nb_principal_components, train_set.length());
    Vec evalues(nb_principal_components);
    int status;
    long int n_ev = nb_principal_components;

    status = eigenSparseSymmMat(A, evalues, evectors, n_ev, 300, true, true, false);
    if (status<0 || status>1)
        PLERROR("MyClass: eigenSparseSymmMat return error code number %d (see ARPACK dsaupd INFO variable)", status);
    if (status==1 || n_ev != nb_principal_components)
        PLERROR("MyClass: eigenSparseSymmMat computed only %d e-vectors rather than the required %d", n_ev, nb_principal_components);

    evalues.swap();
    evectors.swapUpsideDown();

(Note: with both_ends, half of the e-values are taken from each end of the spectrum; the routine also allocates some extra internal workspace.)

Definition at line 88 of file parpack.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), dsaupd_(), dseupd_(), PLearn::TMat< T >::length(), MIN, PLERROR, PLWARNING, product(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), select(), ssaupd_(), sseupd_(), PLearn::TVec< T >::subVec(), PLearn::TMat< T >::width(), and x.

template<class num_t>
void eigenVecOfSymmMat TMat< num_t > &  m,
int  k,
TVec< num_t > &  eigen_values,
TMat< num_t > &  eigen_vectors
 

Computes up to k largest eigen_values and corresponding eigen_vectors of symmetric matrix m.

Parameters eigen_values and eigen_vectors are resized accordingly and filled by the call. The eigenvalues are returned in decreasing order (largest first). The corresponding eigenvectors are in the *ROWS* of eigen_vectors. WARNING: m is destroyed during the operation.
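
For illustration, a short usage sketch (not part of the original documentation); Mat and Vec stand for TMat<real> and TVec<real>, and the matrix contents are assumed to be filled elsewhere:

    Mat covar(10, 10);       // symmetric matrix, e.g. a covariance; destroyed by the call
    Vec eigen_values;
    Mat eigen_vectors;
    eigenVecOfSymmMat(covar, 3, eigen_values, eigen_vectors);
    // eigen_values[0] >= eigen_values[1] >= eigen_values[2],
    // and row i of eigen_vectors holds the eigenvector of eigen_values[i]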

Definition at line 322 of file plapack.h.

References k, lapackEIGEN(), PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::swap(), PLearn::TMat< T >::swapUpsideDown(), and PLearn::TMat< T >::width().

Referenced by PLearn::GaussMix::computeMeansAndCovariances(), computePrincipalComponents(), PLearn::PCA::train(), PLearn::KernelProjection::train(), PLearn::GaussianProcessRegressor::train(), PLearn::GaussianDistribution::train(), and PLearn::GaussMix::updateFromConditionalSorting().

bool elementOf const char *  s,
const char  t
 

Definition at line 113 of file TypesNumeriques.cc.

Referenced by compactRepresentationTranslate().

template<class T>
void PLearn::elementsEqualTo const TVec< T > &  source,
const T &  value,
const TVec< T > &  destination
 

Puts 1 in destination[i] when source[i]==value, 0 otherwise.

Definition at line 187 of file TMat_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Var PLearn::ElogP ConditionalExpression  conditional_expression,
RVInstanceArray &  parameters_to_learn,
bool  clearMarksUponReturn = true
 

This is like logP but it represents the expected log-probability of obs=conditional_expression.LHS.v given the RHS, where the expectation is over the "hidden" random variables of EM in mixtures, as a function of the values of the parameters_to_learn.

Definition at line 642 of file RandomVar.cc.

References logP().

Referenced by EM(), and PLearn::MixtureRandomVariable::logP().

real EM ConditionalExpression  conditional_expression,
RVArray  parameters_to_learn,
VMat  distr,
int  n_samples,
int  max_n_iterations,
real  relative_improvement_threshold,
bool  compute_final_train_NLL
 

Definition at line 497 of file RandomVar.cc.

References ElogP(), endl(), PLearn::RVInstanceArray::instances(), PLearn::ConditionalExpression::LHS, logP(), meanOf(), PLERROR, propagationPath(), PLearn::ConditionalExpression::RHS, PLearn::TVec< RandomVar >::size(), PLearn::RVInstance::v, PLearn::RVInstance::V, and PLearn::RVArray::values().

real PLearn::EM ConditionalExpression  conditional_expression,
RVArray  parameters_to_learn,
VMat  distr,
int  n_samples,
int  max_n_iterations = 1,
real  relative_improvement_threshold = 0.001,
bool  accept_worsening_likelihood = false,
bool  compute_final_train_NLL = true
 

NOTE NOTE NOTE:

THE ORDER OF THE VALUES IN THE DISTRIBUTION MUST BE: (1) conditioning variables (RHS), (2) output variables

Definition at line 460 of file RandomVar.cc.

References PLearn::RVInstanceArray::instances(), PLearn::ConditionalExpression::LHS, logP(), propagationPath(), PLearn::ConditionalExpression::RHS, PLearn::RVInstance::v, PLearn::RVInstance::V, and PLearn::RVArray::values().

void endianswap double *  ptr,
int  n = 1
[inline]
 

Definition at line 82 of file byte_order.h.

References endianswap8().

void endianswap float *  ptr,
int  n = 1
[inline]
 

Definition at line 81 of file byte_order.h.

References endianswap4().

void endianswap unsigned long *  ptr,
int  n = 1
[inline]
 

Definition at line 80 of file byte_order.h.

References endianswap4().

void endianswap long *  ptr,
int  n = 1
[inline]
 

Definition at line 79 of file byte_order.h.

References endianswap4().

void endianswap unsigned int *  ptr,
int  n = 1
[inline]
 

Definition at line 78 of file byte_order.h.

References endianswap4().

void endianswap int *  ptr,
int  n = 1
[inline]
 

Definition at line 77 of file byte_order.h.

References endianswap4().

void endianswap unsigned short *  ptr,
int  n = 1
[inline]
 

Definition at line 76 of file byte_order.h.

References endianswap2().

void endianswap short *  ptr,
int  n = 1
[inline]
 

Definition at line 75 of file byte_order.h.

References endianswap2().

void endianswap unsigned char *  ptr,
int  n = 1
[inline]
 

Definition at line 72 of file byte_order.h.

void endianswap signed char *  ptr,
int  n = 1
[inline]
 

Definition at line 71 of file byte_order.h.

void endianswap char *  ptr,
int  n = 1
[inline]
 

Definition at line 70 of file byte_order.h.

Referenced by PLearn::IntVecFile::append(), PLearn::DiskVMatrix::appendRow(), binread_(), PLearn::DiskVMatrix::build_(), PLearn::DiskVMatrix::getNewRow(), PLearn::IntVecFile::getVec(), new_read_compressed(), PLearn::PStream::operator>>(), PLearn::TMat< pair< real, real > >::read(), readSequence(), reverse_double(), reverse_float(), reverse_int(), reverse_short(), reverse_uint(), and reverse_ushort().
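
A small sketch (not part of the original documentation) of the typical use of the endianswap overloads above, e.g. bringing doubles read from a file with the opposite byte order back to the native order; the file name is hypothetical and <cstdio> is assumed to be included:

    double buf[16];
    FILE* f = fopen("data_bigendian.bin", "rb");
    fread(buf, sizeof(double), 16, f);
    fclose(f);
    // if the file's byte order differs from the machine's:
    endianswap(buf, 16);   // dispatches to endianswap8 for 8-byte elements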

void PLearn::endianswap2 void *  ptr,
int  n
 

swaps endians for n 2-byte elements (such as short)

Definition at line 43 of file byte_order.cc.

Referenced by endianswap().

void PLearn::endianswap4 void *  ptr,
int  n
 

swaps endians for n 4-byte elements (such as int or float)

Definition at line 54 of file byte_order.cc.

Referenced by endianswap().

void PLearn::endianswap8 void *  ptr,
int  n
 

swaps endians for n 8-byte elements (such as double)

Definition at line 66 of file byte_order.cc.

Referenced by endianswap().

PStream & PLearn::endl PStream out  ) 
 

Definition at line 58 of file PStream.cc.

References PLearn::PStream::endl().

Referenced by PLearn::CompactVMatrix::append(), PLearn::AsciiVMatrix::appendRow(), PLearn::AsciiVMatrix::AsciiVMatrix(), PLearn::PLearnCommandRegistry::badcommand(), PLearn::GraphicalBiText::build_(), PLearn::GramVMatrix::build_(), PLearn::DatedJoinVMatrix::build_(), PLearn::AddCostToLearner::build_(), PLearn::GhostScript::centerShow(), PLearn::GraphicalBiText::check_consitency(), PLearn::CompactVMatrix::CompactVMatrix(), PLearn::VMatLanguage::compileStream(), compressedTransposeProductAcc(), PLearn::GraphicalBiText::compute_likelihood(), PLearn::GraphicalBiText::compute_nodemap(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), computeConditionalStats(), PLearn::Learner::computeCosts(), PLearn::KernelProjection::computeCostsFromOutputs(), PLearn::LiftStatsCollector::computeLift(), PLearn::SDBWithStats::computeStats(), PLearn::ConjGradientOptimizer::ConjGradientOptimizer(), PLearn::SDBVMFieldICBCTargets::convertField(), PLearn::GhostScript::copypage(), PLearn::CountEventsSemaphore::CountEventsSemaphore(), PLearn::VVMatrix::createPreproVMat(), cross_valid(), diagonalizeSubspace(), PLearn::Function::differentiate(), displayBasicStats(), PLearn::GhostScript::displayBlack(), PLearn::GhostScript::displayGray(), displayObjectHelp(), PLearn::ShellProgressBar::done(), PLearn::GhostScript::drawBox(), PLearn::GhostScript::drawCircle(), PLearn::GhostScript::drawLine(), DX_create_dataset_outputs_file(), DX_create_grid_outputs_file(), eigen_SymmMat(), EM(), PLearn::RandomVariable::EM(), PLearn::MixtureRandomVariable::EMBprop(), PLearn::RandomVariable::epoch(), PLearn::GhostScript::erasepage(), errormsg(), PLearn::GaussianKernel::evaluate_i_j(), exitmsg(), PLearn::Gnuplot::export_ps(), PLearn::RowMapSparseMatrix< real >::exportToMatlabReadableFormat(), PLearn::WordNetOntology::extractAncestors(), PLearn::WordNetOntology::extractDescendants(), extractFiles(), PLearn::WordNetOntology::extractTaggedWordFrequencies(), PLearn::Gnuplot::featureplot(), PLearn::GhostScript::fillBox(), PLearn::GhostScript::fillCircle(), PLearn::LiftStatsCollector::finalize(), PLearn::ConjGradientOptimizer::findDirection(), PLearn::SimpleDB< KeyType, QueryResult >::findEqualLinear(), PLearn::ConjGradientOptimizer::fletcherSearchMain(), PLearn::ProjectionErrorVariable::fprop(), PLearn::LogVariable::fprop(), PLearn::ProbabilitySparseMatrix::fullPrint(), PLearn::VVMatrix::generateVMatIndex(), PLearn::GaussianContinuum::get_image_matrix(), PLearn::Dictionary::getId(), PLearn::GhostScript::GhostScript(), PLearn::Gnuplot::Gnuplot(), halfShuffleRows(), PLearn::PLearnCommandRegistry::help(), PLearn::HelpCommand::helpAboutScript(), PLearn::HelpCommand::helpCommands(), PLearn::HelpCommand::helpDatasets(), PLearn::HelpCommand::helpOverview(), PLearn::HelpCommand::helpScripts(), PLearn::Gnuplot::histoplot(), PLearn::SimpleDB< KeyType, QueryResult >::indexColumn(), PLearn::GraphicalBiText::init(), interactiveDisplayCDF(), lapackSVD(), PLearn::Popen::launch(), PLearn::MatlabInterface::launch(), PLearn::IPopen::launch(), PLearn::MatlabInterface::launchAndWaitFor(), PLearn::ConjGradientOptimizer::lineSearch(), PLearn::RGBImage::loadJPEG(), PLearn::SimpleDB< KeyType, QueryResult >::loadSchema(), loadToVMat(), PLearn::VMatrix::lockMetaDataDir(), main(), matInvert(), matlabSave(), PLearn::VarMeasurer::measure(), PLearn::Learner::measure(), PLearn::Gnuplot::multiplot(), PLearn::ConjGradientOptimizer::newtonSearch(), old_plearn_main(), PLearn::Learner::openTestResultsStreams(), PLearn::Learner::openTrainObjectiveStream(), 
operator<<(), PLearn::ScaledGradientOptimizer::optimize(), PLearn::GradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimizeN(), PLearn::AdaptGradientOptimizer::optimizeN(), PLearn::Learner::outputResultLineToFile(), plearn_main(), PLearn::Gnuplot::plot(), PLearn::Gnuplot::plot3d(), PLearn::Grapher::plot_1D_regression(), PLearn::Grapher::plot_2D_classification(), PLearn::Gnuplot::plotcdf(), PLearn::Gnuplot::plotClasses(), PLearn::Gnuplot::plotdensity(), plotVMats(), prettyprint_test_results(), PLearn::WordNetOntology::print(), PLearn::VMatrix::print(), PLearn::Variable::print(), PLearn::StatsCollector::print(), PLearn::RealMapping::print(), print(), PLearn::Object::print(), PLearn::GraphicalBiText::print(), PLearn::Array< char * >::print(), PLearn::PLearnCommandRegistry::print_command_summary(), print_diff(), PLearn::GraphicalBiText::print_sensemap(), printDistanceStatistics(), PLearn::VMatrix::printFieldInfo(), printFieldNames(), PLearn::VMatrix::printFields(), PLearn::UnfoldedSumOfVariable::printInfo(), PLearn::UnfoldedFuncVariable::printInfo(), PLearn::UnaryVariable::printInfo(), PLearn::SumOverBagsVariable::printInfo(), PLearn::SumOfVariable::printInfo(), PLearn::SourceVariable::printInfo(), PLearn::NaryVariable::printInfo(), PLearn::MatrixSumOfVariable::printInfo(), PLearn::BinaryVariable::printInfo(), PLearn::TVec< pair< real, real > >::println(), PLearn::VarArray::printNames(), PLearn::GraphicalBiText::printNode(), PLearn::WordNetOntology::printNodes(), PLearn::WordNetOntology::printStats(), PLearn::ConjGradientOptimizer::printStep(), PLearn::WordNetOntology::printSynset(), PLearn::WordNetOntology::printSynsetAncestors(), PLearn::WordNetOntology::printWordAncestors(), PLearn::WordNetOntology::printWordOntology(), randomShuffleRows(), readAndMacroProcess(), reduceInputSize(), PLearn::WordNetOntology::reduceWordPolysemy_preserveSenseOverlapping(), PLearn::Profiler::report(), PLearn::Hash< KeyType, DataType >::resize(), PLearn::ResourceSemaphore::ResourceSemaphore(), PLearn::Train::run(), PLearn::TestDependencyCommand::run(), PLearn::TestDependenciesCommand::run(), PLearn::SequentialValidation::run(), PLearn::NearestNeighborPredictionCost::run(), PLearn::KolmogorovSmirnovCommand::run(), PLearn::JulianDateCommand::run(), PLearn::Grapher::run(), PLearn::GhostScript::run(), PLearn::GenerateDecisionPlot::run(), PLearn::AutoRunCommand::run(), PLearn::WordNetOntology::save(), PLearn::VMatrix::saveAMAT(), saveAscii(), PLearn::VMatrix::saveFieldInfos(), PLearn::RowMapSparseMatrix< real >::saveNonZeroElements(), PLearn::RGBImage::savePPM(), PLearn::WordNetOntology::savePredominentSyntacticClasses(), PLearn::SimpleDB< KeyType, QueryResult >::saveSchema(), PLearn::VMatrix::saveStats(), PLearn::SDBWithStats::saveStats(), PLearn::VMatrix::saveStringMappings(), PLearn::WordNetOntology::saveVocInWordnet(), PLearn::GraphicalBiText::sensetag_valid_bitext(), PLearn::GraphicalBiText::senseTagBitext(), PLearn::GraphicalBiText::set_nodemap(), PLearn::Gnuplot::seteps(), PLearn::VMatrix::setSFIFFilename(), PLearn::Gnuplot::setxrange(), PLearn::Gnuplot::setyrange(), PLearn::GhostScript::show(), PLearn::GhostScript::showpage(), PLearn::ScaledConditionalCDFSmoother::smooth(), PLearn::ProbVector::smoothNormalize(), SpearmanRankCorrelation(), split(), PLearn::Learner::stop_if_wanted(), PLearn::SequentialModelSelector::test(), PLearn::MovingAverage::test(), PLearn::GraphicalBiText::test_WSD(), PLearn::TangentLearner::train(), 
PLearn::SequentialModelSelector::train(), PLearn::PLS::train(), PLearn::NNet::train(), PLearn::NeighborhoodSmoothnessNNet::train(), PLearn::MultiInstanceNNet::train(), PLearn::MovingAverage::train(), PLearn::KernelProjection::train(), PLearn::GaussianContinuum::train(), PLearn::EntropyContrast::train(), PLearn::ConditionalDensityNet::train(), PLearn::ClassifierFromDensity::train(), PLearn::AdaBoost::train(), train_and_test(), PLearn::RowMapSparseMatrix< real >::transposeProduct(), PLearn::TextProgressBarPlugin::update(), PLearn::GraphicalBiText::update_WSD_model(), usage(), use(), PLearn::GhostScript::usefont(), PLearn::Function::verifyGradient(), PLearn::Function::verifyHessian(), PLearn::Function::verifyrfprop(), PLearn::Function::verifySymbolicGradient(), viewVMat(), vmatmain(), warningmsg(), PLearn::CountEventsSemaphore::~CountEventsSemaphore(), PLearn::GhostScript::~GhostScript(), PLearn::Gnuplot::~Gnuplot(), PLearn::ResourceSemaphore::~ResourceSemaphore(), and PLearn::SharedMemory< T >::~SharedMemory().

Var PLearn::entropy Var  v,
bool  normalize = true
 

Definition at line 91 of file Var_utils.cc.

References abs(), log(), normalize(), plogp(), and sum().

const char * PLearn::eNumericTypeNames int  a  ) 
 

converts a code into the corresponding string

Definition at line 55 of file TypesNumeriques.cc.

References NT_CARDINAL, NT_CODE, NT_CURRENCY, NT_NOT_NUMERIC, NT_ORDINAL, NT_PERCENT, NT_PREFIXED, NT_RANGE, NT_SUFFIXED, NT_TIME, and NT_UNKNOWN_NUMERIC_TYPE.

template<class T>
void equals const TMat< T > &  src,
T  v,
TMat< T > &  dest
 

Definition at line 5277 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void equals const TVec< T > &  src,
T  v,
TVec< T > &  dest
 

Definition at line 1695 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Var erf Var  v  )  [inline]
 

Definition at line 71 of file ErfVariable.h.

void PLearn::errormsg const char *  msg,
  ...
 

Definition at line 64 of file plerror.cc.

References endl(), ERROR_MSG_SIZE, and error_stream.

int establish_connection const int  argc,
const char *  argv[]
[inline]
 

Definition at line 145 of file IPopen.h.

References establish_connection(), and PLERROR.

int establish_connection const char *  hostname,
int  port_no
[inline]
 

Definition at line 143 of file IPopen.h.

References establish_connection(), and hostname().

int PLearn::establish_connection int  n_hosts,
const char *  hostnames[],
int  port_no
 

Definition at line 99 of file IPopen.cc.

References PLERROR.

Referenced by establish_connection().

template<class T>
T estimatedCumProb T  x,
TVec< T >  bins
 

Definition at line 1812 of file TMat_maths_impl.h.

References binary_search(), k, PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::UniformizeVMatrix::getNewRow().

vector< string > PLearn::execute const string command  ) 
 

Returns the full output of the command as a vector of strings, containing the lines of the answer (with any newline character removed). The command must not be waiting for input on its standard input or this call will never return.
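
A minimal sketch (not part of the original documentation), assuming using namespace PLearn and std; the command string is made up:

    vector<string> lines = execute("ls -1 /tmp");
    for (unsigned int i = 0; i < lines.size(); i++)
        cout << lines[i] << endl;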

Definition at line 171 of file Popen.cc.

References PLearn::PStream::getline(), and PLearn::Popen::in.

Referenced by PLearn::ShellProgressBar::getWcAsciiFileLineCount().

void PLearn::exitmsg const char *  msg,
  ...
 

Definition at line 109 of file plerror.cc.

References endl(), ERROR_MSG_SIZE, and error_stream.

Referenced by cross_valid(), getMultipleModelAliases(), old_plearn_main(), train_and_test(), and use().

Var exp Var  v  )  [inline]
 

Definition at line 73 of file ExpVariable.h.

RandomVar PLearn::exp RandomVar  x  ) 
 

exponential function applied element-by-element

Definition at line 448 of file RandomVar.cc.

References x.

template<class T>
TVec<T> exp TVec< T >  vec  ) 
 

Definition at line 1639 of file TMat_maths_impl.h.

References apply(), and safeexp().

template<class T>
void exp const TVec< T > &  x,
TVec< T > &  y
 

computes y <- exp(x)

Definition at line 107 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::resize(), and x.

Referenced by bnldev(), PLearn::NllSemisphericalGaussianVariable::bprop(), PLearn::ErfVariable::bprop(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), PLearn::ClassifierFromDensity::computeOutput(), PLearn::GaussMix::computePosteriors(), PLearn::PDistribution::density(), PLearn::Distribution::density(), PLearn::ConditionalGaussianDistribution::density(), determine_density_integral_from_log_densities_on_grid(), DX_create_grid_outputs_file(), PLearn::MixtureRandomVariable::ElogP(), PLearn::MixtureRandomVariable::EMBprop(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::LogRandomVariable::EMBprop(), PLearn::SigmoidPrimitiveKernel::evaluate(), PLearn::ScaledLaplacianKernel::evaluate(), PLearn::ScaledGeneralizedDistanceRBFKernel::evaluate(), PLearn::ScaledGaussianKernel::evaluate(), PLearn::LaplacianKernel::evaluate(), PLearn::GeneralizedDistanceRBFKernel::evaluate(), PLearn::GaussianDensityKernel::evaluate(), PLearn::ConvexBasisKernel::evaluate(), PLearn::CompactVMatrixGaussianKernel::evaluate(), PLearn::GaussianKernel::evaluateFromSquaredNormOfDifference(), PLearn::ExpMeanStatsIterator::finish(), gamdev(), gauss_01_density(), gauss_density_var(), geometric_mean(), PLearn::GaussianContinuum::get_image_matrix(), incomplete_beta(), PLearn::ConditionalDensityNet::initialize_mu(), inverse_softplus(), PLearn::LogRandomVariable::invertible(), KS_test(), logadd(), PLearn::DiagonalNormalRandomVariable::logP(), logsub(), PLearn::RandomVariable::P(), pl_gcf(), pl_gser(), poidev(), PLearn::VMatLanguage::run(), safeexp(), PLearn::GaussMix::setInput(), PLearn::DiagonalNormalRandomVariable::setValueFromParentsValue(), PLearn::ExpRandomVariable::setValueFromParentsValue(), softmax(), softplus(), softplus_primitive(), PLearn::SoftmaxLossVariable::symbolicBprop(), PLearn::LogAddVariable::symbolicBprop(), PLearn::ErfVariable::symbolicBprop(), PLearn::GaussianContinuum::train(), and PLearn::AdaBoost::train().

StatsIt exp_mean_stats  )  [inline]
 

Definition at line 419 of file StatsIterator.h.

real PLearn::expdev  ) 
 

returns an exponentially distributed random number

Definition at line 318 of file random.cc.

References log(), and uniform_sample().

template<class T>
void exponentialMovingAverageUpdate const TVec< T > &  vec,
const TVec< T > &  x,
T  alpha
 

vec[i] = (1-alpha)*vec[i] + alpha*x[i]
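
A minimal sketch (not part of the original documentation) of updating a running average with a new observation; the sizes and alpha value are made up:

    Vec avg(10);           // running average, initialized beforehand
    Vec x(10);             // new observation
    real alpha = 0.05;
    exponentialMovingAverageUpdate(avg, x, alpha);   // avg[i] = (1-alpha)*avg[i] + alpha*x[i]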

Definition at line 2056 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::ScaledGradientOptimizer::optimize().

template<class T>
void exponentialMovingSquareUpdate const TVec< T > &  vec,
const TVec< T > &  x,
T  alpha
 

vec[i] = (1-alpha)*vec[i] + alpha*x[i]^2

Definition at line 2092 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::ScaledGradientOptimizer::optimize().

template<class T>
void exponentialMovingVarianceUpdate const TVec< T > &  vec,
const TVec< T > &  x,
const TVec< T > &  mu,
T  alpha
 

vec[i] = (1-alpha)*vec[i] + alpha*(x[i]-mu[i])^2

Definition at line 2071 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Var extend Var  v,
real  extension_value = 1.0,
int  n_extend = 1
[inline]
 

simple extension of a vector (same semantic as old extend, when we only had vectors)

Definition at line 104 of file ExtendedVariable.h.

References PLERROR.

Var extend Var  v,
int  top_extent,
int  bottom_extent,
int  left_extent,
int  right_extent,
real  fill_value = 0.0
[inline]
 

general extension of a matrix in any direction

Definition at line 100 of file ExtendedVariable.h.

RandomVar PLearn::extend RandomVar  v,
real  extension_value = 1.0,
int  n_extend = 1
 

Definition at line 453 of file RandomVar.cc.

Referenced by PLearn::ExtendedRandomVariable::setValueFromParentsValue(), PLearn::SubMatVariable::symbolicBprop(), and PLearn::SubMatTransposeVariable::symbolicBprop().

template<class T>
void externalProduct const TMat< T > &  mat,
const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 2979 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::VecStatsCollector::getCorrelation().

template<class T>
void externalProductAcc const TMat< T > &  mat,
const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 2999 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, val, and PLearn::TMat< T >::width().

Referenced by PLearn::VMatrix::accumulateXtX(), PLearn::VMatrix::accumulateXtY(), PLearn::AffineTransformVariable::bprop(), computeMeanAndCovar(), and linearRegression().

template<class T>
void externalProductScaleAcc const TMat< T > &  mat,
const TVec< T > &  v1,
const TVec< T > &  v2,
T  gamma,
T  alpha
 

Definition at line 3059 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void externalProductScaleAcc const TMat< T > &  mat,
const TVec< T > &  v1,
const TVec< T > &  v2,
T  gamma
 

Definition at line 3039 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProjectionErrorVariable::bprop(), computeCovar(), computeInputMeanAndCovar(), computeMeanAndCovar(), computeWeightedMeanAndCovar(), PLearn::VecStatsCollector::update(), and weightedLinearRegression().

string PLearn::extract_directory const string filepath  ) 
 

Returns everything before the last '/' including the '/' (if there's no '/' it returns "./").

Definition at line 146 of file stringutils.cc.

References dot(), and slash.

Referenced by abspath(), PLearn::AutoSDBVMatrix::AutoSDBVMatrix(), PLearn::VVMatrix::createPreproVMat(), PLearn::VVMatrix::extractSourceMatrix(), force_mkdir_for_file(), getDataSet(), PLearn::VVMatrix::getDateOfVMat(), locateDatasetAliasesDir(), makeFileNameValid(), readAndMacroProcess(), and readFileAndMacroProcess().

string PLearn::extract_extension const string filepath  ) 
 

Returns everything after the last '.' of the filename (i.e. excluding the directory path, if any is present), including the '.' (if there's no '.' in the filename it returns "").

Definition at line 158 of file stringutils.cc.

References extract_filename().

Referenced by getDataSet(), getDataSetDate(), loadMat(), makeFileNameValid(), readFileAndMacroProcess(), PLearn::ReadAndWriteCommand::run(), PLearn::HelpCommand::run(), and vmatmain().

string PLearn::extract_filename const string filepath  ) 
 

** File path manipulation functions **

Returns everything after the last '/' (if there's no '/', returns filepath).

Definition at line 137 of file stringutils.cc.

References slash.

Referenced by abspath(), PLearn::AutoSDBVMatrix::AutoSDBVMatrix(), extract_extension(), extract_filename_without_extension(), getDataSet(), matlabR11eigs(), readAndMacroProcess(), and readFileAndMacroProcess().

string PLearn::extract_filename_without_extension const string filepath  ) 
 

Returns everything before the last '.' of the filename, excluding the '.' (if there's no '.' in the filename it returns the whole filename).

Definition at line 168 of file stringutils.cc.

References extract_filename().

Referenced by makeFileNameValid().

real FABS real  x  )  [inline]
 

Definition at line 250 of file pl_math.h.

References x.

Referenced by PLearn::LiftStatsIterator::update(), PLearn::LiftStatsCollector::update(), and PLearn::Function::verifyGradient().

template<class T>
TVec<T> fastsigmoid const TVec< T > &  src  )  [inline]
 

Definition at line 1014 of file TMat_maths_impl.h.

References compute_fastsigmoid(), and PLearn::TVec< T >::length().

real fastsigmoid const real x  )  [inline]
 

Definition at line 193 of file pl_math.h.

References fasttanh(), and x.

Referenced by compute_fastsigmoid().

template<class T>
TVec<T> fasttanh const TVec< T > &  src  )  [inline]
 

Definition at line 977 of file TMat_maths_impl.h.

References compute_fasttanh(), and PLearn::TVec< T >::length().

real fasttanh const real x  )  [inline]
 

Definition at line 166 of file pl_math.h.

References DOUBLE_TO_INT, MAXTANHX, tanhtable, TANHTABLESIZE, and x.

Referenced by compute_fasttanh(), and fastsigmoid().

bool PLearn::file_exists const string filename  ) 
 

Definition at line 114 of file general.cc.

Referenced by PLearn::VVMatrix::extractSourceMatrix(), filter(), fullyRebalance2Classes(), grep(), PLearn::SDBWithStats::hasStats(), PLearn::HelpCommand::helpAboutScript(), input2dSet(), PLearn::IntVecFile::open(), PLearn::FilteredVMatrix::openIndex(), plearn_main(), PLearn::VMat::precompute(), rebalanceNClasses(), PLearn::RunCommand::run(), PLearn::SourceVMatrix::setMetaDataDir(), and PLearn::Learner::stop_if_wanted().

int PLearn::file_size const string filename  ) 
 

Simple file info.

Definition at line 71 of file general.cc.

Referenced by PLearn::SimpleDB< KeyType, QueryResult >::computeSize(), and filter().

long PLearn::filesize const string filename  ) 
 

Returns the length of a file, measured in bytes.

Definition at line 323 of file fileutils.cc.

References PLERROR.

Referenced by PLearn::IntVecFile::getVersionAndSize(), loadFileAsString(), MemoryMap(), and PLearn::Storage< pair< real, real > >::Storage().

template<class T>
void fill_one_hot const TVec< T > &  vec,
int  hotpos,
T  coldvalue,
T  hotvalue
 

Definition at line 1308 of file TMat_maths_impl.h.

References PLearn::TVec< T >::fill(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::OneHotVMatrix::getNewRow(), PLearn::GeneralizedOneHotVMatrix::getNewRow(), and one_hot().

void PLearn::fill_random_discrete const Vec &  dest,
const Vec &  set
 

sample each element from the given set

Definition at line 570 of file random.cc.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), PLearn::TVec< T >::length(), and uniform_multinomial_sample().

Referenced by PLearn::NNet::initializeParams().

void PLearn::fill_random_normal const Mat &  dest,
real  mean = 0,
real  sdev = 1
 

Definition at line 613 of file random.cc.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::end(), gaussian_mu_sigma(), and mean().

void PLearn::fill_random_normal const Vec &  dest,
const Vec &  mean,
const Vec &  stdev
 

sample each element from multivariate Normal(mean,diag(sdev^2)) distribution

Definition at line 589 of file random.cc.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), gaussian_mu_sigma(), PLearn::TVec< T >::length(), mean(), and PLERROR.

void PLearn::fill_random_normal const Vec &  dest,
real  mean = 0,
real  stdev = 1
 

sample each element from Normal(mean,sdev^2) distribution
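
A brief sketch (not part of the original documentation) of filling containers with random values; sizes and parameters are made up, and fill_random_uniform is documented below:

    Vec v(100);
    fill_random_normal(v, 0.0, 2.0);    // each v[i] ~ Normal(0, 2^2)
    Mat m(10, 5);
    fill_random_uniform(m, -1.0, 1.0);  // each entry ~ U[-1,1]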

Definition at line 580 of file random.cc.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), gaussian_mu_sigma(), and mean().

Referenced by PLearn::GaussianDistribution::generate(), PLearn::GaussMix::generateFromGaussian(), PLearn::NeuralNet::initializeParams(), PLearn::NeighborhoodSmoothnessNNet::initializeParams(), PLearn::MultiInstanceNNet::initializeParams(), PLearn::ConditionalDensityNet::initializeParams(), and PLearn::PCA::train().

void PLearn::fill_random_uniform const Mat &  dest,
real  minval = 0,
real  maxval = 1
 

Definition at line 604 of file random.cc.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::end(), and uniform_sample().

void PLearn::fill_random_uniform const Vec &  dest,
real  minval = 0,
real  maxval = 1
 

sample each element from uniform distribution U[minval,maxval]

Definition at line 560 of file random.cc.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), and uniform_sample().

Referenced by affineMatrixInitialize(), PLearn::EntropyContrast::initialize_NNcontinuous(), PLearn::TangentLearner::initializeParams(), PLearn::NNet::initializeParams(), PLearn::GaussianContinuum::initializeParams(), and PLearn::Function::verifyGradient().

template<class T>
void fillItSymmetric const TMat< T > &  mat  ) 
 

Fill the bottom left part of a matrix with its top right part, so that it becomes symmetric.

Definition at line 2805 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and PLearn::TMat< T >::mod().

VMat PLearn::filter VMat  d,
const string indexfile
 

Returns a VMat that contains only the rows that do not have any MISSING_VALUE. The indexes of the rows of the original matrix are recorded in the indexfile. BEWARE: If the indexfile already exists, it is *not* recomputed, but used as is.
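
A minimal sketch (not part of the original documentation); the dataset and index file names are hypothetical:

    VMat raw = getDataSet("somedata.amat");
    VMat clean = filter(raw, "somedata_nonmissing.idx");  // rows without missing values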

Definition at line 758 of file VMat_maths.cc.

References PLearn::IntVecFile::append(), file_exists(), file_size(), PLearn::TVec< T >::hasMissing(), PLearn::VMat::length(), PLearn::VMat::rows(), and PLearn::VMat::width().

Referenced by PLearn::EmbeddedSequentialLearner::train().

bool PLearn::find const vector< string > &  command_line,
const string option
 

Also useful to find "turn-on" options in a command line, i.e. options that take no value: this function just returns true if the option is found in the command_line. Note: this may be useful with command_line = stringvector(argc,argv);

Definition at line 586 of file stringutils.cc.

Referenced by PLearn::Hash< KeyType, DataType >::del(), PLearn::Hash< KeyType, DataType >::element(), PLearn::Hash< KeyType, DataType >::hashAddress(), PLearn::VMatrix::removeStringMapping(), and PLearn::SimpleDB< KeyType, QueryResult >::truncateFromRow().

Mat PLearn::findClosestPairsOfDifferentClass int  k,
VMat  data,
Ker  dist
 

Definition at line 687 of file Kernel.cc.

References argmax(), dist(), k, sortRows(), and PLearn::TVec< real >::subVec().

int findit const vector< string > &  v,
string  element
 

Definition at line 525 of file stringutils.cc.

int findpos const vector< string > &  v,
string  element
 

return index of element in v, or -1 if not found

PDate float_to_date double  d  )  [inline]
 

Definition at line 170 of file PDate.h.

References float_to_date().

PDate PLearn::float_to_date float  f  ) 
 

Definition at line 170 of file PDate.cc.

References PLearn::PDate::day, is_missing(), PLearn::PDate::month, and PLearn::PDate::year.

Referenced by PLearn::DatedJoinVMatrix::build_(), PLearn::SDBVMFieldMonths::convertField(), PLearn::SDBVMFieldDay::convertField(), PLearn::SDBVMFieldDate::convertField(), float_to_date(), and PLearn::VMatLanguage::run().

PStream & PLearn::flush PStream out  ) 
 

Definition at line 52 of file PStream.cc.

References PLearn::PStream::flush().

Referenced by PLearn::Learner::computeLeaveOneOutCosts(), PLearn::VVec::print(), PLearn::KernelProjection::train(), vmatmain(), and PLearn::pl_fdstream::~pl_fdstream().

bool PLearn::force_mkdir const string dirname  ) 
 

Forces directory creation if it doesn't already exist. (also creates any missing directory along its path) Return value indicates success (true) or failure (false). If the directory already exists, true is returned.
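
A small sketch (not part of the original documentation); the paths are hypothetical and force_mkdir_for_file is documented below:

    if (!force_mkdir("results/exp01/models"))
        PLERROR("Could not create directory");
    // or, given a file path, create the missing directories leading to it:
    force_mkdir_for_file("results/exp01/models/final.psave");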

Definition at line 252 of file fileutils.cc.

References isdir(), mode, remove_trailing_slash(), and slash.

Referenced by PLearn::AutoSDBVMatrix::AutoSDBVMatrix(), PLearn::VVMatrix::build_(), PLearn::SequentialValidation::build_(), PLearn::PLearner::build_(), PLearn::PTester::build_(), PLearn::GaussianProcessRegressor::build_(), PLearn::DiskVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), force_mkdir_for_file(), PLearn::VMatrix::loadStringMapping(), PLearn::VMatrix::lockMetaDataDir(), matlabSave(), PLearn::SequentialValidation::run(), PLearn::Experiment::run(), PLearn::PLearner::setExperimentDirectory(), PLearn::PTester::setExperimentDirectory(), PLearn::Learner::setExperimentDirectory(), and PLearn::VMatrix::setMetaDataDir().

void PLearn::force_mkdir_for_file const string filepath  ) 
 

Extracts the directory part of the filepath and calls force_mkdir Calls PLERROR in case of failure.

Definition at line 286 of file fileutils.cc.

References extract_directory(), force_mkdir(), and PLERROR.

Referenced by PLearn::FileVMatrix::build_(), save(), saveStringInFile(), PLearn::VMatrix::saveStringMappings(), and PLearn::VMatrix::setSFIFFilename().

bool PLearn::force_rmdir const string dirname  ) 
 

Forces removal of the directory and all its content. Return value indicates success (true) or failure (false). If the directory does not exist, false is returned.

Definition at line 296 of file fileutils.cc.

References isdir(), and lsdir_fullpath().

Referenced by PLearn::VVMatrix::createPreproVMat(), PLearn::VMatrix::saveDMAT(), and PLearn::PrecomputedVMatrix::usePrecomputed().

double fread_double FILE *  f,
bool  is_file_bigendian = true
[inline]
 

Definition at line 101 of file pl_io_deprecated.h.

References fread_double().

void PLearn::fread_double FILE *  f,
float *  ptr,
int  n,
bool  is_file_bigendian = true
 

reads disk doubles into float array

Definition at line 268 of file pl_io_deprecated.cc.

References fread_double().

void PLearn::fread_double FILE *  f,
double *  ptr,
int  n,
bool  is_file_bigendian = true
 

Definition at line 255 of file pl_io_deprecated.cc.

References reverse_double().

Referenced by fread_double(), PLearn::FileVMatrix::getNewRow(), loadPMat(), and loadPVec().

float fread_float FILE *  f,
bool  is_file_bigendian = true
[inline]
 

Definition at line 99 of file pl_io_deprecated.h.

References fread_float().

void PLearn::fread_float FILE *  f,
double *  ptr,
int  n,
bool  is_file_bigendian = true
 

reads disk floats into double array

Definition at line 246 of file pl_io_deprecated.cc.

References fread_float().

void PLearn::fread_float FILE *  f,
float *  ptr,
int  n,
bool  is_file_bigendian = true
 

Definition at line 233 of file pl_io_deprecated.cc.

References reverse_float().

Referenced by fread_float(), PLearn::FileVMatrix::getNewRow(), loadADMat(), loadADVec(), loadPMat(), loadPVec(), loadSNMat(), and loadSNVec().

int fread_int FILE *  f,
bool  is_file_bigendian = true
[inline]
 

The following calls read a single value from the file, assuming it is in the specified representation (either little or big endian). If necessary, the representation is translated to the endianness used on the current architecture.

Definition at line 97 of file pl_io_deprecated.h.

References fread_int().

void PLearn::fread_int FILE *  f,
int *  ptr,
int  n,
bool  is_file_bigendian = true
 

Reads binary data from a file assuming it is in the specified representation (either little or big endian). If necessary, the representation is translated to the endianness on the current architecture.

Definition at line 220 of file pl_io_deprecated.cc.

References reverse_int().

Referenced by fread_int(), PLearn::IntVecFile::get(), loadADMat(), loadADVec(), loadSNMat(), and loadSNVec().

void PLearn::fread_short FILE *  f,
unsigned short *  ptr,
int  n,
bool  is_file_bigendian = true
 

Definition at line 277 of file pl_io_deprecated.cc.

References reverse_ushort().

void PLearn::fullyRebalance2Classes VMat  inputs,
const string filename,
bool  save_indices = true
 

Rebalance a 2-class VMat so as to keep all the examples of the dominant class.

Definition at line 1153 of file VMat_maths.cc.

References PLearn::TmpFilenames::addFilename(), file_exists(), fname, PLearn::VMat::lastColumn(), PLearn::VMat::length(), MAX, PLearn::IntVecFile::put(), PLearn::TVec< T >::resize(), PLearn::VMat::rows(), PLearn::VMat::save(), PLearn::VMat::toMat(), and PLearn::TMat< T >::toVecCopy().

void fwrite_double FILE *  f,
double  value,
bool  is_file_bigendian = true
[inline]
 

Definition at line 110 of file pl_io_deprecated.h.

References fwrite_double().

void PLearn::fwrite_double FILE *  f,
const float *  ptr,
int  n,
bool  is_file_bigendian = true
 

writes float array to double file

Definition at line 209 of file pl_io_deprecated.cc.

References fwrite_double().

void PLearn::fwrite_double FILE *  f,
const double *  ptr,
int  n,
bool  is_file_bigendian = true
 

Definition at line 185 of file pl_io_deprecated.cc.

References reverse_double().

Referenced by PLearn::FileVMatrix::appendRow(), fwrite_double(), PLearn::FileVMatrix::put(), and PLearn::FileVMatrix::putSubRow().

void fwrite_float FILE *  f,
float  value,
bool  is_file_bigendian = true
[inline]
 

Definition at line 108 of file pl_io_deprecated.h.

References fwrite_float().

void PLearn::fwrite_float FILE *  f,
const double *  ptr,
int  n,
bool  is_file_bigendian = true
 

writes double array to float file

Definition at line 176 of file pl_io_deprecated.cc.

References fwrite_float().

void PLearn::fwrite_float FILE *  f,
const float *  ptr,
int  n,
bool  is_file_bigendian = true
 

Definition at line 152 of file pl_io_deprecated.cc.

References reverse_float().

Referenced by PLearn::FileVMatrix::appendRow(), fwrite_float(), PLearn::FileVMatrix::put(), PLearn::FileVMatrix::putSubRow(), saveSNMat(), and saveSNVec().

void fwrite_int FILE *  f,
int  value,
bool  is_file_bigendian = true
[inline]
 

The following calls write a single value to the file in the specified representation, regardless of the endianness on the current architecture.
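
A minimal round-trip sketch (not part of the original documentation); the file name is made up and <cstdio> is assumed to be included:

    FILE* f = fopen("values.bin", "wb");
    fwrite_int(f, 42, true);            // stored big-endian
    fwrite_double(f, 3.14, true);
    fclose(f);

    f = fopen("values.bin", "rb");
    int i = fread_int(f, true);         // converted back to the native byte order if needed
    double d = fread_double(f, true);
    fclose(f);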

Definition at line 106 of file pl_io_deprecated.h.

References fwrite_int().

void PLearn::fwrite_int FILE *  f,
const int *  ptr,
int  n,
bool  is_file_bigendian = true
 

Writes binary data to the file in the specified representation (little or big endian), regardless of the endianness used on the current architecture.

Definition at line 128 of file pl_io_deprecated.cc.

References reverse_int().

Referenced by fwrite_int(), PLearn::IntVecFile::put(), saveSNMat(), and saveSNVec().

real PLearn::gamdev int  ia  ) 
 

returns a gamma distributed random number

Definition at line 393 of file random.cc.

References exp(), log(), PLERROR, sqrt(), uniform_sample(), and x.

real PLearn::gauss_01_cum real  x  ) 
 

For X ~ Normal(0,1), cumulative probability function P(X<x).

Definition at line 158 of file pl_erf.cc.

References pl_erf(), and x.

Referenced by gauss_01_quantile(), gauss_cum(), normal_cdf(), p_value(), and testNoCorrelationAsymptotically().

real PLearn::gauss_01_density real  x  ) 
 

for X ~ Normal(0,1), return density of X at x

Definition at line 191 of file pl_erf.cc.

References exp(), Sqrt2Pi, and x.

Referenced by gauss_density_stddev().

real PLearn::gauss_01_log_density real  x  ) 
 

Definition at line 196 of file pl_erf.cc.

References Log2Pi, and x.

real PLearn::gauss_01_quantile real  q  ) 
 

For X ~ Normal(0,1), inverse of the cumulative probability function P(X<x), i.e. approximately gauss_01_quantile(gauss_01_cum(x)) ~=~ x. (The inverse is computed with a binary search, the bisection method.)
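
For illustration (not part of the original documentation):

    real q = gauss_01_quantile(0.975);   // approximately 1.96
    real p = gauss_01_cum(q);            // approximately 0.975, up to the bisection tolerance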

Definition at line 165 of file pl_erf.cc.

References gauss_01_cum().

real gauss_cum real  x,
real  mu,
real  sigma
[inline]
 

Definition at line 74 of file pl_erf.h.

References gauss_01_cum(), and x.

Referenced by PLearn::LimitedGaussianSmoother::smooth().

real gauss_density_stddev real  x,
real  mu,
real  sigma
[inline]
 

Definition at line 91 of file pl_erf.h.

References gauss_01_density(), and x.

real PLearn::gauss_density_var real  x,
real  mu,
real  var
 

Definition at line 208 of file pl_erf.cc.

References exp(), Sqrt2Pi, var(), and x.

real PLearn::gauss_log_density_stddev real  x,
real  mu,
real  sigma
 

Definition at line 213 of file pl_erf.cc.

References log(), Log2Pi, and x.

Referenced by PLearn::GaussMix::computeLogLikelihood().

real PLearn::gauss_log_density_var real  x,
real  mu,
real  var
 

Definition at line 201 of file pl_erf.cc.

References log(), Log2Pi, var(), and x.

Referenced by PLearn::GaussianProcessRegressor::computeCostsFromOutputs().

real PLearn::gaussian_01  ) 
 

returns a Gaussian random number with mean 0 and standard deviation 1

Definition at line 328 of file random.cc.

References gset, iset, log(), sqrt(), the_seed, and uniform_sample().

Referenced by gaussian_mu_sigma(), PLearn::EntropyContrast::gen_normal_0_1(), multivariate_normal(), and normal_sample().

real PLearn::gaussian_mixture_mu_sigma Vec &  w,
const Vec &  mu,
const Vec &  sigma
 

Returns a random number drawn from a mixture of Gaussians; "w" holds the mixture weights (positive numbers summing to 1), and "mu" and "sigma" are the vectors of means and standard deviations of each Gaussian.
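
A minimal sketch (not part of the original documentation) with a made-up two-component mixture:

    Vec w(2);     w[0] = 0.3;   w[1] = 0.7;       // mixture weights
    Vec mu(2);    mu[0] = -1.0; mu[1] = 2.0;      // component means
    Vec sigma(2); sigma[0] = 0.5; sigma[1] = 1.5; // component standard deviations
    real sample = gaussian_mixture_mu_sigma(w, mu, sigma);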

Definition at line 369 of file random.cc.

References PLearn::TVec< T >::data(), gaussian_mu_sigma(), and PLearn::TVec< T >::length().

real PLearn::gaussian_mu_sigma real  mu,
real  sigma
 

returns a Gaussian random number with mean "mu" and standard deviation "sigma"

Definition at line 356 of file random.cc.

References gaussian_01().

Referenced by fill_random_normal(), PLearn::DiagonalNormalSampleVariable::fprop(), gaussian_mixture_mu_sigma(), PLearn::SpiralDistribution::generate(), PLearn::GaussMix::generateFromGaussian(), and PLearn::AdaBoost::train().

template<class num_t>
void generalizedEigenVecOfSymmMat TMat< num_t > &  m1,
TMat< num_t > &  m2,
int  itype,
int  k,
TVec< num_t > &  eigen_values,
TMat< num_t > &  eigen_vectors
 

Computes up to k largest eigen_values and corresponding eigen_vectors of a real generalized symmetric-definite eigenproblem, of the form m1*x=(lambda)*m2*x (itype = 1), m1*m2*x=(lambda)*x (itype = 2) or m2*m1*x=(lambda)*x (itype = 3) m1 and m2 are assumed to be symmetric and m2 is also positive definite.

Parameters eigen_values and eigen_vectors are resized accordingly and filled by the call. The eigenvalues are returned in decreasing order (largest first). The corresponding eigenvectors are in the *ROWS* of eigen_vectors. WARNING: m1 and m2 are destroyed during the operation.

Definition at line 350 of file plapack.h.

References k, lapackGeneralizedEIGEN(), PLearn::TMat< T >::length(), PLERROR, PLearn::TVec< T >::resize(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::swap(), PLearn::TMat< T >::swapUpsideDown(), and PLearn::TMat< T >::width().

template<class T>
T geometric_mean const TMat< T > &  mat  ) 
 

Definition at line 3823 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), exp(), PLearn::TMat< T >::length(), log(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
T geometric_mean const TVec< T > &  vec  ) 
 

Definition at line 363 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), exp(), PLearn::TVec< T >::length(), and PLERROR.

string PLearn::get_option const vector< string > &  command_line,
const string option,
const string default_value
 

The command_line is made of pairs of the form "option value". Looks for an option in the command_line, and returns the corresponding value if it is found, or default_value otherwise. Note: this may be useful with command_line = stringvector(argc,argv);
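
A minimal sketch (not part of the original documentation), assumed to run inside main(argc, argv); the option names are made up and stringvector is the helper mentioned in the note above:

    vector<string> command_line = stringvector(argc, argv);
    string lr  = get_option(command_line, "learning_rate", "0.01");
    bool quiet = find(command_line, "quiet");   // "turn-on" option, see find() above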

Definition at line 577 of file stringutils.cc.

long PLearn::get_seed  ) 
 

returns the current seed used by the random number generator

Definition at line 211 of file random.cc.

References seed(), and the_seed.

char getAfterSkipBlanks istream &  in  )  [inline]
 

gets the first char after removal of blanks

Definition at line 164 of file fileutils.h.

char getAfterSkipBlanksAndComments istream &  in  )  [inline]
 

gets the first char after removal of blanks and comments

Definition at line 167 of file fileutils.h.

References skipBlanksAndComments().

Referenced by readAndMacroProcess().

string PLearn::getcwd  ) 
 

returns the absolute path to the current working directory as a string

Definition at line 81 of file fileutils.cc.

Referenced by abspath(), getDataSet(), getDataSetDate(), and readAndMacroProcess().

VMat PLearn::getDataSet const string datasetstring,
const string alias = ""
 

datasetstring can be one of the following:

  • the name of a preprogrammed dataset (possibly with parameter specification)
  • the path of the basename of an .sdb
  • the path of a file in one of the recognized matrix data formats
  • the path of a directory containing a dataset
  • the name of an alias in the dataset.aliases file of the current directory or one of its parents

alias is a short name that can be used as part of a filename containing results related to the dataset (it's set using the VMat's setAlias method, and code that wishes to use it can access it by calling getAlias). A usage sketch follows.
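
For example (not part of the original documentation; the paths and alias are hypothetical):

    VMat train = getDataSet("data/train.amat");       // a file in a recognized matrix format
    VMat other = getDataSet("myalias", "myalias");    // an alias defined in a dataset.aliases file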

Definition at line 109 of file getDataSet.cc.

References c_str(), extract_directory(), extract_extension(), extract_filename(), getcwd(), getDatasetAliases(), isfile(), PLearn::PP< VMatrix >::isNull(), loadAsciiAsVMat(), loadAsciiSingleBinaryDescriptor(), loadClassificationDataset(), newObject(), pathexists(), PLERROR, readFileAndMacroProcess(), remove_extension(), removeblanks(), slash, split(), and vconcat().

Referenced by PLearn::AutoVMatrix::build_(), PLearn::LearnerCommand::compute_outputs(), PLearn::VVMatrix::createPreproVMat(), cross_valid(), plotVMats(), PLearn::VVMatrix::processJoinSection(), PLearn::TestDependencyCommand::run(), PLearn::TestDependenciesCommand::run(), PLearn::KolmogorovSmirnovCommand::run(), PLearn::LearnerCommand::test(), PLearn::LearnerCommand::train(), train_and_test(), use(), viewVMat(), and vmatmain().

map< string, string > PLearn::getDatasetAliases const string dir_or_file_path = "."  ) 
 

Looks for 'dataset.aliases' file in specified directory and its parent directories; loads it and returns the corresponding map.

Returns an empty map if file was not found.

Definition at line 258 of file getDataSet.cc.

References PLearn::PStream::get(), isfile(), locateDatasetAliasesDir(), PLERROR, read(), removeblanks(), and ws().

Referenced by cross_valid(), getDataSet(), getDataSetDate(), train_and_test(), and use().

time_t PLearn::getDataSetDate const string datasetstring,
const string alias = ""
 

Definition at line 75 of file getDataSet.cc.

References extract_extension(), getcwd(), getDatasetAliases(), isfile(), mtime(), pathexists(), PLERROR, and slash.

Referenced by PLearn::VVMatrix::getDateOfVMat(), PLearn::VMatLanguage::preprocess(), and vmatmain().

string PLearn::getDataSetHelp  ) 
 

returns a help describing the datasetstring parameter of getDataSet

Definition at line 64 of file getDataSet.cc.

References loadClassificationDatasetHelp().

Referenced by PLearn::HelpCommand::helpDatasets(), old_plearn_main(), and vmatmain().

time_t PLearn::getDateOfCode const string codefile  ) 
 

Definition at line 56 of file VMatLanguage.cc.

References mtime(), and PLERROR.

Referenced by PLearn::VVMatrix::getDateOfVMat().

string getHost  ) 
 

Definition at line 675 of file VMatrix.cc.

Referenced by PLearn::VMatrix::lockMetaDataDir().

bool getList char *  str,
int  curj,
const VMat &  vm,
Vec &  outList,
char *  strReason
 

Definition at line 294 of file vmatmain.cc.

References PLearn::TVec< T >::clear(), pl_isnumber(), PLearn::TVec< T >::push_back(), split(), toint(), and PLearn::VMat::width().

Referenced by viewVMat().

map< string, string > PLearn::getModelAliases const string filename  ) 
 

reads a modelalias -> object_representation map from a model.aliases file

Definition at line 72 of file old_plearn_main.cc.

References PLERROR, remove_comments(), removeblanks(), smartReadUntilNext(), and ws().

Referenced by cross_valid(), getMultipleModelAliases(), and train_and_test().

vector< string > PLearn::getMultipleModelAliases const string model  ) 
 

Definition at line 260 of file old_plearn_main.cc.

References exitmsg(), getModelAliases(), and isfile().

Referenced by old_plearn_main().

void PLearn::getNextNonBlankLine istream &  in,
string line
 

returns the next non blank line (#-style comments are considered blank)

Definition at line 451 of file fileutils.cc.

Referenced by PLearn::AsciiVMatrix::build_(), and parseSizeFromRemainingLines().

vector< string > PLearn::getNonBlankLines const string in  ) 
 

Returns a vector of string containing only non-empty lines, as you guessed it.

Definition at line 594 of file stringutils.cc.

References isBlank(), and split().

Referenced by PLearn::VVMatrix::buildFilteredVMatFromVPL(), and PLearn::VVMatrix::extractSourceMatrix().

int getPid  ) 
 

Definition at line 680 of file VMatrix.cc.

Referenced by PLearn::VMatrix::lockMetaDataDir().

template<class T>
TVec<T> getQuantiles const TVec< T > &  vec,
int  q
 

Returns a vector of length q+1 that contains the q quantiles of the sorted vector vec; the last value corresponds to the last value of vec.

Definition at line 1928 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

int PLearn::getSystemTotalMemory  ) 
 

Definition at line 9 of file procinfo.cc.

References PLERROR.

Referenced by PLearn::TestDependenciesCommand::run().

string getUser  ) 
 

Definition at line 685 of file VMatrix.cc.

Referenced by PLearn::VMatrix::lockMetaDataDir().

template<class T>
int GramSchmidtOrthogonalization TMat< T >  A,
T  tolerance = 1e-6
 

Orthonormalize in-place the rows of the given matrix, using successive projections on the orthogonal subspace of the previously found basis.

The resulting matrix has the following properties:

  • its rows spans the same space as A
  • its rows are orthogonal (dot product = 0)
  • its rows are of norm 1

However, it may happen that the original rows of A were not linearly independent. In that case, the algorithm returns the number of rows that were successfully obtained (and the user should probably then do A = A.subMatRows(0,result) to obtain the basis). The tolerance argument is the minimum norm of a row, once projected orthogonally to the previous ones, for this row to contribute to the basis. A usage sketch follows.
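
A minimal sketch (not part of the original documentation), with made-up dimensions:

    Mat A(5, 20);
    fill_random_normal(A);                     // rows to orthonormalize
    int n_basis = GramSchmidtOrthogonalization(A);
    if (n_basis < A.length())
        A = A.subMatRows(0, n_basis);          // keep only the rows forming the basis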

Definition at line 5654 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), norm(), and projectOnOrthogonalSubspace().

Referenced by diagonalizeSubspace(), and PLearn::PCA::train().

VMat PLearn::grep VMat  d,
int  col,
Vec  values,
const string indexfile,
bool  exclude = false
 

Same as above, except that the indexes of the rows are stored on disk rather than in memory (a SelectRowsFileIndexVMatrix is returned rather than a SelectRowsVMatrix). BEWARE: If the indexfile already exists, it is *not* recomputed, but used as is.

Definition at line 743 of file VMat_maths.cc.

References PLearn::IntVecFile::append(), PLearn::TVec< T >::contains(), file_exists(), PLearn::VMat::length(), and PLearn::VMat::rows().

VMat PLearn::grep VMat  d,
int  col,
Vec  values,
bool  exclude = false
 

If exclude==false (the default), returns a VMat containing only the rows whose column col has a value that belongs to the given set of authorized values. If exclude==true, returns a VMat with all the other rows (corresponds to grep -v). [MISSING_VALUE is a possible value and is handled correctly.]
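
A minimal sketch (not part of the original documentation); d is assumed to be an existing VMat whose last column holds class labels:

    Vec keep(2);
    keep[0] = 0; keep[1] = 1;
    VMat zeros_and_ones = grep(d, d.width()-1, keep);         // rows whose last column is 0 or 1
    VMat the_rest       = grep(d, d.width()-1, keep, true);   // the complement, like grep -v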

Definition at line 689 of file VMat_maths.cc.

References PLearn::TVec< T >::contains(), PLearn::TVec< T >::copy(), PLearn::VMat::length(), PLearn::VMat::rows(), and PLearn::TVec< T >::subVec().

template<class T>
TMat<T> grep TMat< T >  data,
int  col,
T  value,
bool  exclude = false
[inline]
 

Same as above, but with a single value argument.

Definition at line 5738 of file TMat_maths_impl.h.

References grep().

template<class T>
TMat<T> grep TMat< T >  data,
int  col,
TVec< T >  values,
bool  exclude = false
 

Definition at line 5563 of file TMat_maths_impl.h.

References PLearn::TMat< T >::compact(), PLearn::TVec< T >::contains(), PLearn::TMat< T >::length(), PLearn::TMat< T >::resize(), and PLearn::TMat< T >::width().

Referenced by grep().

void PLearn::halfShuffleRows SDB &  sdb  ) 
 

not quite a random shuffle (see implementation) but more efficient use of disk cache

Definition at line 937 of file SimpleDB.cc.

References endl(), PLearn::SimpleDB< KeyType, QueryResult >::getInRow(), PLearn::SimpleDB< KeyType, QueryResult >::getSchema(), k, PLearn::SimpleDB< KeyType, QueryResult >::length(), SDB, and PLearn::SimpleDB< KeyType, QueryResult >::setRow().

Var hard_slope Var  x,
Var  left,
Var  right
[inline]
 

Definition at line 75 of file HardSlopeVariable.h.

References left(), right(), and x.

real hard_slope real  x,
real  left = 0,
real  right = 1
[inline]
 

Definition at line 338 of file pl_math.h.

References left(), right(), and x.

Referenced by PLearn::UnaryHardSlopeVariable::fprop(), PLearn::HardSlopeVariable::fprop(), soft_slope(), and tabulated_soft_slope().

real PLearn::hard_slope_integral real  left = 0,
real  right = 1,
real  a = 0,
real  b = 1
 

Definition at line 220 of file pl_math.cc.

Referenced by soft_slope_integral(), and tabulated_soft_slope_integral().

template<class T>
T harmonic_mean const TVec< T > &  vec,
bool  ignore_missing = false
 

Definition at line 325 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), MISSING_VALUE, and PLERROR.

size_t PLearn::hashbytes const char *  byte_start,
size_t  byte_length
 

**************** Hash tables support *************************

Basic hashing function that can be used when defining hashing functions for objects of any type. This one mixes the bits in the byte_length bytes starting at byte_start, and returns an integer between 0 and MAXINT.
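
A minimal sketch (not part of the original documentation) of defining hashval for a hypothetical key type by hashing its raw bytes:

    struct Point { int x; int y; };

    inline size_t hashval(const Point& p)
    { return hashbytes(reinterpret_cast<const char*>(&p), sizeof(Point)); }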

Definition at line 78 of file pl_hash_fun.cc.

References PL_HASH_NOMBRES_MAGIQUES.

Referenced by hashval().

template<class T, unsigned SizeBits, class Allocator>
unsigned int hashval const SmallVector< T, SizeBits, Allocator > &  v  )  [inline]
 

hash function for hash tables

Definition at line 170 of file SmallVector.h.

References hashbytes().

template<class T>
size_t hashval const T &  x  )  [inline]
 

default which will work in many cases but not all

Definition at line 75 of file pl_hash_fun.h.

References hashbytes(), and x.

size_t hashval const char *  strng  )  [inline]
 

hashing function which must be redefined for classes that can be used as keys:

unsigned int hash(const T& object); or unsigned int hash(const T object);

This function returns ANY unsigned int (i.e. between 0 and MAXINT); it is hash(x) % table_size that will be used to choose an address in the hash table.

It is defined here for some built-in types:

Definition at line 70 of file pl_hash_fun.h.

References hashbytes(), and strlen().

VMat hconcat Array< VMat >  ds  )  [inline]
 

Definition at line 97 of file ConcatColumnsVMatrix.h.

VMat hconcat VMat  d1,
VMat  d2
[inline]
 

Definition at line 94 of file ConcatColumnsVMatrix.h.

Var hconcat const VarArray &  varray  )  [inline]
 

Definition at line 78 of file ConcatColumnsVariable.h.

RandomVar PLearn::hconcat const RVArray &  a  ) 
 

Definition at line 456 of file RandomVar.cc.

template<class T>
TMat<T> hconcat const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

Definition at line 214 of file Array_impl.h.

References hconcat().

template<class T>
TMat<T> hconcat(const Array< TMat<T> >& ar)
 

Definition at line 190 of file Array_impl.h.

References PLERROR, and PLearn::TMat< T >::subMatColumns().

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::VVMatrix::createPreproVMat(), hconcat(), loadUSPS(), PLearn::LocallyWeightedDistribution::log_density(), PLearn::SequentialModelSelector::matlabSave(), removeColumn(), PLearn::ConcatColumnsRandomVariable::setValueFromParentsValue(), PLearn::TangentLearner::train(), PLearn::GaussianContinuum::train(), and PLearn::ClassifierFromDensity::train().

double PLearn::hhmmss_to_double(int hh, int mm, int ss)
 

Converts an hours/minutes/seconds triplet to a day fraction.
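
For example, hhmmss_to_double(12, 0, 0) should give 0.5 and hhmmss_to_double(6, 0, 0) should give 0.25, assuming the fraction is measured from midnight.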

Definition at line 166 of file PDateTime.cc.

Referenced by datetime_to_double(), and PLearn::PDateTime::toJulianDay().

template<class T>
TVec<T> histogram(const TVec<T>& vec, T minval, T maxval, int nbins)
 

Definition at line 466 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and val.

Referenced by displayHistogram(), and PLearn::Gnuplot::histoplot().

string PLearn::hostname()
 

Definition at line 139 of file general.cc.

References PLERROR.

Referenced by establish_connection(), and PLearn::IPServer::machine_name().

real hyperplaneDistance(const Vec& x, const Mat& points, real weight_decay = 0.) [inline]
 

Distance between point x and closest point on hyperplane that passes through all points.

Definition at line 619 of file plapack.h.

References closestPointOnHyperplane(), L2distance(), and x.

int PLearn::ICBCpartition(const Vec& claims, real threshold)
 

Given a vector of claims (representing the claim values for different KOLs) and a threshold value, the following function returns the class (integer between 0 and 3) of the claims vector. There are 4 classes:

  • class 0: "neg" claim vector (i.e. at least one claim in the vector < 0 and the others = 0)
  • class 1: "zero" claim vector (i.e. all claims in the vector are 0)
  • class 2: "smallpos" claim vector (i.e. at least one claim in the vector > 0 and all claims < threshold)
  • class 3: "largepos" claim vector (i.e. at least one claim in the vector >= threshold)
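
A minimal sketch of a call, with purely hypothetical claim values, following the class definitions above:

    Vec claims(3);
    claims[0] = 0.5; claims[1] = 12.0; claims[2] = 0.0;
    int cl = PLearn::ICBCpartition(claims, 10.0 /* threshold */);
    // cl should be 3 ("largepos"), since one claim is >= threshold;
    // with claims (0.5, 3, 0) it should be 2, with (0, 0, 0) it should be 1,
    // and with (-2, 0, 0) it should be 0.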

Definition at line 1210 of file SDBVMat.cc.

References PLearn::TVec< T >::length().

Referenced by PLearn::SDBVMFieldICBCClassification::convertField().

Var ifThenElse(Var If, Var Then, Var Else) [inline]
 

It would be nice if we could redefine the ternary operator (?:).

Definition at line 86 of file IfThenElseVariable.h.

template<class T>
void ifThenElse(const TVec<T>& if_vec, const TVec<T>& then_vec, const TVec<T>& else_vec, TVec<T>& dest)
 

Definition at line 1771 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::NeuralNet::build_(), PLearn::ConditionalDensityNet::build_(), d_hard_slope(), PLearn::PowVariableVariable::symbolicBprop(), PLearn::IfThenElseVariable::symbolicBprop(), and PLearn::AbsVariable::symbolicBprop().

double PLearn::incbcf(double a, double b, double x)
 

Definition at line 813 of file random.cc.

References big, biginv, MACHEP, and x.

double PLearn::incbd(double a, double b, double x)
 

Definition at line 903 of file random.cc.

References big, biginv, MACHEP, and x.

real PLearn::incomplete_beta(real z, real x, real y)
 

Note that z must be in [0,1]

Definition at line 146 of file random.cc.

References exp(), incomplete_beta_continued_fraction(), log(), log_beta(), PLERROR, and x.

Referenced by student_t_cdf().

real incomplete_beta_continued_fraction(real z, real x, real y)
 

Definition at line 94 of file random.cc.

References k, PLWARNING, and x.

Referenced by incomplete_beta().

map< real, TVec<int> > PLearn::indicesOfOccurencesInColumn(VMat m, int col)
 

returns a map mapping all different values appearing in column col to a vector of the corresponding row indices in the VMat (this proceeds in 2 passes, first calling countOccurencesInColumn to allocate the exact memory)

Definition at line 726 of file VMat_maths.cc.

References countOccurencesInColumn(), and PLearn::VMat::length().

Referenced by PLearn::ClassifierFromDensity::train().

Mat PLearn::input2dSet(const string filename = "data2d.amat")
 

This will input a 2d binary classification problem (launches a java applet).

Definition at line 59 of file databases.cc.

References file_exists(), JAVA, loadAscii(), and shuffleRows().

Referenced by loadClassificationDataset().

void PLearn::interactiveDisplayCDF(const Array<VMat>& vmats)
 

Definition at line 96 of file vmatmain.cc.

References PLearn::StatsCollector::cdf(), endl(), k, PLearn::TVec< T >::length(), pgetline(), PLearn::Gnuplot::plot(), PLearn::TVec< T >::size(), split(), and toint().

Referenced by vmatmain().

void intersection(Set a, Set b, Set res) [inline]
 

Definition at line 100 of file Set.h.

References PLearn::Set::begin(), and PLearn::Set::end().

Var interValues(Var values) [inline]
 

If values = [x1,x2,...,x10], the resulting variable is [(x1+x2)/2, (x2+x3)/2, ..., (x9+x10)/2].

Definition at line 80 of file InterValuesVariable.h.

template<class T>
void inverse(const TMat<T>& m, TMat<T>& inv)
 

Definition at line 4709 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::resize(), and PLearn::TMat< T >::width().

template<class T>
TMat<T> inverse(TMat<T>& m)
 

Definition at line 4699 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length().

Referenced by PLearn::MatrixInverseVariable::fprop(), leftPseudoInverse(), matInvert(), and rightPseudoInverse().

template<class T>
TVec<T> inverse_sigmoid(const TVec<T>& src) [inline]
 

Definition at line 1032 of file TMat_maths_impl.h.

References compute_inverse_sigmoid(), and PLearn::TVec< T >::length().

real inverse_sigmoid(real x) [inline]
 

numerically stable version of inverse_sigmoid(x) = log(x/(1-x))

Definition at line 278 of file pl_math.h.

References FEQUAL, log(), PLERROR, and x.

Referenced by compute_inverse_sigmoid(), PLearn::ConditionalDensityNet::initializeParams(), and PLearn::ConditionalDensityNet::train().

real inverse_softplus(real y) [inline]
 

inverse of softplus function
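
Since softplus(x) = log(1 + exp(x)), this presumably returns x = log(exp(y) - 1) for y > 0, up to the numerical safeguards suggested by the references to exp(), log() and MISSING_VALUE below.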

Definition at line 327 of file pl_math.h.

References exp(), log(), and MISSING_VALUE.

Referenced by PLearn::ConditionalDensityNet::initializeParams(), and PLearn::ConditionalDensityNet::train().

template<class T>
TVec<T> inverted(const TVec<T>& vec)
 

Definition at line 1053 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Var invertElements(Var v) [inline]
 

Definition at line 72 of file InvertElementsVariable.h.

template<class T>
void invertElements(const TMat<T>& m)
 

x'_ij = 1.0/x_ij;

Definition at line 4642 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void invertElements(const TVec<T>& vec)
 

Definition at line 1045 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Referenced by PLearn::ShiftAndRescaleVMatrix::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), d_hard_slope(), normalize(), operator/(), PLearn::DivVariable::symbolicBprop(), and PLearn::PLS::train().

real ipow(real x, int p) [inline]
 

Definition at line 261 of file pl_math.h.

References x.

Referenced by PLearn::CompactVMatrixPolynomialKernel::evaluate(), and PLearn::PolynomialKernel::evaluateFromDot().

bool is_integer(real x) [inline]
 

Definition at line 248 of file pl_math.h.

References x.

Referenced by computeConditionalMeans(), and PLearn::ClassErrorCostFunction::evaluate().

bool is_missing(float x) [inline]
 

Missing values for double and float are represented by NaN.

Definition at line 246 of file pl_math.h.

References x.

bool is_missing(double x) [inline]
 

Missing values for double and float are represented by NaN.

Definition at line 243 of file pl_math.h.

References x.

template<class T>
bool is_missing(const T& x) [inline]
 

Tells if the passed value means "missing" for its data-type.

The default version returns false (not a "missing value").

Definition at line 240 of file pl_math.h.

Referenced by argmax(), argmin(), PLearn::RealMapping::binnumber(), PLearn::FinancePreprocVMatrix::build_(), computeBasicStats(), PLearn::SDBVMFieldICBCTargets::convertField(), PLearn::SDBVMFieldDate::convertField(), PLearn::SDBVMField::convertMissing(), countOccurencesInColumn(), dilogarithm(), double_to_datetime(), DX_create_grid_outputs_file(), PLearn::ConditionalStatsCollector::findrange(), float_to_date(), PLearn::DilogarithmVariable::fprop(), PLearn::ConcatRowsVMatrix::fullyCheckMappings(), PLearn::ConcatRowsVMatrix::get(), PLearn::MovingAverageVMatrix::getNewRow(), PLearn::JoinVMatrix::getNewRow(), PLearn::FinancePreprocVMatrix::getNewRow(), PLearn::CumVMatrix::getNewRow(), PLearn::ConcatRowsVMatrix::getSubRow(), PLearn::VMatrix::getValString(), PLearn::SDBVMOutputCoder::handleOtherAndMissing(), harmonic_mean(), PLearn::TVec< pair< real, real > >::hasMissing(), PLearn::TMat< pair< real, real > >::hasMissing(), PLearn::RealMapping::map(), matlabSave(), mean(), new_get_compr_data_type(), new_write_raw_data_as(), PLearn::VMatLanguage::preprocess(), PLearn::RealMapping::print(), print_diff(), remove_missing(), PLearn::VMatLanguage::run(), PLearn::SDBVMOutputCoder::setOutput(), PLearn::StrTableVMatrix::StrTableVMatrix(), sum(), PLearn::VMFieldStat::update(), PLearn::StatsCollector::update(), PLearn::LiftStatsCollector::update(), PLearn::ConditionalStatsCollector::update(), viewVMat(), weighted_mean(), and PLearn::PStream::writeAsciiNum().

real is_positive(real x) [inline]
 

"hard" version of the sigmoid, i.e.

the indicator function that is 1 if its argument is STRICTLY positive, and 0 otherwise

Definition at line 275 of file pl_math.h.

References x.

Var isAboveThreshold(Var v, real threshold = 0, real truevalue = 1, real falsevalue = 0, bool strict = false) [inline]
 

Definition at line 80 of file IsAboveThresholdVariable.h.

Referenced by PLearn::ConditionalDensityNet::build_().

bool PLearn::isAlpha(char c)
 

Definition at line 2832 of file WordNetOntology.cc.

References isDigit(), and isLetter().

bool PLearn::isBlank(const string s)
 

Returns true if s is a blank line, i.e. it contains only spaces and tabs up to the end of the line or up to a # comment character.

Definition at line 303 of file stringutils.cc.

Referenced by getNonBlankLines(), PLearn::VMatLanguage::preprocess(), and PLearn::VVMatrix::processJoinSection().

Var PLearn::isdifferent(Var v1, Var v2)
 

Definition at line 198 of file Var_operators.cc.

References isequal().

bool PLearn::isDigit(char c)
 

Definition at line 2827 of file WordNetOntology.cc.

Referenced by isAlpha(), and trimWord().

bool PLearn::isdir(const string path)
 

returns true if the given path is an existing directory (or a symbolic link pointing to a directory)

Definition at line 139 of file fileutils.cc.

Referenced by abspath(), PLearn::DiskVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), force_mkdir(), force_rmdir(), goAndCreateDir(), makedir(), PLearn::VMatrix::resolveFieldInfoLink(), train_and_test(), and PLearn::PrecomputedVMatrix::usePrecomputed().

Var PLearn::isequal(Var v1, Var v2)
 

First case: v1 and v2 are two vectors of length() l; the resulting Var is 1 if v1->value[i] == v2->value[i] for all i = 0 to l-1, and 0 otherwise. Second case: one of v1 or v2 is a scalar variable (of length() 1) and the other is a vector of length() l; the resulting Var is then a vector of length() l performing an element-wise comparison.

Definition at line 98 of file EqualVariable.cc.

Referenced by isdifferent(), PLearn::FunctionalRandomVariable::logP(), operator!=(), and operator==().

bool PLearn::isfile(const string path)
 

returns true if the given path is an existing regular file (or a symbolic link pointing to a file)

Definition at line 153 of file fileutils.cc.

Referenced by PLearn::AsciiVMatrix::AsciiVMatrix(), PLearn::FileVMatrix::build_(), PLearn::DiskVMatrix::build_(), PLearn::AsciiVMatrix::build_(), PLearn::VVMatrix::buildFilteredVMatFromVPL(), PLearn::VVMatrix::createPreproVMat(), cross_valid(), PLearn::FileVMatrix::FileVMatrix(), PLearn::VVMatrix::generateVMatIndex(), getDataSet(), getDatasetAliases(), getDataSetDate(), PLearn::VMatrix::getFieldInfos(), PLearn::AutoSDBVMatrix::getMappings(), getMultipleModelAliases(), PLearn::VVMatrix::getPrecomputedDataName(), PLearn::VMatrix::getRanges(), PLearn::VMatrix::getSFIFFilename(), PLearn::VMatrix::getStats(), PLearn::GhostScript::GhostScript(), PLearn::VVMatrix::isPrecomputedAndUpToDate(), PLearn::VMatrix::loadStringMapping(), locateDatasetAliasesDir(), old_plearn_main(), PLearn::IntVecFile::open(), PLearn::VMatrix::resolveFieldInfoLink(), train_and_test(), and PLearn::PrecomputedVMatrix::usePrecomputed().

template<class T>
void isLargerThan(const TVec<T>& first, const TVec<T>& second, TVec<T>& dest)
 

Definition at line 1711 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

template<class T>
void isLargerThanOrEqualTo(const TVec<T>& first, const TVec<T>& second, TVec<T>& dest)
 

Definition at line 1726 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

bool PLearn::isLegalPunct(char c)
 

Definition at line 2837 of file WordNetOntology.cc.

Referenced by trimWord().

bool PLearn::isLetter(char c)
 

Definition at line 2822 of file WordNetOntology.cc.

Referenced by isAlpha(), and trimWord().

bool PLearn::isMapKeysAreInt(map<real, int>& m)
 

check that all keys of the map are int values

Definition at line 126 of file general.cc.

Referenced by PLearn::CompactVMatrix::CompactVMatrix().

Var isMissing(Var x) [inline]
 

Definition at line 79 of file IsMissingVariable.h.

References x.

Referenced by PLearn::NeuralNet::build_().

bool PLearn::isParagraphBlank(const string s)
 

Returns true if s is a blank paragraph, i.e. it contains only spaces and tabs up to the end of the string.

Definition at line 318 of file stringutils.cc.

template<class T>
void isSmallerThan(const TVec<T>& first, const TVec<T>& second, TVec<T>& dest)
 

Definition at line 1741 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

template<class T>
void isSmallerThanOrEqualTo(const TVec<T>& first, const TVec<T>& second, TVec<T>& dest)
 

Definition at line 1756 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

string join(const TVec<string>& s, const string separator) [inline]
 

Definition at line 832 of file TMat_impl.h.

References PLearn::TVec< T >::size().

string PLearn::join(const vector<string>& s, const string separator = " ")
 

makes a single string from a vector of strings
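
A minimal usage sketch (the values are illustrative only):

    vector<string> parts;
    parts.push_back("2004");
    parts.push_back("03");
    parts.push_back("24");
    string date = PLearn::join(parts, "-");   // "2004-03-24"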

Definition at line 499 of file stringutils.cc.

string join(const Array<string>& s, const string separator) [inline]
 

Definition at line 132 of file Array_impl.h.

References PLearn::TVec< T >::size().

Referenced by PLearn::Learner::measure(), PLearn::Learner::openTestResultsStreams(), PLearn::Learner::openTrainObjectiveStream(), PLearn::PTester::perform(), PLearn::VMatLanguage::preprocess(), and PLearn::SequentialValidation::run().

real PLearn::KS_test(Vec& v1, Vec& v2, int conv = 10)
 

Returns the result of a Kolmogorov-Smirnov test between 2 samples. The call sorts v1 and v2.

Definition at line 247 of file stats_utils.cc.

References KS_test().

void PLearn::KS_test(Vec& v1, Vec& v2, int conv, real& D, real& p_value)
 

Kolmogorov-Smirnov test.

Computes D (the maximum absolute difference between the 2 cdfs) and p_value = P(random variable D > observed D | no difference in the true distributions). A reasonable value for conv is 10. The call sorts v1 and v2.

Definition at line 238 of file stats_utils.cc.

References KS_test(), PLearn::TVec< T >::length(), max_cdf_diff(), and p_value().
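
A hedged usage sketch of this two-sample form (the sample values are purely illustrative; note that the call sorts both vectors in place):

    Vec sample1(3), sample2(3);
    sample1[0] = 0.1; sample1[1] = 0.5; sample1[2] = 0.9;
    sample2[0] = 0.2; sample2[1] = 0.6; sample2[2] = 1.0;
    real D, p_value;
    PLearn::KS_test(sample1, sample2, 10 /* conv */, D, p_value);
    // a small p_value is evidence that the two samples were not drawn
    // from the same distribution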

real PLearn::KS_test(real D, real N, int conv = 10)
 

Return the probability that the Kolmogorov-Smirnov statistic D takes the observed value or greater, given the null hypothesis that the distributions that are compared are really identical. N is the effective number of samples used for comparing the distributions. The argument conv gives the precision with which this probability is computed. A value above 10 does not bring much improvement. Note that the statistic D can be obtained as follows:

Comparing two empirical distributions from data sets D_1 and D_2: Let F_1(x) be the empirical cumulative distribution of D_1 of size N_1, and let F_2(x) be the empirical cumulative distribution of D_2 of size N_2. Then

D = max_x | F_1(x) - F_2(x) |

and the effective N is N_1 N_2 / (N_1 + N_2).

Comparing a theoretical distribution F and a data set D of size N with empirical cumulative distribution F_N:

D = max_x | F(x) - F_N(x) |

This function returns the following

P(D > observed d | same distributions) estimated by 2 sum_{k=1}^{infty} (-1)^{k-1} exp(-2k^2 a^2)

where a = sqrt(D*(sqrt(N)+0.12+0.11/sqrt(N)))

Ref: Stephens, M.A. (1970), Journal of the Royal Statistical Society B, vol. 32, pp. 115-122.

Definition at line 221 of file stats_utils.cc.

References exp(), k, sqrt(), and x.

Referenced by KS_test(), and PLearn::KolmogorovSmirnovCommand::run().

template<class T>
T kthOrderedElement(const TVec<T>& vec, int k) [inline]
 

Returns the value of the kth ordered element of vec; k can take values 0 to vec.length()-1.
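
For example, kthOrderedElement(vec, 0) is the smallest element of vec and kthOrderedElement(vec, vec.length()-1) is the largest.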

Definition at line 1875 of file TMat_maths_impl.h.

References k, and positionOfkthOrderedElement().

Referenced by median().

template<class T>
T L1distance(const TVec<T>& vec1, const TVec<T>& vec2) [inline]
 

Definition at line 775 of file TMat_maths_impl.h.

References dist().

template<class T>
T L2distance(const TVec<T>& vec1, const TVec<T>& vec2) [inline]
 

Definition at line 771 of file TMat_maths_impl.h.

References dist().

Referenced by hyperplaneDistance(), and printDistanceStatistics().

void lapack_Xgesdd_(char* JOBZ, int* M, int* N, float* A, int* LDA, float* S, float* U, int* LDU, float* VT, int* LDVT, float* WORK, int* LWORK, int* IWORK, int* INFO) [inline]
 

Definition at line 64 of file plapack.h.

References sgesdd_().

void lapack_Xgesdd_(char* JOBZ, int* M, int* N, double* A, int* LDA, double* S, double* U, int* LDU, double* VT, int* LDVT, double* WORK, int* LWORK, int* IWORK, int* INFO) [inline]
 

Definition at line 61 of file plapack.h.

References dgesdd_().

Referenced by lapackSVD().

void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, int* ISUPPZ, double* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO) [inline]
 

Definition at line 70 of file plapack.h.

References dsyevr_().

void lapack_Xsyevr_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, int* ISUPPZ, float* WORK, int* LWORK, int* IWORK, int* LIWORK, int* INFO) [inline]
 

Definition at line 67 of file plapack.h.

References ssyevr_().

void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO) [inline]
 

Definition at line 58 of file plapack.h.

References ssyevx_().

void lapack_Xsyevx_(char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO) [inline]
 

Definition at line 55 of file plapack.h.

References dsyevx_().

Referenced by eigen_SymmMat(), and lapackEIGEN().

void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, float* A, int* LDA, float* B, int* LDB, float* VL, float* VU, int* IL, int* IU, float* ABSTOL, int* M, float* W, float* Z, int* LDZ, float* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO) [inline]
 

Definition at line 76 of file plapack.h.

References ssygvx_().

void lapack_Xsygvx_(int* ITYPE, char* JOBZ, char* RANGE, char* UPLO, int* N, double* A, int* LDA, double* B, int* LDB, double* VL, double* VU, int* IL, int* IU, double* ABSTOL, int* M, double* W, double* Z, int* LDZ, double* WORK, int* LWORK, int* IWORK, int* IFAIL, int* INFO) [inline]
 

Definition at line 73 of file plapack.h.

References dsygvx_().

Referenced by lapackGeneralizedEIGEN().

template<class num_t>
void lapackEIGEN(const TMat<num_t>& A, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE = 'A', num_t low = 0, num_t high = 0, num_t ABSTOL = 0)
 

Computes the eigenvalues and eigenvectors of a symmetric (NxN) matrix A.

Meaning of RANGE:
  • 'A': all eigenvalues will be found.
  • 'V': all eigenvalues in the half-open interval (low,high] will be found.
  • 'I': eigenvalues with indexes int(low) to int(high) included will be found (the smallest eigenvalue having index 0).

ABSTOL is the tolerance (see lapack doc for call dsyevx_ )

If you do not wish to compute eigenvectors, provide a null (empty) 'eigenvecs'.

Upon return, eigenvals will contain the M eigenvalues found in increasing order (it will be resized to M). And eigenvecs (unless initially null) will be resized to an MxN matrix containing the corresponding M eigenvectors in its *rows*.
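
A minimal sketch of a call on a small symmetric matrix (element access through operator() is assumed, and the contents of A may be overwritten by the call, as with the other lapack wrappers):

    Mat A(2, 2);
    A(0,0) = 2; A(0,1) = 1;
    A(1,0) = 1; A(1,1) = 2;               // symmetric
    Vec eigenvals;
    Mat eigenvecs;                        // pass an empty Mat to skip eigenvectors
    lapackEIGEN(A, eigenvals, eigenvecs); // RANGE defaults to 'A'
    // eigenvals should now hold 1 and 3 in increasing order, and the
    // corresponding eigenvectors are in the rows of eigenvecs.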

Definition at line 101 of file plapack.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::isEmpty(), PLearn::TMat< T >::isNotEmpty(), lapack_Xsyevx_(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), and PLearn::TMat< T >::width().

Referenced by eigenVecOfSymmMat().

template<class num_t>
void lapackGeneralizedEIGEN(const TMat<num_t>& A, const TMat<num_t>& B, int ITYPE, TVec<num_t>& eigenvals, TMat<num_t>& eigenvecs, char RANGE = 'A', num_t low = 0, num_t high = 0, num_t ABSTOL = 0)
 

Computes the eigenvalues and eigenvectors of a real generalized symmetric-definite eigenproblem, of the form A*x=(lambda)*B*x, A*Bx=(lambda)*x, or B*A*x=(lambda)*x A and B are assumed to be symmetric and B is also positive definite.

Meaning of ITYPE. Specifies the problem type to be solved:
  • 1: A*x = (lambda)*B*x
  • 2: A*B*x = (lambda)*x
  • 3: B*A*x = (lambda)*x

Meaning of RANGE:
  • 'A': all eigenvalues will be found.
  • 'V': all eigenvalues in the half-open interval (low,high] will be found.
  • 'I': eigenvalues with indexes int(low) to int(high) included will be found (the smallest eigenvalue having index 0).

ABSTOL is the tolerance (see lapack doc for call dsygvx_ )

If you do not wish to compute eigenvectors, provide a null (empty) 'eigenvecs'.

Upon return, eigenvals will contain the M eigenvalues found in increasing order (it will be resized to M). And eigenvecs (unless initially null) will be resized to an MxN matrix containing the corresponding M eigenvectors in its *rows*.

Definition at line 222 of file plapack.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::isEmpty(), PLearn::TMat< T >::isNotEmpty(), lapack_Xsygvx_(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), and PLearn::TMat< T >::width().

Referenced by generalizedEigenVecOfSymmMat().

int PLearn::lapackSolveLinearSystem(Mat& At, Mat& Bt, TVec<int>& pivots)
 

Solves AX = B. This is a simple wrapper over the lapack routine. It expects At and Bt (the transposes of A and B) as input, as well as storage for a resulting pivots vector of ints of the same length as A. The call overwrites Bt, putting the transposed solution Xt in there, and At is also overwritten to contain the factors L and U from the factorization A = P*L*U (the unit diagonal elements of L are not stored). The lapack status is returned:
  • = 0: successful exit
  • < 0: if INFO = -i, the i-th argument had an illegal value
  • > 0: if INFO = i, U(i,i) is exactly zero; the factorization has been completed, but the factor U is exactly singular, so the solution could not be computed.
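
A hedged sketch of solving a single 2x2 system A.x = b under the transposed convention described above (all values are illustrative):

    Mat At(2, 2);                  // transpose of A, with A = [[1,2],[3,4]]
    At(0,0) = 1; At(0,1) = 3;
    At(1,0) = 2; At(1,1) = 4;
    Mat Bt(1, 2);                  // transpose of B; here B is the single column b = (5,11)
    Bt(0,0) = 5; Bt(0,1) = 11;
    TVec<int> pivots;
    int info = PLearn::lapackSolveLinearSystem(At, Bt, pivots);
    // if info == 0, Bt now holds Xt, i.e. the solution x = (1,2)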

Definition at line 293 of file plapack.cc.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), dgesv_(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, PLearn::TVec< T >::resize(), sgesv_(), and PLearn::TMat< T >::width().

Referenced by solveLinearSystem().

template<class num_t>
void lapackSVD(const TMat<num_t>& At, TMat<num_t>& Ut, TVec<num_t>& S, TMat<num_t>& V, char JOBZ = 'A', real safeguard = 1)
 

Performs the SVD decomposition A = U.S.Vt See SVD(...) for more details.

CAREFUL: the 'At' matrix argument is changed in the process!

This is a straightforward call to the lapack function. As Fortran uses column-major matrices and we use row-major matrices, it's really as if we had to pass the transpose of A (denoted At) and were getting back the transpose of U (Ut) and V.

If you want a version without the funny transposes, look at SVD (which simply calls this one with a different order of parameters...)
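
A hedged usage sketch, assuming a transpose() helper is available for Mat and that a row-major matrix A has already been filled:

    Mat A(5, 3);                 // filled elsewhere
    Mat At = transpose(A);       // will be overwritten by the call
    Mat Ut, V;
    Vec S;
    lapackSVD(At, Ut, S, V);     // JOBZ defaults to 'A'
    // then A ~= U.S.Vt with U = transpose(Ut) and S the vector of singular values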

Definition at line 386 of file plapack.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), endl(), PLearn::TMat< T >::isNotEmpty(), lapack_Xgesdd_(), PLearn::TMat< T >::length(), min(), PLearn::TMat< T >::mod(), PLERROR, PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), and PLearn::TMat< T >::width().

Referenced by PLearn::GaussianContinuum::compute_train_and_validation_costs(), PLearn::ProjectionErrorVariable::fprop(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::GaussianContinuum::make_random_walk(), and SVD().

string PLearn::left(const string s, size_t width, char padding = ' ')
 

aligns the given string in a cell having the given width
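
For example, left("abc", 6, '.') presumably yields "abc...", i.e. the string left-aligned and padded up to the requested width.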

Definition at line 56 of file stringutils.cc.

Referenced by PLearn::SoftSlopeVariable::bprop(), PLearn::SoftSlopeIntegralVariable::bprop(), PLearn::HardSlopeVariable::bprop(), d_hard_slope(), d_soft_slope(), displayHistogram(), PLearn::SoftSlopeVariable::fprop(), PLearn::SoftSlopeIntegralVariable::fprop(), PLearn::HardSlopeVariable::fprop(), hard_slope(), PLearn::HardSlopeVariable::HardSlopeVariable(), operator<(), operator<<(), operator<=(), operator>(), operator>=(), soft_slope(), soft_slope_integral(), soft_slope_limit(), PLearn::SoftSlopeIntegralVariable::SoftSlopeIntegralVariable(), PLearn::SoftSlopeVariable::SoftSlopeVariable(), split_on_first(), tabulated_soft_slope(), tabulated_soft_slope_integral(), and PLearn::UnaryHardSlopeVariable::UnaryHardSlopeVariable().

Var leftPseudoInverse(Var v) [inline]
 

Definition at line 76 of file LeftPseudoInverseVariable.h.

template<class T>
void leftPseudoInverse(const TMat<T>& m, TMat<T>& inv)
 

Definition at line 4663 of file TMat_maths_impl.h.

References inverse(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
TMat<T> leftPseudoInverse(TMat<T>& m)
 

Definition at line 4653 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

Referenced by PLearn::LeftPseudoInverseVariable::fprop(), and PLearn::ProductRandomVariable::invertible().

bool lessPair(pair<int, float>& p1, pair<int, float>& p2)
 

Definition at line 1034 of file GraphicalBiText.cc.

Var lift_output(Var linear_output, Var target) [inline]
 

Definition at line 71 of file LiftOutputVariable.h.

Referenced by PLearn::NNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), and PLearn::MultiInstanceNNet::build_().

StatsIt lift_stats(int the_index = 0, real the_fraction = 0.1) [inline]
 

Definition at line 415 of file StatsIterator.h.

Mat PLearn::linearRegression(VMat inputs, VMat outputs, real weight_decay)
 

Version that does all the memory allocations of XtX, XtY and theta_t. Returns theta_t.

Definition at line 974 of file VMat_maths.cc.

References linearRegression(), and PLearn::VMat::width().

real PLearn::linearRegression(VMat inputs, VMat outputs, real weight_decay, Mat theta_t, bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY, real sum_squared_Y, bool return_squared_loss = false, int verbose_computation_every = 0, bool cholesky = true)
 

Computes the result of the linear regression into theta_t. Parameters must have the following sizes: inputs(l,n), outputs(l,m), theta_t(n+1,m), XtX(n+1,n+1), XtY(n+1,m). The n+1 is due to the inclusion of the bias terms in the matrices (first row of theta_t). If use_precomputed_XtX_XtY is false, then XtX and XtY are computed; otherwise they are used as they are (typically passed precomputed from a previous call made with a possibly different weight_decay). Returns the average squared loss.
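
A hedged sketch of a call that lets the function compute XtX and XtY itself; the VMats 'inputs' and 'outputs' are assumed to exist already, with the same number l of rows:

    int n = inputs.width(), m = outputs.width();
    Mat theta_t(n + 1, m);                  // first row will hold the biases
    Mat XtX(n + 1, n + 1), XtY(n + 1, m);
    real sum_squared_Y = 0;
    real avg_squared_loss =
        PLearn::linearRegression(inputs, outputs, 1e-3 /* weight_decay */,
                                 theta_t, false /* use_precomputed_XtX_XtY */,
                                 XtX, XtY, sum_squared_Y,
                                 true /* return_squared_loss */);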

Definition at line 907 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), dot(), externalProductAcc(), PLearn::TMat< T >::length(), PLearn::VMat::length(), PLERROR, product(), solveLinearSystem(), solveLinearSystemByCholesky(), PLearn::TMat< T >::width(), PLearn::VMat::width(), and x.

template<class T>
void linearRegression(TVec<T> inputs, TVec<T> outputs, T weight_decay, TVec<T> theta_t)
 

Definition at line 5465 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and PLERROR.

template<class T>
void linearRegression(TMat<T> inputs, TMat<T> outputs, T weight_decay, TMat<T> theta_t)
 

Definition at line 5393 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, solveLinearSystemByCholesky(), and PLearn::TMat< T >::width().

Referenced by linearRegression(), and PLearn::LinearRegressor::train().

template<class T>
void linearRegressionNoBias(TMat<T> inputs, TMat<T> outputs, T weight_decay, TMat<T> weights)
 

Definition at line 5359 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, solveLinearSystemByCholesky(), transposeProduct(), and PLearn::TMat< T >::width().

template<class T>
void load(const string filepath, T& x) [inline]
 

Definition at line 1172 of file PStream.h.

References PLERROR, and x.

Referenced by PLearn::NearestNeighborPredictionCost::build_(), PLearn::CompactVMatrix::CompactVMatrix(), PLearn::LearnerCommand::compute_outputs(), PLearn::AutoSDBVMatrix::getMappings(), PLearn::VMatrix::getRanges(), PLearn::VMatrix::getStats(), PLearn::WordNetOntology::load(), PLearn::Object::load(), main(), PLearn::Learner::measure(), PLearn::RGBImageDB::RGBImageDB(), PLearn::RowMapSparseMatrix< real >::RowMapSparseMatrix(), PLearn::ReadAndWriteCommand::run(), PLearn::SparseVMatrix::SparseVMatrix(), PLearn::LearnerCommand::test(), PLearn::LearnerCommand::train(), PLearn::GaussianContinuum::train(), and PLearn::WordNetOntology::WordNetOntology().

Mat PLearn::loadADMat(const string filename)
 

Native AD format.

Definition at line 1011 of file MatIO.cc.

References PLearn::TMat< T >::data(), fread_float(), fread_int(), and PLERROR.

Vec PLearn::loadADVec(const string filename)
 

Definition at line 1031 of file MatIO.cc.

References PLearn::TVec< T >::data(), fread_float(), fread_int(), and PLERROR.

template<class T>
void PLearn::loadAscii(const string filename, TVec<T>& vec)
 

Definition at line 399 of file MatIO.h.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::clear(), PLearn::TVec< T >::end(), MISSING_VALUE, PLERROR, and PLearn::TVec< T >::resize().

template<class T>
void PLearn::loadAscii(const string filename, TMat<T>& mat)
 

Definition at line 481 of file MatIO.h.

References loadAscii().

template<class T>
void PLearn::loadAscii(const string filename, TMat<T>& mat, TVec<string>& fieldnames, TVec< map<string, real> >* map_sr = 0)
 

WARNING: use only for float, double, and int types; other types are not guaranteed to work. This is one of the 'intelligent' functions that will load a file in almost all ascii formats that ever existed in this lab.

Definition at line 195 of file MatIO.h.

References PLearn::TVec< T >::clear(), PLearn::TVec< T >::fill(), k, PLearn::TVec< T >::length(), map_sr, MISSING_VALUE, parseSizeFromRemainingLines(), pl_isnumber(), PLERROR, PLWARNING, removeblanks(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), skipBlanksAndComments(), split(), toint(), tostring(), val, and ws().

Referenced by cross_valid(), input2dSet(), loadAscii(), loadAsciiAsVMat(), loadATT800(), loadBreastCancer(), loadBreastCancerWisconsin(), loadCallxx(), loadDiabetes(), loadLetters(), loadMat(), and loadVec().

VMat PLearn::loadAsciiAsVMat(const string filename)
 

Load an ASCII file and return the corresponding VMat (this will be a MemoryVMatrix, since the entire file is loaded in memory).

Definition at line 124 of file VMat.cc.

References loadAscii(), map_sr, mtime(), and PLearn::TVec< T >::size().

Referenced by getDataSet(), and loadUCIAMat().

template<class T>
void loadAsciiSingleBinaryDescriptor(const string filename, TMat<T>& mat)
 

Load an ASCII matrix whose format is: (entry_name, long_binary_descriptor), with 'long_binary_descriptor' being of the form '001100101011', each character being an entry of the matrix.

(entry_name is ignored). Header must be: #size: length width

Definition at line 341 of file MatIO.h.

References PLERROR, removeblanks(), PLearn::TMat< T >::resize(), skipBlanksAndComments(), split(), toint(), and ws().

Referenced by getDataSet().

void PLearn::loadAsciiWithoutSize(const string filename, const Mat& mat)
 

Definition at line 875 of file MatIO.cc.

References PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

void PLearn::loadAsciiWithoutSize(const string filename, const Vec& vec)
 

Reads and writes an ascii file without the size header (assuming that the size, i.e. length() and width(), is already set).

Definition at line 834 of file MatIO.cc.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::MatlabInterface::eigs_r11(), and matlabR11eigs().

int PLearn::loadATT800(VMat& training_set, VMat& test_set)
 

Definition at line 303 of file databases.cc.

References PLearn::TMat< T >::column(), dbdir_name, PLearn::TMat< T >::length(), loadAscii(), normalizeDataSets(), shuffleRows(), PLearn::TMat< T >::subMatColumns(), PLearn::TMat< T >::subMatRows(), sum(), and PLearn::TMat< T >::width().

int PLearn::loadBreastCancer(VMat& training_set, VMat& validation_set, VMat& test_set, int ntrain, int nvalid, bool uniq = true)
 

These calls return the number of classes...

Definition at line 198 of file databases.cc.

References dbdir_name, PLearn::TMat< T >::length(), loadAscii(), normalizeDataSets(), shuffleRows(), and PLearn::TMat< T >::subMatRows().

VMat PLearn::loadBreastCancerWisconsin(bool normalize = true, bool uniq = true)
 

Definition at line 182 of file databases.cc.

References dbdir_name, loadAscii(), normalize(), normalizeDataSet(), shuffleRows(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

Referenced by loadClassificationDataset().

void PLearn::loadCallxx(int year, VMat& d)
 

Definition at line 667 of file databases.cc.

References c_str(), dbdir_name, and loadAscii().

void PLearn::loadClassificationDataset(const string dbname, int inputsize, int nclasses, VMat& trainset, VMat& testset, bool normalizeinputs, VMat& allset)
 

Definition at line 746 of file databases.cc.

References computeMeanAndStddev(), input2dSet(), PLearn::TVec< T >::length(), PLearn::TMat< T >::load(), loadBreastCancerWisconsin(), loadLetters(), loadMNIST(), loadUCI(), loadUSPS(), normalize(), PLERROR, remapLastColumn(), split(), PLearn::VMat::subMatRows(), PLearn::TVec< T >::subVec(), toint(), PLearn::TMat< T >::width(), and PLearn::VMat::width().

Referenced by getDataSet().

string loadClassificationDatasetHelp() [inline]
 

This will return a VMat with a target in the last column in {0,..,nclasses-1} (for binary classification, possible values are 0 and 1, not -1). Possible dbname values are: 2d, letters, breast, usps, mnist, usps0 ... usps9, nist0 ... nist9. The dbname can optionally be followed by :size, in which case only the first 'size' elements of trainset and testset will be kept.

Definition at line 127 of file databases.h.

Referenced by getDataSetHelp().

void PLearn::loadCorel(Mat& training_set, Mat& validation_set, Mat& test_set, int negative_class = 2, int positive_class = 3)
 

Definition at line 615 of file databases.cc.

References PLearn::TMat< T >::fill(), PLearn::TMat< T >::length(), loadCorelDatamat(), PLearn::TMat< T >::resize(), shuffleRows(), smoothCorelHisto(), PLearn::TMat< T >::subMat(), and PLearn::TMat< T >::width().

void PLearn::loadCorelDatamat(int classnum, Mat& train, Mat& valid, Mat& test)
 

Definition at line 514 of file databases.cc.

References c_str(), PLearn::TMat< T >::data(), dbdir_name, PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::resize(), reverse_float(), and PLearn::TMat< T >::width().

Referenced by loadCorel().

int PLearn::loadDiabetes(VMat& training_set, VMat& validation_set, VMat& test_set, int ntrain, int nvalid)
 

Definition at line 281 of file databases.cc.

References dbdir_name, PLearn::TMat< T >::length(), loadAscii(), normalizeDataSets(), shuffleRows(), and PLearn::TMat< T >::subMatRows().

VMat PLearn::loadDiabetes(bool normalize = true)
 

Definition at line 267 of file databases.cc.

References dbdir_name, loadAscii(), normalize(), normalizeDataSet(), shuffleRows(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

string PLearn::loadFileAsString(const string filepath)
 

Returns the whole content of the file as a string.

Definition at line 335 of file fileutils.cc.

References filesize(), and PLERROR.

Referenced by PLearn::VMatrix::lockMetaDataDir(), and PLearn::VMatrix::resolveFieldInfoLink().

void PLearn::loadGnuplot(const string filename, Mat& mat)
 

Format readable by gnuplot.

Definition at line 613 of file MatIO.cc.

References PLERROR, and PLearn::TMat< T >::resize().

Referenced by loadHousing().

VMat PLearn::loadHousing(bool normalize = true)
 

Definition at line 235 of file databases.cc.

References dbdir_name, loadGnuplot(), normalize(), normalizeDataSet(), and PLearn::TMat< T >::subMatColumns().

VMat PLearn::loadIonosphere()
 

Definition at line 259 of file databases.cc.

References dbdir_name, loadUCIMLDB(), and shuffleRows().

void PLearn::loadJPEGrgb(const string jpeg_filename, Mat& rgbmat, int& row_size, int scale = 1)
 

Read a file in JPEG format (the RGB components are read). rgbmat will be resized to an (npixels x 3) matrix, where the (R,G,B) pixels are ordered by rows of the original image. To figure out the actual image dimensions, the row size is also returned (so the number of columns is length()/row_size). An optional subsampling factor can be given (1, 2, 4 or 8). The R,G,B components always range from 0 to 255.

Definition at line 1484 of file MatIO.cc.

References PLearn::TMat< T >::data(), k, PLERROR, and PLearn::TMat< T >::resize().

void loadLetters(int inputsize, int nclasses, VMat& trainset, VMat& testset)
 

Definition at line 736 of file databases.cc.

References dbdir_name, loadAscii(), PLearn::TMat< T >::subMatRows(), and PLearn::TMat< T >::width().

int PLearn::loadLetters(VMat& training_set, VMat& validation_set, VMat& test_set, int n_letters, real validation_fraction = .2, real test_fraction = .4, bool do_shuffle = true)
 

Definition at line 494 of file databases.cc.

References PLearn::VMat::length(), loadLetters(), normalizeDataSets(), and PLearn::VMat::subMatRows().

VMat PLearn::loadLetters(int n_letters, bool do_shuffle)
 

Definition at line 455 of file databases.cc.

References PLearn::TMat< T >::copy(), dbdir_name, k, PLearn::TMat< T >::length(), loadAscii(), Mat, PLERROR, PLearn::TMat< T >::resize(), shuffleRows(), and PLearn::TMat< T >::width().

int PLearn::loadLetters(VMat& training_set, VMat& validation_set, VMat& test_set, char* letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ", real validation_fraction = .2, real test_fraction = .4, bool do_shuffle = true)
 

Definition at line 406 of file databases.cc.

References PLearn::TMat< T >::copy(), dbdir_name, k, PLearn::TMat< T >::length(), loadAscii(), Mat, normalizeDataSets(), PLearn::TMat< T >::resize(), shuffleRows(), strlen(), PLearn::TMat< T >::subMatRows(), and PLearn::TMat< T >::width().

VMat PLearn::loadLetters(const char* class0, const char* class1, bool normalize = true)
 

Definition at line 366 of file databases.cc.

References dbdir_name, PLearn::TMat< T >::length(), loadAscii(), normalize(), normalizeDataSet(), strlen(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

VMat PLearn::loadLetters(bool normalize = true)
 

Definition at line 351 of file databases.cc.

References dbdir_name, loadAscii(), normalize(), normalizeDataSet(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

Referenced by loadClassificationDataset(), and loadLetters().

void PLearn::loadMat(const string filename, TMat<double>& mat)
 

Definition at line 110 of file MatIO.cc.

References extract_extension(), loadAscii(), loadPMat(), and PLERROR.

void PLearn::loadMat(const string filename, TMat<float>& mat)
 

Tries to guess the format...

(quite dumb right now). This is currently what the old constructor with a string argument did.

Definition at line 88 of file MatIO.cc.

References extract_extension(), loadAscii(), loadPMat(), and PLERROR.

void loadMNIST(VMat& training_set, VMat& test_set) [inline]
 

Definition at line 71 of file NistDB.h.

Referenced by loadClassificationDataset().

Object* PLearn::loadObject(const string filename)
 

Loads an object from the given file (no macro-preprocessing is performed).

Definition at line 340 of file Object.cc.

References PLearn::Object::build(), PLERROR, and readObject().

Referenced by readObject(), train_and_test(), and use().

VMat PLearn::loadPimaIndians(bool normalize = true)
 

Definition at line 223 of file databases.cc.

References dbdir_name, loadUCIMLDB(), normalize(), normalizeDataSet(), shuffleRows(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

void PLearn::loadPMat(const string filename, TMat<double>& mat)
 

Definition at line 449 of file MatIO.cc.

References DATAFILE_HEADERLENGTH, fread_double(), fread_float(), header, PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::resize(), and PLearn::TMat< T >::width().

void PLearn::loadPMat(const string filename, TMat<float>& mat)
 

Definition at line 393 of file MatIO.cc.

References DATAFILE_HEADERLENGTH, fread_double(), fread_float(), header, PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::resize(), and PLearn::TMat< T >::width().

Referenced by PLearn::TMat< pair< real, real > >::load(), and loadMat().

void PLearn::loadPVec(const string filename, TVec<double>& vec)
 

Definition at line 276 of file MatIO.cc.

References PLearn::TVec< T >::data(), DATAFILE_HEADERLENGTH, fread_double(), fread_float(), header, PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

void PLearn::loadPVec(const string filename, TVec<float>& vec)
 

Definition at line 226 of file MatIO.cc.

References PLearn::TVec< T >::data(), DATAFILE_HEADERLENGTH, fread_double(), fread_float(), header, PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

Referenced by PLearn::TVec< pair< real, real > >::load(), and loadVec().

Mat PLearn::loadSNMat(const string filename)
 

SN Format.

Definition at line 917 of file MatIO.cc.

References fread_float(), fread_int(), and PLERROR.

Referenced by loadUSPS().

Vec PLearn::loadSNVec(const string filename)
 

Definition at line 972 of file MatIO.cc.

References PLearn::TVec< T >::data(), fread_float(), fread_int(), and PLERROR.

VMat PLearn::loadSonar()
 

Definition at line 251 of file databases.cc.

References dbdir_name, loadUCIMLDB(), and shuffleRows().

Mat PLearn::loadSTATLOG(const string filename, char**** to_symbols = 0, int** to_n_symbols = 0)
 

STATLOG machine-learning-database format. Format used for most of the STATLOG machine-learning databases. The missing value is represented with the '?' character in the source file, and with MISSING_VALUE in the Mat. If some symbols are detected then integer codes are assigned to them (by sorting them for each symbolic column in lexicographic order). The *to_symbols table has one element per column, each of which is a table of strings. The number of strings (i.e., symbols) for each column is given in the table *to_n_symbols.

Definition at line 1298 of file MatIO.cc.

References compare_string_pointers(), convert_STATLOG_BUF_LEN, cp(), PLearn::TMat< T >::data(), MISSING_VALUE, PLERROR, strlen(), and PLearn::TMat< T >::width().

VMat PLearn::loadToVMat(string file, string name, int window, int n_examples)
 

Definition at line 463 of file GraphicalBiText.cc.

References PLearn::ShellProgressBar::done(), PLearn::ShellProgressBar::draw(), endl(), PLearn::TMat< T >::length(), PLearn::VMat::length(), PLearn::ShellProgressBar::update(), and PLearn::VMat::width().

Referenced by PLearn::GraphicalBiText::init().

void PLearn::loadUCI(VMat& trainset, VMat& testset, VMat& allset, string db_spec, string id, bool& normalize, const string type)
 

Load the train, test and all datasets for a UCI database.

The 'normalize' parameter can be changed: if it is set to true in input, it may be changed to false when the method returns (this is because the data will already be normalized, and no additional normalization is needed).

Definition at line 875 of file databases.cc.

References dbdir_name, PLearn::VMat::length(), loadUCIAMat(), loadUCISet(), macroLoadObject(), normalize(), PLERROR, PLearn::TMat< T >::subMatColumns(), PLearn::TMat< T >::subMatRows(), PLearn::VMat::toMat(), vconcat(), and PLearn::VMat::width().

Referenced by loadClassificationDataset().

void PLearn::loadUCIAMat(VMat& data, string file, PP<UCISpecification> uci_spec)
 

Load an AMAT-format UCI dataset into the given VMatrix.

Definition at line 945 of file databases.cc.

References PLearn::VMat::length(), loadAsciiAsVMat(), PLERROR, PLearn::TVec< T >::resize(), PLearn::TVec< T >::subVec(), and PLearn::VMat::width().

Referenced by loadUCI().

Mat PLearn::loadUCIMLDB(const string filename, char**** to_symbols = 0, int** to_n_symbols = 0, TVec<int>* max_in_col = 0, TVec<string>* header_columns = 0)
 

UCI machine-learning-database format. Format used for most of the UCI machine-learning databases. The missing value is represented with the '?' character in the source file, and with MISSING_VALUE in the Mat. If some symbols are detected then integer codes are assigned to them (by sorting them for each symbolic column in lexicographic order). The *to_symbols table has one element per column, each of which is a table of strings. The number of strings (i.e., symbols) for each column is given in the table *to_n_symbols. Additionally, if provided, the 'max_in_col' vector will contain the (rounded down to the lowest integer) value of the maximum in each column (this will be -1 if there is no numerical value in the column). Also, if the 'header_columns' vector is provided, the first line is considered to be the header and the vector will contain the column names.

Definition at line 1057 of file MatIO.cc.

References PLearn::TVec< T >::append(), compare_string_pointers(), convert_UCIMLDB_BUF_LEN, cp(), PLearn::TMat< T >::data(), PLearn::TVec< T >::fill(), MISSING_VALUE, pl_isnumber(), PLERROR, PLearn::TVec< T >::resize(), strlen(), and PLearn::TMat< T >::width().

Referenced by loadIonosphere(), loadPimaIndians(), loadSonar(), and loadUCISet().

void PLearn::loadUCISet(VMat& data, string file, PP<UCISpecification> uci_spec)
 

Load a specific UCI dataset in the given VMatrix.

Definition at line 991 of file databases.cc.

References k, PLearn::TMat< T >::length(), loadUCIMLDB(), PLERROR, PLearn::TVec< T >::resize(), PLearn::TVec< T >::size(), PLearn::TVec< T >::subVec(), PLearn::VMat::width(), and PLearn::TMat< T >::width().

Referenced by loadUCI().

VMat PLearn::loadUSPS(bool use_smooth = true)
 

Definition at line 713 of file databases.cc.

References argmax(), dbdir_name, hconcat(), PLearn::TMat< T >::length(), and loadSNMat().

void PLearn::loadUSPS(VMat& trainset, VMat& testset, bool use_smooth = true)
 

Definition at line 677 of file databases.cc.

References argmax(), dbdir_name, hconcat(), PLearn::TMat< T >::length(), and loadSNMat().

Referenced by loadClassificationDataset().

void PLearn::loadVec(const string filename, TVec<double>& vec)
 

Definition at line 145 of file MatIO.cc.

References loadAscii(), loadPVec(), and PLERROR.

void PLearn::loadVec(const string filename, TVec<float>& vec)
 

Definition at line 133 of file MatIO.cc.

References loadAscii(), loadPVec(), and PLERROR.

VMat local_neighbors_differences(VMat source, int n_neighbors, bool concat = false, bool append_indexes = false) [inline]
 

Definition at line 121 of file LocalNeighborsDifferencesVMatrix.h.

References PLearn::LocalNeighborsDifferencesVMatrix::append_indexes, PLearn::LocalNeighborsDifferencesVMatrix::build(), concat(), PLearn::LocalNeighborsDifferencesVMatrix::concat_neighbors, PLearn::LocalNeighborsDifferencesVMatrix::n_neighbors, and PLearn::SourceVMatrix::source.

Referenced by PLearn::NearestNeighborPredictionCost::run(), PLearn::TangentLearner::train(), and PLearn::GaussianContinuum::train().

string PLearn::locateDatasetAliasesDir(const string dir_or_file_path = ".")
 

Looks for a 'dataset.aliases' file in the specified directory and its parent directories; returns the directory containing dataset.aliases (the returned string will be terminated by a slash), or an empty string if not found.

Definition at line 241 of file getDataSet.cc.

References abspath(), dot(), extract_directory(), isfile(), pathexists(), PLERROR, remove_trailing_slash(), and slash.

Referenced by getDatasetAliases().

Var log(Var v) [inline]
 

Definition at line 72 of file LogVariable.h.

RandomVar PLearn::log(RandomVar x)
 

natural logarithm function applied element-by-element

Definition at line 451 of file RandomVar.cc.

References x.

template<class T>
TVec<T> log(const TVec<T>& src) [inline]
 

Definition at line 876 of file TMat_maths_impl.h.

References compute_log(), and PLearn::TVec< T >::length().

real PLearn::log(real base, real a)
 

Definition at line 96 of file pl_math.cc.

Referenced by bnldev(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::GaussMix::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::AsciiVMatrix::build_(), choleskyInvert(), PLearn::GraphicalBiText::compute_likelihood(), compute_log(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), PLearn::GaussMix::computePosteriors(), PLearn::MixtureRandomVariable::ElogP(), PLearn::ExpRandomVariable::EMBprop(), entropy(), PLearn::SigmoidPrimitiveKernel::evaluate(), PLearn::LogOfGaussianDensityKernel::evaluate(), PLearn::GaussianDensityKernel::evaluate(), PLearn::ConvexBasisKernel::evaluate(), expdev(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::NegCrossEntropySigmoidVariable::fprop(), PLearn::CrossEntropyVariable::fprop(), gamdev(), gauss_log_density_stddev(), gauss_log_density_var(), gaussian_01(), geometric_mean(), PLearn::GaussianContinuum::get_image_matrix(), incomplete_beta(), PLearn::GraphicalBiText::init(), inverse_sigmoid(), inverse_softplus(), PLearn::ProductRandomVariable::invertible(), PLearn::ExpRandomVariable::invertible(), PLearn::HistogramDistribution::log_density(), PLearn::ConditionalDensityNet::log_density(), log_gamma(), logOfCompactGaussian(), logOfNormal(), PLearn::MultinomialRandomVariable::logP(), PLearn::MixtureRandomVariable::logP(), PLearn::DiagonalNormalRandomVariable::logP(), logtwo(), neg_log_pi(), normal(), pl_dgammlndz(), pl_gammln(), pl_gcf(), pl_gser(), poidev(), positive_dilogarithm(), PLearn::GaussMix::precomputeStuff(), PLearn::VMatLanguage::run(), safeflog(), PLearn::GaussMix::setInput(), PLearn::LogRandomVariable::setValueFromParentsValue(), sum_of_log(), PLearn::PowVariableVariable::symbolicBprop(), PLearn::PLogPVariable::symbolicBprop(), PLearn::GraphicalBiText::test_WSD(), PLearn::GaussianContinuum::train(), and PLearn::ClassifierFromDensity::train().

real PLearn::log_beta(real x, real y)
 

returns the natural logarithm of the beta function

Definition at line 89 of file random.cc.

References log_gamma(), and x.

Referenced by incomplete_beta().

real PLearn::log_gamma(real x)
 

returns the natural logarithm of the gamma function

Definition at line 69 of file random.cc.

References log(), and x.

Referenced by bnldev(), log_beta(), and poidev().

Var log_softmax(Var v) [inline]
 

Definition at line 72 of file LogSoftmaxVariable.h.

template<class T>
void log_softmax(const TVec<T>& x, TVec<T>& y)
 

Definition at line 96 of file TMat_maths_impl.h.

References logadd(), max(), and x.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), and PLearn::LogSoftmaxVariable::fprop().

Var logadd(Var input) [inline]
 

Definition at line 73 of file LogSumVariable.h.

Var logadd(Var& input1, Var& input2) [inline]
 

Definition at line 77 of file LogAddVariable.h.

template<class T>
T logadd(const TVec<T>& vec)
 

Definition at line 1282 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), LOG_INIT, logadd(), and sum().

real PLearn::logadd(real log_a, real log_b)
 

compute log(exp(log_a)+exp(log_b)) without losing too much precision
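
The usual way to achieve this is to factor out the larger argument, i.e. logadd(log_a, log_b) = max(log_a, log_b) + log(1 + exp(min(log_a, log_b) - max(log_a, log_b))), so that the exp() can never overflow; the reference to MINUS_LOG_THRESHOLD below suggests that the small term is simply dropped when the difference is too large.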

Definition at line 117 of file pl_math.cc.

References exp(), and MINUS_LOG_THRESHOLD.

Referenced by PLearn::ClassifierFromDensity::computeOutput(), PLearn::GaussMix::computePosteriors(), determine_density_integral_from_log_densities_on_grid(), DX_create_grid_outputs_file(), PLearn::MixtureRandomVariable::ElogP(), PLearn::LogSumVariable::fprop(), PLearn::LogAddVariable::fprop(), PLearn::GaussMix::log_density(), log_softmax(), logadd(), PLearn::MixtureRandomVariable::logP(), and PLearn::GaussMix::setInput().

real PLearn::logOfCompactGaussian(const Vec& x, const Vec& mu, const Vec& eigenvalues, const Mat& eigenvectors, real gamma = 1e-6, bool add_gamma_to_eigenval = false)
 

returns log P(x|gaussian) with a gaussian represented compactly by the first few eigenvalues and eigenvectors of its covariance matrix.

For numerical stability, you may consider adding some lambda to the diagonal of C

Normal(x; mu, C) = 1/sqrt((2*PI)^d * det(C)) * exp( -0.5 (x-mu)'.inv(C).(x-mu) ) = exp( logcoef - 0.5*q ), where logcoef = -0.5 * ( d*log(2*PI) + log(det(C)) ) and q = (x-mu)'.inv(C).(x-mu).

Let z = inv(C).(x-mu), i.e. z is the solution of C.z = x-mu. Then we have q = (x-mu)'.z.

So computing q is simply a matter of solving this linear system for z and then computing the dot product (x-mu)'.z.

Definition at line 167 of file distr_maths.cc.

References PLearn::TVec< T >::data(), dot(), PLearn::TVec< T >::length(), log(), PLearn::TVec< T >::resize(), square(), val, and x.

Referenced by PLearn::GaussianDistribution::log_density().

real PLearn::logOfNormal(const Vec& x, const Vec& mu, const Mat& C)
 

Definition at line 211 of file distr_maths.cc.

References choleskyDecomposition(), choleskySolve(), dot(), log(), PLearn::TVec< T >::resize(), substract(), and x.

Referenced by logPFittedGaussian().

Var PLearn::logP(ConditionalExpression conditional_expression, bool clearMarksUponReturn = true, RVInstanceArray* parameters_to_learn = 0)
 

Construct a Var that computes logP(RandomVariable == value | RHS ) in terms of the value Var and the Vars in the RHS, where RHS is a list of the form (X1==x1 && X2==x2 && X3==x3) where Xi are RandomVar's and xi are Var's which represent the value that are given to the conditioning variables Xi. Normally the marks used to identify RVs which are deterministically determined from the RHS are cleared upon return (unless specified with the optional 2nd argument).

Definition at line 616 of file RandomVar.cc.

References PLearn::ConditionalExpression::LHS, PLearn::ConditionalExpression::RHS, PLearn::TVec< RVInstance >::size(), PLearn::RVInstance::v, and PLearn::RVInstance::V.

Referenced by PLearn::MixtureRandomVariable::ElogP(), ElogP(), EM(), PLearn::MultinomialRandomVariable::logP(), PLearn::MixtureRandomVariable::logP(), PLearn::DiagonalNormalRandomVariable::logP(), PLearn::FunctionalRandomVariable::logP(), and PLearn::RandomVariable::P().

real PLearn::logPFittedGaussian const Vec &  x,
const Mat &  X,
real  lambda
 

Fits a gaussian to the points in X (computing its mean and covariance matrix, and adding lambda to the diagonal of that covariance matrix), then calls logOfNormal to return log(p(x | the_gaussian)).

Definition at line 242 of file distr_maths.cc.

References addToDiagonal(), computeMeanAndCovar(), logOfNormal(), and x.

real PLearn::logsub real  log_a,
real  log_b
 

compute log(exp(log_a)-exp(log_b)) without losing too much precision

Definition at line 135 of file pl_math.cc.

References exp(), FEQUAL, MINUS_LOG_THRESHOLD, and PLERROR.
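
Spelled out (notation ours), the stable form is

    logsub(log_a, log_b) = log_a + log( 1 - exp(log_b - log_a) )

which is only defined for log_a >= log_b; this presumably explains the PLERROR reference above.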

real PLearn::logtwo real  a  ) 
 

Definition at line 101 of file pl_math.cc.

References log(), and LOG_2.

bool PLearn::looksNumeric const char *  s  ) 
 

Tells whether this string looks like a numeric entity.

Definition at line 107 of file TypesNumeriques.cc.

References containsChar(), and DIGITsymbols.

Referenced by PLearn::MatlabInterface::eigs_r11(), main(), matlabR11eigs(), and numericType().

string PLearn::lowerstring const string s  ) 
 

convert a string to all lowercase

Definition at line 263 of file stringutils.cc.

Referenced by PLearn::SimpleDB< KeyType, QueryResult >::loadSchema(), and main().

vector< string > PLearn::lsdir const string dirpath  ) 
 

Returns a list of all entries in the given directory (omitting entries "." and ".."). If the directory cannot be opened, an error is issued. The returned entries are not full paths.

Definition at line 175 of file fileutils.cc.

References PLERROR.

Referenced by lsdir_fullpath(), and train_and_test().

vector< string > PLearn::lsdir_fullpath const string dirpath  ) 
 

Same as lsdir, except dirpath is prepended to the entries' names.

Definition at line 244 of file fileutils.cc.

References addprefix(), lsdir(), remove_trailing_slash(), and slash.

Referenced by force_rmdir().

template<class T>
void LU_decomposition TMat< T > &  A,
TVec< T > &  Trow,
int detsign,
TVec< T > *  p = 0
 

Definition at line 5173 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), k, PLearn::TMat< T >::length(), maxabs(), PLearn::TMat< T >::mod(), PLERROR, PLearn::TMat< T >::swapRows(), and PLearn::TMat< T >::width().

Referenced by det().

Object * PLearn::macroLoadObject const string filename  ) 
 

Same as the two-argument macroLoadObject, but without the need to pass a variables map.

Definition at line 361 of file Object.cc.

References macroLoadObject().

Object * PLearn::macroLoadObject const string filename,
map< string, string > &  vars
 

Same as loadObject, but first performs macro-processing on the file. vars may be initialised with the values of some variables, and upon return it will also contain newly $DEFINED variables.

Definition at line 352 of file Object.cc.

References PLearn::Object::build(), readFileAndMacroProcess(), and readObject().

Referenced by loadUCI(), and macroLoadObject().

template<class T>
T mahalanobis_distance const TVec< T > &  input,
const TVec< T > &  meanvec,
const TMat< T > &  inversecovmat
 

Definition at line 4282 of file TMat_maths_impl.h.

References dot(), and product().

string PLearn::makeExplicitPath const string filename  ) 
 

returns "./"+filename if filename is relative to current dir

Definition at line 573 of file fileutils.cc.

References dot(), removeblanks(), slash, and slash_char.

Referenced by PLearn::VVMatrix::build_().

string PLearn::makeFileNameValid const string filename  ) 
 

Definition at line 525 of file fileutils.cc.

References extract_directory(), extract_extension(), extract_filename_without_extension(), pathexists(), PLWARNING, and tostring().

Referenced by PLearn::VMatrix::getSFIFFilename(), PLearn::VMatrix::isSFIFDirect(), and PLearn::VMatrix::setSFIFFilename().

template<class T>
void makeItSymmetric const TMat< T > &  mat,
max_dif
 

Definition at line 2820 of file TMat_maths_impl.h.

References PLearn::TMat< T >::isSquare(), PLearn::TMat< T >::length(), PLERROR, PLWARNING, and PLearn::TMat< T >::width().

Mat makeMat int  length,
int  width,
const string values
[inline]
 

Convenience construction from a string; allows writing things such as Mat m = makeMat(2, 2, "1 2 3 4").

Definition at line 58 of file MatIO.h.

template<class T>
void makeRowsSumTo1 const TMat< T > &  mat  ) 
 

Definition at line 3450 of file TMat_maths_impl.h.

References divide(), PLearn::TMat< T >::length(), and sum().

Vec makeVec int  length,
const string values
[inline]
 

Definition at line 61 of file MatIO.h.

void PLearn::manual_seed long  x  ) 
 

Initializes the random number generator with the given long "x".

Definition at line 185 of file random.cc.

References iset, the_seed, and x.

Referenced by PLearn::RepeatSplitter::build_(), PLearn::AdaBoost::forget(), PLearn::VVMatrix::generateVMatIndex(), PLearn::TangentLearner::initializeParams(), PLearn::NNet::initializeParams(), PLearn::NeuralNet::initializeParams(), PLearn::NeighborhoodSmoothnessNNet::initializeParams(), PLearn::MultiInstanceNNet::initializeParams(), PLearn::GaussianContinuum::initializeParams(), PLearn::ConditionalDensityNet::initializeParams(), PLearn::UniformDistribution::resetGenerator(), PLearn::SpiralDistribution::resetGenerator(), PLearn::GaussMix::resetGenerator(), PLearn::GaussianDistribution::resetGenerator(), PLearn::ConditionalDensityNet::resetGenerator(), seed(), and PLearn::EntropyContrast::train().

Var margin_perceptron_cost Var  output,
Var  target,
real  margin
[inline]
 

Definition at line 83 of file MarginPerceptronCostVariable.h.

Referenced by PLearn::NNet::build_().

RandomVar PLearn::marginalize const RandomVar &  RV,
const RandomVar &  hiddenRV
 

Integrates the RV over the given hiddenRV and returns the resulting new RandomVariable. This may be difficult to do in general...

Definition at line 653 of file RandomVar.cc.

References PLERROR.

Referenced by PLearn::MixtureRandomVariable::ElogP(), PLearn::MultinomialRandomVariable::logP(), PLearn::MixtureRandomVariable::logP(), PLearn::DiagonalNormalRandomVariable::logP(), and PLearn::FunctionalRandomVariable::logP().

template<class T>
T matColumnDotVec const TMat< T > &  mat,
int  j,
const TVec< T >  v
 

return dot product of j-th column with vector v

Definition at line 2731 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), and PLERROR.

int PLearn::matInvert Mat &  in,
Mat &  inverse
 

This function computes the inverse of a matrix.

WARNING: the input matrix 'in' is overwritten in the process.

Definition at line 219 of file plapack.cc.

References PLearn::TMat< T >::data(), dgetrf_(), dgetri_(), endl(), inverse(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, sgetrf_(), sgetri_(), and PLearn::TMat< T >::width().

Referenced by PLearn::PCA::train().
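
A hedged usage sketch (A is assumed to be a square Mat already filled in); since 'in' is overwritten, invert a copy when the original must be kept:

    Mat A_copy = A.copy();                    // protect A from being overwritten
    Mat A_inverse(A.length(), A.width());     // receives the result
    matInvert(A_copy, A_inverse);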

void matlabR11eigs RowMapSparseMatrix< double > &  A,
Mat  eigen_vectors,
Vec  eigen_values,
string  which_eigenvalues = "LM"
 

Compute k eigen-values / eigen-vectors of a sparse symmetric matrix using the eigs program of matlab-r11 (see PLearn/Contrib/matlab/eigs_r11.m). The 'which_eigenvalues' argument specifies which eigenvalues are desired:
  a number : the k eigen-values closest to that number
  "LM"     : Largest Magnitude (the default)
  "SM"     : Smallest Magnitude
  "LR"     : Largest Real part
  "SR"     : Smallest Real part
  "BE"     : Both Ends; computes k/2 eigenvalues from each end of the spectrum (one more from the high end if k is odd)
where k is the length of the eigen_values vector. If eigen_vectors.length()==0 then only the eigen_values are computed. N.B. in comparison with other methods available in PLearn or elsewhere, this function is particularly useful when dealing with symmetric sparse matrices whose smallest eigen-pairs are sought.

void matlabR11eigs RowMapSparseMatrix< real > &  A,
Mat  eigen_vectors,
Vec  eigen_values,
string  which_eigenvalues
 

Definition at line 144 of file MatlabInterface.cc.

References PLearn::TmpFilenames::addFilename(), PLearn::RowMapSparseMatrix< T >::exportToMatlabReadableFormat(), extract_filename(), header, PLearn::Popen::in, PLearn::MatlabInterface::launchAndWaitFor(), PLearn::RowMapSparseMatrix< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), loadAsciiWithoutSize(), looksNumeric(), PLearn::MatlabInterface::matlab, PLearn::MatlabInterface::matlab_file_header, PLERROR, PLWARNING, remove_extension(), PLearn::TMat< T >::resize(), PLearn::TVec< T >::resize(), toint(), and tostring().

void PLearn::matlabSave const string dir,
const string plot_title,
const Vec &  xValues,
const Mat &  yValues,
const Vec &  add_col,
const Vec &  bounds,
TVec< string legend = TVec<string>(),
bool  save_plot = true
 

This is the *real* matlabSave function.

1) If xValues is empty, the yValues are plotted against the row indices.

2) If xValues is not empty and its length is not equal to the length of yValues, then its length must be one and the value xValues[0] will be the start index for the xValues.

Definition at line 710 of file MatIO.cc.

References abspath(), add(), append_slash(), endl(), force_mkdir(), is_missing(), PLearn::TVec< T >::isNotEmpty(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), MISSING_VALUE, PLERROR, PLearn::TVec< T >::resize(), tostring(), underscore_to_space(), and PLearn::TMat< T >::width().
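
A hedged usage sketch (the Mat 'scores' is a placeholder for whatever is to be plotted); an empty xValues Vec triggers case 1) above, and add_col and bounds are left empty:

    Vec no_x, no_add_col, no_bounds;          // default-constructed, i.e. empty
    matlabSave("results", "training_curves",
               no_x, scores, no_add_col, no_bounds);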

void PLearn::matlabSave const string dir,
const string plot_title,
const Mat &  data,
const Vec &  add_col,
const Vec &  bounds,
TVec< string legend = TVec<string>(),
bool  save_plot = true
 

Simply calls the main matlabSave function with an empty xValues Vec (see below).

Definition at line 695 of file MatIO.cc.

References matlabSave().

void PLearn::matlabSave const string dir,
const string plot_title,
const Vec &  xValues,
const Vec &  yValues,
const Vec &  add_col,
const Vec &  bounds,
string  lengend = "",
bool  save_plot = true
 

Definition at line 681 of file MatIO.cc.

References PLearn::TVec< T >::append(), PLearn::TVec< T >::length(), and matlabSave().

void PLearn::matlabSave const string dir,
const string plot_title,
const Vec &  data,
const Vec &  add_col,
const Vec &  bounds,
string  lengend = "",
bool  save_plot = true
 

The following two simply call the matrix version after transforming the Vec into a one-column Mat. See below.

Definition at line 667 of file MatIO.cc.

References PLearn::TVec< T >::append(), and PLearn::TVec< T >::length().

Referenced by PLearn::SequentialModelSelector::matlabSave(), PLearn::SequentialLearner::matlabSave(), and matlabSave().

Var matrixElements Var  expression,
const Var &  i,
const Var &  j,
int  ni,
int  nj,
const VarArray &  parameters
[inline]
 

Definition at line 94 of file MatrixElementsVariable.h.

Var matrixIndex Var  mat,
Var  index
[inline]
 

Definition at line 77 of file ColumnIndexVariable.h.

References PLERROR, and PLearn::Var::width().

Referenced by neg_log_pi().

Var matrixInverse Var  v  )  [inline]
 

Definition at line 73 of file MatrixInverseVariable.h.

template<class T>
T matRowDotVec const TMat< T > &  mat,
int  i,
const TVec< T >  v
 

return dot product of i-th row with vector v

Definition at line 2714 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, PLearn::TMat< T >::rowdata(), and PLearn::TMat< T >::width().

template<class T>
void matRowsDots TVec< T >  v,
const TMat< T > &  A,
const TMat< T > &  B
 

return dot products of i-th row of A with i-th row of B in vector v

Definition at line 2748 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void matRowsDotsAcc TVec< T >  v,
const TMat< T > &  A,
const TMat< T > &  B
 

accumulate the dot products of the i-th row of A with the i-th row of B into vector v (accumulating variant of matRowsDots)

Definition at line 2776 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Var max Var  v  )  [inline]
 

Definition at line 73 of file MaxVariable.h.

Var max Var  v1,
Var  v2
[inline]
 

Definition at line 81 of file Max2Variable.h.

template<class T>
T max const TMat< T > &  mat  ) 
 

Definition at line 3930 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void max const TVec< T > &  source1,
source2,
TVec< T > &  destination
 

Definition at line 1569 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), MAX, and PLearn::TVec< T >::resize().

template<class T>
void max const TVec< T > &  source1,
const TVec< T > &  source2,
TVec< T > &  destination
 

Definition at line 1552 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), MAX, PLERROR, and PLearn::TVec< T >::resize().

template<class T>
T max const TVec< T > &  vec  ) 
 

Definition at line 56 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::UniformizeVMatrix::build_(), PLearn::MovingAverageVMatrix::build_(), PLearn::FinancePreprocVMatrix::build_(), PLearn::CumVMatrix::build_(), PLearn::AddCostToLearner::build_(), PLearn::RealMapping::checkConsistency(), columnMax(), PLearn::AddCostToLearner::computeCostsFromOutputs(), PLearn::GaussMix::computeLogLikelihood(), computeRange(), displayDecisionSurface(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), PLearn::ConcatRowsVMatrix::ensureMappingsConsistency(), PLearn::ClassMarginCostFunction::evaluate(), PLearn::ClassDistanceProportionCostFunction::evaluate(), PLearn::Gnuplot::featureplot(), PLearn::ConcatRowsVMatrix::fullyCheckMappings(), PLearn::GeneralizedOneHotVMatrix::GeneralizedOneHotVMatrix(), PLearn::GaussMix::generateFromGaussian(), PLearn::JoinVMatrix::getNewRow(), PLearn::Gnuplot::histoplot(), log_softmax(), PLearn::RealMapping::maxMappedToValue(), PLearn::ScaledGradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimizeN(), PLearn::GaussMix::precomputeStuff(), PLearn::ArrayAllocatorTrivial< T, SizeBits >::resize(), PLearn::ArrayAllocator< T, SizeBits >::resize(), PLearn::RGBImageVMatrix::RGBImageVMatrix(), rowMax(), PLearn::TestDependenciesCommand::run(), PLearn::ShellProgressBar::set(), PLearn::PDistribution::setConditionalFlagsWithoutUpdate(), PLearn::SourceVMatrix::setMetaInfoFromSource(), PLearn::ShellProgressBar::ShellProgressBar(), softmax(), PLearn::GraphicalBiText::test_WSD(), PLearn::TestDependenciesCommand::TestDependenciesCommand(), PLearn::EmbeddedSequentialLearner::train(), PLearn::GaussMix::updateFromConditionalSorting(), PLearn::Function::verifyGradient(), PLearn::Function::verifyHessian(), PLearn::Function::verifyrfprop(), viewVMat(), and vmatmain().

real PLearn::max_cdf_diff Vec &  v1,
Vec &  v2
 

Returns the max of the difference between the empirical cdfs of 2 series of values. Side-effect: the call sorts v1 and v2.

Definition at line 140 of file stats_utils.cc.

References PLearn::TVec< T >::length(), and sortElements().

Referenced by KS_test().
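
In other words (notation ours), this is the Kolmogorov-Smirnov statistic between the two samples,

    D = max over x of | F1(x) - F2(x) |

where F1 and F2 are the empirical cdfs of v1 and v2.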

StatsIt max_stats  )  [inline]
 

Definition at line 412 of file StatsIterator.h.

template<class T>
T maxabs const TVec< T > &  vec  ) 
 

Definition at line 497 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by det(), and LU_decomposition().

Var PLearn::mean Var  v  ) 
 

******************************* user-friendly Var interface *

Definition at line 57 of file Var_utils.cc.

References sum().

template<class T>
T mean const TMat< T > &  mat  ) 
 

Definition at line 3808 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
T mean const TVec< T > &  vec,
bool  ignore_missing = false
 

if ignore_missing==true, then the mean is computed by ignoring the possible MISSING_VALUE in the Vec.

if ignore_missing==false, then MISSING_VALUE is returned if one element of the Vec is MISSING_VALUE.

Definition at line 301 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), MISSING_VALUE, and PLERROR.

Referenced by columnWeightedVariance(), computeConditionalMeans(), computeLocalPrincipalComponents(), PLearn::GaussMix::computeMeansAndCovariances(), computeRowMean(), PLearn::DiagonalNormalRandomVariable::DiagonalNormalRandomVariable(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), PLearn::EmpiricalDistribution::expectation(), fill_random_normal(), PLearn::StddevStatsIterator::finish(), PLearn::StatsCollector::getAllValuesMapping(), PLearn::FinancePreprocVMatrix::getNewRow(), normal(), PLearn::ScaledGradientOptimizer::optimize(), paired_t_test(), PLearn::SequentialModelSelector::paired_t_test(), rowMean(), PLearn::TestDependenciesCommand::run(), PLearn::SequentialModelSelector::sequenceCost(), PLearn::GaussianProcessRegressor::setInput_const(), PLearn::TestDependenciesCommand::TestDependenciesCommand(), PLearn::GaussianProcessRegressor::train(), PLearn::EmpiricalDistribution::variance(), and vmatmain().

StatsIt mean_stats  )  [inline]
 

Definition at line 408 of file StatsIterator.h.

Referenced by PLearn::Learner::Learner().

Var meanOf Var  output,
const VarArray &  inputs,
VMat  distr,
int  nsamples,
VarArray  parameters = VarArray()
[inline]
 

Deprecated old version, do not use!

Definition at line 107 of file SumOfVariable.h.

References meanOf().

Var meanOf VMat  distr,
Func  f,
int  nsamples
[inline]
 

meanOf

Definition at line 103 of file SumOfVariable.h.

Var meanOf VMat  distr,
Func  f,
int  nsamples,
int  input_size
[inline]
 

Definition at line 100 of file MatrixSumOfVariable.h.

Referenced by EM(), meanOf(), PLearn::TangentLearner::train(), PLearn::NNet::train(), PLearn::NeuralNet::train(), PLearn::GaussianContinuum::train(), and PLearn::ConditionalDensityNet::train().

template<class T>
T median const TVec< T > &  vec  )  [inline]
 

returns the median value of vec

Definition at line 1880 of file TMat_maths_impl.h.

References kthOrderedElement(), and PLearn::TVec< T >::length().

void * PLearn::MemoryMap const char *  filename,
tFileHandle &  handle,
bool  read_only,
off_t &  filesize
 

returns a pointer to the memory-mapped file or 0 if it fails for some reason.

Definition at line 116 of file MemoryMap.cc.

References filesize(), open, PLERROR, and tFileHandle.

Referenced by PLearn::Storage< pair< real, real > >::Storage().

void PLearn::memoryUnmap void *  mapped_pointer,
tFileHandle  handle,
int  length
 

Definition at line 141 of file MemoryMap.cc.

Referenced by PLearn::Storage< pair< real, real > >::pointTo(), and PLearn::Storage< pair< real, real > >::~Storage().

void merge Set  a,
Set  b,
Set  res
[inline]
 

Definition at line 86 of file Set.h.

References PLearn::Set::begin(), and PLearn::Set::end().

Var min Var  v  )  [inline]
 

Definition at line 71 of file MinVariable.h.

template<class T>
T min const TMat< T > &  mat  ) 
 

Definition at line 3914 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void min const TVec< T > &  source1,
source2,
TVec< T > &  destination
 

Definition at line 1600 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), MIN, and PLearn::TVec< T >::resize().

template<class T>
void min const TVec< T > &  source1,
const TVec< T > &  source2,
TVec< T > &  destination
 

Definition at line 1583 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), MIN, PLERROR, and PLearn::TVec< T >::resize().

template<class T>
T min const TVec< T > &  vec  ) 
 

Definition at line 482 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::UniformizeVMatrix::build_(), PLearn::AddCostToLearner::build_(), columnMin(), PLearn::GeodesicDistanceKernel::computeNearestGeodesicNeighbour(), computeRange(), displayDecisionSurface(), PLearn::GeodesicDistanceKernel::evaluate(), PLearn::ConjGradientOptimizer::fletcherSearchMain(), PLearn::GeneralizedOneHotVMatrix::GeneralizedOneHotVMatrix(), PLearn::MovingAverageVMatrix::getNewRow(), PLearn::JoinVMatrix::getNewRow(), lapackSVD(), PLearn::ConjGradientOptimizer::optimize(), PLearn::ConjGradientOptimizer::optimizeN(), PLearn::PDateTime::PDateTime(), PLearn::ArrayAllocatorTrivial< T, SizeBits >::resize(), PLearn::ArrayAllocator< T, SizeBits >::resize(), PLearn::RGBImageVMatrix::RGBImageVMatrix(), rowMin(), PLearn::TestDependenciesCommand::run(), PLearn::NearestNeighborPredictionCost::run(), PLearn::ShellProgressBar::set(), PLearn::GeodesicDistanceKernel::setDataForKernelMatrix(), PLearn::ShellProgressBar::ShellProgressBar(), sortColumns(), PLearn::TestDependenciesCommand::TestDependenciesCommand(), PLearn::GaussianDistribution::train(), PLearn::ConditionalDensityNet::train(), viewVMat(), and vmatmain().

StatsIt min_stats  )  [inline]
 

Definition at line 411 of file StatsIterator.h.

template<class T>
T minabs const TVec< T > &  vec,
int  index = int()
 

Definition at line 533 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Var minus Var  v,
Var  w
[inline]
 

Definition at line 77 of file MinusVariable.h.

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_(), and PLearn::FieldConvertCommand::FieldConvertCommand().

RandomVar PLearn::mixture RVArray  components,
RandomVar  log_weights
 

A mixture of distributions, with the given components and the convex weights given by weights = softmax(log_weights). Note that the log_weights argument represents unnormalized log-probabilities (i.e. normalization is automatically done inside the mixture).
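
The normalization mentioned above is the usual softmax,

    weights[i] = exp(log_weights[i]) / sum_j exp(log_weights[j])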

Definition at line 736 of file RandomVar.cc.

Referenced by PLearn::RVArrayRandomElementRandomVariable::logP().

time_t PLearn::mtime const string path  ) 
 

returns the time of last modification of file (or 0 if file does not exist).

Definition at line 162 of file fileutils.cc.

Referenced by PLearn::MultiInstanceVMatrix::build_(), PLearn::FileVMatrix::build_(), PLearn::DiskVMatrix::build_(), PLearn::VVMatrix::buildFilteredVMatFromVPL(), PLearn::VVMatrix::createPreproVMat(), getDataSetDate(), getDateOfCode(), PLearn::VVMatrix::getDateOfVMat(), PLearn::VMatrix::getStats(), PLearn::VVMatrix::isPrecomputedAndUpToDate(), loadAsciiAsVMat(), PLearn::FilteredVMatrix::openIndex(), PLearn::VMatLanguage::preprocess(), and PLearn::AutoRunCommand::run().

Var multiclass_loss Var  network_output,
Var  targets
[inline]
 

Definition at line 81 of file MulticlassLossVariable.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), and PLearn::NeighborhoodSmoothnessNNet::build_().

RandomVar PLearn::multinomial RandomVar  log_probabilities  ) 
 

A discrete probability distribution which assigns probabilities[i] = softmax(log_probabilities)[i] to each of the discrete values i=0, 1, ... N-1. Note that the argument represents unnormalized log-probabilities (i.e. normalization is automatically done inside the multinomial).

Definition at line 741 of file RandomVar.cc.

int PLearn::multinomial_sample const Vec &  distribution  ) 
 

Returns a random deviate from a discrete distribution given explicitly by 'distribution'.

Definition at line 536 of file random.cc.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and uniform_sample().

Referenced by PLearn::MultinomialSampleVariable::fprop(), PLearn::GaussMix::generateFromGaussian(), and PLearn::CompactVMatrix::perturb().
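
A minimal sketch of the usual inverse-cdf walk that such a function performs (illustration only; the actual PLearn code uses uniform_sample() and TVec, and 'distribution' is assumed to sum to 1):

    #include <vector>
    #include <cstdlib>

    int multinomial_sample_sketch(const std::vector<double>& distribution)
    {
        double u = std::rand() / (double)RAND_MAX; // stand-in for uniform_sample()
        double cumsum = 0.0;
        for (size_t i = 0; i < distribution.size(); ++i) {
            cumsum += distribution[i];             // running cdf
            if (u <= cumsum)
                return (int)i;
        }
        return (int)distribution.size() - 1;       // guard against rounding error
    }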

template<class T>
void multiply const TMat< T > &  result,
const TMat< T > &  x,
scale
 

Definition at line 3462 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::compact_begin(), PLearn::TMat< T >::compact_end(), PLearn::TMat< T >::end(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void multiply const TVec< T > &  source1,
const TVec< T > &  source2,
TVec< T > &  destination
 

Definition at line 1418 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

template<class T>
void multiply const TVec< T > &  source1,
source2,
TVec< T > &  destination
[inline]
 

destination = source1*source2

Definition at line 230 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

Referenced by computeMeanAndVariance(), divide(), PLearn::MultinomialRandomVariable::EMUpdate(), PLearn::MixtureRandomVariable::EMUpdate(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), PLearn::MinusRandomVariable::EMUpdate(), PLearn::PlusRandomVariable::EMUpdate(), PLearn::ProjectionErrorVariable::fprop(), PLearn::GaussianProcessRegressor::inverseCovTimesVec(), operator *(), operator/(), and PLearn::ConstantRegressor::train().

template<class T>
void multiplyAcc const TMat< T > &  mat,
const TMat< T > &  x,
const TMat< T > &  y
 

Definition at line 3533 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void multiplyAcc const TMat< T > &  mat,
const TMat< T > &  x,
scale
 

Definition at line 3506 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::compact_begin(), PLearn::TMat< T >::compact_end(), PLearn::TMat< T >::end(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void multiplyAcc const TVec< T > &  vec,
const TVec< T > &  x,
const TVec< T > &  y
 

vec[i] += x[i]*y[i];

Definition at line 2110 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

template<class T>
void multiplyAcc const TVec< T > &  vec,
const TVec< T > &  x,
scale
 

vec[i] += x[i]*scale;

Definition at line 2043 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::ProjectionErrorVariable::bprop(), PLearn::LogSumVariable::bprop(), PLearn::AffineTransformWeightPenalty::bprop(), computeInputMean(), computeInputMeanAndCovar(), computeInputMeanAndVariance(), computeWeightedMeanAndCovar(), diagonalizeSubspace(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::ProductRandomVariable::EMBprop(), PLearn::MinusRandomVariable::EMBprop(), PLearn::PlusRandomVariable::EMBprop(), PLearn::VMatrix::evaluateKernelWeightedTargetSum(), projectOnOrthogonalSubspace(), and PLearn::PCA::reconstruct().

template<class T>
void multiplyAdd const TVec< T > &  source1,
const TVec< T > &  source2,
source3,
TVec< T > &  destination
 

Definition at line 1435 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

Referenced by PLearn::GaussianProcessRegressor::inverseCovTimesVec(), and PLearn::ConstantRegressor::train().

template<class T>
void multiplyScaledAdd const TVec< T > &  source,
a,
b,
TVec< T > &  destination
 

Definition at line 1453 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

void PLearn::multivariate_normal Vec &  x,
const Vec &  mu,
const Vec &  e_values,
const Mat &  e_vectors,
Vec &  z
 

generate a vector x sampled from the normal with mean mu and covariance matrix A = evectors * diagonal(e_values) * evectors' (the normal(0,I) originally sampled to obtain x is stored in z).

Unlike the other variants of this function, this one does not allocate anything.
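
Spelled out with the argument names above (and treating e_vectors as the matrix whose columns are the eigenvectors), the draw is

    x = mu + e_vectors . diag(sqrt(e_values)) . z,    z ~ Normal(0, I)

so that Cov(x) = e_vectors . diag(e_values) . e_vectors' = A.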

Definition at line 444 of file plapack.cc.

References gaussian_01(), PLearn::TVec< T >::length(), PLearn::TVec< T >::resize(), sqrt(), and x.

Vec PLearn::multivariate_normal const Vec &  mu,
const Vec &  e_values,
const Mat &  e_vectors
 

generate 1 vector sampled from the normal with mean mu and covariance matrix A = evectors * diagonal(e_values) * evectors'

Definition at line 429 of file plapack.cc.

References gaussian_01(), PLearn::TVec< T >::length(), sqrt(), and x.

Vec PLearn::multivariate_normal const Vec &  mu,
const Mat &  A
 

generate a vector sampled from the normal with mean vector mu and covariance matrix A

Definition at line 424 of file plapack.cc.

References multivariate_normal(), and PLearn::TMat< T >::toVec().

Mat PLearn::multivariate_normal const Vec &  mu,
const Mat &  A,
int  N
 

generate N vectors sampled from the normal with mean vector mu and covariance matrix A

Definition at line 411 of file plapack.cc.

References PLearn::TMat< T >::appendRow(), PLearn::TMat< T >::copy(), eigen_SymmMat(), and PLearn::TVec< T >::length().

Referenced by PLearn::ConditionalGaussianDistribution::generate(), and multivariate_normal().

void PLearn::mv const string file  ) 
 

Calls the system mv command with the string 'file' as parameters.

Definition at line 390 of file fileutils.cc.

Referenced by PLearn::DiagonalNormalRandomVariable::EMUpdate(), and PLearn::VVMatrix::generateVMatIndex().

void PLearn::mvforce const string file  ) 
 

Calls the system mv command with the string 'file' as parameters; will not prompt before overwriting.

Definition at line 396 of file fileutils.cc.

Referenced by PLearn::VVMatrix::createPreproVMat().

real mypow real  x,
real  p
[inline]
 

Definition at line 258 of file pl_math.h.

References pow(), and x.

Referenced by PLearn::PowVariable::bprop(), PLearn::SDBVMFieldSignedPower::convertField(), dist(), PLearn::PowVariable::fprop(), norm(), powdistance(), pownorm(), weighted_distance(), and weighted_powdistance().

int n_choose int  M,
int  N
[inline]
 

Return M choose N, i.e., M! / ( N! (M-N)! ).

Definition at line 375 of file pl_math.h.

References k.
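
A minimal sketch of the standard multiplicative way to compute this (illustration only; the PLearn inline version may differ), which keeps every intermediate value integral:

    long n_choose_sketch(int M, int N)
    {
        if (N > M - N) N = M - N;                // use symmetry: C(M,N) == C(M,M-N)
        long result = 1;
        for (int k = 1; k <= N; ++k)
            result = result * (M - N + k) / k;   // exact integer division at each step
        return result;                           // e.g. n_choose_sketch(5, 2) == 10
    }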

Var PLearn::neg_log_pi Var  p,
Var  index
 

Definition at line 60 of file Var_utils.cc.

References log(), and matrixIndex().

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), and PLearn::NeighborhoodSmoothnessNNet::build_().

CostFunc neg_output_costfunc  )  [inline]
 

returns -output[0]. This is for density estimators whose use(x) method typically computes log(p(x))

Definition at line 70 of file NegOutputCostFunction.h.

Referenced by PLearn::Distribution::Distribution().

Var negateElements Var  v  )  [inline]
 

Definition at line 76 of file NegateElementsVariable.h.

template<class T>
void negateElements const TMat< T > &  m  ) 
 

x'_ij = -x_ij;

Definition at line 4632 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void negateElements const TVec< T > &  vec  ) 
 

Definition at line 1037 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Referenced by PLearn::ShiftAndRescaleVMatrix::build_(), normalize(), PLearn::PLS::train(), and PLearn::PCA::train().

Var negative Var  v  )  [inline]
 

Definition at line 79 of file CutAboveThresholdVariable.h.

References cutAboveThreshold().

real negative real  a  )  [inline]
 

Definition at line 98 of file pl_math.h.

unsigned char new_get_compr_data_type float  x  ) 
 

Definition at line 733 of file pl_io.cc.

References is_missing(), and x.

unsigned char new_get_compr_data_type double  x,
double  tolerance
 

Definition at line 720 of file pl_io.cc.

References is_missing(), and x.

Referenced by new_write_compressed().

size_t PLearn::new_read_compressed FILE *  in,
real vec,
int  l,
bool  swap_endians = false
 

Reads the l doubles in the new compressed format from 'in'. Returns the number of bytes read.

Set swap_endians to true if the data was written on a machine with a different endianness than the current one, so that the endians get swapped.

Definition at line 623 of file pl_io.cc.

References endianswap(), MISSING_VALUE, mode, and val.

Referenced by PLearn::DiskVMatrix::getNewRow().

size_t PLearn::new_write_compressed FILE *  out,
real vec,
int  l,
double  tolerance = 1e-6,
bool  swap_endians = false
 

Writes the l doubles in new compressed format to out.

Returns the number of bytes written. 'tolerance' is the maximum allowed error when storing doubles as floats. Set swap_endians to true if the data is to be written in a different byte-order from this machine's.

Definition at line 819 of file pl_io.cc.

References new_get_compr_data_type(), new_write_mode_and_size(), new_write_raw_data_as(), and PLERROR.

Referenced by PLearn::DiskVMatrix::appendRow().

size_t new_write_mode_and_size FILE *  out,
bool  insert_zeroes,
unsigned int  N,
unsigned char  data_type
 

returns number of bytes written

Definition at line 745 of file pl_io.cc.

References mode.

Referenced by new_write_compressed().

size_t new_write_raw_data_as FILE *  out,
real vec,
int  l,
unsigned char  data_type
 

Definition at line 782 of file pl_io.cc.

References is_missing(), and val.

Referenced by new_write_compressed().

string PLearn::newFilename const string  directory = "/tmp/",
const string  prefix = "",
bool  is_directory = false
 

Returns a temporary file (or directory) name suitable for a unique (one time) use.


Definition at line 498 of file fileutils.cc.

References mode, PLERROR, and remove_trailing_slash().

Referenced by PLearn::TmpFilenames::addFilename(), PLearn::MatlabInterface::eigs_r11(), PLearn::FilterSplitter::getSplit(), PLearn::MatlabInterface::launch(), and PLearn::MatlabInterface::launchAndWaitFor().

Mat * PLearn::newIndexedMatArray int  n,
Mat &  m,
int  indexcolumn
 

Returns an array of n matrices that are submatrices of m, such that marray[i] contains all the rows of m that had value i in their indexcolumn. The matrices of the returned array do not contain the indexcolumn. Side effect: the rows of m are sorted according to indexcolumn.

Definition at line 100 of file Mat.cc.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLERROR, sortRows(), PLearn::TMat< T >::subMatColumns(), PLearn::TMat< T >::subMatRows(), and PLearn::TMat< T >::width().

Mat * PLearn::newMatArray int  n,
int  the_length,
int  the_width
 

Definition at line 92 of file Mat.cc.

References PLearn::TMat< T >::resize().

Mat * PLearn::newMatArray int  n  ) 
 

Definition at line 87 of file Mat.cc.

Object* newObject const string representation  )  [inline]
 

Creates a new object according to the given representation.

This actually calls readObject on an istrstream, so anything understandable by readObject can be used here

Definition at line 609 of file Object.h.

References readObject().

Referenced by PLearn::VVMatrix::build_(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::VVMatrix::createPreproVMat(), and getDataSet().

PPointableSet* newSet  )  [inline]
 

Definition at line 83 of file Set.h.

Vec * PLearn::newVecArray int  n,
int  the_length
 

Definition at line 70 of file Mat.cc.

References PLearn::TVec< T >::resize().

Vec * PLearn::newVecArray int  n  ) 
 

Definition at line 65 of file Mat.cc.

Var nll_semispherical_gaussian Var  tangent_plane_var,
Var  mu_var,
Var  sm_var,
Var  sn_var,
Var  neighbors_dist_var,
Var  p_target_var,
Var  p_neighbors_var,
Var  noise,
Var  mu_noisy,
bool  use_noise = false,
real  epsilon = 1e-6
[inline]
 

Definition at line 91 of file NllSemisphericalGaussianVariable.h.

Referenced by PLearn::GaussianContinuum::build_().

VarArray PLearn::nonInputParentsOfPath VarArray  inputs,
VarArray  outputs
 

Isn't this useless? as we have a constructor of VarArray from Var that should be called automatically !!!???? (Pascal).

returns the set of all the direct parents of the vars on the path from inputs to outputs. (inputs are not included, neither are the direct parents of inputs unless they are also direct parents of other Vars in the path)

Definition at line 1082 of file VarArray.cc.

References PLearn::VarArray::clearMark(), PLearn::VarArray::parents(), propagationPath(), and PLearn::VarArray::setMark().

Referenced by PLearn::ConcatOfVariable::ConcatOfVariable(), PLearn::MatrixSumOfVariable::MatrixSumOfVariable(), PLearn::Function::operator()(), propagationPathToParentsOfPath(), PLearn::SumOfVariable::SumOfVariable(), PLearn::SumOverBagsVariable::SumOverBagsVariable(), PLearn::UnfoldedFuncVariable::UnfoldedFuncVariable(), and PLearn::UnfoldedSumOfVariable::UnfoldedSumOfVariable().

VarArray PLearn::nonInputSources const VarArray &  inputs,
const VarArray &  outputs
 

Returns all sources that influence the outputs, except those that influence them only through the inputs.

Definition at line 1121 of file VarArray.cc.

References PLearn::VarArray::setMark(), PLearn::VarArray::sources(), and PLearn::VarArray::unmarkAncestors().

Referenced by PLearn::Function::build_().

template<class T>
TVec<T> nonZero const TVec< T > &  vec  ) 
 

Returns a vector composed of the values of vec that are different from 0.

Definition at line 1942 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
TVec<T> nonZeroIndices TVec< bool v  ) 
 

Definition at line 1659 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TVec< T >::resize(), and val.

template<class T>
TVec<T> nonZeroIndices TVec< T >  v  ) 
 

Definition at line 1643 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TVec< T >::resize(), and val.

Var PLearn::norm Var  input,
real  n = 2.0
 

Definition at line 81 of file Var_utils.cc.

References abs(), pow(), sqrt(), square(), and sum().

template<class T>
T norm const TVec< T > &  vec  )  [inline]
 

Definition at line 705 of file TMat_maths_impl.h.

References norm().

template<class T>
T norm const TVec< T > &  vec,
double  n
 

Definition at line 694 of file TMat_maths_impl.h.

References mypow(), pownorm(), and sqrt().

Referenced by PLearn::SubsampleVariable::bprop(), PLearn::IsomapTangentLearner::computeOutput(), PLearn::GaussianContinuum::computeOutput(), diagonalizeSubspace(), distance(), PLearn::NormalizedDotProductKernel::evaluate(), PLearn::ProjectionErrorVariable::fprop(), PLearn::GaussMix::generateFromGaussian(), PLearn::VecStatsCollector::getCorrelation(), GramSchmidtOrthogonalization(), norm(), normalize(), PLearn::ScaledGradientOptimizer::optimize(), PLearn::GaussianProcessRegressor::QFormInverse(), subsample(), and PLearn::Function::verifyGradient().

RandomVar PLearn::normal RandomVar  mean,
RandomVar  log_variance,
real  minimum_standard_deviation = 1e-6
 

diagonal normal with general parameters given by the provided RandomVar's. Actual variance is variance = minimum_variance + exp(log_variance)

Definition at line 729 of file RandomVar.cc.

References mean().

RandomVar PLearn::normal real  mean = 0,
real  standard_dev = 1,
int  d = 1,
real  minimum_standard_deviation = 1e-6
 

Functions to build a normal distribution.

multivariate d-dimensional diagonal normal with NON-RANDOM and CONSTANT parameters (default means = 0, default standard deviations = 1). Actual variance is variance = minimum_variance + exp(log_variance).

Definition at line 712 of file RandomVar.cc.

References log(), mean(), PLERROR, and variance().

Referenced by PLearn::FieldConvertCommand::FieldConvertCommand().

real normal_cdf real  x  )  [inline]
 

Definition at line 72 of file pl_erf.h.

References gauss_01_cum(), and x.

real normal_sample  )  [inline]
 

Definition at line 89 of file random.h.

References gaussian_01().

Referenced by PLearn::GaussianContinuum::make_random_walk(), and PLearn::CompactVMatrix::perturb().

VMat normalize VMat  d,
int  inputsize
[inline]
 

Definition at line 143 of file VMat_maths.h.

References PLearn::VMat::length(), and normalize().

VMat PLearn::normalize VMat  d,
int  inputsize,
int  ntrain
 

Here, mean and stddev are estimated on d.subMat(0,0,ntrain,inputsize).

Definition at line 681 of file VMat_maths.cc.

References computeMeanAndStddev(), normalize(), and PLearn::VMat::subMat().

VMat PLearn::normalize VMat  d,
Vec  meanvec,
Vec  stddevvec
 

Subtracts the mean and divides by the stddev. meanvec and stddevvec can be shorter than d.width() (as we probably only want to 'normalize' the 'input' part of the sample, and not the 'output' that is typically present in the last columns).

Definition at line 666 of file VMat_maths.cc.

References invertElements(), PLearn::TVec< T >::length(), negateElements(), PLearn::TVec< T >::subVec(), and PLearn::VMat::width().
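
A hedged usage sketch, assuming a computeMeanAndStddev(VMat, Vec&, Vec&) overload as suggested by the references of the other normalize variants above; statistics are estimated on the input part of the first ntrain rows, as in the (d, inputsize, ntrain) variant:

    Vec meanvec, stddevvec;
    computeMeanAndStddev(d.subMat(0, 0, ntrain, inputsize), meanvec, stddevvec);
    VMat normalized_d = normalize(d, meanvec, stddevvec);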

template<class T>
void normalize TMat< T > &  m,
double  n
 

Divides each row by its n-norm.

Definition at line 4390 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and normalize().

template<class T>
void normalize TMat< T > &  m  ) 
 

Subtracts the mean and divides by the stddev (these are estimated globally).

Definition at line 4355 of file TMat_maths_impl.h.

References computeMeanAndStddev(), and PLearn::TMat< T >::width().

template<class T>
void normalize const TVec< T > &  vec,
double  n
 

Definition at line 708 of file TMat_maths_impl.h.

References norm().

Referenced by condprob_cost(), entropy(), PLearn::Gnuplot::histoplot(), PLearn::KernelProjection::KernelProjection(), loadBreastCancerWisconsin(), loadClassificationDataset(), loadDiabetes(), loadHousing(), loadLetters(), loadPimaIndians(), loadUCI(), PLearn::PLS::NIPALSEigenvector(), normalize(), PLearn::PCA::PCA(), splitTrainValidTest(), PLearn::PLS::train(), and PLearn::PCA::train().

template<class T>
void normalizeColumns const TMat< T > &  m  ) 
 

Divides each column by the sum of its elements.

Definition at line 4378 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), sum(), and PLearn::TMat< T >::width().

void normalizeDataSet Mat &  m  ) 
 

Definition at line 122 of file databases.cc.

References computeMeanAndStddev(), and PLearn::TMat< T >::width().

Referenced by loadBreastCancerWisconsin(), loadDiabetes(), loadHousing(), loadLetters(), and loadPimaIndians().

void PLearn::normalizeDataSets Mat &  training_set,
Mat &  test_set
 

normalize both training_set and test_set according to mean and stddev computed on training_set

Definition at line 108 of file databases.cc.

References computeMeanAndStddev(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

void PLearn::normalizeDataSets VMat &  training_set,
VMat &  validation_set,
VMat &  test_set
 

Definition at line 90 of file databases.cc.

References computeMeanAndStddev(), PLearn::VMat::subMatColumns(), and PLearn::VMat::width().

void PLearn::normalizeDataSets Mat &  training_set,
Mat &  validation_set,
Mat &  test_set
 

Definition at line 73 of file databases.cc.

References computeMeanAndStddev(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

Referenced by loadATT800(), loadBreastCancer(), loadDiabetes(), loadLetters(), and splitTrainValidTest().

template<class T>
void normalizeRows const TMat< T > &  m  ) 
 

Divides each row by the sum of its elements.

Definition at line 4366 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and sum().

istream nullin null_streambuf  ) 
 

iostream nullinout null_streambuf  ) 
 

ostream nullout null_streambuf  ) 
 

int PLearn::numericType const char *  word  ) 
 

assigns a code to a "word"

Definition at line 244 of file TypesNumeriques.cc.

References PLearn::tRule::attributs, compactRepresentation(), looksNumeric(), NT_NOT_NUMERIC, NT_PERCENT, NT_UNKNOWN_NUMERIC_TYPE, PLearn::tRule::pattern, and rules.

real oEM ConditionalExpression  conditional_expression,
RVArray  parameters_to_learn,
VMat  distr,
int  n_samples,
Optimizer &  MStepOptimizer,
int  max_n_iterations,
real  relative_improvement_threshold = 0.001,
bool  compute_final_train_NLL = true
 

real oEM ConditionalExpression  conditional_expression,
RVArray  parameters_to_learn,
VMat  distr,
int  n_samples,
int  max_n_iterations,
real  relative_improvement_threshold = 0.001,
bool  compute_final_train_NLL = true
 

int PLearn::old_plearn_main int  argc,
char **  argv
 

Definition at line 502 of file old_plearn_main.cc.

References cross_valid(), displayObjectHelp(), endl(), exitmsg(), getDataSetHelp(), getMultipleModelAliases(), isfile(), seed(), stringvector(), toint(), train_and_test(), usage(), and use().

void OldDisplayVarGraph const VarArray &  outputs,
bool  display_values,
real  boxwidth,
const char *  the_filename,
bool  must_wait,
VarArray  display_only_these
 

Definition at line 414 of file DisplayUtils.cc.

References PLearn::TmpFilenames::addFilename(), PLearn::TVec< Var >::append(), center(), PLearn::GhostScript::centerShow(), PLearn::VarArray::clearMark(), PLearn::TVec< Var >::contains(), PLearn::GhostScript::drawArrow(), PLearn::GhostScript::drawBox(), PLearn::Var::length(), PLearn::VarArray::parents(), PLearn::VarArray::setMark(), PLearn::TVec< Var >::size(), PLearn::VarArray::sources(), PLearn::VarArray::unmarkAncestors(), PLearn::GhostScript::usefont(), PLearn::Var::width(), and x.

template<class T>
TVec<T> one_hot int  length,
int  hotpos,
coldvalue,
hotvalue
 

Definition at line 1326 of file TMat_maths_impl.h.

References fill_one_hot().

VMat onehot VMat  d,
int  nclasses,
real  cold_value = 0.0,
real  hot_value = 1.0
[inline]
 

Definition at line 107 of file OneHotVMatrix.h.

Var onehot int  the_length,
Var  hotindex,
real  coldvalue = 0.0,
real  hotvalue = 1.0
[inline]
 

Definition at line 86 of file OneHotVariable.h.

Referenced by PLearn::FieldConvertCommand::FieldConvertCommand().

Var onehot_squared_loss Var  network_output,
Var  classnum,
real  coldval = 0.,
real  hotval = 1.
[inline]
 

Definition at line 88 of file OneHotSquaredLoss.h.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), and PLearn::NeighborhoodSmoothnessNNet::build_().

template<class T>
TVec< T > PLearn::operator & const T &  x,
const TVec< T > &  v
 

Definition at line 63 of file PExperiment.cc.

References PLearn::TVec< T >::size(), PLearn::TVec< T >::subVec(), and x.

Array<VMat> operator & const VMat &  d1,
const VMat &  d2
[inline]
 

******************************** User-friendly VMat interface *

Definition at line 163 of file VMat.h.

VarArray operator & Var  v1,
Var  v2
[inline]
 

* To allow for easy building of VarArray *

Definition at line 296 of file VarArray.h.

StatsItArray operator & const StatsIt &  statsit1,
const StatsIt &  statsit2
[inline]
 

Definition at line 405 of file StatsIterator.h.

Array<Ker> operator & const Ker &  k1,
const Ker &  k2
[inline]
 

******************** inline Ker operators

Definition at line 237 of file Kernel.h.

template<class T>
Array< TMat<T> > operator & const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

This will allow a convenient way of building arrays of Matrices by writing ex: m1&m2&m3.

Definition at line 218 of file Array_impl.h.

template<class T>
Array< TVec<T> > operator & const TVec< T > &  m1,
const TVec< T > &  m2
[inline]
 

This will allow a convenient way of building arrays of Vecs by writing e.g.: v1&v2&v3.

Definition at line 146 of file Array_impl.h.

template<class T>
Array<T> operator & const Array< T > &  a,
const vector< T > &  ar
 

Definition at line 124 of file Array_impl.h.

References PLearn::TVec< T >::append(), and PLearn::TVec< T >::size().

template<class T>
Array<T> operator & const Array< T > &  a,
const Array< T > &  ar
 

Definition at line 115 of file Array_impl.h.

References PLearn::TVec< T >::append(), and PLearn::TVec< T >::size().

template<class T>
Array<T> operator & const Array< T > &  a,
const T &  elem
 

Definition at line 106 of file Array_impl.h.

References PLearn::TVec< T >::append(), and PLearn::TVec< T >::size().

template<class T>
Array<T> operator & const T &  elem,
const Array< T > &  a
 

Definition at line 90 of file Array_impl.h.

Referenced by PLearn::VarArray::operator &(), PLearn::StatsItArray::operator &(), and PLearn::RVInstanceArray::operator &&().

template<class T>
Array<T>& operator &= Array< T > &  a,
const vector< T > &  ar
 

Definition at line 102 of file Array_impl.h.

References PLearn::TVec< T >::append().

template<class T>
Array<T>& operator &= Array< T > &  a,
const Array< T > &  ar
 

Definition at line 98 of file Array_impl.h.

References PLearn::TVec< T >::append().

template<class T>
Array<T>& operator &= Array< T > &  a,
const T &  elem
 

Definition at line 94 of file Array_impl.h.

References PLearn::TVec< T >::append().

Referenced by PLearn::VarArray::operator &=(), and PLearn::StatsItArray::operator &=().

Var PLearn::operator * Var  v1,
Var  v2
 

element-wise multiplications

v1 and v2 must have the same dimensions (it is checked by the constructor of TimesVariable)

Definition at line 155 of file Var_operators.cc.

Var PLearn::operator * real  cte,
Var  v
 

Definition at line 150 of file Var_operators.cc.

Var PLearn::operator * Var  v,
real  cte
 

Definition at line 147 of file Var_operators.cc.

RandomVar PLearn::operator * RandomVar  a,
RandomVar  b
 

********************** GLOBAL FUNCTIONS **********************

Return a RandomVar that is the product of two RandomVar's. If a and b are matrices, this is a matrix product. If one of them is a vector it is interpreted as a column vector (nx1), but if both are vectors, this is a dot product (a is interpreted as a 1xn). The result contains a ProductRandomVariable, which can be "trained" by EM: if one of the two arguments is non-random and is considered to be a parameter, it can be learned (e.g. for implementing a linear regression).

Definition at line 408 of file RandomVar.cc.

References PLERROR.

template<class T>
TMat<T> operator * const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

does an elementwise multiplication of every row by v

Definition at line 5719 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy().

template<class T>
TMat<T> operator * const T &  scalar,
const TMat< T > &  m
[inline]
 

Definition at line 3496 of file TMat_maths_impl.h.

template<class T>
TMat<T> operator * const TMat< T > &  m,
const T &  scalar
[inline]
 

Definition at line 3488 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), multiply(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> operator * const TVec< T > &  v1,
v2
 

Definition at line 1225 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and multiply().

template<class T>
TVec<T> operator * scalar,
const TVec< T > &  v
 

Definition at line 1217 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and multiply().

template<class T>
void operator *= const TMat< T > &  m1,
const TMat< T > &  m2
 

does an elementwise multiplication of m1 by m2

Definition at line 4474 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator *= const TMat< T > &  m,
const TVec< T > &  v
 

does an elementwise multiplication of every row by v

Definition at line 4459 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator *= const TMat< T > &  m,
scalar
 

Definition at line 4409 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void operator *= const TVec< T > &  vec,
factor
 

Definition at line 2021 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
void operator *= const TVec< T > &  vec1,
const TVec< T > &  vec2
 

Definition at line 845 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Var PLearn::operator!= Var  v1,
Var  v2
 

Definition at line 195 of file Var_operators.cc.

References isequal().

Var operator!= real  cte,
Var  v1
[inline]
 

result[i] = 1 if v1[i]!=cte, 0 otherwise

Definition at line 85 of file UnequalConstantVariable.h.

Var operator!= Var  v1,
real  cte
[inline]
 

result[i] = 1 if v1[i]!=cte, 0 otherwise

Definition at line 81 of file UnequalConstantVariable.h.

template<class T, unsigned N, class TTrait>
bool operator!= const TinyVector< T, N, TTrait > &  x,
const TinyVector< T, N, TTrait > &  y
[inline]
 

Other operators (should be defined in std::rel_ops, but does not work properly with gcc yet).

Definition at line 168 of file TinyVector.h.

References x.

template<class T, unsigned SizeBits, class Allocator>
bool operator!= const SmallVector< T, SizeBits, Allocator > &  x,
const SmallVector< T, SizeBits, Allocator > &  y
[inline]
 

Returns !(x == y).

Definition at line 182 of file SmallVector.h.

References x.

template<class T>
TVec<T> operator% const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 1205 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and PLERROR.

Var PLearn::operator+ Var  v1,
Var  v2
 

Definition at line 81 of file Var_operators.cc.

Var PLearn::operator+ real  cte,
Var  v
 

Definition at line 75 of file Var_operators.cc.

Var PLearn::operator+ Var  v,
real  cte
 

Definition at line 72 of file Var_operators.cc.

RandomVar PLearn::operator+ RandomVar  a,
RandomVar  b
 

Return a RandomVar that is the element-by-element sum of two RandomVar's. The result contains a PlusRandomVariable, which can be "trained" by EM: if one of the two arguments is non-random and is considered to be a parameter, it can be learned (e.g. for implementing a linear regression).

Definition at line 432 of file RandomVar.cc.

template<class T>
TMatRowsIterator<T> operator+ typename TMatRowsIterator< T >::difference_type  n,
const TMatRowsIterator< T > &  y
 

Definition at line 57 of file TMatRowsIterator_impl.h.

template<class T>
TMatRowsAsArraysIterator<T> operator+ typename TMatRowsAsArraysIterator< T >::difference_type  n,
const TMatRowsAsArraysIterator< T > &  y
 

Definition at line 57 of file TMatRowsAsArraysIterator_impl.h.

template<class T>
TMatColRowsIterator<T> operator+ typename TMatColRowsIterator< T >::difference_type  n,
const TMatColRowsIterator< T > &  y
 

Definition at line 58 of file TMatColRowsIterator_impl.h.

template<class T>
TMat<T> operator+ const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

return m + v (added to every ROW of m)

Definition at line 5709 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy().

template<class T>
TMat<T> operator+ const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 4566 of file TMat_maths_impl.h.

References add(), PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> operator+ const TVec< T > &  v1,
v2
 

Definition at line 1197 of file TMat_maths_impl.h.

References add(), and PLearn::TVec< T >::length().

template<class T>
TVec<T> operator+ v1,
const TVec< T > &  v2
 

Definition at line 1189 of file TMat_maths_impl.h.

References add(), and PLearn::TVec< T >::length().

template<class T>
TVec<T> operator+ const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 1177 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and PLERROR.

SparseMatrix PLearn::operator+ const SparseMatrix &  A,
const SparseMatrix &  B
 

add two sparse matrices (of same dimensions but with values in possibly different places)

Definition at line 259 of file SparseMatrix.cc.

References PLearn::SparseMatrix::beginRow, PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLearn::SparseMatrix::endRow, PLearn::TVec< T >::length(), PLearn::SparseMatrix::n_rows, PLERROR, PLearn::TVec< T >::resize(), PLearn::SparseMatrix::row, and PLearn::SparseMatrix::values.

PDate operator+ const PDate &  pdate,
int  ndays
[inline]
 

add a number of days

Definition at line 145 of file PDate.h.

References PLearn::PDate::toJulianDay().

void PLearn::operator+= Var &  v1,
const Var &  v2
 

Definition at line 99 of file Var_operators.cc.

References PLearn::PP< Variable >::isNull().

template<class T>
void operator+= const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 4524 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator+= const TMat< T > &  m,
const TVec< T > &  v
 

adds v to every row

Definition at line 4429 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator+= const TMat< T > &  m,
T  scalar
 

Definition at line 4400 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void operator+= const TVec< T > &  vec1,
const TVec< T > &  vec2
[inline]
 

element-wise +

Definition at line 2031 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
void operator+= const TVec< T > &  vec,
T  scalar
 

Definition at line 1064 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

VarArray PLearn::operator- const VarArray &  a,
const VarArray &  b
 

returns all variables of a that are not in b

Definition at line 1104 of file VarArray.cc.

References PLearn::TVec< Var >::append(), and PLearn::TVec< Var >::size().

Var PLearn::operator- real  cte,
Var  v
 

Definition at line 142 of file Var_operators.cc.

Var PLearn::operator- Var  v  ) 
 

Definition at line 128 of file Var_operators.cc.

Var PLearn::operator- Var  v1,
Var  v2
 

Definition at line 110 of file Var_operators.cc.

Var PLearn::operator- Var  v,
real  cte
 

Definition at line 78 of file Var_operators.cc.

RandomVar PLearn::operator- RandomVar  a,
RandomVar  b
 

Return a MatRandomVar that is the element-by-element difference of two RandomVar's.

The result contains a MinusRandomVariable.

Definition at line 437 of file RandomVar.cc.

template<class T>
TMat<T> operator- const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

return m - v (subtracted from every ROW of m)

Definition at line 5714 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy().

template<class T>
TMat<T> operator- const TMat< T > &  m  ) 
 

return a negated copy of m

Definition at line 4619 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
TMat<T> operator- const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 4558 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), substract(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> operator- const TVec< T > &  v1,
T  v2
 

Definition at line 1169 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and substract().

template<class T>
TVec<T> operator- T  v1,
const TVec< T > &  v2
 

Definition at line 1160 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length().

template<class T>
TVec<T> operator- const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 1148 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and PLERROR.

template<class T>
TVec<T> operator- TVec< T >  vec  ) 
 

Definition at line 1076 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Ker operator- const Ker &  k  )  [inline]
 

Definition at line 76 of file NegKernel.h.

References k.

double operator- const PDateTime &  to_date,
const PDateTime &  from_date
[inline]
 

subtract two dates, the result being counted in days (+ fractions)

Definition at line 145 of file PDateTime.h.

PDate operator- const PDate &  pdate,
int  ndays
[inline]
 

subtract a number of days

Definition at line 152 of file PDate.h.

References PLearn::PDate::toJulianDay().

int operator- const PDate &  to_date,
const PDate &  from_date
[inline]
 

subtract two dates, the result being counted in days.

Definition at line 139 of file PDate.h.

References PLearn::PDate::toJulianDay().
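
The semantics can be illustrated without the PLearn API: the difference follows directly from the Julian day numbers of the two dates. A minimal sketch, where julian_day() stands in for PLearn::PDate::toJulianDay() (hypothetical names, not the actual source):

// Hypothetical sketch: date difference in days via Julian day numbers.
struct SimpleDate {
    int julian_day_;                      // days since the Julian epoch
    int julian_day() const { return julian_day_; }
};

// Mirrors operator-(const PDate&, const PDate&): result counted in days.
inline int days_between(const SimpleDate& to_date, const SimpleDate& from_date)
{
    return to_date.julian_day() - from_date.julian_day();
}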

void PLearn::operator-= Var &  v1,
const Var &  v2
 

Definition at line 131 of file Var_operators.cc.

References PLearn::PP< Variable >::isNull().

template<class T>
void operator-= const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 4541 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator-= const TMat< T > &  m,
const TVec< T > &  v
 

subtracts v from every row

Definition at line 4444 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator-= const TMat< T > &  m,
T  scalar
[inline]
 

Definition at line 4418 of file TMat_maths_impl.h.

template<class T>
void operator-= const TVec< T > &  vec,
T  scalar
 

Definition at line 1072 of file TMat_maths_impl.h.

template<class T>
void operator-= const TVec< T > &  vec1,
const TVec< T > &  vec2
 

Definition at line 836 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Var PLearn::operator/ Var  v1,
Var  v2
 

Definition at line 184 of file Var_operators.cc.

References invertElements(), PLearn::Var::length(), and PLearn::Var::width().

Var PLearn::operator/ real  cte,
Var  v
 

Definition at line 176 of file Var_operators.cc.

References invertElements().

Var PLearn::operator/ Var  v,
real  cte
 

Definition at line 173 of file Var_operators.cc.

Func PLearn::operator/ Func  f,
real  value
 

Definition at line 86 of file Func.cc.

RandomVar PLearn::operator/ RandomVar  a,
RandomVar  b
 

Return a MatRandomVar that is the element-by-element ratio of two RandomVar's.

The result contains a RandomVariable.

Definition at line 442 of file RandomVar.cc.

template<class T>
TMat<T> operator/ const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

elementwise division of m1 by m2

Definition at line 5729 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy().

template<class T>
TMat<T> operator/ const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

elementwise division of every row by v

Definition at line 5724 of file TMat_maths_impl.h.

References PLearn::TMat< T >::copy().

template<class T>
TMat<T> operator/ const TMat< T > &  m,
const T &  scalar
[inline]
 

Definition at line 3501 of file TMat_maths_impl.h.

template<class T1, class T2>
TVec<T1> operator/ const TVec< T1 > &  v1,
T2  scalar
 

Definition at line 1258 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and multiply().

template<class T>
TVec<T> operator/ T  v1,
const TVec< T > &  v2
 

Definition at line 1245 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
TVec<T> operator/ const TVec< T > &  v1,
const TVec< T > &  v2
 

Definition at line 1233 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length(), and PLERROR.

template<class T>
void operator/= const TMat< T > &  m1,
const TMat< T > &  m2
 

does an elementwise division

Definition at line 4507 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator/= const TMat< T > &  m,
const TVec< T > &  v
 

Definition at line 4492 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void operator/= const TMat< T > &  m,
int  scalar
[inline]
 

Definition at line 4424 of file TMat_maths_impl.h.

template<class T>
void operator/= const TMat< T > &  m,
T  scalar
[inline]
 

Definition at line 4421 of file TMat_maths_impl.h.

template<class T>
void operator/= const TVec< T > &  vec1,
const TVec< T > &  vec2
 

Definition at line 916 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
void operator/= const TVec< T > &  vec,
int  scalar
[inline]
 

Definition at line 858 of file TMat_maths_impl.h.

template<class T>
void operator/= const TVec< T > &  vec,
T  scalar
[inline]
 

Definition at line 854 of file TMat_maths_impl.h.

Var operator< Var  v1,
Var  v2
[inline]
 


Definition at line 78 of file IsSmallerVariable.h.

template<class T>
bool operator< const TVec< T > &  left,
const TVec< T > &  right
 

Definition at line 270 of file TVec_impl.h.

References left(), PLERROR, and right().

template<class T, unsigned N, class TTrait>
bool PLearn::operator< const TinyVector< T, N, TTrait > &  ,
const TinyVector< T, N, TTrait > & 
 

Lexicographical Ordering.

Definition at line 458 of file TinyVector.h.

References x.

template<class T, unsigned SizeBits, class Allocator>
bool operator< const SmallVector< T, SizeBits, Allocator > &  x,
const SmallVector< T, SizeBits, Allocator > &  y
 

Definition at line 491 of file SmallVector.h.

References x.

template<class T, unsigned SizeBits>
bool operator< const SmallVector< T, SizeBits > &  ,
const SmallVector< T, SizeBits > & 
 


bool operator< RealMapping::single_mapping_t  a,
RealMapping::single_mapping_t  b
 

Definition at line 122 of file RealMapping.cc.

References PLearn::RealMapping::single_mapping_t.

ostream& operator<< ostream &  out,
Set  s
[inline]
 

Definition at line 107 of file Set.h.

References PLearn::Set::begin(), PLearn::Set::end(), and SetIterator.

PStream& operator<< PStream out,
const PPointableSet &  pp_set
[inline]
 

Definition at line 16 of file Set.h.

ostream& operator<< ostream &  out,
ProbabilitySparseMatrix &  pyx
[inline]
 

Definition at line 545 of file ProbabilitySparseMatrix.h.

References endl(), NUMWIDTH, PLearn::ProbabilitySparseMatrix::nx(), PLearn::ProbabilitySparseMatrix::ny(), PLearn::ProbabilitySparseMatrix::raise_error, and x.

void operator<< const Vec &  v,
const VVec &  vv
[inline]
 

Definition at line 133 of file VVec.h.

References PLearn::VVec::toVec().

void operator<< const VVec &  vv,
const Vec &  v
[inline]
 

Definition at line 130 of file VVec.h.

References PLearn::VVec::copyFrom().

ostream& operator<< ostream &  out,
const VMat &  m
[inline]
 

Definition at line 166 of file VMat.h.

References PLearn::VMat::print().

void operator<< const Mat &  dest,
const VMat &  src
[inline]
 

Definition at line 150 of file VMat.h.

void operator<< const Mat &  dest,
const VMatrix &  src
[inline]
 

Definition at line 140 of file VMat.h.

References PLearn::VMatrix::getMat(), PLearn::VMatrix::length(), PLearn::TMat< T >::length(), PLERROR, PLearn::VMatrix::width(), and PLearn::TMat< T >::width().

PStream& operator<< PStream out,
const VarArray &  o
[inline]
 

Definition at line 304 of file VarArray.h.

void operator<< VarArray &  ar,
const Vec &  datavec
[inline]
 

Definition at line 240 of file VarArray.h.

References PLearn::VarArray::copyFrom().

void PLearn::operator<< VarArray &  ar,
const Array< Vec > &  values
 

Definition at line 1131 of file VarArray.cc.

References k, PLERROR, PLearn::TVec< T >::size(), and PLearn::TVec< Var >::size().

ostream& operator<< ostream &  out,
const Var &  v
[inline]
 

Definition at line 56 of file Var.h.

References endl(), and PLearn::Var::width().

template<class T>
ostream& operator<< ostream &  out,
const TVec< T > &  v
[inline]
 

Definition at line 208 of file TVec_impl.h.

template<class T>
PStream& operator<< PStream out,
const TVec< T > &  v
[inline]
 

Read and Write from C++ stream: write saves length and read resizes accordingly (the raw modes don't write any size information).

Definition at line 146 of file TVec_impl.h.

template<class T, class U>
void operator<< const TVec< T > &  m1,
const TVec< U > &  m2
 

copy TVec << TVec (different types)

Definition at line 117 of file TVec_impl.h.

References copy_cast(), and PLERROR.

template<class T>
void operator<< const TVec< T > &  m1,
const TVec< T > &  m2
[inline]
 

copy TVec << TVec

Definition at line 106 of file TVec_impl.h.

References std::copy(), and PLERROR.

void operator<< const Vec &  v,
real  f
[inline]
 

Same as fill(f) (will only work with Vec, because of a potential conflict with T == string if we wanted to make it generic).

Definition at line 778 of file TVec_decl.h.

References PLearn::TVec< T >::fill().

template<class T>
PStream& operator<< PStream out,
const TMat< T > &  m
[inline]
 

Read and Write from C++ stream: write saves length and read resizes accordingly (the raw modes don't write any size information).

Definition at line 819 of file TMat_impl.h.

template<class T>
ostream& operator<< ostream &  out,
const TMat< T > &  m
[inline]
 

printing a TMat

Definition at line 717 of file TMat_impl.h.

References PLearn::TMat< T >::print().

template<class T, class U>
void operator<< const TVec< T > &  m1,
const TMat< U > &  m2
[inline]
 

copy TVec << TMat (different types)

Definition at line 689 of file TMat_impl.h.

References copy_cast(), and PLERROR.

template<class T>
void operator<< const TVec< T > &  m1,
const TMat< T > &  m2
[inline]
 

copy TVec << TMat

Definition at line 678 of file TMat_impl.h.

References std::copy(), and PLERROR.

template<class T, class U>
void operator<< const TMat< T > &  m1,
const TVec< U > &  m2
[inline]
 

copy TMat << Tvec (different types)

Definition at line 667 of file TMat_impl.h.

References copy_cast(), and PLERROR.

template<class T>
void operator<< const TMat< T > &  m1,
const TVec< T > &  m2
[inline]
 

copy TMat << Tvec

Definition at line 656 of file TMat_impl.h.

References std::copy(), and PLERROR.

template<class T, class U>
void operator<< const TMat< T > &  m1,
const TMat< U > &  m2
 

copy TMat << TMat (different types)

Definition at line 645 of file TMat_impl.h.

References copy_cast(), and PLERROR.

template<class T>
void operator<< const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

copy TMat << TMat

Definition at line 633 of file TMat_impl.h.

References std::copy(), and PLERROR.

PStream& operator<< PStream out,
const StatsItArray &  o
[inline]
 

Definition at line 397 of file StatsIterator.h.

PStream& operator<< PStream out,
const StatsCollectorCounts &  c
[inline]
 

Definition at line 72 of file StatsCollector.h.

References PLearn::StatsCollectorCounts::id, PLearn::StatsCollectorCounts::n, PLearn::StatsCollectorCounts::nbelow, PLearn::StatsCollectorCounts::sum, and PLearn::StatsCollectorCounts::sumsquare.

template<class T>
PStream& operator<< PStream out,
const ProbSparseMatrix &  p
[inline]
 

Definition at line 73 of file ProbSparseMatrix.h.

References PLearn::DoubleAccessSparseMatrix< real >::write().

ostream& operator<< ostream &  out,
const Vec &  v
[inline]
 

Definition at line 56 of file Mat.h.

References PLearn::TVec< T >::print().

template<class T>
PStream& operator<< PStream out,
const DoubleAccessSparseMatrix< T > &  m
[inline]
 

Definition at line 181 of file DoubleAccessSparseMatrix.h.

template<class T>
PStream& operator<< PStream out,
const set< T > &  v
[inline]
 

Definition at line 1162 of file PStream.h.

References writeSet().

template<class T>
PStream& operator<< PStream out,
const vector< T > &  v
[inline]
 

Definition at line 1114 of file PStream.h.

References writeSequence().

template<class Key, class Value>
PStream& operator<< PStream out,
const hash_multimap< Key, Value > &  m
[inline]
 

Definition at line 752 of file PStream.h.

References writeMap().

template<class Key, class Value>
PStream& operator<< PStream out,
const hash_map< Key, Value > &  m
[inline]
 

Definition at line 744 of file PStream.h.

References writeMap().

template<class Key, class Value>
PStream& operator<< PStream out,
const multimap< Key, Value > &  m
[inline]
 

Definition at line 735 of file PStream.h.

References writeMap().

template<class Key, class Value>
PStream& operator<< PStream out,
const map< Key, Value > &  m
[inline]
 

Definition at line 727 of file PStream.h.

References writeMap().

template<class A, class B>
PStream& operator<< PStream out,
const pair< A, B > &  x
[inline]
 

Definition at line 648 of file PStream.h.

References x.

PStream& operator<< PStream out,
bool  x
[inline]
 

Definition at line 608 of file PStream.h.

References PLearn::PStream::outmode, PLERROR, PLearn::PStream::put(), and x.

template<class T>
PStream& operator<< PStream out,
T *&  ptr
[inline]
 

Definition at line 602 of file PStream.h.

template<class T>
PStream& operator<< PStream out,
const PP< T > &  o
[inline]
 

Definition at line 594 of file PStream.h.

template<class T>
PStream& operator<< PStream out,
const T *&  x
[inline]
 

Definition at line 553 of file PStream.h.

References PLearn::PStream::copies_map_out, PLearn::PStream::put(), PLearn::PStream::write(), and x.

ostream & PLearn::operator<< ostream &  o,
const Row &  row
 

outputs all fields in a row, separated by " | "

Definition at line 916 of file SimpleDB.cc.

References PLearn::Row::begin(), PLearn::Row::end(), and endl().

ostream & PLearn::operator<< ostream &  o,
const Row::iterator &  field
 

outputs a single field flushed right in a cell of appropriate width (as given by field.char_width())

Definition at line 856 of file SimpleDB.cc.

References PLearn::RowIterator::asCharacter(), PLearn::RowIterator::asDate(), PLearn::RowIterator::asDouble(), PLearn::RowIterator::asFloat(), PLearn::RowIterator::asInt(), PLearn::RowIterator::asShort(), PLearn::RowIterator::asSignedChar(), PLearn::RowIterator::asString(), center(), PLearn::RowIterator::char_width(), PLearn::RowIterator::isMissing(), PLERROR, and x.

ostream & PLearn::operator<< ostream &  os,
const FieldValue &  ft
 

Definition at line 551 of file SimpleDB.cc.

References PLearn::FieldValue::toString().

template<class U, class V>
ostream& operator<< ostream &  out,
const pair< U, V > &  p
 

Formatted printing of a pair<U,V> as U:V.

Definition at line 263 of file stringutils.h.

ostream & PLearn::operator<< ostream &  out,
const vector< string > &  vs
 

formatted printing of vector<string> prints strings separated by a ", "

Definition at line 608 of file stringutils.cc.

ostream& operator<< ostream &  out,
const StringTable &  st
 

Definition at line 42 of file StringTable.cc.

References PLearn::StringTable::data, PLearn::StringTable::fieldnames, left(), PLearn::TVec< T >::length(), PLearn::StringTable::length(), and PLearn::StringTable::width().

template<class T>
PStream& operator<< PStream out,
const Storage< T > &  seq
 

Definition at line 327 of file Storage.h.

References writeSequence().

PStream& operator<< PStream out,
const SetOption &  o
[inline]
 

Definition at line 113 of file SetOption.h.

PStream& operator<< PStream out,
const RealMapping o
[inline]
 

Definition at line 237 of file RealMapping.h.

ostream& operator<< ostream &  out,
const RealRange &  range
[inline]
 

Definition at line 117 of file RealMapping.h.

References PLearn::RealRange::print().

PStream & PLearn::operator<< PStream out,
const RealRange &  x
 

Definition at line 54 of file RealMapping.cc.

References PLearn::PStream::put(), and x.

ostream& operator<< ostream &  out,
Range  r
[inline]
 

Definition at line 77 of file Range.h.

References PLearn::Range::length, and PLearn::Range::start.

ostream& operator<< ostream &  os,
const PDateTime &  date
[inline]
 

Definition at line 150 of file PDateTime.h.

References PLearn::PDateTime::info().

ostream& operator<< ostream &  os,
const PDate &  date
[inline]
 

Definition at line 158 of file PDate.h.

References PLearn::PDate::info().

PStream& operator<< PStream out,
const Object &  o
[inline]
 

Definition at line 617 of file Object.h.

References PLearn::Object::newwrite().

ostream& operator<< ostream &  out,
const Object &  obj
[inline]
 

Definition at line 612 of file Object.h.

References PLearn::Object::print().

template<class T>
ostream& operator<< ostream &  out,
const Array< T > &  a
 

Definition at line 82 of file Array_impl.h.

template<class T>
PStream& operator<< PStream out,
const Array< T > &  a
[inline]
 

Definition at line 78 of file Array_impl.h.

References writeSequence().

Referenced by PLearn::PStream::operator<<().

Var operator<= Var  v1,
Var  v2
[inline]
 

Definition at line 80 of file IsLargerVariable.h.

Var operator<= Var  v,
real  threshold
[inline]
 

Definition at line 86 of file IsAboveThresholdVariable.h.

template<class T>
bool operator<= const TVec< T > &  left,
const TVec< T > &  right
 

A simple family of relational operators for TVec.

Definition at line 249 of file TVec_impl.h.

References left(), PLERROR, and right().

template<class T, unsigned N, class TTrait>
bool operator<= const TinyVector< T, N, TTrait > &  x,
const TinyVector< T, N, TTrait > &  y
[inline]
 

Definition at line 182 of file TinyVector.h.

References x.

Var PLearn::operator== Var  v1,
Var  v2
 

Definition at line 192 of file Var_operators.cc.

References isequal().

Var operator== real  cte,
Var  v1
[inline]
 

result[i] = 1 if v1[i]==cte, 0 otherwise

Definition at line 85 of file EqualConstantVariable.h.

Var operator== Var  v1,
real  cte
[inline]
 

result[i] = 1 if v1[i]==cte, 0 otherwise

Definition at line 81 of file EqualConstantVariable.h.

template<class T, unsigned N, class TTrait>
bool PLearn::operator== const TinyVector< T, N, TTrait > &  ,
const TinyVector< T, N, TTrait > & 
 

Equality operator.

Definition at line 444 of file TinyVector.h.

References PLearn::TinyVector< T, N, TTrait >::begin(), PLearn::TinyVector< T, N, TTrait >::end(), and x.

template<class T, unsigned SizeBits, class Allocator>
bool PLearn::operator== const SmallVector< T, SizeBits, Allocator > &  a,
const SmallVector< T, SizeBits, Allocator > &  b
 

Equality operator.

Definition at line 476 of file SmallVector.h.

References PLearn::SmallVector< T, SizeBits, Allocator >::begin(), PLearn::SmallVector< T, SizeBits, Allocator >::end(), and x.

Referenced by PLearn::TMatRowsIterator< T >::operator!=(), and PLearn::TMatRowsAsArraysIterator< T >::operator!=().

Var operator> Var  v1,
Var  v2
[inline]
 

Definition at line 77 of file IsLargerVariable.h.

template<class T>
bool operator> const TVec< T > &  left,
const TVec< T > &  right
 

Definition at line 290 of file TVec_impl.h.

References left(), PLERROR, and right().

template<class T, unsigned N, class TTrait>
bool operator> const TinyVector< T, N, TTrait > &  x,
const TinyVector< T, N, TTrait > &  y
[inline]
 

Definition at line 175 of file TinyVector.h.

References x.

Var operator>= Var  v1,
Var  v2
[inline]
 

Definition at line 81 of file IsSmallerVariable.h.

Var operator>= Var  v,
real  threshold
[inline]
 

Definition at line 83 of file IsAboveThresholdVariable.h.

template<class T>
bool operator>= const TVec< T > &  left,
const TVec< T > &  right
 

Definition at line 259 of file TVec_impl.h.

References left(), PLERROR, and right().

template<class T, unsigned N, class TTrait>
bool operator>= const TinyVector< T, N, TTrait > &  x,
const TinyVector< T, N, TTrait > &  y
[inline]
 

Definition at line 189 of file TinyVector.h.

References x.

PStream& operator>> PStream in,
PPointableSet &  pp_set
[inline]
 

Definition at line 19 of file Set.h.

void operator>> const Vec &  v,
const VVec &  vv
[inline]
 

Definition at line 136 of file VVec.h.

References PLearn::VVec::copyFrom().

void operator>> const VVec &  vv,
const Vec &  v
[inline]
 

Definition at line 127 of file VVec.h.

References PLearn::VVec::toVec().

void operator>> const VMat &  src,
const Mat &  dest
[inline]
 

Definition at line 153 of file VMat.h.

void operator>> const VMatrix &  src,
const Mat &  dest
[inline]
 

Definition at line 147 of file VMat.h.

PStream& operator>> PStream in,
VarArray &  o
[inline]
 

Definition at line 301 of file VarArray.h.

void operator>> VarArray &  ar,
const Vec &  datavec
[inline]
 

Definition at line 243 of file VarArray.h.

References PLearn::VarArray::copyTo().

void PLearn::operator>> VarArray &  ar,
const Array< Vec > &  values
 

Definition at line 1146 of file VarArray.cc.

References k, PLERROR, PLearn::TVec< T >::size(), and PLearn::TVec< Var >::size().

template<class T>
istream& operator>> istream &  in,
const TVec< T > &  v
[inline]
 

Definition at line 215 of file TVec_impl.h.

References PLearn::TVec< T >::input().

template<class T>
PStream& operator>> PStream in,
TVec< T > &  v
 

Definition at line 153 of file TVec_impl.h.

References PLearn::TVec< T >::read().

template<class T, class U>
void operator>> const TVec< T > &  m1,
const TVec< U > &  m2
[inline]
 

copy TVec >> TVec

Definition at line 128 of file TVec_impl.h.

template<class T>
PStream& operator>> PStream in,
TMat< T > &  m
 

Definition at line 826 of file TMat_impl.h.

References PLearn::TMat< T >::read().

template<class T>
istream& operator>> istream &  in,
const TMat< T > &  m
[inline]
 

inputing a TMat

Definition at line 726 of file TMat_impl.h.

References PLearn::TMat< T >::input().

template<class T, class U>
void operator>> const TMat< T > &  m1,
const TVec< U > &  m2
[inline]
 

copy TMat >> Tvec

Definition at line 710 of file TMat_impl.h.

template<class T, class U>
void operator>> const TVec< T > &  m1,
const TMat< U > &  m2
[inline]
 

copy TVec >> TMat

Definition at line 705 of file TMat_impl.h.

template<class T, class U>
void operator>> const TMat< T > &  m1,
const TMat< U > &  m2
[inline]
 

copy TMat >> TMat

Definition at line 700 of file TMat_impl.h.

PStream& operator>> PStream in,
StatsItArray &  o
[inline]
 

Definition at line 394 of file StatsIterator.h.

PStream& operator>> PStream in,
StatsCollectorCounts &  c
[inline]
 

this class holds simple statistics about a field

Definition at line 69 of file StatsCollector.h.

References PLearn::StatsCollectorCounts::id, PLearn::StatsCollectorCounts::n, PLearn::StatsCollectorCounts::nbelow, PLearn::StatsCollectorCounts::sum, and PLearn::StatsCollectorCounts::sumsquare.

template<class T>
PStream& operator>> PStream in,
ProbSparseMatrix &  p
[inline]
 

Definition at line 80 of file ProbSparseMatrix.h.

References PLearn::DoubleAccessSparseMatrix< real >::read().

template<class T>
PStream& operator>> PStream in,
DoubleAccessSparseMatrix< T > &  m
[inline]
 

Definition at line 188 of file DoubleAccessSparseMatrix.h.

References PLearn::DoubleAccessSparseMatrix< T >::read().

template<class T>
PStream& operator>> PStream in,
set< T > &  v
[inline]
 

Definition at line 1158 of file PStream.h.

References readSet().

template<class T>
PStream& operator>> PStream in,
vector< T > &  v
[inline]
 

Definition at line 1110 of file PStream.h.

References readSequence().

template<class Key, class Value>
PStream& operator>> PStream in,
hash_multimap< Key, Value > &  m
[inline]
 

Definition at line 756 of file PStream.h.

References readMap().

template<class Key, class Value>
PStream& operator>> PStream in,
hash_map< Key, Value > &  m
[inline]
 

Definition at line 748 of file PStream.h.

References readMap().

template<class Key, class Value>
PStream& operator>> PStream in,
multimap< Key, Value > &  m
[inline]
 

Definition at line 739 of file PStream.h.

References readMap().

template<class Key, class Value>
PStream& operator>> PStream in,
map< Key, Value > &  m
[inline]
 

Definition at line 731 of file PStream.h.

References readMap().

template<typename S, typename T>
PStream& operator>> PStream in,
pair< S, T > &  x
[inline]
 

Definition at line 658 of file PStream.h.

References PLearn::PStream::get(), PLERROR, PLearn::PStream::skipBlanksAndComments(), PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and x.

template<class T>
PStream& operator>> PStream in,
PP< T > &  o
[inline]
 

Definition at line 581 of file PStream.h.

References PLearn::PP< T >::isNull().

template<class T>
PStream& operator>> PStream in,
T *&  x
[inline]
 

Definition at line 509 of file PStream.h.

References PLearn::PStream::copies_map_in, PLearn::PStream::get(), PLearn::PStream::peek(), PLERROR, PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and x.

template<class T>
PStream& operator>> PStream in,
Storage< T > &  seq
 

Definition at line 334 of file Storage.h.

References readSequence().

PStream& operator>> PStream in,
PP< SetOption > &  o
[inline]
 

Definition at line 113 of file SetOption.h.

PStream& operator>> PStream in,
SetOption *&  o
[inline]
 

Definition at line 113 of file SetOption.h.

PStream& operator>> PStream in,
SetOption &  o
[inline]
 

Definition at line 113 of file SetOption.h.

PStream& operator>> PStream in,
PP< RealMapping > &  o
[inline]
 

Definition at line 237 of file RealMapping.h.

PStream& operator>> PStream in,
RealMapping *&  o
[inline]
 

Definition at line 237 of file RealMapping.h.

PStream& operator>> PStream in,
RealMapping o
[inline]
 

Definition at line 237 of file RealMapping.h.

PStream & PLearn::operator>> PStream in,
RealRange &  x
 

Definition at line 62 of file RealMapping.cc.

References PLearn::PStream::get(), PLearn::PStream::skipBlanksAndComments(), PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and x.

PStream& operator>> PStream in,
Object &  o
[inline]
 

Definition at line 615 of file Object.h.

References PLearn::Object::newread().

PStream & PLearn::operator>> PStream in,
Object *&  o
 

Definition at line 428 of file Object.cc.

References PLearn::PStream::copies_map_in, PLearn::PStream::get(), PLearn::PStream::peek(), PLERROR, readObject(), PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and x.

template<class T>
PStream& operator>> PStream in,
Array< T > &  a
[inline]
 

Definition at line 74 of file Array_impl.h.

References readSequence().

Referenced by PLearn::PStream::operator>>().

Mat PLearn::operator^ const Mat &  m1,
const Mat &  m2
 

"cross" product of two sets. Matrices m1 and m2 are regarded as two sets of vectors (their rows) m1^m2 returns the set of all possible concatenations of a vector from m1 and a vector from m2 ex: Mat(2,1,"1 2 3 4") ^ Mat(2,2,"10 20 30 40") ==> 1 2 10 20 1 2 30 40 3 4 10 20 3 4 30 40

Definition at line 132 of file Mat.cc.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().
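
To make the row-concatenation semantics concrete without relying on the PLearn Mat API, here is a hedged sketch with plain std::vector rows (an illustration of the behaviour described above, not the PLearn implementation):

#include <vector>

// Each matrix is a set of rows; the "cross" product concatenates every row
// of m1 with every row of m2, giving m1.size()*m2.size() result rows.
std::vector<std::vector<double> >
cross_rows(const std::vector<std::vector<double> >& m1,
           const std::vector<std::vector<double> >& m2)
{
    std::vector<std::vector<double> > result;
    for (size_t i = 0; i < m1.size(); ++i)
        for (size_t j = 0; j < m2.size(); ++j) {
            std::vector<double> row(m1[i]);
            row.insert(row.end(), m2[j].begin(), m2[j].end());
            result.push_back(row);
        }
    return result;
}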

template<class T>
T output_margin const TVec< T > &  class_scores,
int  correct_class
 

Definition at line 1296 of file TMat_maths_impl.h.

References PLearn::TVec< T >::length().
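
The name suggests a classification margin; a plausible reading (an assumption, not confirmed by the source) is the score of the correct class minus the best competing score, sketched here with standard containers:

#include <vector>
#include <limits>

// Hypothetical margin: correct-class score minus the highest other score.
// A positive value then means the correct class wins.
double output_margin_sketch(const std::vector<double>& class_scores,
                            int correct_class)
{
    double best_other = -std::numeric_limits<double>::infinity();
    for (int i = 0; i < (int)class_scores.size(); ++i)
        if (i != correct_class && class_scores[i] > best_other)
            best_other = class_scores[i];
    return class_scores[correct_class] - best_other;
}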

CostFunc PLearn::output_minus_target int  singleoutputindex = -1  )  [inline]
 

Definition at line 59 of file DifferenceKernel.cc.

Var PLearn::P ConditionalExpression  conditional_expression,
bool  clearMarksUponReturn = true
 

Construct a Var that computes P(LHS == observation | RHS == values) in terms of the Var observation and the Vars in the RHS, where RHS is an RVArray such as (X1==x1 && X2==x2 && X3==x3), where the Xi are RandomVar's and the xi are Var's representing the values given to the conditioning variables Xi. Normally the marks used to identify RVs that are deterministically determined from the RHS are cleared upon return (unless specified otherwise with the optional 2nd argument).

Definition at line 593 of file RandomVar.cc.

References PLearn::ConditionalExpression::LHS, PLearn::ConditionalExpression::RHS, PLearn::RVInstance::v, and PLearn::RVInstance::V.

Referenced by PLearn::PLS::train().

real PLearn::p_value real  mu,
real  vn
 

Definition at line 219 of file pl_erf.cc.

References gauss_01_cum(), and sqrt().

Referenced by KS_test().

real PLearn::paired_t_test Vec  u,
Vec  v
 

Given two paired sets u and v of n measured values, the paired t-test determines whether they differ from each other in a significant way, under the assumption that the paired differences are independent and identically normally distributed.

Definition at line 254 of file stats_utils.cc.

References PLearn::TVec< T >::length(), mean(), MISSING_VALUE, PLWARNING, sqrt(), and sumsquare().

Referenced by PLearn::SequentialModelSelector::test().
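
For reference, the statistic behind the test can be sketched as follows (a generic computation of the paired t statistic, assuming equal-length samples with n > 1; not the PLearn code itself):

#include <vector>
#include <cmath>

// Paired t statistic: t = mean(d) / (std(d) / sqrt(n)), with d_i = u_i - v_i.
double paired_t_statistic(const std::vector<double>& u,
                          const std::vector<double>& v)
{
    const size_t n = u.size();           // assumes u.size() == v.size(), n > 1
    double mean_d = 0.0;
    for (size_t i = 0; i < n; ++i)
        mean_d += (u[i] - v[i]);
    mean_d /= n;

    double var_d = 0.0;                  // unbiased sample variance of d
    for (size_t i = 0; i < n; ++i) {
        double c = (u[i] - v[i]) - mean_d;
        var_d += c * c;
    }
    var_d /= (n - 1);

    return mean_d / std::sqrt(var_d / n);
}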

void PLearn::parseSizeFromRemainingLines const string filename,
ifstream &  in,
bool could_be_old_amat,
int length,
int width
 

Definition at line 1510 of file MatIO.cc.

References countNonBlankLinesOfFile(), getNextNonBlankLine(), and split().

Referenced by loadAscii().

template<class T>
void partialSortRows const TMat< T > &  mat,
int  k,
int  sortk = 1,
int  col = 0
 

Uses partial_sort.

Sorts only the k smallest rows and puts them in the first k rows. The other rows are left in an arbitrary order. If sortk is 0, the k smallest rows are put in the first k rows but in an arbitrary order. This implementation should be very efficient, but it does two memory allocations: a first one of mat.length()*(sizeof(real)+sizeof(int)) and a second one of mat.length()*sizeof(int).

Definition at line 126 of file TMat_sort.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::swapRows().

Referenced by PLearn::Kernel::computeKNNeighbourMatrixFromDistanceMatrix(), and PLearn::Kernel::computeNearestNeighbors().
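
The same idea can be illustrated with the standard library; a hedged sketch that partially sorts row indices by the value in a given column, rather than swapping rows in place as PLearn does:

#include <algorithm>
#include <numeric>
#include <vector>

// Return the indices of the k rows with the smallest values in column `col`,
// in increasing order (std::partial_sort only orders the first k elements).
std::vector<int> k_smallest_rows(const std::vector<std::vector<double> >& mat,
                                 int k, int col)
{
    std::vector<int> idx(mat.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&](int a, int b) { return mat[a][col] < mat[b][col]; });
    idx.resize(k);
    return idx;
}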

bool PLearn::pathexists const string path  ) 
 

returns true if the given path points to an existing regular file or directory

Definition at line 129 of file fileutils.cc.

Referenced by PLearn::SequentialValidation::build_(), PLearn::PTester::build_(), PLearn::VVMatrix::createPreproVMat(), getDataSet(), getDataSetDate(), PLearn::VVMatrix::getPrecomputedDataName(), PLearn::VVMatrix::isPrecomputedAndUpToDate(), locateDatasetAliasesDir(), PLearn::VMatrix::lockMetaDataDir(), makeFileNameValid(), and PLearn::Experiment::run().

char peekAfterSkipBlanks istream &  in  )  [inline]
 

peeks the first char after removal of blanks

Definition at line 158 of file fileutils.h.

Referenced by PLearn::VMatLanguage::generateCode(), and PLearn::RealMapping::read().

char peekAfterSkipBlanksAndComments istream &  in  )  [inline]
 

peeks the first char after removal of blanks and comments

Definition at line 161 of file fileutils.h.

References skipBlanksAndComments().

Referenced by readAndMacroProcess().

string PLearn::pgetline istream &  in = cin  ) 
 

returns the next line read from the stream, after removing any trailing '\r' and/or '\n'

Definition at line 293 of file stringutils.cc.

References removenewline().

Referenced by extractWordSet(), PLearn::ShellProgressBar::getAsciiFileLineCount(), interactiveDisplayCDF(), PLearn::VMatrix::loadFieldInfos(), PLearn::WordNetOntology::loadPredominentSyntacticClasses(), PLearn::GraphicalBiText::loadSensemap(), main(), and PLearn::Grapher::plot_1D_regression().
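
A minimal equivalent using only the standard library, assuming the behaviour described above (strip a trailing '\r' and/or '\n'); this is a sketch, not the PLearn implementation:

#include <istream>
#include <string>

// Read one line and drop any trailing carriage return / newline characters.
std::string pgetline_sketch(std::istream& in)
{
    std::string line;
    std::getline(in, line);                       // consumes the '\n'
    while (!line.empty() &&
           (line[line.size() - 1] == '\r' || line[line.size() - 1] == '\n'))
        line.erase(line.size() - 1);
    return line;
}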

real PLearn::pl_dgammlndz real  z  ) 
 

d(pl_gammln(z))/dz : the derivative of pl_gammln(z)

Definition at line 74 of file pl_erf.cc.

References log(), and pl_gammln_cof.

real PLearn::pl_erf real  x  ) 
 

The error function.

Definition at line 151 of file pl_erf.cc.

References pl_gammq(), and x.

Referenced by PLearn::ErfVariable::fprop(), and gauss_01_cum().
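
Since the definition relies on pl_gammq(), the standard identity linking the error function to the regularized incomplete gamma functions is the relevant one; for the record (a textbook identity, not quoted from the source):

\mathrm{erf}(x) = \operatorname{sign}(x)\, P\!\left(\tfrac{1}{2}, x^2\right)
                = \operatorname{sign}(x)\,\bigl(1 - Q\!\left(\tfrac{1}{2}, x^2\right)\bigr)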

real PLearn::pl_gammln real  z  ) 
 

function gamma returns log(Gamma(z)), where Gamma(z) = \int_0^\infty t^{z-1} e^{-t} \, dt

Definition at line 60 of file pl_erf.cc.

References log(), Pi, and pl_gammln_cof.

Referenced by pl_gcf(), and pl_gser().
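
Modern C++ already exposes this quantity through std::lgamma; a small self-contained check (assuming pl_gammln matches the mathematical definition above):

#include <cmath>
#include <cstdio>

int main()
{
    // log(Gamma(z)) from the standard library, for comparison with pl_gammln.
    for (double z = 0.5; z <= 5.0; z += 0.5)
        std::printf("z = %4.1f   lgamma(z) = %.10f\n", z, std::lgamma(z));
    return 0;
}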

real PLearn::pl_gammq real  a,
real  x
 

returns the incomplete gamma function Q(a,x) = 1 - P(a,x); it uses either the series or the continued-fraction representation

Definition at line 141 of file pl_erf.cc.

References pl_gcf(), pl_gser(), PLERROR, and x.

Referenced by pl_erf().
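
The usual way to choose between the two representations (as in Numerical Recipes, which this code appears to follow; an assumption) is to use the series for x < a+1 and the continued fraction otherwise. A hedged sketch, with function pointers standing in for pl_gser() and pl_gcf():

// Hypothetical dispatch between the two expansions of Q(a, x).
// Assumption: the series routine returns P(a,x), the continued fraction Q(a,x).
double pl_gammq_sketch(double a, double x,
                       double (*series_P)(double, double),
                       double (*contfrac_Q)(double, double))
{
    if (x < a + 1.0)
        return 1.0 - series_P(a, x);     // series converges fast here; Q = 1 - P
    return contfrac_Q(a, x);             // continued fraction converges fast here
}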

real PLearn::pl_gcf real  a,
real  x
 

returns the continued fraction representation of the incomplete gamma function

Definition at line 110 of file pl_erf.cc.

References EPS, exp(), FPMIN, ITMAX, log(), pl_gammln(), PLERROR, and x.

Referenced by pl_gammq().

real PLearn::pl_gser real  a,
real  x
 

returns the series value of the incomplete gamma function

Definition at line 89 of file pl_erf.cc.

References exp(), ITMAX, log(), pl_gammln(), PLERROR, sum(), and x.

Referenced by pl_gammq().

bool PLearn::pl_isnumber const string s,
float *  dbl
 

Definition at line 96 of file stringutils.cc.

References MISSING_VALUE, and removeblanks().

bool PLearn::pl_isnumber const string s,
double *  dbl = NULL
 

Definition at line 83 of file stringutils.cc.

References MISSING_VALUE, and removeblanks().

Referenced by PLearn::VMatLanguage::generateCode(), PLearn::VecStatsCollector::getFieldNum(), getList(), loadAscii(), loadUCIMLDB(), PLearn::PDate::PDate(), PLearn::VMatLanguage::preprocess(), readAndMacroProcess(), PLearn::StrTableVMatrix::StrTableVMatrix(), and viewVMat().
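
A comparable check can be written with strtod; a hedged sketch in which the whitespace trimming stands in for removeblanks(), and the MISSING_VALUE handling of the PLearn version is not reproduced:

#include <cstdlib>
#include <string>

// Returns true if `s` (after trimming blanks) parses entirely as a number;
// if so and dbl != NULL, the parsed value is stored in *dbl.
bool is_number_sketch(const std::string& s, double* dbl = 0)
{
    std::string t = s;
    t.erase(0, t.find_first_not_of(" \t\r\n"));
    std::string::size_type last = t.find_last_not_of(" \t\r\n");
    if (last == std::string::npos) return false;   // blank string
    t.erase(last + 1);

    char* end = 0;
    double value = std::strtod(t.c_str(), &end);
    if (end == t.c_str() || *end != '\0') return false;
    if (dbl) *dbl = value;
    return true;
}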

PLEARN_IMPLEMENT_ABSTRACT_OBJECT SequentialLearner  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT StatefulLearner  ,
"PLearner with an internal state"  ,
"PLearner with an internal state.\n""It replaces, for efficacity and compatibility reasons, SequentialLearner." 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT PLearner  ,
"The base class for all PLearn learners."  ,
"" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Learner  ,
"DEPRECATED CLASS: Derive from PLearner instead"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT VMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

VMatrix *.

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Splitter  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT RowBufferedVMatrix  ,
"A base class for VMatrices that keep the last row(s) in a buffer for faster access."  ,
"" 
 

RowBufferedVMatrix *.

PLEARN_IMPLEMENT_ABSTRACT_OBJECT DatedVMatrix  ,
"ONE_LINE_DESC"  ,
"ONE_LINE_HELP" 
 

DatedVMatrix *.

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Variable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT UnaryVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT NaryVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

NaryVariable *.

PLEARN_IMPLEMENT_ABSTRACT_OBJECT BinaryVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Optimizer  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT HyperOptimizer  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT ObjectGenerator  ,
"ObjectGenerator is the base class for implementing object-generation techniques."  ,
"The OptionGenerator takes a template Object, and from a list of options,\n""it will generate another Object (or a complete list).\n" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT StatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Smoother  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_ABSTRACT_OBJECT Kernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT TangentLearner  ,
"Learns local tangent plane of the manifold near which the data lie."  ,
"This learner models a manifold near which the data are supposed to lie.\n""The manifold is represented by a function which predicts a basis for the\n""tangent planes at each point   x,
given x in R^n.Let f_i(x) be the predicted i-th tangent\n""vector(in R^n).Then we will optimize the parameters that define the d functions f_i by\n""pushing the f_i so that they span the local tangent directions.Three criteria are\n""  possible,
according to the 'training_targets'  ,
'normalize_by_neighbor_distance'and\n""'use_subspace_distance'option.The default criterion is the recommanded  one,
with\n""  training_targets = 'local_neighbors',
normalize_by_neighbor_distance  = 1,
\n""and  use_subspace_distance = 0 (it really did not work well in our experiments with\n""use_subspace_distance=1). This corresponds to the following cost function:\n""    sum_x sum_j min_w ||t(x,j) - sum_i w_i f_i(x)||^2 / ||t(x,j)||^2\n""where x is an example,
t(x, j) is the difference vector between x and its j-th  neighbor,
\n""and the w_i are chosen freely for each j and x and correspond to the weights given to\n""each basis vector f_i(x) to obtain the projection of t(x, j) on the tangent plane.\n""More  generally,
if  use_subspace_distance
 

PLEARN_IMPLEMENT_OBJECT SpectralClustering  ,
"Spectral Clustering dimensionality reduction."  ,
"The current code only performs dimensionality reduction, and does not do\n""clustering." 
 

PLEARN_IMPLEMENT_OBJECT PCA  ,
"Performs a Principal Component Analysis preprocessing (projecting on the principal directions)."  ,
"This learner finds the empirical covariance matrix of the input part of\n""the training data, and learns to project its input vectors along the\n""principal eigenvectors of that matrix, optionally scaling by the inverse\n""of the square root of the eigenvalues (to obtain 'sphered', i.e.\n""Normal(0, I) data).\n""Alternative EM algorithms are provided, that may be useful when there is\n""a lot of data or the dimension is very high.\n" 
 

PLEARN_IMPLEMENT_OBJECT LLE  ,
"Performs Locally Linear Embedding."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT KPCATangentLearner  ,
"Tangent learning based on KPCA Kernel"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT KernelProjection  ,
"Performs dimensionality reduction by learning eigenfunctions of a kernel."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT KernelPCA  ,
"Kernel Principal Component Analysis"  ,
"Perform PCA in a feature space   phi(x)
 

PLEARN_IMPLEMENT_OBJECT IsomapTangentLearner  ,
"Tangent learning based on Isomap Kernel"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT Isomap  ,
"Performs ISOMAP dimensionality reduction."  ,
"Be careful that when looking for the 'knn' nearest neighbors of a point x,\n""we consider all points from the training data D, including x itself if it\n""belongs to D. Thus, to obtain the same result as with the classical ISOMAP\n""algorithm, one should use one more neighbor.\n""Note also that when used out-of-sample, this will result in a different output\n""than an algorithm applying the same formula, but considering one less neighbor.\n" 
 

PLEARN_IMPLEMENT_OBJECT GaussianContinuum  ,
"Learns a continuous (uncountable) Gaussian mixture with non-local parametrization" 
 

PLEARN_IMPLEMENT_OBJECT EntropyContrast  ,
"Performs a EntropyContrast search"  ,
"Detailed Description " 
 

PLEARN_IMPLEMENT_OBJECT Train  ,
"An easy PTester"  ,
Sometimes
 

PLEARN_IMPLEMENT_OBJECT TestMethod  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PTester  ,
"Manages a learning   experiment,
with training and estimation of generalization error."  ,
"The PTester class allows you to describe a typical learning experiment that you wish to   perform,
\n""as a training/testing of a learning algorithm on a particular dataset.\n""The splitter is used to obtain one or several(such as for k-fold) splits of the dataset\n""and training/testing is performed on each split.\n""Requested statistics are  computed,
and all requested results are written in an appropriate\n""file inside the specified experiment directory.\n""Statistics can be either specified entirely from the 'statnames'  option,
or built from\n""'statnames'and 'statmask'.For  instance,
one may set:\n""  statnames = [ \"NLL\" \"mse\" ]\n""   statmask  = [ [ \"E[*]\" ] [ \"test1.*\" \"test2.*\" ] [ \"E[*]\" \"STDERROR[*]\" ] ]\n""and this will compute:\n""   E[test1.E[NLL]], STDERROR[test1.E[NLL]], E[test2.E[NLL]], STDERROR[test2.E[NLL]]\n""   E[test1.E[mse]], STDERROR[test1.E[mse]], E[test2.E[mse]], STDERROR[test2.E[mse]]\n"
 

PLEARN_IMPLEMENT_OBJECT SequentialValidation  ,
"The SequentialValidation class allows you to describe a typical ""sequential validation experiment that you wish to perform."  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SequentialModelSelector  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MovingAverage  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT EmbeddedSequentialLearner  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PLS  ,
"Partial Least Squares Regression (PLSR)."  ,
"You can use this learner to perform   regression,
and/or dimensionality\n""reduction.\n""PLS regression assumes the target Y and the data X are linked through:\n""  Y = T.Q' + E\n"" X = T.P' + F\n""The underlying coefficients T (the 'scores') and the loading matrices\n""Q and P are seeked. It is then possible to compute the prediction y for\n""a new input x,
as well as its score vector t(its representation in\n""lower-dimensional coordinates).\n""The available algorithms to perform PLS(chosen by the 'method'option) are:\n""\n""  = ===  PLS1  ====\n""The classical PLS algorithm,
suitable only for a 1-dimensional target.The\n""following algorithm is taken from 'Factor Analysis in Chemistry'  ,
with an\n""additional loop   that(I believe) was missing:\n""(1) Let X(n x p) = the centered and normalized input data\n""     Let y (n x 1) = the centered and normalized target data\n""     Let k be the number of components extracted\n"" (2) s = y\n"" (3) lx' = s' X,
= X lx (normalized)\n"" (4) If s has changed by more than 'precision',
loop to(3)\n""(5)  ly = s' y\n"" (6) lx' = s' X\n"" (7) Store s,
lx and ly in the columns of respectively  T,
P and Q\n""(8)  X = X - s lx',
= y - s ly,
loop to(2) k times\n""(9) Set  W = (T P')^(+) T,
where the^(+) is the right pseudoinverse\n""\n""  = === Kernel ====\n""The code implements a NIPALS-PLS-like algorithm,
which is a so-called\n""'kernel'algorithm(faster than more classical implementations).\n""The  algorithm,
inspired from 'Factor Analysis in Chemistry'and above all\n""www.statsoftinc.com/textbook/stpls.  html,
is the following:\n""  (1) Let X(n x p) = the centered and normalized input data\n""     Let Y (n x m) = the centered and normalized target data\n""     Let k be the number of components extracted\n"" (2) Initialize A_0 = X'Y,
M_0  = X'X,
C_0  = Identity(p),
and  h = 0\n"" (3) q_h = largest eigenvector of B_h = A_h' A_h,
found by the NIPALS method:\n""(3.a)  q_h = a (normalized) randomn column of B_h\n""       (3.b) q_h = B_h q_h\n""       (3.c) normalize q_h\n""       (3.d) if q_h has changed by more than 'precision',
go to(b)\n""(4)  w_h = C_h A_h q_h,
normalize w_h and store it in a column of W(p x k)\n""(5)  p_h = M_h w_h,
c_h  = w_h' p_h,
p_h  = p_h / c_h and store it in a column\n""     of P (p x k)\n"" (6) q_h = A_h' w_h / c_h,
and store it in a column of Q(m x k)\n""(7) A_h+  1 = A_h - c_h p_h q_h'\n""     M_h+1 = M_h - c_h p_h p_h',
\n""C_h+  1 = C_h - w_h p_h\n"" (8) h = h+1
 

PLEARN_IMPLEMENT_OBJECT LinearRegressor  ,
"Ordinary Least Squares and Ridge Regression, optionally weighted"  ,
"This class performs OLS (Ordinary Least Squares) and Ridge Regression, optionally on weighted\n""data, by solving the linear equation (X'W X + weight_decay*n_examples*I) theta = X'W Y\n""where X is the (n_examples x (1+inputsize)) matrix of extended inputs (with a 1 in the first column),\n""Y is the (n_example x targetsize)
 

PLEARN_IMPLEMENT_OBJECT ConstantRegressor  ,
"PLearner that outputs a constant (input-independent) vector.\n" 
 

PLEARN_IMPLEMENT_OBJECT PTester  ,
"Evaluates the performance of a PLearner"  ,
"The PTester class allows you to describe a typical learning experiment that you wish to perform,\n""as a training/testing of a learning algorithm on a particular dataset.\n""The splitter is used to obtain one or several (such as for k-fold) splits of the dataset\n""and training/testing is performed on each split.\n""Requested statistics are computed, and all requested results are written in an appropriate\n""file inside the specified experiment directory.\n" 
 

PLEARN_IMPLEMENT_OBJECT Grapher  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT GenerateDecisionPlot  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT Experiment  ,
"DEPRECATED: use PTester instead"  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT TextSenseSequenceVMatrix  ,
"VMat class that takes another VMat which contains a sequence (rows) ""of words/sense/POS triplets extracted from a corpus and implements a ""representation of a target word and its context."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT Dictionary  ,
"Mapping string->int and int->string"  ,
"MULTI LINE\nHELP" 
 

PLEARN_IMPLEMENT_OBJECT GraphicalBiText  ,
"Probabilistically tag a bitext (english-other language) with senses from WordNet"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT TestingLearner  ,
"ONE LINE DESCRIPTION"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT StackedLearner  ,
"Implements stacking, that combines two levels of learner, the 2nd level using the 1st outputs as inputs"  ,
"Stacking is a generic strategy in which two levels (or more, recursively) of learners\n""are combined. The lower level may have one or more learners, and they may be trained\n""on the same or different data from the upper level single learner. The outputs of the\n""1st level learners are concatenated and serve as inputs to the second level learner.\n""IT IS ASSUMED THAT ALL BASE LEARNERS HAVE THE SAME NUMBER OF INPUTS AND OUTPUTS\n""There is also the option to copy the input of the 1st level learner as additional\n""inputs for the second level (put_raw_input).\n""A Splitter can optionally be provided to specify how to split the data into\n""the training/validation sets for the lower and upper levels respectively\n" 
 

PLEARN_IMPLEMENT_OBJECT SelectInputSubsetLearner  ,
"PLearner which selects a subset of the inputs for an embedded learner."  ,
"This learner class contains an embedded learner for which it selects a subset of the inputs.\n""The subset can be either selected explicitly or chosen randomly (the user chooses what fraction\n""of the original inputs will be selected)." 
 

PLEARN_IMPLEMENT_OBJECT NNet  ,
"Ordinary Feedforward Neural Network with 1 or 2 hidden layers"  ,
"Neural network with many bells and whistles..." 
 

PLEARN_IMPLEMENT_OBJECT NeuralNet  ,
"DEPRECATED: Use NNet instead"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT NeighborhoodSmoothnessNNet  ,
"Feedforward neural network whose hidden units are smoothed according to input neighborhood\n"  ,
"TODO" 
 

PLEARN_IMPLEMENT_OBJECT EmbeddedLearner  ,
"Wraps an underlying learner"  ,
"EmbeddedLearner implements nothing but forwarding \n""calls to an underlying learner. It is typically used as\n""baseclass for learners that are built on top of another learner" 
 

PLEARN_IMPLEMENT_OBJECT AddCostToLearner  ,
"A PLearner that just adds additional costs to another PLearner."  ,
"In addition, this learner can be used to compute costs on bags instead of\n""individual samples, using the option 'compute_costs_on_bags'.\n""\n""Feel free to make this class evolve by adding new costs, or rewriting it\n""in a better fashion, because this one is certainly not perfect.\n""To use the lift cost
 

PLEARN_IMPLEMENT_OBJECT UniformDistribution  ,
"Implements uniform distribution over intervals."  ,
Currently,
only very few methods are implemented.\n""For  example,
to sample points in 2D  inx[a, b][c, d]
 

PLEARN_IMPLEMENT_OBJECT UnconditionalDistribution  ,
"This class is a simplified version of PDistribution for unconditional distributions."  ,
"Its only goal is to hide the conditional side of PDistributions to make it simpler." 
 

PLEARN_IMPLEMENT_OBJECT SpiralDistribution  ,
"Generates samples drawn from a 2D spiral" 
 

PLEARN_IMPLEMENT_OBJECT PDistribution  ,
"PDistribution is the base class for distributions.\n"  ,
"PDistributions derive from PLearner   (as some of them may be fitted to data with train()),
\n""but they have additional methods allowing for ex.to compute density or generate data points.\n""The default implementations of the learner-type methods for computing outputs and costs work as follows:\n""-the outputs_def option allows to choose which outputs are produced\n""-cost is a vector of size 1 containing only the negative log-  likelihood(NLL),
i.e.-log_density.\n""A PDistribution may be conditional   P(Y|X),
if the option 'conditional_flags'is set.If it is the  case,
\n""the input should always be made of both the 'input'  part(X) and the 'target'part(Y),
even if the\n""output may not need to use the Y part.The exception is when computeOutput() needs to be called\n""successively with the same value of X:in this  case,
after a first call with both X and  Y,
one may\n""only provide Y as  input,
and X will be assumed to be unchanged.\n" 
 

PLEARN_IMPLEMENT_OBJECT PConditionalDistribution  ,
"(THIS CLASS IS DEPRECATED, use PDistribution instead). Conditional distribution or conditional density model P(Y|X)"  ,
"Abstract superclass for conditional distribution classes.\n""It is a subclass of   PDistribution,
with the added method\n""setInput(Vec &input)\n""to set  X,
that must be called before PDistribution methods such as\n""  log_density,
cdf  ,
survival_fn  ,
expectation  ,
variance  ,
generate.\n""The PDistribution option output_defs must be set to specify\n""what the PLearner method computeOutput will produce.If it is\n""set to 'l'  (log_density),
'd'  (density),
'c'  (cdf),
or 's'(survival_fn)\n""then the input part of the data should contain both the input X and\n""the 'target'Y values(targetsize()==0).  Instead,
if output_defs is set to\n"" 'e'  (expectation) or 'v'(variance),
then the input part of the data should\n""contain only  X,
while the target part should contain Y\n" 
 

PLEARN_IMPLEMENT_OBJECT ManifoldParzen2  ,
"ManifoldParzen implements a manifold Parzen."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT LocallyWeightedDistribution  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT HistogramDistribution  ,
"Represents and possibly learns (using a smoother) a univariate distribution as a histogram."  ,
"This class represents a univariate distribution with a set of bins and their densities\n""The bins can be fixed or learned by a Binner   object
 

PLEARN_IMPLEMENT_OBJECT GaussMix  ,
"Gaussian   mixture,
either set non-parametrically or trained by EM."  ,
"GaussMix implements a mixture of L gaussians.\n""There are 4 possible parametrization types:\n"" - spherical : gaussians have covar   matrix = diag(sigma). Parameter used : sigma.\n"" - diagonal  : gaussians have covar matrix = diag(sigma_i). Parameters used : diags.\n"" - general   : gaussians have an unconstrained covariance matrix.\n""               The user specifies the number 'n_eigen' of eigenvectors kept when\n""               decomposing the covariance matrix. The remaining eigenvectors are\n""               considered as having a fixed eigenvalue equal to the next highest\n""               eigenvalue in the decomposition.\n"" - factor    : (not implemented!) as in the general case,
the gaussians are defined\n""with K<=D   vectors(through KxD matrix 'V'),
but these need not be\n""orthogonal/orthonormal.\n""The covariance matrix used will be V(t) V+psi with psi a D-vector\n""(through parameter diags).\n""2 parameters are common to all 4 types:\n""-alpha:the ponderation factor of the gaussians\n""-mu:their centers\n" 
 

PLEARN_IMPLEMENT_OBJECT GaussianProcessRegressor  ,
"Basic version of Gaussian Process regression."  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT GaussianDistribution  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT EmpiricalDistribution  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT Distribution  ,
"This class is deprecated, use PDistribution instead."  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ConditionalGaussianDistribution  ,
"ConditionalGaussianDistribution is a gaussian distribution ""in which the parameters could be learned or specified manually."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT ConditionalDistribution  ,
"ONE LINE DESCR"  ,
"You must call setInput to set the condition before using the distribution" 
 

PLEARN_IMPLEMENT_OBJECT ConditionalDensityNet  ,
"Neural Network that Implements a Positive Random Variable Conditional Density"  ,
"The input vector is used to compute parameters of an output density or output\n""cumulative distribution as well as output expected value. The ASSUMPTIONS\n""on the generating distribution P(Y|X) are the following:\n"" * Y is a single real value\n"" * 0 <= Y <=   maxY,
with maxY a known finite value\n""*the density has a mass point at  Y = 0\n""  * the density is continuous for Y
 

PLEARN_IMPLEMENT_OBJECT(MultiInstanceNNet,
    "Multi-instance feedforward neural network for probabilistic classification",
    "The data has the form of a set of input vectors x_i associated with a single\n"
    "label y. Each x_i is an instance and the overall set of instances is called a bag.\n"
    "We don't know which of the inputs is responsible for the label, i.e.\n"
    "there are hidden (not observed) labels y_i associated with each of the inputs x_i.\n"
    "We also know that y = 1 if at least one of the y_i is 1, otherwise y = 0, i.e.\n"
    "  y = y_1 or y_2 or ... y_m\n"
    "In terms of probabilities, ...")
 

PLEARN_IMPLEMENT_OBJECT(ClassifierFromDensity,
    "A classifier built from density estimators using Bayes' rule.",
    "ClassifierFromDensity allows one to build a classifier\n"
    "by building one density estimator for each class,\n"
    "and using Bayes' rule to combine them.")
 

PLEARN_IMPLEMENT_OBJECT(AdaBoost,
    "AdaBoost boosting algorithm for TWO-CLASS classification",
    "Given a classification weak-learner, this algorithm \"boosts\" it in\n"
    "order to obtain a much more powerful classification algorithm.\n"
    "The classifier is two-class, returning 0 or 1.")
 

PLEARN_IMPLEMENT_OBJECT YMDDatedVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT VVMatrix  ,
"A VMat that reads a '.vmat' file."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT VVec  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT VMatrixFromDistribution  ,
"A VMatrix built from sampling a distribution"  ,
"VMatrixFromDistribution implements a VMatrix whose data rows are drawn from a distribution\n""or that contains the density or log density sampled on a grid (depending on \"mode\").\n""The matrix is computed in memory at build time\n" 
 

PLEARN_IMPLEMENT_OBJECT PreprocessingVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT VMatLanguage  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT VecExtendedVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

VecExtendedVMatrix *.

PLEARN_IMPLEMENT_OBJECT UpsideDownVMatrix  ,
"ONE LINE DESCRIPTION"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT UniformVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

Uniform VMatrix *.

PLEARN_IMPLEMENT_OBJECT UniformizeVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

UniformizeVMatrix *.

PLEARN_IMPLEMENT_OBJECT TransposeVMatrix  ,
"A VMatrix that sees the transpose of another VMatrix."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT(TrainValidTestSplitter,
    "This splitter basically returns [Train+Valid, Test].",
    "The train set returned by the splitter is formed from the first n_train+n_valid\n"
    "samples in the dataset. The other samples are returned in the test set.\n"
    "The validation and test sets (given by the samples after the n_train-th one) can\n"
    "be shuffled in order to get different validation and test sets at each split.\n"
    "However, the train set (the first n_train samples) remains fixed.")
 

PLEARN_IMPLEMENT_OBJECT TrainTestSplitter  ,
"ONE LINE DESCR"  ,
"TrainTestSplitter implements a single split of the dataset into a training-set and a test-set (the test part being the last few samples of the dataset)" 
 

PLEARN_IMPLEMENT_OBJECT TrainTestBagsSplitter  ,
"Splits a dataset in two parts"  ,
"TrainTestBagsSplitter implements a single split of the dataset into\n""a training set and a test set (the test part being the last few samples of the dataset)\n""Optionally a third set is provided which is the training set itself (in order to test on it)\n" 
 

PLEARN_IMPLEMENT_OBJECT ToBagSplitter  ,
"A Splitter that makes any existing splitter operate on bags only."  ,
"The dataset provided must contain bag   information,
as described in\n""SumOverBagsVariable" 
 

PLEARN_IMPLEMENT_OBJECT(TestInTrainSplitter,
    "A splitter that adds the test points given by another splitter into the training set.",
    "The underlying splitter should return train / test sets of constant size.\n"
    "For instance, if the underlying splitter returns 3 splits of (train, test)\n"
    "pairs with sizes 2000 and 500, this splitter will return:\n"
    " - for 'percentage_added' == 5%, 15 splits of sizes 2100 and 100, with each\n"
    "   test point appearing once and only once in a train set and a test set\n"
    " - for 'percentage_added' == 20%, 6 splits of sizes (2400, 400) and (2400, 100), with\n"
    "   each test point appearing once or more in a train set, and only once in a\n"
    "   test set (note that the test points appearing more than once in a train set\n"
    "   will be those at the beginning of the test sets returned by the underlying\n"
    "   splitter)")
 

PLEARN_IMPLEMENT_OBJECT TemporalHorizonVMatrix  ,
"ONE LINE DESCR"  ,
" VMat class that delay the last entries of an underlying VMat by a certain horizon.\n" 
 

TemporalHorizonVMatrix *.

PLEARN_IMPLEMENT_OBJECT SubVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SubVMatrix *.

PLEARN_IMPLEMENT_OBJECT SubInputVMatrix  ,
"A VMat that only takes part of the input of its source VMat."  ,
"This can be useful for instance to only take the first k components\n""after applying some dimensionality reduction method." 
 

PLEARN_IMPLEMENT_OBJECT StrTableVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SparseVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

SparseVMatrix *.

PLEARN_IMPLEMENT_OBJECT SourceVMatrixSplitter  ,
"Returns the splits of an underlying   splitter,
seen by a SourceVMatrix."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT SourceVMatrix  ,
"Super-class for VMatrices that point to another one (the source vmatrix)"  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT(SortRowsVMatrix,
    "Sort the samples of a VMatrix according to one (or more) given columns.",
    "The implementation is not efficient at all; feel free to improve it!")
 

SortRowsVMatrix *.

PLEARN_IMPLEMENT_OBJECT ShiftAndRescaleVMatrix  ,
"ONE LINE DESCR"  ,
"ShiftAndRescaleVMatrix allows to shift and scale the first n_inputs columns of an underlying_vm.\n" 
 

ShiftAndRescaleVMatrix *.

PLEARN_IMPLEMENT_OBJECT SequentialSplitter  ,
"ONE LINE DESCR"  ,
"SequentialSplitter implements several   splits,
TODO:Comments" 
 

PLEARN_IMPLEMENT_OBJECT SelectRowsVMatrix  ,
"VMat class that selects samples from a source matrix according to given vector of indices."  ,
"" 
 

SelectRowsVMatrix *.

PLEARN_IMPLEMENT_OBJECT SelectRowsFileIndexVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

SelectRowsFileIndexVMatrix *.

PLEARN_IMPLEMENT_OBJECT(SelectColumnsVMatrix,
    "Selects variables from a source matrix according to given vector of indices.",
    "Alternatively, the variables can be given by their names.")
 

SelectColumnsVMatrix *.

PLEARN_IMPLEMENT_OBJECT RowsSubVMatrix  ,
"ONE LINE DESCRIPTION"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT RepeatSplitter  ,
"Repeat a given splitter a certain amount of   times,
with the possibility to\n""shuffle randomly the dataset each time"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT RemoveRowsVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

RemoveRowsVMatrix *.

PLEARN_IMPLEMENT_OBJECT RemoveDuplicateVMatrix  ,
"A VMatrix that removes any duplicated entry in its source VMat."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT RemapLastColumnVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

RemapLastColumnVMatrix *.

PLEARN_IMPLEMENT_OBJECT RegularGridVMatrix  ,
"ONE LINE DESCR"  ,
"RegularGridVMatrix represents the list of coordinates along a regularly spaced grid." 
 

PLEARN_IMPLEMENT_OBJECT RangeVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

Range VMatrix *.

PLEARN_IMPLEMENT_OBJECT ProcessingVMatrix  ,
"ONE LINE DESCRIPTION"  ,
"MULTI-LINE \nHELP" 
 

PLEARN_IMPLEMENT_OBJECT PrecomputedVMatrix  ,
"VMatrix that caches (pre-computes on disk) the content of a source vmatrix"  ,
"This sub-class of SourceVMatrix pre-computes the content of a source vmatrix\n""in a dmat or pmat file. The name of the disk file is obtained from the metadatadir option\n""followed by precomp.dmat or precomp.pmat" 
 

PLEARN_IMPLEMENT_OBJECT(PLearnerOutputVMatrix,
    "Use a PLearner (or a set of them) to transform the input part of a data set into the learners' outputs",
    "The input part of this VMatrix is obtained from the input part of an original data set on which\n"
    "one or more PLearner's computeOutput method is applied. The other columns of the original data set\n"
    "are copied as is. Optionally, the raw input can be copied as well, always in the input part of\n"
    "the new VMatrix. The order of the elements of a new row is as follows:\n"
    " - the outputs of the learners (concatenated) when applied on the input part of the original data,\n"
    " - optionally, the raw input part of the original data,\n"
    " - all the non-input columns of the original data.")
 

PLEARN_IMPLEMENT_OBJECT PairsVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

PairsVMatrix *.

PLEARN_IMPLEMENT_OBJECT OneHotVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

OneHotVMatrix *.

PLEARN_IMPLEMENT_OBJECT(MultiInstanceVMatrix,
    "Virtual Matrix for a multi instance dataset",
    "In a multi-instance dataset examples come in 'bags' with only one target label\n"
    "for each bag. This class is built upon a source text file that describes such\n"
    "a dataset (see the help on the 'filename' option for format details).\n"
    "The resulting VMatrix shows the following structure in its rows, with\n"
    "all the rows of a bag being consecutive. Each row represents an instance and has:\n"
    " - the input features for the instance\n"
    " - the bag's source_targetsize target values (repeated over bag instances)\n"
    " - a bag signal integer that identifies the beginning and end of the bag:\n"
    "   1 means the first instance of the bag\n"
    "   2 means the last instance of the bag\n"
    "   3 is for a bag with a single row (= 1+2)\n"
    "   0 is for intermediate instances.\n"
    "The targetsize of the VMatrix is automatically set to source_targetsize+1\n"
    "since the bag_signal is included (appended) in the target vector")
 

PLEARN_IMPLEMENT_OBJECT MovingAverageVMatrix  ,
"Perform moving average of given columns"  ,
"The user specifies one or more columns and for each such <column-name>\n""a moving average window size: a ma<windowsize>-<column-name> column is\n""created which will contain at row t the moving average from row t-<windowsize>+1\n""to t inclusively of <column-name>.\n" 
 

PLEARN_IMPLEMENT_OBJECT MemoryVMatrix  ,
"A VMatrix whose data is stored in memory."  ,
"The data can either be given directly by a   Mat,
or by another VMat that\n""will be precomputed in memory at build time.\n" 
 

MemoryVMatrix *.

PLEARN_IMPLEMENT_OBJECT(LocalNeighborsDifferencesVMatrix,
    "Computes the difference between each input row and its nearest neighbors.",
    "For each row x of the source VMatrix, the resulting row will be the\n"
    "concatenation of n_neighbors vectors, each of which is the difference\n"
    "between one of the nearest neighbors of x in the source and x itself.")
 

PLEARN_IMPLEMENT_OBJECT LearnerProcessedVMatrix  ,
"ONE LINE DESCR"  ,
"LearnerProcessedVMatrix implements a VMatrix processed on the fly by a learner (which will optionally be first trained on the source vmatrix)" 
 

PLEARN_IMPLEMENT_OBJECT(KNNVMatrix,
    "A VMatrix that sees the nearest neighbours of each sample in the source VMat.",
    "Each sample is followed by its (knn-1) nearest neighbours.\n"
    "To each row is appended an additional target, which is:\n"
    " - 1 if it is the first of a bag of neighbours,\n"
    " - 2 if it is the last of a bag,\n"
    " - 0 if it is none of these,\n"
    " - 3 if it is both (only for knn == 1).\n"
    "In addition, if a kernel_pij kernel is provided, p_ij is appended to the input\n"
    "part of the VMatrix")
 

PLEARN_IMPLEMENT_OBJECT KFoldSplitter  ,
"K-fold cross-validation splitter."  ,
"KFoldSplitter implements K splits of the dataset into a training-set and a test-set.\n""If the number of splits is higher than the number of   examples,
leave-one-out cross-validation\n""will be performed." 
 

PLEARN_IMPLEMENT_OBJECT KernelVMatrix  ,
"ONE LINE DESC"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(JulianizeVMatrix,
    "ONE LINE DESCR",
    "JulianizeVMatrix provides a conversion from a VMat containing dates\n"
    "in an explicit 3-column (YYYY,MM,DD) or 6-column (YYYY,MM,DD,HH,MM,SS)\n"
    "format to a Julian day number format (including fractional part to\n"
    "represent the hour within the day). The dates can be at any columns,\n"
    "not only columns 0-2 (or 0-5). More than a single date can be converted.")
 

PLEARN_IMPLEMENT_OBJECT JoinVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT InterleaveVMatrix  ,
"ONE LINE DESC"  ,
"ONE LINE HELP" 
 

InterleaveVMatrix *.

PLEARN_IMPLEMENT_OBJECT IndexedVMatrix  ,
"ONE LINE DESCR"  ,
" VMat class that sees a matrix as a collection of triplets (row, column, value)\n""Thus it is a N x 3   matrix
 

PLEARN_IMPLEMENT_OBJECT(GramVMatrix,
    "Computes the Gram matrix of a given kernel.",
    "Currently, this class inherits from a MemoryVMatrix, and the Gram matrix\n"
    "is stored in memory.")
 

PLEARN_IMPLEMENT_OBJECT GetInputVMatrix  ,
"This VMatrix only sees the input part of its source VMatrix."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT GeneralizedOneHotVMatrix  ,
"ONE LINE DESC"  ,
"ONE LINE HELP" 
 

GeneralizedOneHotVMatrix *.

PLEARN_IMPLEMENT_OBJECT FractionSplitter  ,
"A Splitter that can extract several subparts of a dataset in each split."  ,
"Ranges of the dataset are specified explicitly as start:end   positions,
\n""that can be absolute or relative to the number of samples in the training set." 
 

PLEARN_IMPLEMENT_OBJECT ForwardVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT FinancePreprocVMatrix  ,
"ONE LINE DESCR"  ,
"FinancePreprocVMatrix implements a VMatrix with extra preprocessing columns." 
 

PLEARN_IMPLEMENT_OBJECT FilterSplitter  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(FilteredVMatrix,
    "A filtered view of its source vmatrix",
    "The filter is an expression in the VPL language.\n"
    "The filtered indexes are saved in the metadata directory, which NEEDS to\n"
    "be provided.")
 

PLEARN_IMPLEMENT_OBJECT FileVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

FileVMatrix *.

PLEARN_IMPLEMENT_OBJECT ExtendedVMatrix  ,
"ONE_LINE_DESC"  ,
"ONE_LINE_HELP" 
 

ExtendedVMatrix *.

PLEARN_IMPLEMENT_OBJECT ExplicitSplitter  ,
"ONE LINE DESCR"  ,
"ExplicitSplitter allows you to define a 'splitter' by giving explicitly the datasets for each split\n""as a matrix VMatrices.\n""(This splitter in effect ignores the 'dataset' it is given with setDataSet) \n" 
 

PLEARN_IMPLEMENT_OBJECT DiskVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT DBSplitter  ,
"A Splitter that contains several databases."  ,
"The databases to be used can be specified with the 'databases' option. " 
 

PLEARN_IMPLEMENT_OBJECT(DatedJoinVMatrix,
    "Join two vmatrices, taking into account a date field.",
    "The two vmatrices play an asymmetric role. They are called\n"
    "master and slave. The resulting vmatrix has one row for each row\n"
    "of the master vmatrix (or optionally of the slave vmatrix). Its\n"
    "columns are a concatenation of selected columns of the master vmatrix\n"
    "and of selected columns of the slave which 'match' according to a rule\n"
    "(always in the order: master fields, slave fields). Matching is\n"
    "obtained using shared 'key fields'. Optionally, for matching, a date field\n"
    "in the master is forced to belong to a date interval in the slave")
 

PLEARN_IMPLEMENT_OBJECT(CumVMatrix,
    "Add columns that are cumulated values of given columns",
    "The user specifies one or more columns and for each such <column-name>\n"
    "a cum-<column-name> column is created which will contain the sum from row 0\n"
    "to the current row of <column-name>.")
 

PLEARN_IMPLEMENT_OBJECT CrossReferenceVMatrix  ,
"ONE LINE DESC"  ,
"ONE LINE HELP" 
 

CrossReferenceVMatrix *.

PLEARN_IMPLEMENT_OBJECT(ConcatRowsVMatrix,
    "Concatenates the rows of a number of VMat.",
    "It can also be used to select fields which are common to those VMat,\n"
    "using the 'only_common_fields' option.\n"
    "Otherwise, the fields are just assumed to be those of the first VMat.")
 

ConcatRowsVMatrix *.

PLEARN_IMPLEMENT_OBJECT ConcatRowsSubVMatrix  ,
"ONE LINE DESC"  ,
"ONE LINE HELP" 
 

ConcatRowsSubVMatrix *.

PLEARN_IMPLEMENT_OBJECT ConcatColumnsVMatrix  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

ConcatColumnsVMatrix *.

PLEARN_IMPLEMENT_OBJECT CompressedVMatrix  ,
"ONE LINE DESCR"  ,
"ONE LINE HELP" 
 

PLEARN_IMPLEMENT_OBJECT CenteredVMatrix  ,
"A VMatrix that centers a dataset."  ,
"The empirical mean is subtracted to each row of the underlying VMat." 
 

PLEARN_IMPLEMENT_OBJECT ByteMemoryVMatrix  ,
"ONE_LINE_DESCR"  ,
"ONE LINE HELP" 
 

PLEARN_IMPLEMENT_OBJECT BootstrapVMatrix  ,
"A VMatrix that sees a bootstrap subset of its parent VMatrix.\n""This is not a real bootstrap since a sample can only appear once."  ,
"" 
 

BootstrapVMatrix *.

PLEARN_IMPLEMENT_OBJECT BootstrapSplitter  ,
"A splitter whose splits are bootstrap samples of the original dataset"  ,
"BootstrapSplitter implements a ..." 
 

PLEARN_IMPLEMENT_OBJECT BatchVMatrix  ,
"ONE LINE DESCR"  ,
"VMat class that replicates small parts of a matrix   (mini-batches),
so that each mini-batch appears twice(consecutively)." 
 

PLEARN_IMPLEMENT_OBJECT AutoVMatrix  ,
"Automatically builds an appropriate VMat given its specification."  ,
"AutoVMatrix tries to interpret the given 'specification' (it will call getDataSet) and\n""will be a wrapper around the appropriate VMatrix   type,
simply forwarding calls to it.\n""AutoVMatrix can be used to access the UCI databases.\n" 
 

PLEARN_IMPLEMENT_OBJECT AsciiVMatrix  ,
"ONE LINE DESCR"  ,
"AsciiVMatrix implements a file in ascii format" 
 

PLEARN_IMPLEMENT_OBJECT WeightedSumSquareVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

WeightedSumSquareVariable *.

PLEARN_IMPLEMENT_OBJECT VecElementVariable  ,
"Variable that is the element of vector vec indexed by variable input"  ,
"NO HELP" 
 

VecElementVariable *.

PLEARN_IMPLEMENT_OBJECT VarRowVariable  ,
"Variable that is the row of the input1 variable indexed by the input2 variable"  ,
"NO HELP" 
 

VarRowVariable *.

PLEARN_IMPLEMENT_OBJECT VarRowsVariable  ,
"Variable that is a subset of a matrix's rows; ""input1 : matrix from which rows are selected; ""input2 : vector whose elements are row indices in input1"  ,
"NO HELP" 
 

VarRowsVariable *.

PLEARN_IMPLEMENT_OBJECT VarElementVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

VarElementVariable *.

PLEARN_IMPLEMENT_OBJECT VarColumnsVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

VarColumnsVariable *.

PLEARN_IMPLEMENT_OBJECT VarArrayElementVariable  ,
"Selects one element of a VarArray according to a Var index */"  ,
"NO HELP" 
 

VarArrayElementVariable *.

PLEARN_IMPLEMENT_OBJECT(UnfoldedSumOfVariable,
    "Variable that sums the value of a Func evaluated on each row of a matrix.",
    "However, unlike the SumOfVariable, it does so by unfolding the Func (up to a given\n"
    "maximum number of times 'max_bag_size'), and it allows that number to be variable.\n"
    "Each of the unfolded Funcs is applied on a different row of the input matrix.\n"
    "The number of rows to sum is specified on the fly by another input, the bag_size.")
 

UnfoldedSumOfVariable *.

PLEARN_IMPLEMENT_OBJECT UnfoldedFuncVariable  ,
"Variable that puts in the rows of its output matrix the value\n""of a Func evaluated on each row of an input matrix.\n"  ,
"The input_matrix and output matrix have n_unfold rows. A separate propagation path\n""is created that maps (using the Func as a template) each input row to each output row.\n""The parents of this variable include the non-input parents of the Func.\n" 
 

UnfoldedFuncVariable *.

PLEARN_IMPLEMENT_OBJECT UnequalConstantVariable  ,
"A scalar var; equal 1 if input1!  = c,
0 otherwise"  ,
"NO HELP" 
 

UnequalConstantVariable *.

PLEARN_IMPLEMENT_OBJECT(UnaryHardSlopeVariable,
    "Hard slope function whose Var input is only the argument of the function.",
    "Maps x (elementwise) to 0 if x < left, 1 if x > right, and is linear in between otherwise.")
 

UnaryHardSlopeVariable *.

PLEARN_IMPLEMENT_OBJECT TransposeProductVariable  ,
"Matrix product between transpose of matrix1 and matrix2"  ,
"NO HELP" 
 

TransposeProductVariable *.

PLEARN_IMPLEMENT_OBJECT TimesVariable  ,
"Multiplies 2 matrix vars of same size elementwise"  ,
"NO HELP" 
 

TimesVariable *.

PLEARN_IMPLEMENT_OBJECT TimesScalarVariable  ,
"Multiplies a matrix var by a scalar var"  ,
"NO HELP" 
 

TimesScalarVariable *.

PLEARN_IMPLEMENT_OBJECT TimesRowVariable  ,
"Multiplies each row of a matrix var elementwise with a single row variable"  ,
"NO HELP" 
 

TimesRowVariable *.

PLEARN_IMPLEMENT_OBJECT TimesConstantVariable  ,
"Multiplies a matrix var by a scalar constant"  ,
"NO HELP" 
 

TimesConstantVariable *.

PLEARN_IMPLEMENT_OBJECT TimesColumnVariable  ,
"Multiplies each column of a matrix var elementwise with a single column variable"  ,
"NO HELP" 
 

TimesColumnVariable *.

PLEARN_IMPLEMENT_OBJECT TanhVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

TanhVariable *.

PLEARN_IMPLEMENT_OBJECT SumVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SumVariable *.

PLEARN_IMPLEMENT_OBJECT SumSquareVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SumSquareVariable *.

PLEARN_IMPLEMENT_OBJECT SumOverBagsVariable  ,
"Variable that sums the value of a Func each time evaluated on a subsequence of a VMat\n" 
 

SumOverBagsVariable *.

PLEARN_IMPLEMENT_OBJECT SumOfVariable  ,
"Variable that sums the value of a Func evaluated on each row of a VMat"  ,
"NO HELP" 
 

SumOfVariable *.

PLEARN_IMPLEMENT_OBJECT SumAbsVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SumAbsVariable *.

PLEARN_IMPLEMENT_OBJECT SubsampleVariable  ,
"A subsample var; equals subsample(input, the_subsamplefactor)"  ,
"NO HELP" 
 

SubsampleVariable *.

PLEARN_IMPLEMENT_OBJECT SubMatVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SubMatVariable *.

PLEARN_IMPLEMENT_OBJECT SubMatTransposeVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SubMatTransposeVariable *.

PLEARN_IMPLEMENT_OBJECT SquareVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SquareVariable *.

PLEARN_IMPLEMENT_OBJECT SquareRootVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SquareRootVariable *.

PLEARN_IMPLEMENT_OBJECT SourceVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SourceVariable *.

PLEARN_IMPLEMENT_OBJECT(SoftSlopeVariable,
    "This Var computes the soft_slope function",
    "The soft_slope function is a soft version of a piecewise-linear function\n"
    "(as smoothness goes to infty). More precisely, it converges to a function that is\n"
    "0 in [-infty, left], linear in [left, right], and 1 in [right, infty], and is continuous.\n"
    "It is always monotonically increasing wrt x (positive derivative in x).\n"
    "If the arguments are vectors then the operation is performed element by element on all of them.")
 

SoftSlopeVariable *.

PLEARN_IMPLEMENT_OBJECT SoftSlopeIntegralVariable  ,
"This Var computes the integral of the soft_slope function in an interval."  ,
"Compute the integral of soft_slope(x,s,l,r) over x from a to b\n" 
 

SoftSlopeIntegralVariable *.

PLEARN_IMPLEMENT_OBJECT SoftplusVariable  ,
"This is the primitive of a sigmoid: log(1+exp(x))"  ,
"NO HELP" 
 

SoftplusVariable *.

PLEARN_IMPLEMENT_OBJECT SoftmaxVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SoftmaxVariable *.

PLEARN_IMPLEMENT_OBJECT SoftmaxLossVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SoftmaxLossVariable *.

PLEARN_IMPLEMENT_OBJECT(SignVariable,
    "sign(x): 1 if x > 0, -1 if x < 0, 0 otherwise")
"sign(x = 1 if x,
 

SignVariable *.

PLEARN_IMPLEMENT_OBJECT SigmoidVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SigmoidVariable *.

PLEARN_IMPLEMENT_OBJECT SemiSupervisedProbClassCostVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

SemiSupervisedProbClassVariable *.

PLEARN_IMPLEMENT_OBJECT RowSumVariable  ,
"Result is a single column that contains the sum of each row of the input"  ,
"NO HELP" 
 

RowSumVariable *.

PLEARN_IMPLEMENT_OBJECT RowAtPositionVariable  ,
"Variables positionned inside a larger zero variable ..."  ,
"NO HELP" 
 

RowAtPositionVariable *.

PLEARN_IMPLEMENT_OBJECT RightPseudoInverseVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

RightPseudoInverseVariable *.

PLEARN_IMPLEMENT_OBJECT ReshapeVariable  ,
"Variable that views another   variable,
but with a different length() and width()""(the only restriction being that length()*width() remain the same)"  ,
"NO HELP" 
 

ReshapeVariable *.

PLEARN_IMPLEMENT_OBJECT(ProjectionErrorVariable,
    "Computes the projection error of a set of vectors on a non-orthogonal basis.",
    "The first input is a set of n_dim vectors (possibly seen as a single vector of their concatenation) f_i, each in R^n.\n"
    "The second input is a set of T vectors (possibly seen as a single vector of their concatenation) t_j ...")
 

ProjectionErrorVariable *.

PLEARN_IMPLEMENT_OBJECT ProductVariable  ,
"Matrix product"  ,
"NO HELP" 
 

ProductVariable *.

PLEARN_IMPLEMENT_OBJECT ProductTransposeVariable  ,
"Matrix product between matrix1 and transpose of matrix2"  ,
"NO HELP" 
 

ProductTransposeVariable *.

PLEARN_IMPLEMENT_OBJECT PowVariableVariable  ,
"x^y where x and y are variables but y is scalar ""or it has the same size as x ,
"NO HELP" 
 

PowVariableVariable *.

PLEARN_IMPLEMENT_OBJECT PowVariable  ,
"Elementwise pow (returns 0 wherever input is negative)"  ,
"NO HELP" 
 

PowVariable *.

PLEARN_IMPLEMENT_OBJECT PlusVariable  ,
"Adds 2 matrix vars of same size"  ,
"NO HELP" 
 

PlusVariable *.

PLEARN_IMPLEMENT_OBJECT PlusScalarVariable  ,
"Adds a scalar var to a matrix var"  ,
"NO HELP" 
 

PlusScalarVariable *.

PLEARN_IMPLEMENT_OBJECT PlusRowVariable  ,
"Adds a single-row var to each row of a matrix var"  ,
"NO HELP" 
 

PlusRowVariable *.

PLEARN_IMPLEMENT_OBJECT PlusConstantVariable  ,
"Adds a scalar constant to a matrix var"  ,
"NO HELP" 
 

PlusConstantVariable *.

PLEARN_IMPLEMENT_OBJECT PlusColumnVariable  ,
"Adds a single-column var to each column of a matrix var"  ,
"NO HELP" 
 

PlusColumnVariable *.

PLEARN_IMPLEMENT_OBJECT PLogPVariable  ,
"Returns the elementwise x*log(x) in a (hopefully!) numerically stable way"  ,
"NO HELP" 
 

PLogPVariable *.

PLEARN_IMPLEMENT_OBJECT PDistributionVariable  ,
"Variable that represents a random variable according to some PDistribution object"  ,
"" 
 

PDistributionVariable *.

PLEARN_IMPLEMENT_OBJECT(OneHotVariable,
    "Represents a vector of a given length, that has value 1 at the index\n"
    "given by another variable and 0 everywhere else",
    "NO HELP")
 

OneHotVariable *.

PLEARN_IMPLEMENT_OBJECT OneHotSquaredLoss   ) 
 

OneHotSquaredLoss *.

PLEARN_IMPLEMENT_OBJECT(NllSemisphericalGaussianVariable,
    "Computes the negative log-likelihood of a Gaussian on some data point, depending on the nearest neighbors.",
    "This class implements the negative log-likelihood cost of a Markov chain that\n"
    "uses semispherical gaussian transition probabilities. The parameters of the\n"
    "semispherical gaussians are a tangent plane, two variances,\n"
    "one mean and the distances of the point to its nearest neighbors.\n"
    "The two variances correspond to the shared variance of every manifold direction\n"
    "and of every noise direction.\n"
    "This variable is used to do gradient descent on the parameters, but\n"
    "not to estimate the likelihood of the Markov chain at some point, which is\n"
    "more complex to estimate.")
 

NllSemisphericalGaussianVariable *.

PLEARN_IMPLEMENT_OBJECT NegCrossEntropySigmoidVariable  ,
"Compute sigmoid of its first   input,
and then computes the negative""cross-entropy cost"  ,
"NO HELP" 
 

NegCrossEntropySigmoidVariable *.

PLEARN_IMPLEMENT_OBJECT NegateElementsVariable  ,
"Elementwise negation and inversion..."  ,
"NO HELP" 
 

NegateElementsVariable *.

PLEARN_IMPLEMENT_OBJECT MulticlassLossVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MulticlassLossVariable *.

PLEARN_IMPLEMENT_OBJECT MinVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinVariable *.

PLEARN_IMPLEMENT_OBJECT MinusVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinusVariable *.

PLEARN_IMPLEMENT_OBJECT MinusTransposedColumnVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinusTransposedColumnVariable *.

PLEARN_IMPLEMENT_OBJECT MinusScalarVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinusScalarVariable *.

PLEARN_IMPLEMENT_OBJECT MinusRowVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinusRowVariable *.

PLEARN_IMPLEMENT_OBJECT MinusColumnVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MinusColumnVariable *.

PLEARN_IMPLEMENT_OBJECT MiniBatchClassificationLossVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MiniBatchClassificationLossVariable *.

PLEARN_IMPLEMENT_OBJECT MaxVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MaxVariable *.

PLEARN_IMPLEMENT_OBJECT Max2Variable   ) 
 

Max2Variable *.

PLEARN_IMPLEMENT_OBJECT MatRowVariable  ,
"Variable that is the row of matrix mat indexed by variable input"  ,
"NO HELP" 
 

MatRowVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixSumOfVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MatrixSumOfVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixSoftmaxVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MatrixSoftmaxVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixSoftmaxLossVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MatrixSoftmaxLossVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixOneHotSquaredLoss  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

MatrixOneHotSquaredLoss *.

PLEARN_IMPLEMENT_OBJECT MatrixInverseVariable  ,
"Matrix inversions... "  ,
"NO HELP" 
 

MatrixInverseVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixElementsVariable  ,
"Fills the elements of a matrix using the given scalar Variable ""  expression,
that depends on index variables i and  j,
that loop from""0 to ni-1 and 0 to nj-1 respectively."  ,
"NO HELP" 
 

MatrixElementsVariable *.

PLEARN_IMPLEMENT_OBJECT MatrixAffineTransformVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MatrixAffineTransformFeedbackVariable  ,
"Affine transformation of a MATRIX variable."  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MarginPerceptronCostVariable  ,
"Compute sigmoid of its first   input,
and then computes the negative""cross-entropy cost"  ,
"NO HELP" 
 

MarginPerceptronCostVariable *.

PLEARN_IMPLEMENT_OBJECT LogVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

LogVariable *.

PLEARN_IMPLEMENT_OBJECT LogSumVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT LogSoftmaxVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

LogSoftmaxVariable *.

PLEARN_IMPLEMENT_OBJECT LogAddVariable   ) 
 

LogAddVariable *.

PLEARN_IMPLEMENT_OBJECT(LiftOutputVariable,
    "The result is the output if the target is 1, and the opposite of the output\n"
    "otherwise. This variable is to be used with a LiftStatsCollector, in a\n"
    "stochastic gradient descent.",
    "NO HELP")
 

LiftOutputVariable *.

PLEARN_IMPLEMENT_OBJECT LeftPseudoInverseVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

LeftPseudoInverseVariable *.

PLEARN_IMPLEMENT_OBJECT IsSmallerVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

IsSmallerVariable *.

PLEARN_IMPLEMENT_OBJECT IsMissingVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

IsMissingVariable *.

PLEARN_IMPLEMENT_OBJECT IsLargerVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

IsLargerVariable *.

PLEARN_IMPLEMENT_OBJECT IsAboveThresholdVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

IsAboveThresholdVariable *.

PLEARN_IMPLEMENT_OBJECT InvertElementsVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

InvertElementsVariable *.

PLEARN_IMPLEMENT_OBJECT(InterValuesVariable,
    "if values = [x1, x2, ..., x10], the resulting variable is\n"
    "[(x1+x2)/2, (x2+x3)/2, ..., (x9+x10)/2]",
    "NO HELP")
 

InterValuesVariable *.

PLEARN_IMPLEMENT_OBJECT IndexAtPositionVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

IndexAtPositionVariable *.

PLEARN_IMPLEMENT_OBJECT IfThenElseVariable  ,
"Variable that represents the element-wise IF-THEN-ELSE"  ,
"NO HELP" 
 

IfThenElseVariable *.

PLEARN_IMPLEMENT_OBJECT(HardSlopeVariable,
    "This Var computes the hard_slope function",
    "The hard_slope function is a piecewise-linear function:\n"
    "0 in [-infty, left], linear in [left, right], and 1 in [right, infty], and continuous.\n"
    "If the arguments are vectors then the operation is performed element by element on all of them.")
 

HardSlopeVariable *.
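For readers who want the scalar mapping behind the two slope Variables above spelled out, here is a minimal plain-C++ sketch of hard_slope as described (this is illustrative only, not the Variable class implementation; the function name is ours):

    // hard_slope(x, left, right): 0 below `left`, 1 above `right`,
    // linear interpolation in between (matches the description above).
    inline double hard_slope(double x, double left, double right)
    {
        if (x <= left)  return 0.0;
        if (x >= right) return 1.0;
        return (x - left) / (right - left);
    }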

PLEARN_IMPLEMENT_OBJECT Function  ,
"Implements a function defined as a var graph"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ExtendedVariable  ,
"Variable that extends the input variable by appending rows at ""its top and bottom and columns at its left and right."  ,
"NO HELP" 
 

ExtendedVariable *.

PLEARN_IMPLEMENT_OBJECT ExpVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

ExpVariable *.

PLEARN_IMPLEMENT_OBJECT ErfVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

ErfVariable *.

PLEARN_IMPLEMENT_OBJECT EqualVariable  ,
"A scalar var; equal 1 if   input1 = =input2,
0 otherwise"  ,
"NO HELP" 
 

EqualVariable *.

PLEARN_IMPLEMENT_OBJECT EqualScalarVariable  ,
"A scalar var; equal 1 if   input1 = =input2,
0 otherwise"  ,
"NO HELP" 
 

EqualScalarVariable *.

PLEARN_IMPLEMENT_OBJECT EqualConstantVariable  ,
"A scalar var; equal 1 if   input1 = =input2,
0 otherwise"  ,
"NO HELP" 
 

EqualConstantVariable *.

PLEARN_IMPLEMENT_OBJECT ElementAtPositionVariable  ,
"A variable of size   length() x width()
 

ElementAtPositionVariable *.

PLEARN_IMPLEMENT_OBJECT DuplicateScalarVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

DuplicateScalarVariable *.

PLEARN_IMPLEMENT_OBJECT DuplicateRowVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

DuplicateRowVariable *.

PLEARN_IMPLEMENT_OBJECT DuplicateColumnVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

DuplicateColumnVariable *.

PLEARN_IMPLEMENT_OBJECT DotProductVariable  ,
"Dot product between 2 matrices (or vectors) with same number of elements"  ,
"NO HELP" 
 

DotProductVariable *.

PLEARN_IMPLEMENT_OBJECT DivVariable  ,
"Divide 2 matrix vars of same size elementwise"  ,
"NO HELP" 
 

DivVariable *.

PLEARN_IMPLEMENT_OBJECT DilogarithmVariable  ,
"This Var computes the dilogarithm function" 
 

DilogarithmVariable *.

PLEARN_IMPLEMENT_OBJECT DiagonalizedFactorsProductVariable  ,
"Variable that represents the leftmatrix*diag(vector)*rightmatrix product"  ,
"The three parents are respectively the left matrix   U,
the center vector  d
 

DiagonalizedFactorsProductVariable *.

PLEARN_IMPLEMENT_OBJECT DeterminantVariable  ,
"The argument must be a square matrix Var and the result is its determinant"  ,
"NO HELP" 
 

DeterminantVariable *.

PLEARN_IMPLEMENT_OBJECT CutBelowThresholdVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

CutBelowThresholdVariable *.

PLEARN_IMPLEMENT_OBJECT CutAboveThresholdVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

CutAboveThresholdVariable *.

PLEARN_IMPLEMENT_OBJECT CrossEntropyVariable   ) 
 

CrossEntropyVariable *.

PLEARN_IMPLEMENT_OBJECT ConvolveVariable  ,
"A convolve var; equals convolve(input, mask)"  ,
"NO HELP" 
 

ConvolveVariable *.

PLEARN_IMPLEMENT_OBJECT ConcatRowsVariable  ,
"Concatenation of the rows of several variables"  ,
"NO HELP" 
 

ConcatRowsVariable *.

PLEARN_IMPLEMENT_OBJECT ConcatOfVariable  ,
"Concatenates the results of each operation in the loop into the resulting variable"  ,
"NO HELP" 
 

ConcatOfVariable *.

PLEARN_IMPLEMENT_OBJECT ConcatColumnsVariable  ,
"Concatenation of the columns of several variables"  ,
"NO HELP" 
 

ConcatColumnsVariable *.

PLEARN_IMPLEMENT_OBJECT ColumnSumVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ColumnIndexVariable  ,
"Return a row vector with the elements indexed in each column"  ,
"NO HELP" 
 

ColumnIndexVariable *.

PLEARN_IMPLEMENT_OBJECT ClassificationLossVariable  ,
"Indicator(classnum==argmax(netout))"  ,
"NO HELP" 
 

ClassificationLossVariable *.

PLEARN_IMPLEMENT_OBJECT BinaryClassificationLossVariable   ) 
 

BinaryClassificationLossVariable *.

PLEARN_IMPLEMENT_OBJECT ArgminVariable  ,
"Compute the index of the minimum value in the input"  ,
"NO HELP" 
 

ArgminVariable *.

PLEARN_IMPLEMENT_OBJECT ArgminOfVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ArgmaxVariable  ,
"Compute the index of the maximum value in the input"  ,
"NO HELP" 
 

ArgmaxVariable *.

PLEARN_IMPLEMENT_OBJECT AffineTransformWeightPenalty  ,
"Affine transformation with Weight decay terms"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT AffineTransformVariable  ,
"Affine transformation of a vector variable."  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT AbsVariable  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

AbsVariable *.

PLEARN_IMPLEMENT_OBJECT HTryCombinations  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT HCoordinateDescent  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT HTryAll  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT HSetVal  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(GradientOptimizer,
    "Optimization by gradient descent.",
    "GradientOptimizer is the simple usual gradient descent algorithm\n"
    "(the number of samples on which to estimate gradients before an\n"
    "update, which determines whether we are performing 'batch',\n"
    "'stochastic' or even 'minibatch' descent, is currently specified outside\n"
    "this class, typically in the number of samples of the meanOf function\n"
    "to be optimized, as its 'nsamples' parameter).\n"
    "Options for GradientOptimizer are: [option_name: <type> (default)]\n"
    " - start_learning_rate: <real> (0.01)\n"
    "   the initial learning rate\n"
    " - decrease_constant: <real> (0)\n"
    "   the learning rate decrease constant\n"
    "\n"
    "GradientOptimizer derives from Optimizer.")
 

PLEARN_IMPLEMENT_OBJECT(ConjGradientOptimizer,
    "Optimizer based on the conjugate gradient method.",
    "The conjugate gradient algorithm is basically the following:\n"
    " - 0: initialize the search direction d = -gradient\n"
    " - 1: perform a line search along direction d for the minimum of the gradient\n"
    " - 2: move to this minimum, update the search direction d and go to step 1\n"
    "There are various methods available through the options for both steps 1 and 2.")
 

PLEARN_IMPLEMENT_OBJECT AdaptGradientOptimizer  ,
"An optimizer that performs gradient descent with learning rate adaptation."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT ShellScript  ,
"Allows one to run shell commands (especially within a script)."  ,
"This runnable object will execute the given shell commands when run." 
 

PLEARN_IMPLEMENT_OBJECT RunObject  ,
"Allows to build a non-runnable object in a PLearn script."  ,
"This Object implements a run() method so that it can be used in\n""a PLearn   script,
in order to build another Object given by the\n""'underlying_object'option without PLearn returning an error.\n" 
 

PLEARN_IMPLEMENT_OBJECT NearestNeighborPredictionCost  ,
"ONE LINE DESCRIPTION"  ,
"MULTI LINE\nHELP" 
 

PLEARN_IMPLEMENT_OBJECT(VecStatsCollector,
    "Collects basic statistics on a vector",
    "VecStatsCollector allows one to collect statistics on a series of vectors.\n"
    "Individual vectors x are presented by calling update(x), and this class will\n"
    "collect both individual statistics for each element (as a Vec<StatsCollector>)\n"
    "as well as (optionally) compute the covariance matrix.")
 

PLEARN_IMPLEMENT_OBJECT QuantilesStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT LiftStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MaxStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MinStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SharpeRatioStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT StderrStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT StddevStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ExpMeanStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MeanStatsIterator  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(StatsCollector,
    "Collects basic statistics",
    "A StatsCollector allows to compute basic global statistics for a series of numbers,\n"
    "as well as statistics within automatically determined ranges.\n"
    "The first maxnvalues encountered values will be used as reference points to define\n"
    "the ranges, so to get reasonable results, your sequence should be iid, and NOT sorted!\n"
    "\n"
    "The following statistics are available:\n"
    " - E           Sample mean\n"
    " - V           Sample variance\n"
    " - STDDEV      Sample standard deviation\n"
    " - STDERROR    Standard error of the mean\n"
    " - MIN         Minimum value\n"
    " - MAX         Maximum value\n"
    " - SUM         Sum of observations\n"
    " - SUMSQ       Sum of squares\n"
    " - FIRST       First observation\n"
    " - LAST        Last observation\n"
    " - N           Total number of observations\n"
    " - NMISSING    Number of missing observations\n"
    " - NNONMISSING Number of non-missing observations\n"
    " - SHARPERATIO Mean divided by standard deviation")
 

PLEARN_IMPLEMENT_OBJECT(ScaledConditionalCDFSmoother,
    "This smoothes a low-resolution histogram using as prior a high-resolution one.",
    "This class takes as 'prior_cdf' a detailed histogram (usually derived from\n"
    "an unconditional distribution) and uses it to smooth a given survival\n"
    "function and provide extra detail (high resolution).\n"
    "Two smoothing formulas are provided, both of which guarantee that the smoothed\n"
    "survival function takes the same value as the raw one at or near original bin\n"
    "positions. In between the original bin positions, the smoothed survival\n"
    "is obtained by applying one of the two possible formulas, according to the\n"
    "preserve_relative_density option.")
 

PLEARN_IMPLEMENT_OBJECT(ManualBinner,
    "Binner with predefined cut-points.",
    "ManualBinner implements a Binner for which cutpoints are predefined.\n"
    "Its getBinning function doesn't have to look at the data; it simply\n"
    "builds a RealMapping from the supplied bin_positions.")
 

PLEARN_IMPLEMENT_OBJECT LimitedGaussianSmoother  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(LiftStatsCollector,
    "Computes the performance of a binary classifier",
    "The following statistics can be requested out of getStat():\n"
    " - LIFT = % of positive examples in the first n samples, divided by the % of\n"
    "   positive examples in the whole database\n"
    " - LIFT_MAX = best performance that could be achieved, if all positive examples\n"
    "   were selected in the first n samples (where n = lift_fraction * nsamples).\n"
    "IMPORTANT: if you add more samples after you call finalize() (or get any of the\n"
    "statistics above), some samples may be wrongly discarded and further statistics\n"
    "may be wrong.\n"
    "\n"
    "Here are the typical steps to follow to optimize the lift with a neural network:\n"
    " - add a lift_output cost to cost_funcs (e.g. cost_funcs = [\"stable_cross_entropy\" \"lift_output\"];)\n"
    " - change the template_stats_collector of your PTester:\n"
    "     template_stats_collector =\n"
    "       LiftStatsCollector (\n"
    "         output_column = \"lift_output\" ;\n"
    "         opposite_lift = 1 ; # if you want to optimize the lift\n"
    "         sign_trick = 1 ;\n"
    "       )\n"
    " - add the lift to its statnames:\n"
    "     statnames = [ \"E[train.E[stable_cross_entropy]]\", \"E[test.E[stable_cross_entropy]]\",\n"
    "                   \"E[train.LIFT]\", \"E[test.LIFT]\" ]\n"
    " - maybe also change which_cost in your HyperOptimize strategy.")
 

PLEARN_IMPLEMENT_OBJECT ConditionalStatsCollector  ,
"ONE LINE DESCRIPTION"  ,
"MULTI LINE\nHELP" 
 

PLEARN_IMPLEMENT_OBJECT(ConditionalCDFSmoother,
    "Smoother that combines a detailed prior curve with a rough input curve.",
    "This smoother is meant to smooth conditional distribution functions, using\n"
    "a high-resolution prior cdf provided as a HistogramDistribution. Its 'smooth'\n"
    "function takes a lower-resolution curve and smooths it using the prior\n"
    "to fill the gaps.")
 

PLEARN_IMPLEMENT_OBJECT Binner  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT WeightedCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SquaredErrorCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SourceKernel  ,
"A kernel built upon an underlying source kernel"  ,
"The default behavior of a SourceKernel is to forward all calls to the underlying\n""kernel.   However,
subclasses will probably want to override the methods to perform\n""more complex operations." 
 

PLEARN_IMPLEMENT_OBJECT SigmoidPrimitiveKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SigmoidalKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT SelectedOutputCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ScaledLaplacianKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ScaledGeneralizedDistanceRBFKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ScaledGaussianKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(ReconstructionWeightsKernel,
    "Computes the reconstruction weights of a point given its neighbors.",
    "K(x, x_i) = the weight of x_i in the reconstruction of x by its knn\n"
    "nearest neighbors. More precisely, we compute weights W_i such that\n"
    "||x - \\sum_i W_i x_i||^2 is minimized, and K(x, x_i) = W_i.\n"
    "If the second argument is not in the training set, ...")
 

PLEARN_IMPLEMENT_OBJECT QuadraticUtilityCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PricingTransactionPairProfitFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PrecomputedKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PowDistanceKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT PolynomialKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT NormalizedDotProductKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT NegOutputCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT NegLogProbCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT NegKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT MulticlassErrorCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT LogOfGaussianDensityKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT LLEKernel  ,
"The kernel used in Locally Linear Embedding."  ,
"This kernel is the (weighted) sum of two kernels K' and K'' 
 

PLEARN_IMPLEMENT_OBJECT LiftBinaryCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT LaplacianKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT GeodesicDistanceKernel  ,
"Computes the geodesic distance based on k nearest neighbors."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT GeneralizedDistanceRBFKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT GaussianKernel  ,
"The good old Gaussian kernel."  ,
"" 
 

PLEARN_IMPLEMENT_OBJECT GaussianDensityKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT DotProductKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(DivisiveNormalizationKernel,
    "Divisive normalization of an underlying kernel.",
    "From a positive kernel K, defines a new kernel K' such that:\n"
    "  K'(x, y) = K(x,y) / sqrt(E[K(x,x_i)] . E[K(x_i,y)])\n"
    "where the expectation is performed on the data set.\n"
    "If the 'remove_bias' option is set, then the expectation will not\n"
    "take into account terms of the form K(x_i, x_i).")
 

PLEARN_IMPLEMENT_OBJECT DistanceKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT DirectNegativeCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT DifferenceKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ConvexBasisKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT CompactVMatrixPolynomialKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLearn::PLEARN_IMPLEMENT_OBJECT CompactVMatrixGaussianKernel  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ClassMarginCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ClassErrorCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT ClassDistanceProportionCostFunction  ,
"ONE LINE DESCR"  ,
"NO HELP" 
 

PLEARN_IMPLEMENT_OBJECT(AdditiveNormalizationKernel,
    "Normalizes additively an underlying kernel with respect to a training set.",
    "From a kernel K, defines a new kernel K' such that:\n"
    "  K'(x, y) = K(x,y) - E[K(x,x_i)] - E[K(x_i,y)] + E[K(x_i,x_j)]\n"
    "where the expectation is performed on the data set.\n"
    "If the 'remove_bias' option is set, then the expectation will not\n"
    "take into account terms of the form K(x_i, x_i).\n"
    "If the 'double_centering' option is set, this kernel K' will be\n"
    "multiplied by -1/2 (this turns a squared distance kernel into a\n"
    "centered dot product kernel).")
 

PLEARN_IMPLEMENT_OBJECT FilePStreamBuf  ,
"ONE LINE DESCRIPTION"  ,
"MULTI LINE\nHELP" 
 

PLEARN_IMPLEMENT_OBJECT(UCISpecification,
    "Describes the specifications of a UCI database.",
    "This object specifies characteristics of a database from the UCI machine\n"
    "learning repository, such as the input size, target size, etc...\n"
    "It is intended to be used in a script put in the same directory as the\n"
    "database, in order to be loaded by the getDataSet() method.")
 

int PLearn::plearn_main int  argc,
char **  argv
 

Definition at line 51 of file plearn_main.cc.

References endl(), file_exists(), PLERROR, prgname(), seed(), and stringvector().

Referenced by main().

Var plogp Var  v  )  [inline]
 

Definition at line 73 of file PLogPVariable.h.

Referenced by entropy().

void PLearn::plotVMats char *  defs[],
int  ndefs
 

Definition at line 1236 of file vmatmain.cc.

References c_str(), endl(), PLearn::PStream::get(), getDataSet(), PLearn::TVec< T >::length(), PLERROR, PLearn::TVec< T >::resize(), saveGnuplot(), split(), toint(), and tostring().

Referenced by vmatmain().

real PLearn::poidev real  xm  ) 
 

Returns a Poisson random number with lambda = xm.

Definition at line 426 of file random.cc.

References exp(), log(), log_gamma(), Pi, sqrt(), and uniform_sample().
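For readers unfamiliar with Poisson sampling, here is a minimal sketch of the classic multiplication method, which is adequate for small lambda (the references to log_gamma() and sqrt() suggest PLearn's actual implementation also uses a rejection method for large lambda). The uniform() helper below is a stand-in for uniform_sample(); none of this is PLearn code:

    #include <cmath>
    #include <cstdlib>

    // Stand-in for PLearn's uniform_sample(): uniform draw in [0,1).
    static double uniform() { return std::rand() / (RAND_MAX + 1.0); }

    // Knuth's multiplication method: count how many uniform draws are needed
    // before their running product falls below exp(-lambda).
    int poisson_small_lambda(double lambda)
    {
        double limit = std::exp(-lambda), p = 1.0;
        int k = -1;
        do { ++k; p *= uniform(); } while (p > limit);
        return k;
    }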

template<class T>
int positionOfClosestElement const TVec< T > &  vec,
const T &  value,
bool  is_sorted_vec = false
 

Returns the position of the element in the vector that is closest to value. If is_sorted_vec is true, the procedure assumes the vector's elements are sorted in ascending order and uses a dichotomy (binary) search.

Definition at line 1972 of file TMat_maths_impl.h.

References binary_search(), PLearn::TVec< T >::data(), dist(), and PLearn::TVec< T >::length().
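As an illustration of the unsorted case described above, here is a small sketch using std::vector instead of TVec (the sorted case would use a binary search instead of this linear scan); the function name is ours:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Linear scan: index of the element of vec closest to value (unsorted case).
    template <class T>
    int position_of_closest(const std::vector<T>& vec, const T& value)
    {
        int best = -1;
        double best_dist = 0.0;
        for (std::size_t i = 0; i < vec.size(); ++i) {
            double d = std::fabs(double(vec[i] - value));
            if (best < 0 || d < best_dist) { best = int(i); best_dist = d; }
        }
        return best;
    }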

template<class T>
int positionOfkthOrderedElement const TVec< T > &  vec,
int  k
 

Definition at line 1834 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), k, PLearn::TVec< T >::length(), and PLERROR.

Referenced by kthOrderedElement().

Var positive Var  v  )  [inline]
 

Definition at line 79 of file CutBelowThresholdVariable.h.

References cutBelowThreshold().

real positive real  a  )  [inline]
 

Definition at line 97 of file pl_math.h.

Referenced by PLearn::ConditionalDensityNet::build_().

real positive_dilogarithm real  x  ) 
 

Definition at line 171 of file pl_math.cc.

References log(), Pi, small_dilogarithm(), and x.

Referenced by dilogarithm().

template<class T>
TVec<T> positiveValues const TVec< T > &  vec  ) 
 

Returns a vector composed of the values of vec that are greater than 0.

Definition at line 1956 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().
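A minimal sketch of the same filtering operation on a std::vector, for illustration only (the function name is ours, not PLearn's):

    #include <vector>

    // Keep only the strictly positive entries of vec, preserving their order.
    template <class T>
    std::vector<T> positive_values(const std::vector<T>& vec)
    {
        std::vector<T> result;
        for (const T& v : vec)
            if (v > 0)
                result.push_back(v);
        return result;
    }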

Var pow Var  v,
Var  power
[inline]
 

Definition at line 90 of file PowVariableVariable.h.

Var pow Var  v,
real  power
[inline]
 

Definition at line 77 of file PowVariable.h.

Referenced by PLearn::VMatrixFromDistribution::build_(), PLearn::AsciiVMatrix::build_(), PLearn::EntropyContrast::compute_diversity_cost(), PLearn::ScaledGeneralizedDistanceRBFKernel::evaluate(), PLearn::GeneralizedDistanceRBFKernel::evaluate(), PLearn::SemiSupervisedProbClassCostVariable::fprop(), PLearn::PowVariableVariable::fprop(), PLearn::SpiralDistribution::generate(), mypow(), norm(), pownorm(), PLearn::VMatLanguage::run(), sqrt(), and PLearn::PowVariable::symbolicBprop().

Var PLearn::powdistance Var  input1,
Var  input2,
real  n
 

Definition at line 106 of file Var_utils.cc.

References pownorm().

template<class T>
T powdistance const TVec< T > &  vec1,
const TVec< T > &  vec2
[inline]
 

Definition at line 756 of file TMat_maths_impl.h.

References powdistance().

template<class T>
T powdistance const TVec< T > &  vec1,
const TVec< T > &  vec2,
double  n
 

Definition at line 712 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), mypow(), and PLERROR.

Referenced by PLearn::PCA::computeCostsFromOutputs(), PLearn::LinearRegressor::computeCostsFromOutputs(), PLearn::ConstantRegressor::computeCostsFromOutputs(), PLearn::AddCostToLearner::computeCostsFromOutputs(), computeNearestNeighbors(), dist(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::PowDistanceKernel::evaluate(), PLearn::LogOfGaussianDensityKernel::evaluate(), PLearn::GaussianDensityKernel::evaluate(), PLearn::DistanceKernel::evaluate(), PLearn::CompactVMatrixGaussianKernel::evaluate(), PLearn::VMatrix::find(), powdistance(), and PLearn::ConstantRegressor::train().
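The entry above gives no prose description, but the name and the call to mypow() suggest that powdistance(v1, v2, n) computes the n-th-power distance sum_i |v1[i] - v2[i]|^n (without taking the n-th root). A sketch under that assumption, on std::vector:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Assumed semantics of powdistance(v1, v2, n): sum_i |v1[i] - v2[i]|^n
    // (no n-th root taken). The vectors are assumed to have equal length.
    double pow_distance(const std::vector<double>& v1,
                        const std::vector<double>& v2, double n)
    {
        double sum = 0.0;
        for (std::size_t i = 0; i < v1.size(); ++i)
            sum += std::pow(std::fabs(v1[i] - v2[i]), n);
        return sum;
    }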

Var PLearn::pownorm Var  input,
real  n = 2.0
 

Definition at line 71 of file Var_utils.cc.

References abs(), pow(), square(), and sum().

template<class T>
T pownorm const TVec< T > &  vec  )  [inline]
 

Definition at line 691 of file TMat_maths_impl.h.

References pownorm().

template<class T>
T pownorm const TVec< T > &  vec,
double  n
 

Definition at line 657 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), mypow(), and val.

Referenced by PLearn::GaussianKernel::addDataForKernelMatrix(), PLearn::NllSemisphericalGaussianVariable::bprop(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), PLearn::KernelProjection::computeCostsFromOutputs(), PLearn::AddCostToLearner::computeCostsFromOutputs(), PLearn::GaussMix::computeLogLikelihood(), PLearn::GaussianContinuum::computeOutput(), PLearn::ConjGradientOptimizer::conjpomdp(), PLearn::ConjGradientOptimizer::daiYuan(), diagonalOfSquare(), PLearn::RowBufferedVMatrix::dot(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::GaussianKernel::evaluate_i_j(), PLearn::GaussianKernel::evaluate_i_x(), PLearn::GaussianKernel::evaluate_x_i(), PLearn::ConjGradientOptimizer::findDirection(), PLearn::ConjGradientOptimizer::fletcherReeves(), PLearn::ProjectionErrorVariable::fprop(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::GaussianContinuum::get_image_matrix(), PLearn::GaussMix::kmeans(), norm(), PLearn::ConjGradientOptimizer::polakRibiere(), powdistance(), pownorm(), and PLearn::GaussianContinuum::train().
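Similarly, pownorm(vec, n) appears to compute sum_i |vec[i]|^n, so the default n = 2.0 (see the Var overload above) yields the squared Euclidean norm. A hedged sketch under that assumption:

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Assumed semantics of pownorm(vec, n): sum_i |vec[i]|^n, so n = 2
    // gives the squared Euclidean norm (no root is taken).
    double pow_norm(const std::vector<double>& vec, double n = 2.0)
    {
        double sum = 0.0;
        for (std::size_t i = 0; i < vec.size(); ++i)
            sum += std::pow(std::fabs(vec[i]), n);
        return sum;
    }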

void PLearn::pretty_print_number char *  buffer,
real  number
 

Prints a number into buffer, without unnecessary trailing zeros.

Definition at line 96 of file general.cc.

Referenced by saveAsciiWithoutSize().
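One simple way to achieve this behaviour, sketched with the standard C library (the precision and function name here are illustrative, not PLearn's actual choices; the caller must supply a sufficiently large buffer):

    #include <cstdio>
    #include <cstring>

    // Format `number` into `buffer`, then strip useless trailing zeros
    // and a dangling decimal point, e.g. "3.140000" -> "3.14", "5.000000" -> "5".
    void pretty_print_number_sketch(char* buffer, double number)
    {
        std::sprintf(buffer, "%.6f", number);
        char* end = buffer + std::strlen(buffer) - 1;
        while (end > buffer && *end == '0')
            *end-- = '\0';
        if (end > buffer && *end == '.')
            *end = '\0';
    }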

void prettyprint_test_results ostream &  out,
const Learner &  learner,
const Vec &  results
[inline]
 

Definition at line 569 of file Learner.h.

References endl(), PLearn::TVec< T >::size(), and PLearn::Learner::testResultsNames().

string PLearn::prgname const string setname = ""  ) 
 

Definition at line 149 of file general.cc.

Referenced by PLearn::HelpCommand::helpCommands(), PLearn::HelpCommand::helpOverview(), PLearn::HelpCommand::helpScripts(), and plearn_main().

void print ostream &  out,
const map< int, real > &  vec,
Set  V
[inline]
 

Definition at line 609 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::Set::end(), endl(), NUMWIDTH, and SetIterator.

void print ostream &  out,
const map< int, real > &  vec
[inline]
 

Definition at line 600 of file ProbabilitySparseMatrix.h.

References endl(), and NUMWIDTH.

void print ostream &  out,
const map< int, real > &  vec,
int  size
[inline]
 

Definition at line 587 of file ProbabilitySparseMatrix.h.

References endl(), and NUMWIDTH.

void print ostream &  out,
RowMapSparseMatrix< real > &  m
[inline]
 

Definition at line 575 of file ProbabilitySparseMatrix.h.

References endl(), PLearn::RowMapSparseMatrix< T >::length(), NUMWIDTH, and PLearn::RowMapSparseMatrix< T >::width().

void print ostream &  out,
ProbabilitySparseMatrix &  pyx,
Set  Y,
Set  X
[inline]
 

Definition at line 561 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::Set::end(), endl(), NUMWIDTH, SetIterator, and x.

Referenced by PLearn::TVec< pair< real, real > >::debugPrint(), PLearn::TMat< pair< real, real > >::debugPrint(), main(), PLearn::TVec< pair< real, real > >::println(), and PLearn::RealMapping::write().

int PLearn::print_diff ostream &  out,
VMat  m1,
VMat  m2,
double  tolerance = 1e-6
 

Prints where m1 and m2 differ by more than tolerance; returns the number of such differences, or -1 if the sizes differ.

Definition at line 65 of file vmatmain.cc.

References endl(), is_missing(), PLearn::VMat::length(), and PLearn::VMat::width().

Referenced by vmatmain().
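A hedged sketch of the same contract on plain row-major matrices (this ignores VMat specifics such as missing values, and the signature is our own simplification):

    #include <cmath>
    #include <iostream>
    #include <vector>

    // Report entries whose absolute difference exceeds `tolerance`;
    // return how many were found, or -1 if the sizes differ.
    int print_diff_sketch(std::ostream& out,
                          const std::vector<double>& m1, const std::vector<double>& m2,
                          int length, int width, double tolerance = 1e-6)
    {
        if (m1.size() != m2.size() || int(m1.size()) != length * width)
            return -1;
        int ndiff = 0;
        for (int i = 0; i < length; ++i)
            for (int j = 0; j < width; ++j) {
                double a = m1[i*width + j], b = m2[i*width + j];
                if (std::fabs(a - b) > tolerance) {
                    out << "row " << i << " col " << j << ": " << a << " != " << b << '\n';
                    ++ndiff;
                }
            }
        return ndiff;
    }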

void PLearn::printDistanceStatistics VMat  vm,
int  inputsize
 

Definition at line 196 of file vmatmain.cc.

References endl(), L2distance(), PLearn::VMat::length(), PLearn::TVec< T >::subVec(), PLearn::StatsCollector::update(), and PLearn::VMat::width().

Referenced by vmatmain().

void PLearn::printFieldName ostream &  o,
const Row::iterator &  field
 

Outputs the given field name in a cell of appropriate size.

Definition at line 834 of file SimpleDB.cc.

References PLearn::RowIterator::char_width(), PLearn::Row::iterator, and PLearn::RowIterator::name().

Referenced by printFieldNames().

void PLearn::printFieldNames ostream &  o,
const Row &  row
 

outputs all field names, separated by " | "

Definition at line 842 of file SimpleDB.cc.

References PLearn::Row::begin(), PLearn::Row::const_iterator, PLearn::Row::end(), endl(), and printFieldName().

void PLearn::printInfo VarArray  inputs,
const Var &  output,
bool  show_gradients = true
 

Definition at line 1163 of file VarArray.cc.

References PLearn::VarArray::fbprop(), PLearn::VarArray::fprop(), PLearn::VarArray::printInfo(), and PLearn::VarArray::setMark().

void PLearn::printInfo VarArray &  a  ) 
 

Definition at line 1161 of file VarArray.cc.

References PLearn::VarArray::printInfo().

Mat PLearn::product Mat  m1,
VMat  m2
 

computes M1.M2

Definition at line 883 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLearn::VMat::length(), PLERROR, productAcc(), rowmatrix(), PLearn::VMat::width(), and PLearn::TMat< T >::width().

Var product Var  v1,
Var  v2
[inline]
 

general matrix product

Definition at line 80 of file ProductVariable.h.

template<class T>
TMat<T> product const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

return m1 x m2

Definition at line 5694 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), product(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> product const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

return m x v

Definition at line 5684 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and product().

template<class T>
T product const TMat< T > &  mat  ) 
 

Definition at line 3783 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void product const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 2867 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void product const TMat< T > &  mat,
const TVec< T > &  x,
TVec< T > &  y
 

Definition at line 2846 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void product const TVec< T > &  result,
const TMat< T > &  m,
const TVec< T > &  v
 

result[i] = sum_j m[i,j] * v[j]

Definition at line 2209 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().
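
A plain reference loop restating the formula above (the library version in TMat_maths_impl.h is the one to use; this sketch is only illustrative):

    template<class T>
    void product_reference(PLearn::TVec<T>& result,
                           const PLearn::TMat<T>& m,
                           const PLearn::TVec<T>& v)
    {
        // result[i] = sum_j m[i,j] * v[j]; result must already have length m.length()
        for (int i = 0; i < m.length(); i++)
        {
            T s = 0;
            for (int j = 0; j < m.width(); j++)
                s += m(i, j) * v[j];
            result[i] = s;
        }
    }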

template<class T>
T product const TVec< T > &  vec  ) 
 

Definition at line 271 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
void product RowMapSparseMatrix< T > &  M,
const Vec &  x,
Vec &  y
 

Definition at line 801 of file RowMapSparseMatrix.h.

References PLearn::RowMapSparseMatrix< T >::product(), and x.

Referenced by PLearn::TangentLearner::build_(), PLearn::RegularGridVMatrix::build_(), PLearn::GaussianContinuum::build_(), PLearn::EntropyContrast::compute_df_dx(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), diagonalizeSubspace(), eigenSparseNonSymmMat(), eigenSparseSymmMat(), PLearn::ProjectionErrorVariable::fprop(), PLearn::ProductVariable::fprop(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::GaussianContinuum::get_image_matrix(), PLearn::ProductRandomVariable::invertible(), linearRegression(), mahalanobis_distance(), PLearn::PLS::NIPALSEigenvector(), product(), PLearn::ProductVariable::rfprop(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), PLearn::GaussMix::setInput(), PLearn::TransposeProductVariable::symbolicBprop(), PLearn::ProductTransposeVariable::symbolicBprop(), PLearn::PLS::train(), PLearn::PCA::train(), PLearn::GaussianContinuum::train(), PLearn::EntropyContrast::train(), PLearn::GaussMix::updateFromConditionalSorting(), and weightedLinearRegression().

template<class T>
void product2Acc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 2921 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductTransposeVariable::bbprop().

template<class T>
void product2Transpose const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3136 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void product2TransposeAcc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3193 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductVariable::bbprop().

template<class T>
void productAcc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 2894 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void productAcc const TVec< T > &  vec,
const TMat< T > &  m,
const TVec< T > &  v
 

result[i] += sum_j m[i,j] * v[j]

Definition at line 2229 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::TransposeProductVariable::bprop(), PLearn::ProductTransposeVariable::bprop(), PLearn::MatrixAffineTransformVariable::bprop(), PLearn::AffineTransformVariable::bprop(), PLearn::ProjectionErrorVariable::fprop(), product(), PLearn::ProductVariable::rfprop(), and PLearn::PCA::train().

Mat PLearn::productTranspose VMat  m1,
VMat  m2
 

computes M1.M2'

Definition at line 858 of file VMat_maths.cc.

References dot(), PLearn::VMat::length(), PLERROR, and PLearn::VMat::width().

Var productTranspose Var &  m1,
Var &  m2
[inline]
 

Definition at line 79 of file ProductTransposeVariable.h.

template<class T>
TMat<T> productTranspose const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

return m1 x m2'

Definition at line 5704 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and productTranspose().

template<class T>
void productTranspose const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3079 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductRandomVariable::EMBprop(), PLearn::ProjectionErrorVariable::fprop(), PLearn::ProductTransposeVariable::fprop(), productTranspose(), PLearn::ProductTransposeVariable::rfprop(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), PLearn::TransposeProductVariable::symbolicBprop(), PLearn::ProductVariable::symbolicBprop(), PLearn::PLS::train(), PLearn::PCA::train(), and PLearn::GaussMix::updateFromConditionalSorting().

template<class T>
void productTransposeAcc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3166 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::TransposeProductVariable::bprop(), PLearn::ProductVariable::bprop(), PLearn::MatrixAffineTransformVariable::bprop(), PLearn::MatrixAffineTransformFeedbackVariable::fprop(), and PLearn::ProductTransposeVariable::rfprop().

Var projection_error Var  f,
Var  t,
real  norm_penalization = 0,
int  n = -1,
bool  normalize_by_neighbor_distance = true,
bool  use_subspace_distance = false,
real  epsilon = 0,
real  regularization = 0,
bool  ordered_vectors = true
[inline]
 

Definition at line 98 of file ProjectionErrorVariable.h.

Referenced by PLearn::TangentLearner::build_(), and PLearn::NearestNeighborPredictionCost::run().

template<class T>
void projectOnOrthogonalSubspace const TMat< T > &  mat,
TMat< T >  orthonormal_subspace
 

Definition at line 3673 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and projectOnOrthogonalSubspace().

template<class T>
void projectOnOrthogonalSubspace const TVec< T > &  vec,
const TMat< T > &  orthonormal_subspace
 

Projects the Vec x onto the linear subspace ORTHOGONAL to the subspace spanned by the rows of the orthonormal_subspace matrix, which are ASSUMED to be ORTHONORMAL. The method subtracts, for each row v of the matrix, the quantity (x . v) v.

Definition at line 2010 of file TMat_maths_impl.h.

References dot(), PLearn::TMat< T >::length(), and multiplyAcc().

Referenced by GramSchmidtOrthogonalization(), and projectOnOrthogonalSubspace().
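
A minimal sketch of the method described above (a restatement, not the library source), assuming the rows of the matrix are orthonormal:

    template<class T>
    void project_on_orthogonal_subspace_sketch(PLearn::TVec<T>& x,
                                               const PLearn::TMat<T>& U)
    {
        // For each (orthonormal) row v of U, subtract from x its component along v.
        for (int i = 0; i < U.length(); i++)
        {
            T coeff = 0;                       // coeff = x . v
            for (int j = 0; j < U.width(); j++)
                coeff += x[j] * U(i, j);
            for (int j = 0; j < U.width(); j++)
                x[j] -= coeff * U(i, j);       // x -= coeff * v
        }
    }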

VarArray PLearn::propagationPath const VarArray &  outputs  ) 
 

returns the propagation path going from all sources that influence the outputs to the outputs.

The sources themselves are not included in the path.

Definition at line 1054 of file VarArray.cc.

References propagationPath(), PLearn::TVec< Var >::size(), PLearn::VarArray::sources(), and PLearn::VarArray::unmarkAncestors().

VarArray PLearn::propagationPath const VarArray &  inputs,
const VarArray &  outputs
 

The function that computes a propagation path.

Returns the array of all the variables on which fprop is to be called sequentially to do a full fprop (for bprop it is the reverse order). NOTE THAT THE INPUTS ARE NOT IN THE RETURNED PATH! Consequently, a clearGradient() call on the array won't erase their gradients, but fprop and bprop will still take their values into account.

Definition at line 1018 of file VarArray.cc.

References PLearn::VarArray::buildPath(), PLearn::VarArray::clearMark(), PLearn::VarArray::markPath(), PLearn::VarArray::setMark(), and PLearn::TVec< Var >::size().

Referenced by PLearn::ArgminOfVariable::ArgminOfVariable(), PLearn::UnfoldedSumOfVariable::build_(), PLearn::UnfoldedFuncVariable::build_(), PLearn::Optimizer::build_(), PLearn::MatrixElementsVariable::build_(), PLearn::Function::build_(), PLearn::AddCostToLearner::build_(), PLearn::Function::differentiate(), EM(), PLearn::Variable::fprop_from_all_sources(), nonInputParentsOfPath(), propagationPath(), propagationPathToParentsOfPath(), and PLearn::Optimizer::setToOptimize().
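
A hedged usage sketch of the typical pattern (inputs and outputs are assumed to be existing VarArrays; fprop()/bprop() are the usual VarArray calls):

    // Compute the ordered list of variables between inputs and outputs,
    // then run a forward pass followed by a backward pass on that path.
    PLearn::VarArray path = PLearn::propagationPath(inputs, outputs);
    path.fprop();   // forward propagation along the path
    path.bprop();   // gradient back-propagation, in the reverse order of the same path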

VarArray PLearn::propagationPathToParentsOfPath const VarArray &  inputs,
const VarArray &  outputs
 

from all sources to all direct non-inputs parents of the path inputs-->outputs

Definition at line 1064 of file VarArray.cc.

References nonInputParentsOfPath(), propagationPath(), and PLearn::TVec< Var >::size().

Referenced by PLearn::Optimizer::build_(), PLearn::Function::build_(), and PLearn::Optimizer::setToOptimize().

CostFunc quadratic_risk real  risk_aversion,
CostFunc  profit_function
[inline]
 

Definition at line 85 of file QuadraticUtilityCostFunction.h.

StatsIt quantiles_stats Vec  quantiles,
int  n_data = 1000
[inline]
 

Definition at line 413 of file StatsIterator.h.

void PLearn::randomShuffleRows SDB &  sdb  ) 
 

Performs a random permutation of all the rows of the SDB (same algorithm as Mat::shuffle).

Definition at line 954 of file SimpleDB.cc.

References endl(), PLearn::SimpleDB< KeyType, QueryResult >::getInRow(), PLearn::SimpleDB< KeyType, QueryResult >::getSchema(), PLearn::SimpleDB< KeyType, QueryResult >::length(), PLearn::SimpleDB< KeyType, QueryResult >::setRow(), and uniform_sample().

void PLearn::randomSplit VMat  d,
real  validation_fraction,
real  test_fraction,
VMat &  train,
VMat &  valid,
VMat &  test
 

Splits the dataset d into 3 subsets (similar to above).

Definition at line 131 of file Splitter.cc.

References split().

Vec PLearn::randomSplit VMat  d,
real  test_fraction,
VMat &  train,
VMat &  test
 

Splits the dataset d into a train and a test subset, randomly picking which samples should be in each subset (test_fraction has the same meaning as above). Returns the permuted indices (the last ntest of which are the test indices and the remainder are the train indices).

Definition at line 104 of file Splitter.cc.

References PLearn::VMat::length(), PLearn::VMat::rows(), shuffleElements(), and PLearn::TVec< T >::subVec().
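
A hedged usage sketch (dataset is assumed to be an existing VMat):

    // Hold out 20% of the rows as a test set; the returned vector holds the
    // permuted row indices (train indices first, test indices last).
    PLearn::VMat train, test;
    PLearn::Vec perm = PLearn::randomSplit(dataset, 0.2, train, test);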

template<class T>
void read const string stringval,
T &  x
[inline]
 

Definition at line 1099 of file PStream.h.

References x.

template<class T>
void read istream &  in_,
T &  o
[inline]
 

Definition at line 1092 of file PStream.h.

void read istream &  in,
RealRange &  range
[inline]
 

Definition at line 118 of file RealMapping.h.

References PLearn::RealRange::read().

Referenced by cross_valid(), getDatasetAliases(), PLearn::RowMapSparseMatrix< real >::load(), PLearn::NumToStringMapping::load(), PLearn::Variable::oldread(), PLearn::StatsCollector::oldread(), PLearn::PStream::operator>>(), PLearn::RowMapSparseMatrix< real >::read(), PLearn::PStream::read(), PLearn::NumToStringMapping::read(), readField(), train_and_test(), and PLearn::pl_fdstreambuf::underflow().

void read_bool istream &  in,
bool ptr,
int  n,
bool  is_file_bigendian
 

void read_compr_mode_and_size FILE *  in,
unsigned char &  mode,
int size
[inline]
 

Definition at line 307 of file pl_io.cc.

References binread(), and mode.

void read_compr_mode_and_size istream &  in,
unsigned char &  mode,
int size
[inline]
 

Definition at line 104 of file pl_io.cc.

References binread(), and mode.

Referenced by binread_compressed().

void PLearn::read_compr_mode_and_size_ptr char *&  in,
unsigned char &  mode,
int size
[inline]
 

DEPRECATED DO NOT USE! compressed vec to and from memory.

Definition at line 463 of file pl_io.cc.

References mode.

Referenced by compressedTransposeProductAcc(), and uncompress_vec().

double read_double istream &  in,
bool  is_file_bigendian = true
[inline]
 

Definition at line 164 of file pl_io_deprecated.h.

References read_double().

void PLearn::read_double istream &  in,
double *  ptr,
int  n,
bool  is_file_bigendian
 

Definition at line 429 of file pl_io_deprecated.cc.

References reverse_double().

Referenced by read_double().

float read_float istream &  in,
bool  is_file_bigendian = true
[inline]
 

Definition at line 162 of file pl_io_deprecated.h.

References read_float().

void PLearn::read_float istream &  in,
float *  ptr,
int  n,
bool  is_file_bigendian
 

Definition at line 416 of file pl_io_deprecated.cc.

References reverse_float().

Referenced by read_float().

int read_int istream &  in,
bool  is_file_bigendian = true
[inline]
 

The following calls read a single value from the file, assuming it is in the specified representation (either little or big endian). If necessary, the representation is translated to the endianness used on the current architecture.

Definition at line 158 of file pl_io_deprecated.h.

References read_int().

void PLearn::read_int istream &  in,
int ptr,
int  n,
bool  is_file_bigendian
 

Reads binary data from a file, assuming it is in the specified representation (either little or big endian). If necessary, the representation is translated to the endianness of the current architecture.

Definition at line 390 of file pl_io_deprecated.cc.

References reverse_int().

Referenced by read_int(), and read_uint().

signed char read_sbyte istream &  in  )  [inline]
 

Definition at line 170 of file pl_io_deprecated.h.

Referenced by PLearn::VecCompressor::readCompressedVec().

short read_short istream &  in,
bool  is_file_bigendian = true
[inline]
 

Definition at line 160 of file pl_io_deprecated.h.

References read_short().

void PLearn::read_short istream &  in,
short *  ptr,
int  n,
bool  is_file_bigendian
 

Definition at line 403 of file pl_io_deprecated.cc.

References reverse_short().

Referenced by read_short(), and read_ushort().

unsigned char read_ubyte istream &  in  )  [inline]
 

Definition at line 176 of file pl_io_deprecated.h.

unsigned int read_uint istream &  in,
bool  is_file_bigendian = true
[inline]
 

Definition at line 166 of file pl_io_deprecated.h.

References read_uint().

void read_uint istream &  in,
unsigned int ptr,
int  n,
bool  is_file_bigendian
[inline]
 

Definition at line 148 of file pl_io_deprecated.h.

References read_int().

Referenced by read_uint().

unsigned short read_ushort istream &  in,
bool  is_file_bigendian = true
[inline]
 

Definition at line 168 of file pl_io_deprecated.h.

References read_ushort().

void read_ushort istream &  in,
unsigned short *  ptr,
int  n,
bool  is_file_bigendian
[inline]
 

Definition at line 150 of file pl_io_deprecated.h.

References read_short().

Referenced by read_ushort().

string PLearn::readAndMacroProcess istream &  in,
map< string, string > &  variables
 

Will return the text, macro processed, with each instance of ${varname} in the text that corresponds to a key in the given map replaced by its associated value.

Also, every $DEFINE{varname=... } in the text will add a new varname entry in the map (the DEFINE macro itself is discarded). Also, every $INCLUDE{filepath} will be replaced in place by the text of the file it includes.

Definition at line 630 of file fileutils.cc.

References c_str(), chdir(), endl(), extract_directory(), extract_filename(), getAfterSkipBlanksAndComments(), getcwd(), peekAfterSkipBlanksAndComments(), pl_isnumber(), PLERROR, readFileAndMacroProcess(), readWhileMatches(), removeblanks(), skipBlanksAndComments(), smartReadUntilNext(), toint(), tostring(), and val.

Referenced by readFileAndMacroProcess().
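
A hedged usage sketch of the ${varname} substitution (the option text is illustrative only):

    #include <map>
    #include <sstream>
    #include <string>

    std::string expand_example()
    {
        std::map<std::string, std::string> vars;
        vars["nhidden"] = "10";                          // value substituted for ${nhidden}
        std::istringstream in("NNet( nhidden = ${nhidden} )");
        return PLearn::readAndMacroProcess(in, vars);    // -> "NNet( nhidden = 10 )"
    }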

template<class T>
void readField istream &  in,
const string fieldname,
T &  x,
default_value
 

readField with a default value when the field is not found

Definition at line 257 of file pl_io_deprecated.h.

References read(), readFieldName(), and x.

template<class T>
void readField istream &  in,
const string fieldname,
T &  x
 

Definition at line 226 of file pl_io_deprecated.h.

References read(), readFieldName(), and x.

Referenced by PLearn::VMatrix::oldread(), PLearn::QuantilesStatsIterator::oldread(), PLearn::LiftStatsIterator::oldread(), PLearn::SharpeRatioStatsIterator::oldread(), PLearn::StderrStatsIterator::oldread(), PLearn::StddevStatsIterator::oldread(), PLearn::ExpMeanStatsIterator::oldread(), PLearn::MeanStatsIterator::oldread(), PLearn::StatsIterator::oldread(), PLearn::StatsCollector::oldread(), PLearn::Optimizer::oldread(), PLearn::Learner::oldread(), and PLearn::RowMapSparseMatrix< real >::read().

bool PLearn::readFieldName istream &  in,
const string fieldname,
bool  force = false
 

consumes "fieldname: " if possible, and return true if it does however if force=true and fieldname is not found then call error.

Definition at line 95 of file pl_io_deprecated.cc.

References PLERROR, and PLearn::pl_streambuf::seekmark().

Referenced by binreadField(), binreadField_double(), PLearn::StatsCollector::oldread(), and readField().

string readFileAndMacroProcess const string filepath  )  [inline]
 

Definition at line 203 of file fileutils.h.

References readFileAndMacroProcess().

string PLearn::readFileAndMacroProcess const string filepath,
map< string, string > &  variables
 

Same as readAndMacroProcess, but takes a filename instead of an istream. The following variables are automatically set from the filepath: FILEPATH, DIRPATH, FILENAME, FILEBASE, FILEEXT.

Ex: if the absolute path to filepath is /home/me/foo.plearn, then we'll get: FILEPATH = "/home/me/foo.plearn", DIRPATH = "/home/me", FILENAME = "foo.plearn", FILEBASE = "foo", FILEEXT = ".plearn".

Definition at line 585 of file fileutils.cc.

References abspath(), extract_directory(), extract_extension(), extract_filename(), PLERROR, readAndMacroProcess(), remove_extension(), and remove_trailing_slash().

Referenced by PLearn::VVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), getDataSet(), PLearn::VVMatrix::getDateOfVMat(), macroLoadObject(), readAndMacroProcess(), readFileAndMacroProcess(), PLearn::RunCommand::run(), and PLearn::ReadAndWriteCommand::run().

void PLearn::readFooter istream &  in,
const string classname
 

consumes "</ClassName>\n"

Definition at line 81 of file pl_io_deprecated.cc.

References PLERROR.

Referenced by PLearn::VMatrix::oldread(), PLearn::QuantilesStatsIterator::oldread(), PLearn::LiftStatsIterator::oldread(), PLearn::MaxStatsIterator::oldread(), PLearn::MinStatsIterator::oldread(), PLearn::SharpeRatioStatsIterator::oldread(), PLearn::StderrStatsIterator::oldread(), PLearn::StddevStatsIterator::oldread(), PLearn::ExpMeanStatsIterator::oldread(), PLearn::MeanStatsIterator::oldread(), PLearn::StatsIterator::oldread(), PLearn::StatsCollector::oldread(), PLearn::Optimizer::oldread(), PLearn::Learner::oldread(), PLearn::RowMapSparseMatrix< real >::read(), and PLearn::RealMapping::read().

int PLearn::readHeader istream &  in,
const string classname
 

consumes "<ClassName:version>\n and returns version"

Definition at line 61 of file pl_io_deprecated.cc.

References header, PLERROR, and toint().

Referenced by PLearn::VMatrix::oldread(), PLearn::QuantilesStatsIterator::oldread(), PLearn::LiftStatsIterator::oldread(), PLearn::MaxStatsIterator::oldread(), PLearn::MinStatsIterator::oldread(), PLearn::SharpeRatioStatsIterator::oldread(), PLearn::StderrStatsIterator::oldread(), PLearn::StddevStatsIterator::oldread(), PLearn::ExpMeanStatsIterator::oldread(), PLearn::MeanStatsIterator::oldread(), PLearn::StatsIterator::oldread(), PLearn::StatsCollector::oldread(), PLearn::Optimizer::oldread(), PLearn::Learner::oldread(), PLearn::RowMapSparseMatrix< real >::read(), and PLearn::RealMapping::read().

template<class MapT>
void readMap PStream in,
MapT &  m
 

Definition at line 700 of file PStream.h.

References PLearn::PStream::get(), PLearn::PStream::peek(), PLERROR, PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and val.

Referenced by operator>>().

void readNewline istream &  in  )  [inline]
 

Reads next character and issues an error if it's not a newline.

Definition at line 200 of file pl_io_deprecated.h.

References PLERROR.

Referenced by PLearn::StatsCollector::oldread().

Object* readObject istream &  in_  )  [inline]
 

Definition at line 592 of file Object.h.

References readObject().

Object * PLearn::readObject PStream in,
unsigned int  id = UINT_MAX
 

This function builds an object from its representation in the stream. It understands several representations:

  • The <objectclass> ... type of representation as is typically produced by write() serialization methods and functions. This will call the object's read() method.
  • The ObjectClass( optionname=optionvalue; ... ; optionname=optionvalue ) type of representation (typical form for human input), will result in appropriate calls of the object's setOption() followed by its build().
  • load( filepath ) will call loadObject

Definition at line 367 of file Object.cc.

References PLearn::PStream::_do_not_use_this_method_rawin_(), PLearn::PStream::copies_map_in, fname, PLearn::PStream::get(), PLearn::PStream::getline(), loadObject(), PLearn::Object::newread(), PLearn::PStream::peek(), PLearn::PStream::pl_rdbuf(), PLERROR, PLearn::Object::read(), removeblanks(), PLearn::pl_streambuf::seekmark(), and PLearn::PStream::skipBlanksAndCommentsAndSeparators().

Referenced by loadObject(), macroLoadObject(), newObject(), operator>>(), readObject(), PLearn::RunCommand::run(), and PLearn::ReadAndWriteCommand::run().
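
A hedged usage sketch of the human-readable form (the class name is illustrative; any registered Object subclass works):

    #include <sstream>

    PLearn::Object* build_example()
    {
        std::istringstream in("StatsCollector()");
        return PLearn::readObject(in);   // options are set via setOption(), then build() is called
    }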

template<class SequenceType>
void readSequence PStream in,
SequenceType &  seq
 

This reads into a sequence.

For this to work with the current implementation, the SequenceType must have:

  • typedefs defining (SequenceType::...) value_type, size_type, iterator
  • a begin() method that returns a proper iterator,
  • a size_type size() method returning the size of the current container
  • a resize(size_type n) method that allows to change the size of the container (which should also work with resize(0) )
  • a push_back(const value_type& x) method that appends the element x at the end

Definition at line 977 of file PStream.h.

References BIG_ENDIAN_ORDER, binread_(), byte_order(), endianswap(), PLearn::PStream::eof(), PLearn::PStream::get(), PLearn::PStream::inmode, LITTLE_ENDIAN_ORDER, PLearn::PStream::peek(), PLERROR, PLearn::PStream::read(), PLearn::PStream::skipBlanks(), PLearn::PStream::skipBlanksAndComments(), PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and x.

Referenced by operator>>(), and PLearn::TVec< pair< real, real > >::read().
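
For instance, std::vector already provides the typedefs and methods listed above (value_type, size_type, iterator, begin(), size(), resize(), push_back()), so a hedged sketch of reading into it looks like:

    #include <vector>

    void read_ints(PLearn::PStream& in, std::vector<int>& v)
    {
        PLearn::readSequence(in, v);   // reads the whole sequence from the stream
    }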

template<class SetT>
void readSet PStream in,
SetT &  s
 

Definition at line 1137 of file PStream.h.

References PLearn::PStream::get(), PLearn::PStream::peek(), PLERROR, PLearn::PStream::skipBlanksAndCommentsAndSeparators(), and val.

Referenced by operator>>().

void PLearn::readWhileMatches istream &  in,
const string s
 

Reads while the characters read exactly match those in s. Throws a PLERROR exception as soon as they don't match.

Definition at line 405 of file fileutils.cc.

References PLERROR.

Referenced by readAndMacroProcess().

void PLearn::real2rgb real  colorval,
real r,
real g,
real b
 

Definition at line 67 of file GhostScript.cc.

Referenced by PLearn::GhostScript::setcolor(), and PLearn::GhostScript::writeBitmapHexString24Bits().

VMat PLearn::rebalanceNClasses VMat  inputs,
int  nclasses,
const string filename
 

Rebalances the input VMatrix so that each class has probability 1/nclasses. Also, the class of the returned VMat alternates between all classes cyclically. The resulting VMat is a SelectRowsFileIndexVMatrix whose IntVecFile is loaded if filename already exists, or computed otherwise.

Definition at line 1063 of file VMat_maths.cc.

References binary_search(), PLearn::Array< T >::clear(), PLearn::TMat< T >::column(), PLearn::TVec< T >::data(), file_exists(), PLearn::VMat::lastColumn(), PLearn::TVec< T >::length(), PLERROR, PLearn::IntVecFile::put(), PLearn::TVec< T >::resize(), PLearn::VMat::rows(), sortRows(), PLearn::VMat::toMat(), and PLearn::TMat< T >::toVecCopy().
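
A hedged usage sketch (the index file name is illustrative; the last column of the VMat is assumed to hold the class index):

    PLearn::VMat make_balanced(PLearn::VMat trainset)
    {
        // 3 classes; the resulting view cycles through the classes with equal probability.
        return PLearn::rebalanceNClasses(trainset, 3, "balanced_indices.ivec");
    }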

template<class ObjectType, class OptionType>
void redeclareOption OptionList &  ol,
const string optionname,
OptionType ObjectType::*  member_ptr,
OptionBase::flag_t  flags,
const string description,
const string defaultval = ""
[inline]
 

Allows one to redeclare an option differently (e.g. in a subclass, after calling inherited::declareOptions).

Parameters:
ol  the list to which this option should be appended
optionname  the name of this option
member_ptr  &YourClass::your_field
flags  see the flags in OptionBase
description  a description of the option
defaultval  the default value for this option, as set by the default constructor

Definition at line 129 of file Option.h.

References PLERROR.

Referenced by PLearn::UnconditionalDistribution::declareOptions(), PLearn::SpectralClustering::declareOptions(), PLearn::RemoveDuplicateVMatrix::declareOptions(), PLearn::LLE::declareOptions(), PLearn::KernelPCA::declareOptions(), PLearn::Isomap::declareOptions(), PLearn::GramVMatrix::declareOptions(), PLearn::BootstrapVMatrix::declareOptions(), and PLearn::AutoVMatrix::declareOptions().
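
An illustrative sketch of the usual pattern inside a subclass's declareOptions() (class and option names are hypothetical; OptionBase::nosave is one of the standard option flags):

    void MySubclass::declareOptions(PLearn::OptionList& ol)
    {
        inherited::declareOptions(ol);
        // Hide an inherited option: in this subclass it is set automatically
        // at build time and should not be saved with the object.
        PLearn::redeclareOption(ol, "some_inherited_option",
                                &MySubclass::some_inherited_option,
                                PLearn::OptionBase::nosave,
                                "Set automatically by MySubclass; not a user option.");
    }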

VMat PLearn::reduceDataSetSize real  fraction,
VMat  data
 

Definition at line 156 of file databases.cc.

References PLearn::VMat::length(), and PLearn::VMat::subMatRows().

VMat PLearn::reduceInputSize real  fraction,
VMat  data
 

Definition at line 148 of file databases.cc.

References endl(), PLearn::VMat::subMatColumns(), and PLearn::VMat::width().

template<class T>
void regularizeMatrix const TMat< T > &  mat,
tolerance
 

Applies a regularizer : diag(A) += (tolerance * trace(A)).

Definition at line 3434 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and trace().

Referenced by PLearn::ReconstructionWeightsKernel::reconstruct(), and smartInitialization().
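
A plain reference loop restating the formula above (the library version in TMat_maths_impl.h is the one to use):

    template<class T>
    void regularize_matrix_reference(PLearn::TMat<T>& A, T tolerance)
    {
        // diag(A) += tolerance * trace(A), for a square matrix A
        T tr = 0;
        for (int i = 0; i < A.length(); i++)
            tr += A(i, i);
        for (int i = 0; i < A.length(); i++)
            A(i, i) += tolerance * tr;
    }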

void PLearn::remapClassnums VMat &  data,
real  remap_minval_to,
real  remap_maxval_to
 

remaps classnums to {0,1} or to {-1,+1}

Definition at line 164 of file databases.cc.

References PLearn::VMat::length(), and PLearn::VMat::width().

VMat remapLastColumn VMat  d,
real  if_equals_value,
real  then_value = 1.0,
real  else_value = -1.0
[inline]
 

Definition at line 105 of file RemapLastColumnVMatrix.h.

VMat remapLastColumn VMat  d,
Mat  mapping
[inline]
 

Definition at line 102 of file RemapLastColumnVMatrix.h.

Referenced by loadClassificationDataset().

vector< string > PLearn::remove const vector< string > &  v,
string  element
 

return vector with all instances of element removed

Definition at line 517 of file stringutils.cc.

Referenced by PLearn::TVec< pair< real, real > >::removeSorted(), and PLearn::Set::replace().

void PLearn::remove_comments string text,
const string commentstart = "#"
 

In a multiline text, removes everything starting at commentstart pattern until the end of line.

Definition at line 484 of file stringutils.cc.

Referenced by getModelAliases().

string PLearn::remove_extension const string filename  ) 
 

Return the filename without the extension (i.e. removing the last '.' and everything that follows).

Definition at line 178 of file stringutils.cc.

Referenced by PLearn::MatlabInterface::eigs_r11(), getDataSet(), matlabR11eigs(), readFileAndMacroProcess(), and use().

template<class T>
TVec<T> remove_missing const TVec< T > &  vec  ) 
 

Definition at line 1358 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::resize().

Referenced by PLearn::SequentialModelSelector::paired_t_test(), and PLearn::SequentialModelSelector::train().

string PLearn::remove_trailing_slash const string path  ) 
 

removes any trailing '/' from the path

Definition at line 246 of file stringutils.cc.

References slash_char.

Referenced by PLearn::VVMatrix::createPreproVMat(), PLearn::DiskVMatrix::DiskVMatrix(), force_mkdir(), PLearn::VVMatrix::getPrecomputedDataName(), PLearn::VVMatrix::isPrecomputedAndUpToDate(), locateDatasetAliasesDir(), lsdir_fullpath(), newFilename(), and readFileAndMacroProcess().

string PLearn::removeallblanks const string s  ) 
 

removes all blank characters: spaces, tabs, newlines and carriage returns

Definition at line 225 of file stringutils.cc.

Referenced by PLearn::StatSpec::parseStatname().

string PLearn::removeblanks const string s  ) 
 

removes starting and ending blank characters: spaces, tabs, newlines and carriage returns

Definition at line 204 of file stringutils.cc.

Referenced by PLearn::VVMatrix::build_(), PLearn::AsciiVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), PLearn::VVMatrix::extractSourceMatrix(), PLearn::VVMatrix::generateVMatIndex(), getDataSet(), getDatasetAliases(), getModelAliases(), PLearn::Object::getOption(), loadAscii(), loadAsciiSingleBinaryDescriptor(), makeExplicitPath(), PLearn::Object::newread(), pl_isnumber(), PLearn::VMatLanguage::preprocess(), PLearn::VVMatrix::processJoinSection(), PLearn::RealMapping::read(), readAndMacroProcess(), readObject(), PLearn::VMatrix::resolveFieldInfoLink(), PLearn::StringTable::StringTable(), and viewVMat().

template<class T>
TMat< T > PLearn::removeColumn const TMat< T > &  m,
int  colnum
 

returns a new mat which is m with the given column removed; if the column to remove is the first or the last one, then a submatrix (a view) of m will be returned (for efficiency); otherwise, a fresh copy with the column removed is returned.

Definition at line 566 of file TMat_impl.h.

References hconcat(), PLearn::TMat< T >::subMatColumns(), and PLearn::TMat< T >::width().

void PLearn::removeDelimiters string s,
string  delim,
string  replace
 

Definition at line 2889 of file WordNetOntology.cc.

Referenced by PLearn::WordNetOntology::extractOntology(), and PLearn::WordNetOntology::getSynsetWords().

template<class T>
TVec< T > PLearn::removeElement const TVec< T > &  v,
int  elemnum
 

if the element to remove is the first or the last one, then a subvector (a view) of v will be returned (for efficiency); otherwise, a fresh copy with the element removed is returned.

Definition at line 238 of file TMat_impl.h.

References concat(), PLearn::TVec< T >::length(), and PLearn::TVec< T >::subVec().

string PLearn::removenewline const string s  ) 
 

removes any trailing newline and/or carriage return characters

Definition at line 238 of file stringutils.cc.

Referenced by addprepostfix(), PLearn::AsciiVMatrix::build_(), and pgetline().

VMat removeRow VMat  d,
int  rownum
[inline]
 

Definition at line 105 of file RemoveRowsVMatrix.h.

References Vec.

template<class T>
TMat< T > PLearn::removeRow const TMat< T > &  m,
int  rownum
 

returns a new mat which is m with the given row removed; if the row to remove is the first or the last one, then a submatrix (a view) of m will be returned (for efficiency); otherwise, a fresh copy with the row removed is returned.

Definition at line 554 of file TMat_impl.h.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::subMatRows(), and vconcat().

Referenced by PLearn::Learner::computeLeaveOneOutCosts().

VMat removeRows VMat  d,
Vec  rownums
[inline]
 

Definition at line 102 of file RemoveRowsVMatrix.h.

void PLearn::replaceChars string str,
string  char_to_replace,
string  replacing_char
 

Definition at line 2907 of file WordNetOntology.cc.

Var reshape Var  v,
int  newlength,
int  newwidth
[inline]
 

Definition at line 83 of file ReshapeVariable.h.

void reverse_double const double *  ptr,
int  n
[inline]
 

Definition at line 72 of file pl_io_deprecated.h.

References endianswap().

Referenced by fread_double(), fwrite_double(), PLearn::SimpleDB< KeyType, QueryResult >::memoryToDisk(), read_double(), and write_double().

void reverse_float const float *  ptr,
int  n
[inline]
 

Definition at line 71 of file pl_io_deprecated.h.

References endianswap().

Referenced by fread_float(), fwrite_float(), loadCorelDatamat(), PLearn::SimpleDB< KeyType, QueryResult >::memoryToDisk(), read_float(), and write_float().

void reverse_int const int ptr,
int  n
[inline]
 

Definition at line 70 of file pl_io_deprecated.h.

References endianswap().

Referenced by fread_int(), fwrite_int(), PLearn::SimpleDB< KeyType, QueryResult >::memoryToDisk(), PLearn::FilesIntStream::read_current(), read_int(), and write_int().

void reverse_short const short *  ptr,
int  n
[inline]
 

Definition at line 74 of file pl_io_deprecated.h.

References endianswap().

Referenced by PLearn::SimpleDB< KeyType, QueryResult >::memoryToDisk(), read_short(), and write_short().

void reverse_uint const unsigned int ptr,
int  n
[inline]
 

NOTE: these calls are deprecated; use endianswap from base/byte_order.h directly.

Definition at line 69 of file pl_io_deprecated.h.

References endianswap().

void reverse_ushort const unsigned short *  ptr,
int  n
[inline]
 

Definition at line 73 of file pl_io_deprecated.h.

References endianswap().

Referenced by fread_short().

real PLearn::rgb2real real  r,
real  g,
real  b
 

These are used to pack and unpack r,g,b triplets to and from a single real. As usual in PostScript, r, g and b are values between 0 and 1 indicating the quantity of light.

Definition at line 59 of file GhostScript.cc.

Referenced by color().
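
A hedged round-trip sketch of the packing and unpacking pair:

    void rgb_roundtrip()
    {
        // r, g, b are fractions of full intensity in [0,1], as in PostScript.
        PLearn::real packed = PLearn::rgb2real(1.0, 0.5, 0.25);
        PLearn::real r, g, b;
        PLearn::real2rgb(packed, r, g, b);   // recovers the components (possibly quantized)
    }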

string PLearn::right const string s,
size_t  width,
char  padding = ' '
 

Definition at line 64 of file stringutils.cc.

Referenced by PLearn::SoftSlopeVariable::bprop(), PLearn::SoftSlopeIntegralVariable::bprop(), PLearn::HardSlopeVariable::bprop(), d_hard_slope(), d_soft_slope(), PLearn::SoftSlopeVariable::fprop(), PLearn::SoftSlopeIntegralVariable::fprop(), PLearn::HardSlopeVariable::fprop(), hard_slope(), PLearn::HardSlopeVariable::HardSlopeVariable(), PLearn::PDateTime::info(), PLearn::PDate::info(), operator<(), operator<=(), operator>(), operator>=(), soft_slope(), soft_slope_integral(), soft_slope_limit(), PLearn::SoftSlopeIntegralVariable::SoftSlopeIntegralVariable(), PLearn::SoftSlopeVariable::SoftSlopeVariable(), split_on_first(), tabulated_soft_slope(), tabulated_soft_slope_integral(), and PLearn::UnaryHardSlopeVariable::UnaryHardSlopeVariable().

Var rightPseudoInverse Var  v  )  [inline]
 

Definition at line 76 of file RightPseudoInverseVariable.h.

template<class T>
void rightPseudoInverse const TMat< T > &  m,
TMat< T > &  inv
 

Definition at line 4686 of file TMat_maths_impl.h.

References inverse(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
TMat<T> rightPseudoInverse TMat< T > &  m  ) 
 

Definition at line 4676 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

Referenced by PLearn::RightPseudoInverseVariable::fprop(), and PLearn::ProductRandomVariable::invertible().

void PLearn::rm const string file  ) 
 

calls the system rm command with the string file as parameter

Definition at line 364 of file fileutils.cc.

Referenced by PLearn::VVMatrix::createPreproVMat(), PLearn::VVMatrix::generateFilterIndexFile(), PLearn::VVMatrix::generateVMatIndex(), PLearn::FilteredVMatrix::openIndex(), PLearn::RealMapping::operator==(), PLearn::VMatrix::saveStringMappings(), PLearn::VMatrix::setSFIFFilename(), PLearn::VMatrix::unlockMetaDataDir(), PLearn::PrecomputedVMatrix::usePrecomputed(), and vmatmain().

template<class T>
void rowArgmax const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4119 of file TMat_maths_impl.h.

References argmax(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void rowArgmin const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4130 of file TMat_maths_impl.h.

References argmin(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
TMat<T> rowmatrix const TVec< T > &  v  )  [inline]
 

returns a view of this vector as a single row matrix

Definition at line 734 of file TMat_impl.h.

References PLearn::TVec< T >::length(), and PLearn::TVec< T >::toMat().

Referenced by product(), transposeProduct(), PLearn::GaussMix::updateFromConditionalSorting(), and PLearn::VVec::VVec().

template<class T>
void rowMax const TMat< T > &  mat,
const TVec< T > &  colvec
 

Definition at line 4085 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), max(), and PLERROR.

template<class T>
void rowMax const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4074 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), max(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void rowMean const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4036 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), mean(), PLERROR, and PLearn::TMat< T >::width().

Referenced by computeColumnsMeanAndStddev().

template<class T>
void rowMin const TMat< T > &  mat,
const TVec< T > &  colvec
 

Definition at line 4108 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), min(), and PLERROR.

template<class T>
void rowMin const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4096 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), min(), PLERROR, and PLearn::TMat< T >::width().

Var rowSum Var  v  )  [inline]
 

Definition at line 73 of file RowSumVariable.h.

template<class T>
void rowSum const TMat< T > &  mat,
const TVec< T > &  colvec
 

Definition at line 4025 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and sum().

template<class T>
void rowSum const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

All the operations below result in a column vector and are obtained by iterating (e.g. summing) over the column index, yielding for example the sum of each row in the result.

Definition at line 4013 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, sum(), and PLearn::TMat< T >::width().

Referenced by PLearn::KernelProjection::computeOutput(), PLearn::TimesColumnVariable::symbolicBprop(), PLearn::PlusColumnVariable::symbolicBprop(), PLearn::MinusColumnVariable::symbolicBprop(), and PLearn::DuplicateColumnVariable::symbolicBprop().

template<class T>
void rowSumOfSquares const TMat< T > &  mat,
const TMat< T > &  singlecolumn
 

Definition at line 4058 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void rowVariance const TMat< T > &  mat,
const TMat< T > &  singlecolumn,
const TMat< T > &  rowmean
 

Definition at line 4047 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLERROR, variance(), and PLearn::TMat< T >::width().

Referenced by computeColumnsMeanAndStddev().

real PLearn::safeexp real  a  ) 
 

Definition at line 84 of file pl_math.cc.

References exp().

Referenced by PLearn::SoftmaxLossVariable::bprop(), PLearn::MatrixSoftmaxLossVariable::bprop(), PLearn::LogSoftmaxVariable::bprop(), PLearn::LogAddVariable::bprop(), PLearn::GraphicalBiText::compute_likelihood(), PLearn::MultiInstanceNNet::computeCostsFromOutputs(), PLearn::MixtureRandomVariable::EMBprop(), exp(), PLearn::SoftmaxLossVariable::fprop(), PLearn::MatrixSoftmaxVariable::fprop(), PLearn::MatrixSoftmaxLossVariable::fprop(), PLearn::ExpVariable::fprop(), PLearn::SoftmaxLossVariable::rfprop(), and softmax().

real PLearn::safeflog real  base,
real  a
 

Definition at line 106 of file pl_math.cc.

References safeflog().

real PLearn::safeflog real  a  ) 
 

Definition at line 75 of file pl_math.cc.

References log(), and PLERROR.

Referenced by PLearn::GaussianProcessRegressor::BayesianCost(), PLearn::PowVariableVariable::bprop(), PLearn::PLogPVariable::bprop(), PLearn::MultiInstanceNNet::computeCostsFromOutputs(), PLearn::MultinomialRandomVariable::EMUpdate(), PLearn::MixtureRandomVariable::EMUpdate(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), PLearn::NegLogProbCostFunction::evaluate(), PLearn::SemiSupervisedProbClassCostVariable::fprop(), PLearn::PLogPVariable::fprop(), PLearn::LogVariable::fprop(), safeflog(), safeflog2(), and PLearn::AdaBoost::train().

real PLearn::safeflog2 real  a  ) 
 

Definition at line 111 of file pl_math.cc.

References LOG_2, and safeflog().

Referenced by PLearn::GraphicalBiText::computeKL().

template<class T>
TVec<T> safelog const TVec< T > &  src  )  [inline]
 

Definition at line 912 of file TMat_maths_impl.h.

References compute_safelog(), and PLearn::TVec< T >::length().

Referenced by compute_safelog().

void samePos ProbabilitySparseMatrix &  m1,
ProbabilitySparseMatrix &  m2,
string  m1name,
string  m2name
[inline]
 

Definition at line 448 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::Set::end(), PLearn::ProbabilitySparseMatrix::exists(), PLearn::ProbabilitySparseMatrix::getPyX(), PLERROR, SetIterator, x, and PLearn::ProbabilitySparseMatrix::Y.

Var PLearn::Sample ConditionalExpression  conditional_expression  ) 
 

Return a Var which depends functionally on the RHS instances and the value of other RandomVars which are non-random and influence the LHS.

Definition at line 701 of file RandomVar.cc.

References PLearn::ConditionalExpression::LHS, PLearn::ConditionalExpression::RHS, and PLearn::RVInstance::V.

Referenced by PLearn::RandomVarVMatrix::RandomVarVMatrix(), and sample().

void PLearn::sample ConditionalExpression  conditional_expression,
Mat &  samples
 

Sample N instances from the given conditional expression, of the form (LHS|RHS) where LHS is a RandomVar and RHS is a RVInstanceArray, e.g. (X==x && Z==z && W==w). Put the N instances in the rows of the given Nxd matrix. THIS ALSO SHOWS HOW TO REPEATEDLY SAMPLE IN AN EFFICIENT MANNER (rather than call "Vec sample(ConditionalExpression)").

Definition at line 677 of file RandomVar.cc.

References PLearn::VarArray::fprop(), PLearn::TMat< T >::length(), and Sample().

Vec PLearn::sample ConditionalExpression  conditional_expression  ) 
 

Sample an instance from the given conditional expression, of the form (LHS|RHS) where LHS is a RandomVar and RHS is a RVInstanceArray, e.g. (X==x && Z==z && W==w).

Definition at line 664 of file RandomVar.cc.

References Sample().

Referenced by PLearn::GraphicalBiText::GraphicalBiText(), PLearn::RGBImageVMatrix::sample(), PLearn::GaussMix::train(), and PLearn::VMatrixFromDistribution::VMatrixFromDistribution().

template<class T>
void save const string filepath,
const T &  x
[inline]
 

If necessary, missing directories along the filepath will be created.

Definition at line 1183 of file PStream.h.

References force_mkdir_for_file(), PLERROR, and x.

Referenced by PLearn::GaussianContinuum::compute_train_and_validation_costs(), cross_valid(), PLearn::GaussianContinuum::get_image_matrix(), PLearn::VMatrix::getRanges(), PLearn::VMatrix::getStats(), PLearn::LearnerCommand::LearnerCommand(), PLearn::GaussianContinuum::make_random_walk(), PLearn::Learner::measure(), PLearn::PTester::perform(), PLearn::VMat::precompute(), PLearn::Train::run(), PLearn::SequentialValidation::run(), PLearn::Grapher::run(), PLearn::GenerateDecisionPlot::run(), PLearn::Experiment::run(), PLearn::WordNetOntology::save(), PLearn::Object::save(), PLearn::RowMapSparseMatrix< real >::saveAscii(), PLearn::Learner::stop_if_wanted(), PLearn::LearnerCommand::test(), PLearn::NeuralNet::train(), PLearn::LearnerCommand::train(), PLearn::GaussianContinuum::train(), PLearn::AdaBoost::train(), and train_and_test().

template<class T>
void PLearn::saveAscii const string filename,
const TVec< T > &  vec
 

first number in file is length

Definition at line 464 of file MatIO.h.

References PLearn::TVec< T >::begin(), PLearn::TVec< T >::end(), endl(), PLearn::TVec< T >::length(), and PLERROR.

template<class T>
void PLearn::saveAscii const string filename,
const TMat< T > &  mat
 

Definition at line 431 of file MatIO.h.

References saveAscii().

template<class T>
void PLearn::saveAscii const string filename,
const TMat< T > &  mat,
const TVec< string > &  fieldnames
 

Definition at line 437 of file MatIO.h.

References endl(), k, PLearn::TMat< T >::length(), PLERROR, PLearn::TVec< T >::size(), space_to_underscore(), and PLearn::TMat< T >::width().

Referenced by PLearn::Grapher::plot_1D_regression(), PLearn::SequentialValidation::reportStats(), and saveAscii().

void PLearn::saveAsciiWithoutSize const string filename,
const Mat &  mat
 

Definition at line 855 of file MatIO.cc.

References PLearn::TMat< T >::length(), PLERROR, and pretty_print_number().

void PLearn::saveAsciiWithoutSize const string filename,
const Vec &  vec
 

Definition at line 816 of file MatIO.cc.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and pretty_print_number().

Referenced by PLearn::SequentialModelSelector::test(), and PLearn::SequentialModelSelector::train().

void PLearn::saveGnuplot const string filename,
const Mat &  mat
 

Definition at line 597 of file MatIO.cc.

References PLearn::TMat< T >::length(), and PLERROR.

void PLearn::saveGnuplot const string filename,
const Vec &  vec
 

Definition at line 584 of file MatIO.cc.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::Gnuplot::multiplot(), PLearn::Gnuplot::plot(), PLearn::Gnuplot::plot3d(), and plotVMats().

template<class T>
void savePMat const string filename,
const TMat< T > &  mat
 

Definition at line 804 of file TMat_impl.h.

References PLERROR.

void PLearn::savePMat const string filename,
const TMat< double > &  mat
 

Definition at line 361 of file MatIO.cc.

References DATAFILE_HEADERLENGTH, header, PLearn::TMat< T >::length(), PLERROR, strlen(), and PLearn::TMat< T >::width().

void PLearn::savePMat const string filename,
const TMat< float > &  mat
 

Definition at line 329 of file MatIO.cc.

References DATAFILE_HEADERLENGTH, header, PLearn::TMat< T >::length(), PLERROR, strlen(), and PLearn::TMat< T >::width().

Referenced by PLearn::TMat< pair< real, real > >::save().

template<class T>
void savePVec const string filename,
const TVec< T > &  vec
 

Definition at line 133 of file TVec_impl.h.

References PLERROR.

void PLearn::savePVec const string filename,
const TVec< double > &  vec
 

Definition at line 193 of file MatIO.cc.

References PLearn::TVec< T >::data(), DATAFILE_HEADERLENGTH, header, PLearn::TVec< T >::length(), PLERROR, and strlen().

void PLearn::savePVec const string filename,
const TVec< float > &  vec
 

Old native PLearn binary format (.pmat).

Definition at line 160 of file MatIO.cc.

References PLearn::TVec< T >::data(), DATAFILE_HEADERLENGTH, header, PLearn::TVec< T >::length(), PLERROR, and strlen().

Referenced by PLearn::TVec< pair< real, real > >::save().

void PLearn::saveSNMat const string filename,
const Mat &  mat
 

Definition at line 899 of file MatIO.cc.

References fwrite_float(), fwrite_int(), PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

void PLearn::saveSNVec const string filename,
const Vec &  vec
 

Definition at line 954 of file MatIO.cc.

References PLearn::TVec< T >::data(), fwrite_float(), fwrite_int(), PLearn::TVec< T >::length(), and PLERROR.

void PLearn::saveStringInFile const string filepath,
const string text
 

Writes the raw string into the given file. Intermediate directories in filepath are created if necessary.

Definition at line 348 of file fileutils.cc.

References force_mkdir_for_file(), and PLERROR.

Referenced by PLearn::PTester::perform(), and PLearn::SequentialValidation::run().

int PLearn::search_replace string text,
const string searchstr,
const string replacestr
 

replaces all occurrences of searchstr in the text by replacestr; returns the number of matches that were replaced

Definition at line 366 of file stringutils.cc.
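
A hedged usage sketch (marker and replacement strings are illustrative):

    #include <string>

    void replace_example()
    {
        std::string text = "a TAB b TAB c";
        int n = PLearn::search_replace(text, "TAB", ",");
        // n == 2 and text == "a , b , c"
    }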

void PLearn::seed  ) 
 

initializes the random number generator with the cpu time

Definition at line 195 of file random.cc.

References manual_seed().

Referenced by PLearn::RepeatSplitter::build_(), PLearn::EmpiricalDistribution::EmpiricalDistribution(), PLearn::AdaBoost::forget(), PLearn::VVMatrix::generateVMatIndex(), get_seed(), PLearn::TangentLearner::initializeParams(), PLearn::NNet::initializeParams(), PLearn::NeuralNet::initializeParams(), PLearn::NeighborhoodSmoothnessNNet::initializeParams(), PLearn::MultiInstanceNNet::initializeParams(), PLearn::GaussianContinuum::initializeParams(), PLearn::ConditionalDensityNet::initializeParams(), old_plearn_main(), plearn_main(), and PLearn::RepeatSplitter::RepeatSplitter().

template<class T>
void select const TMat< T > &  source,
const TVec< T > &  row_indices,
const TVec< T > &  column_indices,
TMat< T > &  destination
 

template<class T, class I>
void select const TMat< T > &  source,
const TVec< I > &  row_indices,
const TVec< I > &  column_indices,
TMat< T > &  destination
 

Definition at line 473 of file TMat_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by eigenSparseNonSymmMat(), eigenSparseSymmMat(), PLearn::TMat< pair< real, real > >::operator()(), and PLearn::GraphicalBiText::test_WSD().

template<class T>
TVec<T> selectAndOrder const TMat< T > &  mat,
int  pos,
int  colnum = 0
 

Definition at line 3584 of file TMat_maths_impl.h.

References PLearn::TMat< T >::column(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::swapRows(), PLearn::TMat< T >::width(), and x.

template<class T>
T selectAndOrder const TVec< T > &  vec,
int  pos
 

find the element at position pos that would result from a sort and put all elements (not in order!) lower than v[pos] in v[i<pos].

Definition at line 1890 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, swap(), and x.

Referenced by PLearn::LiftStatsCollector::finalize(), and PLearn::LiftStatsIterator::finish().

template<class T, class I>
void PLearn::selectColumns const TMat< T > &  source,
const TVec< I > &  column_indices,
TMat< T > &  destination
 

Definition at line 445 of file TMat_impl.h.

References PLearn::TMat< T >::column(), PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::TMat< pair< real, real > >::columns().

template<class T, class I>
void PLearn::selectElements const TVec< T > &  source,
const TVec< I > &  indices,
TVec< T > &  destination
 

select the elements of the source as specified by the vector of indices (between 0 and source.length()-1) into the destination vector (which must have the same length() as the indices vector).

Definition at line 161 of file TMat_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::TVec< pair< real, real > >::operator()().
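
A hedged usage sketch:

    void select_example()
    {
        PLearn::TVec<PLearn::real> source(5);
        for (int i = 0; i < source.length(); i++)
            source[i] = 10 * i;                    // source = [0, 10, 20, 30, 40]

        PLearn::TVec<int> indices(3);
        indices[0] = 4; indices[1] = 0; indices[2] = 2;

        PLearn::TVec<PLearn::real> dest(3);        // must have the same length() as indices
        PLearn::selectElements(source, indices, dest);
        // dest = [40, 0, 20]
    }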

template<class T, class I>
void PLearn::selectRows const TMat< T > &  source,
const TVec< I > &  row_indices,
TMat< T > &  destination
 

Definition at line 418 of file TMat_impl.h.

References PLearn::TVec< T >::data(), PLearn::TMat< T >::length(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by PLearn::TMat< pair< real, real > >::rows().

void sgesdd_ char *  JOBZ,
int M,
int N,
float *  A,
int LDA,
float *  S,
float *  U,
int LDU,
float *  VT,
int LDVT,
float *  WORK,
int LWORK,
int IWORK,
int INFO
 

Referenced by lapack_Xgesdd_().

void sgesv_ int N,
int NRHS,
float *  A,
int LDA,
int IPIV,
float *  B,
int LDB,
int INFO
 

Referenced by lapackSolveLinearSystem().

void sgetrf_ int M,
int N,
float *  A,
int LDA,
int IPIV,
int INFO
 

Referenced by matInvert().

void sgetri_ int N,
float *  A,
int LDA,
int IPIV,
float *  WORK,
int LWORK,
int INFO
 

Referenced by matInvert().

StatsIt sharpe_ratio_stats  )  [inline]
 

exponential of the mean

Definition at line 416 of file StatsIterator.h.

VMat PLearn::shuffle VMat  d  ) 
 

returns a SelectRowsVMatrix that has d's rows shuffled

Definition at line 775 of file VMat_maths.cc.

References PLearn::VMat::length(), PLearn::VMat::rows(), and shuffleElements().

Referenced by PLearn::BootstrapVMatrix::BootstrapVMatrix(), and PLearn::RepeatSplitter::RepeatSplitter().

template<class T>
void shuffleElements const TVec< T > &  vec  ) 
 

randomly shuffle the entries of the TVector

Definition at line 145 of file random.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and uniform_sample().

Referenced by bootstrap(), PLearn::TrainValidTestSplitter::build_(), PLearn::SelectInputSubsetLearner::build_(), PLearn::RepeatSplitter::build_(), PLearn::BootstrapVMatrix::build_(), PLearn::VVMatrix::generateVMatIndex(), randomSplit(), shuffle(), and split().

template<class T>
void shuffleRows const TMat< T > &  mat  ) 
 

Definition at line 164 of file random.h.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::swapRows(), and uniform_sample().

Referenced by computeRanks(), input2dSet(), loadATT800(), loadBreastCancer(), loadBreastCancerWisconsin(), loadCorel(), loadDiabetes(), loadIonosphere(), loadLetters(), loadPimaIndians(), and loadSonar().

Var sigmoid Var  v  )  [inline]
 

Definition at line 73 of file SigmoidVariable.h.

template<class T>
TVec<T> sigmoid const TVec< T > &  src  )  [inline]
 

Definition at line 995 of file TMat_maths_impl.h.

References compute_sigmoid(), and PLearn::TVec< T >::length().

real sigmoid real  x  )  [inline]
 

numerically stable version of sigmoid(x) = 1.0/(1.0+exp(-x))

Definition at line 270 of file pl_math.h.

References tanh(), and x.

Referenced by PLearn::SoftSlopeVariable::bprop(), PLearn::SoftplusVariable::bprop(), PLearn::NegCrossEntropySigmoidVariable::bprop(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), compute_sigmoid(), d_soft_slope(), PLearn::SigmoidalKernel::evaluate(), PLearn::LiftBinaryCostFunction::evaluate(), PLearn::SigmoidVariable::fprop(), PLearn::NegCrossEntropySigmoidVariable::fprop(), softmax(), PLearn::SoftplusVariable::symbolicBprop(), and PLearn::ConditionalDensityNet::train().
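
A minimal sketch of the tanh identity that such a numerically stable version can rely on (written with double rather than PLearn's real):

    #include <cmath>

    // sigmoid(x) = 1 / (1 + exp(-x)) = 0.5 * (1 + tanh(x / 2));
    // evaluating through tanh avoids overflowing exp(-x) for large negative x.
    double sigmoid_sketch(double x)
    {
        return 0.5 * (1.0 + std::tanh(0.5 * x));
    }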

Var sign Var  input  )  [inline]
 

Definition at line 72 of file SignVariable.h.

real sign real  a  )  [inline]
 

Definition at line 92 of file pl_math.h.

Referenced by PLearn::SignVariable::fprop().

void PLearn::skipBlanksAndComments istream &  in  ) 
 

will skip all blanks (white space, newlines and #-style comments); the next character read will be the first "non-blank"

Definition at line 434 of file fileutils.cc.

References skipRestOfLine().

Referenced by PLearn::MultiInstanceVMatrix::build_(), getAfterSkipBlanksAndComments(), loadAscii(), loadAsciiSingleBinaryDescriptor(), peekAfterSkipBlanksAndComments(), and readAndMacroProcess().

void PLearn::skipRestOfLine istream &  in  ) 
 

skips everything until the end of line (and also consumes the newline character)

Definition at line 427 of file fileutils.cc.

Referenced by PLearn::VMatLanguage::preprocess(), and skipBlanksAndComments().

real small_dilogarithm real  x  ) 
 

Definition at line 150 of file pl_math.cc.

References PLWARNING, and x.

Referenced by positive_dilogarithm().

Mat PLearn::smartInitialization VMat  v,
int  n,
real  c,
real  regularization
 

Definition at line 79 of file GaussianContinuum.cc.

References PLearn::TMat< T >::clear(), PLearn::VMat::length(), regularizeMatrix(), solveLinearSystem(), uniform_multinomial_sample(), and PLearn::VMat::width().

Referenced by PLearn::TangentLearner::initializeParams().

int PLearn::smartReadUntilNext istream &  in,
string  stoppingsymbols,
string characters_read,
bool  ignore_brackets = false
 

same as PStream's method smartReadUntilNext, but for istream

Definition at line 491 of file fileutils.cc.

References PLearn::PStream::smartReadUntilNext().

Referenced by getModelAliases(), readAndMacroProcess(), and PLearn::PStream::smartReadUntilNext().

template<class T>
TMat<T> smooth TMat< T >  data,
int  windowsize
 

Definition at line 5494 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), and PLearn::TMat< T >::width().

Mat PLearn::smoothCorelHisto Mat &  data  ) 
 

Definition at line 595 of file databases.cc.

References k, and PLearn::TMat< T >::length().

Referenced by loadCorel().

int snaupd_(long int, const char *, long int, const char *, long int, float *, float *, long int, float *, long int, long int, long int, float *, float *, long int, long int, short, short)

Referenced by eigenSparseNonSymmMat().

int sneupd_(long int, const char *, long int, float *, float *, float *, long int, float *, float *, float *, const char *, long int, const char *, long int, float *, float *, long int, float *, long int, long int, long int, float *, float *, long int, long int, short, short, short)

Referenced by eigenSparseNonSymmMat().

Var soft_slope Var  x,
Var  smoothness,
Var  left,
Var  right
[inline]
 

Definition at line 81 of file SoftSlopeVariable.h.

References left(), right(), and x.

real soft_slope real  x,
real  smoothness = 1,
real  left = 0,
real  right = 1
[inline]
 

Definition at line 349 of file pl_math.h.

References hard_slope(), left(), right(), softplus(), and x.

Referenced by PLearn::SoftSlopeVariable::bprop(), PLearn::ConditionalDensityNet::build_(), PLearn::SoftSlopeVariable::fprop(), and PLearn::ConditionalDensityNet::train().

Var soft_slope_integral Var  smoothness,
Var  left,
Var  right,
real  a = 0,
real  b = 1
[inline]
 

Definition at line 80 of file SoftSlopeIntegralVariable.h.

References left(), and right().

real PLearn::soft_slope_integral real  smoothness = 1,
real  left = 0,
real  right = 1,
real  a = 0,
real  b = 1
 

Definition at line 241 of file pl_math.cc.

References hard_slope_integral(), left(), right(), and softplus_primitive().

Referenced by PLearn::ConditionalDensityNet::build_(), and PLearn::SoftSlopeIntegralVariable::fprop().

Var soft_slope_limit Var  x,
Var  smoothness,
Var  left,
Var  right
[inline]
 

Definition at line 91 of file SoftSlopeVariable.h.

References left(), PLearn::Var::length(), PLERROR, right(), PLearn::Var::width(), and x.

Referenced by PLearn::ConditionalDensityNet::build_().

Var PLearn::softmax Var  input,
int  index
 

Should be numerically more stable.

Definition at line 66 of file Var_utils.cc.

References exp(), and sum().

Var softmax Var  v  )  [inline]
 

Definition at line 75 of file SoftmaxVariable.h.

Var softmax Var  input,
Var  index
[inline]
 

Definition at line 79 of file SoftmaxLossVariable.h.

Var PLearn::softmax Var  x1,
Var  x2,
Var  hardness
 

A soft version of the ordinary max(x1,x2) operation, where the hardness parameter controls how close to an actual max(x1,x2) we are (i.e. as hardness --> infty we quickly get max(x1,x2), but as hardness --> 0 we get the simple average of x1 and x2).

Definition at line 133 of file SigmoidVariable.cc.

References sigmoid().
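
One scalar construction consistent with this description and with the sigmoid() reference is a sigmoid-weighted blend of the two arguments; the exact Var-level formula in SigmoidVariable.cc is not reproduced here, so treat this as an assumption.

    #include <cmath>

    // Hypothetical scalar analogue of softmax(x1, x2, hardness): a sigmoid-
    // weighted average that tends to max(x1,x2) as hardness grows and to the
    // plain average as hardness goes to 0.
    inline double soft_max2(double x1, double x2, double hardness)
    {
        double w = 1.0 / (1.0 + std::exp(-hardness * (x1 - x2)));  // sigmoid weight
        return w * x1 + (1.0 - w) * x2;
    }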

template<class T>
TVec<T> softmax const TVec< T > &  x  ) 
 

Definition at line 1613 of file TMat_maths_impl.h.

References softmax(), and x.

template<class T>
void softmax const TVec< T > &  x,
const TVec< T > &  y
 

y = softmax(x)

Definition at line 76 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), max(), PLERROR, safeexp(), and x.

Referenced by PLearn::LogSumVariable::bprop(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MixtureRandomVariable::ElogP(), PLearn::SoftmaxVariable::fprop(), PLearn::MultinomialRandomVariable::logP(), PLearn::MixtureRandomVariable::logP(), PLearn::MultinomialRandomVariable::setValueFromParentsValue(), PLearn::MixtureRandomVariable::setValueFromParentsValue(), softmax(), and PLearn::LogSumVariable::symbolicBprop().
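
The References line (max() and safeexp()) suggests the usual max-subtraction trick; here is a standalone sketch with std::vector in place of TVec.

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // y = softmax(x), computed as exp(x_i - max(x)) / sum_j exp(x_j - max(x))
    // so that no exponential overflows (sketch only, not TMat_maths_impl.h).
    void softmax_sketch(const std::vector<double>& x, std::vector<double>& y)
    {
        y.assign(x.size(), 0.0);
        if (x.empty())
            return;
        double m = *std::max_element(x.begin(), x.end());
        double total = 0.0;
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            y[i] = std::exp(x[i] - m);
            total += y[i];
        }
        for (std::size_t i = 0; i < x.size(); ++i)
            y[i] /= total;
    }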

Var softplus Var  v  )  [inline]
 

Definition at line 72 of file SoftplusVariable.h.

real softplus real  x  )  [inline]
 

numerically stable computation of log(1+exp(x)); an equivalent closed form is 0.5*x + LOG_2 - log(1./cosh(0.5*x)).

Definition at line 293 of file pl_math.h.

References exp(), and x.

Referenced by PLearn::SoftSlopeIntegralVariable::bprop(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::GaussianContinuum::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::SoftplusVariable::fprop(), soft_slope(), and tabulated_softplus().
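
A minimal sketch of a numerically stable log(1+exp(x)), branching on the sign of x so that exp() is only ever taken of a non-positive argument; the actual branching in pl_math.h may differ.

    #include <cmath>

    // Stable softplus sketch: log(1+exp(x)) without overflow for large |x|.
    inline double softplus_sketch(double x)
    {
        return x > 0 ? x + std::log1p(std::exp(-x))
                     : std::log1p(std::exp(x));
    }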

Var softplus_primitive Var  v  )  [inline]
 

Definition at line 78 of file DilogarithmVariable.h.

References dilogarithm(), and exp().

real softplus_primitive real  x  )  [inline]
 

Definition at line 409 of file pl_math.h.

References dilogarithm(), exp(), and x.

Referenced by soft_slope_integral(), and tabulated_softplus_primitive().

Vec PLearn::solveLinearSystem const Mat &  A,
const Vec &  b
 

Returns solution x of Ax = b (same as above, except b and x are vectors).

Definition at line 350 of file plapack.cc.

References PLearn::TVec< T >::length(), solveLinearSystem(), and PLearn::TVec< T >::toMat().

Mat PLearn::solveLinearSystem const Mat &  A,
const Mat &  B
 

Returns the solution X of AX = B. A and B are left intact, and the solution is returned. This call does memory allocations/deallocations and transposed copies of matrices (contrary to the lower-level lapackSolveLinearSystem call that you may consider using if efficiency is a concern).

Definition at line 337 of file plapack.cc.

References lapackSolveLinearSystem(), PLearn::TMat< T >::length(), PLERROR, and transpose().

void PLearn::solveLinearSystem const Mat &  A,
const Mat &  Y,
Mat &  X
 

for matrices A such that A.length() <= A.width(), find X s.t.

A X = Y

Definition at line 325 of file plapack.cc.

References PLERROR.

Referenced by constrainedLinearRegression(), PLearn::ProductRandomVariable::EMBprop(), linearRegression(), PLearn::ReconstructionWeightsKernel::reconstruct(), smartInitialization(), solveLinearSystem(), and weightedLinearRegression().
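
A minimal usage sketch of the Vec overload documented above; the matrix values, variable names and include path are illustrative assumptions, only the solveLinearSystem(A, b) call itself comes from this entry.

    #include "plapack.h"      // assumed include; the overloads above are defined in plapack.cc
    using namespace PLearn;

    void solve_example()
    {
        Mat A(2, 2);          // a small, well-conditioned 2x2 system
        A(0, 0) = 4; A(0, 1) = 1;
        A(1, 0) = 1; A(1, 1) = 3;
        Vec b(2);
        b[0] = 1; b[1] = 2;
        Vec x = solveLinearSystem(A, b);   // solves A x = b; A and b are left intact
    }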

template<class T>
void solveLinearSystemByCholesky const TMat< T > &  A,
const TMat< T > &  B,
TMat< T > &  X,
TMat< T > *  pL = 0,
TVec< T > *  py = 0
 

Definition at line 4732 of file TMat_maths_impl.h.

References choleskyDecomposition(), choleskySolve(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductRandomVariable::EMUpdate(), linearRegression(), linearRegressionNoBias(), and weightedLinearRegression().

void PLearn::solveTransposeLinearSystem const Mat &  A,
const Mat &  Y,
Mat &  X
 

for matrices A such that A.length() >= A.width(), find X s.t.

X A = Y

Definition at line 332 of file plapack.cc.

References PLERROR.

Referenced by PLearn::ProductRandomVariable::EMBprop().

template<class T>
void solveTransposeLinearSystemByCholesky const TMat< T > &  A,
const TMat< T > &  B,
TMat< T > &  X,
TMat< T > *  pL = 0,
TVec< T > *  py = 0
 

Definition at line 4760 of file TMat_maths_impl.h.

References choleskyDecomposition(), choleskySolve(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductRandomVariable::EMUpdate().

template<class T>
void sortColumns const TMat< T > &  mat,
int  rownum
 

Definition at line 239 of file TMat_sort.h.

References PLearn::TMat< T >::length(), min(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void sortElements const TVec< T > &  vec  )  [inline]
 

Sorts the elements of vec in place.

Definition at line 113 of file TMat_sort.h.

References PLearn::TVec< T >::begin(), and PLearn::TVec< T >::end().

Referenced by bootstrap(), PLearn::TrainValidTestSplitter::build_(), PLearn::RemoveRowsVMatrix::build_(), PLearn::BootstrapVMatrix::build_(), displayHistogram(), PLearn::QuantilesStatsIterator::finish(), max_cdf_diff(), PLearn::Gnuplot::plotcdf(), and PLearn::Gnuplot::plotdensity().

int sortIdComparator const void *  i1,
const void *  i2
 

Definition at line 83 of file StatsCollector.cc.

References PairRealSCCType.

Referenced by PLearn::StatsCollector::sortIds().

template<class T>
void sortRows const TMat< T > &  mat,
int  col = 0,
bool  increasing_order = true
 

This implementation should be very efficient, but it does two memory allocations: a first one of mat.length()*(sizeof(real)+sizeof(int)) and a second one of mat.length()*sizeof(int).

(Note: due to the implementation of the column sorting, this function always performs a STABLE SORT (in the sense of the STL stable_sort function). There is no need to explicitly call stable_sort to achieve the effect, iff increasing_order=true.)

Definition at line 194 of file TMat_sort.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::swapRows().

template<class T>
void sortRows TMat< T > &  mat,
const TVec< int > &  key_columns,
bool  increasing_order = true
 

Definition at line 104 of file TMat_sort.h.

References PLearn::TMat< T >::rows_as_arrays_begin(), and PLearn::TMat< T >::rows_as_arrays_end().

Referenced by PLearn::SortRowsVMatrix::build_(), PLearn::Kernel::computeNeighbourMatrixFromDistanceMatrix(), computeRanks(), PLearn::ReconstructionWeightsKernel::computeWeights(), findClosestPairsOfDifferentClass(), newIndexedMatArray(), PLearn::Grapher::plot_1D_regression(), PLearn::Gnuplot::plotClasses(), rebalanceNClasses(), PLearn::TestDependenciesCommand::run(), and PLearn::SortRowsVMatrix::sortRows().
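
A standalone sketch of the idea behind sortRows (stable sort of row indices on one column, then row permutation), using std::stable_sort on a vector-of-vectors instead of the TMat machinery described above.

    #include <algorithm>
    #include <vector>

    // Stable sort of the rows of 'mat' on column 'col' (sketch, not TMat_sort.h).
    void sort_rows_sketch(std::vector<std::vector<double> >& mat, int col,
                          bool increasing_order = true)
    {
        std::vector<std::size_t> idx(mat.size());
        for (std::size_t i = 0; i < idx.size(); ++i)
            idx[i] = i;
        std::stable_sort(idx.begin(), idx.end(),
                         [&](std::size_t a, std::size_t b)
                         {
                             return increasing_order ? mat[a][col] < mat[b][col]
                                                     : mat[a][col] > mat[b][col];
                         });
        std::vector<std::vector<double> > sorted;
        sorted.reserve(mat.size());
        for (std::size_t i = 0; i < idx.size(); ++i)
            sorted.push_back(mat[idx[i]]);
        mat.swap(sorted);
    }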

string PLearn::space_to_underscore string  str  ) 
 

replaces all characters <= ' ' (i.e. newline, tab, space, etc...) by '_'

Definition at line 335 of file stringutils.cc.

Referenced by PLearn::AsciiVMatrix::AsciiVMatrix(), PLearn::Learner::costNames(), PLearn::SDBVMFieldRemapStrings::getDiscreteValue(), PLearn::VMatrix::saveAMAT(), saveAscii(), PLearn::Learner::testResultsNames(), PLearn::RowIterator::toString(), and PLearn::FieldValue::toString().

void PLearn::SpearmanRankCorrelation const VMat &  x,
const VMat &  y,
Mat &  r
 

Compute the Spearman Rank correlation statistic.

It measures how much of a monotonic dependency there is between two variables x and y (column matrices). The statistic is computed as follows: r = 1 - 6 (sum_{i=1}^n (rx_i - ry_i)^2) / (n(n^2-1)). If x and y are column matrices then r is a 1x1 matrix. If x and y have width wx and wy respectively then the statistic is computed for each pair of columns (the first taken from x and the second from y) and r will be a wx by wy matrix upon return. N.B. If x fits in memory then copying it to a matrix before calling this function will speed up the computation significantly.

Definition at line 62 of file stats_utils.cc.

References PLearn::TMat< T >::clear(), computeRanks(), endl(), k, PLearn::VMat::length(), PLERROR, PLWARNING, PLearn::TMat< T >::resize(), PLearn::VMat::toMat(), PLearn::VMat::width(), and x.

Referenced by testSpearmanRankCorrelation().
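
The statistic quoted above reduces to simple arithmetic once the ranks are known; below is a standalone sketch that assumes the rank vectors rx and ry have already been computed (the role played by computeRanks() in the References line).

    #include <vector>

    // r = 1 - 6 * sum_i (rx_i - ry_i)^2 / (n (n^2 - 1))   (sketch)
    double spearman_from_ranks(const std::vector<double>& rx,
                               const std::vector<double>& ry)
    {
        int n = static_cast<int>(rx.size());
        double d2 = 0.0;
        for (int i = 0; i < n; ++i)
        {
            double d = rx[i] - ry[i];
            d2 += d * d;
        }
        return 1.0 - 6.0 * d2 / (double(n) * (double(n) * n - 1.0));
    }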

void PLearn::split VMat  d,
real  validation_fraction,
real  test_fraction,
VMat &  train,
VMat &  valid,
VMat &  test,
bool  do_shuffle = false
 

Splits the dataset d into 3 subsets.

Definition at line 115 of file Splitter.cc.

References endl(), PLearn::VMat::length(), PLearn::VMat::rows(), shuffleElements(), and PLearn::TVec< T >::subVec().

void PLearn::split VMat  d,
real  test_fraction,
VMat &  train,
VMat &  test,
int  i = 0,
bool  use_all = false
 

Splits the dataset d into a train and a test subset. If test_fraction is <1.0 then the size of the test subset is ntest = int(test_fraction*d.length()); if test_fraction is >=1.0 then ntest = int(test_fraction). The last argument i selects the ith split of a K-fold cross validation: i = 0 corresponds to using the last ntest samples as the test set, i = 1 means using the ntest samples before those as the test set, etc. If the bool 'use_all' is true, then a test set may contain a few more examples so that, if all splits are requested, every example is contained in one of the test splits.

Definition at line 63 of file Splitter.cc.

References PLearn::VMat::length(), PLearn::VMat::subMatRows(), and vconcat().
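
The index arithmetic described above can be summarised in a few lines. This sketch only computes where the test block of the ith fold starts, with illustrative variable names; it is not the Splitter.cc code and it ignores the 'use_all' adjustment.

    // Which rows form the test block of fold i (sketch of the rule above).
    void test_block_for_fold(int length, double test_fraction, int i,
                             int& test_start, int& ntest)
    {
        ntest = test_fraction < 1.0 ? int(test_fraction * length)
                                    : int(test_fraction);
        // fold 0 is the last ntest rows, fold 1 the ntest rows just before, ...
        test_start = length - (i + 1) * ntest;
    }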

vector< string > PLearn::split const string s,
const string delimiters = " \t\n\r",
bool  keepdelimiters = false
 

Splits a string into a list of substrings (using any sequence of the given delimiters as a split point). If keepdelimiters is true, the delimiter sequences are appended to the list; otherwise (the default) they are removed.

Definition at line 399 of file stringutils.cc.

vector< string > PLearn::split const string s,
char  delimiter
 

splits a string along occurrences of the delimiter.

Definition at line 380 of file stringutils.cc.

Referenced by PLearn::SDBVMatrix::appendField(), PLearn::MultiInstanceVMatrix::build_(), PLearn::GraphicalBiText::build_(), PLearn::AsciiVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), cross_valid(), PLearn::VVMatrix::extractSourceMatrix(), PLearn::VMatLanguage::generateCode(), PLearn::VVMatrix::generateVMatIndex(), getDataSet(), PLearn::VVMatrix::getDateOfVMat(), PLearn::Dictionary::getId(), PLearn::EntropyContrast::getInfo(), getList(), getNonBlankLines(), PLearn::KFoldSplitter::getSplit(), PLearn::ShellProgressBar::getTime(), interactiveDisplayCDF(), PLearn::WordNetOntology::load(), loadAscii(), loadAsciiSingleBinaryDescriptor(), PLearn::GraphicalBiText::loadBitext(), loadClassificationDataset(), PLearn::VMatrix::loadFieldInfos(), PLearn::GraphicalBiText::loadSensemap(), main(), PLearn::GhostScript::multilineShow(), PLearn::Object::newwrite(), parseSizeFromRemainingLines(), PLearn::StatSpec::parseStatname(), plotVMats(), PLearn::VMatLanguage::preprocess(), PLearn::VVMatrix::processJoinSection(), randomSplit(), and PLearn::StringTable::StringTable().

pair< string, string > PLearn::split_on_first const string s,
const string delimiters = " \t\n\r"
 

Splits the string on the first occurrence of a delimiter; returns a pair with the two split parts. If the splitting character is not found, the original string is returned in the first part of the pair, and "" in the second part.

Definition at line 475 of file stringutils.cc.

References left(), right(), and split_on_first().

void PLearn::split_on_first const string s,
const string delimiters,
string left,
string right
 

Splits the string on the first occurrence of a delimiter and returns what was to the left of the delimiter and what was to the right of it. If no delimiter character is found, the original string is returned in left, and "" is returned in right.

Definition at line 459 of file stringutils.cc.

References left(), and right().

Referenced by PLearn::StatSpec::parseStatname(), PLearn::RunCommand::run(), and split_on_first().

void PLearn::splitTrainValidTest VMat &  data_set,
VMat &  train_set,
VMat &  valid_set,
real  valid_fraction,
VMat &  test_set,
real  test_fraction,
bool  normalize = true
 

Definition at line 130 of file databases.cc.

References PLearn::VMat::length(), normalize(), normalizeDataSets(), PLearn::VMat::subMatColumns(), PLearn::VMat::subMatRows(), and PLearn::VMat::width().

Var sqrt Var  v  )  [inline]
 

Definition at line 80 of file PowVariable.h.

References pow().

template<class T>
TMat<T> sqrt const TMat< T > &  m  ) 
 

Definition at line 5544 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), sqrt(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> sqrt const TVec< T > &  src  )  [inline]
 

Definition at line 894 of file TMat_maths_impl.h.

References compute_sqrt(), and PLearn::TVec< T >::length().

Referenced by affineNormalization(), autocorrelation_function(), bnldev(), PLearn::SquareRootVariable::bprop(), PLearn::ErfVariable::bprop(), choleskyDecomposition(), compute_sqrt(), computeBasicStats(), computeColumnsMeanAndStddev(), computeConditionalMeans(), computeMeanAndStddev(), PLearn::GaussMix::computeMeansAndCovariances(), PLearn::PCA::computeOutput(), PLearn::KernelProjection::computeOutput(), PLearn::GaussianContinuum::computeOutput(), correlation(), correlations(), dist(), PLearn::GhostScript::drawCross(), PLearn::DivisiveNormalizationKernel::evaluate(), PLearn::DivisiveNormalizationKernel::evaluate_i_j(), PLearn::DistanceKernel::evaluate_i_j(), PLearn::DivisiveNormalizationKernel::evaluate_i_x(), PLearn::DivisiveNormalizationKernel::evaluate_i_x_again(), PLearn::DivisiveNormalizationKernel::evaluate_x_i(), PLearn::DivisiveNormalizationKernel::evaluate_x_i_again(), PLearn::FieldStat::finalize(), PLearn::SharpeRatioStatsIterator::finish(), PLearn::StderrStatsIterator::finish(), PLearn::StddevStatsIterator::finish(), PLearn::SquareRootVariable::fprop(), gamdev(), gaussian_01(), PLearn::GaussianDistribution::generate(), PLearn::GaussMix::generateFromGaussian(), PLearn::JoinVMatrix::getNewRow(), PLearn::TangentLearner::initializeParams(), PLearn::NNet::initializeParams(), PLearn::GaussianContinuum::initializeParams(), KS_test(), PLearn::GaussianContinuum::make_random_walk(), PLearn::ConjGradientOptimizer::minCubic(), multivariate_normal(), norm(), PLearn::ScaledGradientOptimizer::optimize(), p_value(), paired_t_test(), PLearn::SequentialModelSelector::paired_t_test(), poidev(), PLearn::PCA::reconstruct(), PLearn::VMatLanguage::run(), PLearn::GaussianProcessRegressor::setInput_const(), PLearn::DiagonalNormalRandomVariable::setValueFromParentsValue(), sqrt(), squareroot(), PLearn::VMFieldStat::stddev(), PLearn::StatsCollector::stddev(), PLearn::StatsCollector::stderror(), PLearn::ErfVariable::symbolicBprop(), testNoCorrelationAsymptotically(), PLearn::GaussianProcessRegressor::train(), PLearn::EntropyContrast::train(), PLearn::AdaBoost::train(), and weighted_distance().

Var square Var  v  )  [inline]
 

Definition at line 74 of file SquareVariable.h.

template<class T>
TMat<T> square const TMat< T > &  m  ) 
 

Definition at line 5534 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), square(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> square const TVec< T > &  vec  ) 
 

Definition at line 1334 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
T square const T &  x  )  [inline]
 

Definition at line 125 of file pl_math.h.

References x.

Referenced by autocorrelation_function(), PLearn::TanhVariable::bprop(), PLearn::GaussianKernel::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::EntropyContrast::compute_df_dx(), computeBasicStats(), computeConditionalMeans(), PLearn::MovingAverage::computeCostsFromOutputs(), PLearn::GaussMix::computeLogLikelihood(), PLearn::PDistribution::computeOutput(), PLearn::SquaredErrorCostFunction::evaluate(), PLearn::ScaledGaussianKernel::evaluate(), PLearn::GeodesicDistanceKernel::evaluate(), PLearn::GeodesicDistanceKernel::evaluate_i_j(), PLearn::GeodesicDistanceKernel::evaluate_i_x_again(), PLearn::GeodesicDistanceKernel::evaluate_i_x_from_distances(), PLearn::FieldStat::finalize(), PLearn::OneHotSquaredLoss::fprop(), PLearn::MatrixOneHotSquaredLoss::fprop(), logOfCompactGaussian(), PLearn::DiagonalNormalRandomVariable::logP(), norm(), pownorm(), PLearn::TanhVariable::rfprop(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), square(), squareElements(), sumsquare(), PLearn::TanhVariable::symbolicBprop(), PLearn::ErfVariable::symbolicBprop(), PLearn::DivVariable::symbolicBprop(), PLearn::EntropyContrast::train(), PLearn::ConditionalStatsCollector::update(), PLearn::EntropyContrast::update_mu_sigma_f(), PLearn::GaussMix::updateFromConditionalSorting(), PLearn::VMFieldStat::variance(), and PLearn::StatsCollector::variance().

real PLearn::square_f real  x  ) 
 

Definition at line 131 of file pl_math.cc.

References x.

Referenced by PLearn::DiagonalNormalRandomVariable::EMBprop().

template<class T>
void squareAcc const TVec< T > &  vec,
const TVec< T > &  x
 

vec[i] += x[i]*x[i];

Definition at line 2141 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::ScaledGradientOptimizer::optimize().

CostFunc squared_classification_error real  hot_value = 0.8,
real  cold_value = 0.2
[inline]
 

Definition at line 88 of file SquaredErrorCostFunction.h.

CostFunc PLearn::squared_error int  singleoutputindex = -1  )  [inline]
 

Definition at line 92 of file SquaredErrorCostFunction.cc.

Referenced by PLearn::LinearRegressor::train().

template<class T>
void squareElements const TMat< T > &  m  ) 
 

squares the elements of m in place

Definition at line 156 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::compact_begin(), PLearn::TMat< T >::compact_end(), PLearn::TMat< T >::end(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::size(), and square().

template<class T>
void squareElements const TVec< T > &  x  ) 
 

squares the elements of x in place

Definition at line 143 of file TMat_maths_impl.h.

References x.

template<class T>
void squareMultiplyAcc const TMat< T > &  mat,
const TMat< T > &  x,
scale
 

Definition at line 3548 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLERROR, PLearn::TMat< T >::width(), and x.

template<class T>
void squareMultiplyAcc const TVec< T > &  vec,
const TVec< T > &  x,
scale
 

vec[i] += x[i]*x[i]*scale;

Definition at line 2125 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::DiagonalNormalRandomVariable::EMBprop(), and PLearn::ScaledGradientOptimizer::optimize().

template<class T>
void squareProductAcc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 2948 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::TransposeProductVariable::bbprop().

template<class T>
void squareProductTranspose const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3106 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void squareProductTransposeAcc const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3223 of file TMat_maths_impl.h.

References k, PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::TransposeProductVariable::bbprop().

Var squareroot Var  v  )  [inline]
 

Definition at line 73 of file SquareRootVariable.h.

template<class T>
TVec<T> squareroot const TVec< T > &  vec  ) 
 

Definition at line 1346 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and sqrt().

template<class T>
void squareSubtract const TVec< T > &  vec,
const TVec< T > &  x
 

vec[i] -= x[i]*x[i];

Definition at line 2157 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and x.

Referenced by PLearn::SharpeRatioStatsIterator::finish(), and PLearn::StderrStatsIterator::finish().

int ssaupd_(long int, const char *, long int, const char *, long int, float *, float *, long int, float *, long int, long int, long int, float *, float *, long int, long int, short, short)

Referenced by eigenSparseSymmMat().

int sseupd_(long int, const char *, long int, float *, float *, long int, float *, const char *, long int, const char *, long int, float *, float *, long int, float *, long int, long int, long int, float *, float *, long int, long int, short, short, short)

Referenced by eigenSparseSymmMat().

void ssyev_(char * JOBZ, char * UPLO, int N, float * A, int LDA, float * W, float * WORK, int LWORK, int INFO)

Referenced by eigen_SymmMat().

void ssyevr_(char * JOBZ, char * RANGE, char * UPLO, int N, float * A, int LDA, float * VL, float * VU, int IL, int IU, float * ABSTOL, int M, float * W, float * Z, int LDZ, int ISUPPZ, float * WORK, int LWORK, int IWORK, int LIWORK, int INFO)

Referenced by lapack_Xsyevr_().

void ssyevx_(char * JOBZ, char * RANGE, char * UPLO, int N, float * A, int LDA, float * VL, float * VU, int IL, int IU, float * ABSTOL, int M, float * W, float * Z, int LDZ, float * WORK, int LWORK, int IWORK, int IFAIL, int INFO)

Referenced by lapack_Xsyevx_().

void ssygvx_(int ITYPE, char * JOBZ, char * RANGE, char * UPLO, int N, float * A, int LDA, float * B, int LDB, float * VL, float * VU, int IL, int IU, float * ABSTOL, int M, float * W, float * Z, int LDZ, float * WORK, int LWORK, int IWORK, int IFAIL, int INFO)

Referenced by lapack_Xsygvx_().

Var stable_cross_entropy Var  linear_output,
Var  target
[inline]
 

Definition at line 83 of file NegCrossEntropySigmoidVariable.h.

Referenced by PLearn::NNet::build_(), and PLearn::NeighborhoodSmoothnessNNet::build_().

bool startsWith string base,
string  s
 

Definition at line 2899 of file WordNetOntology.cc.

Referenced by extractFiles(), PLearn::WordNetOntology::load(), and main().

StatsIt stddev_stats  )  [inline]
 

Definition at line 409 of file StatsIterator.h.

StatsIt stderr_stats  )  [inline]
 

Definition at line 410 of file StatsIterator.h.

Referenced by PLearn::Learner::Learner().

string PLearn::stemWord string word,
int  wn_pos
 

Definition at line 2869 of file WordNetOntology.cc.

References cstr().

string PLearn::stemWord string word  ) 
 

Definition at line 2842 of file WordNetOntology.cc.

References cstr().

Referenced by PLearn::WordNetOntology::extractWord(), PLearn::WordNetOntology::isInWordNet(), main(), and PLearn::TextSenseSequenceVMatrix::permute().

char * PLearn::strcopy char *  s  ) 
 

make a copy of a C string and return it

Definition at line 87 of file general.cc.

References strlen().

char* stringPos const char *  s,
const char *  strings[]
 

Definition at line 93 of file TypesNumeriques.cc.

Referenced by compactRepresentationTranslate().

vector< string > PLearn::stringvector int  argc,
char **  argv
 

Makes a C++ style vector of strings from a C style vector of strings. Note: this may be useful in conjunction with get_option.

Definition at line 564 of file stringutils.cc.

Referenced by old_plearn_main(), and plearn_main().

int strlen char *  s  )  [static]
 

Definition at line 68 of file FileVMatrix.cc.

Referenced by PLearn::FieldValue::FieldValue(), PLearn::VVMatrix::getDateOfVMat(), hashval(), loadLetters(), loadSTATLOG(), loadUCIMLDB(), PLearn::PStream::operator<<(), savePMat(), savePVec(), strcopy(), PLearn::FileVMatrix::updateHeader(), viewVMat(), word_sequences2files_int_stream(), PLearn::PStream::write(), and PLearn::PStream::writeAsciiNum().

real PLearn::student_t_cdf real  t,
int  nb_degrees_of_freedom
 

Student-t cumulative distribution function

Definition at line 158 of file random.cc.

References incomplete_beta(), and PLERROR.

Var subMat Var  v,
int  i,
int  j,
int  l,
int  w
[inline]
 

Definition at line 89 of file SubMatVariable.h.

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::Var::column(), PLearn::Variable::column(), PLearn::Var::operator()(), PLearn::Var::row(), PLearn::Variable::row(), PLearn::Variable::subVec(), and PLearn::Var::subVec().

Var subsample Var  input,
int  subsample_factor
[inline]
 

Definition at line 81 of file SubsampleVariable.h.

template<class T>
void subsample TMat< T >  m,
int  thesubsamplefactor,
TMat< T >  result
 

Definition at line 5600 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), norm(), sum(), and PLearn::TMat< T >::width().

Referenced by PLearn::SubsampleVariable::fprop().

template<class T>
void substract const TMat< T > &  m1,
const TMat< T > &  m2,
TMat< T > &  destination
 

Definition at line 4574 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void substract const TVec< T > &  source1,
const TVec< T > &  source2,
TVec< T > &  destination
 

Definition at line 1501 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, and PLearn::TVec< T >::resize().

template<class T>
void substract const TVec< T > &  source1,
source2,
TVec< T > &  destination
[inline]
 

Definition at line 1496 of file TMat_maths_impl.h.

References add().

Referenced by PLearn::ProjectionErrorVariable::bprop(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), computeMeanAndVariance(), computeWeightedMeanAndCovar(), PLearn::DiagonalNormalRandomVariable::EMBprop(), PLearn::MinusRandomVariable::EMBprop(), PLearn::PlusRandomVariable::EMBprop(), PLearn::DiagonalNormalRandomVariable::EMUpdate(), PLearn::ProjectionErrorVariable::fprop(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::GaussianContinuum::get_image_matrix(), PLearn::LocalNeighborsDifferencesVMatrix::getNewRow(), logOfNormal(), operator-(), and PLearn::GaussianContinuum::train().

template<class T>
void substractFromColumns const TMat< T > &  mat,
const TVec< T >  col,
bool  ignored
 

Definition at line 3746 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
void substractFromRows const TMat< T > &  mat,
const TVec< T >  row,
bool  ignored
 

Definition at line 3733 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length().

Var sum Var  v  )  [inline]
 

Definition at line 75 of file SumVariable.h.

template<class T>
T sum const TMat< T > &  mat,
bool  ignore_missing = false
 

Definition at line 3767 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), is_missing(), PLearn::TMat< T >::length(), MISSING_VALUE, PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

template<class T>
T sum const TVec< T > &  vec,
bool  ignore_missing = false
 

Definition at line 245 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), and MISSING_VALUE.

Referenced by PLearn::LogSoftmaxVariable::bprop(), PLearn::TextSenseSequenceVMatrix::build_(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::KNNVMatrix::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::HistogramDistribution::calc_density_from_survival(), PLearn::GraphicalBiText::check_consitency(), PLearn::ComplementedProbSparseMatrix::checkCondProbIntegrity(), PLearn::SmoothedProbSparseMatrix::checkCondProbIntegrity(), PLearn::ProbSparseMatrix::checkCondProbIntegrity(), choleskyDecomposition(), choleskyInvert(), choleskySolve(), PLearn::GraphicalBiText::compute_likelihood(), PLearn::DivisiveNormalizationKernel::computeAverage(), PLearn::AdditiveNormalizationKernel::computeAverage(), computeConditionalMeans(), PLearn::MultiInstanceNNet::computeCostsFromOutputs(), PLearn::LiftStatsCollector::computeLift(), PLearn::LiftStatsCollector::computeLiftMax(), PLearn::KPCATangentLearner::computeOutput(), computeWeightedMean(), convolve(), PLearn::MixtureRandomVariable::ElogP(), PLearn::MixtureRandomVariable::EMBprop(), PLearn::MultinomialRandomVariable::EMUpdate(), PLearn::MixtureRandomVariable::EMUpdate(), entropy(), PLearn::Kernel::estimateHistograms(), PLearn::RowMapSparseValueMatrix< T >::euclidianDistance(), PLearn::RowMapSparseMatrix< real >::euclidianDistance(), PLearn::NegLogProbCostFunction::evaluate(), PLearn::ReconstructionWeightsKernel::evaluate_sum_k_i_k_j(), PLearn::HistogramDistribution::expectation(), PLearn::LiftStatsIterator::finish(), PLearn::SumVariable::fprop(), PLearn::SoftmaxLossVariable::fprop(), PLearn::ProjectionErrorVariable::fprop(), PLearn::MatrixSoftmaxLossVariable::fprop(), PLearn::DotProductVariable::fprop(), PLearn::GeneralizedOneHotVMatrix::GeneralizedOneHotVMatrix(), PLearn::JoinVMatrix::getNewRow(), loadATT800(), logadd(), PLearn::DiagonalNormalRandomVariable::logP(), makeRowsSumTo1(), mean(), PLearn::RowMapSparseMatrix< real >::multiplyVecs(), norm(), normalizeColumns(), normalizeRows(), PLearn::TextSenseSequenceVMatrix::permute(), pl_gser(), pownorm(), PLearn::ReconstructionWeightsKernel::reconstruct(), PLearn::SumVariable::rfprop(), PLearn::SoftmaxLossVariable::rfprop(), PLearn::OneHotSquaredLoss::rfprop(), PLearn::DotProductVariable::rfprop(), rowSum(), PLearn::EntropyContrast::set_NNcontinuous_gradient(), PLearn::AdditiveNormalizationKernel::setDataForKernelMatrix(), softmax(), subsample(), PLearn::DoubleAccessSparseMatrix< T >::sumCol(), PLearn::ProbabilitySparseMatrix::sumOfElements(), PLearn::DoubleAccessSparseMatrix< T >::sumOfElements(), PLearn::DoubleAccessSparseMatrix< T >::sumRow(), PLearn::PlusScalarVariable::symbolicBprop(), PLearn::MinusScalarVariable::symbolicBprop(), PLearn::DuplicateScalarVariable::symbolicBprop(), PLearn::HistogramDistribution::variance(), vmatmain(), and weighted_variance().

template<class T>
T sum_of_log const TVec< T > &  vec  ) 
 

returns the sum of the log of the elements (this is also the log of the product of the elements but is more stable if you have very small elements).

Definition at line 261 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and log().

template<class T>
T sum_of_squares const TMat< T > &  mat  ) 
 

Definition at line 3794 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), and PLearn::TMat< T >::width().

Referenced by columnSumOfSquares().

Var sumabs Var  v  )  [inline]
 

Definition at line 73 of file SumAbsVariable.h.

template<class T>
T sumabs const TMat< T > &  m  ) 
 

returns the sum of the absolute values of the elements

Definition at line 202 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::compact_begin(), PLearn::TMat< T >::compact_end(), PLearn::TMat< T >::end(), PLearn::TMat< T >::isCompact(), and PLearn::TMat< T >::size().

template<class T>
T sumabs const TVec< T > &  x  ) 
 

returns the sum of absolute values of elements

Definition at line 131 of file TMat_maths_impl.h.

References x.

Referenced by PLearn::NNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), and PLearn::AffineTransformWeightPenalty::fprop().

Var sumOf Var  output,
const VarArray &  inputs,
VMat  distr,
int  nsamples,
VarArray  parameters = VarArray()
[inline]
 

Deprecated old version, do not use!

Definition at line 99 of file SumOfVariable.h.

References sumOf().

Var sumOf VMat  distr,
Func  f,
int  nsamples
[inline]
 

sumOf

Definition at line 95 of file SumOfVariable.h.

Var sumOf VMat  distr,
Func  f,
int  nsamples,
int  input_size
[inline]
 

Definition at line 96 of file MatrixSumOfVariable.h.

Referenced by sumOf().

Var sumOverBags VMat  vmat,
Func  f,
int  max_bag_size,
int  nsamples,
bool  average = false,
bool  transpose = false
[inline]
 

sumOverBags

Definition at line 122 of file SumOverBagsVariable.h.

References transpose().

Referenced by PLearn::NeighborhoodSmoothnessNNet::train(), and PLearn::MultiInstanceNNet::train().

Var sumsquare Var  v  )  [inline]
 

Definition at line 73 of file SumSquareVariable.h.

template<class T>
T sumsquare const TMat< T > &  m  ) 
 

returns the sum of squared elements

Definition at line 174 of file TMat_maths_impl.h.

References PLearn::TMat< T >::begin(), PLearn::TMat< T >::compact_begin(), PLearn::TMat< T >::compact_end(), PLearn::TMat< T >::end(), PLearn::TMat< T >::isCompact(), PLearn::TMat< T >::size(), and square().

template<class T>
T sumsquare const TVec< T > &  x  ) 
 

returns the sum of squared elements

Definition at line 119 of file TMat_maths_impl.h.

References square(), and x.

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), computeConditionalMeans(), PLearn::ProjectionErrorVariable::fprop(), PLearn::AffineTransformWeightPenalty::fprop(), PLearn::JoinVMatrix::getNewRow(), paired_t_test(), PLearn::SequentialModelSelector::paired_t_test(), and vmatmain().

template<class num_t>
void SVD const TMat< num_t > &  A,
TMat< num_t > &  U,
TVec< num_t > &  S,
TMat< num_t > &  Vt,
char  JOBZ = 'A',
real  safeguard = 1
[inline]
 

Performs the SVD decomposition A = U.S.Vt Where U and Vt are orthonormal matrices.

A is an MxN matrix whose content is destroyed by the call.

S in the above formula is also an MxN matrix, of which only the first min(M,N) diagonal elements are non-zero. The call fills the vector S with those elements: the singular values, in decreasing order.

JOBZ has the following meaning:

'A': all M columns of U and all N rows of Vt are returned in the arrays U and VT;

'S': the first min(M,N) columns of U and the first min(M,N) rows of Vt are returned in the arrays U and Vt;

'O': if M >= N, the first N columns of U are overwritten on the array A and all rows of Vt are returned in the array Vt; otherwise, all columns of U are returned in the array U and the first M rows of Vt are overwritten in the array Vt;

'N': compute only the singular values (U and V are not computed)

The optional value 'safeguard' may be used with a value > 1 if there is a crash in the SVD (typically, saying that parameter 12 has an illegal value).

Relationships between SVD(A) and the eigendecomposition of At.A and A.At:
-> square(singular values) = eigenvalues
-> columns of V (rows of Vt) are the eigenvectors of At.A
-> columns of U are the eigenvectors of A.At

Definition at line 501 of file plapack.h.

References lapackSVD().

Referenced by PLearn::PLS::train().
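
A minimal usage sketch of the wrapper documented above; the copy() call is there because, as noted, A's content is destroyed by the call. The include path is an assumption.

    #include "plapack.h"      // assumed include; SVD is defined in plapack.h
    using namespace PLearn;

    void svd_example(const Mat& data)
    {
        Mat A = data.copy();      // SVD overwrites its input, so work on a copy
        Mat U, Vt;
        Vec S;
        SVD(A, U, S, Vt, 'S');    // 'S': first min(M,N) columns of U / rows of Vt
        // A ~ U . diag(S) . Vt, with S sorted in decreasing order
    }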

template<class T>
void swap TVec< T > &  a,
TVec< T > &  b
 

Definition at line 101 of file TVec_impl.h.

References PLearn::TVec< T >::begin(), and PLearn::TVec< T >::end().

template<class T>
void swap TMat< T > &  a,
TMat< T > &  b
 

Definition at line 628 of file TMat_impl.h.

References PLearn::TMat< T >::begin(), and PLearn::TMat< T >::end().

template<class T>
void swap T &  a,
T &  b
[inline]
 

Definition at line 239 of file general.h.

template<class T>
void swap Array< T > &  a1,
Array< T > &  a2
 

Definition at line 56 of file Array_impl.h.

References PLearn::TVec< T >::data(), PLERROR, and PLearn::TVec< T >::size().

Referenced by PLearn::SmallVector< T, SizeBits, Allocator >::operator=(), PLearn::FieldValue::operator=(), selectAndOrder(), PLearn::TinyVector< T, N, TTrait >::swap(), PLearn::ArrayAllocatorTrivial< T, SizeBits >::swap(), PLearn::ArrayAllocator< T, SizeBits >::swap(), and PLearn::TMat< pair< real, real > >::transpose().

real tabulated_soft_slope real  x,
real  smoothness = 1,
real  left = 0,
real  right = 1
[inline]
 

Definition at line 358 of file pl_math.h.

References hard_slope(), left(), right(), tabulated_softplus(), and x.

Referenced by PLearn::SoftSlopeVariable::bprop(), and PLearn::SoftSlopeVariable::fprop().

real PLearn::tabulated_soft_slope_integral real  smoothness = 1,
real  left = 0,
real  right = 1,
real  a = 0,
real  b = 1
 

Definition at line 254 of file pl_math.cc.

References hard_slope_integral(), left(), right(), and tabulated_softplus_primitive().

Referenced by PLearn::SoftSlopeIntegralVariable::fprop().

real tabulated_softplus real  x  )  [inline]
 

Definition at line 304 of file pl_math.h.

References softplus(), and x.

Referenced by PLearn::SoftSlopeIntegralVariable::bprop(), and tabulated_soft_slope().

real tabulated_softplus_primitive real  x  )  [inline]
 

Definition at line 413 of file pl_math.h.

References softplus_primitive(), and x.

Referenced by tabulated_soft_slope_integral().

Var tanh Var  v  )  [inline]
 

Definition at line 75 of file TanhVariable.h.

template<class T>
void tanh const TVec< T > &  x,
TVec< T > &  y
 

Definition at line 1621 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, tanh(), and x.

template<class T>
TVec<T> tanh const TVec< T > &  src  )  [inline]
 

Definition at line 958 of file TMat_maths_impl.h.

References compute_tanh(), and PLearn::TVec< T >::length().

Referenced by PLearn::TangentLearner::build_(), PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::GaussianContinuum::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::EntropyContrast::compute_df_dx(), compute_tanh(), PLearn::TanhVariable::fprop(), PLearn::NNet::NNet(), PLearn::PLMathInitializer::PLMathInitializer(), PLearn::EntropyContrast::set_NNcontinuous_gradient(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), sigmoid(), tanh(), and PLearn::EntropyContrast::train().

VMat PLearn::temporalThreshold VMat  distr,
int  threshold_date,
bool  is_before,
int  yyyy_col,
int  mm_col,
int  dd_col
 

Definition at line 1208 of file VMat_maths.cc.

References PLearn::VMat::length(), PLearn::TVec< T >::resize(), and PLearn::VMat::rows().

VMat PLearn::temporalThreshold VMat  distr,
int  threshold_date,
bool  is_before,
int  yyyymmdd_col
 

This VMat is a SelectRowsVMatrix which, given a threshold date, keeps only the rows earlier (or later) than this date. The threshold date is given as a YYYYMMDD date, and the dates in the original VMatrix are stored in 1 column (YYYYMMDD) or 3 columns (YYYY, MM and DD).

Definition at line 1192 of file VMat_maths.cc.

References PLearn::VMat::length(), PLearn::TVec< T >::resize(), and PLearn::VMat::rows().

real PLearn::testNoCorrelationAsymptotically real  r,
int  n
 

Return P(|R|>|r|) two-sided p-value for the null-hypothesis that there is no monotonic dependency, with r the observed correlation between two paired samples of length n.

The p-value is computed by taking advantage of the fact that under the null hypothesis r*sqrt(n-1) converges to a Normal(0,1), if n is LARGE ENOUGH (approx. > 30).

Definition at line 105 of file stats_utils.cc.

References gauss_01_cum(), and sqrt().

Referenced by correlations(), and testSpearmanRankCorrelation().
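
Under the stated null hypothesis the two-sided p-value is just a Gaussian tail probability; here is a standalone sketch using erfc in place of PLearn's gauss_01_cum().

    #include <cmath>

    // P(|R| > |r|) under the null, using r*sqrt(n-1) ~ N(0,1)   (sketch).
    double p_value_no_monotonic_dependency(double r, int n)
    {
        double z = std::fabs(r) * std::sqrt(double(n - 1));
        return std::erfc(z / std::sqrt(2.0));   // == 2 * (1 - Phi(z))
    }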

void PLearn::testSpearmanRankCorrelation const VMat &  x,
const VMat &  y,
Mat &  r,
Mat &  pvalues
 

same as above, but also returns the rank correlations in r

Definition at line 128 of file stats_utils.cc.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::resize(), SpearmanRankCorrelation(), testNoCorrelationAsymptotically(), PLearn::TMat< T >::width(), and x.

Referenced by PLearn::TestDependencyCommand::run(), PLearn::TestDependenciesCommand::run(), and testSpearmanRankCorrelationPValues().

void PLearn::testSpearmanRankCorrelationPValues const VMat &  x,
const VMat &  y,
Mat &  pvalues
 

Compute P(|R|>|r|) two-sided p-value for the null-hypothesis that there is no monotonic dependency, with r the observed Spearman Rank correlation between two paired samples x and y of length n (column matrices).

The p-value is computed by taking advantage of the fact that under the null hypothesis r*sqrt(n-1) is Normal(0,1). If x and y have width wx and wy respectively then the statistic is computed for each pair of columns (the first taken from x and the second from y) and pvalues will be a wx by wy matrix upon return. N.B. If x fits in memory then copying it to a matrix (toMat()) before calling this function will speed up the computation significantly.

Definition at line 122 of file stats_utils.cc.

References testSpearmanRankCorrelation(), and x.

VMat thresholdVMat VMat  d,
real  threshold,
real  cold_value = 0.0,
real  hot_value = 1.0,
bool  gt_threshold = true
[inline]
 

Definition at line 82 of file ThresholdVMatrix.h.

Var times Var  v,
Var  w
[inline]
 

Definition at line 77 of file TimesVariable.h.

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::Profiler::end(), PLearn::AutoRunCommand::run(), and PLearn::Profiler::start().

Var timesScalar Var  v,
Var  scalar
[inline]
 

Definition at line 78 of file TimesScalarVariable.h.

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_().

bool PLearn::tobool const string s  ) 
 

Definition at line 119 of file stringutils.cc.

References PLERROR.

Referenced by PLearn::WordNetOntology::load(), and main().

double todouble const Row::iterator &  it  ) 
 

Generic conversions from an iterator.

double todouble const RowIterator &  it  ) 
 

Definition at line 673 of file SimpleDB.cc.

References PLearn::RowIterator::toDouble().

double PLearn::todouble const string s  ) 
 

Definition at line 127 of file stringutils.cc.

References MISSING_VALUE.

Referenced by PLearn::SDBWithStats::computeStats(), PLearn::SDBVMFieldRemapReals::SDBVMFieldRemapReals(), and tofloat().

float tofloat const string s  )  [inline]
 

FLOAT.

Definition at line 95 of file stringutils.h.

References todouble().

Referenced by PLearn::VMatLanguage::generateCode(), and PLearn::SDBWithStats::loadStats().

template<class T>
Object* toIndexedObjectPtr const T &  ,
int 
 

Definition at line 577 of file Object.h.

References PLERROR.

template<class T>
Object* toIndexedObjectPtr const TVec< T > &  x,
int  i
 

Definition at line 574 of file Object.h.

References toObjectPtr(), and x.

template<class T>
Object* toIndexedObjectPtr const Array< T > &  x,
int  i
 

Definition at line 571 of file Object.h.

References toObjectPtr(), and x.

Referenced by PLearn::Option< DeallocatorType, self >::getIndexedObject().

int toint const string s,
int  base = 10
[inline]
 

Definition at line 94 of file stringutils.h.

References tolong().

Referenced by PLearn::AsciiVMatrix::build_(), PLearn::VVMatrix::createPreproVMat(), PLearn::MatlabInterface::eigs_r11(), PLearn::VMatLanguage::generateCode(), PLearn::VVMatrix::generateVMatIndex(), PLearn::VMatrix::getFieldIndex(), PLearn::VecStatsCollector::getFieldNum(), getList(), PLearn::ShellProgressBar::getWcAsciiFileLineCount(), interactiveDisplayCDF(), PLearn::WordNetOntology::load(), loadAscii(), loadAsciiSingleBinaryDescriptor(), loadClassificationDataset(), PLearn::VMatrix::loadFieldInfos(), PLearn::WordNetOntology::loadPredominentSyntacticClasses(), PLearn::SDBWithStats::loadStats(), main(), matlabR11eigs(), old_plearn_main(), PLearn::StatSpec::parseStatname(), PLearn::PDate::PDate(), PLearn::PDateTime::PDateTime(), plotVMats(), PLearn::VMatLanguage::preprocess(), readAndMacroProcess(), readHeader(), PLearn::Object::readOptionVal(), PLearn::TestDependencyCommand::run(), PLearn::TestDependenciesCommand::run(), PLearn::KolmogorovSmirnovCommand::run(), PLearn::JulianDateCommand::run(), train_and_test(), viewVMat(), vmatmain(), and PLearn::Object::writeOptionVal().

long PLearn::tolong const string s,
int  base = 10
 

conversions from string to numerical types

Definition at line 107 of file stringutils.cc.

References PLERROR.

Referenced by PLearn::WordNetOntology::load(), and toint().

Object* toObjectPtr const SetOption &  o  )  [inline]
 

Definition at line 113 of file SetOption.h.

Object* toObjectPtr const RealMapping o  )  [inline]
 

Definition at line 237 of file RealMapping.h.

template<class T>
Object* toObjectPtr const PP< T > &  x  )  [inline]
 

Definition at line 568 of file Object.h.

References toObjectPtr(), and x.

template<>
Object* toObjectPtr const Object *  x  )  [inline]
 

Definition at line 564 of file Object.h.

References x.

template<>
Object* toObjectPtr const Object &  x  )  [inline]
 

Definition at line 561 of file Object.h.

References x.

template<class T>
Object* toObjectPtr const T &  x  )  [inline]
 

Definition at line 557 of file Object.h.

References PLERROR.

template<class T>
Object* toObjectPtr const T *  x  )  [inline]
 

The toObjectPtr functions attempt to return a pointer to Object (or 0 if the passed argument cannot be considered an Object subclass).

Definition at line 554 of file Object.h.

References PLERROR.

Referenced by PLearn::Option< DeallocatorType, self >::getAsObject(), toIndexedObjectPtr(), and toObjectPtr().

string tostring const Row::iterator &  it  ) 
 

string tostring const RowIterator &  it  ) 
 

Definition at line 678 of file SimpleDB.cc.

References PLearn::RowIterator::toString().

string tostring const char *  s  )  [inline]
 

specialised version for char*

Definition at line 78 of file stringutils.h.

template<class T>
string PLearn::tostring const T &  x  ) 
 


Definition at line 276 of file stringutils.h.

References x.

string PLearn::tostring const float &  x  ) 
 

Definition at line 637 of file stringutils.cc.

References x.

string PLearn::tostring const double &  x  ) 
 

Definition at line 624 of file stringutils.cc.

References x.

Referenced by PLearn::TextProgressBarPlugin::addProgressBar(), PLearn::DiskVMatrix::appendRow(), PLearn::MovingAverageVMatrix::build_(), PLearn::GraphicalBiText::build_(), PLearn::DiskVMatrix::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::ConcatColumnsVMatrix::build_(), PLearn::SequentialModelSelector::checkModelNames(), PLearn::VMatLanguage::compileStream(), compute_learner_outputs_on_grid(), computeOutputFields(), PLearn::SDBWithStats::computeStats(), cross_valid(), PLearn::ShellProgressBar::draw(), DX_create_grid_outputs_file(), DX_write_2D_fields(), PLearn::MatlabInterface::eigs_r11(), PLearn::VMatrix::getFieldInfos(), PLearn::Variable::getName(), PLearn::VMatrix::getString(), PLearn::PLearner::getTestCostIndex(), PLearn::PLearner::getTrainCostIndex(), PLearn::UnequalConstantVariable::info(), PLearn::TimesConstantVariable::info(), PLearn::QuantilesStatsIterator::info(), PLearn::SelectedOutputCostFunction::info(), PLearn::PowDistanceKernel::info(), PLearn::PlusConstantVariable::info(), PLearn::PDateTime::info(), PLearn::PDate::info(), PLearn::EqualConstantVariable::info(), PLearn::DistanceKernel::info(), loadAscii(), PLearn::GraphicalBiText::loadBitext(), main(), PLearn::GaussianContinuum::make_random_walk(), makeFileNameValid(), matlabR11eigs(), matlabSave(), PLearn::Learner::measure(), PLearn::PTester::perform(), PLearn::Gnuplot::plotClasses(), plotVMats(), PLearn::VMatLanguage::preprocess(), PLearn::WordNetOntology::processUnknownWord(), readAndMacroProcess(), PLearn::SequentialValidation::run(), PLearn::Experiment::run(), PLearn::FinancePreprocVMatrix::setVMFields(), PLearn::Learner::stop_if_wanted(), PLearn::SequentialModelSelector::test(), PLearn::RowIterator::toString(), PLearn::FieldValue::toString(), PLearn::TangentLearner::train(), PLearn::StackedLearner::train(), PLearn::SequentialModelSelector::train(), PLearn::NNet::train(), PLearn::NeighborhoodSmoothnessNNet::train(), PLearn::MultiInstanceNNet::train(), PLearn::GraphicalBiText::train(), PLearn::GaussianContinuum::train(), PLearn::ConditionalDensityNet::train(), PLearn::ClassifierFromDensity::train(), PLearn::AdaBoost::train(), PLearn::SimpleDB< KeyType, QueryResult >::truncateFromRow(), PLearn::VMatrix::unduplicateFieldNames(), PLearn::TextProgressBarPlugin::update(), PLearn::GraphicalBiText::update_WSD_model(), PLearn::FieldStat::updateNumber(), viewVMat(), vmatmain(), while(), and word_sequences2files_int_stream().

void PLearn::touch const string file  ) 
 

trivial unix touch

Definition at line 567 of file fileutils.cc.

template<class T>
T trace const TMat< T > &  mat  ) 
 

Definition at line 3422 of file TMat_maths_impl.h.

References PLearn::TMat< T >::firstElement(), PLearn::TMat< T >::isSquare(), PLearn::TMat< T >::length(), and PLERROR.

Referenced by regularizeMatrix().

void train_and_test const string modelalias,
string  trainalias,
vector< string testaliases
 

Definition at line 127 of file old_plearn_main.cc.

References endl(), exitmsg(), getDataSet(), getDatasetAliases(), getModelAliases(), isdir(), isfile(), PLearn::TVec< T >::length(), PLearn::VMat::length(), loadObject(), lsdir(), read(), save(), PLearn::TVec< T >::subVec(), toint(), and PLearn::VMat::width().

Referenced by old_plearn_main().

VMat PLearn::transpose VMat  m1  ) 
 

returns M1'

Definition at line 902 of file VMat_maths.cc.

References PLearn::VMat::toMat(), and transpose().

Var transpose Var  v  )  [inline]
 

Definition at line 90 of file SubMatTransposeVariable.h.

References PLearn::Var::length(), and PLearn::Var::width().

template<class T>
TMat<T> transpose const TMat< T > &  src  ) 
 

Definition at line 5318 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), transpose(), and PLearn::TMat< T >::width().

template<class T>
void transpose const TMat< T >  src,
TMat< T >  dest
 

Definition at line 5297 of file TMat_maths_impl.h.

References PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ConditionalDensityNet::build_(), PLearn::Var::column(), PLearn::Variable::column(), PLearn::Var::row(), PLearn::Variable::row(), PLearn::EntropyContrast::set_NNcontinuous_gradient_from_extra_cost(), solveLinearSystem(), PLearn::Variable::subVec(), PLearn::Var::subVec(), sumOverBags(), PLearn::SumOverBagsVariable::SumOverBagsVariable(), PLearn::SubMatTransposeVariable::symbolicBprop(), transpose(), PLearn::RowMapSparseMatrix< real >::transposeProduct(), unfoldedFunc(), PLearn::UnfoldedFuncVariable::UnfoldedFuncVariable(), and vmatmain().

Vec PLearn::transposeProduct VMat  m1,
Vec  v2
 

computes M1'.V2

Definition at line 841 of file VMat_maths.cc.

References PLearn::TVec< T >::clear(), PLearn::TVec< T >::length(), PLearn::VMat::length(), PLERROR, and PLearn::VMat::width().

Mat PLearn::transposeProduct VMat  m1,
VMat  m2
 

computes M1'.M2

Definition at line 820 of file VMat_maths.cc.

References PLearn::VMat::length(), PLERROR, rowmatrix(), transposeProductAcc(), and PLearn::VMat::width().

Mat PLearn::transposeProduct VMat  m  ) 
 

computes M'.M

Definition at line 805 of file VMat_maths.cc.

References PLearn::VMat::length(), rowmatrix(), transposeProductAcc(), and PLearn::VMat::width().

Var transposeProduct Var &  m1,
Var &  m2
[inline]
 

Definition at line 79 of file TransposeProductVariable.h.

template<class T>
TMat<T> transposeProduct const TMat< T > &  m1,
const TMat< T > &  m2
[inline]
 

return m1' x m2

Definition at line 5699 of file TMat_maths_impl.h.

References transposeProduct(), and PLearn::TMat< T >::width().

template<class T>
TVec<T> transposeProduct const TMat< T > &  m,
const TVec< T > &  v
[inline]
 

return m' x v

Definition at line 5689 of file TMat_maths_impl.h.

References transposeProduct(), and PLearn::TMat< T >::width().

template<class T>
void transposeProduct const TMat< T > &  mat,
const TMat< T > &  m1,
const TMat< T > &  m2
 

Definition at line 3253 of file TMat_maths_impl.h.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void transposeProduct const TVec< T > &  result,
const TMat< T > &  m,
const TVec< T > &  v
 

result[i] = sum_j m[j,i] * v[j]. Equivalently: rowvec(result) = rowvec(v) . m, or columnvec(result) = transpose(m) . columnvec(v).

Definition at line 2257 of file TMat_maths_impl.h.

References PLearn::TVec< T >::clear(), PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::NNet::build_(), PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::MultiInstanceNNet::build_(), PLearn::ConditionalDensityNet::build_(), closestPointOnHyperplane(), PLearn::GaussianContinuum::compute_train_and_validation_costs(), computeCovar(), computeMeanAndCovar(), PLearn::PLS::computeOutput(), PLearn::LinearRegressor::computeOutput(), computePrincipalComponents(), PLearn::ProductRandomVariable::EMBprop(), PLearn::TransposeProductVariable::fprop(), PLearn::ProjectionErrorVariable::fprop(), PLearn::NllSemisphericalGaussianVariable::fprop(), PLearn::GaussianDistribution::generate(), PLearn::GaussianContinuum::get_image_matrix(), linearRegressionNoBias(), PLearn::TransposeProductVariable::rfprop(), PLearn::ProductVariable::symbolicBprop(), PLearn::ProductTransposeVariable::symbolicBprop(), PLearn::PLS::train(), PLearn::PCA::train(), PLearn::GaussianContinuum::train(), transposeProduct(), and PLearn::Function::verifyrfprop().
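
A standalone sketch of the accumulation described above (result = m' v), with nested std::vector standing in for TMat/TVec.

    #include <vector>

    // result[i] = sum_j m[j][i] * v[j]   (sketch, not TMat_maths_impl.h)
    void transpose_product_sketch(const std::vector<std::vector<double> >& m,
                                  const std::vector<double>& v,
                                  std::vector<double>& result)
    {
        std::size_t rows = m.size();
        std::size_t cols = rows ? m[0].size() : 0;
        result.assign(cols, 0.0);
        for (std::size_t j = 0; j < rows; ++j)
            for (std::size_t i = 0; i < cols; ++i)
                result[i] += m[j][i] * v[j];
    }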

template<class T>
void transposeProduct2 ( const TMat< T >& mat, const TMat< T >& m1, const TMat< T >& m2 )
 

Definition at line 3281 of file TMat_maths_impl.h.

References PLearn::TMat< T >::clear(), PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void transposeProduct2Acc ( const TMat< T >& mat, const TMat< T >& m1, const TMat< T >& m2 )
 

Definition at line 3339 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductVariable::bbprop(), and PLearn::ProductTransposeVariable::bbprop().

template<class T>
void transposeProductAcc ( const TMat< T >& mat, const TMat< T >& m1, const TMat< T >& m2 )
 

Definition at line 3312 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void transposeProductAcc ( const TVec< T >& result, const TMat< T >& m, const TVec< T >& v, T alpha )
 

result[i] += alpha * sum_j m[j,i] * v[j]

Definition at line 2322 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void transposeProductAcc ( const TVec< T >& result, const TMat< T >& m, const TVec< T >& v )
 

result[i] += sum_j m[j,i] * v[j]

Definition at line 2277 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

Referenced by PLearn::ProductVariable::bprop(), PLearn::ProductTransposeVariable::bprop(), PLearn::MatrixAffineTransformVariable::fprop(), PLearn::AffineTransformVariable::fprop(), PLearn::TransposeProductVariable::rfprop(), and transposeProduct().
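A small sketch of the accumulating variants documented above; TVec::clear() is listed on this page, but the exact type of the alpha argument is not shown and a plain scalar literal is assumed here.

    // Accumulate into an existing vector, per the signatures above.
    void transposeProductAccSketch(const TMat<real>& m, const TVec<real>& v)
    {
        TVec<real> grad(m.width());
        grad.clear();                          // grad = 0
        transposeProductAcc(grad, m, v);       // grad += m' * v
        transposeProductAcc(grad, m, v, 0.5);  // grad += 0.5 * (m' * v)
    }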

template<class T>
void transposeTransposeProduct ( const TMat< T >& mat, const TMat< T >& m1, const TMat< T >& m2 )
 

Definition at line 3369 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
void transposeTransposeProductAcc ( const TMat< T >& mat, const TMat< T >& m1, const TMat< T >& m2 )
 

Definition at line 3397 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), k, PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

string PLearn::trimWord ( string word )
 

Definition at line 2790 of file WordNetOntology.cc.

References isDigit(), isLegalPunct(), isLetter(), and NULL_TAG.

Referenced by PLearn::WordNetOntology::extractWord(), and PLearn::WordNetOntology::isInWordNet().

template<class T>
T two ( const T& x ) [inline]
 

Definition at line 134 of file pl_math.h.

References x.

Referenced by PLearn::OneHotSquaredLoss::bprop(), PLearn::MatrixOneHotSquaredLoss::bprop(), PLearn::AffineTransformWeightPenalty::bprop(), and PLearn::TestDependencyCommand::TestDependencyCommand().

real ultrafastsigmoid ( const real x ) [inline]
 

Definition at line 230 of file pl_math.h.

References ultrafasttanh(), and x.

real ultrafasttanh ( const real x ) [inline]
 

Definition at line 212 of file pl_math.h.

References x.

Referenced by ultrafastsigmoid().
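Since ultrafastsigmoid is documented as referencing ultrafasttanh, it presumably exploits the standard identity relating the two functions (stated here as a reminder, not as a description of the exact approximation used):

    \mathrm{sigmoid}(x) \;=\; \frac{1}{1+e^{-x}} \;=\; \frac{1+\tanh(x/2)}{2}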

Var unary_hard_slope ( Var v, real l = -1, real r = 1 ) [inline]
 

Definition at line 83 of file UnaryHardSlopeVariable.h.

Referenced by PLearn::NNet::build_().

void PLearn::uncompress_vec ( char* comprbuf, double* data, int l, bool double_stored_as_float = false )
 

Definition at line 494 of file pl_io.cc.

References mode, PLERROR, read_compr_mode_and_size_ptr(), and val.

string PLearn::underscore_to_space ( string str )
 

replaces all underscores by a single space character

Definition at line 345 of file stringutils.cc.

Referenced by PLearn::WordNetOntology::extractWord(), PLearn::WordNetOntology::isInWordNet(), and matlabSave().

Var unfoldedFunc ( Var input_matrix, Func f, bool transpose = false ) [inline]
 

Definition at line 90 of file UnfoldedFuncVariable.h.

References transpose().

Referenced by PLearn::NeighborhoodSmoothnessNNet::build_().

Var unfoldedSumOf ( Var input_matrix, Var bag_size, Func f, int max_bag_size ) [inline]
 

Definition at line 91 of file UnfoldedSumOfVariable.h.

Referenced by PLearn::MultiInstanceNNet::build_().

int PLearn::uniform_multinomial_sample ( int N )
 

return an integer between 0 and N-1 with equal probabilities

Definition at line 553 of file random.cc.

References uniform_sample().

Referenced by bootstrap(), bootstrap_rows(), fill_random_discrete(), PLearn::EmpiricalDistribution::generate(), and smartInitialization().

real PLearn::uniform_sample ( )
 

returns a random number uniformly distributed between 0 and 1

Definition at line 248 of file random.cc.

References AM1, IA1, IA2, IM1, IM2, IMM1, IQ1, IQ2, IR1, IR2, k, NDIV1, NTAB, RNMX, and the_seed.

Referenced by bnldev(), bounded_uniform(), PLearn::RepeatSplitter::build_(), expdev(), fill_random_uniform(), gamdev(), gaussian_01(), PLearn::ConditionalDensityNet::generate(), PLearn::UniformVMatrix::get(), PLearn::UniformVMatrix::getSubRow(), multinomial_sample(), PLearn::TextSenseSequenceVMatrix::permute(), poidev(), randomShuffleRows(), shuffleElements(), shuffleRows(), PLearn::AdaBoost::train(), and uniform_multinomial_sample().
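A short sketch of drawing from PLearn's global random number generator using the two functions above; the header path and the manual_seed() call (mentioned elsewhere on this page) are assumptions made for illustration.

    #include "random.h"   // assumed header for the global RNG helpers

    using namespace PLearn;

    int main()
    {
        manual_seed(12345);                       // fix the seed for reproducible draws (assumed helper)
        real u = uniform_sample();                // uniformly distributed between 0 and 1
        int  k = uniform_multinomial_sample(10);  // integer between 0 and 9 with equal probabilities
        return 0;
    }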

Mat PLearn::unitmatrix ( int n )
 

Definition at line 148 of file Mat.cc.

void update ( ProbabilitySparseMatrix& pYX, ProbabilitySparseMatrix& nYX ) [inline]
 

Definition at line 497 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::ProbabilitySparseMatrix::clear(), PLearn::ProbabilitySparseMatrix::computeX(), PLearn::ProbabilitySparseMatrix::computeY(), PLearn::Set::end(), PLearn::ProbabilitySparseMatrix::set(), SetIterator, PLearn::ProbabilitySparseMatrix::sumPYx(), x, PLearn::ProbabilitySparseMatrix::X, and PLearn::ProbabilitySparseMatrix::Y.

Referenced by PLearn::GraphicalBiText::compute_BN_likelihood(), PLearn::GraphicalBiText::compute_likelihood(), PLearn::ShellProgressBar::done(), PLearn::SequentialModelSelector::test(), PLearn::SequentialModelSelector::train(), PLearn::VecStatsCollector::update(), PLearn::StatsIterator::update(), and PLearn::StatsItArray::update().

void updateAndClearCounts ( ProbabilitySparseMatrix& pYX, ProbabilitySparseMatrix& nYX ) [inline]
 

Definition at line 521 of file ProbabilitySparseMatrix.h.

References PLearn::Set::begin(), PLearn::ProbabilitySparseMatrix::clear(), PLearn::ProbabilitySparseMatrix::computeX(), PLearn::ProbabilitySparseMatrix::computeY(), PLearn::Set::end(), PLearn::ProbabilitySparseMatrix::set(), SetIterator, PLearn::ProbabilitySparseMatrix::sumPYx(), x, PLearn::ProbabilitySparseMatrix::X, and PLearn::ProbabilitySparseMatrix::Y.

string PLearn::upperstring ( const string s )
 

convert a string to all uppercase

Definition at line 278 of file stringutils.cc.

Referenced by PLearn::RealMapping::read().

void usage ( )
 

Definition at line 460 of file old_plearn_main.cc.

References endl().

Referenced by PLearn::FieldConvertCommand::FieldConvertCommand(), and old_plearn_main().

void use ( const string modelfile, const string datasetalias )
 

Definition at line 407 of file old_plearn_main.cc.

References endl(), exitmsg(), getDataSet(), getDatasetAliases(), PLearn::VMat::length(), loadObject(), remove_extension(), PLearn::TVec< T >::subVec(), and PLearn::VMat::width().

Referenced by PLearn::Learner::apply(), PLearn::Learner::computeLeaveOneOutCosts(), PLearn::Learner::computeOutput(), PLearn::FieldConvertCommand::FieldConvertCommand(), PLearn::KolmogorovSmirnovCommand::KolmogorovSmirnovCommand(), PLearn::LearnerCommand::LearnerCommand(), old_plearn_main(), PLearn::Learner::use(), PLearn::Learner::useAndCost(), and PLearn::PLearner::useOnTrain().

Var var ( real init_value ) [inline]
 

Definition at line 69 of file Var.h.

Referenced by PLearn::NeuralNet::build_(), PLearn::NeighborhoodSmoothnessNNet::build_(), PLearn::ConditionalDensityNet::build_(), PLearn::AddCostToLearner::build_(), PLearn::GaussianProcessRegressor::computeCostsFromOutputs(), computeInputMeanAndVariance(), d_hard_slope(), gauss_density_var(), gauss_log_density_var(), PLearn::CompactVMatrix::perturb(), and PLearn::GaussianProcessRegressor::variance().

void PLearn::varDeepCopyField ( Var& field, CopiesMap& copies )
 

Deep-copies a Var field using the given CopiesMap; intended for use inside makeDeepCopyFromShallowCopy() implementations (see the Referenced-by list below).

Definition at line 59 of file Var.cc.

Referenced by PLearn::Variable::makeDeepCopyFromShallowCopy(), PLearn::VarArray::makeDeepCopyFromShallowCopy(), PLearn::UnfoldedSumOfVariable::makeDeepCopyFromShallowCopy(), PLearn::UnaryVariable::makeDeepCopyFromShallowCopy(), PLearn::TangentLearner::makeDeepCopyFromShallowCopy(), PLearn::Optimizer::makeDeepCopyFromShallowCopy(), PLearn::NNet::makeDeepCopyFromShallowCopy(), PLearn::MultiInstanceNNet::makeDeepCopyFromShallowCopy(), PLearn::MatrixElementsVariable::makeDeepCopyFromShallowCopy(), PLearn::GaussianContinuum::makeDeepCopyFromShallowCopy(), PLearn::ConditionalDensityNet::makeDeepCopyFromShallowCopy(), PLearn::BinaryVariable::makeDeepCopyFromShallowCopy(), and PLearn::AddCostToLearner::makeDeepCopyFromShallowCopy().

template<class T>
T variance ( const TMat< T >& mat, T meanval )
 

Definition at line 3844 of file TMat_maths_impl.h.

References PLearn::TMat< T >::data(), PLearn::TMat< T >::length(), PLearn::TMat< T >::mod(), PLERROR, and PLearn::TMat< T >::width().

template<class T>
T variance ( const TVec< T >& vec, T meanval )
 

Definition at line 408 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), and PLERROR.

Referenced by columnVariance(), computeConditionalMeans(), PLearn::GaussMix::computeMeansAndCovariances(), PLearn::PDistribution::computeOutput(), PLearn::PConditionalDistribution::computeOutput(), PLearn::HistogramDistribution::computeOutput(), PLearn::DiagonalNormalRandomVariable::logP(), normal(), rowVariance(), PLearn::SequentialModelSelector::sequenceCost(), and vmatmain().
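In both overloads the caller supplies the mean instead of having it recomputed. The computed quantity is presumably the usual estimator below; whether the divisor is n or n-1 is not stated on this page and should be checked in the definitions cited above.

    \mathrm{variance}(x, \bar{x}) \;=\; \frac{1}{n-1} \sum_{i=1}^{n} \left( x_i - \bar{x} \right)^2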

VMat vconcat ( Array< VMat > ds ) [inline]
 

Definition at line 140 of file ConcatRowsVMatrix.h.

References PLearn::TVec< T >::size().

VMat vconcat ( VMat d1, VMat d2 ) [inline]
 

Definition at line 137 of file ConcatRowsVMatrix.h.

Var vconcat ( const VarArray& varray ) [inline]
 

Definition at line 79 of file ConcatRowsVariable.h.

template<class T>
TMat<T> vconcat ( const TMat< T >& m1, const TMat< T >& m2 ) [inline]
 

Definition at line 211 of file Array_impl.h.

References vconcat().

template<class T>
TMat<T> vconcat ( const Array< TMat< T > >& ar )
 

Definition at line 169 of file Array_impl.h.

References PLERROR, and PLearn::TMat< T >::subMatRows().

Referenced by PLearn::ConditionalDensityNet::build_(), PLearn::ConditionalExpression::ConditionalExpression(), PLearn::VVMatrix::createPreproVMat(), PLearn::MixtureRandomVariable::ElogP(), getDataSet(), PLearn::TrainValidTestSplitter::getSplit(), PLearn::TestInTrainSplitter::getSplit(), loadUCI(), PLearn::MixtureRandomVariable::logP(), PLearn::RVArrayRandomElementRandomVariable::logP(), PLearn::GaussianContinuum::make_random_walk(), PLearn::RVInstanceArray::operator|(), removeRow(), PLearn::JointRandomVariable::setValueFromParentsValue(), split(), PLearn::InterValuesVariable::symbolicBprop(), and vconcat().
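A small sketch of stacking two datasets row-wise with the VMat overload above; wrapping an in-memory Mat in a VMat, as done here, is an assumption made for illustration.

    void vconcatSketch()
    {
        Mat a(5, 3);
        Mat b(7, 3);                  // widths must match
        VMat d1(a), d2(b);            // assumed Mat-to-VMat wrapping
        VMat all = vconcat(d1, d2);   // 12 rows, 3 columns
    }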

template<class T>
int vec_counts ( const TVec< T >& src, T value )
 

Definition at line 1787 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

template<class T>
int vec_find ( const TVec< T >& src, T f )
 

Definition at line 1800 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), and PLearn::TVec< T >::length().

Referenced by PLearn::UniformizeVMatrix::getNewRow(), and PLearn::GeneralizedOneHotVMatrix::getNewRow().

void PLearn::viewVMat ( const VMat& vm )
 

Definition at line 379 of file vmatmain.cc.

References PLearn::TVec< T >::begin(), PLearn::VMat::columns(), PLearn::TVec< T >::end(), endl(), PLearn::VMat::fieldName(), fname, getDataSet(), PLearn::VMat::getFieldIndex(), getList(), is_missing(), PLearn::VMat::length(), max(), min(), pl_isnumber(), PLERROR, removeblanks(), PLearn::TVec< T >::resize(), strlen(), toint(), tostring(), val, PLearn::VMat::width(), and x.

Referenced by vmatmain().

int PLearn::vmatmain ( int argc, char** argv )
 

Definition at line 1279 of file vmatmain.cc.

References PLearn::TVec< T >::append(), c_str(), PLearn::VMatLanguage::compileString(), displayBasicStats(), endl(), extract_extension(), PLearn::VMat::fieldName(), flush(), getDataSet(), getDataSetDate(), getDataSetHelp(), PLearn::VMatrix::getMtime(), PLearn::VVMatrix::getPrecomputedDataName(), interactiveDisplayCDF(), PLearn::VVMatrix::isPrecomputedAndUpToDate(), PLearn::VMat::length(), max(), mean(), min(), PLERROR, plotVMats(), print_diff(), printDistanceStatistics(), rm(), PLearn::VMatLanguage::run(), PLearn::TVec< T >::size(), sum(), sumsquare(), toint(), tostring(), transpose(), variance(), viewVMat(), and PLearn::VMat::width().

VMat vrange ( real start, real end, real step = 1.0 ) [inline]
 

Definition at line 81 of file RangeVMatrix.h.

void PLearn::warningmsg ( const char* msg, ... )
 

Definition at line 92 of file plerror.cc.

References endl(), ERROR_MSG_SIZE, and error_stream.

CostFunc weighted_costfunc ( CostFunc costfunc ) [inline]
 

reweighting

Definition at line 83 of file WeightedCostFunction.h.

template<class T>
T weighted_distance ( const TVec< T >& vec1, const TVec< T >& vec2, double n, const TVec< T >& weights )
 

Definition at line 824 of file TMat_maths_impl.h.

References mypow(), sqrt(), and weighted_powdistance().

template<class T>
T weighted_mean ( const TVec< T >& vec, const TVec< T >& weights, bool ignore_missing = false )
 

Definition at line 383 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), is_missing(), PLearn::TVec< T >::length(), MISSING_VALUE, and PLERROR.

Referenced by columnWeightedMean(), and weighted_variance().
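Presumably this computes the usual normalized weighted average, skipping missing entries when ignore_missing is true; the exact missing-value handling should be checked in the definition cited above.

    \mathrm{weighted\_mean}(x, w) \;=\; \frac{\sum_i w_i \, x_i}{\sum_i w_i}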

template<class T>
T weighted_powdistance ( const TVec< T >& vec1, const TVec< T >& vec2, double n, const TVec< T >& weights )
 

Definition at line 780 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), mypow(), and PLERROR.

Referenced by PLearn::ScaledGaussianKernel::evaluate(), and weighted_distance().
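The References line of weighted_distance above (mypow, sqrt and weighted_powdistance) suggests the usual weighted power distance and its n-th root; the presumed formulas are:

    \mathrm{weighted\_powdistance}(u, v; n, w) \;=\; \sum_i w_i \, |u_i - v_i|^n,
    \qquad
    \mathrm{weighted\_distance}(u, v; n, w) \;=\; \Big( \sum_i w_i \, |u_i - v_i|^n \Big)^{1/n}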

Var weighted_sumsquare ( Var v, Var w ) [inline]
 

Definition at line 80 of file WeightedSumSquareVariable.h.

Referenced by PLearn::NeuralNet::build_().

template<class T>
T weighted_variance ( const TVec< T >& vec, const TVec< T >& weights, T no_weighted_mean, T weighted_mean )
 

Definition at line 448 of file TMat_maths_impl.h.

References PLearn::TVec< T >::data(), PLearn::TVec< T >::length(), PLERROR, sum(), and weighted_mean().

Referenced by columnWeightedVariance().

Mat PLearn::weightedLinearRegression ( VMat inputs, VMat outputs, VMat gammas, real weight_decay )
 

Version that does all the memory allocations of XtX, XtY and theta_t. Returns theta_t.

Definition at line 1049 of file VMat_maths.cc.

References weightedLinearRegression(), and PLearn::VMat::width().

real PLearn::weightedLinearRegression ( VMat inputs, VMat outputs, VMat gammas, real weight_decay, Mat theta_t, bool use_precomputed_XtX_XtY, Mat XtX, Mat XtY, real& sum_squared_Y, real& sum_gammas, bool return_squared_loss = false, int verbose_computation_every = 0, bool cholesky = true )
 

Linear regression where each input point is given a different importance weight (the gammas); returns weighted average of squared loss.

Definition at line 987 of file VMat_maths.cc.

References PLearn::TMat< T >::clear(), dot(), externalProductScaleAcc(), PLearn::TMat< T >::length(), PLearn::VMat::length(), PLERROR, product(), solveLinearSystem(), solveLinearSystemByCholesky(), PLearn::TMat< T >::width(), PLearn::VMat::width(), and x.

Referenced by PLearn::LinearRegressor::train(), and weightedLinearRegression().
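Given the description above (per-point importance weights, a weight_decay penalty, and a solve via Cholesky or solveLinearSystem), theta_t presumably solves the weighted ridge normal equations sketched below, with X the inputs, Y the outputs, Gamma = diag(gammas) and lambda = weight_decay; XtX and XtY would then hold X'Gamma X and X'Gamma Y when use_precomputed_XtX_XtY is set.

    \left( X^\top \Gamma X + \lambda I \right) \theta \;=\; X^\top \Gamma Y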

FilesIntStream* PLearn::word_sequences2files_int_stream ( const char* word_sequences_file )
 

Converts a <word_sequences> filename into a FilesIntStream stream. The file must contain one line per <word_sequence> filename, and each of these filenames must refer to a binary integer file that can be associated with an IntStream.

Definition at line 247 of file IntStream.cc.

References PLERROR, strlen(), and tostring().

const string wordseparators ( " \t\n\r" ) [static]
 

List of characters considered to mark a separation between "words". This is a fairly restricted list, meaning that many things can be part of a "word" in this sense (for example: "this-is_a+single@wor'd"); this is to ensure a smooth transition for the new setOption, which calls readOptionVal ..., which may call read(istream&, string&)...

template<class T>
void write ( ostream& out_, const T& o ) [inline]
 

Definition at line 1085 of file PStream.h.

void write ( ostream& out, const RealRange& range ) [inline]
 

Definition at line 116 of file RealMapping.h.

Referenced by PLearn::CompactVMatrix::append(), PLearn::StatsCollector::oldwrite(), PLearn::PStream::operator<<(), PLearn::pl_fdstreambuf::overflow(), PLearn::PStream::put(), PLearn::RowMapSparseMatrix< real >::save(), PLearn::pl_fdstreambuf::sync(), PLearn::Variable::write(), PLearn::RowMapSparseMatrix< real >::write(), PLearn::PStream::write(), PLearn::PStream::writeAsciiNum(), writeField(), PLearn::OptionBase::writeIntoString(), and PLearn::pl_fdstreambuf::xsputn().

void write_bool ( ostream& out, const bool* ptr, int n, bool is_file_bigendian )
 

void write_compr_mode_and_size ( ostream& out, unsigned char mode, int size ) [inline]
 

Definition at line 76 of file pl_io.cc.

References binwrite(), header, mode, and PLERROR.

Referenced by binwrite_compressed().

void write_compr_mode_and_size_ptr ( char*& out, unsigned char mode, int size ) [inline]
 

Definition at line 429 of file pl_io.cc.

References header, mode, and PLERROR.

Referenced by compress_vec().

void write_double ( ostream& out, double value, bool is_file_bigendian = true ) [inline]
 

Definition at line 133 of file pl_io_deprecated.h.

References write_double().

void PLearn::write_double ( ostream& out, const double* ptr, int n, bool is_file_bigendian )
 

Definition at line 338 of file pl_io_deprecated.cc.

References reverse_double().

Referenced by write_double().

void write_float ( ostream& out, float value, bool is_file_bigendian = true ) [inline]
 

Definition at line 131 of file pl_io_deprecated.h.

References write_float().

void PLearn::write_float ( ostream& out, const float* ptr, int n, bool is_file_bigendian )
 

Definition at line 363 of file pl_io_deprecated.cc.

References reverse_float().

Referenced by write_float().

void write_int ( ostream& out, int value, bool is_file_bigendian = true ) [inline]
 

The following calls write a single value to the file in the specified representation, regardless of the endianness of the current architecture.

Definition at line 127 of file pl_io_deprecated.h.

References write_int().

void PLearn::write_int ( ostream& out, const int* ptr, int n, bool is_file_bigendian )
 

Writes binary data to the file in the specified representation (little or big endian), regardless of the endianness used on the current architecture.

Definition at line 290 of file pl_io_deprecated.cc.

References reverse_int().

Referenced by write_int(), and write_uint().
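A small sketch of writing values in an explicit byte order with the helpers above; the header path and output file name are illustrative only.

    #include <fstream>
    #include "pl_io_deprecated.h"   // assumed header location

    using namespace PLearn;
    using namespace std;

    void writeIntsSketch()
    {
        ofstream out("ints.bin", ios::binary);
        int values[3] = {1, 2, 3};
        write_int(out, 42, true);          // single value, stored big-endian
        write_int(out, values, 3, false);  // array of 3 ints, stored little-endian
    }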

void write_sbyte ( ostream& out, signed char x ) [inline]
 

Definition at line 139 of file pl_io_deprecated.h.

References x.

Referenced by PLearn::VecCompressor::writeCompressedVec().

void write_short ( ostream& out, short value, bool is_file_bigendian = true ) [inline]
 

Definition at line 129 of file pl_io_deprecated.h.

References write_short().

void PLearn::write_short ( ostream& out, const short* ptr, int n, bool is_file_bigendian )
 

Definition at line 314 of file pl_io_deprecated.cc.

References reverse_short().

Referenced by write_short(), and write_ushort().

void write_ubyte ( ostream& out, unsigned char x ) [inline]
 

Definition at line 140 of file pl_io_deprecated.h.

References x.

void write_uint ( ostream& out, unsigned int value, bool is_file_bigendian = true ) [inline]
 

Definition at line 135 of file pl_io_deprecated.h.

References write_uint().

void write_uint ( ostream& out, const unsigned int* ptr, int n, bool is_file_bigendian ) [inline]
 

Definition at line 119 of file pl_io_deprecated.h.

References write_int().

Referenced by write_uint().

void write_ushort ( ostream& out, unsigned short value, bool is_file_bigendian = true ) [inline]
 

Definition at line 137 of file pl_io_deprecated.h.

References write_ushort().

void write_ushort ( ostream& out, const unsigned short* ptr, int n, bool is_file_bigendian ) [inline]
 

Definition at line 121 of file pl_io_deprecated.h.

References write_short().

Referenced by write_ushort().

template<class T>
void writeField ( ostream& out, const string fieldname, const T& x )
 

generic field writing and reading

Definition at line 216 of file pl_io_deprecated.h.

References write(), writeFieldName(), and x.

Referenced by PLearn::VMatrix::oldwrite(), PLearn::QuantilesStatsIterator::oldwrite(), PLearn::LiftStatsIterator::oldwrite(), PLearn::SharpeRatioStatsIterator::oldwrite(), PLearn::StderrStatsIterator::oldwrite(), PLearn::StddevStatsIterator::oldwrite(), PLearn::ExpMeanStatsIterator::oldwrite(), PLearn::MeanStatsIterator::oldwrite(), PLearn::StatsIterator::oldwrite(), PLearn::StatsCollector::oldwrite(), PLearn::Optimizer::oldwrite(), PLearn::Learner::oldwrite(), and PLearn::RowMapSparseMatrix< real >::write().

void PLearn::writeFieldName ( ostream& out, const string fieldname )
 

writes "fieldname: "

Writes and reads the given fieldname (should be followed by writing or reading of the field's value). The readFieldName method checks the read fieldname against the one passed as argument and issues an error if they do not match.

Definition at line 92 of file pl_io_deprecated.cc.

Referenced by binwriteField(), binwriteField_double(), PLearn::StatsCollector::oldwrite(), and writeField().

void PLearn::writeFooter ( ostream& out, const string classname )
 

writes "</ClassName>\n"

Definition at line 58 of file pl_io_deprecated.cc.

Referenced by PLearn::VMatrix::oldwrite(), PLearn::QuantilesStatsIterator::oldwrite(), PLearn::LiftStatsIterator::oldwrite(), PLearn::MaxStatsIterator::oldwrite(), PLearn::MinStatsIterator::oldwrite(), PLearn::SharpeRatioStatsIterator::oldwrite(), PLearn::StderrStatsIterator::oldwrite(), PLearn::StddevStatsIterator::oldwrite(), PLearn::ExpMeanStatsIterator::oldwrite(), PLearn::MeanStatsIterator::oldwrite(), PLearn::StatsIterator::oldwrite(), PLearn::StatsCollector::oldwrite(), PLearn::Optimizer::oldwrite(), PLearn::Learner::oldwrite(), PLearn::RowMapSparseMatrix< real >::write(), and PLearn::RealMapping::write().

void PLearn::writeHeader ( ostream& out, const string classname, int version = 0 )
 

writes "<ClassName:version>\n"

Definition at line 55 of file pl_io_deprecated.cc.

Referenced by PLearn::VMatrix::oldwrite(), PLearn::QuantilesStatsIterator::oldwrite(), PLearn::LiftStatsIterator::oldwrite(), PLearn::MaxStatsIterator::oldwrite(), PLearn::MinStatsIterator::oldwrite(), PLearn::SharpeRatioStatsIterator::oldwrite(), PLearn::StderrStatsIterator::oldwrite(), PLearn::StddevStatsIterator::oldwrite(), PLearn::ExpMeanStatsIterator::oldwrite(), PLearn::MeanStatsIterator::oldwrite(), PLearn::StatsIterator::oldwrite(), PLearn::StatsCollector::oldwrite(), PLearn::Optimizer::oldwrite(), PLearn::Learner::oldwrite(), PLearn::RowMapSparseMatrix< real >::write(), and PLearn::RealMapping::write().
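A sketch combining writeHeader, writeField and writeFooter into the old-style serialization skeleton they are used for in the oldwrite() methods listed above; the class name and field names are made up for illustration, and the header path is assumed.

    #include <iostream>
    #include "pl_io_deprecated.h"   // assumed header location

    using namespace PLearn;
    using namespace std;

    void oldWriteSketch(ostream& out)
    {
        writeHeader(out, "MyClass", 1);          // writes "<MyClass:1>\n"
        writeField(out, "learning_rate", 0.01);  // writes "learning_rate: " followed by the value
        writeField(out, "nstages", 100);
        writeFooter(out, "MyClass");             // writes "</MyClass>\n"
    }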

template<class MapT>
void writeMap ( PStream out, const MapT& m )
 

Definition at line 674 of file PStream.h.

References PLearn::PStream::put(), and PLearn::PStream::write().

Referenced by operator<<().

void writeNewline ( ostream& out ) [inline]
 

Writes a single newline character.

Definition at line 196 of file pl_io_deprecated.h.

Referenced by PLearn::StatsCollector::oldwrite().

template<class SequenceType>
void writeSequence ( PStream out, const SequenceType& seq )
 

Definition at line 891 of file PStream.h.

References binwrite_(), byte_order(), LITTLE_ENDIAN_ORDER, PLearn::PStream::outmode, PLERROR, PLearn::PStream::put(), and PLearn::PStream::write().

Referenced by operator<<(), and PLearn::TVec< pair< real, real > >::write().

template<class SetT>
void writeSet ( PStream out, const SetT& s )
 

Definition at line 1120 of file PStream.h.

References PLearn::PStream::put(), and PLearn::PStream::write().

Referenced by operator<<().

PStream& PLearn::ws ( PStream out )
 

Definition at line 64 of file PStream.cc.

References PLearn::PStream::skipBlanksAndComments().

Referenced by PLearn::SumOverBagsVariable::build_(), PLearn::AsciiVMatrix::build_(), PLearn::WordNetOntology::computeWordSenseUniqueIds(), PLearn::WordNetOntology::extractSenses(), getDatasetAliases(), getModelAliases(), PLearn::WordNetOntology::getSenseKey(), PLearn::WordNetOntology::getWordSenseUniqueId(), PLearn::WordNetOntology::load(), loadAscii(), loadAsciiSingleBinaryDescriptor(), PLearn::RealMapping::read(), PLearn::Object::read(), and PLearn::GaussianDistribution::train().


Variable Documentation

PLearnInit PLearn::_plearn_init_ [static]
 

Definition at line 55 of file general.cc.

const char PLearn::ALPHAsymbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
 

Definition at line 51 of file TypesNumeriques.cc.

Referenced by compactRepresentationTranslate().

double PLearn::big = 4.503599627370496e15
 

Definition at line 710 of file random.cc.

Referenced by incbcf(), and incbd().

double PLearn::biginv = 2.22044604925031308085e-16
 

Definition at line 711 of file random.cc.

Referenced by incbcf(), and incbd().

pl_stream_clear_flags PLearn::clear_flags
 

Definition at line 47 of file PStream_util.cc.

const string PLearn::dbdir_name = "" [static]
 

Definition at line 179 of file databases.cc.

Referenced by loadATT800(), loadBreastCancer(), loadBreastCancerWisconsin(), loadCallxx(), loadCorelDatamat(), loadDiabetes(), loadHousing(), loadIonosphere(), loadLetters(), loadPimaIndians(), loadSonar(), loadUCI(), and loadUSPS().

const char PLearn::DIGITsymbols = "0123456789"
 

Definition at line 50 of file TypesNumeriques.cc.

Referenced by compactRepresentationTranslate(), and looksNumeric().

bool PLearn::displayvg = false [static]
 

Definition at line 138 of file GradientOptimizer.cc.

Referenced by PLearn::GradientOptimizer::optimize().

ostream * PLearn::error_stream = &cerr
 

Definition at line 59 of file plerror.cc.

Referenced by errormsg(), exitmsg(), and warningmsg().

real PLearn::gset [static]
 

Definition at line 57 of file random.cc.

Referenced by gaussian_01().

const void * PLearn::Hash_DELETED_SLOT = (void *)0x00000001
 

Definition at line 50 of file Hash.cc.

Referenced by PLearn::Hash< KeyType, DataType >::add(), PLearn::Hash< KeyType, DataType >::cleanup(), PLearn::Hash< KeyType, DataType >::del(), PLearn::Hash< KeyType, DataType >::element(), PLearn::Hash< KeyType, DataType >::find(), PLearn::Hash< KeyType, DataType >::findGap(), PLearn::Hash< KeyType, DataType >::flush(), PLearn::Hash< KeyType, DataType >::hashAddress(), PLearn::Hash< KeyType, DataType >::operator()(), PLearn::Hash< KeyType, DataType >::operator[](), and PLearn::Hash< KeyType, DataType >::resize().

const unsigned int PLearn::Hash_NOMBRES_MAGIQUES
 

Definition at line 53 of file Hash.cc.

Referenced by PLearn::Hash< KeyType, DataType >::hashKey().

const unsigned int PLearn::Hash_UNUSED_TAG = 0xffffffffu
 

DataType must have new, delete and a copy constructor. Keys are unique.

KeyType must have: a (char*) type cast operator that returns a pointer to its useful region (since the key can be any object, the useful part does not necessarily start at offset 0); a const size_t byteLength() const method that returns the byte size of that useful region; new, delete and a copy constructor; and operators == and !=.

Definition at line 49 of file Hash.cc.

Referenced by PLearn::SimpleDB< KeyType, QueryResult >::findEqualIndexed(), PLearn::Hash< KeyType, DataType >::hashAddress(), and PLearn::SimpleDB< KeyType, QueryResult >::indexColumn().

pl_stream_initiate PLearn::initiate
 

Definition at line 48 of file PStream_util.cc.

int PLearn::iset = 0 [static]
 

Definition at line 56 of file random.cc.

Referenced by gaussian_01(), and manual_seed().

double PLearn::MACHEP = 1.11022302462515654042E-16
 

Definition at line 706 of file random.cc.

Referenced by incbcf(), and incbd().

double PLearn::MAXLOG = 7.09782712893383996732E2
 

Definition at line 704 of file random.cc.

double PLearn::MINLOG = -7.451332191019412076235E2
 

Definition at line 705 of file random.cc.

const unsigned char PLearn::MissingCharacter = (unsigned char)SCHAR_MIN
 

Definition at line 48 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const PDate PLearn::MissingDate
 

Definition at line 54 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const double PLearn::MissingDouble = MISSING_VALUE
 

Definition at line 53 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const float PLearn::MissingFloat = MISSING_VALUE
 

Definition at line 52 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::setMissing().

const int PLearn::MissingInt = INT_MIN
 

Definition at line 51 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const short PLearn::MissingShort = SHRT_MIN
 

Definition at line 50 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const signed char PLearn::MissingSignedChar = (signed char)SCHAR_MIN
 

Definition at line 49 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

const char PLearn::MissingString = '\0'
 

A few constants for representing missing values.

Definition at line 47 of file SimpleDB.cc.

Referenced by PLearn::RowIterator::isMissing(), PLearn::FieldValue::isMissing(), PLearn::RowIterator::setMissing(), and PLearn::FieldValue::setMissing().

pl_nullstreambuf PLearn::null_streambuf [static]
 

Definition at line 51 of file PStream_util.cc.

istream PLearn::nullin
 

a null instream: reading from it does nothing

Definition at line 82 of file PStream_util.h.

Referenced by PLearn::PLMPI::init().

iostream PLearn::nullinout
 

a null iostream: reading/writing from/to it does nothing

Definition at line 83 of file PStream_util.h.

ostream PLearn::nullout
 

a null ostream: writing to it does nothing

Definition at line 81 of file PStream_util.h.

Referenced by PLearn::PLMPI::init().

const char* PLearn::ORDINALS[] = {"d","nd","th","st",0}
 

Definition at line 52 of file TypesNumeriques.cc.

Referenced by compactRepresentationTranslate().

const int PLearn::pl_dftbuflen = 4096
 

Definition at line 53 of file pl_fdstream.h.

Referenced by PLearn::StdPStreamBuf::attach(), and PLearn::pl_fdstream::attach().

double PLearn::pl_gammln_cof[7] [static]
 

Initial value:

{ 1.000000000190015 , 76.18009172947146 , -86.50532032941677 , 24.01409824083091 , -1.231739572450155 , 0.1208650973866179e-2, -0.5395239384953e-5 }

Definition at line 50 of file pl_erf.cc.

Referenced by pl_dgammlndz(), and pl_gammln().

const size_t PLearn::PL_HASH_NOMBRES_MAGIQUES
 

Definition at line 42 of file pl_hash_fun.cc.

Referenced by hashbytes().

PLMathInitializer PLearn::pl_math_initializer
 

Definition at line 73 of file pl_math.cc.

_plearn_nan_type PLearn::plearn_nan
 

Definition at line 72 of file pl_math.h.

pl_stream_raw PLearn::raw
 

Definition at line 46 of file PStream_util.cc.

Referenced by PLearn::Row::begin(), PLearn::Row::bind(), PLearn::RowIterator::copyFrom(), PLearn::Row::end(), and PLearn::Row::Row().

const tRule PLearn::rules[]
 

Initial value:

{ {"#an", NT_CARDINAL + NT_PREFIXED }, {"#n", NT_CARDINAL }, {"#na", NT_CARDINAL + NT_SUFFIXED }, {"#ar", NT_RANGE + NT_PREFIXED }, {"#r", NT_RANGE }, {"#ra", NT_RANGE + NT_SUFFIXED }, {"#n'a", NT_ORDINAL + NT_SUFFIXED }, {"#ao", NT_ORDINAL + NT_PREFIXED }, {"#o", NT_ORDINAL }, {"#oa", NT_ORDINAL + NT_SUFFIXED }, {"#o'a", NT_ORDINAL + NT_SUFFIXED }, {"#$n", NT_CURRENCY }, {"#$na", NT_CURRENCY + NT_SUFFIXED }, {"#$r", NT_CURRENCY + NT_RANGE }, {"#$ra", NT_CURRENCY + NT_RANGE + NT_SUFFIXED }, {"#n:n", NT_TIME }, {"#n:n:n", NT_TIME }, {"#r:n", NT_CODE }, {"#n:r", NT_CODE }, {"", NT_UNKNOWN_NUMERIC_TYPE} }

Definition at line 89 of file TypesNumeriques.h.

Referenced by numericType().

const int PLearn::STORAGE_UNUSED_HANDLE = -1
 

Definition at line 48 of file MemoryMap.h.

Referenced by PLearn::Storage< pair< real, real > >::pointTo(), PLearn::Storage< pair< real, real > >::Storage(), and PLearn::Storage< pair< real, real > >::~Storage().

float PLearn::tanhtable
 

Definition at line 60 of file pl_math.cc.

Referenced by fasttanh(), and PLearn::PLMathInitializer::PLMathInitializer().

long PLearn::the_seed = 0 [static]
 

Definition at line 55 of file random.cc.

Referenced by gaussian_01(), get_seed(), manual_seed(), and uniform_sample().

string PLearn::vmat_view_dataset [static]
 

The specification of the dataset viewed by the vmat program.

Definition at line 45 of file vmatmain.h.


Generated on Tue Aug 17 16:22:32 2004 for PLearn by doxygen 1.3.7