
PLearnerOutputVMatrix.cc

// -*- C++ -*-

// PLearnerOutputVMatrix.cc
//
// Copyright (C) 2003 Yoshua Bengio
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//  1. Redistributions of source code must retain the above copyright
//     notice, this list of conditions and the following disclaimer.
//
//  2. Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//
//  3. The name of the authors may not be used to endorse or promote
//     products derived from this software without specific prior written
//     permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
// NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// This file is part of the PLearn library. For more information on the PLearn
// library, go to the PLearn Web site at www.plearn.org

/* *******************************************************
   * $Id: PLearnerOutputVMatrix.cc,v 1.13 2004/07/07 17:30:48 tihocan Exp $
   ******************************************************* */

// Authors: Yoshua Bengio

#include "PLearnerOutputVMatrix.h"

namespace PLearn {
using namespace std;


PLearnerOutputVMatrix::PLearnerOutputVMatrix()
    : inherited(),
      put_raw_input(false),
      train_learners(false)
    /* ### Initialize all fields to their default value */
{}

PLearnerOutputVMatrix::PLearnerOutputVMatrix(VMat data_, TVec<PP<PLearner> > learners_,
                                             bool put_raw_input_)
    : data(data_), learners(learners_),
      put_raw_input(put_raw_input_),
      train_learners(false)
{
    build();
}

PLEARN_IMPLEMENT_OBJECT(PLearnerOutputVMatrix,
    "Use a PLearner (or a set of them) to transform the input part of a data set into the learners' outputs",
    "The input part of this VMatrix is obtained from the input part of an original data set on which\n"
    "the computeOutput method of one or more PLearners is applied. The other columns of the original data set\n"
    "are copied as is. Optionally, the raw input can also be copied,\n"
    "always in the input part of the new VMatrix. The order of the elements of a new row is as follows:\n"
    " - the outputs of the learners (concatenated) when applied on the input part of the original data,\n"
    " - optionally, the raw input part of the original data,\n"
    " - all the non-input columns of the original data.");

void PLearnerOutputVMatrix::getNewRow(int i, const Vec& v) const
{
    int c = 0;
    if (learners_need_train) {
        // We need to train the learners first.
        for (int k = 0; k < learners.length(); k++) {
            learners[k]->train();
        }
        learners_need_train = false;
    }
    data->getRow(i, row);
    for (int j = 0; j < learners.length(); j++)
    {
        // Each learner writes its output into its own row of 'learners_output'.
        Vec out_j = learners_output(j);
        learners[j]->computeOutput(learner_input, out_j);
    }
    // Assemble the new row: concatenated learner outputs, then (optionally)
    // the raw input, then the non-input part of the original row.
    v.subVec(0, c = learners_output.size()) << learners_output.toVec();
    if (put_raw_input)
    {
        v.subVec(c, learner_input->length()) << learner_input;
        c += learner_input->length();
    }
    v.subVec(c, non_input_part_of_data_row.length()) << non_input_part_of_data_row;
}

void PLearnerOutputVMatrix::declareOptions(OptionList& ol)
{
    // ### Declare all of this object's options here
    // ### For the "flags" of each option, you should typically specify
    // ### one of OptionBase::buildoption, OptionBase::learntoption or
    // ### OptionBase::tuningoption. Another possible flag to be combined with
    // ### is OptionBase::nosave

    declareOption(ol, "data", &PLearnerOutputVMatrix::data, OptionBase::buildoption,
                  "The original data set (a VMat)");

    declareOption(ol, "learners", &PLearnerOutputVMatrix::learners, OptionBase::buildoption,
                  "The vector of PLearners which will be applied to the data set");

    declareOption(ol, "put_raw_input", &PLearnerOutputVMatrix::put_raw_input, OptionBase::buildoption,
                  "Whether to include the raw input part of the original data in the input part of this VMatrix");

    declareOption(ol, "train_learners", &PLearnerOutputVMatrix::train_learners, OptionBase::buildoption,
                  "If set to 1, the learners will be trained on 'data' before computing the output");

    // Now call the parent class' declareOptions
    inherited::declareOptions(ol);
}

void PLearnerOutputVMatrix::build_()
{
    if (data && learners.length() > 0 && learners[0])
    {
        if (train_learners) {
            // Set the learners' training set.
            for (int i = 0; i < learners.length(); i++) {
                learners[i]->setTrainingSet(data);
            }
            // Note that the learners will only be trained when getRow() is actually called.
        }
        learners_need_train = train_learners;
        row.resize(data->width());
        if (data->inputsize() < 0)
            PLERROR("In PLearnerOutputVMatrix::build_ - The 'data' matrix has a negative inputsize");
        if (data->targetsize() < 0)
            PLERROR("In PLearnerOutputVMatrix::build_ - The 'data' matrix has a negative targetsize");
        learner_input = row.subVec(0, data->inputsize());
        learner_target = row.subVec(data->inputsize(), data->targetsize());
        non_input_part_of_data_row = row.subVec(data->inputsize(), data->width() - data->inputsize());
        learners_output.resize(learners->length(), learners[0]->outputsize());
        inputsize_ = 0;
        for (int i = 0; i < learners->length(); i++)
            inputsize_ += learners[i]->outputsize();
        if (put_raw_input)
            inputsize_ += data->inputsize();
        targetsize_ = data->targetsize();
        weightsize_ = data->weightsize();
        length_ = data->length();
        width_ = data->width() - data->inputsize() + inputsize_;

        // Set field info.
        fieldinfos.resize(width_);
        if (data->getFieldInfos().size() >= data->inputsize() + data->targetsize()) {
            // We can retrieve the information for the target columns.
            for (int i = 0; i < data->targetsize(); i++) {
                fieldinfos[i + this->inputsize()] = data->getFieldInfos()[i + data->inputsize()];
            }
        }
    }
}

// ### Nothing to add here, simply calls build_
void PLearnerOutputVMatrix::build()
{
    inherited::build();
    build_();
}

void PLearnerOutputVMatrix::makeDeepCopyFromShallowCopy(map<const void*, void*>& copies)
{
    inherited::makeDeepCopyFromShallowCopy(copies);

    deepCopyField(row, copies);
    deepCopyField(learner_input, copies);
    deepCopyField(learners_output, copies);
    deepCopyField(learner_target, copies);
    deepCopyField(non_input_part_of_data_row, copies);
}

} // end of namespace PLearn
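A minimal C++ usage sketch may help read the listing above. It relies only on names defined in this file and on basic PLearn types (VMat, Vec, TVec, PP); the helper function wrap_with_learner_outputs, the raw_data VMat and the my_learner PLearner are hypothetical placeholders, and the learner is assumed to be already built and trained (since train_learners defaults to false).

// Usage sketch only -- 'raw_data' (a VMat with inputsize/targetsize set) and
// 'my_learner' (an already built and trained PLearner) are assumed to exist.
#include "PLearnerOutputVMatrix.h"

using namespace PLearn;

VMat wrap_with_learner_outputs(VMat raw_data, PP<PLearner> my_learner)
{
    TVec<PP<PLearner> > learners(1);
    learners[0] = my_learner;

    // Each row of the result is: the learner's output, (optionally the raw
    // input,) then the non-input columns of 'raw_data'.
    VMat transformed = new PLearnerOutputVMatrix(raw_data, learners,
                                                 false /* put_raw_input_ */);

    Vec row(transformed->width());
    transformed->getRow(0, row); // computed on demand via getNewRow() above
    return transformed;
}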
