NormDerivativeLem3.cpp

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 1999-2009 Soeren Sonnenburg
 * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#include "preproc/NormDerivativeLem3.h"
#include "preproc/SimplePreProc.h"
#include "features/Features.h"
#include "features/SimpleFeatures.h"

using namespace shogun;

CNormDerivativeLem3::CNormDerivativeLem3()
: CSimplePreProc<float64_t>("NormDerivativeLem3", "NDL3")
{
}

CNormDerivativeLem3::~CNormDerivativeLem3()
{
}

/// initialize preprocessor from features
bool CNormDerivativeLem3::init(CFeatures* f)
{
    // only dense real-valued features are supported
    ASSERT(f->get_feature_class()==C_SIMPLE);
    ASSERT(f->get_feature_type()==F_DREAL);

    return true;
}

/// clean up allocated memory
void CNormDerivativeLem3::cleanup()
{
}

/// initialize preprocessor from a file (not implemented, always returns false)
bool CNormDerivativeLem3::load(FILE* f)
{
    return false;
}

/// save preprocessor init-data to a file (not implemented, always returns false)
bool CNormDerivativeLem3::save(FILE* f)
{
    return false;
}

/// apply preproc to a feature matrix (not implemented, returns NULL)
float64_t* CNormDerivativeLem3::apply_to_feature_matrix(CFeatures* f)
{
    return NULL;
}

/// apply preproc to a single feature vector (not implemented, returns NULL)
float64_t* CNormDerivativeLem3::apply_to_feature_vector(
    float64_t* f, int32_t len)
{
    return NULL;
}

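// --------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original file): a minimal
// illustration of how the preprocessor interface above would be driven by
// calling code once the transform is implemented.  The helper name and the
// way the feature object is obtained are assumptions for illustration only;
// the caller must supply a dense real-valued (C_SIMPLE / F_DREAL) CFeatures.
// --------------------------------------------------------------------------
static float64_t* example_apply_norm_derivative_lem3(
    CFeatures* feats, float64_t* vec, int32_t len)
{
    CNormDerivativeLem3 preproc;

    // init() asserts that feats is a dense real-valued feature object
    if (!preproc.init(feats))
        return NULL;

    // currently a stub returning NULL; a full implementation would return
    // the normalized copy of vec
    float64_t* normalized=preproc.apply_to_feature_vector(vec, len);

    preproc.cleanup();
    return normalized;
}
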
//#warning TODO implement jahau
//#ifdef JaaHau
//    //this is the normalization used in jaahau
//    int32_t o_p=1;
//    float64_t sum_p=0;
//    float64_t sum_q=0;
//    //first do positive model
//    for (i=0; i<pos->get_N(); i++)
//    {
//        featurevector[p]=exp(pos->model_derivative_p(i, x)-posx);
//        sum_p=exp(pos->get_p(i))*featurevector[p++];
//        featurevector[p]=exp(pos->model_derivative_q(i, x)-posx);
//        sum_q=exp(pos->get_q(i))*featurevector[p++];
//
//        float64_t sum_a=0;
//        for (j=0; j<pos->get_N(); j++)
//        {
//            featurevector[p]=exp(pos->model_derivative_a(i, j, x)-posx);
//            sum_a=exp(pos->get_a(i,j))*featurevector[p++];
//        }
//        p-=pos->get_N();
//        for (j=0; j<pos->get_N(); j++)
//            featurevector[p++]-=sum_a;
//
//        float64_t sum_b=0;
//        for (j=0; j<pos->get_M(); j++)
//        {
//            featurevector[p]=exp(pos->model_derivative_b(i, j, x)-posx);
//            sum_b=exp(pos->get_b(i,j))*featurevector[p++];
//        }
//        p-=pos->get_M();
//        for (j=0; j<pos->get_M(); j++)
//            featurevector[p++]-=sum_b;
//    }
//
//    o_p=p;
//    p=1;
//    for (i=0; i<pos->get_N(); i++)
//    {
//        featurevector[p++]-=sum_p;
//        featurevector[p++]-=sum_q;
//    }
//    p=o_p;
//
//    for (i=0; i<neg->get_N(); i++)
//    {
//        featurevector[p]=-exp(neg->model_derivative_p(i, x)-negx);
//        sum_p=exp(neg->get_p(i))*featurevector[p++];
//        featurevector[p]=-exp(neg->model_derivative_q(i, x)-negx);
//        sum_q=exp(neg->get_q(i))*featurevector[p++];
//
//        float64_t sum_a=0;
//        for (j=0; j<neg->get_N(); j++)
//        {
//            featurevector[p]=-exp(neg->model_derivative_a(i, j, x)-negx);
//            sum_a=exp(neg->get_a(i,j))*featurevector[p++];
//        }
//        p-=neg->get_N();
//        for (j=0; j<neg->get_N(); j++)
//            featurevector[p++]-=sum_a;
//
//        float64_t sum_b=0;
//        for (j=0; j<neg->get_M(); j++)
//        {
//            featurevector[p]=-exp(neg->model_derivative_b(i, j, x)-negx);
//            sum_b=exp(neg->get_b(i,j))*featurevector[p++];
//        }
//        p-=neg->get_M();
//        for (j=0; j<neg->get_M(); j++)
//            featurevector[p++]-=sum_b;
//    }
//
//    p=o_p;
//    for (i=0; i<neg->get_N(); i++)
//    {
//        featurevector[p++]-=sum_p;
//        featurevector[p++]-=sum_q;
//    }
//#endif
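
// --------------------------------------------------------------------------
// Hypothetical sketch (not part of the original file): the commented-out
// JaaHau block above builds exponentiated model derivatives and then centres
// each block of features with a parameter-weighted sum.  The standalone
// helper below illustrates that centring step in isolation; the names deriv,
// log_param and num are assumptions made only for this illustration, and an
// accumulated weighted sum is assumed where the commented code overwrites
// its sum_* variables inside the loops.
// --------------------------------------------------------------------------
#include <math.h>

static void example_center_derivative_block(
    float64_t* deriv, const float64_t* log_param, int32_t num)
{
    // weighted sum over the block: sum_j exp(log_param[j]) * deriv[j]
    float64_t weighted_sum=0;
    for (int32_t j=0; j<num; j++)
        weighted_sum+=exp(log_param[j])*deriv[j];

    // subtract the weighted sum from every entry of the block, mirroring the
    // "featurevector[p++]-=sum_*" loops in the commented-out code above
    for (int32_t j=0; j<num; j++)
        deriv[j]-=weighted_sum;
}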