// Copyright 2013 Yangqing Jia

#ifndef CAFFE_VISION_LAYERS_HPP_
#define CAFFE_VISION_LAYERS_HPP_

#include <leveldb/db.h>

#include <vector>

#include "caffe/layer.hpp"

namespace caffe {

// The neuron layer is a specific type of layer that just works on single
// elements.
template <typename Dtype>
class NeuronLayer : public Layer<Dtype> {
 public:
  explicit NeuronLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
};

template <typename Dtype>
class ReLULayer : public NeuronLayer<Dtype> {
 public:
  explicit ReLULayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};

template <typename Dtype>
class DropoutLayer : public NeuronLayer<Dtype> {
 public:
  explicit DropoutLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // rand_vec_ holds the random mask; threshold_ is the dropout ratio,
  // scale_ rescales the surviving activations, and uint_thres_ is the
  // equivalent integer threshold for comparing against random unsigned ints.
  shared_ptr<SyncedMemory> rand_vec_;
  float threshold_;
  float scale_;
  unsigned int uint_thres_;
};

template <typename Dtype>
class InnerProductLayer : public Layer<Dtype> {
 public:
  explicit InnerProductLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // GEMM dimensions: M_ is the number of examples, K_ the input dimension,
  // and N_ the number of outputs.
  int M_;
  int K_;
  int N_;
  bool biasterm_;
  shared_ptr<SyncedMemory> bias_multiplier_;
};

template <typename Dtype>
class PaddingLayer : public Layer<Dtype> {
 public:
  explicit PaddingLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  unsigned int PAD_;
  int NUM_;
  int CHANNEL_;
  int HEIGHT_IN_;
  int WIDTH_IN_;
  int HEIGHT_OUT_;
  int WIDTH_OUT_;
};

template <typename Dtype>
class LRNLayer : public Layer<Dtype> {
 public:
  explicit LRNLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // scale_ stores the intermediate summing results.
  Blob<Dtype> scale_;
  int size_;
  int pre_pad_;
  Dtype alpha_;
  Dtype beta_;
  int num_;
  int channels_;
  int height_;
  int width_;
};
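// Illustrative sketch, not part of the original header: a naive CPU version
// of the cross-channel normalization LRNLayer is expected to perform for one
// image, assuming the standard local response normalization form with the
// alpha_ / size_ scaling. The real implementation lives in lrn_layer.cpp and
// caches the denominators in scale_; naive_lrn_forward is a hypothetical
// helper name, and <cmath> is assumed to be available.
template <typename Dtype>
inline void naive_lrn_forward(const Dtype* bottom, Dtype* top,
    int channels, int height, int width,
    int size, Dtype alpha, Dtype beta) {
  for (int c = 0; c < channels; ++c) {
    // Window of `size` channels centered on c, clipped at the boundaries.
    int c_start = c - (size - 1) / 2;
    int c_end = c_start + size;
    if (c_start < 0) c_start = 0;
    if (c_end > channels) c_end = channels;
    for (int h = 0; h < height; ++h) {
      for (int w = 0; w < width; ++w) {
        Dtype sq_sum = 0;
        for (int cc = c_start; cc < c_end; ++cc) {
          const Dtype v = bottom[(cc * height + h) * width + w];
          sq_sum += v * v;
        }
        const int idx = (c * height + h) * width + w;
        // scale = 1 + (alpha / size) * sum of squares over the window;
        // the output is the input scaled by scale^(-beta).
        const Dtype s = Dtype(1) + alpha / size * sq_sum;
        top[idx] = bottom[idx] * std::pow(s, -beta);
      }
    }
  }
}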
template <typename Dtype>
class Im2colLayer : public Layer<Dtype> {
 public:
  explicit Im2colLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  int KSIZE_;
  int STRIDE_;
  int CHANNELS_;
  int HEIGHT_;
  int WIDTH_;
};

template <typename Dtype>
class PoolingLayer : public Layer<Dtype> {
 public:
  explicit PoolingLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  int KSIZE_;
  int STRIDE_;
  int CHANNELS_;
  int HEIGHT_;
  int WIDTH_;
  int POOLED_HEIGHT_;
  int POOLED_WIDTH_;
};

template <typename Dtype>
class ConvolutionLayer : public Layer<Dtype> {
 public:
  explicit ConvolutionLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  int KSIZE_;
  int STRIDE_;
  int NUM_;
  int CHANNELS_;
  int HEIGHT_;
  int WIDTH_;
  int NUM_OUTPUT_;
  int GROUP_;
  // col_buffer_ holds the im2col-unrolled input so the convolution can be
  // carried out as a matrix multiplication with GEMM dimensions M_, K_, N_.
  Blob<Dtype> col_buffer_;
  shared_ptr<SyncedMemory> bias_multiplier_;
  bool biasterm_;
  int M_;
  int K_;
  int N_;
};

template <typename Dtype>
class DataLayer : public Layer<Dtype> {
 public:
  explicit DataLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  shared_ptr<leveldb::DB> db_;
  shared_ptr<leveldb::Iterator> iter_;
  int datum_size_;
};

template <typename Dtype>
class SoftmaxLayer : public Layer<Dtype> {
 public:
  explicit SoftmaxLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  // virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  //     vector<Blob<Dtype>*>* top);
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // sum_multiplier_ is just used to carry out the sum using BLAS.
  Blob<Dtype> sum_multiplier_;
  // scale_ is an intermediate blob to hold temporary results.
  Blob<Dtype> scale_;
};
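// Illustrative sketch, not part of the original header: a naive CPU version
// of the forward pass SoftmaxLayer declares above, assuming the usual
// max-shifted softmax. The real Forward_cpu subtracts the per-example max and
// computes the per-example sums via BLAS against sum_multiplier_ (a vector of
// ones) rather than with explicit loops; naive_softmax_forward is a
// hypothetical helper name, and <cmath> is assumed to be available.
template <typename Dtype>
inline void naive_softmax_forward(const Dtype* bottom, Dtype* top,
    int num, int dim) {
  for (int i = 0; i < num; ++i) {
    const Dtype* in = bottom + i * dim;
    Dtype* out = top + i * dim;
    // Subtract the per-example max before exponentiating for numerical
    // stability.
    Dtype max_val = in[0];
    for (int j = 1; j < dim; ++j) {
      if (in[j] > max_val) max_val = in[j];
    }
    Dtype sum = 0;
    for (int j = 0; j < dim; ++j) {
      out[j] = std::exp(in[j] - max_val);
      sum += out[j];
    }
    for (int j = 0; j < dim; ++j) {
      out[j] /= sum;
    }
  }
}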
template <typename Dtype>
class MultinomialLogisticLossLayer : public Layer<Dtype> {
 public:
  explicit MultinomialLogisticLossLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);

 protected:
  // The loss layer does nothing during the forward pass - all computation is
  // carried out in the backward pass.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) { return; }
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) { return; }
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  // virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};

}  // namespace caffe

#endif  // CAFFE_VISION_LAYERS_HPP_