1 // Copyright 2013 Yangqing Jia
3 #ifndef CAFFE_VISION_LAYERS_HPP_
4 #define CAFFE_VISION_LAYERS_HPP_
6 #include <leveldb/db.h>
8 #include "caffe/layer.hpp"
10 namespace caffe {
// The neuron layer is a specific type of layer that just works on single
// elements.
14 template <typename Dtype>
15 class NeuronLayer : public Layer<Dtype> {
16 public:
17 explicit NeuronLayer(const LayerParameter& param)
18 : Layer<Dtype>(param) {};
19 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
20 vector<Blob<Dtype>*>* top);
21 };
24 template <typename Dtype>
25 class ReLULayer : public NeuronLayer<Dtype> {
26 public:
27 explicit ReLULayer(const LayerParameter& param)
28 : NeuronLayer<Dtype>(param) {};
29 protected:
30 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
31 vector<Blob<Dtype>*>* top);
32 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
33 vector<Blob<Dtype>*>* top);
35 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
36 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
37 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
38 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
39 };
42 template <typename Dtype>
43 class DropoutLayer : public NeuronLayer<Dtype> {
44 public:
45 explicit DropoutLayer(const LayerParameter& param)
46 : NeuronLayer<Dtype>(param) {};
47 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
48 vector<Blob<Dtype>*>* top);
49 protected:
50 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
51 vector<Blob<Dtype>*>* top);
52 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
53 vector<Blob<Dtype>*>* top);
55 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
56 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
57 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
58 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
59 shared_ptr<SyncedMemory> rand_vec_;
60 float threshold_;
61 float scale_;
62 unsigned int uint_thres_;
63 };
66 template <typename Dtype>
67 class InnerProductLayer : public Layer<Dtype> {
68 public:
69 explicit InnerProductLayer(const LayerParameter& param)
70 : Layer<Dtype>(param) {};
71 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
72 vector<Blob<Dtype>*>* top);
73 protected:
74 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
75 vector<Blob<Dtype>*>* top);
76 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
77 vector<Blob<Dtype>*>* top);
79 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
80 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
81 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
82 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
83 int M_;
84 int K_;
85 int N_;
86 bool biasterm_;
87 shared_ptr<SyncedMemory> bias_multiplier_;
88 };
90 template <typename Dtype>
91 class PaddingLayer : public Layer<Dtype> {
92 public:
93 explicit PaddingLayer(const LayerParameter& param)
94 : Layer<Dtype>(param) {};
95 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
96 vector<Blob<Dtype>*>* top);
97 protected:
98 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
99 vector<Blob<Dtype>*>* top);
100 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
101 vector<Blob<Dtype>*>* top);
102 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
103 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
104 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
105 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
106 unsigned int PAD_;
107 int NUM_;
108 int CHANNEL_;
109 int HEIGHT_IN_;
110 int WIDTH_IN_;
111 int HEIGHT_OUT_;
112 int WIDTH_OUT_;
113 };
115 template <typename Dtype>
116 class LRNLayer : public Layer<Dtype> {
117 public:
118 explicit LRNLayer(const LayerParameter& param)
119 : Layer<Dtype>(param) {};
120 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
121 vector<Blob<Dtype>*>* top);
122 protected:
123 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
124 vector<Blob<Dtype>*>* top);
125 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
126 vector<Blob<Dtype>*>* top);
127 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
128 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
129 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
130 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
131 // scale_ stores the intermediate summing results
132 Blob<Dtype> scale_;
133 int size_;
134 int pre_pad_;
135 Dtype alpha_;
136 Dtype beta_;
137 int num_;
138 int channels_;
139 int height_;
140 int width_;
141 };
143 template <typename Dtype>
144 class Im2colLayer : public Layer<Dtype> {
145 public:
146 explicit Im2colLayer(const LayerParameter& param)
147 : Layer<Dtype>(param) {};
148 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
149 vector<Blob<Dtype>*>* top);
150 protected:
151 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
152 vector<Blob<Dtype>*>* top);
153 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
154 vector<Blob<Dtype>*>* top);
155 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
156 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
157 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
158 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
159 int KSIZE_;
160 int STRIDE_;
161 int CHANNELS_;
162 int HEIGHT_;
163 int WIDTH_;
164 };
166 template <typename Dtype>
167 class PoolingLayer : public Layer<Dtype> {
168 public:
169 explicit PoolingLayer(const LayerParameter& param)
170 : Layer<Dtype>(param) {};
171 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
172 vector<Blob<Dtype>*>* top);
173 protected:
174 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
175 vector<Blob<Dtype>*>* top);
176 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
177 vector<Blob<Dtype>*>* top);
178 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
179 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
180 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
181 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
182 int KSIZE_;
183 int STRIDE_;
184 int CHANNELS_;
185 int HEIGHT_;
186 int WIDTH_;
187 int POOLED_HEIGHT_;
188 int POOLED_WIDTH_;
189 };
191 template <typename Dtype>
192 class ConvolutionLayer : public Layer<Dtype> {
193 public:
194 explicit ConvolutionLayer(const LayerParameter& param)
195 : Layer<Dtype>(param) {};
196 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
197 vector<Blob<Dtype>*>* top);
198 protected:
199 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
200 vector<Blob<Dtype>*>* top);
201 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
202 vector<Blob<Dtype>*>* top);
203 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
204 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
205 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
206 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
207 Blob<Dtype> col_bob_;
209 int KSIZE_;
210 int STRIDE_;
211 int NUM_;
212 int CHANNELS_;
213 int HEIGHT_;
214 int WIDTH_;
215 int NUM_OUTPUT_;
216 int GROUP_;
217 Blob<Dtype> col_buffer_;
218 shared_ptr<SyncedMemory> bias_multiplier_;
219 bool biasterm_;
220 int M_;
221 int K_;
222 int N_;
223 };
225 template <typename Dtype>
226 class DataLayer : public Layer<Dtype> {
227 public:
228 explicit DataLayer(const LayerParameter& param)
229 : Layer<Dtype>(param) {};
230 virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
231 vector<Blob<Dtype>*>* top);
232 protected:
233 virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
234 vector<Blob<Dtype>*>* top);
235 virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
236 vector<Blob<Dtype>*>* top);
237 virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
238 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
239 virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
240 const bool propagate_down, vector<Blob<Dtype>*>* bottom);
242 shared_ptr<leveldb::DB> db_;
243 shared_ptr<leveldb::Iterator> iter_;
244 int datum_size_;
245 };
247 } // namespace caffe
249 #endif // CAFFE_VISION_LAYERS_HPP_