// 0bca34dff744c8222ac0d7964cb22834cce6799c
1 // Copyright 2013 Yangqing Jia
3 #include <cstring>
4 #include <cuda_runtime.h>
6 #include "gtest/gtest.h"
7 #include "caffe/blob.hpp"
8 #include "caffe/common.hpp"
9 #include "caffe/filler.hpp"
10 #include "caffe/vision_layers.hpp"
11 #include "caffe/test/test_gradient_check_util.hpp"
13 #include "caffe/test/test_caffe_main.hpp"
15 namespace caffe {
17 extern cudaDeviceProp CAFFE_TEST_CUDA_PROP;
19 template <typename Dtype>
20 class NeuronLayerTest : public ::testing::Test {
21 protected:
22 NeuronLayerTest()
23 : blob_bottom_(new Blob<Dtype>(2, 3, 4, 5)),
24 blob_top_(new Blob<Dtype>()) {
25 // fill the values
26 FillerParameter filler_param;
27 GaussianFiller<Dtype> filler(filler_param);
28 filler.Fill(this->blob_bottom_);
29 blob_bottom_vec_.push_back(blob_bottom_);
30 blob_top_vec_.push_back(blob_top_);
31 };
32 virtual ~NeuronLayerTest() { delete blob_bottom_; delete blob_top_; }
33 Blob<Dtype>* const blob_bottom_;
34 Blob<Dtype>* const blob_top_;
35 vector<Blob<Dtype>*> blob_bottom_vec_;
36 vector<Blob<Dtype>*> blob_top_vec_;
37 };
// Instantiate every typed test below for both single and double precision.
typedef ::testing::Types<float, double> Dtypes;
TYPED_TEST_CASE(NeuronLayerTest, Dtypes);
42 TYPED_TEST(NeuronLayerTest, TestReLUCPU) {
43 LayerParameter layer_param;
44 Caffe::set_mode(Caffe::CPU);
45 ReLULayer<TypeParam> layer(layer_param);
46 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
47 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
48 // Now, check values
49 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
50 const TypeParam* top_data = this->blob_top_->cpu_data();
51 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
52 EXPECT_GE(top_data[i], 0.);
53 EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
54 }
55 }
58 TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) {
59 LayerParameter layer_param;
60 Caffe::set_mode(Caffe::CPU);
61 ReLULayer<TypeParam> layer(layer_param);
62 GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
63 checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
64 }
67 TYPED_TEST(NeuronLayerTest, TestReLUGPU) {
68 LayerParameter layer_param;
69 Caffe::set_mode(Caffe::GPU);
70 ReLULayer<TypeParam> layer(layer_param);
71 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
72 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
73 // Now, check values
74 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
75 const TypeParam* top_data = this->blob_top_->cpu_data();
76 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
77 EXPECT_GE(top_data[i], 0.);
78 EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]);
79 }
80 }
83 TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) {
84 LayerParameter layer_param;
85 Caffe::set_mode(Caffe::GPU);
86 ReLULayer<TypeParam> layer(layer_param);
87 GradientChecker<TypeParam> checker(1e-2, 1e-3, 1701, 0., 0.01);
88 checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
89 }
92 TYPED_TEST(NeuronLayerTest, TestDropoutCPU) {
93 LayerParameter layer_param;
94 Caffe::set_mode(Caffe::CPU);
95 Caffe::set_phase(Caffe::TRAIN);
96 DropoutLayer<TypeParam> layer(layer_param);
97 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
98 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
99 // Now, check values
100 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
101 const TypeParam* top_data = this->blob_top_->cpu_data();
102 float scale = 1. / (1. - layer_param.dropout_ratio());
103 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
104 if (top_data[i] != 0) {
105 EXPECT_EQ(top_data[i], bottom_data[i] * scale);
106 }
107 }
108 }
111 TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) {
112 LayerParameter layer_param;
113 Caffe::set_mode(Caffe::CPU);
114 DropoutLayer<TypeParam> layer(layer_param);
115 GradientChecker<TypeParam> checker(1e-2, 1e-3);
116 checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
117 }
120 TYPED_TEST(NeuronLayerTest, TestDropoutCPUTestPhase) {
121 LayerParameter layer_param;
122 Caffe::set_mode(Caffe::CPU);
123 Caffe::set_phase(Caffe::TEST);
124 DropoutLayer<TypeParam> layer(layer_param);
125 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
126 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
127 // Now, check values
128 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
129 const TypeParam* top_data = this->blob_top_->cpu_data();
130 float scale = 1. / (1. - layer_param.dropout_ratio());
131 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
132 if (top_data[i] != 0) {
133 EXPECT_EQ(top_data[i], bottom_data[i]);
134 }
135 }
136 }
139 TYPED_TEST(NeuronLayerTest, TestDropoutGPU) {
140 LayerParameter layer_param;
141 Caffe::set_mode(Caffe::GPU);
142 Caffe::set_phase(Caffe::TRAIN);
143 DropoutLayer<TypeParam> layer(layer_param);
144 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
145 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
146 // Now, check values
147 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
148 const TypeParam* top_data = this->blob_top_->cpu_data();
149 float scale = 1. / (1. - layer_param.dropout_ratio());
150 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
151 if (top_data[i] != 0) {
152 EXPECT_EQ(top_data[i], bottom_data[i] * scale);
153 }
154 }
155 }
158 TYPED_TEST(NeuronLayerTest, TestDropoutGradientGPU) {
159 if (CAFFE_TEST_CUDA_PROP.major >= 2) {
160 LayerParameter layer_param;
161 Caffe::set_mode(Caffe::GPU);
162 DropoutLayer<TypeParam> layer(layer_param);
163 GradientChecker<TypeParam> checker(1e-2, 1e-3);
164 // it is too expensive to call curand multiple times, so we don't do an
165 // exhaustive gradient check.
166 checker.CheckGradient(layer, this->blob_bottom_vec_, this->blob_top_vec_);
167 } else {
168 LOG(ERROR) << "Skipping test to spare my laptop.";
169 }
170 }
173 TYPED_TEST(NeuronLayerTest, TestDropoutGPUTestPhase) {
174 LayerParameter layer_param;
175 Caffe::set_mode(Caffe::GPU);
176 Caffe::set_phase(Caffe::TEST);
177 DropoutLayer<TypeParam> layer(layer_param);
178 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
179 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
180 // Now, check values
181 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
182 const TypeParam* top_data = this->blob_top_->cpu_data();
183 float scale = 1. / (1. - layer_param.dropout_ratio());
184 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
185 if (top_data[i] != 0) {
186 EXPECT_EQ(top_data[i], bottom_data[i]);
187 }
188 }
189 }
192 TYPED_TEST(NeuronLayerTest, TestBNLLCPU) {
193 LayerParameter layer_param;
194 Caffe::set_mode(Caffe::CPU);
195 BNLLLayer<TypeParam> layer(layer_param);
196 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
197 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
198 // Now, check values
199 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
200 const TypeParam* top_data = this->blob_top_->cpu_data();
201 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
202 EXPECT_GE(top_data[i], 0.);
203 EXPECT_GE(top_data[i], bottom_data[i]);
204 }
205 }
208 TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) {
209 LayerParameter layer_param;
210 Caffe::set_mode(Caffe::CPU);
211 BNLLLayer<TypeParam> layer(layer_param);
212 GradientChecker<TypeParam> checker(1e-2, 1e-3);
213 checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
214 }
217 TYPED_TEST(NeuronLayerTest, TestBNLLGPU) {
218 LayerParameter layer_param;
219 Caffe::set_mode(Caffe::GPU);
220 BNLLLayer<TypeParam> layer(layer_param);
221 layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
222 layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
223 // Now, check values
224 const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
225 const TypeParam* top_data = this->blob_top_->cpu_data();
226 for (int i = 0; i < this->blob_bottom_->count(); ++i) {
227 EXPECT_GE(top_data[i], 0.);
228 EXPECT_GE(top_data[i], bottom_data[i]);
229 }
230 }
233 TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) {
234 LayerParameter layer_param;
235 Caffe::set_mode(Caffe::GPU);
236 BNLLLayer<TypeParam> layer(layer_param);
237 GradientChecker<TypeParam> checker(1e-2, 1e-3);
238 checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_);
239 }
244 }