// Copyright 2013 Yangqing Jia

#ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_
#define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_

#include <glog/logging.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <cmath>
#include <cstring>  // for memcpy and memset in GetObjAndGradient
#include <vector>

#include "caffe/layer.hpp"
#include "caffe/net.hpp"

using std::max;

namespace caffe {

// The gradient checker adds an L2 loss function on top of the top blobs,
// and checks the gradient of the layer against a numeric estimate.
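// Concretely, for top data y the injected objective is
//   L = (1/2) * sum_i y_i^2,  so  dL/dy_i = y_i,
// which is exactly what GetObjAndGradient() writes into the top diffs
// before Backward is called.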
template <typename Dtype>
class GradientChecker {
 public:
  GradientChecker(const Dtype stepsize, const Dtype threshold,
      const unsigned int seed = 1701, const Dtype kink = 0.,
      const Dtype kink_range = -1)
      : stepsize_(stepsize), threshold_(threshold), seed_(seed),
        kink_(kink), kink_range_(kink_range) {}
  // Checks the gradient of a layer, with provided bottom blobs and top
  // blobs. The gradient checker checks the gradient with respect to the
  // parameters of the layer, as well as the bottom blobs: all of them if
  // check_bottom is negative, otherwise only bottom[check_bottom].
  // Note that after the gradient check, we do not guarantee that the data
  // stored in the layer parameters and the blobs is unchanged.
  void CheckGradient(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>& top, int check_bottom = -1) {
    layer.SetUp(bottom, &top);
    CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1);
  }
  void CheckGradientExhaustive(Layer<Dtype>& layer,
      vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
      int check_bottom = -1);

  void CheckGradientSingle(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>& top, int check_bottom, int top_id,
      int top_data_id);

  // Checks the gradient of a network. The network should not have any data
  // layers or loss layers, since the function does not explicitly deal with
  // such cases yet. All input blobs and parameter blobs are checked,
  // layer-by-layer, to keep numerical errors from accumulating.
  void CheckGradientNet(Net<Dtype>& net, vector<Blob<Dtype>*>& input);

 protected:
  Dtype GetObjAndGradient(vector<Blob<Dtype>*>& top, int top_id = -1,
      int top_data_id = -1);
  Dtype stepsize_;
  Dtype threshold_;
  unsigned int seed_;
  Dtype kink_;
  Dtype kink_range_;
};
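
// A minimal usage sketch, as it might appear in a gtest test body. The layer
// type, blob shape, and checker constants below are illustrative assumptions,
// not part of this header:
//
//   Blob<float> bottom_blob(2, 3, 4, 5);
//   FillerParameter filler_param;
//   GaussianFiller<float> filler(filler_param);
//   filler.Fill(&bottom_blob);                   // randomize the input data
//   vector<Blob<float>*> bottom(1, &bottom_blob);
//   Blob<float> top_blob;
//   vector<Blob<float>*> top(1, &top_blob);
//   LayerParameter layer_param;
//   SomeLayer<float> layer(layer_param);         // hypothetical layer under test
//   GradientChecker<float> checker(1e-2, 1e-3);  // stepsize, threshold
//   checker.CheckGradientExhaustive(layer, bottom, top);
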
// Detailed implementations are as follows.

template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
    vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
    int check_bottom, int top_id, int top_data_id) {
  // First, figure out which blobs we need to check against: all the layer's
  // parameter blobs, plus either all bottom blobs or a single requested one.
  vector<Blob<Dtype>*> blobs_to_check;
  for (int i = 0; i < layer.blobs().size(); ++i) {
    blobs_to_check.push_back(layer.blobs()[i].get());
  }
  if (check_bottom < 0) {
    for (int i = 0; i < bottom.size(); ++i) {
      blobs_to_check.push_back(bottom[i]);
    }
  } else {
    CHECK_LT(check_bottom, bottom.size());
    blobs_to_check.push_back(bottom[check_bottom]);
  }
  // Go through the bottom and parameter blobs.
  // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
  for (int blob_id = 0; blob_id < blobs_to_check.size(); ++blob_id) {
    Blob<Dtype>* current_blob = blobs_to_check[blob_id];
    // LOG(ERROR) << "Blob " << blob_id << ": checking "
    //     << current_blob->count() << " parameters.";
    // Go through the values.
    for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
      // First, obtain the original data.
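      // Reset the RNG seed before each Forward pass so that any layer
      // randomness (e.g., dropout masks) is identical across the three
      // evaluations below, keeping the finite differences consistent.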
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype computed_objective = GetObjAndGradient(top, top_id, top_data_id);
      // Add any additional loss reported by the layer itself.
      computed_objective += layer.Backward(top, true, &bottom);
      Dtype computed_gradient = current_blob->cpu_diff()[feat_id];
      // Compute the objective after adding stepsize to the feature.
      current_blob->mutable_cpu_data()[feat_id] += stepsize_;
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype positive_objective = GetObjAndGradient(top, top_id, top_data_id);
      positive_objective += layer.Backward(top, true, &bottom);
      // Compute the objective after subtracting stepsize from the feature.
      current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2;
      Caffe::set_random_seed(seed_);
      layer.Forward(bottom, &top);
      Dtype negative_objective = GetObjAndGradient(top, top_id, top_data_id);
      negative_objective += layer.Backward(top, true, &bottom);
      // Restore the original feature value.
      current_blob->mutable_cpu_data()[feat_id] += stepsize_;
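      // Central difference estimate: dL/dx ~ (L(x + h) - L(x - h)) / (2h).
      // Its truncation error is O(h^2), versus O(h) for a one-sided
      // difference, which is why both a positive and a negative step are
      // taken.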
      Dtype estimated_gradient = (positive_objective - negative_objective) /
          stepsize_ / 2.;
      Dtype feature = current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
      //     << current_blob->cpu_diff()[feat_id];
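      // Only check the gradient when the feature lies outside the kink band
      // [kink_ - kink_range_, kink_ + kink_range_]: near a kink the objective
      // is typically non-differentiable (e.g., ReLU at 0), so the numeric
      // and analytic gradients may legitimately disagree.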
      if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
        // We check relative accuracy, but for values that are too small we
        // clamp the scale factor at 1 to avoid dividing by a tiny number.
        Dtype scale = max(
            max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
        EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
            << "debug: (top_id, top_data_id, blob_id, feat_id)="
            << top_id << "," << top_data_id << "," << blob_id << ","
            << feat_id;
        EXPECT_LT(computed_gradient, estimated_gradient + threshold_ * scale)
            << "debug: (top_id, top_data_id, blob_id, feat_id)="
            << top_id << "," << top_data_id << "," << blob_id << ","
            << feat_id;
      }
      // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
      // LOG(ERROR) << "computed gradient: " << computed_gradient
      //     << " estimated_gradient: " << estimated_gradient;
    }
  }
}
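
// CheckGradientExhaustive runs CheckGradientSingle once per element of every
// top blob, and each single check performs three Forward/Backward passes per
// checked value, so the total cost grows with (top count) x (parameter plus
// bottom count). It is therefore best suited to small blobs.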
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
    vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
    int check_bottom) {
  layer.SetUp(bottom, &top);
  // LOG(ERROR) << "Exhaustive Mode.";
  for (int i = 0; i < top.size(); ++i) {
    // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
    for (int j = 0; j < top[i]->count(); ++j) {
      // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
      CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
    }
  }
}

template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientNet(
    Net<Dtype>& net, vector<Blob<Dtype>*>& input) {
  const vector<shared_ptr<Layer<Dtype> > >& layers = net.layers();
  vector<vector<Blob<Dtype>*> >& bottom_vecs = net.bottom_vecs();
  vector<vector<Blob<Dtype>*> >& top_vecs = net.top_vecs();
  for (int i = 0; i < layers.size(); ++i) {
    net.Forward(input);
    LOG(ERROR) << "Checking gradient for " << layers[i]->layer_param().name();
    CheckGradientExhaustive(*(layers[i].get()), bottom_vecs[i], top_vecs[i]);
  }
}
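
// GetObjAndGradient has two modes. With top_id < 0 the objective is the L2
// loss L = (1/2) * sum y^2 over all top blobs, and each top diff is set to
// the data itself (dL/dy = y). With a valid (top_id, top_data_id) pair, the
// objective is that single output element, and the diffs are set one-hot so
// that Backward computes the gradient of exactly that element.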
template <typename Dtype>
Dtype GradientChecker<Dtype>::GetObjAndGradient(vector<Blob<Dtype>*>& top,
    int top_id, int top_data_id) {
  Dtype loss = 0;
  if (top_id < 0) {
    // The loss is half of the sum of squares of all outputs.
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* top_blob = top[i];
      const Dtype* top_blob_data = top_blob->cpu_data();
      Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
      int count = top_blob->count();
      for (int j = 0; j < count; ++j) {
        loss += top_blob_data[j] * top_blob_data[j];
      }
      // Set the diff: simply the data.
      memcpy(top_blob_diff, top_blob_data, sizeof(Dtype) * top_blob->count());
    }
    loss /= 2.;
  } else {
    // The loss is the top_data_id-th element in the top_id-th blob.
    for (int i = 0; i < top.size(); ++i) {
      Blob<Dtype>* top_blob = top[i];
      Dtype* top_blob_diff = top_blob->mutable_cpu_diff();
      memset(top_blob_diff, 0, sizeof(Dtype) * top_blob->count());
    }
    loss = top[top_id]->cpu_data()[top_data_id];
    top[top_id]->mutable_cpu_diff()[top_data_id] = 1.;
  }
  return loss;
}

}  // namespace caffe

#endif  // CAFFE_TEST_GRADIENT_CHECK_UTIL_H_