commitdiff (parent: 143b66b)
author    Yangqing Jia <jiayq84@gmail.com>  Fri, 27 Sep 2013 18:53:12 +0000 (11:53 -0700)
committer Yangqing Jia <jiayq84@gmail.com>  Fri, 27 Sep 2013 18:53:12 +0000 (11:53 -0700)
src/caffe/test/lenet.hpp
src/caffe/test/test_caffe_main.hpp
src/caffe/test/test_gradient_check_util.hpp
diff --git a/src/caffe/test/lenet.hpp b/src/caffe/test/lenet.hpp
index 266f0b2ff2e527935affa425068ac90d91bfd804..017463a053e7007ffdc48fcd60ff5faeea4eea22 100644
--- a/src/caffe/test/lenet.hpp
+++ b/src/caffe/test/lenet.hpp
+// Copyright Yangqing Jia 2013
+
+// This file is merely here so we can easily get a string of the lenet.
+// It is actually not the very original LeNet, but with the sigmoid layers
+// replaced by ReLU layers.
+
#ifndef CAFFE_TEST_LENET_HPP_
#define CAFFE_TEST_LENET_HPP_
namespace caffe {
-const char* kLENET = "name: \"LeNet\"\n\
-bottom: \"data\"\n\
-bottom: \"label\"\n\
-layers {\n\
- layer {\n\
- name: \"conv1\"\n\
- type: \"conv\"\n\
- num_output: 20\n\
- kernelsize: 5\n\
- stride: 1\n\
- weight_filler {\n\
- type: \"xavier\"\n\
- }\n\
- bias_filler {\n\
- type: \"constant\"\n\
- }\n\
- }\n\
- bottom: \"data\"\n\
- top: \"conv1\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"pool1\"\n\
- type: \"pool\"\n\
- kernelsize: 2\n\
- stride: 2\n\
- pool: MAX\n\
- }\n\
- bottom: \"conv1\"\n\
- top: \"pool1\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"conv2\"\n\
- type: \"conv\"\n\
- num_output: 50\n\
- kernelsize: 5\n\
- stride: 1\n\
- weight_filler {\n\
- type: \"xavier\"\n\
- }\n\
- bias_filler {\n\
- type: \"constant\"\n\
- }\n\
- }\n\
- bottom: \"pool1\"\n\
- top: \"conv2\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"pool2\"\n\
- type: \"pool\"\n\
- kernelsize: 2\n\
- stride: 2\n\
- pool: MAX\n\
- }\n\
- bottom: \"conv2\"\n\
- top: \"pool2\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"ip1\"\n\
- type: \"innerproduct\"\n\
- num_output: 500\n\
- weight_filler {\n\
- type: \"xavier\"\n\
- }\n\
- bias_filler {\n\
- type: \"constant\"\n\
- }\n\
- }\n\
- bottom: \"pool2\"\n\
- top: \"ip1\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"relu1\"\n\
- type: \"relu\"\n\
- }\n\
- bottom: \"ip1\"\n\
- top: \"relu1\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"ip2\"\n\
- type: \"innerproduct\"\n\
- num_output: 10\n\
- weight_filler {\n\
- type: \"xavier\"\n\
- }\n\
- bias_filler {\n\
- type: \"constant\"\n\
- }\n\
- }\n\
- bottom: \"relu1\"\n\
- top: \"ip2\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"prob\"\n\
- type: \"softmax\"\n\
- }\n\
- bottom: \"ip2\"\n\
- top: \"prob\"\n\
-}\n\
-layers {\n\
- layer {\n\
- name: \"loss\"\n\
- type: \"multinomial_logistic_loss\"\n\
- }\n\
- bottom: \"prob\"\n\
- bottom: \"label\"\n\
-}";
+const char* kLENET = "name: \"LeNet\"\n"
+"bottom: \"data\"\n"
+"bottom: \"label\"\n"
+"layers {\n"
+" layer {\n"
+" name: \"conv1\"\n"
+" type: \"conv\"\n"
+" num_output: 20\n"
+" kernelsize: 5\n"
+" stride: 1\n"
+" weight_filler {\n"
+" type: \"xavier\"\n"
+" }\n"
+" bias_filler {\n"
+" type: \"constant\"\n"
+" }\n"
+" }\n"
+" bottom: \"data\"\n"
+" top: \"conv1\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"pool1\"\n"
+" type: \"pool\"\n"
+" kernelsize: 2\n"
+" stride: 2\n"
+" pool: MAX\n"
+" }\n"
+" bottom: \"conv1\"\n"
+" top: \"pool1\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"conv2\"\n"
+" type: \"conv\"\n"
+" num_output: 50\n"
+" kernelsize: 5\n"
+" stride: 1\n"
+" weight_filler {\n"
+" type: \"xavier\"\n"
+" }\n"
+" bias_filler {\n"
+" type: \"constant\"\n"
+" }\n"
+" }\n"
+" bottom: \"pool1\"\n"
+" top: \"conv2\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"pool2\"\n"
+" type: \"pool\"\n"
+" kernelsize: 2\n"
+" stride: 2\n"
+" pool: MAX\n"
+" }\n"
+" bottom: \"conv2\"\n"
+" top: \"pool2\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"ip1\"\n"
+" type: \"innerproduct\"\n"
+" num_output: 500\n"
+" weight_filler {\n"
+" type: \"xavier\"\n"
+" }\n"
+" bias_filler {\n"
+" type: \"constant\"\n"
+" }\n"
+" }\n"
+" bottom: \"pool2\"\n"
+" top: \"ip1\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"relu1\"\n"
+" type: \"relu\"\n"
+" }\n"
+" bottom: \"ip1\"\n"
+" top: \"relu1\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"ip2\"\n"
+" type: \"innerproduct\"\n"
+" num_output: 10\n"
+" weight_filler {\n"
+" type: \"xavier\"\n"
+" }\n"
+" bias_filler {\n"
+" type: \"constant\"\n"
+" }\n"
+" }\n"
+" bottom: \"relu1\"\n"
+" top: \"ip2\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"prob\"\n"
+" type: \"softmax\"\n"
+" }\n"
+" bottom: \"ip2\"\n"
+" top: \"prob\"\n"
+"}\n"
+"layers {\n"
+" layer {\n"
+" name: \"loss\"\n"
+" type: \"multinomial_logistic_loss\"\n"
+" }\n"
+" bottom: \"prob\"\n"
+" bottom: \"label\"\n"
+"}";
} // namespace caffe
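
For reference, a minimal sketch of how a test might consume this string: parsing the inline prototxt into a NetParameter with protobuf's TextFormat. The header path, the field accessors, and the standalone main() are assumptions based on the caffe.proto of this era, not part of this commit.

#include <string>
#include <glog/logging.h>
#include <google/protobuf/text_format.h>
#include "caffe/proto/caffe.pb.h"   // assumed location of the generated proto header
#include "caffe/test/lenet.hpp"

int main() {
  // Parse the inline prototxt string into a NetParameter message.
  caffe::NetParameter net_param;
  bool parsed = google::protobuf::TextFormat::ParseFromString(
      std::string(caffe::kLENET), &net_param);
  CHECK(parsed) << "Could not parse kLENET as a NetParameter.";
  LOG(INFO) << "Parsed net \"" << net_param.name() << "\" with "
            << net_param.layers_size() << " layers.";
  return 0;
}
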
diff --git a/src/caffe/test/test_caffe_main.hpp b/src/caffe/test/test_caffe_main.hpp
index 9ee11a3cd0c275bac9c047cbfb0b1b49fd57bff0..a8c16573c79faa4e5b4c64862f2f19afed4870b8 100644
--- a/src/caffe/test/test_caffe_main.hpp
+++ b/src/caffe/test/test_caffe_main.hpp
#ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
#define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
+#include <cuda_runtime.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
#include <cstdlib>
#include <cstdio>
#include <iostream>
-#include <cuda_runtime.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
namespace caffe {
diff --git a/src/caffe/test/test_gradient_check_util.hpp b/src/caffe/test/test_gradient_check_util.hpp
index 0c34861b2da88df1c21044f5aacec2f7ab3ded54..c5405498af0f48a94e65ba3b046d2f22850efab6 100644
--- a/src/caffe/test/test_gradient_check_util.hpp
+++ b/src/caffe/test/test_gradient_check_util.hpp
#ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_
#define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_
-#include <algorithm>
-#include <cmath>
#include <glog/logging.h>
#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
#include "caffe/layer.hpp"
using std::max;
const unsigned int seed = 1701, const Dtype kink = 0.,
const Dtype kink_range = -1)
: stepsize_(stepsize), threshold_(threshold), seed_(seed),
- kink_(kink), kink_range_(kink_range) {};
+ kink_(kink), kink_range_(kink_range) {}
// Checks the gradient of a layer, with provided bottom layers and top
// layers. The gradient checker will check the gradient with respect to
// the parameters of the layer, as well as the input blobs if check_through
void CheckGradientSingle(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>& top, int check_bottom, int top_id,
int top_data_id);
+
protected:
Dtype GetObjAndGradient(vector<Blob<Dtype>*>& top, int top_id = -1,
int top_data_id = -1);
blobs_to_check.push_back(bottom[check_bottom]);
}
// go through the bottom and parameter blobs
- //LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
+ // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
Blob<Dtype>* current_blob = blobs_to_check[blobid];
- //LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
- // << " parameters.";
+ // LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
+ // << " parameters.";
// go through the values
for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
// First, obtain the original data
Dtype estimated_gradient = (positive_objective - negative_objective) /
stepsize_ / 2.;
Dtype feature = current_blob->cpu_data()[feat_id];
- //LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
- // << current_blob->cpu_diff()[feat_id];
+ // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
+ // << current_blob->cpu_diff()[feat_id];
if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
// We check relative accuracy, but for too small values, we threshold
// the scale factor by 1.
- Dtype scale = max(max(fabs(computed_gradient), fabs(estimated_gradient)),
- 1.);
+ Dtype scale = max(
+ max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
<< "debug: (top_id, top_data_id, blob_id, feat_id)="
<< top_id << "," << top_data_id << "," << blobid << "," << feat_id;
<< "debug: (top_id, top_data_id, blob_id, feat_id)="
<< top_id << "," << top_data_id << "," << blobid << "," << feat_id;
}
- //LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
- //LOG(ERROR) << "computed gradient: " << computed_gradient
- // << " estimated_gradient: " << estimated_gradient;
+ // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
+ // LOG(ERROR) << "computed gradient: " << computed_gradient
+ // << " estimated_gradient: " << estimated_gradient;
}
}
}
template <typename Dtype>
void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
- vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top, int check_bottom) {
+ vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
+ int check_bottom) {
layer.SetUp(bottom, &top);
- //LOG(ERROR) << "Exhaustive Mode.";
+ // LOG(ERROR) << "Exhaustive Mode.";
for (int i = 0; i < top.size(); ++i) {
- //LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
+ // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
for (int j = 0; j < top[i]->count(); ++j) {
- //LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
+ // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
}
}
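
The check above compares the layer's analytic gradient against a central-difference estimate, (f(x + h) - f(x - h)) / (2h), and scales the tolerance by max(|computed|, |estimated|, 1) so that relative accuracy is checked except near zero; points within kink_range_ of a kink (e.g. ReLU at 0) are skipped. A minimal self-contained sketch of the same idea on a scalar toy function (the function and names here are illustrative, not Caffe APIs):

#include <algorithm>
#include <cmath>
#include <cstdio>

// Toy objective and its analytic derivative.
static double f(double x) { return x * x * x; }
static double df(double x) { return 3.0 * x * x; }

int main() {
  const double stepsize = 1e-2;
  const double threshold = 1e-3;
  for (double x = -2.0; x <= 2.0; x += 0.5) {
    // Central-difference estimate, mirroring
    // (positive_objective - negative_objective) / stepsize_ / 2.
    double estimated = (f(x + stepsize) - f(x - stepsize)) / stepsize / 2.0;
    double computed = df(x);
    // Relative tolerance, with the scale thresholded below by 1
    // so tiny gradients are compared in absolute terms.
    double scale = std::max(
        std::max(std::fabs(computed), std::fabs(estimated)), 1.0);
    bool ok = std::fabs(computed - estimated) <= threshold * scale;
    std::printf("x=%+.2f computed=%+.4f estimated=%+.4f %s\n",
                x, computed, estimated, ok ? "OK" : "FAIL");
  }
  return 0;
}
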