// Copyright 2013 Yangqing Jia
#include <algorithm>
#include <cmath>
#include <cfloat>
#include <cstring>  // for memset

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"

using std::max;

namespace caffe {

template <typename Dtype>
void MultinomialLogisticLossLayer<Dtype>::SetUp(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
  CHECK_EQ(top->size(), 0) << "Loss Layer takes no output.";
  CHECK_EQ(bottom[0]->num(), bottom[1]->num())
      << "The data and label should have the same number.";
  CHECK_EQ(bottom[1]->channels(), 1);
  CHECK_EQ(bottom[1]->height(), 1);
  CHECK_EQ(bottom[1]->width(), 1);
}
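
// The multinomial logistic loss over a batch of size num is
//   L = -1/num * sum_i log(prob_i),
// where prob_i is the predicted probability of item i's true label.
// Its only nonzero gradient entries are at the true-label indices:
//   dL/dprob_i = -1 / (num * prob_i),
// which is what Backward_cpu fills into bottom_diff below.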
template <typename Dtype>
Dtype MultinomialLogisticLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const bool propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  const Dtype* bottom_data = (*bottom)[0]->cpu_data();
  const Dtype* bottom_label = (*bottom)[1]->cpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  int num = (*bottom)[0]->num();
  int dim = (*bottom)[0]->count() / (*bottom)[0]->num();
  memset(bottom_diff, 0, sizeof(Dtype) * (*bottom)[0]->count());
  Dtype loss = 0;
  const Dtype kLOG_THRESHOLD = 1e-8;
  for (int i = 0; i < num; ++i) {
    int label = static_cast<int>(bottom_label[i]);
    // Clamp the probability away from zero so log() stays finite.
    Dtype prob = max(bottom_data[i * dim + label], kLOG_THRESHOLD);
    loss -= log(prob);
    bottom_diff[i * dim + label] = -1. / prob / num;
  }
  return loss / num;
}

// TODO: implement the GPU version for multinomial loss
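//
// A minimal sketch of what that kernel could look like, kept here as a
// commented-out hypothetical (the name and layout are assumptions, mirroring
// Backward_cpu above): one thread per item clamps the true-label
// probability, writes the gradient, and records a per-item loss that the
// host would sum and divide by num. As in the CPU path, diff is assumed
// to have been zeroed beforehand.
//
// template <typename Dtype>
// __global__ void MultinomialLogisticLossBackwardGPU(const int num,
//     const int dim, const Dtype* data, const Dtype* label,
//     Dtype* diff, Dtype* loss) {
//   int i = blockIdx.x * blockDim.x + threadIdx.x;
//   if (i < num) {
//     int l = static_cast<int>(label[i]);
//     Dtype prob = max(data[i * dim + l], Dtype(1e-8));
//     diff[i * dim + l] = Dtype(-1.) / prob / Dtype(num);
//     loss[i] = -log(prob);
//   }
// }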

template <typename Dtype>
void EuclideanLossLayer<Dtype>::SetUp(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 2) << "Loss Layer takes two blobs as input.";
  CHECK_EQ(top->size(), 0) << "Loss Layer takes no output.";
  CHECK_EQ(bottom[0]->num(), bottom[1]->num())
      << "The data and label should have the same number.";
  CHECK_EQ(bottom[0]->channels(), bottom[1]->channels());
  CHECK_EQ(bottom[0]->height(), bottom[1]->height());
  CHECK_EQ(bottom[0]->width(), bottom[1]->width());
  difference_.Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
}
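
// The Euclidean loss over the num items in the batch is
//   L = 1/(2*num) * sum (a - b)^2,
// and its gradient with respect to the first bottom blob is (a - b) / num,
// computed below from the cached element-wise difference.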
template <typename Dtype>
Dtype EuclideanLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  int count = (*bottom)[0]->count();
  int num = (*bottom)[0]->num();
  caffe_sub(count, (*bottom)[0]->cpu_data(), (*bottom)[1]->cpu_data(),
      difference_.mutable_cpu_data());
  Dtype loss = caffe_cpu_dot(
      count, difference_.cpu_data(), difference_.cpu_data()) / num / Dtype(2);
  // Compute the gradient
  caffe_axpby(count, Dtype(1) / num, difference_.cpu_data(), Dtype(0),
      (*bottom)[0]->mutable_cpu_diff());
  return loss;
}

template <typename Dtype>
void AccuracyLayer<Dtype>::SetUp(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 2) << "Accuracy Layer takes two blobs as input.";
  CHECK_EQ(top->size(), 1) << "Accuracy Layer takes 1 output.";
  CHECK_EQ(bottom[0]->num(), bottom[1]->num())
      << "The data and label should have the same number.";
  CHECK_EQ(bottom[1]->channels(), 1);
  CHECK_EQ(bottom[1]->height(), 1);
  CHECK_EQ(bottom[1]->width(), 1);
  (*top)[0]->Reshape(1, 1, 1, 1);
}
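
// Accuracy is the fraction of items whose argmax over the dim prediction
// values matches the integer label; it is written to the single-element
// top blob.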
template <typename Dtype>
void AccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  Dtype accuracy = 0;
  const Dtype* bottom_data = bottom[0]->cpu_data();
  const Dtype* bottom_label = bottom[1]->cpu_data();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / bottom[0]->num();
  for (int i = 0; i < num; ++i) {
    // Find the argmax of the predictions for item i.
    Dtype maxval = -FLT_MAX;
    int max_id = 0;
    for (int j = 0; j < dim; ++j) {
      if (bottom_data[i * dim + j] > maxval) {
        maxval = bottom_data[i * dim + j];
        max_id = j;
      }
    }
    if (max_id == static_cast<int>(bottom_label[i])) {
      ++accuracy;
    }
  }
  accuracy /= num;
  // LOG(INFO) << "Accuracy: " << accuracy;
  (*top)[0]->mutable_cpu_data()[0] = accuracy;
}

INSTANTIATE_CLASS(MultinomialLogisticLossLayer);
INSTANTIATE_CLASS(EuclideanLossLayer);
INSTANTIATE_CLASS(AccuracyLayer);

}  // namespace caffe