// Copyright 2013 Yangqing Jia

#include <algorithm>
#include <cfloat>
#include <vector>
#include <thrust/device_vector.h>

#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"

using std::max;

namespace caffe {

template <typename Dtype>
void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  CHECK_EQ(bottom.size(), 1) << "Softmax Layer takes a single blob as input.";
  CHECK_EQ(top->size(), 1) << "Softmax Layer takes a single blob as output.";
  (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
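  // sum_multiplier_ is a vector of ones with dim = channels * height * width
  // entries; it lets a per-example value be broadcast across a row (via gemm)
  // or a row be summed (via gemv) without writing explicit loops.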
  sum_multiplier_.Reshape(1, bottom[0]->channels(),
      bottom[0]->height(), bottom[0]->width());
  Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
  for (int i = 0; i < sum_multiplier_.count(); ++i) {
    multiplier_data[i] = 1.;
  }
  scale_.Reshape(bottom[0]->num(), 1, 1, 1);
}

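// Softmax over the dim = channels * height * width values of each example:
//   top[i][j] = exp(bottom[i][j] - m_i) / sum_k exp(bottom[i][k] - m_i),
//   where m_i = max_k bottom[i][k].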
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / bottom[0]->num();
  memcpy(top_data, bottom_data, sizeof(Dtype) * bottom[0]->count());
  // we need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  for (int i = 0; i < num; ++i) {
    scale_data[i] = bottom_data[i * dim];
    for (int j = 0; j < dim; ++j) {
      scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]);
    }
  }
  // subtraction
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_data, sum_multiplier_.cpu_data(), 1., top_data);
  // Perform exponentiation
  caffe_exp<Dtype>(num * dim, top_data, top_data);
  // sum after exp
  caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
      sum_multiplier_.cpu_data(), 0., scale_data);
  // Do division
  for (int i = 0; i < num; ++i) {
    caffe_scal<Dtype>(dim, Dtype(1.) / scale_data[i], top_data + i * dim);
  }
}

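// CUDA kernel: one thread per example; each thread scans the dim values of
// its row and writes the row maximum to out.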
template <typename Dtype>
__global__ void kernel_get_max(const int num, const int dim,
    const Dtype* data, Dtype* out) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < num) {
    Dtype maxval = -FLT_MAX;
    for (int i = 0; i < dim; ++i) {
      maxval = max(data[index * dim + i], maxval);
    }
    out[index] = maxval;
  }
}

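// CUDA kernel: one thread per element; divides each value by the per-example
// sum stored in scale.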
template <typename Dtype>
__global__ void kernel_softmax_div(const int num, const int dim,
    const Dtype* scale, Dtype* data) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < num * dim) {
    int n = index / dim;
    data[index] /= scale[n];
  }
}

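// CUDA kernel: elementwise exponentiation; count is the total number of
// values (num * dim at the call site).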
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < count) {
    out[index] = exp(data[index]);
  }
}

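// GPU forward pass: same sequence of steps as Forward_cpu, with the max,
// exponentiation, and division handled by the kernels above and the
// broadcast subtraction and row sums handled by the caffe_gpu_gemm/gemv
// wrappers.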
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / bottom[0]->num();
  CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
      sizeof(Dtype) * bottom[0]->count(), cudaMemcpyDeviceToDevice));
  // we need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // Compute max
  kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
      num, dim, bottom_data, scale_data);
  // subtraction
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_data, sum_multiplier_.gpu_data(), 1., top_data);
  // Perform exponentiation
  kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
      num * dim, top_data, top_data);
  // sum after exp
  caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
      sum_multiplier_.gpu_data(), 0., scale_data);
  // Do division
  kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim),
                              CAFFE_CUDA_NUM_THREADS>>>(
      num, dim, scale_data, top_data);
}

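// Backward pass: with y = softmax(x) and dL/dy = top_diff, the gradient is
//   dL/dx_j = y_j * (dL/dy_j - sum_k dL/dy_k * y_k),
// i.e. subtract the per-example dot product of top_diff and top_data from
// top_diff, then multiply elementwise by top_data.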
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int num = top[0]->num();
  int dim = top[0]->count() / top[0]->num();
  memcpy(bottom_diff, top_diff, sizeof(Dtype) * top[0]->count());
  // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
  for (int i = 0; i < num; ++i) {
    scale_data[i] = caffe_cpu_dot<Dtype>(dim, top_diff + i * dim,
        top_data + i * dim);
  }
  // subtraction
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_data, sum_multiplier_.cpu_data(), 1., bottom_diff);
  // elementwise multiplication
  caffe_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
  return Dtype(0);
}

// GPU backward pass: same computation as Backward_cpu; the per-example dot
// products are written directly to device memory (scale_data), which requires
// temporarily switching cuBLAS to device pointer mode.
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
  int num = top[0]->num();
  int dim = top[0]->count() / top[0]->num();
  CUDA_CHECK(cudaMemcpy(bottom_diff, top_diff,
      sizeof(Dtype) * top[0]->count(), cudaMemcpyDeviceToDevice));
  // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
  // cuda dot returns the result to cpu, so we temporarily change the pointer
  // mode
  CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(),
      CUBLAS_POINTER_MODE_DEVICE));
  Dtype* scale_data = scale_.mutable_gpu_data();
  for (int i = 0; i < num; ++i) {
    caffe_gpu_dot<Dtype>(dim, top_diff + i * dim,
        top_data + i * dim, scale_data + i);
  }
  CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(),
      CUBLAS_POINTER_MODE_HOST));
  // subtraction
  caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      scale_.gpu_data(), sum_multiplier_.gpu_data(), 1., bottom_diff);
  // elementwise multiplication
  caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
  return Dtype(0);
}

INSTANTIATE_CLASS(SoftmaxLayer);

}  // namespace caffe