1 // Copyright 2013 Yangqing Jia
#include <cuda_runtime.h>
#include <fcntl.h>

#include <cstdlib>
#include <cstring>

#include <google/protobuf/text_format.h>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/net.hpp"
#include "caffe/optimization/solver.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/io.hpp"
17 using namespace caffe;
19 int main(int argc, char** argv) {
20 cudaSetDevice(1);
21 Caffe::set_mode(Caffe::GPU);
23 NetParameter net_param;
24 ReadProtoFromTextFile("caffe/test/data/lenet.prototxt",
25 &net_param);
26 vector<Blob<float>*> bottom_vec;
27 Net<float> caffe_net(net_param, bottom_vec);
29 // Run the network without training.
30 LOG(ERROR) << "Performing Forward";
31 caffe_net.Forward(bottom_vec);
32 LOG(ERROR) << "Performing Backward";
33 LOG(ERROR) << "Initial loss: " << caffe_net.Backward();
35 SolverParameter solver_param;
36 solver_param.set_base_lr(0.01);
37 solver_param.set_display(0);
38 solver_param.set_max_iter(6000);
39 solver_param.set_lr_policy("inv");
40 solver_param.set_gamma(0.0001);
41 solver_param.set_power(0.75);
42 solver_param.set_momentum(0.9);
44 LOG(ERROR) << "Starting Optimization";
45 SGDSolver<float> solver(solver_param);
46 solver.Solve(&caffe_net);
47 LOG(ERROR) << "Optimization Done.";
49 // Run the network after training.
50 LOG(ERROR) << "Performing Forward";
51 caffe_net.Forward(bottom_vec);
52 LOG(ERROR) << "Performing Backward";
53 float loss = caffe_net.Backward();
54 LOG(ERROR) << "Final loss: " << loss;
56 NetParameter trained_net_param;
57 caffe_net.ToProto(&trained_net_param);
59 NetParameter traintest_net_param;
60 ReadProtoFromTextFile("caffe/test/data/lenet_traintest.prototxt",
61 &traintest_net_param);
62 Net<float> caffe_traintest_net(traintest_net_param, bottom_vec);
63 caffe_traintest_net.CopyTrainedLayersFrom(trained_net_param);
65 // Test run
66 double train_accuracy = 0;
67 int batch_size = traintest_net_param.layers(0).layer().batchsize();
68 for (int i = 0; i < 60000 / batch_size; ++i) {
69 const vector<Blob<float>*>& result =
70 caffe_traintest_net.Forward(bottom_vec);
71 train_accuracy += result[0]->cpu_data()[0];
72 }
73 train_accuracy /= 60000 / batch_size;
74 LOG(ERROR) << "Train accuracy:" << train_accuracy;
76 NetParameter test_net_param;
77 ReadProtoFromTextFile("caffe/test/data/lenet_test.prototxt", &test_net_param);
78 Net<float> caffe_test_net(test_net_param, bottom_vec);
79 caffe_test_net.CopyTrainedLayersFrom(trained_net_param);
81 // Test run
82 double test_accuracy = 0;
83 batch_size = test_net_param.layers(0).layer().batchsize();
84 for (int i = 0; i < 10000 / batch_size; ++i) {
85 const vector<Blob<float>*>& result =
86 caffe_test_net.Forward(bottom_vec);
87 test_accuracy += result[0]->cpu_data()[0];
88 }
89 test_accuracy /= 10000 / batch_size;
90 LOG(ERROR) << "Test accuracy:" << test_accuracy;
92 return 0;
93 }