author    Yangqing Jia <jiayq84@gmail.com>  Tue, 24 Sep 2013 21:52:09 +0000 (14:52 -0700)
committer Yangqing Jia <jiayq84@gmail.com>  Tue, 24 Sep 2013 21:52:09 +0000 (14:52 -0700)
src/Makefile
src/caffe/layer.hpp
src/caffe/layers/data_layer.cpp  [new file with mode: 0644]
src/caffe/proto/layer_param.proto
src/caffe/vision_layers.hpp
diff --git a/src/Makefile b/src/Makefile
index bc76371d0dd10741f2c1d83a16478842dcbf0125..315eaa4dcca42b62f1d74a681c9309b2318dc734 100644
--- a/src/Makefile
+++ b/src/Makefile
INCLUDE_DIRS := . /usr/local/include $(CUDA_INCLUDE_DIR) $(MKL_INCLUDE_DIR)
LIBRARY_DIRS := . /usr/local/lib $(CUDA_LIB_DIR) $(MKL_LIB_DIR)
-LIBRARIES := cuda cudart cublas protobuf glog mkl_rt mkl_intel_thread curand
+LIBRARIES := cuda cudart cublas protobuf glog mkl_rt mkl_intel_thread curand leveldb snappy
WARNINGS := -Wall
CXXFLAGS += -fPIC $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir))
diff --git a/src/caffe/layer.hpp b/src/caffe/layer.hpp
index 130d3fbe61d58b6e7511259a118e286e6b8103b7..777fc85664411d4ca3d9bf574a586d142d036be0 100644
--- a/src/caffe/layer.hpp
+++ b/src/caffe/layer.hpp
return blobs_;
}
+ // Writes the layer parameter to a protocol buffer
+ void ToProto(LayerParameter* param, bool write_diff = false);
+
protected:
// The protobuf that stores the layer parameters
LayerParameter layer_param_;
}
};
+template <typename Dtype>
+void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
+ param->Clear();
+ param->CopyFrom(layer_param_);
+ param->clear_blobs();
+ for (int i = 0; i < blobs_.size(); ++i) {
+ blobs_[i].ToProto(param->add_blobs(), write_diff);
+ }
+}
} // namespace caffe
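
The new ToProto hook is what later snapshotting code can build on. As a minimal sketch (not part of this commit), a caller could serialize a layer's parameters to protobuf text format; the snapshot helper below is hypothetical, and only Layer::ToProto itself is added here:

#include <fstream>
#include <string>

#include <google/protobuf/text_format.h>

#include "caffe/layer.hpp"

// Hypothetical helper: dump one layer's LayerParameter (including its
// learned blobs) to a human-readable text file.
template <typename Dtype>
void SnapshotLayer(caffe::Layer<Dtype>* layer, const std::string& filename) {
  caffe::LayerParameter param;
  layer->ToProto(&param);  // copies layer_param_ and each blob's data
  std::string text;
  google::protobuf::TextFormat::PrintToString(param, &text);
  std::ofstream out(filename.c_str());
  out << text;
}
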
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
new file mode 100644
--- /dev/null
+++ b/src/caffe/layers/data_layer.cpp
@@ -0,0 +1,93 @@
+// Copyright 2013 Yangqing Jia
+
+#include <vector>
+
+#include <leveldb/db.h>
+
+#include "caffe/layer.hpp"
+#include "caffe/vision_layers.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top) {
+ CHECK_EQ(bottom.size(), 0) << "Data Layer takes no input blobs.";
+ CHECK_EQ(top->size(), 2) << "Data Layer takes two blobs as output.";
+ // Initialize the leveldb
+ leveldb::DB* db_temp;
+ leveldb::Options options;
+ options.create_if_missing = false;
+ leveldb::Status status = leveldb::DB::Open(
+ options, this->layer_param_.source(), &db_temp);
+ CHECK(status.ok());
+ db_.reset(db_temp);
+ iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
+ iter_->SeekToFirst();
+ // Read a data point, and use it to initialize the top blob.
+ Datum datum;
+ datum.ParseFromString(iter_->value().ToString());
+ const BlobProto& blob = datum.blob();
+ // image
+ (*top)[0]->Reshape(
+ this->layer_param_.batchsize(), blob.channels(), blob.height(),
+ blob.width());
+ // label
+ (*top)[1]->Reshape(this->layer_param_.batchsize(), 1, 1, 1);
+ // datum size
+ datum_size_ = blob.data_size();
+}
+
+template <typename Dtype>
+void DataLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top) {
+ Datum datum;
+ Dtype* top_data = (*top)[0]->mutable_cpu_data();
+ Dtype* top_label = (*top)[1]->mutable_cpu_data();
+ for (int i = 0; i < this->layer_param_.batchsize(); ++i) {
+ // get a blob
+ datum.ParseFromString(iter_->value().ToString());
+ const BlobProto& blob = datum.blob();
+ for (int j = 0; j < datum_size_; ++j) {
+ top_data[i * datum_size_ + j] = blob.data(j);
+ }
+ top_label[i] = datum.label();
+ // go to the next iter
+ iter_->Next();
+ if (!iter_->Valid()) {
+ // We have reached the end. Restart from the first.
+ LOG(INFO) << "Restarting data read from start.";
+ iter_.reset(db_->NewIterator(leveldb::ReadOptions()));
+ iter_->SeekToFirst();
+ }
+ }
+}
+
+template <typename Dtype>
+void DataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top) {
+ Forward_cpu(bottom, top);
+ // explicitly copy data to gpu - this is achieved by simply calling gpu_data
+ // functions.
+ // TODO(Yangqing): maybe we don't need this since data synchronization is
+ // simply done under the hood?
+ (*top)[0]->gpu_data();
+ (*top)[1]->gpu_data();
+}
+
+// The backward operations are dummy - they do not carry any computation.
+template <typename Dtype>
+Dtype DataLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+ return Dtype(0.);
+}
+
+template <typename Dtype>
+Dtype DataLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+ return Dtype(0.);
+}
+
+INSTANTIATE_CLASS(DataLayer);
+
+} // namespace caffe
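
For context, the layer assumes each leveldb value is a serialized Datum carrying a BlobProto and a label, with keys that iterate in the desired read order. A hedged sketch of a writer that produces such a database follows; the generated header path is an assumption, and the field names mirror the reads in SetUp and Forward_cpu above:

#include <cstdio>
#include <string>

#include <glog/logging.h>
#include <leveldb/db.h>

#include "caffe/proto/layer_param.pb.h"  // assumed path of the generated header

// Hypothetical converter: fill a leveldb with `num` all-zero data points
// shaped like the blobs DataLayer expects to read back.
void WriteDummySource(const std::string& db_path, int num, int channels,
                      int height, int width) {
  leveldb::Options options;
  options.create_if_missing = true;
  leveldb::DB* db;
  leveldb::Status status = leveldb::DB::Open(options, db_path, &db);
  CHECK(status.ok()) << "Failed to create " << db_path;
  for (int i = 0; i < num; ++i) {
    caffe::Datum datum;
    caffe::BlobProto* blob = datum.mutable_blob();
    blob->set_channels(channels);
    blob->set_height(height);
    blob->set_width(width);
    for (int j = 0; j < channels * height * width; ++j) {
      blob->add_data(0.);
    }
    datum.set_label(i % 10);
    char key[16];
    snprintf(key, sizeof(key), "%08d", i);  // zero-padded keys keep read order
    std::string value;
    datum.SerializeToString(&value);
    db->Put(leveldb::WriteOptions(), std::string(key), value);
  }
  delete db;
}
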
diff --git a/src/caffe/proto/layer_param.proto b/src/caffe/proto/layer_param.proto
index ff545cebcda3f795fa4994d15443165043a8a0cc..cb8c7c4b7e4d8943a4591ad1147d5bd16d6196c3 100644
--- a/src/caffe/proto/layer_param.proto
+++ b/src/caffe/proto/layer_param.proto
optional uint32 local_size = 13 [default = 5]; // for local response norm
optional float alpha = 14 [default = 1.]; // for local response norm
optional float beta = 15 [default = 0.75]; // for local response norm
+
+ // For data layers, specify the data source
+ optional string source = 16;
+ // For data layers, specify the batch size.
+ optional uint32 batchsize = 17;
+
+ // The blobs containing the numeric parameters of the layer
+ repeated BlobProto blobs = 50;
}
message LayerConnection {
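
The proto2 compiler generates typed accessors for the two new fields, so a data layer can be configured programmatically. A minimal sketch (the generated header path is an assumption; the setters themselves are guaranteed by proto2 codegen for optional string/uint32 fields):

#include "caffe/proto/layer_param.pb.h"  // assumed path of the generated header

caffe::LayerParameter MakeDataLayerParam() {
  caffe::LayerParameter param;
  param.set_source("/path/to/leveldb");  // field 16, added in this commit
  param.set_batchsize(64);               // field 17, added in this commit
  return param;
}
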
diff --git a/src/caffe/vision_layers.hpp b/src/caffe/vision_layers.hpp
index 0eb07d951ffb460e6b3a2989e52c2a138d632694..4744a95296fa5a1b29f0a938575ce77f4782c26d 100644
--- a/src/caffe/vision_layers.hpp
+++ b/src/caffe/vision_layers.hpp
#ifndef CAFFE_VISION_LAYERS_HPP_
#define CAFFE_VISION_LAYERS_HPP_
+#include <leveldb/db.h>
+
#include "caffe/layer.hpp"
namespace caffe {
virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
Blob<Dtype> col_bob_;
- protected:
+
int KSIZE_;
int STRIDE_;
int NUM_;
int N_;
};
+template <typename Dtype>
+class DataLayer : public Layer<Dtype> {
+ public:
+ explicit DataLayer(const LayerParameter& param)
+ : Layer<Dtype>(param) {}
+ virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+ protected:
+ virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+ virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+ virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+ shared_ptr<leveldb::DB> db_;
+ shared_ptr<leveldb::Iterator> iter_;
+ int datum_size_;
+};
+
} // namespace caffe
#endif // CAFFE_VISION_LAYERS_HPP_
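
Putting the pieces together, a hedged end-to-end sketch of wiring the new DataLayer; Blob's default constructor and a public Forward wrapper on Layer are assumed from the surrounding codebase and are not part of this diff:

#include <vector>

#include "caffe/vision_layers.hpp"

void RunDataLayer(const caffe::LayerParameter& param) {
  caffe::DataLayer<float> layer(param);  // param carries source and batchsize
  caffe::Blob<float> data;               // shaped by SetUp from the first datum
  caffe::Blob<float> label;              // shaped to batchsize x 1 x 1 x 1
  std::vector<caffe::Blob<float>*> bottom;  // data layers take no bottom blobs
  std::vector<caffe::Blob<float>*> top;
  top.push_back(&data);
  top.push_back(&label);
  layer.SetUp(bottom, &top);  // opens the db and shapes the two top blobs
  // Forward_cpu is protected; in practice the public Layer::Forward wrapper
  // (assumed, not shown in this diff) dispatches to it each iteration.
}
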