commitdiff (parent: ddef691)
author    Sergei Nikolaev <snikolaev@nvidia.com>  Sun, 28 Jan 2018 10:58:01 +0000 (02:58 -0800)
committer Sergei Nikolaev <snikolaev@nvidia.com>  Sun, 28 Jan 2018 10:58:01 +0000 (02:58 -0800)
19 files changed:
index 2d0ac19ba3179ab31a57c65231478b634e10cc95..6b1bf079b86e9f3900c69c566804c2fad4e374f6 100644 (file)
--- a/include/caffe/layer.hpp
+++ b/include/caffe/layer.hpp
*/
explicit LayerBase(const LayerParameter& param, int prec = 0)
: layer_param_(param),
- solver_rank_(0U),
debug_(false),
fm_by_user_(false),
bm_by_user_(false),
int iter() const;
int relative_iter() const;
- void set_solver_rank(size_t solver_rank) {
- solver_rank_ = solver_rank;
- }
-
Net* parent_net() {
return parent_net_;
}
/** The phase: TRAIN or TEST */
Phase phase_;
- size_t solver_rank_;
bool debug_;
bool fm_by_user_, bm_by_user_;
Net* parent_net_;
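With solver_rank_ and set_solver_rank() removed from LayerBase, a layer that needs its rank must accept it at construction, which is what the factory changes below arrange. A minimal sketch of the pattern (RankedLayer is a hypothetical name):

// Sketch only: the rank arrives once, at construction, and never changes.
class RankedLayer : public LayerBase {
 public:
  RankedLayer(const LayerParameter& param, size_t solver_rank)
      : LayerBase(param), rank_(solver_rank) {}
 private:
  const size_t rank_;  // replaces the old mutable solver_rank_ and its setter
};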
index be3ef78f507941d61177b233246353ea04f6c4cb..152d456113ecd4a5137af2ade076bdb6639c1b11 100644 (file)
class LayerBase;
-template<template <typename Ftype, typename Btype> class LayerType>
+template<template <typename Ftype, typename Btype> class LType>
inline shared_ptr<LayerBase> CreateLayerBase(const LayerParameter& param,
Type ftype, Type btype) {
bool failed = false;
shared_ptr<LayerBase> ptr;
if (ftype == FLOAT) {
if (btype == FLOAT) {
- ptr.reset(new LayerType<float, float>(param));
+ ptr.reset(new LType<float, float>(param));
} else if (btype == FLOAT16) {
- ptr.reset(new LayerType<float, float16>(param));
+ ptr.reset(new LType<float, float16>(param));
} else if (btype == DOUBLE) {
- ptr.reset(new LayerType<float, double>(param));
+ ptr.reset(new LType<float, double>(param));
} else {
failed = true;
}
} else if (ftype == FLOAT16) {
if (btype == FLOAT) {
- ptr.reset(new LayerType<float16, float>(param));
+ ptr.reset(new LType<float16, float>(param));
} else if (btype == FLOAT16) {
- ptr.reset(new LayerType<float16, float16>(param));
+ ptr.reset(new LType<float16, float16>(param));
} else if (btype == DOUBLE) {
- ptr.reset(new LayerType<float16, double>(param));
+ ptr.reset(new LType<float16, double>(param));
} else {
failed = true;
}
} else if (ftype == DOUBLE) {
if (btype == FLOAT) {
- ptr.reset(new LayerType<double, float>(param));
+ ptr.reset(new LType<double, float>(param));
} else if (btype == FLOAT16) {
- ptr.reset(new LayerType<double, float16>(param));
+ ptr.reset(new LType<double, float16>(param));
} else if (btype == DOUBLE) {
- ptr.reset(new LayerType<double, double>(param));
+ ptr.reset(new LType<double, double>(param));
+ } else {
+ failed = true;
+ }
+ } else {
+ failed = true;
+ }
+
+ if (failed) {
+ LOG(FATAL) << "Combination of layer types " << Type_Name(ftype) << " and "
+ << Type_Name(btype) << " is not currently supported "
+ << "(discovered in layer '" << param.name() << "').";
+ }
+ CHECK_NOTNULL(ptr.get());
+ return ptr;
+}
+
+template<template <typename Ftype, typename Btype> class LType>
+inline shared_ptr<LayerBase> CreateLayerBase(const LayerParameter& param,
+ Type ftype, Type btype, size_t solver_rank) {
+ bool failed = false;
+ shared_ptr<LayerBase> ptr;
+ if (ftype == FLOAT) {
+ if (btype == FLOAT) {
+ ptr.reset(new LType<float, float>(param, solver_rank));
+ } else if (btype == FLOAT16) {
+ ptr.reset(new LType<float, float16>(param, solver_rank));
+ } else if (btype == DOUBLE) {
+ ptr.reset(new LType<float, double>(param, solver_rank));
+ } else {
+ failed = true;
+ }
+ } else if (ftype == FLOAT16) {
+ if (btype == FLOAT) {
+ ptr.reset(new LType<float16, float>(param, solver_rank));
+ } else if (btype == FLOAT16) {
+ ptr.reset(new LType<float16, float16>(param, solver_rank));
+ } else if (btype == DOUBLE) {
+ ptr.reset(new LType<float16, double>(param, solver_rank));
+ } else {
+ failed = true;
+ }
+ } else if (ftype == DOUBLE) {
+ if (btype == FLOAT) {
+ ptr.reset(new LType<double, float>(param, solver_rank));
+ } else if (btype == FLOAT16) {
+ ptr.reset(new LType<double, float16>(param, solver_rank));
+ } else if (btype == DOUBLE) {
+ ptr.reset(new LType<double, double>(param, solver_rank));
} else {
failed = true;
}
class LayerRegistry {
public:
- typedef shared_ptr<LayerBase> (*Creator)(const LayerParameter&, Type, Type);
+ typedef shared_ptr<LayerBase> (*Creator)(const LayerParameter&, Type, Type, size_t);
typedef std::map<string, Creator> CreatorRegistry;
static CreatorRegistry& Registry() {
registry[type] = creator;
}
- static shared_ptr<LayerBase> CreateLayer(const LayerParameter& param) {
+ static shared_ptr<LayerBase> CreateLayer(const LayerParameter& param, size_t solver_rank) {
const string& layer_type = param.type();
const string& layer_name = param.name();
if (Caffe::root_solver()) {
<< " Fmath:" << Type_Name(fmath)
<< " Bmath:" << Type_Name(bmath);
}
- return registry[layer_type](param, ftype, btype);
+ return registry[layer_type](param, ftype, btype, solver_rank);
}
static vector<string> LayerTypeList() {
private:
// Layer registry should never be instantiated - everything is done with its
// static variables.
- LayerRegistry() {}
+ LayerRegistry() = delete;
static string LayerTypeListString() {
vector<string> layer_types = LayerTypeList();
class LayerRegisterer {
public:
LayerRegisterer(const string& type,
- shared_ptr<LayerBase> (*creator)(const LayerParameter&, Type, Type)) {
+ shared_ptr<LayerBase> (*creator)(const LayerParameter&, Type, Type, size_t)) {
LayerRegistry::AddCreator(type, creator);
}
};
#define REGISTER_LAYER_CLASS(type) \
shared_ptr<LayerBase> Creator_##type##Layer(const LayerParameter& param, \
- Type ftype, Type btype) \
+ Type ftype, Type btype, size_t) \
{ \
return CreateLayerBase<type##Layer>(param, ftype, btype); \
} \
REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)
+#define REGISTER_LAYER_CLASS_R(type) \
+ shared_ptr<LayerBase> Creator_##type##Layer(const LayerParameter& param, \
+ Type ftype, Type btype, size_t solver_rank) \
+ { \
+ return CreateLayerBase<type##Layer>(param, ftype, btype, solver_rank); \
+ } \
+ REGISTER_LAYER_CREATOR(type, Creator_##type##Layer)
+
+
void check_precision_support(Type& ftype, Type& btype, LayerParameter& param,
bool transf = false);
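Taken together, these hunks give every registered creator a uniform four-argument signature and add REGISTER_LAYER_CLASS_R for layers whose constructors take the rank. A short usage sketch (MyData/MyDataLayer are hypothetical):

// Rank-agnostic layers keep REGISTER_LAYER_CLASS; their generated creator
// ignores the trailing size_t. A layer with a (param, solver_rank)
// constructor registers with the _R variant instead:
REGISTER_LAYER_CLASS_R(MyData);

// Callers always supply the rank; the chosen macro decides whether the
// generated creator forwards it to CreateLayerBase:
shared_ptr<LayerBase> layer = LayerRegistry::CreateLayer(layer_param, solver_rank);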
index b571ed02b12614b4dde5308af75ab70f739e4575..77053042be27260a3352ee8459005011bd536266 100644 (file)
template<typename Ftype, typename Btype>
class BasePrefetchingDataLayer : public BaseDataLayer<Ftype, Btype>, public InternalThread {
public:
- explicit BasePrefetchingDataLayer(const LayerParameter& param);
+ BasePrefetchingDataLayer(const LayerParameter& param, size_t solver_rank);
virtual ~BasePrefetchingDataLayer();
// LayerSetUp: implements common data layer setup functionality, and calls
// DataLayerSetUp to do special data layer setup for individual layer types.
index df95b3265ebb1532b7651ce4a5d265070d483f87..661d28441b2fcddda4fb0cc7defcf2a6075b6b9e 100644 (file)
template <typename Ftype, typename Btype>
class DataLayer : public BasePrefetchingDataLayer<Ftype, Btype> {
public:
- explicit DataLayer(const LayerParameter& param);
+ DataLayer(const LayerParameter& param, size_t solver_rank);
virtual ~DataLayer();
void DataLayerSetUp(const vector<Blob*>& bottom, const vector<Blob*>& top) override;
// DataLayer uses DataReader instead for sharing for parallelism
index af5d1398cb3d9fa0e277862a3213abeecb5427cb..3cdda4d8fbb066529cc559d4d2298c6d3f258faa 100644 (file)
template <typename Ftype, typename Btype>
class ImageDataLayer : public BasePrefetchingDataLayer<Ftype, Btype> {
public:
- explicit ImageDataLayer(const LayerParameter& param)
- : BasePrefetchingDataLayer<Ftype, Btype>(param) {}
+ ImageDataLayer(const LayerParameter& param, size_t solver_rank);
virtual ~ImageDataLayer();
void DataLayerSetUp(const vector<Blob*>& bottom, const vector<Blob*>& top) override;
- const char* type() const override { return "ImageData"; }
- int ExactNumBottomBlobs() const override { return 0; }
- int ExactNumTopBlobs() const override { return 2; }
+ bool ShareInParallel() const override {
+ return false;
+ }
+ const char* type() const override {
+ return "ImageData";
+ }
+ int ExactNumBottomBlobs() const override {
+ return 0;
+ }
+ int ExactNumTopBlobs() const override {
+ return 2;
+ }
protected:
void ShuffleImages();
- bool is_root() const;
void load_batch(Batch* batch, int thread_id, size_t queue_id = 0UL) override;
void start_reading() override {}
void InitializePrefetch() override;
}
Flag* layer_inititialized_flag() override {
- return this->phase_ == TRAIN ? &layer_inititialized_flag_ : nullptr;
+ return &layer_inititialized_flag_;
}
shared_ptr<Caffe::RNG> prefetch_rng_;
Flag layer_inititialized_flag_;
vector<size_t> line_ids_;
- vector<std::pair<std::string, int>> lines_;
+ static vector<std::pair<std::string, int>> lines_;
};
+template <typename Ftype, typename Btype>
+vector<std::pair<std::string, int>> ImageDataLayer<Ftype, Btype>::lines_;
+
} // namespace caffe
#endif // CAFFE_IMAGE_DATA_LAYER_HPP_
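Making lines_ static (with the in-header definition that templates permit, added above) means every ImageDataLayer instance of a given Ftype/Btype pair shares one image list. A sketch of the resulting contract, using names from this header:

// - lines_ is defined once per template instantiation, so all solver ranks
//   see the same vector;
// - only rank 0 fills (and shuffles) it, e.g.:
//     if (this->rank_ == 0) { lines_.clear(); /* load the source file */ }
// - every other rank merely indexes it with a per-rank stride, e.g.:
//     line_ids_[i] = this->rank_ * this->threads_num() + i + skip;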
diff --git a/include/caffe/layers/window_data_layer.hpp b/include/caffe/layers/window_data_layer.hpp
index 33c32a0990a6a1ebe7024100c9b4179c0fc976dc..4ac661c0de23c91ec747f1fc97b8eac97cd8eec2 100644 (file)
template <typename Ftype, typename Btype>
class WindowDataLayer : public BasePrefetchingDataLayer<Ftype, Btype> {
public:
- explicit WindowDataLayer(const LayerParameter& param)
- : BasePrefetchingDataLayer<Ftype, Btype>(param) {}
+ explicit WindowDataLayer(const LayerParameter& param, size_t solver_rank)
+ : BasePrefetchingDataLayer<Ftype, Btype>(param, solver_rank) {}
virtual ~WindowDataLayer();
void DataLayerSetUp(const vector<Blob*>& bottom, const vector<Blob*>& top) override;
index 5c1486ac195629f1d340f40cc3eb36eb7760837a..de733e8dfe6d9ebc918f07c60649b64f915313b4 100644 (file)
--- a/python/caffe/_caffe.cpp
+++ b/python/caffe/_caffe.cpp
@@ -384,7 +384,7 @@ bp::object LayerParameter_ToPython(const LayerParameter *layer_param, bp::object
// Create layer from caffe_pb2.LayerParameter in Python
shared_ptr<LayerBase> create_layer(bp::object py_layer_param) {
shared_ptr<LayerParameter> layer_param(LayerParameter_Init(py_layer_param));
- return LayerRegistry::CreateLayer(*layer_param);
+ return LayerRegistry::CreateLayer(*layer_param, 0UL);
}
// Run solver step without GIL
index 12167f526ab0de005d94fe3c69fe820518f797c4..46eb4cf735baa08ef16d8589e80fa9dff883f735 100644 (file)
// Get convolution layer according to engine.
shared_ptr<LayerBase> GetConvolutionLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
ConvolutionParameter conv_param = param.convolution_param();
ConvolutionParameter_Engine engine = conv_param.engine();
if (engine == ConvolutionParameter_Engine_DEFAULT) {
// Get BN layer according to engine.
shared_ptr<LayerBase> GetBatchNormLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
BatchNormParameter_Engine engine = param.batch_norm_param().engine();
if (engine == BatchNormParameter_Engine_DEFAULT) {
engine = BatchNormParameter_Engine_CAFFE;
// Get pooling layer according to engine.
shared_ptr<LayerBase> GetPoolingLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
PoolingParameter_Engine engine = param.pooling_param().engine();
if (engine == PoolingParameter_Engine_DEFAULT) {
engine = PoolingParameter_Engine_CAFFE;
// Get LRN layer according to engine
shared_ptr<LayerBase> GetLRNLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
LRNParameter_Engine engine = param.lrn_param().engine();
if (engine == LRNParameter_Engine_DEFAULT) {
// Get relu layer according to engine.
shared_ptr<LayerBase> GetReLULayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
ReLUParameter_Engine engine = param.relu_param().engine();
if (engine == ReLUParameter_Engine_DEFAULT) {
engine = ReLUParameter_Engine_CAFFE;
// Get sigmoid layer according to engine.
shared_ptr<LayerBase> GetSigmoidLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
SigmoidParameter_Engine engine = param.sigmoid_param().engine();
if (engine == SigmoidParameter_Engine_DEFAULT) {
engine = SigmoidParameter_Engine_CAFFE;
// Get softmax layer according to engine.
shared_ptr<LayerBase> GetSoftmaxLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
LayerParameter lparam(param);
SoftmaxParameter_Engine engine = lparam.softmax_param().engine();
if (engine == SoftmaxParameter_Engine_DEFAULT) {
// Get tanh layer according to engine.
shared_ptr<LayerBase> GetTanHLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
TanHParameter_Engine engine = param.tanh_param().engine();
if (engine == TanHParameter_Engine_DEFAULT) {
engine = TanHParameter_Engine_CAFFE;
// Get dropout layer according to engine
shared_ptr<LayerBase> GetDropoutLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
DropoutParameter_Engine engine = param.dropout_param().engine();
if (engine == DropoutParameter_Engine_DEFAULT) {
engine = DropoutParameter_Engine_CAFFE;
}
REGISTER_LAYER_CREATOR(Dropout, GetDropoutLayer);
-shared_ptr<LayerBase> GetMemoryDataLayer(const LayerParameter& param, Type ftype, Type btype) {
+shared_ptr<LayerBase> GetMemoryDataLayer(const LayerParameter& param,
+ Type ftype, Type btype, size_t) {
LayerParameter lparam(param);
check_precision_support(ftype, btype, lparam);
shared_ptr<LayerBase> ret;
@@ -294,21 +295,22 @@ shared_ptr<LayerBase> GetMemoryDataLayer(const LayerParameter& param, Type ftype
}
REGISTER_LAYER_CREATOR(MemoryData, GetMemoryDataLayer);
-shared_ptr<LayerBase> GetWindowDataLayer(const LayerParameter& param, Type ftype, Type btype) {
+shared_ptr<LayerBase> GetWindowDataLayer(const LayerParameter& param, Type ftype, Type btype,
+ size_t solver_rank) {
LayerParameter lparam(param);
check_precision_support(ftype, btype, lparam);
shared_ptr<LayerBase> ret;
if (is_type<double>(ftype)) {
- ret.reset(new WindowDataLayer<double, double>(lparam));
+ ret.reset(new WindowDataLayer<double, double>(lparam, solver_rank));
} else {
- ret.reset(new WindowDataLayer<float, float>(lparam));
+ ret.reset(new WindowDataLayer<float, float>(lparam, solver_rank));
}
return ret;
}
REGISTER_LAYER_CREATOR(WindowData, GetWindowDataLayer);
shared_ptr<LayerBase> GetDetectNetTransformationLayer(const LayerParameter& param,
- Type ftype, Type btype) {
+ Type ftype, Type btype, size_t) {
LayerParameter lparam(param);
check_precision_support(ftype, btype, lparam);
shared_ptr<LayerBase> ret;
@@ -322,7 +324,7 @@ shared_ptr<LayerBase> GetDetectNetTransformationLayer(const LayerParameter& para
REGISTER_LAYER_CREATOR(DetectNetTransformation, GetDetectNetTransformationLayer);
#ifdef WITH_PYTHON_LAYER
-shared_ptr<LayerBase> GetPythonLayer(const LayerParameter& param, Type, Type) {
+shared_ptr<LayerBase> GetPythonLayer(const LayerParameter& param, Type, Type, size_t) {
try {
string module_name = param.python_param().module();
string layer_name = param.python_param().layer();
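Every engine-dispatching creator above now matches the four-argument Creator typedef; most leave the size_t unnamed, while data-layer creators such as GetWindowDataLayer forward it. A custom creator written against the new signature might look like this (GetMyLayer and MyLayer are hypothetical):

shared_ptr<LayerBase> GetMyLayer(const LayerParameter& param,
    Type ftype, Type btype, size_t solver_rank) {
  LayerParameter lparam(param);
  check_precision_support(ftype, btype, lparam);
  shared_ptr<LayerBase> ret;
  if (is_type<double>(ftype)) {
    ret.reset(new MyLayer<double, double>(lparam, solver_rank));
  } else {
    ret.reset(new MyLayer<float, float>(lparam, solver_rank));
  }
  return ret;
}
REGISTER_LAYER_CREATOR(My, GetMyLayer);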
index eace7cda273764ebe7c07089d6d7331882b27f1c..cb68035a61ba0d3b8090bcfdc8e9e546ddb83c61 100644 (file)
}
template<typename Ftype, typename Btype>
-BasePrefetchingDataLayer<Ftype, Btype>::BasePrefetchingDataLayer(const LayerParameter& param)
+BasePrefetchingDataLayer<Ftype, Btype>::BasePrefetchingDataLayer(const LayerParameter& param,
+ size_t solver_rank)
: BaseDataLayer<Ftype, Btype>(param, threads(param)),
- InternalThread(Caffe::current_device(), this->solver_rank_, threads(param), false),
+ InternalThread(Caffe::current_device(), solver_rank, threads(param), false),
auto_mode_(Caffe::mode() == Caffe::GPU && this->phase_ == TRAIN && auto_mode(param)),
parsers_num_(parser_threads(param)),
transf_num_(threads(param)),
queues_num_(transf_num_ * parsers_num_),
batch_transformer_(make_shared<BatchTransformer<Ftype, Btype>>(Caffe::current_device(),
- this->solver_rank_, queues_num_, param.transform_param(), is_gpu_transform())) {
+ solver_rank, queues_num_, param.transform_param(), is_gpu_transform())) {
CHECK_EQ(transf_num_, threads_num());
batch_size_ = param.data_param().batch_size();
// We begin with minimum required
template<typename Ftype, typename Btype>
void BasePrefetchingDataLayer<Ftype, Btype>::LayerSetUp(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
- this->rank_ = this->solver_rank_;
bottom_init_ = bottom;
top_init_ = top;
BaseDataLayer<Ftype, Btype>::LayerSetUp(bottom, top);
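Taking solver_rank as a constructor argument fixes an initialization-order problem: the old member-initializer list read this->solver_rank_ before Net had called set_solver_rank() (that call, removed in the net.cpp hunk below, only ran after CreateLayer() returned), so InternalThread and BatchTransformer were always bound to rank 0. A compact illustration of the trap:

// Sketch: a base-class field still holds its default while the derived
// initializer list runs; a setter called after construction comes too late.
struct Base { size_t solver_rank_ = 0U; };   // setter ran only post-construction
struct Derived : Base {
  Derived() : worker_rank_(solver_rank_) {}  // reads 0U, never the real rank
  size_t worker_rank_;
};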
index 3de6d7c34de445bdf7ae942976b1a8705d145f98..2a18ee483c819c9307bc52d2fbbcfaa737a85d81 100644 (file)
namespace caffe {
template<typename Ftype, typename Btype>
-DataLayer<Ftype, Btype>::DataLayer(const LayerParameter& param)
- : BasePrefetchingDataLayer<Ftype, Btype>(param),
+DataLayer<Ftype, Btype>::DataLayer(const LayerParameter& param, size_t solver_rank)
+ : BasePrefetchingDataLayer<Ftype, Btype>(param, solver_rank),
cache_(param.data_param().cache()),
shuffle_(param.data_param().shuffle()) {
sample_only_.store(this->auto_mode_ && this->phase_ == TRAIN);
@@ -128,7 +128,7 @@ DataLayer<Ftype, Btype>::DataLayerSetUp(const vector<Blob*>& bottom, const vecto
if (this->auto_mode_) {
if (!sample_reader_) {
sample_reader_ = std::make_shared<DataReader>(param, Caffe::solver_count(),
- this->solver_rank_,
+ this->rank_,
this->parsers_num_,
this->threads_num(),
batch_size,
@@ -140,7 +140,7 @@ DataLayer<Ftype, Btype>::DataLayerSetUp(const vector<Blob*>& bottom, const vecto
} else if (!reader_) {
reader_ = std::make_shared<DataReader>(param,
Caffe::solver_count(),
- this->solver_rank_,
+ this->rank_,
this->parsers_num_,
this->threads_num(),
batch_size,
@@ -153,7 +153,7 @@ DataLayer<Ftype, Btype>::DataLayerSetUp(const vector<Blob*>& bottom, const vecto
} else if (!reader_) {
reader_ = std::make_shared<DataReader>(param,
Caffe::solver_count(),
- this->solver_rank_,
+ this->rank_,
this->parsers_num_,
this->threads_num(),
batch_size,
@@ -349,6 +349,6 @@ void DataLayer<Ftype, Btype>::load_batch(Batch* batch, int thread_id, size_t que
}
INSTANTIATE_CLASS_FB(DataLayer);
-REGISTER_LAYER_CLASS(Data);
+REGISTER_LAYER_CLASS_R(Data);
} // namespace caffe
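DataLayer is the first layer moved to the rank-forwarding registration; DataLayerSetUp now reads this->rank_ (the rank handed to the InternalThread base at construction) instead of the removed solver_rank_. Constructing the layer directly therefore takes the rank explicitly, as the updated tests below show:

// Direct construction outside the registry; single-solver code passes rank 0.
DataLayer<float, float> layer(param, 0UL);
layer.SetUp(blob_bottom_vec_, blob_top_vec_);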
index 834ae4d4233a8016b7318fdd93079c3ec8fda92d..6dafba7f793748be07ca5694d51c33eda210acd7 100644 (file)
namespace caffe {
+template <typename Ftype, typename Btype>
+ImageDataLayer<Ftype, Btype>::ImageDataLayer(const LayerParameter& param, size_t solver_rank)
+ : BasePrefetchingDataLayer<Ftype, Btype>(param, solver_rank) {}
+
template <typename Ftype, typename Btype>
ImageDataLayer<Ftype, Btype>::~ImageDataLayer<Ftype, Btype>() {
if (layer_inititialized_flag_.is_set()) {
}
line_ids_.resize(this->threads_num());
for (size_t i = 0; i < this->threads_num(); ++i) {
- line_ids_[i] = this->solver_rank_ * this->threads_num() + i + skip;
+ line_ids_[i] = this->rank_ * this->threads_num() + i + skip;
}
CHECK((new_height == 0 && new_width == 0) ||
(new_height > 0 && new_width > 0)) << "Current implementation requires "
"new_height and new_width to be set at the same time.";
- if (this->solver_rank_ == 0) {
+ if (this->rank_ == 0) {
// Read the file with filenames and labels
ImageDataLayer<Ftype, Btype>::lines_.clear();
const string &source = this->layer_param_.image_data_param().source();
while (infile >> filename >> label) {
ImageDataLayer<Ftype, Btype>::lines_.emplace_back(std::make_pair(filename, label));
}
- }
- if (is_root() && this->layer_param_.image_data_param().shuffle()) {
- // randomly shuffle data
- LOG(INFO) << "Shuffling data";
- prefetch_rng_.reset(new Caffe::RNG(caffe_rng_rand()));
- ShuffleImages();
+ if (this->layer_param_.image_data_param().shuffle()) {
+ // randomly shuffle data
+ LOG(INFO) << "Shuffling data";
+ prefetch_rng_.reset(new Caffe::RNG(caffe_rng_rand()));
+ ShuffleImages();
+ }
}
LOG(INFO) << "A total of " << lines_.size() << " images.";
template<typename Ftype, typename Btype>
void ImageDataLayer<Ftype, Btype>::InitializePrefetch() {}
-template<typename Ftype, typename Btype>
-bool ImageDataLayer<Ftype, Btype>::is_root() const {
- const Solver* psolver = this->parent_solver();
- if (psolver != nullptr) {
- return psolver->is_root();
- }
- if (Caffe::gpus().size() > 0) {
- return Caffe::gpus()[0] == Caffe::current_device();
- }
- return true;
-}
-
template <typename Ftype, typename Btype>
void ImageDataLayer<Ftype, Btype>::load_batch(Batch* batch, int thread_id, size_t) {
CHECK(batch->data_->count());
@@ -176,7 +168,7 @@ void ImageDataLayer<Ftype, Btype>::load_batch(Batch* batch, int thread_id, size_
while (line_ids_[thread_id] >= lines_size) {
line_ids_[thread_id] -= lines_size;
}
- if (thread_id == 0 && is_root() && this->layer_param_.image_data_param().shuffle()) {
+ if (thread_id == 0 && this->rank_ == 0 && this->layer_param_.image_data_param().shuffle()) {
ShuffleImages();
}
}
@@ -187,6 +179,6 @@ void ImageDataLayer<Ftype, Btype>::load_batch(Batch* batch, int thread_id, size_
}
INSTANTIATE_CLASS_CPU_FB(ImageDataLayer);
-REGISTER_LAYER_CLASS(ImageData);
+REGISTER_LAYER_CLASS_R(ImageData);
} // namespace caffe
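With the rank fixed at construction, the per-instance is_root() heuristic (ask the parent solver, else compare against the first GPU) is unnecessary: rank 0 is the root by definition, and the shuffle guard reduces to a plain comparison:

// before: if (thread_id == 0 && is_root() && shuffle) ShuffleImages();
// after:  if (thread_id == 0 && this->rank_ == 0 && shuffle) ShuffleImages();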
index 7551e1c2f823df352caaf50b9871a08ad6a7f3d4..e9045f8b7798ab768fc987377fe2986ec7f2ae7b 100644 (file)
bias_param->set_num_axes(param.num_axes());
}
bias_param->mutable_filler()->CopyFrom(param.bias_filler());
- bias_layer_ = LayerRegistry::CreateLayer(layer_param);
+ bias_layer_ = LayerRegistry::CreateLayer(layer_param, 0UL);
bias_bottom_vec_.resize(1);
bias_bottom_vec_[0] = bottom[0];
bias_layer_->SetUp(bias_bottom_vec_, top);
diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp
index 54051811c16fa9508d564a8fce2a88416d82b3c7..d99b89b0fdc97a9de51a7b993ee7db932b71c31d 100644 (file)
--- a/src/caffe/net.cpp
+++ b/src/caffe/net.cpp
layers_.push_back(root_net_->layers_[layer_id]);
layers_[layer_id]->SetShared(true);
} else {
- layers_.push_back(LayerRegistry::CreateLayer(layer_param));
+ layers_.push_back(LayerRegistry::CreateLayer(layer_param, solver_rank_));
}
layer_names_.push_back(layer_param.name());
LOG_IF(INFO, Caffe::root_solver())
// specified fewer than the required number (as specified by
// ExactNumTopBlobs() or MinTopBlobs()), allocate them here.
LayerBase* layer = layers_[layer_id].get();
- layer->set_solver_rank(solver_rank_);
if (layer->AutoTopBlobs()) {
const int needed_num_top =
std::max(layer->MinTopBlobs(), layer->ExactNumTopBlobs());
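The net.cpp hunk completes the chain: the rank now travels from Net to the layer constructor in one pass, with no post-construction set_solver_rank() fix-up. The resulting call flow, with names taken from the hunks in this commit (SomeLayer stands for any _R-registered type, e.g. DataLayer):

// Net::Init()                                             // owns solver_rank_
//   -> LayerRegistry::CreateLayer(param, solver_rank_)
//     -> registry[type](param, ftype, btype, solver_rank)      // Creator typedef
//       -> CreateLayerBase<SomeLayer>(param, ftype, btype, solver_rank)
//         -> SomeLayer<Ftype, Btype>(param, solver_rank)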
index 62505786ce64b2d092ef24a0770e774c1e479278..b7cd6f09f844c0e4a4bf9acfb80020404cfc2c67 100644 (file)
--- a/src/caffe/syncedmem.cpp
+++ b/src/caffe/syncedmem.cpp
switch (head_) {
case UNINITIALIZED:
CUDA_CHECK(cudaGetDevice(&device_));
- GPUMemory::allocate(&gpu_ptr_, size_, device_);
+ GPUMemory::allocate(&gpu_ptr_, size_, device_, group);
caffe_gpu_memset(size_, 0, gpu_ptr_, group);
head_ = HEAD_AT_GPU;
own_gpu_data_ = true;
case HEAD_AT_CPU:
if (gpu_ptr_ == NULL) {
CUDA_CHECK(cudaGetDevice(&device_));
- GPUMemory::allocate(&gpu_ptr_, size_, device_);
+ GPUMemory::allocate(&gpu_ptr_, size_, device_, group);
own_gpu_data_ = true;
}
if (copy_from_cpu) {
index 63cc19b1eb5c60ac66e4dabfd7520b1a1f9bc6c2..34e5f9bf6e1ec6bf5c866abd32f14f056da83631 100644 (file)
layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
// Test mean
- Dtype mean, var;
int num = this->blob_bottom_->num();
int channels = this->blob_bottom_->channels();
int height = this->blob_bottom_->height();
index 542118d76809a096bee7f7f0d6e30a7a8ec8cb4d..70f7091b52b7486c9779ea7629bd1b4fee209ce9 100644 (file)
transform_param->set_scale(scale);
transform_param->set_use_gpu_transform(use_gpu_transform);
- DataLayer<Dtype, Dtype> layer(param);
+ DataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_data_->num(), 5);
EXPECT_EQ(blob_top_data_->channels(), 2);
data_param->set_backend(backend);
data_param->set_threads(data_param->backend() == DataParameter_DB_LEVELDB ? 1 : 3);
- DataLayer<Dtype, Dtype> layer(param);
+ DataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_data_->num(), 1);
EXPECT_EQ(blob_top_data_->channels(), 2);
transform_param->set_crop_size(1);
transform_param->set_use_gpu_transform(use_gpu_transform);
- DataLayer<Dtype, Dtype> layer(param);
+ DataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(blob_bottom_vec_, blob_top_vec_);
EXPECT_EQ(blob_top_data_->num(), 5);
EXPECT_EQ(blob_top_data_->channels(), 2);
Caffe::set_random_seed(seed_);
vector<vector<Dtype>> crop_sequence;
{
- DataLayer<Dtype, Dtype> layer1(param);
+ DataLayer<Dtype, Dtype> layer1(param, 0UL);
layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer1.Forward(blob_bottom_vec_, blob_top_vec_);
// Get crop sequence after reseeding Caffe with 1701.
// Check that the sequence is the same as the original.
Caffe::set_random_seed(seed_);
- DataLayer<Dtype, Dtype> layer2(param);
+ DataLayer<Dtype, Dtype> layer2(param, 0UL);
layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer2.Forward(blob_bottom_vec_, blob_top_vec_);
srand(seed_);
vector<vector<Dtype>> crop_sequence;
{
- DataLayer<Dtype, Dtype> layer1(param);
+ DataLayer<Dtype, Dtype> layer1(param, 0UL);
layer1.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer1.Forward(blob_bottom_vec_, blob_top_vec_);
// Get crop sequence continuing from previous Caffe RNG state; reseed
// srand with 1701. Check that the sequence differs from the original.
srand(seed_);
- DataLayer<Dtype, Dtype> layer2(param);
+ DataLayer<Dtype, Dtype> layer2(param, 0UL);
layer2.SetUp(blob_bottom_vec_, blob_top_vec_);
for (int iter = 0; iter < 2; ++iter) {
layer2.Forward(blob_bottom_vec_, blob_top_vec_);
index 4d54a68db62debe58f4b7f71e5c21ce48ebebea2..d2c14884eb8a7e965bd5ac2a87ff7f9a5ec44a9b 100644 (file)
image_data_param->set_batch_size(5);
image_data_param->set_source(this->filename_.c_str());
image_data_param->set_shuffle(false);
- ImageDataLayer<Dtype, Dtype> layer(param);
+ ImageDataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(5, this->blob_top_data_->num());
EXPECT_EQ(3, this->blob_top_data_->channels());
image_data_param->set_new_height(256);
image_data_param->set_new_width(256);
image_data_param->set_shuffle(false);
- ImageDataLayer<Dtype, Dtype> layer(param);
+ ImageDataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(5, this->blob_top_data_->num());
EXPECT_EQ(3, this->blob_top_data_->channels());
image_data_param->set_batch_size(1);
image_data_param->set_source(this->filename_reshape_.c_str());
image_data_param->set_shuffle(false);
- ImageDataLayer<Dtype, Dtype> layer(param);
+ ImageDataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(1, this->blob_top_label_->num());
EXPECT_EQ(1, this->blob_top_label_->channels());
image_data_param->set_batch_size(5);
image_data_param->set_source(this->filename_.c_str());
image_data_param->set_shuffle(true);
- ImageDataLayer<Dtype, Dtype> layer(param);
+ ImageDataLayer<Dtype, Dtype> layer(param, 0UL);
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
EXPECT_EQ(5, this->blob_top_data_->num());
EXPECT_EQ(3, this->blob_top_data_->channels());
index e97ac02bc7433bbf0919beec6203429ce193de07..a0d47baacddea7d67752efda9438e5b9f1c30269 100644 (file)
#endif // USE_LEVELDB
}
layer_param.set_type(iter->first);
- layer = LayerRegistry::CreateLayer(layer_param);
+ layer = LayerRegistry::CreateLayer(layer_param, 0UL);
EXPECT_EQ(iter->first, layer->type());
}
}
index a18663156ffc837000759aa4f6a001f67e197ab2..ca54ad9460c92db69eea07fa3fe76bbf392fb555 100644 (file)
continue;
#endif // USE_LEVELDB
}
- layer = LayerRegistry::CreateLayer(layer_param);
+ layer = LayerRegistry::CreateLayer(layer_param, 0UL);
EXPECT_EQ(v2_layer_type, layer->type());
}
}