summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 1417d91)
raw | patch | inline | side by side (parent: 1417d91)
author | Yuan Zhao <yuanzhao@ti.com> | |
Thu, 12 Sep 2019 22:06:58 +0000 (17:06 -0500) | ||
committer | Yuan Zhao <yuanzhao@ti.com> | |
Fri, 13 Sep 2019 15:32:28 +0000 (10:32 -0500) |
- Copy original image to show image before pre-processing, because
pre-processing will change BGR to RGB for tensorflow models
- Subtract 1 from output object class index, because tensorflow outputs
1001 bytes and uses index-0 for background. Regular imagenet labels
only have 1000 entries.
- Fix paths to the InceptionNet network and parameter binaries in the config file.
- MCT-1221
pre-processing will change BGR to RGB for tensorflow models
- Subtract 1 from output object class index, because tensorflow outputs
1001 bytes and uses index-0 for background. Regular imagenet labels
only have 1000 entries.
- Fix paths to the InceptionNet network and parameter binaries in the config file.
- MCT-1221
index 25361f8a210a1de37c37a3030d6d3bfc55adf988..68c5f8009034ea1e9204f58e45a5b38db91ac7f2 100644 (file)
// Report average FPS across a sliding window of 16 frames
AvgFPSWindow fps_window(16);
-static int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id);
+static int tf_postprocess(uchar *in, int out_size, int size, int roi_idx,
+ int frame_idx, int f_id);
static int ShowRegion(int roi_history[]);
// from most recent to oldest at top indices
static int selclass_history[MAX_NUM_ROI][3];
sprintf(tmp_string, "ROI[%02d]", frame_idx % NUM_ROI);
cv::imshow(tmp_string, r_image);
}
+ image.copyTo(show_image);
#endif
imgutil::PreprocessImage(r_image, eop->GetInputBufferPtr(), c);
eop->SetFrameIndex(frame_idx);
writer << to_stream;
#endif
-#ifdef LIVE_DISPLAY
- //waitKey(2);
- image.copyTo(show_image);
-#endif
return true;
}
} else {
int f_id = eop->GetFrameIndex();
int curr_roi = f_id % NUM_ROI;
int is_object = tf_postprocess((uchar*) eop->GetOutputBufferPtr(),
+ eop->GetOutputBufferSizeInBytes(),
IMAGE_CLASSES_NUM, curr_roi, frame_idx, f_id);
selclass_history[curr_roi][2] = selclass_history[curr_roi][1];
selclass_history[curr_roi][1] = selclass_history[curr_roi][0];
return false;
}
-int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id)
+int tf_postprocess(uchar *in, int out_size, int size, int roi_idx,
+ int frame_idx, int f_id)
{
//prob_i = exp(TIDL_Lib_output_i) / sum(exp(TIDL_Lib_output))
// sort and get k largest values and corresponding indices
const int k = TOP_CANDIDATES;
int rpt_id = -1;
+ // Tensorflow trained network outputs 1001 probabilities,
+ // with 0-index being background, thus we need to subtract 1 when
+ // reporting classified object from 1000 categories
+ int background_offset = out_size == 1001 ? 1 : 0;
typedef std::pair<uchar, int> val_index;
auto cmp = [](val_index &left, val_index &right) { return left.first > right.first; };
for (int i = 0; i < k; i++)
{
- int id = sorted[i].second;
+ int id = sorted[i].second - background_offset;
if (tf_expected_id(id))
{
std::cout << "Frame:" << frame_idx << "," << f_id << " ROI[" << roi_idx << "]: rank="
<< k-i << ", outval=" << (float)sorted[i].first / 255 << ", "
- << labels_classes[sorted[i].second] << std::endl;
+ << labels_classes[id] << std::endl;
rpt_id = id;
}
}
index 565807a0fcf6e7e1dbd7233a75cbadefe0d39e5c..047a92267fc6a74786959f4e22122462859a9460 100644 (file)
# 1. Live camera input, using 2xEVE and 2xDSP cores, based on model with single layers group
./tidl_classification -g 1 -d 2 -e 2 -l ./imagenet.txt -s ./classlist.txt -i 1 -c ./stream_config_j11_v2.txt
# 2. Use video clip as input stream, using 2xEVE and 2xDSP cores, based on model with single layers group
-./tidl_classification -g 1 -d 2 -e 2 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test50.mp4 -c ./stream_config_j11_v2.txt
+./tidl_classification -g 1 -d 2 -e 2 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test10.mp4 -c ./stream_config_j11_v2.txt
# 3. Use video clip as input stream, using 2xEVE and 1xDSP cores, based on model with two layers group (1st layers group running on EVE, 2nd layers group on DSP)
-./tidl_classification -g 2 -d 1 -e 2 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test50.mp4 -c ./stream_config_j11_v2.txt
+./tidl_classification -g 2 -d 1 -e 2 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test10.mp4 -c ./stream_config_j11_v2.txt
# 4. Use video clip as input stream, using no EVEs and 2xDSP cores, based on model with single layers group
-./tidl_classification -g 1 -d 2 -e 0 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test50.mp4 -c ./stream_config_j11_v2.txt
+./tidl_classification -g 1 -d 2 -e 0 -l ./imagenet.txt -s ./classlist.txt -i ./clips/test10.mp4 -c ./stream_config_j11_v2.txt
diff --git a/examples/test/testvecs/config/infer/tidl_config_inceptionNetv1.txt b/examples/test/testvecs/config/infer/tidl_config_inceptionNetv1.txt
index 934cff6b7125157f123ca78da1dd36149c4aa23b..6ba34d4e78224e8c8a7414f83d80114a2610b5be 100755 (executable)
preProcType = 2
inData = ../test/testvecs/input/preproc_2_224x224.y
outData = "stats_tool_out.bin"
-netBinFile = ../test/testvecs/config/tidl_models/tidl_inception_v1_net.bin
-paramsBinFile = ../test/testvecs/config/tidl_models/tidl_inception_v1_param.bin
+netBinFile = ../test/testvecs/config/tidl_models/tidl_net_inceptionv1_224.bin
+paramsBinFile = ../test/testvecs/config/tidl_models/tidl_param_inceptionv1_224.bin
inWidth = 224
inHeight = 224
inNumChannels = 3
diff --git a/examples/test/testvecs/config/infer/tidl_config_inceptionNetv1_lg2.txt b/examples/test/testvecs/config/infer/tidl_config_inceptionNetv1_lg2.txt
index b41294bd11c3512bb3234e7c1061bcd8ef7813b0..f2fb97f135a1d855167895aa317237f0ff12fb92 100755 (executable)
preProcType = 2
inData = ../test/testvecs/input/preproc_2_224x224.y
outData = "stats_tool_out.bin"
-netBinFile = ../test/testvecs/config/tidl_models/tidl_inception_v1_net.bin
-paramsBinFile = ../test/testvecs/config/tidl_models/tidl_inception_v1_param.bin
+netBinFile = ../test/testvecs/config/tidl_models/tidl_net_inceptionv1_224.bin
+paramsBinFile = ../test/testvecs/config/tidl_models/tidl_param_inceptionv1_224.bin
inWidth = 224
inHeight = 224
inNumChannels = 3
diff --git a/examples/test/testvecs/config/tidl_models/tidl_inception_v1_net.bin b/examples/test/testvecs/config/tidl_models/tidl_net_inceptionv1_224.bin
similarity index 100%
rename from examples/test/testvecs/config/tidl_models/tidl_inception_v1_net.bin
rename to examples/test/testvecs/config/tidl_models/tidl_net_inceptionv1_224.bin
rename from examples/test/testvecs/config/tidl_models/tidl_inception_v1_net.bin
rename to examples/test/testvecs/config/tidl_models/tidl_net_inceptionv1_224.bin
diff --git a/examples/test/testvecs/config/tidl_models/tidl_inception_v1_param.bin b/examples/test/testvecs/config/tidl_models/tidl_param_inceptionv1_224.bin
similarity index 100%
rename from examples/test/testvecs/config/tidl_models/tidl_inception_v1_param.bin
rename to examples/test/testvecs/config/tidl_models/tidl_param_inceptionv1_224.bin
rename from examples/test/testvecs/config/tidl_models/tidl_inception_v1_param.bin
rename to examples/test/testvecs/config/tidl_models/tidl_param_inceptionv1_224.bin