1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
28 #include <signal.h>
29 #include <iostream>
30 #include <iomanip>
31 #include <fstream>
32 #include <cassert>
33 #include <string>
34 #include <functional>
35 #include <algorithm>
36 #include <time.h>
37 #include <unistd.h>
39 #include <queue>
40 #include <vector>
41 #include <cstdio>
42 #include <chrono>
44 #include "executor.h"
45 #include "execution_object.h"
46 #include "execution_object_pipeline.h"
47 #include "configuration.h"
48 #include "../common/object_classes.h"
49 #include "../common/utils.h"
50 #include "../common/video_utils.h"
52 using namespace std;
53 using namespace tidl;
54 using namespace cv;
57 #define NUM_VIDEO_FRAMES 100
58 #define DEFAULT_CONFIG "jdetnet"
59 #define DEFAULT_INPUT "../test/testvecs/input/preproc_0_768x320.y"
60 #define DEFAULT_INPUT_FRAMES (1)
61 #define DEFAULT_OBJECT_CLASSES_LIST_FILE "./jdetnet_objects.json"
63 std::unique_ptr<ObjectClasses> object_classes;
64 uint32_t orig_width;
65 uint32_t orig_height;
68 bool RunConfiguration(const cmdline_opts_t& opts);
69 Executor* CreateExecutor(DeviceType dt, uint32_t num, const Configuration& c,
70 int layers_group_id);
71 bool ReadFrame(ExecutionObjectPipeline& eop, uint32_t frame_idx,
72 const Configuration& c, const cmdline_opts_t& opts,
73 VideoCapture &cap);
74 bool WriteFrameOutput(const ExecutionObjectPipeline& eop,
75 const Configuration& c, const cmdline_opts_t& opts);
76 static void DisplayHelp();
79 int main(int argc, char *argv[])
80 {
81 // Catch ctrl-c to ensure a clean exit
82 signal(SIGABRT, exit);
83 signal(SIGTERM, exit);
85 // If there are no devices capable of offloading TIDL on the SoC, exit
86 uint32_t num_eves = Executor::GetNumDevices(DeviceType::EVE);
87 uint32_t num_dsps = Executor::GetNumDevices(DeviceType::DSP);
88 if (num_eves == 0 || num_dsps == 0)
89 {
90 cout << "ssd_multibox requires both EVE and DSP for execution." << endl;
91 return EXIT_SUCCESS;
92 }
94 // Process arguments
95 cmdline_opts_t opts;
96 opts.config = DEFAULT_CONFIG;
97 opts.object_classes_list_file = DEFAULT_OBJECT_CLASSES_LIST_FILE;
98 opts.num_eves = 1;
99 opts.num_dsps = 1;
100 if (! ProcessArgs(argc, argv, opts))
101 {
102 DisplayHelp();
103 exit(EXIT_SUCCESS);
104 }
105 assert(opts.num_dsps != 0 && opts.num_eves != 0);
106 if (opts.num_frames == 0)
107 opts.num_frames = (opts.is_camera_input || opts.is_video_input) ?
108 NUM_VIDEO_FRAMES :
109 (opts.input_file.empty() ? DEFAULT_INPUT_FRAMES : 1);
110 if (opts.input_file.empty())
111 cout << "Input: " << DEFAULT_INPUT << endl;
112 else
113 cout << "Input: " << opts.input_file << endl;
115 // Get object classes list
116 object_classes = std::unique_ptr<ObjectClasses>(
117 new ObjectClasses(opts.object_classes_list_file));
118 if (object_classes->GetNumClasses() == 0)
119 {
120 cout << "No object classes defined for this config." << endl;
121 return EXIT_FAILURE;
122 }
124 // Run network
125 bool status = RunConfiguration(opts);
126 if (!status)
127 {
128 cout << "ssd_multibox FAILED" << endl;
129 return EXIT_FAILURE;
130 }
132 cout << "ssd_multibox PASSED" << endl;
133 return EXIT_SUCCESS;
134 }
// Run the partitioned network described by opts.config: builds EVE/DSP
// executors, wires them into ExecutionObjectPipelines, and streams
// opts.num_frames frames through them. Returns true on success, false on
// configuration error or a TIDL exception.
bool RunConfiguration(const cmdline_opts_t& opts)
{
    // Read the TI DL configuration file
    Configuration c;
    std::string config_file = "../test/testvecs/config/infer/tidl_config_"
                              + opts.config + ".txt";
    bool status = c.ReadFromFile(config_file);
    if (!status)
    {
        cerr << "Error in configuration file: " << config_file << endl;
        return false;
    }
    c.enableApiTrace = opts.verbose;

    // setup camera/video input (no-op for still-image/raw file input)
    VideoCapture cap;
    if (! SetVideoInputOutput(cap, opts, "SSD_Multibox")) return false;

    try
    {
        // Create Executors with the appropriate core type, number of cores
        // and configuration specified
        // EVE will run layersGroupId 1 in the network, while
        // DSP will run layersGroupId 2 in the network
        Executor* e_eve = CreateExecutor(DeviceType::EVE, opts.num_eves, c, 1);
        Executor* e_dsp = CreateExecutor(DeviceType::DSP, opts.num_dsps, c, 2);

        // Construct ExecutionObjectPipeline that utilizes multiple
        // ExecutionObjects to process a single frame, each ExecutionObject
        // processes one layerGroup of the network
        //
        // Pipeline depth can enable more optimized pipeline execution:
        // Given one EVE and one DSP as an example, with different
        // pipeline_depth, we have different execution behavior:
        // If pipeline_depth is set to 1,
        //    we create one EOP: eop0 (eve0, dsp0)
        //    pipeline execution of multiple frames over time is as follows:
        //    --------------------- time ------------------->
        //    eop0: [eve0...][dsp0]
        //    eop0:                [eve0...][dsp0]
        //    eop0:                               [eve0...][dsp0]
        //    eop0:                                              [eve0...][dsp0]
        // If pipeline_depth is set to 2,
        //    we create two EOPs: eop0 (eve0, dsp0), eop1(eve0, dsp0)
        //    pipeline execution of multiple frames over time is as follows:
        //    --------------------- time ------------------->
        //    eop0: [eve0...][dsp0]
        //    eop1:          [eve0...][dsp0]
        //    eop0:                   [eve0...][dsp0]
        //    eop1:                            [eve0...][dsp0]
        // Additional benefit of setting pipeline_depth to 2 is that
        // it can also overlap host ReadFrame() with device processing:
        //    --------------------- time ------------------->
        //    eop0: [RF][eve0...][dsp0]
        //    eop1:     [RF]     [eve0...][dsp0]
        //    eop0:                    [RF][eve0...][dsp0]
        //    eop1:                             [RF][eve0...][dsp0]
        vector<ExecutionObjectPipeline *> eops;
        uint32_t pipeline_depth = 2;  // 2 EOs in EOP -> depth 2
        for (uint32_t j = 0; j < pipeline_depth; j++)
            // Each EOP pairs one EVE EO with one DSP EO; when core counts
            // differ, the smaller pool is shared round-robin via modulo
            for (uint32_t i = 0; i < max(opts.num_eves, opts.num_dsps); i++)
                eops.push_back(new ExecutionObjectPipeline(
                      {(*e_eve)[i%opts.num_eves], (*e_dsp)[i%opts.num_dsps]}));
        uint32_t num_eops = eops.size();

        // Allocate input/output memory for each EOP
        AllocateMemory(eops);

        chrono::time_point<chrono::steady_clock> tloop0, tloop1;
        tloop0 = chrono::steady_clock::now();

        // Process frames with available eops in a pipelined manner
        // additional num_eops iterations to flush pipeline (epilogue):
        // those extra iterations only wait/write, since ReadFrame()
        // returns false past opts.num_frames and no new frame is started
        for (uint32_t frame_idx = 0;
             frame_idx < opts.num_frames + num_eops; frame_idx++)
        {
            ExecutionObjectPipeline* eop = eops[frame_idx % num_eops];

            // Wait for previous frame on the same eop to finish processing
            if (eop->ProcessFrameWait())
            {
                WriteFrameOutput(*eop, c, opts);
            }

            // Read a frame and start processing it with current eo
            if (ReadFrame(*eop, frame_idx, c, opts, cap))
                eop->ProcessFrameStartAsync();
        }

        tloop1 = chrono::steady_clock::now();
        chrono::duration<float> elapsed = tloop1 - tloop0;
        cout << "Loop total time (including read/write/opencv/print/etc): "
                  << setw(6) << setprecision(4)
                  << (elapsed.count() * 1000) << "ms" << endl;

        // NOTE(review): e_eve/e_dsp and eops leak if an exception is thrown
        // before this cleanup; consider RAII (unique_ptr) in a future pass.
        FreeMemory(eops);
        for (auto eop : eops) delete eop;
        delete e_eve;
        delete e_dsp;
    }
    catch (tidl::Exception &e)
    {
        cerr << e.what() << endl;
        status = false;
    }

    return status;
}
245 // Create an Executor with the specified type and number of EOs
246 Executor* CreateExecutor(DeviceType dt, uint32_t num, const Configuration& c,
247 int layers_group_id)
248 {
249 if (num == 0) return nullptr;
251 DeviceIds ids;
252 for (uint32_t i = 0; i < num; i++)
253 ids.insert(static_cast<DeviceId>(i));
255 return new Executor(dt, ids, c, layers_group_id);
256 }
258 bool ReadFrame(ExecutionObjectPipeline& eop, uint32_t frame_idx,
259 const Configuration& c, const cmdline_opts_t& opts,
260 VideoCapture &cap)
261 {
262 if ((uint32_t)frame_idx >= opts.num_frames)
263 return false;
265 eop.SetFrameIndex(frame_idx);
267 char* frame_buffer = eop.GetInputBufferPtr();
268 assert (frame_buffer != nullptr);
269 int channel_size = c.inWidth * c.inHeight;
271 Mat image;
272 if (!opts.is_camera_input && !opts.is_video_input)
273 {
274 if (opts.input_file.empty())
275 {
276 ifstream ifs(DEFAULT_INPUT, ios::binary);
277 ifs.seekg((frame_idx % DEFAULT_INPUT_FRAMES) * channel_size * 3);
278 ifs.read(frame_buffer, channel_size * 3);
279 bool ifs_status = ifs.good();
280 ifs.close();
281 orig_width = c.inWidth;
282 orig_height = c.inHeight;
283 return ifs_status; // already PreProc-ed
284 }
285 else
286 {
287 image = cv::imread(opts.input_file, CV_LOAD_IMAGE_COLOR);
288 if (image.empty())
289 {
290 cerr << "Unable to read from: " << opts.input_file << endl;
291 return false;
292 }
293 }
294 }
295 else
296 {
297 // 640x480 camera input, process one in every 5 frames,
298 // can adjust number of skipped frames to match real time processing
299 if (! cap.grab()) return false;
300 if (! cap.grab()) return false;
301 if (! cap.grab()) return false;
302 if (! cap.grab()) return false;
303 if (! cap.grab()) return false;
304 if (! cap.retrieve(image)) return false;
305 }
307 // scale to network input size
308 Mat s_image, bgr_frames[3];
309 orig_width = image.cols;
310 orig_height = image.rows;
311 cv::resize(image, s_image, Size(c.inWidth, c.inHeight),
312 0, 0, cv::INTER_AREA);
313 cv::split(s_image, bgr_frames);
314 memcpy(frame_buffer, bgr_frames[0].ptr(), channel_size);
315 memcpy(frame_buffer+1*channel_size, bgr_frames[1].ptr(), channel_size);
316 memcpy(frame_buffer+2*channel_size, bgr_frames[2].ptr(), channel_size);
317 return true;
318 }
320 // Create frame with boxes drawn around classified objects
321 bool WriteFrameOutput(const ExecutionObjectPipeline& eop,
322 const Configuration& c, const cmdline_opts_t& opts)
323 {
324 // Asseembly original frame
325 int width = c.inWidth;
326 int height = c.inHeight;
327 int channel_size = width * height;
328 Mat frame, r_frame, bgr[3];
330 unsigned char *in = (unsigned char *) eop.GetInputBufferPtr();
331 bgr[0] = Mat(height, width, CV_8UC(1), in);
332 bgr[1] = Mat(height, width, CV_8UC(1), in + channel_size);
333 bgr[2] = Mat(height, width, CV_8UC(1), in + channel_size*2);
334 cv::merge(bgr, 3, frame);
336 int frame_index = eop.GetFrameIndex();
337 char outfile_name[64];
338 if (opts.input_file.empty())
339 {
340 snprintf(outfile_name, 64, "frame_%d.png", frame_index);
341 cv::imwrite(outfile_name, frame);
342 printf("Saving frame %d to: %s\n", frame_index, outfile_name);
343 }
345 // Draw boxes around classified objects
346 float *out = (float *) eop.GetOutputBufferPtr();
347 int num_floats = eop.GetOutputBufferSizeInBytes() / sizeof(float);
348 for (int i = 0; i < num_floats / 7; i++)
349 {
350 int index = (int) out[i * 7 + 0];
351 if (index < 0) break;
353 int label = (int) out[i * 7 + 1];
354 int xmin = (int) (out[i * 7 + 3] * width);
355 int ymin = (int) (out[i * 7 + 4] * height);
356 int xmax = (int) (out[i * 7 + 5] * width);
357 int ymax = (int) (out[i * 7 + 6] * height);
359 const ObjectClass& object_class = object_classes->At(label);
361 #if 0
362 printf("(%d, %d) -> (%d, %d): %s, score=%f\n",
363 xmin, ymin, xmax, ymax, object_class.label, score);
364 #endif
366 cv::rectangle(frame, Point(xmin, ymin), Point(xmax, ymax),
367 Scalar(object_class.color.blue,
368 object_class.color.green,
369 object_class.color.red), 2);
370 }
372 // Resize to output width/height, keep aspect ratio
373 uint32_t output_width = opts.output_width;
374 if (output_width == 0) output_width = orig_width;
375 uint32_t output_height = (output_width*1.0f) / orig_width * orig_height;
376 cv::resize(frame, r_frame, Size(output_width, output_height));
378 if (opts.is_camera_input || opts.is_video_input)
379 {
380 cv::imshow("SSD_Multibox", r_frame);
381 waitKey(1);
382 }
383 else
384 {
385 snprintf(outfile_name, 64, "multibox_%d.png", frame_index);
386 cv::imwrite(outfile_name, r_frame);
387 printf("Saving frame %d with SSD multiboxes to: %s\n",
388 frame_index, outfile_name);
389 }
391 return true;
392 }
// Print command-line usage to stdout. `static` matches the forward
// declaration (internal linkage). Fixed: the help text said "segmentation
// network" (copy-paste from the segmentation demo); this demo runs a
// detection network.
static void DisplayHelp()
{
    std::cout <<
    "Usage: ssd_multibox\n"
    "  Will run partitioned ssd_multibox network to perform "
    "multi-objects detection\n"
    "  and classification.  First part of network "
    "(layersGroupId 1) runs on EVE,\n"
    "  second part (layersGroupId 2) runs on DSP.\n"
    "  Use -c to run a different detection network. Default is jdetnet.\n"
    "Optional arguments:\n"
    " -c <config>          Valid configs: jdetnet \n"
    " -d <number>          Number of dsp cores to use\n"
    " -e <number>          Number of eve cores to use\n"
    " -i <image>           Path to the image file as input\n"
    "                      Default are 9 frames in testvecs\n"
    " -i camera<number>    Use camera as input\n"
    "                      video input port: /dev/video<number>\n"
    " -i <name>.{mp4,mov,avi}  Use video file as input\n"
    " -l <objects_list>    Path to the object classes list file\n"
    " -f <number>          Number of frames to process\n"
    " -w <number>          Output image/video width\n"
    " -v                   Verbose output during execution\n"
    " -h                   Help\n";
}