1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
#include <signal.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <chrono>
#include <cstring>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iostream>
#include <queue>
#include <string>
#include <vector>

#include "executor.h"
#include "execution_object.h"
#include "execution_object_pipeline.h"
#include "configuration.h"
#include "imgutil.h"
#include "../common/video_utils.h"

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
55 using namespace std;
56 using namespace tidl;
57 using namespace cv;
// Number of frames to process when the input is a camera or video stream
#define NUM_VIDEO_FRAMES 300
// Network configuration used when no -c option is given
#define DEFAULT_CONFIG "mnist"
// Number of entries in the canned-input rotation below
#define NUM_DEFAULT_INPUTS 1
// Number of pre-processed frames stored in each canned input file
#define DEFAULT_INPUT_FRAMES 1

// Pre-processed 28x28 grayscale digit used when no -i option is given
const char *default_inputs[NUM_DEFAULT_INPUTS] =
{
    "../test/testvecs/input/digit_28x28.y"
};
69 Executor* CreateExecutor(DeviceType dt, uint32_t num, const Configuration& c);
70 bool RunConfiguration(cmdline_opts_t& opts);
71 bool ReadFrame(ExecutionObjectPipeline& eop,
72 uint32_t frame_idx, const Configuration& c,
73 const cmdline_opts_t& opts, VideoCapture &cap);
74 bool WriteFrameOutput(const ExecutionObjectPipeline &eop);
75 void DisplayHelp();
78 int main(int argc, char *argv[])
79 {
80 // Catch ctrl-c to ensure a clean exit
81 signal(SIGABRT, exit);
82 signal(SIGTERM, exit);
84 // If there are no devices capable of offloading TIDL on the SoC, exit
85 uint32_t num_eves = Executor::GetNumDevices(DeviceType::EVE);
86 uint32_t num_dsps = Executor::GetNumDevices(DeviceType::DSP);
87 if (num_eves == 0 && num_dsps == 0)
88 {
89 cout << "TI DL not supported on this SoC." << endl;
90 return EXIT_SUCCESS;
91 }
93 // Process arguments
94 cmdline_opts_t opts;
95 opts.config = DEFAULT_CONFIG;
96 if (num_eves != 0) { opts.num_eves = 1; opts.num_dsps = 0; }
97 else { opts.num_eves = 0; opts.num_dsps = 1; }
98 if (! ProcessArgs(argc, argv, opts))
99 {
100 DisplayHelp();
101 exit(EXIT_SUCCESS);
102 }
103 assert(opts.num_dsps != 0 || opts.num_eves != 0);
104 if (opts.num_frames == 0)
105 opts.num_frames = (opts.is_camera_input || opts.is_video_input) ?
106 NUM_VIDEO_FRAMES : 1;
107 if (opts.input_file.empty())
108 cout << "Input: " << default_inputs[0] << endl;
109 else
110 cout << "Input: " << opts.input_file << endl;
112 // Run network
113 bool status = RunConfiguration(opts);
114 if (!status)
115 {
116 cout << "mnist FAILED" << endl;
117 return EXIT_FAILURE;
118 }
120 cout << "mnist PASSED" << endl;
121 return EXIT_SUCCESS;
122 }
// Run the network described by opts.config on the requested EVE/DSP cores.
// Reads the TIDL configuration, builds Executors and double-buffered
// ExecutionObjectPipelines, then streams frames through them in a pipelined
// fashion. Returns true iff setup and all frames complete without a TIDL
// exception.
bool RunConfiguration(cmdline_opts_t& opts)
{
    // Read the TI DL configuration file
    Configuration c;
    string config_file = "../test/testvecs/config/infer/tidl_config_"
                         + opts.config + ".txt";
    bool status = c.ReadFromFile(config_file);
    if (!status)
    {
        cerr << "Error in configuration file: " << config_file << endl;
        return false;
    }
    c.enableApiTrace = opts.verbose;

    // Setup camera/video input/output (no-op for still-image/file input)
    VideoCapture cap;
    if (! SetVideoInputOutput(cap, opts, "MNIST")) return false;

    try
    {
        // Create Executors with the appropriate core type, number of cores
        // and configuration specified.
        // NOTE(review): raw owning pointers — e_eve/e_dsp leak if a later
        // statement in this try block throws before the deletes below;
        // consider std::unique_ptr in a future cleanup.
        Executor* e_eve = CreateExecutor(DeviceType::EVE, opts.num_eves, c);
        Executor* e_dsp = CreateExecutor(DeviceType::DSP, opts.num_dsps, c);

        // Collect ExecutionObjects from both Executors into one flat list
        vector<ExecutionObject*> eos;
        for (uint32_t i = 0; i < opts.num_eves; i++) eos.push_back((*e_eve)[i]);
        for (uint32_t i = 0; i < opts.num_dsps; i++) eos.push_back((*e_dsp)[i]);
        uint32_t num_eos = eos.size();

        // Use duplicate EOPs to do double buffering on frame input/output
        // because each EOP has its own set of input/output buffers,
        // so that host ReadFrame() can be overlapped with device processing.
        // Use one EO as an example; with different buffer_factor,
        // we have different execution behavior:
        // If buffer_factor is set to 1 -> single buffering
        //    we create one EOP: eop0 (eo0)
        //    pipeline execution of multiple frames over time is as follows:
        //    --------------------- time ------------------->
        //    eop0: [RF][eo0.....][WF]
        //    eop0:                   [RF][eo0.....][WF]
        //    eop0:                                     [RF][eo0.....][WF]
        // If buffer_factor is set to 2 -> double buffering
        //    we create two EOPs: eop0 (eo0), eop1(eo0)
        //    pipeline execution of multiple frames over time is as follows:
        //    --------------------- time ------------------->
        //    eop0: [RF][eo0.....][WF]
        //    eop1:     [RF]      [eo0.....][WF]
        //    eop0:                   [RF]  [eo0.....][WF]
        //    eop1:                             [RF]  [eo0.....][WF]
        vector<ExecutionObjectPipeline *> eops;
        uint32_t buffer_factor = 2;  // set to 1 for single buffering
        for (uint32_t j = 0; j < buffer_factor; j++)
            for (uint32_t i = 0; i < num_eos; i++)
                eops.push_back(new ExecutionObjectPipeline({eos[i]}));
        uint32_t num_eops = eops.size();

        // Allocate input and output buffers for each EOP
        AllocateMemory(eops);

        float device_time = 0.0f;
        chrono::time_point<chrono::steady_clock> tloop0, tloop1;
        tloop0 = chrono::steady_clock::now();

        // Process frames with available eops in a pipelined manner;
        // additional num_eops iterations to flush the pipeline (epilogue)
        for (uint32_t frame_idx = 0;
             frame_idx < opts.num_frames + num_eops; frame_idx++)
        {
            ExecutionObjectPipeline* eop = eops[frame_idx % num_eops];

            // Wait for previous frame on the same eop to finish processing
            if (eop->ProcessFrameWait())
            {
                // num_eops == buffer_factor * num_eos, so this indexes the
                // same EO that this eop wraps
                device_time +=
                    eos[frame_idx % num_eos]->GetProcessTimeInMilliSeconds();
                WriteFrameOutput(*eop);
            }

            // Read a frame and start processing it with current eop
            if (ReadFrame(*eop, frame_idx, c, opts, cap))
                eop->ProcessFrameStartAsync();
        }

        tloop1 = chrono::steady_clock::now();
        chrono::duration<float> elapsed = tloop1 - tloop0;
        cout << "Device total time: " << setw(6) << setprecision(4)
             << device_time << "ms" << endl;
        cout << "Loop total time (including read/write/opencv/print/etc): "
             << setw(6) << setprecision(4)
             << (elapsed.count() * 1000) << "ms" << endl;

        FreeMemory(eops);
        for (auto eop : eops) delete eop;
        delete e_eve;
        delete e_dsp;
    }
    catch (tidl::Exception &e)  // NOTE(review): prefer catching by const&
    {
        cerr << e.what() << endl;
        status = false;
    }

    return status;
}
231 // Create an Executor with the specified type and number of EOs
232 Executor* CreateExecutor(DeviceType dt, uint32_t num, const Configuration& c)
233 {
234 if (num == 0) return nullptr;
236 DeviceIds ids;
237 for (uint32_t i = 0; i < num; i++)
238 ids.insert(static_cast<DeviceId>(i));
240 return new Executor(dt, ids, c);
241 }
243 bool ReadFrame(ExecutionObjectPipeline &eop,
244 uint32_t frame_idx, const Configuration& c,
245 const cmdline_opts_t& opts, VideoCapture &cap)
246 {
247 if (frame_idx >= opts.num_frames)
248 return false;
250 eop.SetFrameIndex(frame_idx);
252 char* frame_buffer = eop.GetInputBufferPtr();
253 assert (frame_buffer != nullptr);
254 int channel_size = c.inWidth * c.inHeight;
256 Mat image;
257 if (! opts.is_camera_input && ! opts.is_video_input)
258 {
259 if (opts.input_file.empty())
260 {
261 ifstream ifs(default_inputs[frame_idx % NUM_DEFAULT_INPUTS],
262 ios::binary);
263 ifs.seekg((frame_idx % DEFAULT_INPUT_FRAMES) * channel_size);
264 ifs.read(frame_buffer, channel_size);
265 memcpy(frame_buffer+channel_size, frame_buffer, channel_size);
266 bool ifs_status = ifs.good();
267 ifs.close();
268 return ifs_status; // already PreProc-ed
269 }
270 else
271 {
272 image = cv::imread(opts.input_file, CV_LOAD_IMAGE_COLOR);
273 if (image.empty())
274 {
275 cerr << "Unable to read input image" << endl;
276 return false;
277 }
278 }
279 }
280 else
281 {
282 Mat v_image;
283 if (! cap.grab()) return false;
284 if (! cap.retrieve(v_image)) return false;
285 #define DISPLAY_SIZE 112
286 int orig_width = v_image.cols;
287 int orig_height = v_image.rows;
288 // Crop camera/video input to center DISPLAY_SIZE x DISPLAY_SIZE input
289 if (orig_width > DISPLAY_SIZE && orig_height > DISPLAY_SIZE)
290 {
291 image = Mat(v_image, Rect((orig_width-DISPLAY_SIZE)/2,
292 (orig_height-DISPLAY_SIZE)/2,
293 DISPLAY_SIZE, DISPLAY_SIZE));
294 }
295 else
296 image = v_image;
297 cv::imshow("MNIST", image);
298 waitKey(2);
299 }
301 // Convert to Gray image, resize to 28x28, copy into frame_buffer
302 Mat s_image, bgr_frames[3];
303 cv::resize(image, s_image, Size(c.inWidth, c.inHeight),
304 0, 0, cv::INTER_AREA);
305 cv::split(s_image, bgr_frames);
306 memcpy(frame_buffer, bgr_frames[0].ptr(), channel_size);
307 memcpy(frame_buffer+1*channel_size, bgr_frames[1].ptr(), channel_size);
308 return true;
309 }
// Display the classified digit: print the index (argmax) of the output byte
// with the highest score.
// NOTE(review): the previous comment ("top 5 classified imagenet classes")
// was copied from the imagenet example and did not describe this function.
// Prints -1 when every output byte is zero. Always returns true.
bool WriteFrameOutput(const ExecutionObjectPipeline &eop)
{
    unsigned char *out = (unsigned char *) eop.GetOutputBufferPtr();
    int out_size = eop.GetOutputBufferSizeInBytes();

    // Linear argmax over the raw output bytes
    unsigned char maxval = 0;
    int maxloc = -1;
    for (int i = 0; i < out_size; i++)
    {
        // cout << (int) out[i] << " ";
        if (out[i] > maxval)
        {
            maxval = out[i];
            maxloc = i;
        }
    }
    cout << maxloc << endl;

    return true;
}
// Print command line usage for the mnist example.
// BUGFIX: the help text previously described the imagenet example
// (wrong usage line, "top 5 object classes", j11_* configs, and an
// -l <objects_list> option this example does not use).
void DisplayHelp()
{
    std::cout <<
        "Usage: mnist\n"
        "  Will run the mnist network to predict the digit in the input.\n"
        "  Use -c to run a different mnist network. Default is mnist.\n"
        "Optional arguments:\n"
        " -c <config>          Valid configs: mnist\n"
        " -d <number>          Number of dsp cores to use\n"
        " -e <number>          Number of eve cores to use\n"
        " -i <image>           Path to the image file as input\n"
        " -i camera<number>    Use camera as input\n"
        "                      video input port: /dev/video<number>\n"
        " -i <name>.{mp4,mov,avi}  Use video file as input\n"
        " -f <number>          Number of frames to process\n"
        " -v                   Verbose output during execution\n"
        " -h                   Help\n";
}