1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
#include <signal.h>
#include <getopt.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cassert>
#include <string>
#include <functional>
#include <queue>
#include <algorithm>
#include <time.h>
#include <memory.h>
#include <string.h>
#include <cstdio>

#include "executor.h"
#include "execution_object.h"
#include "execution_object_pipeline.h"
#include "configuration.h"

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
52 //#define TWO_ROIs
53 #define LIVE_DISPLAY
54 #define PERF_VERBOSE
55 //#define RMT_GST_STREAMER
57 #define MAX_NUM_ROI 4
59 int live_input = 1;
60 char video_clip[320];
62 #ifdef TWO_ROIs
63 #define RES_X 400
64 #define RES_Y 300
65 #define NUM_ROI_X 2
66 #define NUM_ROI_Y 1
67 #define X_OFFSET 0
68 #define X_STEP 176
69 #define Y_OFFSET 52
70 #define Y_STEP 224
71 #else
72 #define RES_X 480
73 #define RES_Y 480
74 #define NUM_ROI_X 1
75 #define NUM_ROI_Y 1
76 #define X_OFFSET 10
77 #define X_STEP 460
78 #define Y_OFFSET 10
79 #define Y_STEP 460
80 #endif
82 #define NUM_ROI (NUM_ROI_X * NUM_ROI_Y)
84 //Temporal averaging
85 int TOP_CANDIDATES = 3;
87 using namespace tidl;
88 using namespace cv;
90 #ifdef LIVE_DISPLAY
91 char imagenet_win[160];
92 char tmp_classwindow_string[160];
93 Mat classlist_image;
95 void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata)
96 {
97 if ( event == EVENT_RBUTTONDOWN )
98 {
99 std::cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << " ... prepare to exit!" << std::endl;
100 exit(0);
101 }
102 }
103 #endif
105 Mat in_image, image, r_image, cnn_image, show_image, bgr_frames[3];
106 Mat to_stream;
107 Rect rectCrop[NUM_ROI];
108 double avg_fps;
110 static int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id);
111 static void tf_preprocess(uchar *out, uchar *in, int size);
112 static int ShowRegion(int roi_history[]);
113 // from most recent to oldest at top indices
114 static int selclass_history[MAX_NUM_ROI][3];
116 bool __TI_show_debug_ = false;
118 bool RunConfiguration(const std::string& config_file, int num_layers_groups,
119 uint32_t num_dsps, uint32_t num_eves);
120 bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
121 Configuration& configuration,
122 uint32_t num_layers_groups,
123 Executor*& e_eve, Executor*& e_dsp,
124 std::vector<ExecutionObjectPipeline*>& eops);
125 void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops);
126 void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps);
127 bool SetupInput(VideoCapture& cap, VideoWriter& writer);
128 bool ReadFrame(ExecutionObjectPipeline* eop,
129 uint32_t frame_idx, uint32_t num_frames,
130 VideoCapture &cap, VideoWriter& writer);
131 void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
132 uint32_t frame_idx, uint32_t num_eops,
133 uint32_t num_eves, uint32_t num_dsps);
134 static void ProcessArgs(int argc, char *argv[],
135 std::string& config_file,
136 uint32_t & num_dsps, uint32_t &num_eves,
137 int & num_layers_groups);
138 void ReportTime(const ExecutionObjectPipeline* eop);
140 static void DisplayHelp();
141 extern std::string labels_classes[];
142 extern int IMAGE_CLASSES_NUM;
143 extern int selected_items_size;
144 extern int selected_items[];
145 extern int populate_selected_items (char *filename);
146 extern void populate_labels (char *filename);
149 int main(int argc, char *argv[])
150 {
151 // Catch ctrl-c to ensure a clean exit
152 signal(SIGABRT, exit);
153 signal(SIGTERM, exit);
155 // If there are no devices capable of offloading TIDL on the SoC, exit
156 uint32_t num_eves = Executor::GetNumDevices(DeviceType::EVE);
157 uint32_t num_dsps = Executor::GetNumDevices(DeviceType::DSP);
158 int num_layers_groups = 1;
160 if (num_eves == 0 && num_dsps == 0)
161 {
162 std::cout << "TI DL not supported on this SoC." << std::endl;
163 return EXIT_SUCCESS;
164 }
166 // Process arguments
167 std::string config_file;
168 ProcessArgs(argc, argv, config_file, num_dsps, num_eves, num_layers_groups);
170 bool status = false;
171 if (!config_file.empty()) {
172 std::cout << "Run single configuration: " << config_file << std::endl;
173 status = RunConfiguration(config_file, num_layers_groups, num_dsps, num_eves);
174 }
176 if (!status)
177 {
178 std::cout << "tidl FAILED" << std::endl;
179 return EXIT_FAILURE;
180 }
182 std::cout << "tidl PASSED" << std::endl;
183 return EXIT_SUCCESS;
184 }
// Run a single network configuration end to end: read the config file,
// build the executors/EOP pipelines, then process frames in a pipelined
// loop until configuration.numFrames are done. Returns false on any
// configuration or TIDL runtime error.
bool RunConfiguration(const std::string& config_file, int num_layers_groups, uint32_t num_dsps, uint32_t num_eves)
{
    // Read the TI DL configuration file
    Configuration configuration;
    bool status = configuration.ReadFromFile(config_file);
    if (!status)
    {
        std::cerr << "Error in configuration file: " << config_file
                  << std::endl;
        return false;
    }

    // Opened up-front so a bad path fails fast; the camera/clip pipeline
    // below does not read from these streams directly.
    std::ifstream input_data_file(configuration.inData, std::ios::binary);
    std::ofstream output_data_file(configuration.outData, std::ios::binary);
    assert (input_data_file.good());
    assert (output_data_file.good());

    try
    {
        // Create ExecutionObjectPipelines
        Executor *e_eve = NULL;
        Executor *e_dsp = NULL;
        std::vector<ExecutionObjectPipeline *> eops;
        if (! CreateExecutionObjectPipelines(num_eves, num_dsps, configuration,
                                             num_layers_groups, e_eve, e_dsp, eops))
            return false;
        uint32_t num_eops = eops.size();

        // Allocate input/output memory for each EOP
        AllocateMemory(eops);

        // Setup Live Display
        SetupLiveDisplay(num_eves, num_dsps);

        // Setup Input
        VideoCapture cap;
        VideoWriter writer;  // gstreamer
        if (! SetupInput(cap, writer)) return false;

        // More initialization: clear the temporal-voting history for every
        // ROI (-1 means "no class seen yet", see ShowRegion()).
        for (int k = 0; k < NUM_ROI; k++)
            for(int i = 0; i < 3; i ++)
                selclass_history[k][i] = -1;
        avg_fps = 0.0;
        int num_frames = configuration.numFrames;
        std::cout << "About to start ProcessFrame loop!!" << std::endl;

        // Process frames with available EOPs in a pipelined manner
        // additional num_eops iterations to flush the pipeline (epilogue)
        for (uint32_t frame_idx = 0;
             frame_idx < configuration.numFrames + num_eops; frame_idx++)
        {
            // Round-robin over the EOPs: each one may still be busy with a
            // frame issued num_eops iterations ago.
            ExecutionObjectPipeline* eop = eops[frame_idx % num_eops];

            // Wait for previous frame on the same eo to finish processing
            if (eop->ProcessFrameWait())
            {
#ifdef PERF_VERBOSE
                ReportTime(eop);
#endif
                DisplayFrame(eop, writer, frame_idx, num_eops,
                             num_eves, num_dsps);
            }

            // Only kick off a new frame if one could actually be read
            // (ReadFrame returns false past num_frames / at end of clip).
            if (ReadFrame(eop, frame_idx, num_frames, cap, writer))
                eop->ProcessFrameStartAsync();
        }

        // Cleanup: buffers were malloc'd in AllocateMemory().
        // NOTE(review): on a tidl::Exception these EOPs/buffers leak;
        // acceptable here since the process exits, but worth confirming.
        for (auto eop : eops)
        {
            free(eop->GetInputBufferPtr());
            free(eop->GetOutputBufferPtr());
            delete eop;
        }
        if(num_dsps) delete e_dsp;
        if(num_eves) delete e_eve;
    }
    catch (tidl::Exception &e)
    {
        std::cerr << e.what() << std::endl;
        status = false;
    }

    input_data_file.close();
    output_data_file.close();

    return status;
}
281 bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
282 Configuration& configuration,
283 uint32_t num_layers_groups,
284 Executor*& e_eve, Executor*& e_dsp,
285 std::vector<ExecutionObjectPipeline*>& eops)
286 {
287 DeviceIds ids_eve, ids_dsp;
288 for (uint32_t i = 0; i < num_eves; i++)
289 ids_eve.insert(static_cast<DeviceId>(i));
290 for (uint32_t i = 0; i < num_dsps; i++)
291 ids_dsp.insert(static_cast<DeviceId>(i));
293 switch(num_layers_groups)
294 {
295 case 1: // Single layers group
296 e_eve = num_eves == 0 ? nullptr :
297 new Executor(DeviceType::EVE, ids_eve, configuration);
298 e_dsp = num_dsps == 0 ? nullptr :
299 new Executor(DeviceType::DSP, ids_dsp, configuration);
301 // Construct ExecutionObjectPipeline with single Execution Object to
302 // process each frame. This is parallel processing of frames with
303 // as many DSP and EVE cores that we have on hand.
304 for (uint32_t i = 0; i < num_eves; i++)
305 eops.push_back(new ExecutionObjectPipeline({(*e_eve)[i]}));
306 for (uint32_t i = 0; i < num_dsps; i++)
307 eops.push_back(new ExecutionObjectPipeline({(*e_dsp)[i]}));
308 break;
310 case 2: // Two layers group
311 // JacintoNet11 specific : specify only layers that will be in
312 // layers group 2 ... by default all other layers are in group 1.
313 configuration.layerIndex2LayerGroupId = { {12, 2}, {13, 2}, {14, 2} };
315 // Create Executors with the approriate core type, number of cores
316 // and configuration specified
317 // EVE will run layersGroupId 1 in the network, while
318 // DSP will run layersGroupId 2 in the network
319 e_eve = num_eves == 0 ? nullptr :
320 new Executor(DeviceType::EVE, ids_eve, configuration, 1);
321 e_dsp = num_dsps == 0 ? nullptr :
322 new Executor(DeviceType::DSP, ids_dsp, configuration, 2);
324 // Construct ExecutionObjectPipeline that utilizes multiple
325 // ExecutionObjects to process a single frame, each ExecutionObject
326 // processes one layerGroup of the network
327 for (uint32_t i = 0; i < std::max(num_eves, num_dsps); i++)
328 eops.push_back(new ExecutionObjectPipeline({(*e_eve)[i%num_eves],
329 (*e_dsp)[i%num_dsps]}));
330 break;
332 default:
333 std::cout << "Layers groups can be either 1 or 2!" << std::endl;
334 return false;
335 break;
336 }
338 return true;
339 }
341 void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops)
342 {
343 for (auto eop : eops)
344 {
345 size_t in_size = eop->GetInputBufferSizeInBytes();
346 size_t out_size = eop->GetOutputBufferSizeInBytes();
347 void* in_ptr = malloc(in_size);
348 void* out_ptr = malloc(out_size);
349 assert(in_ptr != nullptr && out_ptr != nullptr);
351 ArgInfo in(in_ptr, in_size);
352 ArgInfo out(out_ptr, out_size);
353 eop->SetInputOutputBuffer(in, out);
354 }
355 }
// Create all display windows: the main video window (title encodes the
// device mix, e.g. "Imagenet_EVEx2_DSPx1"), per-ROI preview windows when
// more than one ROI is configured, the TIDL SW-stack splash image, and the
// "ClassList" panel listing the user-selected classes.
void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps)
{
#ifdef LIVE_DISPLAY
    // BUG FIX: the arguments are uint32_t, so %u is the matching conversion
    // (%d with an unsigned argument is undefined behavior per printf spec);
    // snprintf bounds the write to the 160-byte global buffer.
    snprintf(imagenet_win, sizeof(imagenet_win),
             "Imagenet_EVEx%u_DSPx%u", num_eves, num_dsps);

    if(NUM_ROI > 1)
    {
        // One preview window per region of interest
        for(int i = 0; i < NUM_ROI; i ++) {
            char tmp_string[80];
            snprintf(tmp_string, sizeof(tmp_string), "ROI[%02d]", i);
            namedWindow(tmp_string, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
        }
    }
    Mat sw_stack_image = imread(
        "/usr/share/ti/tidl/examples/classification/tidl-sw-stack-small.png",
        IMREAD_COLOR); // Read the file
    if( sw_stack_image.empty() )  // Check for invalid input
    {
        // Non-fatal: the demo runs fine without the splash image
        std::cout << "Could not open or find the tidl-sw-stack-small image"
                  << std::endl ;
    } else {
        // Create a window for display.
        namedWindow( "TIDL SW Stack", WINDOW_AUTOSIZE | CV_GUI_NORMAL );
        // Show our image inside it.
        cv::imshow( "TIDL SW Stack", sw_stack_image );
    }

    namedWindow("ClassList", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    namedWindow(imagenet_win, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    //set the callback function for any mouse event
    setMouseCallback(imagenet_win, imagenetCallBackFunc, NULL);

    // 20 px per class row plus a 40 px header area for the FPS readout
    classlist_image = cv::Mat::zeros(40 + selected_items_size * 20, 220,
                                     CV_8UC3);
    //Erase window
    classlist_image.setTo(Scalar::all(0));

    for (int i = 0; i < selected_items_size; i ++)
    {
        snprintf(tmp_classwindow_string, sizeof(tmp_classwindow_string),
                 "%2d) %12s", 1+i,
                 labels_classes[selected_items[i]].c_str());
        cv::putText(classlist_image, tmp_classwindow_string,
                    cv::Point(5, 40 + i * 20),
                    cv::FONT_HERSHEY_COMPLEX_SMALL,
                    0.75,
                    cv::Scalar(255,255,255), 1, 8);
    }
    cv::imshow("ClassList", classlist_image);
#endif
}
408 bool SetupInput(VideoCapture& cap, VideoWriter& writer)
409 {
410 if(live_input >= 0)
411 {
412 cap.open(live_input);
414 const double fps = cap.get(CAP_PROP_FPS);
415 const int width = cap.get(CAP_PROP_FRAME_WIDTH);
416 const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
417 std::cout << "Capture camera with " << fps << " fps, " << width << "x"
418 << height << " px" << std::endl;
420 #ifdef RMT_GST_STREAMER
421 writer.open(" appsrc ! videoconvert ! video/x-raw, format=(string)NV12, width=(int)640, height=(int)480, framerate=(fraction)30/1 ! \
422 ducatih264enc bitrate=2000 ! queue ! h264parse config-interval=1 ! \
423 mpegtsmux ! udpsink host=192.168.1.2 sync=false port=5000",
424 0,fps,Size(640,480),true);
426 if (!writer.isOpened()) {
427 cap.release();
428 std::cerr << "Can't create gstreamer writer. "
429 << "Do you have the correct version installed?" << std::endl;
430 std::cerr << "Print out OpenCV build information" << std::endl;
431 std::cout << getBuildInformation() << std::endl;
432 return false;
433 }
434 #endif
435 } else {
436 std::cout << "Video input clip: " << video_clip << std::endl;
437 cap.open(std::string(video_clip));
438 const double fps = cap.get(CAP_PROP_FPS);
439 const int width = cap.get(CAP_PROP_FRAME_WIDTH);
440 const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
441 std::cout << "Clip with " << fps << " fps, " << width << "x"
442 << height << " px" << std::endl;
443 }
445 if (!cap.isOpened()) {
446 std::cout << "Video input not opened!" << std::endl;
447 return false;
448 }
450 for (int y = 0; y < NUM_ROI_Y; y ++) {
451 for (int x = 0; x < NUM_ROI_X; x ++) {
452 rectCrop[y * NUM_ROI_X + x] = Rect(X_OFFSET + x * X_STEP,
453 Y_OFFSET + y * Y_STEP, X_STEP, Y_STEP);
454 std::cout << "Rect[" << X_OFFSET + x * X_STEP << ", "
455 << Y_OFFSET + y * Y_STEP << "]" << std::endl;
456 }
457 }
459 return true;
460 }
462 bool ReadFrame(ExecutionObjectPipeline* eop,
463 uint32_t frame_idx, uint32_t num_frames,
464 VideoCapture &cap, VideoWriter& writer)
465 {
466 if (cap.grab() && frame_idx < num_frames)
467 {
468 if (cap.retrieve(in_image))
469 {
470 if(live_input >= 0)
471 { //Crop central square portion
472 int loc_xmin = (in_image.size().width - in_image.size().height) / 2; //Central position
473 int loc_ymin = 0;
474 int loc_w = in_image.size().height;
475 int loc_h = in_image.size().height;
477 cv::resize(in_image(Rect(loc_xmin, loc_ymin, loc_w, loc_h)), image, Size(RES_X, RES_Y));
478 } else {
479 if((in_image.size().width != RES_X) || (in_image.size().height != RES_Y))
480 {
481 cv::resize(in_image, image, Size(RES_X,RES_Y));
482 }
483 }
485 r_image = Mat(image, rectCrop[frame_idx % NUM_ROI]);
487 #ifdef LIVE_DISPLAY
488 if(NUM_ROI > 1)
489 {
490 char tmp_string[80];
491 sprintf(tmp_string, "ROI[%02d]", frame_idx % NUM_ROI);
492 cv::imshow(tmp_string, r_image);
493 }
494 #endif
495 //Convert from BGR pixel interleaved to BGR plane interleaved!
496 cv::resize(r_image, cnn_image, Size(224,224));
497 cv::split(cnn_image, bgr_frames);
498 tf_preprocess((uchar*) eop->GetInputBufferPtr(),
499 bgr_frames[0].ptr(), 224*224);
500 tf_preprocess((uchar*) eop->GetInputBufferPtr()+224*224,
501 bgr_frames[1].ptr(), 224*224);
502 tf_preprocess((uchar*) eop->GetInputBufferPtr()+2*224*224,
503 bgr_frames[2].ptr(), 224*224);
504 eop->SetFrameIndex(frame_idx);
506 #ifdef RMT_GST_STREAMER
507 cv::resize(Mat(image, Rect(0,32,640,448)), to_stream,
508 Size(640,480));
509 writer << to_stream;
510 #endif
512 #ifdef LIVE_DISPLAY
513 //waitKey(2);
514 image.copyTo(show_image);
515 #endif
516 return true;
517 }
518 } else {
519 if(live_input == -1) {
520 //Rewind!
521 cap.release();
522 cap.open(std::string(video_clip));
523 }
524 }
526 return false;
527 }
529 void ReportTime(const ExecutionObjectPipeline* eop)
530 {
531 uint32_t frame_index = eop->GetFrameIndex();
532 std::string device_name = eop->GetDeviceName();
533 float elapsed_host = eop->GetHostProcessTimeInMilliSeconds();
534 float elapsed_device = eop->GetProcessTimeInMilliSeconds();
535 double overhead = 100 - (elapsed_device/elapsed_host*100);
536 std::cout << "frame[" << frame_index << "]: "
537 << "Time on " << device_name << ": "
538 << std::setw(6) << std::setprecision(4)
539 << elapsed_device << "ms, "
540 << "host: "
541 << std::setw(6) << std::setprecision(4)
542 << elapsed_host << "ms ";
543 std::cout << "API overhead: "
544 << std::setw(6) << std::setprecision(3)
545 << overhead << " %" << std::endl;
546 }
// Post-process one finished frame: classify the EOP's output, push the
// result into the ROI's 3-deep voting history, and refresh the overlay,
// class list, FPS estimate, display window and (optionally) the remote
// stream. Mutates globals: selclass_history, show_image, classlist_image,
// avg_fps, tmp_classwindow_string.
void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
                  uint32_t frame_idx, uint32_t num_eops,
                  uint32_t num_eves, uint32_t num_dsps)
{
    int f_id = eop->GetFrameIndex();
    int curr_roi = f_id % NUM_ROI;
    int is_object = tf_postprocess((uchar*) eop->GetOutputBufferPtr(),
                                   IMAGE_CLASSES_NUM, curr_roi, frame_idx, f_id);
    // Shift the history window: [0] newest, [2] oldest (see ShowRegion)
    selclass_history[curr_roi][2] = selclass_history[curr_roi][1];
    selclass_history[curr_roi][1] = selclass_history[curr_roi][0];
    selclass_history[curr_roi][0] = is_object;
    for (int r = 0; r < NUM_ROI; r ++)
    {
        // ShowRegion returns a class id only when it was seen in at least
        // two of the last three frames for this ROI, else -1
        int rpt_id = ShowRegion(selclass_history[r]);
        if(rpt_id >= 0)
        {
            // overlay the display window, if ball seen during last two times
            cv::putText(show_image, labels_classes[rpt_id].c_str(),
                        cv::Point(rectCrop[r].x + 5,rectCrop[r].y + 20), // Coordinates
                        cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
                        1.0, // Scale. 2.0 = 2x bigger
                        cv::Scalar(0,0,255), // Color
                        1, // Thickness
                        8); // Line type
            cv::rectangle(show_image, rectCrop[r], Scalar(255,0,0), 3);
            std::cout << "ROI(" << r << ")(" << rpt_id << ")="
                      << labels_classes[rpt_id].c_str() << std::endl;

            // Redraw the class-list panel, highlighting the detected class
            classlist_image.setTo(Scalar::all(0));
            for (int k = 0; k < selected_items_size; k ++)
            {
                sprintf(tmp_classwindow_string, "%2d) %12s", 1+k,
                        labels_classes[selected_items[k]].c_str());
                cv::putText(classlist_image, tmp_classwindow_string,
                            cv::Point(5, 40 + k * 20),
                            cv::FONT_HERSHEY_COMPLEX_SMALL,
                            0.75,
                            selected_items[k] == rpt_id ? cv::Scalar(0,0,255) :
                            cv::Scalar(255,255,255), 1, 8);
            }
            double elapsed_host = eop->GetHostProcessTimeInMilliSeconds();
            /* Exponential averaging: new estimate weighted 10%, history 90% */
            avg_fps = 0.1 * ((double)num_eops * 1000.0 /
                             ((double)NUM_ROI * elapsed_host)) + 0.9 * avg_fps;
            sprintf(tmp_classwindow_string, "FPS:%5.2lf", avg_fps );

#ifdef PERF_VERBOSE
            std::cout << "Device:" << eop->GetDeviceName() << " eops("
                      << num_eops << "), EVES(" << num_eves << ") DSPS("
                      << num_dsps << ") FPS:" << avg_fps << std::endl;
#endif
            // FPS readout in the panel header area (green)
            cv::putText(classlist_image, tmp_classwindow_string,
                        cv::Point(5, 20),
                        cv::FONT_HERSHEY_COMPLEX_SMALL,
                        0.75,
                        cv::Scalar(0,255,0), 1, 8);
            cv::imshow("ClassList", classlist_image);
        }
    }

#ifdef LIVE_DISPLAY
    cv::imshow(imagenet_win, show_image);
#endif

#ifdef RMT_GST_STREAMER
    cv::resize(show_image, to_stream, cv::Size(640,480));
    writer << to_stream;
#endif

#ifdef LIVE_DISPLAY
    // waitKey gives the GUI event loop a chance to run
    waitKey(2);
#endif
}
622 // Function to process all command line arguments
623 void ProcessArgs(int argc, char *argv[], std::string& config_file,
624 uint32_t & num_dsps, uint32_t & num_eves, int & num_layers_groups )
625 {
626 const struct option long_options[] =
627 {
628 {"labels_classes_file", required_argument, 0, 'l'},
629 {"selected_classes_file", required_argument, 0, 's'},
630 {"config_file", required_argument, 0, 'c'},
631 {"num_dsps", required_argument, 0, 'd'},
632 {"num_eves", required_argument, 0, 'e'},
633 {"num_layers_groups", required_argument, 0, 'g'},
634 {"help", no_argument, 0, 'h'},
635 {"verbose", no_argument, 0, 'v'},
636 {0, 0, 0, 0}
637 };
639 int option_index = 0;
641 while (true)
642 {
643 int c = getopt_long(argc, argv, "l:c:s:i:d:e:g:hv", long_options, &option_index);
645 if (c == -1)
646 break;
648 switch (c)
649 {
650 case 'l': populate_labels(optarg);
651 break;
653 case 's': populate_selected_items(optarg);
654 break;
656 case 'i': if(strlen(optarg) == 1)
657 {
658 live_input = atoi(optarg);
659 } else {
660 live_input = -1;
661 strcpy(video_clip, optarg);
662 }
663 break;
665 case 'c': config_file = optarg;
666 break;
668 case 'g': num_layers_groups = atoi(optarg);
669 assert(num_layers_groups >= 1 && num_layers_groups <= 2);
670 break;
672 case 'd': num_dsps = atoi(optarg);
673 assert (num_dsps >= 0 && num_dsps <= 2);
674 break;
676 case 'e': num_eves = atoi(optarg);
677 assert (num_eves >= 0 && num_eves <= 2);
678 break;
680 case 'v': __TI_show_debug_ = true;
681 break;
683 case 'h': DisplayHelp();
684 exit(EXIT_SUCCESS);
685 break;
687 case '?': // Error in getopt_long
688 exit(EXIT_FAILURE);
689 break;
691 default:
692 std::cerr << "Unsupported option: " << c << std::endl;
693 break;
694 }
695 }
697 // if no eves available, we can only run full net as one layer group
698 if (num_eves == 0) num_layers_groups = 1;
699 }
// Print the command-line usage summary (shown for -h/--help).
void DisplayHelp()
{
    std::cout << "Usage: tidl_classification\n";
    std::cout << " Will run all available networks if tidl is invoked without";
    std::cout << " any arguments.\n Use -c to run a single network.\n";
    std::cout << "Optional arguments:\n";
    std::cout << " -c Path to the configuration file\n";
    std::cout << " -d <number of DSP cores> Number of DSP cores to use (0 - 2)\n";
    std::cout << " -e <number of EVE cores> Number of EVE cores to use (0 - 2)\n";
    std::cout << " -g <1|2> Number of layer groups\n";
    std::cout << " -l List of label strings (of all classes in model)\n";
    std::cout << " -s List of strings with selected classes\n";
    std::cout << " -i Video input (for camera:0,1 or video clip)\n";
    std::cout << " -v Verbose output during execution\n";
    std::cout << " -h Help\n";
}
718 // Function to filter all the reported decisions
719 bool tf_expected_id(int id)
720 {
721 // Filter out unexpected IDs
722 for (int i = 0; i < selected_items_size; i ++)
723 {
724 if(id == selected_items[i]) return true;
725 }
726 return false;
727 }
729 int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id)
730 {
731 //prob_i = exp(TIDL_Lib_output_i) / sum(exp(TIDL_Lib_output))
732 // sort and get k largest values and corresponding indices
733 const int k = TOP_CANDIDATES;
734 int rpt_id = -1;
736 typedef std::pair<uchar, int> val_index;
737 auto constexpr cmp = [](val_index &left, val_index &right) { return left.first > right.first; };
738 std::priority_queue<val_index, std::vector<val_index>, decltype(cmp)> queue(cmp);
739 // initialize priority queue with smallest value on top
740 for (int i = 0; i < k; i++) {
741 queue.push(val_index(in[i], i));
742 }
743 // for rest input, if larger than current minimum, pop mininum, push new val
744 for (int i = k; i < size; i++)
745 {
746 if (in[i] > queue.top().first)
747 {
748 queue.pop();
749 queue.push(val_index(in[i], i));
750 }
751 }
753 // output top k values in reverse order: largest val first
754 std::vector<val_index> sorted;
755 while (! queue.empty())
756 {
757 sorted.push_back(queue.top());
758 queue.pop();
759 }
761 for (int i = k-1; i >= 0; i--)
762 {
763 int id = sorted[i].second;
765 if (tf_expected_id(id))
766 {
767 std::cout << "Frame:" << frame_idx << "," << f_id << " ROI[" << roi_idx << "]: rank="
768 << k-i << ", outval=" << (float)sorted[i].first / 255 << ", "
769 << labels_classes[sorted[i].second] << std::endl;
770 rpt_id = id;
771 }
772 }
773 return rpt_id;
774 }
776 void tf_preprocess(uchar *out, uchar *in, int size)
777 {
778 for (int i = 0; i < size; i++)
779 {
780 out[i] = (uchar) (in[i] /*- 128*/);
781 }
782 }
// Temporal de-bouncing: report a class id only when it appears at least
// twice in the ROI's last three results (index 0 = newest). Returns the
// agreed class id, or -1 when there is no two-of-three match.
int ShowRegion(int roi_history[])
{
    const int newest = roi_history[0];
    const int middle = roi_history[1];
    const int oldest = roi_history[2];

    if (newest >= 0 && (newest == middle || newest == oldest))
        return newest;
    if (middle >= 0 && middle == oldest)
        return middle;
    return -1;
}