1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
#include <signal.h>
#include <getopt.h>
#include <time.h>
#include <memory.h>
#include <string.h>

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iostream>
#include <queue>
#include <string>

#include "executor.h"
#include "execution_object.h"
#include "execution_object_pipeline.h"
#include "configuration.h"
#include "avg_fps_window.h"

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"
// Build-time feature switches.
//#define TWO_ROIs
#define LIVE_DISPLAY
#define PERF_VERBOSE
//#define RMT_GST_STREAMER

// Upper bound on regions of interest; sizes selclass_history.
#define MAX_NUM_ROI 4

// Camera index when >= 0; -1 selects playback of the clip named in video_clip.
int live_input = 1;
char video_clip[320];

// ROI grid geometry: RES_X/RES_Y is the working frame size; the OFFSET/STEP
// values carve it into NUM_ROI_X x NUM_ROI_Y crop rectangles (see SetupInput).
#ifdef TWO_ROIs
#define RES_X 400
#define RES_Y 300
#define NUM_ROI_X 2
#define NUM_ROI_Y 1
#define X_OFFSET 0
#define X_STEP 176
#define Y_OFFSET 52
#define Y_STEP 224
#else
#define RES_X 480
#define RES_Y 480
#define NUM_ROI_X 1
#define NUM_ROI_Y 1
#define X_OFFSET 10
#define X_STEP 460
#define Y_OFFSET 10
#define Y_STEP 460
#endif

#define NUM_ROI (NUM_ROI_X * NUM_ROI_Y)

//Temporal averaging
// Number of top-scoring candidates examined per frame in tf_postprocess().
int TOP_CANDIDATES = 3;

using namespace tidl;
using namespace cv;
#ifdef LIVE_DISPLAY
char imagenet_win[160];            // main display window title / identifier
char tmp_classwindow_string[160];  // scratch buffer for class-list text lines
Mat classlist_image;               // rendered class-list + FPS side window

// Mouse handler for the main display window: a right-button click anywhere
// in the window terminates the application immediately.
void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata)
{
    if (event != EVENT_RBUTTONDOWN)
        return;

    std::cout << "Right button of the mouse is clicked - position (" << x
              << ", " << y << ")"
              << " ... prepare to exit!" << std::endl;
    exit(0);
}
#endif
// Scratch images shared between capture, pre-processing and display code.
Mat in_image, image, r_image, cnn_image, show_image, bgr_frames[3];
Mat to_stream;
// Crop rectangle of each region of interest inside the working frame.
Rect rectCrop[NUM_ROI];
// Report average FPS across a sliding window of 16 frames
AvgFPSWindow fps_window(16);

static int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id);
static int ShowRegion(int roi_history[]);
// from most recent to oldest at top indices
// Last three classification results per ROI; -1 means "no class reported".
static int selclass_history[MAX_NUM_ROI][3];

bool RunConfiguration(const std::string& config_file, int num_layers_groups,
                      uint32_t num_dsps, uint32_t num_eves);
bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
                                    Configuration& configuration,
                                    uint32_t num_layers_groups,
                                    Executor*& e_eve, Executor*& e_dsp,
                                    std::vector<ExecutionObjectPipeline*>& eops);
void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops);
void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps);
bool SetupInput(VideoCapture& cap, VideoWriter& writer);
bool ReadFrame(ExecutionObjectPipeline* eop, const Configuration& c,
               int frame_idx, VideoCapture &cap, VideoWriter& writer);
void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
                  uint32_t frame_idx, uint32_t num_eops,
                  uint32_t num_eves, uint32_t num_dsps);
static void ProcessArgs(int argc, char *argv[],
                        std::string& config_file,
                        uint32_t & num_dsps, uint32_t &num_eves,
                        int & num_layers_groups);

static void DisplayHelp();

// Label and selected-class tables are defined in a separate translation unit.
extern std::string labels_classes[];
extern int IMAGE_CLASSES_NUM;
extern int selected_items_size;
extern int selected_items[];
extern int populate_selected_items (char *filename);
extern void populate_labels (char *filename);

// Set by -v; enables TIDL API tracing in RunConfiguration().
bool verbose = false;
148 int main(int argc, char *argv[])
149 {
150 // Catch ctrl-c to ensure a clean exit
151 signal(SIGABRT, exit);
152 signal(SIGTERM, exit);
154 // If there are no devices capable of offloading TIDL on the SoC, exit
155 uint32_t num_eves = Executor::GetNumDevices(DeviceType::EVE);
156 uint32_t num_dsps = Executor::GetNumDevices(DeviceType::DSP);
157 int num_layers_groups = 1;
159 if (num_eves == 0 && num_dsps == 0)
160 {
161 std::cout << "TI DL not supported on this SoC." << std::endl;
162 return EXIT_SUCCESS;
163 }
165 // Process arguments
166 std::string config_file;
167 ProcessArgs(argc, argv, config_file, num_dsps, num_eves, num_layers_groups);
169 bool status = false;
170 if (!config_file.empty()) {
171 std::cout << "Run single configuration: " << config_file << std::endl;
172 status = RunConfiguration(config_file, num_layers_groups, num_dsps, num_eves);
173 }
175 if (!status)
176 {
177 std::cout << "tidl FAILED" << std::endl;
178 return EXIT_FAILURE;
179 }
181 std::cout << "tidl PASSED" << std::endl;
182 return EXIT_SUCCESS;
183 }
// Runs the network described by config_file on the requested mix of EVE/DSP
// cores, streaming frames through a round-robin set of
// ExecutionObjectPipelines. Returns false on configuration/setup failure or
// TIDL runtime exception, true otherwise.
bool RunConfiguration(const std::string& config_file, int num_layers_groups, uint32_t num_dsps, uint32_t num_eves)
{
    // Read the TI DL configuration file
    Configuration configuration;
    if (!configuration.ReadFromFile(config_file))
        return false;

    if (verbose)
        configuration.enableApiTrace = true;

    try
    {
        // Create ExecutionObjectPipelines
        Executor *e_eve = nullptr;
        Executor *e_dsp = nullptr;
        std::vector<ExecutionObjectPipeline *> eops;
        if (! CreateExecutionObjectPipelines(num_eves, num_dsps, configuration,
                                             num_layers_groups, e_eve, e_dsp, eops))
            return false;

        // Allocate input/output memory for each EOP
        AllocateMemory(eops);

        // Setup Live Display
        SetupLiveDisplay(num_eves, num_dsps);

        // Setup Input
        VideoCapture cap;
        VideoWriter writer; // gstreamer
        if (! SetupInput(cap, writer)) return false;

        // More initialization
        // Reset per-ROI classification history to "no class seen yet".
        for (int k = 0; k < NUM_ROI; k++)
            for(int i = 0; i < 3; i ++)
                selclass_history[k][i] = -1;
        std::cout << "About to start ProcessFrame loop!!" << std::endl;

        // Process frames with available EOPs in a pipelined manner
        // additional num_eops iterations to flush the pipeline (epilogue)
        int num_eops = eops.size();
        for (int frame_idx = 0;
             frame_idx < configuration.numFrames + num_eops; frame_idx++)
        {
            // Round-robin over EOPs so host pre/post-processing of one frame
            // overlaps with device processing of the others.
            ExecutionObjectPipeline* eop = eops[frame_idx % num_eops];

            // Wait for previous frame on the same eo to finish processing
            if (eop->ProcessFrameWait())
            {
                DisplayFrame(eop, writer, frame_idx, num_eops,
                             num_eves, num_dsps);
            }
            fps_window.Tick();

            if (ReadFrame(eop, configuration, frame_idx, cap, writer))
                eop->ProcessFrameStartAsync();
        }

        // Cleanup
        // Buffers were malloc'd in AllocateMemory(), so free() is the match.
        for (auto eop : eops)
        {
            free(eop->GetInputBufferPtr());
            free(eop->GetOutputBufferPtr());
            delete eop;
        }
        if (e_dsp) delete e_dsp;
        if (e_eve) delete e_eve;
    }
    catch (tidl::Exception &e)
    {
        std::cerr << e.what() << std::endl;
        return false;
    }

    return true;
}
263 bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
264 Configuration& configuration,
265 uint32_t num_layers_groups,
266 Executor*& e_eve, Executor*& e_dsp,
267 std::vector<ExecutionObjectPipeline*>& eops)
268 {
269 DeviceIds ids_eve, ids_dsp;
270 for (uint32_t i = 0; i < num_eves; i++)
271 ids_eve.insert(static_cast<DeviceId>(i));
272 for (uint32_t i = 0; i < num_dsps; i++)
273 ids_dsp.insert(static_cast<DeviceId>(i));
274 const uint32_t buffer_factor = 2;
276 switch(num_layers_groups)
277 {
278 case 1: // Single layers group
279 e_eve = num_eves == 0 ? nullptr :
280 new Executor(DeviceType::EVE, ids_eve, configuration);
281 e_dsp = num_dsps == 0 ? nullptr :
282 new Executor(DeviceType::DSP, ids_dsp, configuration);
284 // Construct ExecutionObjectPipeline with single Execution Object to
285 // process each frame. This is parallel processing of frames with
286 // as many DSP and EVE cores that we have on hand.
287 // If buffer_factor == 2, duplicating EOPs for double buffering
288 // and overlapping host pre/post-processing with device processing
289 for (uint32_t j = 0; j < buffer_factor; j++)
290 {
291 for (uint32_t i = 0; i < num_eves; i++)
292 eops.push_back(new ExecutionObjectPipeline({(*e_eve)[i]}));
293 for (uint32_t i = 0; i < num_dsps; i++)
294 eops.push_back(new ExecutionObjectPipeline({(*e_dsp)[i]}));
295 }
296 break;
298 case 2: // Two layers group
299 // JacintoNet11 specific : specify only layers that will be in
300 // layers group 2 ... by default all other layers are in group 1.
301 configuration.layerIndex2LayerGroupId = { {12, 2}, {13, 2}, {14, 2} };
303 // Create Executors with the approriate core type, number of cores
304 // and configuration specified
305 // EVE will run layersGroupId 1 in the network, while
306 // DSP will run layersGroupId 2 in the network
307 e_eve = num_eves == 0 ? nullptr :
308 new Executor(DeviceType::EVE, ids_eve, configuration, 1);
309 e_dsp = num_dsps == 0 ? nullptr :
310 new Executor(DeviceType::DSP, ids_dsp, configuration, 2);
312 // Construct ExecutionObjectPipeline that utilizes multiple
313 // ExecutionObjects to process a single frame, each ExecutionObject
314 // processes one layerGroup of the network
315 // If buffer_factor == 2, duplicating EOPs for pipelining at
316 // EO level rather than at EOP level, in addition to double buffering
317 // and overlapping host pre/post-processing with device processing
318 for (uint32_t j = 0; j < buffer_factor; j++)
319 {
320 for (uint32_t i = 0; i < std::max(num_eves, num_dsps); i++)
321 eops.push_back(new ExecutionObjectPipeline(
322 {(*e_eve)[i%num_eves], (*e_dsp)[i%num_dsps]}));
323 }
324 break;
326 default:
327 std::cout << "Layers groups can be either 1 or 2!" << std::endl;
328 return false;
329 break;
330 }
332 return true;
333 }
335 void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops)
336 {
337 for (auto eop : eops)
338 {
339 size_t in_size = eop->GetInputBufferSizeInBytes();
340 size_t out_size = eop->GetOutputBufferSizeInBytes();
341 void* in_ptr = malloc(in_size);
342 void* out_ptr = malloc(out_size);
343 assert(in_ptr != nullptr && out_ptr != nullptr);
345 ArgInfo in(in_ptr, in_size);
346 ArgInfo out(out_ptr, out_size);
347 eop->SetInputOutputBuffer(in, out);
348 }
349 }
// Creates all display windows (main video window, optional per-ROI windows,
// the TIDL SW-stack image and the class list), registers the right-click
// exit callback and renders the initial class list. No-op unless
// LIVE_DISPLAY is defined.
void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps)
{
#ifdef LIVE_DISPLAY
    // The title doubles as the window identifier for imshow()/callbacks.
    sprintf(imagenet_win, "Imagenet_EVEx%d_DSPx%d", num_eves, num_dsps);

    if(NUM_ROI > 1)
    {
        for(int i = 0; i < NUM_ROI; i ++) {
            char tmp_string[80];
            sprintf(tmp_string, "ROI[%02d]", i);
            namedWindow(tmp_string, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
        }
    }
    Mat sw_stack_image = imread(
        "/usr/share/ti/tidl/examples/classification/tidl-sw-stack-small.png",
        IMREAD_COLOR); // Read the file
    if( sw_stack_image.empty() )   // Check for invalid input
    {
        // Non-fatal: the demo runs without the SW-stack splash image.
        std::cout << "Could not open or find the tidl-sw-stack-small image"
                  << std::endl ;
    } else {
        // Create a window for display.
        namedWindow( "TIDL SW Stack", WINDOW_AUTOSIZE | CV_GUI_NORMAL );
        // Show our image inside it.
        cv::imshow( "TIDL SW Stack", sw_stack_image );
    }

    namedWindow("ClassList", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    namedWindow(imagenet_win, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    //set the callback function for any mouse event
    setMouseCallback(imagenet_win, imagenetCallBackFunc, NULL);

    // One 20 px row per selectable class plus a 40 px header (FPS line).
    classlist_image = cv::Mat::zeros(40 + selected_items_size * 20, 220,
                                     CV_8UC3);
    //Erase window
    classlist_image.setTo(Scalar::all(0));

    for (int i = 0; i < selected_items_size; i ++)
    {
        sprintf(tmp_classwindow_string, "%2d) %12s", 1+i,
                labels_classes[selected_items[i]].c_str());
        cv::putText(classlist_image, tmp_classwindow_string,
                    cv::Point(5, 40 + i * 20),
                    cv::FONT_HERSHEY_COMPLEX_SMALL,
                    0.75,
                    cv::Scalar(255,255,255), 1, 8);
    }
    cv::imshow("ClassList", classlist_image);
#endif
}
// Opens the video source (camera index when live_input >= 0, otherwise the
// clip named in video_clip), optionally opens a gstreamer writer for remote
// streaming, and precomputes the ROI crop rectangles.
// Returns false if the source (or the remote writer) cannot be opened.
bool SetupInput(VideoCapture& cap, VideoWriter& writer)
{
    if(live_input >= 0)
    {
        cap.open(live_input);

        const double fps = cap.get(CAP_PROP_FPS);
        const int width  = cap.get(CAP_PROP_FRAME_WIDTH);
        const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
        std::cout << "Capture camera with " << fps << " fps, " << width << "x"
                  << height << " px" << std::endl;

#ifdef RMT_GST_STREAMER
        // H.264-encode frames and stream them over UDP to a remote host.
        writer.open(" appsrc ! videoconvert ! video/x-raw, format=(string)NV12, width=(int)640, height=(int)480, framerate=(fraction)30/1 ! \
                ducatih264enc bitrate=2000 ! queue ! h264parse config-interval=1 ! \
                mpegtsmux ! udpsink host=192.168.1.2 sync=false port=5000",
                0,fps,Size(640,480),true);

        if (!writer.isOpened()) {
            cap.release();
            std::cerr << "Can't create gstreamer writer. "
                      << "Do you have the correct version installed?" << std::endl;
            std::cerr << "Print out OpenCV build information" << std::endl;
            std::cout << getBuildInformation() << std::endl;
            return false;
        }
#endif
    } else {
        std::cout << "Video input clip: " << video_clip << std::endl;
        cap.open(std::string(video_clip));
        const double fps = cap.get(CAP_PROP_FPS);
        const int width  = cap.get(CAP_PROP_FRAME_WIDTH);
        const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
        std::cout << "Clip with " << fps << " fps, " << width << "x"
                  << height << " px" << std::endl;
    }

    if (!cap.isOpened()) {
        std::cout << "Video input not opened!" << std::endl;
        return false;
    }

    // Carve the working frame into a NUM_ROI_X x NUM_ROI_Y grid of crops.
    for (int y = 0; y < NUM_ROI_Y; y ++) {
        for (int x = 0; x < NUM_ROI_X; x ++) {
            rectCrop[y * NUM_ROI_X + x] = Rect(X_OFFSET + x * X_STEP,
                Y_OFFSET + y * Y_STEP, X_STEP, Y_STEP);
            std::cout << "Rect[" << X_OFFSET + x * X_STEP << ", "
                      << Y_OFFSET + y * Y_STEP << "]" << std::endl;
        }
    }

    return true;
}
456 bool ReadFrame(ExecutionObjectPipeline* eop, const Configuration& c,
457 int frame_idx, VideoCapture &cap, VideoWriter& writer)
458 {
460 if (cap.grab() && frame_idx < c.numFrames)
461 {
462 if (cap.retrieve(in_image))
463 {
464 if(live_input >= 0)
465 { //Crop central square portion
466 int loc_xmin = (in_image.size().width - in_image.size().height) / 2; //Central position
467 int loc_ymin = 0;
468 int loc_w = in_image.size().height;
469 int loc_h = in_image.size().height;
471 cv::resize(in_image(Rect(loc_xmin, loc_ymin, loc_w, loc_h)), image, Size(RES_X, RES_Y));
472 } else {
473 if((in_image.size().width != RES_X) || (in_image.size().height != RES_Y))
474 {
475 cv::resize(in_image, image, Size(RES_X,RES_Y));
476 }
477 }
479 r_image = Mat(image, rectCrop[frame_idx % NUM_ROI]);
481 #ifdef LIVE_DISPLAY
482 if(NUM_ROI > 1)
483 {
484 char tmp_string[80];
485 sprintf(tmp_string, "ROI[%02d]", frame_idx % NUM_ROI);
486 cv::imshow(tmp_string, r_image);
487 }
488 #endif
489 //Convert from BGR pixel interleaved to BGR plane interleaved!
490 cv::resize(r_image, cnn_image, Size(c.inWidth,c.inHeight));
491 cv::split(cnn_image, bgr_frames);
492 int channel_size = c.inWidth * c.inHeight;
494 char* ptr = eop->GetInputBufferPtr();
495 memcpy(ptr, bgr_frames[0].ptr(), channel_size);
496 memcpy(ptr+1*channel_size, bgr_frames[1].ptr(), channel_size);
497 memcpy(ptr+2*channel_size, bgr_frames[2].ptr(), channel_size);
499 eop->SetFrameIndex(frame_idx);
501 #ifdef RMT_GST_STREAMER
502 cv::resize(Mat(image, Rect(0,32,640,448)), to_stream,
503 Size(640,480));
504 writer << to_stream;
505 #endif
507 #ifdef LIVE_DISPLAY
508 //waitKey(2);
509 image.copyTo(show_image);
510 #endif
511 return true;
512 }
513 } else {
514 if(live_input == -1) {
515 //Rewind!
516 cap.release();
517 cap.open(std::string(video_clip));
518 }
519 }
521 return false;
522 }
// Post-processes and displays one completed frame: runs classification
// post-processing, shifts the 3-deep per-ROI class history, overlays the
// label/rectangle for ROIs with a stable detection, refreshes the class-list
// and FPS window, and pushes the frame to the live display and/or the
// gstreamer stream.
void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
                  uint32_t frame_idx, uint32_t num_eops,
                  uint32_t num_eves, uint32_t num_dsps)
{
    // Use the frame index stored on the EOP (set in ReadFrame), not
    // frame_idx: EOPs complete out of order relative to the submit loop.
    int f_id = eop->GetFrameIndex();
    int curr_roi = f_id % NUM_ROI;
    int is_object = tf_postprocess((uchar*) eop->GetOutputBufferPtr(),
                                   IMAGE_CLASSES_NUM, curr_roi, frame_idx, f_id);
    // Shift history: index 0 is the most recent result for this ROI.
    selclass_history[curr_roi][2] = selclass_history[curr_roi][1];
    selclass_history[curr_roi][1] = selclass_history[curr_roi][0];
    selclass_history[curr_roi][0] = is_object;
    for (int r = 0; r < NUM_ROI; r ++)
    {
        // ShowRegion de-bounces: a class must repeat in the recent history.
        int rpt_id = ShowRegion(selclass_history[r]);
        if(rpt_id >= 0)
        {
            // overlay the display window, if ball seen during last two times
            cv::putText(show_image, labels_classes[rpt_id].c_str(),
                        cv::Point(rectCrop[r].x + 5,rectCrop[r].y + 20), // Coordinates
                        cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
                        1.0, // Scale. 2.0 = 2x bigger
                        cv::Scalar(0,0,255), // Color
                        1, // Thickness
                        8); // Line type
            cv::rectangle(show_image, rectCrop[r], Scalar(255,0,0), 3);
            std::cout << "ROI(" << r << ")(" << rpt_id << ")="
                      << labels_classes[rpt_id].c_str() << std::endl;

            // Redraw the class list, highlighting the reported class in red.
            classlist_image.setTo(Scalar::all(0));
            for (int k = 0; k < selected_items_size; k ++)
            {
                sprintf(tmp_classwindow_string, "%2d) %12s", 1+k,
                        labels_classes[selected_items[k]].c_str());
                cv::putText(classlist_image, tmp_classwindow_string,
                            cv::Point(5, 40 + k * 20),
                            cv::FONT_HERSHEY_COMPLEX_SMALL,
                            0.75,
                            selected_items[k] == rpt_id ? cv::Scalar(0,0,255) :
                            cv::Scalar(255,255,255), 1, 8);
            }

            double avg_fps = fps_window.UpdateAvgFPS();
            sprintf(tmp_classwindow_string, "FPS:%5.2lf", avg_fps );

#ifdef PERF_VERBOSE
            std::cout << "Device:" << eop->GetDeviceName() << " eops("
                      << num_eops << "), EVES(" << num_eves << ") DSPS("
                      << num_dsps << ") FPS:" << avg_fps << std::endl;
#endif
            // FPS readout in green at the top of the class-list window.
            cv::putText(classlist_image, tmp_classwindow_string,
                        cv::Point(5, 20),
                        cv::FONT_HERSHEY_COMPLEX_SMALL,
                        0.75,
                        cv::Scalar(0,255,0), 1, 8);
            cv::imshow("ClassList", classlist_image);
        }
    }

#ifdef LIVE_DISPLAY
    cv::imshow(imagenet_win, show_image);
#endif

#ifdef RMT_GST_STREAMER
    cv::resize(show_image, to_stream, cv::Size(640,480));
    writer << to_stream;
#endif

#ifdef LIVE_DISPLAY
    waitKey(2);
#endif
}
597 // Function to process all command line arguments
598 void ProcessArgs(int argc, char *argv[], std::string& config_file,
599 uint32_t & num_dsps, uint32_t & num_eves, int & num_layers_groups )
600 {
601 const struct option long_options[] =
602 {
603 {"labels_classes_file", required_argument, 0, 'l'},
604 {"selected_classes_file", required_argument, 0, 's'},
605 {"config_file", required_argument, 0, 'c'},
606 {"num_dsps", required_argument, 0, 'd'},
607 {"num_eves", required_argument, 0, 'e'},
608 {"num_layers_groups", required_argument, 0, 'g'},
609 {"help", no_argument, 0, 'h'},
610 {"verbose", no_argument, 0, 'v'},
611 {0, 0, 0, 0}
612 };
614 int option_index = 0;
616 while (true)
617 {
618 int c = getopt_long(argc, argv, "l:c:s:i:d:e:g:hv", long_options, &option_index);
620 if (c == -1)
621 break;
623 switch (c)
624 {
625 case 'l': populate_labels(optarg);
626 break;
628 case 's': populate_selected_items(optarg);
629 break;
631 case 'i': if(strlen(optarg) == 1)
632 {
633 live_input = atoi(optarg);
634 } else {
635 live_input = -1;
636 strcpy(video_clip, optarg);
637 }
638 break;
640 case 'c': config_file = optarg;
641 break;
643 case 'g': num_layers_groups = atoi(optarg);
644 assert(num_layers_groups >= 1 && num_layers_groups <= 2);
645 break;
647 case 'd': num_dsps = atoi(optarg);
648 assert (num_dsps >= 0 && num_dsps <= 2);
649 break;
651 case 'e': num_eves = atoi(optarg);
652 assert (num_eves >= 0 && num_eves <= 2);
653 break;
655 case 'v': verbose = true;
656 break;
658 case 'h': DisplayHelp();
659 exit(EXIT_SUCCESS);
660 break;
662 case '?': // Error in getopt_long
663 exit(EXIT_FAILURE);
664 break;
666 default:
667 std::cerr << "Unsupported option: " << c << std::endl;
668 break;
669 }
670 }
672 // if no eves available, we can only run full net as one layer group
673 if (num_eves == 0) num_layers_groups = 1;
674 }
// Prints the command-line usage summary to stdout.
void DisplayHelp()
{
    static const char usage[] =
        "Usage: tidl_classification\n"
        " Will run all available networks if tidl is invoked without"
        " any arguments.\n Use -c to run a single network.\n"
        "Optional arguments:\n"
        " -c Path to the configuration file\n"
        " -d <number of DSP cores> Number of DSP cores to use (0 - 2)\n"
        " -e <number of EVE cores> Number of EVE cores to use (0 - 2)\n"
        " -g <1|2> Number of layer groups\n"
        " -l List of label strings (of all classes in model)\n"
        " -s List of strings with selected classes\n"
        " -i Video input (for camera:0,1 or video clip)\n"
        " -v Verbose output during execution\n"
        " -h Help\n";
    std::cout << usage;
}
693 // Function to filter all the reported decisions
694 bool tf_expected_id(int id)
695 {
696 // Filter out unexpected IDs
697 for (int i = 0; i < selected_items_size; i ++)
698 {
699 if(id == selected_items[i]) return true;
700 }
701 return false;
702 }
704 int tf_postprocess(uchar *in, int size, int roi_idx, int frame_idx, int f_id)
705 {
706 //prob_i = exp(TIDL_Lib_output_i) / sum(exp(TIDL_Lib_output))
707 // sort and get k largest values and corresponding indices
708 const int k = TOP_CANDIDATES;
709 int rpt_id = -1;
711 typedef std::pair<uchar, int> val_index;
712 auto constexpr cmp = [](val_index &left, val_index &right) { return left.first > right.first; };
713 std::priority_queue<val_index, std::vector<val_index>, decltype(cmp)> queue(cmp);
714 // initialize priority queue with smallest value on top
715 for (int i = 0; i < k; i++) {
716 queue.push(val_index(in[i], i));
717 }
718 // for rest input, if larger than current minimum, pop mininum, push new val
719 for (int i = k; i < size; i++)
720 {
721 if (in[i] > queue.top().first)
722 {
723 queue.pop();
724 queue.push(val_index(in[i], i));
725 }
726 }
728 // output top k values in reverse order: largest val first
729 std::vector<val_index> sorted;
730 while (! queue.empty())
731 {
732 sorted.push_back(queue.top());
733 queue.pop();
734 }
736 for (int i = k-1; i >= 0; i--)
737 {
738 int id = sorted[i].second;
740 if (tf_expected_id(id))
741 {
742 std::cout << "Frame:" << frame_idx << "," << f_id << " ROI[" << roi_idx << "]: rank="
743 << k-i << ", outval=" << (float)sorted[i].first / 255 << ", "
744 << labels_classes[sorted[i].second] << std::endl;
745 rpt_id = id;
746 }
747 }
748 return rpt_id;
749 }
// Temporal de-bouncing for one ROI: given the last three classification
// results (index 0 = most recent, -1 = nothing seen), report a class id only
// when it appears at least twice in the history; otherwise return -1.
int ShowRegion(int roi_history[])
{
    const int newest = roi_history[0];
    const int middle = roi_history[1];
    const int oldest = roi_history[2];

    if (newest >= 0 && (newest == middle || newest == oldest))
        return newest;
    if (middle >= 0 && middle == oldest)
        return middle;
    return -1;
}