1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
#include <signal.h>
#include <getopt.h>
#include <time.h>
#include <memory.h>
#include <string.h>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cassert>
#include <string>
#include <functional>
#include <queue>
#include <algorithm>
42 #include "executor.h"
43 #include "execution_object.h"
44 #include "execution_object_pipeline.h"
45 #include "configuration.h"
46 #include "avg_fps_window.h"
47 #include "imgutil.h"
49 #include "opencv2/core.hpp"
50 #include "opencv2/imgproc.hpp"
51 #include "opencv2/highgui.hpp"
52 #include "opencv2/videoio.hpp"
// Build-time feature toggles
//#define TWO_ROIs          // split the frame into multiple regions of interest
#define LIVE_DISPLAY        // show OpenCV windows on the local display
#define PERF_VERBOSE        // print per-frame device/FPS diagnostics
//#define RMT_GST_STREAMER  // stream encoded output over gstreamer/UDP

// Maximum number of regions of interest the history arrays can hold
#define MAX_NUM_ROI 4

// live_input >= 0 selects a camera index; -1 means play back video_clip
int live_input = 1;
char video_clip[320];

#ifdef TWO_ROIs
// 400x300 frame split into two 176x224 side-by-side ROIs
#define RES_X 400
#define RES_Y 300
#define NUM_ROI_X 2
#define NUM_ROI_Y 1
#define X_OFFSET 0
#define X_STEP 176
#define Y_OFFSET 52
#define Y_STEP 224
#else
// Single 460x460 ROI inside a 480x480 frame
#define RES_X 480
#define RES_Y 480
#define NUM_ROI_X 1
#define NUM_ROI_Y 1
#define X_OFFSET 10
#define X_STEP 460
#define Y_OFFSET 10
#define Y_STEP 460
#endif

// Total number of regions of interest per frame
#define NUM_ROI (NUM_ROI_X * NUM_ROI_Y)

//Temporal averaging: number of top-scoring candidates examined per frame
int TOP_CANDIDATES = 3;

using namespace tidl;
using namespace cv;

#ifdef LIVE_DISPLAY
// Main display window title, scratch buffer for class-list text,
// and the class-list panel image
char imagenet_win[160];
char tmp_classwindow_string[160];
Mat classlist_image;
98 void imagenetCallBackFunc(int event, int x, int y, int flags, void* userdata)
99 {
100 if ( event == EVENT_RBUTTONDOWN )
101 {
102 std::cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << " ... prepare to exit!" << std::endl;
103 exit(0);
104 }
105 }
106 #endif
// Shared frame buffers used across capture, preprocessing and display
Mat in_image, image, r_image, cnn_image, show_image, bgr_frames[3];
Mat to_stream;
// Crop rectangle of each region of interest, computed in SetupInput()
Rect rectCrop[NUM_ROI];
// Report average FPS across a sliding window of 16 frames
AvgFPSWindow fps_window(16);

static int tf_postprocess(uchar *in, int out_size, int size, int roi_idx,
                          int frame_idx, int f_id);
static int ShowRegion(int roi_history[]);
// Per-ROI classification history, from most recent to oldest at top indices
static int selclass_history[MAX_NUM_ROI][3];

// Forward declarations of the processing pipeline stages
bool RunConfiguration(const std::string& config_file, int num_layers_groups,
                      uint32_t num_dsps, uint32_t num_eves);
bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
                                    Configuration& configuration,
                                    uint32_t num_layers_groups,
                                    Executor*& e_eve, Executor*& e_dsp,
                                    std::vector<ExecutionObjectPipeline*>& eops);
void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops);
void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps);
bool SetupInput(VideoCapture& cap, VideoWriter& writer);
bool ReadFrame(ExecutionObjectPipeline* eop, const Configuration& c,
               int frame_idx, VideoCapture &cap, VideoWriter& writer);
void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
                  uint32_t frame_idx, uint32_t num_eops,
                  uint32_t num_eves, uint32_t num_dsps);
static void ProcessArgs(int argc, char *argv[],
                        std::string& config_file,
                        uint32_t & num_dsps, uint32_t &num_eves,
                        int & num_layers_groups);

static void DisplayHelp();

// Label/class data supplied by a separate translation unit
extern std::string labels_classes[];
extern int IMAGE_CLASSES_NUM;
extern int selected_items_size;
extern int selected_items[];
extern int populate_selected_items (char *filename);
extern void populate_labels (char *filename);

// Enable TIDL API trace output (set by the -v command line option)
bool verbose = false;
150 int main(int argc, char *argv[])
151 {
152 // Catch ctrl-c to ensure a clean exit
153 signal(SIGABRT, exit);
154 signal(SIGTERM, exit);
156 // If there are no devices capable of offloading TIDL on the SoC, exit
157 uint32_t num_eves = Executor::GetNumDevices(DeviceType::EVE);
158 uint32_t num_dsps = Executor::GetNumDevices(DeviceType::DSP);
159 int num_layers_groups = 1;
161 if (num_eves == 0 && num_dsps == 0)
162 {
163 std::cout << "TI DL not supported on this SoC." << std::endl;
164 return EXIT_SUCCESS;
165 }
167 // Process arguments
168 std::string config_file;
169 ProcessArgs(argc, argv, config_file, num_dsps, num_eves, num_layers_groups);
171 bool status = false;
172 if (!config_file.empty()) {
173 std::cout << "Run single configuration: " << config_file << std::endl;
174 status = RunConfiguration(config_file, num_layers_groups, num_dsps, num_eves);
175 }
177 if (!status)
178 {
179 std::cout << "tidl FAILED" << std::endl;
180 return EXIT_FAILURE;
181 }
183 std::cout << "tidl PASSED" << std::endl;
184 return EXIT_SUCCESS;
185 }
// Run a single TIDL network configuration: build executors and pipelines,
// then stream frames through them until configuration.numFrames frames
// (plus the pipeline-flush epilogue) have been processed.
// Returns false on configuration, pipeline-creation or runtime failure.
bool RunConfiguration(const std::string& config_file, int num_layers_groups, uint32_t num_dsps, uint32_t num_eves)
{
    // Read the TI DL configuration file
    Configuration configuration;
    if (!configuration.ReadFromFile(config_file))
        return false;

    if (verbose)
        configuration.enableApiTrace = true;

    if (num_layers_groups == 1)
        configuration.runFullNet = true; //Force all layers to be in the same group

    try
    {
        // Create ExecutionObjectPipelines
        Executor *e_eve = nullptr;
        Executor *e_dsp = nullptr;
        std::vector<ExecutionObjectPipeline *> eops;
        if (! CreateExecutionObjectPipelines(num_eves, num_dsps, configuration,
                                             num_layers_groups, e_eve, e_dsp, eops))
            return false;

        // Allocate input/output memory for each EOP
        AllocateMemory(eops);

        // Setup Live Display
        SetupLiveDisplay(num_eves, num_dsps);

        // Setup Input
        VideoCapture cap;
        VideoWriter writer; // gstreamer
        if (! SetupInput(cap, writer)) return false;

        // More initialization: clear per-ROI class history (-1 == no class)
        for (int k = 0; k < NUM_ROI; k++)
            for(int i = 0; i < 3; i ++)
                selclass_history[k][i] = -1;
        std::cout << "About to start ProcessFrame loop!!" << std::endl;

        // Process frames with available EOPs in a pipelined manner
        // additional num_eops iterations to flush the pipeline (epilogue)
        int num_eops = eops.size();
        for (int frame_idx = 0;
             frame_idx < configuration.numFrames + num_eops; frame_idx++)
        {
            // Round-robin over the EOPs so each frame reuses the EOP that
            // processed the frame num_eops iterations ago
            ExecutionObjectPipeline* eop = eops[frame_idx % num_eops];

            // Wait for previous frame on the same eo to finish processing
            if (eop->ProcessFrameWait())
            {
                DisplayFrame(eop, writer, frame_idx, num_eops,
                             num_eves, num_dsps);
            }
            fps_window.Tick();

            // Queue the next frame on this EOP (false at end of input)
            if (ReadFrame(eop, configuration, frame_idx, cap, writer))
                eop->ProcessFrameStartAsync();
        }

        // Cleanup: buffers were malloc'd in AllocateMemory()
        for (auto eop : eops)
        {
            free(eop->GetInputBufferPtr());
            free(eop->GetOutputBufferPtr());
            delete eop;
        }
        if (e_dsp) delete e_dsp;
        if (e_eve) delete e_eve;
    }
    catch (tidl::Exception &e)
    {
        std::cerr << e.what() << std::endl;
        return false;
    }

    return true;
}
268 bool CreateExecutionObjectPipelines(uint32_t num_eves, uint32_t num_dsps,
269 Configuration& configuration,
270 uint32_t num_layers_groups,
271 Executor*& e_eve, Executor*& e_dsp,
272 std::vector<ExecutionObjectPipeline*>& eops)
273 {
274 DeviceIds ids_eve, ids_dsp;
275 for (uint32_t i = 0; i < num_eves; i++)
276 ids_eve.insert(static_cast<DeviceId>(i));
277 for (uint32_t i = 0; i < num_dsps; i++)
278 ids_dsp.insert(static_cast<DeviceId>(i));
279 const uint32_t buffer_factor = 2;
281 switch(num_layers_groups)
282 {
283 case 1: // Single layers group
284 e_eve = num_eves == 0 ? nullptr :
285 new Executor(DeviceType::EVE, ids_eve, configuration);
286 e_dsp = num_dsps == 0 ? nullptr :
287 new Executor(DeviceType::DSP, ids_dsp, configuration);
289 // Construct ExecutionObjectPipeline with single Execution Object to
290 // process each frame. This is parallel processing of frames with
291 // as many DSP and EVE cores that we have on hand.
292 // If buffer_factor == 2, duplicating EOPs for double buffering
293 // and overlapping host pre/post-processing with device processing
294 for (uint32_t j = 0; j < buffer_factor; j++)
295 {
296 for (uint32_t i = 0; i < num_eves; i++)
297 eops.push_back(new ExecutionObjectPipeline({(*e_eve)[i]}));
298 for (uint32_t i = 0; i < num_dsps; i++)
299 eops.push_back(new ExecutionObjectPipeline({(*e_dsp)[i]}));
300 }
301 break;
303 case 2: // Two layers group
304 // Create Executors with the approriate core type, number of cores
305 // and configuration specified
306 // EVE will run layersGroupId 1 in the network, while
307 // DSP will run layersGroupId 2 in the network
308 e_eve = num_eves == 0 ? nullptr :
309 new Executor(DeviceType::EVE, ids_eve, configuration, 1);
310 e_dsp = num_dsps == 0 ? nullptr :
311 new Executor(DeviceType::DSP, ids_dsp, configuration, 2);
313 // Construct ExecutionObjectPipeline that utilizes multiple
314 // ExecutionObjects to process a single frame, each ExecutionObject
315 // processes one layerGroup of the network
316 // If buffer_factor == 2, duplicating EOPs for pipelining at
317 // EO level rather than at EOP level, in addition to double buffering
318 // and overlapping host pre/post-processing with device processing
319 for (uint32_t j = 0; j < buffer_factor; j++)
320 {
321 for (uint32_t i = 0; i < std::max(num_eves, num_dsps); i++)
322 eops.push_back(new ExecutionObjectPipeline(
323 {(*e_eve)[i%num_eves], (*e_dsp)[i%num_dsps]}));
324 }
325 break;
327 default:
328 std::cout << "Layers groups can be either 1 or 2!" << std::endl;
329 return false;
330 break;
331 }
333 return true;
334 }
336 void AllocateMemory(const std::vector<ExecutionObjectPipeline*>& eops)
337 {
338 for (auto eop : eops)
339 {
340 size_t in_size = eop->GetInputBufferSizeInBytes();
341 size_t out_size = eop->GetOutputBufferSizeInBytes();
342 void* in_ptr = malloc(in_size);
343 void* out_ptr = malloc(out_size);
344 assert(in_ptr != nullptr && out_ptr != nullptr);
346 ArgInfo in(in_ptr, in_size);
347 ArgInfo out(out_ptr, out_size);
348 eop->SetInputOutputBuffer(in, out);
349 }
350 }
// Create all OpenCV display windows: the main video window (named after
// the core counts), optional per-ROI windows, the TIDL SW-stack splash
// image and the class-list panel populated from the selected labels.
void SetupLiveDisplay(uint32_t num_eves, uint32_t num_dsps)
{
#ifdef LIVE_DISPLAY
    sprintf(imagenet_win, "Imagenet_EVEx%d_DSPx%d", num_eves, num_dsps);

    if(NUM_ROI > 1)
    {
        // One extra window per region of interest
        for(int i = 0; i < NUM_ROI; i ++) {
            char tmp_string[80];
            sprintf(tmp_string, "ROI[%02d]", i);
            namedWindow(tmp_string, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
        }
    }
    Mat sw_stack_image = imread(
        "/usr/share/ti/tidl/examples/classification/tidl-sw-stack-small.png",
        IMREAD_COLOR); // Read the file
    if( sw_stack_image.empty() ) // Check for invalid input
    {
        // Non-fatal: the demo still runs without the splash image
        std::cout << "Could not open or find the tidl-sw-stack-small image"
                  << std::endl ;
    } else {
        // Create a window for display.
        namedWindow( "TIDL SW Stack", WINDOW_AUTOSIZE | CV_GUI_NORMAL );
        // Show our image inside it.
        cv::imshow( "TIDL SW Stack", sw_stack_image );
    }

    namedWindow("ClassList", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    namedWindow(imagenet_win, WINDOW_AUTOSIZE | CV_GUI_NORMAL);
    //set the callback function for any mouse event
    setMouseCallback(imagenet_win, imagenetCallBackFunc, NULL);

    // 20 px per class row plus a 40 px header strip (FPS readout area)
    classlist_image = cv::Mat::zeros(40 + selected_items_size * 20, 220,
                                     CV_8UC3);
    //Erase window
    classlist_image.setTo(Scalar::all(0));

    for (int i = 0; i < selected_items_size; i ++)
    {
        sprintf(tmp_classwindow_string, "%2d) %12s", 1+i,
                labels_classes[selected_items[i]].c_str());
        cv::putText(classlist_image, tmp_classwindow_string,
                    cv::Point(5, 40 + i * 20),
                    cv::FONT_HERSHEY_COMPLEX_SMALL,
                    0.75,
                    cv::Scalar(255,255,255), 1, 8);
    }
    cv::imshow("ClassList", classlist_image);
#endif
}
// Open the video source (camera index when live_input >= 0, otherwise the
// clip named in video_clip), optionally open a gstreamer writer, and
// pre-compute the crop rectangle of each ROI. Returns false on failure.
bool SetupInput(VideoCapture& cap, VideoWriter& writer)
{
    if(live_input >= 0)
    {
        cap.open(live_input);

        const double fps = cap.get(CAP_PROP_FPS);
        const int width = cap.get(CAP_PROP_FRAME_WIDTH);
        const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
        std::cout << "Capture camera with " << fps << " fps, " << width << "x"
                  << height << " px" << std::endl;

#ifdef RMT_GST_STREAMER
        // Stream H.264-encoded NV12 frames over UDP to a remote host
        writer.open(" appsrc ! videoconvert ! video/x-raw, format=(string)NV12, width=(int)640, height=(int)480, framerate=(fraction)30/1 ! \
ducatih264enc bitrate=2000 ! queue ! h264parse config-interval=1 ! \
mpegtsmux ! udpsink host=192.168.1.2 sync=false port=5000",
                    0,fps,Size(640,480),true);

        if (!writer.isOpened()) {
            cap.release();
            std::cerr << "Can't create gstreamer writer. "
                      << "Do you have the correct version installed?" << std::endl;
            std::cerr << "Print out OpenCV build information" << std::endl;
            std::cout << getBuildInformation() << std::endl;
            return false;
        }
#endif
    } else {
        std::cout << "Video input clip: " << video_clip << std::endl;
        cap.open(std::string(video_clip));
        const double fps = cap.get(CAP_PROP_FPS);
        const int width = cap.get(CAP_PROP_FRAME_WIDTH);
        const int height = cap.get(CAP_PROP_FRAME_HEIGHT);
        std::cout << "Clip with " << fps << " fps, " << width << "x"
                  << height << " px" << std::endl;
    }

    if (!cap.isOpened()) {
        std::cout << "Video input not opened!" << std::endl;
        return false;
    }

    // Lay out NUM_ROI_X x NUM_ROI_Y crop rectangles over the input frame
    for (int y = 0; y < NUM_ROI_Y; y ++) {
        for (int x = 0; x < NUM_ROI_X; x ++) {
            rectCrop[y * NUM_ROI_X + x] = Rect(X_OFFSET + x * X_STEP,
                Y_OFFSET + y * Y_STEP, X_STEP, Y_STEP);
            std::cout << "Rect[" << X_OFFSET + x * X_STEP << ", "
                      << Y_OFFSET + y * Y_STEP << "]" << std::endl;
        }
    }

    return true;
}
457 bool ReadFrame(ExecutionObjectPipeline* eop, const Configuration& c,
458 int frame_idx, VideoCapture &cap, VideoWriter& writer)
459 {
461 if (cap.grab() && frame_idx < c.numFrames)
462 {
463 if (cap.retrieve(in_image))
464 {
465 if(live_input >= 0)
466 { //Crop central square portion
467 int loc_xmin = (in_image.size().width - in_image.size().height) / 2; //Central position
468 int loc_ymin = 0;
469 int loc_w = in_image.size().height;
470 int loc_h = in_image.size().height;
472 cv::resize(in_image(Rect(loc_xmin, loc_ymin, loc_w, loc_h)), image, Size(RES_X, RES_Y));
473 } else {
474 if((in_image.size().width != RES_X) || (in_image.size().height != RES_Y))
475 {
476 cv::resize(in_image, image, Size(RES_X,RES_Y));
477 }
478 }
480 r_image = Mat(image, rectCrop[frame_idx % NUM_ROI]);
482 #ifdef LIVE_DISPLAY
483 if(NUM_ROI > 1)
484 {
485 char tmp_string[80];
486 sprintf(tmp_string, "ROI[%02d]", frame_idx % NUM_ROI);
487 cv::imshow(tmp_string, r_image);
488 }
489 image.copyTo(show_image);
490 #endif
491 imgutil::PreprocessImage(r_image, eop->GetInputBufferPtr(), c);
492 eop->SetFrameIndex(frame_idx);
494 #ifdef RMT_GST_STREAMER
495 cv::resize(Mat(image, Rect(0,32,640,448)), to_stream,
496 Size(640,480));
497 writer << to_stream;
498 #endif
500 return true;
501 }
502 } else {
503 if(live_input == -1) {
504 //Rewind!
505 cap.set(CAP_PROP_POS_FRAMES, 0);
506 }
507 }
509 return false;
510 }
// Post-process the EOP's output, update the per-ROI class history, and
// refresh the display surfaces: overlay text/rectangle on the video
// window, redraw the class list with the detected class highlighted, and
// print/update the average FPS.
void DisplayFrame(const ExecutionObjectPipeline* eop, VideoWriter& writer,
                  uint32_t frame_idx, uint32_t num_eops,
                  uint32_t num_eves, uint32_t num_dsps)
{
    int f_id = eop->GetFrameIndex();
    int curr_roi = f_id % NUM_ROI;
    int is_object = tf_postprocess((uchar*) eop->GetOutputBufferPtr(),
                                   eop->GetOutputBufferSizeInBytes(),
                                   IMAGE_CLASSES_NUM, curr_roi, frame_idx, f_id);
    // Shift the 3-deep history for this ROI; index [0] is the newest result
    selclass_history[curr_roi][2] = selclass_history[curr_roi][1];
    selclass_history[curr_roi][1] = selclass_history[curr_roi][0];
    selclass_history[curr_roi][0] = is_object;
    for (int r = 0; r < NUM_ROI; r ++)
    {
        // ShowRegion() reports a class only when it repeats in the history
        int rpt_id = ShowRegion(selclass_history[r]);
        if(rpt_id >= 0)
        {
            // overlay the display window, if ball seen during last two times
            cv::putText(show_image, labels_classes[rpt_id].c_str(),
                        cv::Point(rectCrop[r].x + 5,rectCrop[r].y + 32), // Coordinates
                        cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
                        1.5, // Scale. 2.0 = 2x bigger
                        cv::Scalar(0,0,0), // Color
                        1, // Thickness
                        8); // Line type
            cv::rectangle(show_image, rectCrop[r], Scalar(255,0,0), 3);
            std::cout << "ROI(" << r << ")(" << rpt_id << ")="
                      << labels_classes[rpt_id].c_str() << std::endl;

            // Redraw the class list, highlighting the detected class in red
            classlist_image.setTo(Scalar::all(0));
            for (int k = 0; k < selected_items_size; k ++)
            {
                sprintf(tmp_classwindow_string, "%2d) %12s", 1+k,
                        labels_classes[selected_items[k]].c_str());
                cv::putText(classlist_image, tmp_classwindow_string,
                            cv::Point(5, 40 + k * 20),
                            cv::FONT_HERSHEY_COMPLEX_SMALL,
                            0.75,
                            selected_items[k] == rpt_id ? cv::Scalar(0,0,255) :
                            cv::Scalar(255,255,255), 1, 8);
            }

            double avg_fps = fps_window.UpdateAvgFPS();
            sprintf(tmp_classwindow_string, "FPS:%5.2lf", avg_fps );

#ifdef PERF_VERBOSE
            std::cout << "Device:" << eop->GetDeviceName() << " eops("
                      << num_eops << "), EVES(" << num_eves << ") DSPS("
                      << num_dsps << ") FPS:" << avg_fps << std::endl;
#endif
            // FPS readout in green in the class-list header strip
            cv::putText(classlist_image, tmp_classwindow_string,
                        cv::Point(5, 20),
                        cv::FONT_HERSHEY_COMPLEX_SMALL,
                        0.75,
                        cv::Scalar(0,255,0), 1, 8);
            cv::imshow("ClassList", classlist_image);
        }
    }

#ifdef LIVE_DISPLAY
    cv::imshow(imagenet_win, show_image);
#endif

#ifdef RMT_GST_STREAMER
    cv::resize(show_image, to_stream, cv::Size(640,480));
    writer << to_stream;
#endif

#ifdef LIVE_DISPLAY
    // Pump the OpenCV event loop so the windows repaint
    waitKey(2);
#endif
}
586 // Function to process all command line arguments
587 void ProcessArgs(int argc, char *argv[], std::string& config_file,
588 uint32_t & num_dsps, uint32_t & num_eves, int & num_layers_groups )
589 {
590 const struct option long_options[] =
591 {
592 {"labels_classes_file", required_argument, 0, 'l'},
593 {"selected_classes_file", required_argument, 0, 's'},
594 {"config_file", required_argument, 0, 'c'},
595 {"num_dsps", required_argument, 0, 'd'},
596 {"num_eves", required_argument, 0, 'e'},
597 {"num_layers_groups", required_argument, 0, 'g'},
598 {"help", no_argument, 0, 'h'},
599 {"verbose", no_argument, 0, 'v'},
600 {0, 0, 0, 0}
601 };
603 int option_index = 0;
605 while (true)
606 {
607 int c = getopt_long(argc, argv, "l:c:s:i:d:e:g:hv", long_options, &option_index);
609 if (c == -1)
610 break;
612 switch (c)
613 {
614 case 'l': populate_labels(optarg);
615 break;
617 case 's': populate_selected_items(optarg);
618 break;
620 case 'i': if(strlen(optarg) == 1)
621 {
622 live_input = atoi(optarg);
623 } else {
624 live_input = -1;
625 strcpy(video_clip, optarg);
626 }
627 break;
629 case 'c': config_file = optarg;
630 break;
632 case 'g': num_layers_groups = atoi(optarg);
633 assert(num_layers_groups >= 1 && num_layers_groups <= 2);
634 break;
636 case 'd': num_dsps = atoi(optarg);
637 assert (num_dsps >= 0 && num_dsps <= 2);
638 break;
640 case 'e': num_eves = atoi(optarg);
641 assert (num_eves >= 0 && num_eves <= 4);
642 break;
644 case 'v': verbose = true;
645 break;
647 case 'h': DisplayHelp();
648 exit(EXIT_SUCCESS);
649 break;
651 case '?': // Error in getopt_long
652 exit(EXIT_FAILURE);
653 break;
655 default:
656 std::cerr << "Unsupported option: " << c << std::endl;
657 break;
658 }
659 }
661 // if no eves available, we can only run full net as one layer group
662 if (num_eves == 0) num_layers_groups = 1;
663 }
// Print usage information for all supported command line options.
void DisplayHelp()
{
    std::cout << "Usage: tidl_classification\n"
                 " Will run all available networks if tidl is invoked without"
                 " any arguments.\n Use -c to run a single network.\n"
                 "Optional arguments:\n"
                 " -c Path to the configuration file\n"
                 " -d <number of DSP cores> Number of DSP cores to use (0 - 2)\n"
                 // BUG FIX: range now matches the assert in ProcessArgs
                 // (num_eves <= 4); the help previously said (0 - 2)
                 " -e <number of EVE cores> Number of EVE cores to use (0 - 4)\n"
                 " -g <1|2> Number of layer groups\n"
                 " -l List of label strings (of all classes in model)\n"
                 " -s List of strings with selected classes\n"
                 " -i Video input (for camera:0,1 or video clip)\n"
                 " -v Verbose output during execution\n"
                 " -h Help\n";
}
682 // Function to filter all the reported decisions
683 bool tf_expected_id(int id)
684 {
685 // Filter out unexpected IDs
686 for (int i = 0; i < selected_items_size; i ++)
687 {
688 if(id == selected_items[i]) return true;
689 }
690 return false;
691 }
// Select the top-k raw output activations and report the ID of the
// highest-scoring class that is in the user-selected subset, or -1 if
// none of the top-k classes is selected. 'in' holds the raw uchar scores,
// 'out_size' is the output buffer length (used to detect the 1001-entry
// Tensorflow layout) and 'size' is the number of classes to scan.
int tf_postprocess(uchar *in, int out_size, int size, int roi_idx,
                   int frame_idx, int f_id)
{
    //prob_i = exp(TIDL_Lib_output_i) / sum(exp(TIDL_Lib_output))
    // sort and get k largest values and corresponding indices
    const int k = TOP_CANDIDATES;
    int rpt_id = -1;
    // Tensorflow trained network outputs 1001 probabilities,
    // with 0-index being background, thus we need to subtract 1 when
    // reporting classified object from 1000 categories
    int background_offset = out_size == 1001 ? 1 : 0;

    typedef std::pair<uchar, int> val_index;
    auto cmp = [](val_index &left, val_index &right) { return left.first > right.first; };
    std::priority_queue<val_index, std::vector<val_index>, decltype(cmp)> queue(cmp);
    // initialize priority queue with smallest value on top
    for (int i = 0; i < k; i++) {
        queue.push(val_index(in[i], i));
    }
    // for rest input, if larger than current minimum, pop mininum, push new val
    for (int i = k; i < size; i++)
    {
        if (in[i] > queue.top().first)
        {
            queue.pop();
            queue.push(val_index(in[i], i));
        }
    }

    // drain the min-heap: 'sorted' ends up in ASCENDING score order
    // (smallest of the top-k first, largest last)
    std::vector<val_index> sorted;
    while (! queue.empty())
    {
        sorted.push_back(queue.top());
        queue.pop();
    }

    // walk from smallest to largest so the printed rank (k-i) counts down
    // to 1; rpt_id keeps the last match, i.e. the highest-scoring selected
    // class
    for (int i = 0; i < k; i++)
    {
        int id = sorted[i].second - background_offset;

        if (tf_expected_id(id))
        {
            std::cout << "Frame:" << frame_idx << "," << f_id << " ROI[" << roi_idx << "]: rank="
                      << k-i << ", outval=" << (float)sorted[i].first / 255 << ", "
                      << labels_classes[id] << std::endl;
            rpt_id = id;
        }
    }
    return rpt_id;
}
// Temporal filter for one ROI: report a class only when the same valid
// (>= 0) class ID appears in at least two of the last three frames;
// otherwise report -1. Earlier (newer) history entries win ties.
int ShowRegion(int roi_history[])
{
    static const int pairs[3][2] = { {0, 1}, {0, 2}, {1, 2} };
    for (const auto& p : pairs)
    {
        const int candidate = roi_history[p[0]];
        if (candidate >= 0 && candidate == roi_history[p[1]])
            return candidate;
    }
    return -1;
}