1 /******************************************************************************
2 * Copyright (c) 2018, Texas Instruments Incorporated - http://www.ti.com/
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * * Neither the name of Texas Instruments Incorporated nor the
13 * names of its contributors may be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 *****************************************************************************/
28 #include <signal.h>
29 #include <getopt.h>
30 #include <iostream>
31 #include <iomanip>
32 #include <fstream>
33 #include <cassert>
34 #include <string>
35 #include <functional>
36 #include <algorithm>
37 #include <time.h>
38 #include <unistd.h>
40 #include <queue>
41 #include <vector>
42 #include <cstdio>
44 #include "executor.h"
45 #include "execution_object.h"
46 #include "configuration.h"
47 #include "object_classes.h"
49 #include "opencv2/core.hpp"
50 #include "opencv2/imgproc.hpp"
51 #include "opencv2/highgui.hpp"
52 #include "opencv2/videoio.hpp"
54 #define NUM_VIDEO_FRAMES 100
55 #define DEFAULT_CONFIG "jseg21_tiscapes"
56 #define DEFAULT_INPUT "../test/testvecs/input/000100_1024x512_bgr.y"
58 bool __TI_show_debug_ = false;
59 bool is_default_input = false;
60 bool is_preprocessed_input = false;
61 bool is_camera_input = false;
62 int orig_width;
63 int orig_height;
64 object_class_table_t *object_class_table;
66 using namespace tidl;
67 using namespace cv;
70 bool RunConfiguration(const std::string& config_file, int num_devices,
71 DeviceType device_type, std::string& input_file);
72 bool RunAllConfigurations(int32_t num_devices, DeviceType device_type);
74 bool ReadFrame(ExecutionObject& eo, int frame_idx,
75 const Configuration& configuration, int num_frames,
76 std::string& image_file, VideoCapture &cap);
78 bool WriteFrameOutput(const ExecutionObject &eo,
79 const Configuration& configuration);
81 static void ProcessArgs(int argc, char *argv[],
82 std::string& config,
83 int& num_devices,
84 DeviceType& device_type,
85 std::string& input_file);
87 static void DisplayHelp();
// Elapsed wall-clock time from t0 to t1, expressed in milliseconds.
static double ms_diff(struct timespec &t0, struct timespec &t1)
{
    double seconds = static_cast<double>(t1.tv_sec  - t0.tv_sec);
    double nanos   = static_cast<double>(t1.tv_nsec - t0.tv_nsec);
    return seconds * 1e3 + nanos / 1e6;
}
93 int main(int argc, char *argv[])
94 {
95 // Catch ctrl-c to ensure a clean exit
96 signal(SIGABRT, exit);
97 signal(SIGTERM, exit);
99 // If there are no devices capable of offloading TIDL on the SoC, exit
100 uint32_t num_dla = Executor::GetNumDevices(DeviceType::DLA);
101 uint32_t num_dsp = Executor::GetNumDevices(DeviceType::DSP);
102 if (num_dla == 0 && num_dsp == 0)
103 {
104 std::cout << "TI DL not supported on this SoC." << std::endl;
105 return EXIT_SUCCESS;
106 }
108 // Process arguments
109 std::string config = DEFAULT_CONFIG;
110 std::string input_file = DEFAULT_INPUT;
111 int num_devices = 1;
112 DeviceType device_type = (num_dla > 0 ? DeviceType::DLA:DeviceType::DSP);
113 ProcessArgs(argc, argv, config, num_devices, device_type, input_file);
115 if ((object_class_table = GetObjectClassTable(config)) == nullptr)
116 {
117 std::cout << "No object classes defined for this config." << std::endl;
118 return EXIT_FAILURE;
119 }
121 if (input_file == DEFAULT_INPUT) is_default_input = true;
122 if (input_file == "camera") is_camera_input = true;
123 if (input_file.length() > 2 &&
124 input_file.compare(input_file.length() - 2, 2, ".y") == 0)
125 is_preprocessed_input = true;
126 std::cout << "Input: " << input_file << std::endl;
127 std::string config_file = "../test/testvecs/config/infer/tidl_config_"
128 + config + ".txt";
129 bool status = RunConfiguration(config_file, num_devices, device_type,
130 input_file);
132 if (!status)
133 {
134 std::cout << "segmentation FAILED" << std::endl;
135 return EXIT_FAILURE;
136 }
138 std::cout << "segmentation PASSED" << std::endl;
139 return EXIT_SUCCESS;
140 }
142 bool RunConfiguration(const std::string& config_file, int num_devices,
143 DeviceType device_type, std::string& input_file)
144 {
145 DeviceIds ids;
146 for (int i = 0; i < num_devices; i++)
147 ids.insert(static_cast<DeviceId>(i));
149 // Read the TI DL configuration file
150 Configuration configuration;
151 bool status = configuration.ReadFromFile(config_file);
152 if (!status)
153 {
154 std::cerr << "Error in configuration file: " << config_file
155 << std::endl;
156 return false;
157 }
159 // setup input
160 int num_frames = is_default_input ? 3 : 1;
161 VideoCapture cap;
162 std::string image_file;
163 if (is_camera_input)
164 {
165 cap = VideoCapture(1); // cap = VideoCapture("test.mp4");
166 if (! cap.isOpened())
167 {
168 std::cerr << "Cannot open camera input." << std::endl;
169 return false;
170 }
171 num_frames = NUM_VIDEO_FRAMES;
172 namedWindow("Segmentation", WINDOW_AUTOSIZE | CV_GUI_NORMAL);
173 }
174 else
175 {
176 image_file = input_file;
177 }
179 try
180 {
181 // Create a executor with the approriate core type, number of cores
182 // and configuration specified
183 Executor executor(device_type, ids, configuration);
185 // Query Executor for set of ExecutionObjects created
186 const ExecutionObjects& execution_objects =
187 executor.GetExecutionObjects();
188 int num_eos = execution_objects.size();
190 // Allocate input and output buffers for each execution object
191 std::vector<void *> buffers;
192 for (auto &eo : execution_objects)
193 {
194 size_t in_size = eo->GetInputBufferSizeInBytes();
195 size_t out_size = eo->GetOutputBufferSizeInBytes();
196 ArgInfo in = { ArgInfo(malloc(in_size), in_size)};
197 ArgInfo out = { ArgInfo(malloc(out_size), out_size)};
198 eo->SetInputOutputBuffer(in, out);
200 buffers.push_back(in.ptr());
201 buffers.push_back(out.ptr());
202 }
204 #define MAX_NUM_EOS 4
205 struct timespec t0[MAX_NUM_EOS], t1;
207 // Process frames with available execution objects in a pipelined manner
208 // additional num_eos iterations to flush the pipeline (epilogue)
209 for (int frame_idx = 0;
210 frame_idx < num_frames + num_eos; frame_idx++)
211 {
212 ExecutionObject* eo = execution_objects[frame_idx % num_eos].get();
214 // Wait for previous frame on the same eo to finish processing
215 if (eo->ProcessFrameWait())
216 {
217 clock_gettime(CLOCK_MONOTONIC, &t1);
218 double elapsed_host =
219 ms_diff(t0[eo->GetFrameIndex() % num_eos], t1);
220 double elapsed_device = eo->GetProcessTimeInMilliSeconds();
221 double overhead = 100 - (elapsed_device/elapsed_host*100);
223 std::cout << "frame[" << eo->GetFrameIndex() << "]: "
224 << "Time on device: "
225 << std::setw(6) << std::setprecision(4)
226 << elapsed_device << "ms, "
227 << "host: "
228 << std::setw(6) << std::setprecision(4)
229 << elapsed_host << "ms ";
230 std::cout << "API overhead: "
231 << std::setw(6) << std::setprecision(3)
232 << overhead << " %" << std::endl;
234 WriteFrameOutput(*eo, configuration);
235 }
237 // Read a frame and start processing it with current eo
238 if (ReadFrame(*eo, frame_idx, configuration, num_frames,
239 image_file, cap))
240 {
241 clock_gettime(CLOCK_MONOTONIC, &t0[frame_idx % num_eos]);
242 eo->ProcessFrameStartAsync();
243 }
244 }
246 for (auto b : buffers)
247 free(b);
249 }
250 catch (tidl::Exception &e)
251 {
252 std::cerr << e.what() << std::endl;
253 status = false;
254 }
256 return status;
257 }
260 bool ReadFrame(ExecutionObject &eo, int frame_idx,
261 const Configuration& configuration, int num_frames,
262 std::string& image_file, VideoCapture &cap)
263 {
264 if (frame_idx >= num_frames)
265 return false;
266 eo.SetFrameIndex(frame_idx);
268 char* frame_buffer = eo.GetInputBufferPtr();
269 assert (frame_buffer != nullptr);
270 int channel_size = configuration.inWidth * configuration.inHeight;
272 Mat image;
273 if (! image_file.empty())
274 {
275 if (is_preprocessed_input)
276 {
277 std::ifstream ifs(image_file, std::ios::binary);
278 ifs.seekg(frame_idx * channel_size * 3);
279 ifs.read(frame_buffer, channel_size * 3);
280 bool ifs_status = ifs.good();
281 ifs.close();
282 orig_width = configuration.inWidth;
283 orig_height = configuration.inHeight;
284 return ifs_status; // already PreProc-ed
285 }
286 else
287 {
288 image = cv::imread(image_file, CV_LOAD_IMAGE_COLOR);
289 if (image.empty())
290 {
291 std::cerr << "Unable to read from: " << image_file << std::endl;
292 return false;
293 }
294 }
295 }
296 else
297 {
298 // 640x480 camera input, process one in every 5 frames,
299 // can adjust number of skipped frames to match real time processing
300 if (! cap.grab()) return false;
301 if (! cap.grab()) return false;
302 if (! cap.grab()) return false;
303 if (! cap.grab()) return false;
304 if (! cap.grab()) return false;
305 if (! cap.retrieve(image)) return false;
306 }
308 // scale to network input size 1024 x 512
309 Mat s_image, bgr_frames[3];
310 orig_width = image.cols;
311 orig_height = image.rows;
312 cv::resize(image, s_image,
313 Size(configuration.inWidth, configuration.inHeight),
314 0, 0, cv::INTER_AREA);
315 cv::split(s_image, bgr_frames);
316 memcpy(frame_buffer, bgr_frames[0].ptr(), channel_size);
317 memcpy(frame_buffer+1*channel_size, bgr_frames[1].ptr(), channel_size);
318 memcpy(frame_buffer+2*channel_size, bgr_frames[2].ptr(), channel_size);
319 return true;
320 }
322 // Create Overlay mask for pixel-level segmentation
323 void CreateMask(uchar *classes, uchar *mb, uchar *mg, uchar* mr,
324 int channel_size)
325 {
326 for (int i = 0; i < channel_size; i++)
327 {
328 object_class_t *object_class = GetObjectClass(object_class_table,
329 classes[i]);
330 mb[i] = object_class->color.blue;
331 mg[i] = object_class->color.green;
332 mr[i] = object_class->color.red;
333 }
334 }
336 // Create frame overlayed with pixel-level segmentation
337 bool WriteFrameOutput(const ExecutionObject &eo,
338 const Configuration& configuration)
339 {
340 unsigned char *out = (unsigned char *) eo.GetOutputBufferPtr();
341 int out_size = eo.GetOutputBufferSizeInBytes();
342 int width = configuration.inWidth;
343 int height = configuration.inHeight;
344 int channel_size = width * height;
346 Mat mask, frame, blend, r_blend, bgr[3];
347 // Create overlay mask
348 bgr[0] = Mat(height, width, CV_8UC(1));
349 bgr[1] = Mat(height, width, CV_8UC(1));
350 bgr[2] = Mat(height, width, CV_8UC(1));
351 CreateMask(out, bgr[0].ptr(), bgr[1].ptr(), bgr[2].ptr(), channel_size);
352 cv::merge(bgr, 3, mask);
354 // Asseembly original frame
355 unsigned char *in = (unsigned char *) eo.GetInputBufferPtr();
356 bgr[0] = Mat(height, width, CV_8UC(1), in);
357 bgr[1] = Mat(height, width, CV_8UC(1), in + channel_size);
358 bgr[2] = Mat(height, width, CV_8UC(1), in + channel_size*2);
359 cv::merge(bgr, 3, frame);
361 // Create overlayed frame
362 cv::addWeighted(frame, 0.7, mask, 0.3, 0.0, blend);
364 cv::resize(blend, r_blend, Size(orig_width, orig_height));
365 if (is_camera_input)
366 {
367 cv::imshow("Segmentation", r_blend);
368 waitKey(1);
369 }
370 else
371 {
372 int frame_index = eo.GetFrameIndex();
373 char outfile_name[64];
374 if (is_preprocessed_input)
375 {
376 snprintf(outfile_name, 64, "frame_%d.png", frame_index);
377 cv::imwrite(outfile_name, frame);
378 printf("Saving frame %d to: %s\n", frame_index, outfile_name);
379 }
381 snprintf(outfile_name, 64, "overlay_%d.png", frame_index);
382 cv::imwrite(outfile_name, r_blend);
383 printf("Saving frame %d overlayed with segmentation to: %s\n",
384 frame_index, outfile_name);
385 }
387 return true;
388 }
391 void ProcessArgs(int argc, char *argv[], std::string& config,
392 int& num_devices, DeviceType& device_type,
393 std::string& input_file)
394 {
395 const struct option long_options[] =
396 {
397 {"config", required_argument, 0, 'c'},
398 {"num_devices", required_argument, 0, 'n'},
399 {"device_type", required_argument, 0, 't'},
400 {"image_file", required_argument, 0, 'i'},
401 {"help", no_argument, 0, 'h'},
402 {"verbose", no_argument, 0, 'v'},
403 {0, 0, 0, 0}
404 };
406 int option_index = 0;
408 while (true)
409 {
410 int c = getopt_long(argc, argv, "c:n:t:i:hv", long_options, &option_index);
412 if (c == -1)
413 break;
415 switch (c)
416 {
417 case 'c': config = optarg;
418 break;
420 case 'n': num_devices = atoi(optarg);
421 assert (num_devices > 0 && num_devices <= 4);
422 break;
424 case 't': if (*optarg == 'e')
425 device_type = DeviceType::DLA;
426 else if (*optarg == 'd')
427 device_type = DeviceType::DSP;
428 else
429 {
430 std::cerr << "Invalid argument to -t, only e or d"
431 " allowed" << std::endl;
432 exit(EXIT_FAILURE);
433 }
434 break;
436 case 'i': input_file = optarg;
437 break;
439 case 'v': __TI_show_debug_ = true;
440 break;
442 case 'h': DisplayHelp();
443 exit(EXIT_SUCCESS);
444 break;
446 case '?': // Error in getopt_long
447 exit(EXIT_FAILURE);
448 break;
450 default:
451 std::cerr << "Unsupported option: " << c << std::endl;
452 break;
453 }
454 }
455 }
// Print usage information for the segmentation example to stdout.
void DisplayHelp()
{
    std::cout << "Usage: segmentation\n";
    std::cout << " Will run segmentation network to perform pixel-level"
                 " classification.\n Use -c to run a different"
                 " segmentation network. Default is jseg21_tiscapes.\n";
    std::cout << "Optional arguments:\n";
    std::cout << " -c <config> Valid configs: jseg21_tiscapes, jseg21\n";
    std::cout << " -n <number of cores> Number of cores to use (1 - 4)\n";
    std::cout << " -t <d|e> Type of core. d -> DSP, e -> DLA\n";
    std::cout << " -i <image> Path to the image file\n";
    std::cout << " Default are 3 frames in testvecs\n";
    std::cout << " -i camera Use camera as input\n";
    std::cout << " -v Verbose output during execution\n";
    std::cout << " -h Help\n";
}