1 #!/usr/bin/env python
3 #################################################################################
4 # Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
5 # All Rights Reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #
32 #################################################################################
34 import sys
35 import os
36 import cv2
37 import argparse
38 import datetime
39 import numpy as np
41 ################################
42 from pytorch_jacinto_ai.xnn.utils import str2bool
43 parser = argparse.ArgumentParser()
44 parser.add_argument('--save_path', type=str, default=None, help='checkpoint save folder')
45 parser.add_argument('--gpus', type=int, nargs='*', default=None, help='Base learning rate')
46 parser.add_argument('--batch_size', type=int, default=None, help='Batch size')
47 parser.add_argument('--model_name', type=str, default=None, help='model name')
48 parser.add_argument('--dataset_name', type=str, default=None, help='dataset name')
49 parser.add_argument('--data_path', type=str, default=None, help='data path')
50 parser.add_argument('--epoch_size', type=float, default=None, help='epoch size. using a fraction will reduce the data used for one epoch')
51 parser.add_argument('--img_resize', type=int, nargs=2, default=None, help='img_resize size. for training this will be modified according to rand_scale')
52 parser.add_argument('--rand_scale', type=float, nargs=2, default=None, help='random scale factors for training')
53 parser.add_argument('--rand_crop', type=int, nargs=2, default=None, help='random crop for training')
54 parser.add_argument('--output_size', type=int, nargs=2, default=None, help='output size of the evaluation - prediction/groundtruth. this is not used while training as it blows up memory requirement')
55 parser.add_argument('--quantize', type=str2bool, default=None, help='Quantize the model')
56 #parser.add_argument('--model_surgery', type=str, default=None, choices=[None, 'pact2'], help='whether to transform the model after defining')
57 parser.add_argument('--pretrained', type=str, default=None, help='pretrained model')
58 parser.add_argument('--bitwidth_weights', type=int, default=None, help='bitwidth for weight quantization')
59 parser.add_argument('--bitwidth_activations', type=int, default=None, help='bitwidth for activation quantization')
60 parser.add_argument('--img_border_crop', type=int, nargs=4, default=None, help='image border crop rectangle. can be relative or absolute')
61 cmds = parser.parse_args()
63 ################################
64 # taken care first, since this has to be done before importing pytorch
65 if 'gpus' in vars(cmds):
66 value = getattr(cmds, 'gpus')
67 if (value is not None) and ("CUDA_VISIBLE_DEVICES" not in os.environ):
68 os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(v) for v in value])
69 #
70 #
72 # to avoid hangs in data loader with multi threads
73 # this was observed after using cv2 image processing functions
74 # https://github.com/pytorch/pytorch/issues/1355
75 cv2.setNumThreads(0)
78 ################################
79 #import of torch should be after CUDA_VISIBLE_DEVICES for it to take effect
80 import torch
81 from pytorch_jacinto_ai.engine import infer_pixel2pixel
83 # Create the parse and set default arguments
84 args = infer_pixel2pixel.get_config()
################################
# Hard-coded run configuration. The commented alternatives are values the
# author toggles between runs; they are kept for reference.
# FIX: args.depth = [False] was assigned twice — the duplicate is removed.

# model / dataset selection
args.model_name = "deeplabv3lite_mobilenetv2_tv" #"deeplabv3lite_mobilenetv2_relu" #"deeplabv3lite_mobilenetv2_relu_x1p5" #"deeplabv3plus"
args.dataset_name = 'cityscapes_segmentation_measure' #'tiad_segmentation_infer' #'cityscapes_segmentation_infer' #'tiad_segmentation' #'cityscapes_segmentation_measure'
args.dataset_config.split = 'val'
#args.save_path = './data/checkpoints'
args.data_path = '/data/ssd/datasets/cityscapes/data/' #'./data/datasets/cityscapes/data' #'/data/hdd/datasets/cityscapes_leftImg8bit_sequence_trainvaltest/' #'./data/datasets/cityscapes/data' #'./data/tiad/data/demoVideo/sequence0021' #'./data/tiad/data/demoVideo/sequence0025' #'./data/tiad/data/demoVideo/sequence0001_2017'
#args.pretrained = './data/modelzoo/semantic_segmentation/cityscapes/deeplabv3lite-mobilenetv2/cityscapes_segmentation_deeplabv3lite-mobilenetv2_2019-06-26-08-59-32.pth'
#args.pretrained = './data/checkpoints/tiad_segmentation/2019-10-18_00-50-03_tiad_segmentation_deeplabv3lite_mobilenetv2_ericsun_resize768x384_traincrop768x384_float/checkpoint.pth.tar'
args.pretrained = '/data/files/work/bitbucket_TI/pytorch-jacinto-models/data/checkpoints/a2d2_segmentation/2020-01-25_13-06-18_a2d2_segmentation_deeplabv3lite_mobilenetv2_tv_resize768x384_traincrop768x384_v2_val41.83_train56.02/training/model_best.pth.tar'

# model input/output configuration
args.model_config.input_channels = (3,)
args.model_config.output_type = ['segmentation']
args.model_config.output_channels = None
args.losses = [['segmentation_loss']]
args.metrics = [['segmentation_metrics']]

args.frame_IOU = False # Print mIOU for each frame
args.shuffle = False
args.num_images = 50000 # Max number of images to run inference on

# visualization options: viz_op_type is one of ['color'], ['blend'], ['']
args.viz_op_type = ['blend']
args.visualize_gt = False
args.car_mask = False # False #True
args.label = [True] # False #True
args.label_infer = [True]
args.palette = True
args.start_img_index = 0
args.end_img_index = 0
args.create_video = True # True #False
args.depth = [False]

# batching / sizing
args.epoch_size = 0 #0 #0.5
args.iter_size = 1 #2
args.batch_size = 32 #80 #12 #16 #32 #64
args.img_resize = (384, 768) #(256,512) #(512,512) # #(1024, 2048) #(512,1024) #(720, 1280)
args.output_size = (1024, 2048) #(1024, 2048)
#args.rand_scale = (1.0, 2.0) #(1.0,2.0) #(1.0,1.5) #(1.0,1.25)

# quantization options
args.quantize = False
args.histogram_range = True

#args.image_prenorm=False
#args.image_mean = [0]
#args.image_scale = [1.0]
#args.image_prenorm = False
#args.image_mean = [123.675, 116.28, 103.53]
#args.image_scale = [0.017125, 0.017507, 0.017429]
#args.image_mean = [0] # image mean for input image normalization
#args.image_scale = [1.0] # image scaling/mult for input iamge normalization
#save modified files after last commit
#args.save_mod_files = True

# execution / export options
args.gpu_mode = True
args.write_layer_ip_op = False
args.save_onnx = True
152 ################################
153 for key in vars(cmds):
154 if key == 'gpus':
155 pass # already taken care above, since this has to be done before importing pytorch
156 elif hasattr(args, key):
157 value = getattr(cmds, key)
158 if value != 'None' and value is not None:
159 setattr(args, key, value)
160 else:
161 assert False, f'invalid argument {key}'
162 #
164 ################################
165 #Run the test
166 infer_pixel2pixel.main(args)