533831832b2d2218148631cad913ec4cb26039ea
1 import sys
2 import os
3 import cv2
4 import argparse
5 import datetime
6 import numpy as np
8 ################################
9 from pytorch_jacinto_ai.xnn.utils import str2bool
################################
# Command-line overrides: every option defaults to None; only options the
# user actually passes are copied onto the engine config (see the override
# loop near the end of the file).
parser = argparse.ArgumentParser()
parser.add_argument('--save_path', type=str, default=None, help='checkpoint save folder')
# fixed help text: it was a copy-paste of the learning-rate description
parser.add_argument('--gpus', type=int, nargs='*', default=None, help='GPU ids to use (sets CUDA_VISIBLE_DEVICES)')
parser.add_argument('--batch_size', type=int, default=None, help='Batch size')
parser.add_argument('--lr', type=float, default=None, help='Base learning rate')
parser.add_argument('--lr_clips', type=float, default=None, help='Learning rate for clips in PAct2')
parser.add_argument('--lr_calib', type=float, default=None, help='Learning rate for calibration')
parser.add_argument('--model_name', type=str, default=None, help='model name')
parser.add_argument('--dataset_name', type=str, default=None, help='dataset name')
parser.add_argument('--data_path', type=str, default=None, help='data path')
parser.add_argument('--epoch_size', type=float, default=None, help='epoch size. using a fraction will reduce the data used for one epoch')
parser.add_argument('--epochs', type=int, default=None, help='number of epochs')
parser.add_argument('--warmup_epochs', type=int, default=None, help='number of epochs for the learning rate to increase and reach base value')
parser.add_argument('--milestones', type=int, nargs='*', default=None, help='change lr at these milestones')
parser.add_argument('--img_resize', type=int, nargs=2, default=None, help='img_resize size. for training this will be modified according to rand_scale')
parser.add_argument('--rand_scale', type=float, nargs=2, default=None, help='random scale factors for training')
parser.add_argument('--rand_crop', type=int, nargs=2, default=None, help='random crop for training')
parser.add_argument('--output_size', type=int, nargs=2, default=None, help='output size of the evaluation - prediction/groundtruth. this is not used while training as it blows up memory requirement')
parser.add_argument('--pretrained', type=str, default=None, help='pretrained model')
parser.add_argument('--resume', type=str, default=None, help='resume an unfinished training from this model')
parser.add_argument('--phase', type=str, default=None, help='training/calibration/validation')
parser.add_argument('--evaluate_start', type=str2bool, default=None, help='Whether to run validation before the training')
#
# fixed help texts below: three quantization options carried a copy-pasted
# "run only evaluation and no training" description
parser.add_argument('--quantize', type=str2bool, default=None, help='Quantize the model')
parser.add_argument('--histogram_range', type=str2bool, default=None, help='use histogram based range for quantization')
parser.add_argument('--per_channel_q', type=str2bool, default=None, help='use per-channel quantization')
parser.add_argument('--bias_calibration', type=str2bool, default=None, help='use bias calibration during quantization')
parser.add_argument('--bitwidth_weights', type=int, default=None, help='bitwidth for weight quantization')
parser.add_argument('--bitwidth_activations', type=int, default=None, help='bitwidth for activation quantization')
#
parser.add_argument('--freeze_bn', type=str2bool, default=None, help='freeze the bn stats or not')
cmds = parser.parse_args()
################################
# The gpu selection is handled first: CUDA_VISIBLE_DEVICES must be set
# before torch is imported for it to take effect. An already-set
# environment variable takes precedence over the --gpus option.
selected_gpus = getattr(cmds, 'gpus', None)
if selected_gpus is not None and "CUDA_VISIBLE_DEVICES" not in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(gpu) for gpu in selected_gpus)
#
#
50 #
52 ################################
53 # to avoid hangs in data loader with multi threads
54 # this was observed after using cv2 image processing functions
55 # https://github.com/pytorch/pytorch/issues/1355
56 cv2.setNumThreads(0)
59 ################################
60 #import of torch should be after CUDA_VISIBLE_DEVICES for it to take effect
61 import torch
62 from pytorch_jacinto_ai.engine import train_pixel2pixel
# Create the config object pre-populated with the engine's default arguments
args = train_pixel2pixel.get_config()

################################
# Modify arguments: multi-task training (depth + semantic + motion heads —
# presumably; inferred from output_type below, confirm against the dataset)
# with a mobilenetv2-based deeplabv3lite model on cityscapes.
args.model_name = 'deeplabv3lite_mobilenetv2_ericsun_mi4' #'deeplabv3lite_mobilenetv2_mi4'
args.dataset_name = 'cityscapes_depth_semantic_five_class_motion_image_dof_conf' #'cityscapes_flow_depth_segmentation_image_pair' #cityscapes_segmentation #'cityscapes_segmentation_dual'

#args.save_path = './data/checkpoints'
args.data_path = './data/datasets/cityscapes_768x384/data' #./data/pascal-voc/VOCdevkit/VOC2012

# float (non-quantized) cityscapes segmentation checkpoint used as the
# starting point; alternative starting checkpoints kept below for reference
args.pretrained = './data/modelzoo/pytorch/semantic_segmentation/cityscapes/jacinto_ai/deeplabv3lite_mobilenetv2_tv_resize768x384_best.pth.tar'
#'./data/checkpoints/cityscapes_depth_semantic_five_class_motion_image_dof_conf/0p9_release/2019-06-27-13-50-10_cityscapes_depth_semantic_five_class_motion_image_dof_conf_deeplabv3lite_mobilenetv2_ericsun_mi4_resize768x384_traincrop768x384/model_best.pth.tar'
#'./data/modelzoo/pretrained/pytorch/cityscapes_segmentation/v0.9-2018-12-07-19:38:26_cityscapes_segmentation_deeplabv3lite_mobilenetv2_relu_resize768x384_traincrop768x384_(68.9%)/model_best.pth.tar'
#'./data/checkpoints/store/saved/cityscapes_segmentation/v0.7-2018-10-25-13:07:38_cityscapes_segmentation_deeplabv3lite_mobilenetv2_relu_resize1024x512_traincrop512x512_(71.5%)/model_best.pth.tar'
#'./data/modelzoo/pretrained/pytorch/imagenet_classification/pytorch_jacinto_ai.vision/resnet50-19c8e357.pth'
#'./data/modelzoo/pretrained/pytorch/imagenet_classification/ericsun99/MobileNet-V2-Pytorch/mobilenetv2_Top1_71.806_Top2_90.410.pth.tar'

# args.resume = '/user/a0132471/Files/pytorch/pytorch-jacinto-models/checkpoints/cityscapes_depth_semantic_five_class_motion_image_dof_conf/2019-08-13-13-49-29_cityscapes_depth_semantic_five_class_motion_image_dof_conf_deeplabv3lite_mobilenetv2_ericsun_mi4_resize768x384_traincrop768x384/checkpoint.pth.tar'

# model/task configuration: two 3-channel image inputs; one loss/metric list
# per output head (three heads, matching output_type)
args.model_config.input_channels = (3,3)
args.model_config.output_type = ['depth', 'segmentation', 'segmentation']
args.model_config.output_channels = None #this can be found out from the dataset
args.losses = [['supervised_loss', 'scale_loss'], ['segmentation_loss'], ['segmentation_loss']]
args.metrics = [['supervised_relative_error_rng3to80'], ['segmentation_metrics'], ['segmentation_metrics']]
# NOTE(review): first sub-list presumably flags the inputs, second the
# outputs — confirm against the dataset/engine definition
args.is_flow = [[True,False],[False,False,False]]

# multi-task loss balancing
args.multi_task_type = 'gradient_norm' #None, 'adaptive', 'learned', 'uncertainty'
args.multi_task_factors = (0.169, 1.279, 1.553) #None #[1.291, 6.769, 6.852]
args.multi_decoder = True

# solver / schedule
args.solver = 'adam' #'sgd' #'adam'
args.epochs = 250 #200
args.epoch_size = 0 #0 #0.5
args.scheduler = 'step' #'poly' #'step'
args.multistep_gamma = 0.5 #only for step scheduler
args.milestones = (100, 200) #only for step scheduler
args.polystep_power = 0.9 #only for poly scheduler
args.iter_size = 1 #2
args.evaluate_start = False

args.lr = 1e-4 #1e-4 #0.01 #7e-3 #1e-4 #2e-4
args.batch_size = 16 #12 #16 #32 #64
args.weight_decay = 1e-4 #4e-5 #1e-5

# sizes are (height, width)
args.img_resize = (384, 768) #(512, 1024) #(1024, 2048)
args.rand_scale = (1.0, 2.0) #(1.0,2.0)
args.rand_crop = (384, 768) #(512,512) #(512,1024)
args.output_size = (1024, 2048) #for unflow loss only, output_size must match img_size

args.transform_rotation = 5 #0 #rotation degrees

args.model_config.aspp_dil = (2, 4, 6)
#args.model_config.use_aspp = True

args.generate_onnx = False
#args.phase = 'validation'
#args.quantize = True

args.model_config.normalize_gradients = True

args.pivot_task_idx = 2
################################
# Overlay the command line options onto the config: only options that were
# actually given (not None) override the in-script values above.
for key in vars(cmds):
    if key == 'gpus':
        pass # already taken care above, since this has to be done before importing pytorch
    elif hasattr(args, key):
        value = getattr(cmds, key)
        # both None and the literal string 'None' mean "not specified"
        if value != 'None' and value is not None:
            setattr(args, key, value)
    else:
        # raise instead of assert so the check is not stripped under python -O
        raise ValueError(f'invalid argument {key}')
#
################################
# Run the given phase (training / calibration / validation) with the
# configuration assembled above.
train_pixel2pixel.main(args)
################################
# After a float training run, additionally do quantization aware training,
# starting from the best model produced above (or from the original
# pretrained model if no epochs were run).
if 'training' in args.phase and (not args.quantize):
    save_path = train_pixel2pixel.get_save_path(args)
    if args.epochs > 0:
        args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
    args.phase = 'training_quantize'
    args.quantize = True
    args.lr = 1e-5
    args.epochs = 50
    # quantized training will use only one GPU in the engine - so reduce the batch_size
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
    if visible_devices is not None:
        args.batch_size = args.batch_size // len(visible_devices.split(','))
    train_pixel2pixel.main(args)
#
################################
# Finally, run a standalone quantized validation on the best checkpoint of
# whichever training/calibration phase ran last.
if any(keyword in args.phase for keyword in ('training', 'calibration')):
    best_checkpoint = os.path.join(train_pixel2pixel.get_save_path(args), 'model_best.pth.tar')
    args.pretrained = best_checkpoint
    args.phase = 'validation'
    args.quantize = True
    train_pixel2pixel.main(args)
#