change the extension of checkpoint files from .pth.tar to .pth
author Manu Mathew <a0393608@ti.com>
Tue, 12 May 2020 07:49:05 +0000 (13:19 +0530)
committer Manu Mathew <a0393608@ti.com>
Tue, 12 May 2020 08:00:19 +0000 (13:30 +0530)
docs/Calibration.md
modules/pytorch_jacinto_ai/engine/infer_classification_onnx_rt.py
modules/pytorch_jacinto_ai/engine/test_classification.py
modules/pytorch_jacinto_ai/engine/train_classification.py
modules/pytorch_jacinto_ai/engine/train_pixel2pixel.py
scripts/train_classification_main.py
scripts/train_depth_main.py
scripts/train_motion_segmentation_main.py
scripts/train_segmentation_main.py

index 2cfe4ef071c905a15be55075b348fa6acc47f2d8..247743afb2dcc53b0d416c9ec3c44ed900d4edc7 100644 (file)
@@ -82,7 +82,7 @@ python ./scripts/train_classification_main.py --phase calibration --dataset_name
 - Calibration of Cityscapes Semantic Segmentation model
 ```
 python ./scripts/train_segmentation_main.py --phase calibration --dataset_name cityscapes_segmentation --model_name deeplabv3lite_mobilenetv2_tv --data_path ./data/datasets/cityscapes/data --img_resize 384 768 --output_size 1024 2048 --gpus 0 1 
---pretrained ./data/modelzoo/pytorch/semantic_segmentation/cityscapes/jacinto_ai/deeplabv3lite_mobilenetv2_tv_resize768x384_best.pth.tar 
+--pretrained ./data/modelzoo/pytorch/semantic_segmentation/cityscapes/jacinto_ai/deeplabv3lite_mobilenetv2_tv_resize768x384_best.pth 
 --batch_size 12 --quantize True --epochs 1 --epoch_size 100
 ```
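
Only the checkpoint extension changes in the command above. The extension is just a naming convention for torch.save/torch.load, so an already-downloaded legacy .pth.tar checkpoint can simply be re-saved under the new .pth name; a minimal sketch (the helper name is illustrative, not part of this commit):
```
import torch

def convert_checkpoint(old_path):
    # Load a legacy '.pth.tar' checkpoint and write it back with a '.pth' extension.
    # torch.load/torch.save do not depend on the extension; only the paths in the
    # example commands change.
    assert old_path.endswith('.pth.tar')
    checkpoint = torch.load(old_path, map_location='cpu')
    new_path = old_path[:-len('.pth.tar')] + '.pth'
    torch.save(checkpoint, new_path)
    return new_path
```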
 
index e6a0f900efcd7d7ee5c3d481adeada733d0baa63..15a48ec2d2400bcfdf2599cd421e82793a84b941 100644 (file)
@@ -251,10 +251,10 @@ def write_onnx_model(args, model, save_path, name='checkpoint.onnx'):
     torch.onnx.export(model, dummy_input, os.path.join(save_path,name), export_params=True, verbose=False,
                       do_constant_folding=True, opset_version=args.opset_version)
 
-def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
+def save_checkpoint(state, is_best, filename='checkpoint.pth'):
     torch.save(state, filename)
     if is_best:
-        shutil.copyfile(filename, 'model_best.pth.tar')
+        shutil.copyfile(filename, 'model_best.pth')
 
 
 class AverageMeter(object):
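
For reference, the file written by save_checkpoint() above is an ordinary torch-serialized object; a minimal loading sketch, assuming the usual layout with the weights under a 'state_dict' key (adjust if your checkpoint stores the state_dict directly):
```
import torch

def load_checkpoint(model, filename='model_best.pth'):
    # Read back a checkpoint saved by save_checkpoint() and restore the weights.
    checkpoint = torch.load(filename, map_location='cpu')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    return model
```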
index 4b7f14d74fc26208f1e1de2d38881254e079896d..7732097354afdc0a07e7c5b073dd804de832fcd4 100644 (file)
@@ -28,12 +28,12 @@ def get_config():
     args.model_config = xnn.utils.ConfigNode()
     args.dataset_config = xnn.utils.ConfigNode()
 
-    args.model_name = 'mobilenet_v2_classification'     # model architecture'
+    args.model_name = 'mobilenetv2_tv_x1'     # model architecture'
     args.dataset_name = 'imagenet_classification'       # image folder classification
 
     args.data_path = './data/datasets/ilsvrc'           # path to dataset
     args.save_path = None                               # checkpoints save path
-    args.pretrained = './data/modelzoo/pretrained/pytorch/imagenet_classification/ericsun99/MobileNet-V2-Pytorch/mobilenetv2_Top1_71.806_Top2_90.410.pth.tar' # path to pre_trained model
+    args.pretrained = 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth' # path to pre_trained model
 
     args.workers = 8                                    # number of data loading workers (default: 4)
     args.batch_size = 256                               # mini_batch size (default: 256)
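
With this change the default pretrained weights are fetched from the torchvision download URL rather than a local .pth.tar file. A hedged sketch of how such a URL is typically resolved and applied (torch.hub caches the download locally; strict=False is shown only because a derived model such as mobilenetv2_tv_x1 above may not match every key, so check the reported missing/unexpected keys):
```
import torch
from torch.hub import load_state_dict_from_url

url = 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth'
state_dict = load_state_dict_from_url(url, map_location='cpu')  # cached under ~/.cache/torch
# model = ...  # construct the classification model first
# missing, unexpected = model.load_state_dict(state_dict, strict=False)
```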
@@ -331,10 +331,10 @@ def write_onnx_model(args, model, save_path, name='checkpoint.onnx'):
     torch.onnx.export(model, dummy_input, os.path.join(save_path,name), export_params=True, verbose=False,
                       do_constant_folding=True, opset_version=args.opset_version)
 
-def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
+def save_checkpoint(state, is_best, filename='checkpoint.pth'):
     torch.save(state, filename)
     if is_best:
-        shutil.copyfile(filename, 'model_best.pth.tar')
+        shutil.copyfile(filename, 'model_best.pth')
 
 
 class AverageMeter(object):
index 07862e7204e64e72fc23a34497c93ffd1f4a08cb..a40907acd93ecab7ea94ead77f18108787490a49 100644 (file)
@@ -39,7 +39,7 @@ def get_config():
     args.model_config.input_channels = 3                # num input channels
 
     args.data_path = './data/datasets/ilsvrc'           # path to dataset
-    args.model_name = 'mobilenet_v2_classification'     # model architecture'
+    args.model_name = 'mobilenetv2_tv_x1'     # model architecture'
     args.dataset_name = 'imagenet_classification'   # image folder classification
     args.save_path = None                               # checkpoints save path
     args.phase = 'training'                             # training/calibration/validation
@@ -621,11 +621,11 @@ def validate(args, val_loader, model, criterion, epoch):
     return top1.avg
 
 
-def save_checkpoint(args, save_path, model, state, is_best, filename='checkpoint.pth.tar'):
+def save_checkpoint(args, save_path, model, state, is_best, filename='checkpoint.pth'):
     filename = os.path.join(save_path, filename)
     torch.save(state, filename)
     if is_best:
-        bestname = os.path.join(save_path, 'model_best.pth.tar')
+        bestname = os.path.join(save_path, 'model_best.pth')
         shutil.copyfile(filename, bestname)
     #
     if args.save_onnx:
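
When args.save_onnx is set, write_onnx_model() also exports checkpoint.onnx via torch.onnx.export. A minimal sanity check of that exported file with onnxruntime (the 224x224 input shape is an assumption; use the resolution the model was exported with):
```
import numpy as np
import onnxruntime as ort

# Run the exported model once on random data to confirm it loads and produces outputs.
session = ort.InferenceSession('checkpoint.onnx')
input_name = session.get_inputs()[0].name
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = session.run(None, {input_name: dummy})
print([o.shape for o in outputs])
```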
index bd91d70c739c1d9429150ee0e1fe61a4a3620b4e..d838078a1c038ffe4aec02e3d4ff7b0f3a5c2de3 100644 (file)
@@ -1100,10 +1100,10 @@ def compute_task_objectives(args, objective_fns, input_var, task_outputs, task_t
     return return_list 
 
 
-def save_checkpoint(args, save_path, model, checkpoint_dict, is_best, filename='checkpoint.pth.tar'):
+def save_checkpoint(args, save_path, model, checkpoint_dict, is_best, filename='checkpoint.pth'):
     torch.save(checkpoint_dict, os.path.join(save_path,filename))
     if is_best:
-        shutil.copyfile(os.path.join(save_path,filename), os.path.join(save_path,'model_best.pth.tar'))
+        shutil.copyfile(os.path.join(save_path,filename), os.path.join(save_path,'model_best.pth'))
     #
     if args.save_onnx:
         write_onnx_model(args, model, save_path, name='checkpoint.onnx')
index d3d3e57d3af67b79164582527fa47b08d2e0026f..a04ba2399495db473d6eef16d295a88d5bd4e6e4 100755 (executable)
@@ -155,7 +155,7 @@ train_classification.main(args)
 # In addition run a quantized calibration, starting from the trained model
 if 'training' in args.phase and (not args.quantize):
     save_path = train_classification.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
+    args.pretrained = os.path.join(save_path, 'model_best.pth')
     args.phase = 'training_quantize'
     args.quantize = True
     args.lr = 1e-5
@@ -170,7 +170,7 @@ if 'training' in args.phase and (not args.quantize):
 # In addition run a separate validation, starting from the calibrated model - to estimate the quantized accuracy accurately
 if 'training' in args.phase or 'calibration' in args.phase:
     save_path = train_classification.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
+    args.pretrained = os.path.join(save_path, 'model_best.pth')
     args.phase = 'validation'
     args.quantize = True
     train_classification.main(args)
index 561a78a79be4c9e918c9b518345ddc4f8ae8e1da..71e1a599a15b5817ef055fe5a85c975c0c522555 100755 (executable)
@@ -76,7 +76,7 @@ args.split_files = (args.data_path+'/train.txt', args.data_path+'/val.txt')
 
 #args.save_path = './data/checkpoints'
 
-args.pretrained = './data/modelzoo/pytorch/semantic_segmentation/cityscapes/jacinto_ai/deeplabv3lite_mobilenetv2_tv_resize768x384_best.pth.tar'
+args.pretrained = './data/modelzoo/pytorch/semantic_segmentation/cityscapes/jacinto_ai/deeplabv3lite_mobilenetv2_tv_768x384_best.pth'
                                     # 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth'
                                     # './data/modelzoo/pretrained/pytorch/imagenet_classification/ericsun99/MobileNet-V2-Pytorch/mobilenetv2_Top1_71.806_Top2_90.410.pth.tar'
                                     # 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
@@ -151,7 +151,7 @@ train_pixel2pixel.main(args)
 # In addition run a quantization aware training, starting from the trained model
 if 'training' in args.phase and (not args.quantize):
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar') if (args.epochs>0) else args.pretrained
+    args.pretrained = os.path.join(save_path, 'model_best.pth') if (args.epochs>0) else args.pretrained
     args.phase = 'training_quantize'
     args.quantize = True
     args.lr = 1e-5
@@ -166,7 +166,7 @@ if 'training' in args.phase and (not args.quantize):
 # In addition run a separate validation
 if 'training' in args.phase or 'calibration' in args.phase:
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
+    args.pretrained = os.path.join(save_path, 'model_best.pth')
     args.phase = 'validation'
     args.quantize = True
     train_pixel2pixel.main(args)
index 00e2d96629e3a430f58c7167334d25d099c6ed27..2f13e151d210bad3de75fd945804c55b39022677 100755 (executable)
@@ -146,7 +146,7 @@ train_pixel2pixel.main(args)
 # In addition run a quantization aware training, starting from the trained model
 if 'training' in args.phase and (not args.quantize):
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar') if (args.epochs>0) else args.pretrained
+    args.pretrained = os.path.join(save_path, 'model_best.pth') if (args.epochs>0) else args.pretrained
     args.phase = 'training_quantize'
     args.quantize = True
     args.lr = 1e-5
@@ -161,7 +161,7 @@ if 'training' in args.phase and (not args.quantize):
 # In addition run a separate validation
 if 'training' in args.phase or 'calibration' in args.phase:
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
+    args.pretrained = os.path.join(save_path, 'model_best.pth')
     args.phase = 'validation'
     args.quantize = True
     train_pixel2pixel.main(args)
index dda5c0f271f7057ab8f1847ee7f9dc085abeaf67..aca1965672f129b939aaac441812eef65e015c14 100755 (executable)
@@ -31,6 +31,7 @@ parser.add_argument('--pretrained', type=str, default=None, help='pretrained mod
 parser.add_argument('--resume', type=str, default=None, help='resume an unfinished training from this model')
 parser.add_argument('--phase', type=str, default=None, help='training/calibration/validation')
 parser.add_argument('--evaluate_start', type=str2bool, default=None, help='Whether to run validation before the training')
+parser.add_argument('--save_onnx', type=str2bool, default=None, help='Whether to export onnx model or not')
 #
 parser.add_argument('--quantize', type=str2bool, default=None, help='Quantize the model')
 parser.add_argument('--histogram_range', type=str2bool, default=None, help='run only evaluation and no training')
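
The new --save_onnx flag reuses the repo's str2bool argparse type, which accepts textual booleans such as 'true'/'false' on the command line. The repo ships its own helper; the sketch below is only an illustrative equivalent:
```
import argparse

def str2bool(v):
    # Accept common textual spellings of a boolean on the command line.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected')

# e.g. pass --save_onnx True on the command line to enable the onnx export
```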
@@ -148,7 +149,7 @@ train_pixel2pixel.main(args)
 # In addition run a quantization aware training, starting from the trained model
 if 'training' in args.phase and (not args.quantize):
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar') if (args.epochs>0) else args.pretrained
+    args.pretrained = os.path.join(save_path, 'model_best.pth') if (args.epochs>0) else args.pretrained
     args.phase = 'training_quantize'
     args.quantize = True
     args.lr = 1e-5
@@ -163,7 +164,7 @@ if 'training' in args.phase and (not args.quantize):
 # In addition run a separate validation
 if 'training' in args.phase or 'calibration' in args.phase:
     save_path = train_pixel2pixel.get_save_path(args)
-    args.pretrained = os.path.join(save_path, 'model_best.pth.tar')
+    args.pretrained = os.path.join(save_path, 'model_best.pth')
     args.phase = 'validation'
     args.quantize = True
     train_pixel2pixel.main(args)