1 #!/bin/bash
3 # ----------------------------------
4 # Quantization Aware Training (QAT) Example
5 # Texas Instruments (C) 2018-2020
6 # All Rights Reserved
7 # ----------------------------------
9 # ----------------------------------
10 date_var=`date '+%Y-%m-%d_%H-%M-%S'`
11 base_dir="./data/checkpoints/quantization_example"
12 save_path="$base_dir"/"$date_var"_quantization_example
13 log_file=$save_path/run.log
14 echo Logging the output to: $log_file
16 # ----------------------------------
17 mkdir -p $save_path
18 exec &> >(tee -a "$log_file")
20 # ----------------------------------
21 # Table mapping model name -> pretrained checkpoint (torchvision URL or
# local file path) - add more entries as required.
declare -A model_pretrained
model_pretrained[mobilenet_v2]='https://download.pytorch.org/models/mobilenet_v2-b0353104.pth'
model_pretrained[resnet50]='https://download.pytorch.org/models/resnet50-19c8e357.pth'
model_pretrained[mobilenetv2_shicai]='./data/modelzoo/pretrained/pytorch/others/shicai/MobileNet-Caffe/mobilenetv2_shicai_rgb.tar'
model_pretrained[shufflenetv2_x1.0]='https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth'
29 # ----------------------------------
30 # parameters for quantization aware training
31 # ----------------------------------
# made readonly: these are constants for the whole run and must not be
# reassigned by later code
readonly lr=1e-5 # initial learning rate for quantization aware training - recommend to use 1e-5 (or at max 5e-5)
readonly batch_size=64 # use a relatively smaller batch size as quantization aware training does not use multi-gpu
readonly epochs=10 # number of epochs to train
readonly epoch_size=1000 # artificially limit one training epoch to this many iterations - this argument is only used to limit the training time and may hurt accuracy - set to 0 to use the full training epoch
readonly epoch_size_val=0 # validation epoch size - set to 0 for full validation epoch
39 # ----------------------------------
# Run quantization aware training once per entry in the model table.
40 for model in "${!model_pretrained[@]}"; do
41 echo ==========================================================
42 pretrained="${model_pretrained[$model]}"

44 # echo ----------------------------------------------------------
45 # echo Estimating evaluation accuracy without quantization for $model
46 # python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification --arch "$model" --batch_size 64 --evaluate --pretrained "$pretrained" --save_path "$save_path"
47 #
48 # echo ----------------------------------------------------------
49 # echo Estimating evaluation accuracy with quantization for $model
50 # python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification --arch "$model" --batch_size 64 --evaluate --quantize --pretrained "$pretrained" --save_path "$save_path"

52 echo ----------------------------------------------------------
53 echo "Quantization Aware Training for $model"
54 # note: this example uses only a part of the training epoch and only 10 such (partial) epochs during quantized training to save time,
55 # but it may be necessary to use the full training epoch if the accuracy is not satisfactory.
# all expansions quoted so model names/paths with special characters
# (e.g. the '.' in shufflenetv2_x1.0) are passed as single arguments
python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification \
    --arch "$model" --batch_size "$batch_size" --lr "$lr" \
    --epoch_size "$epoch_size" --epoch_size_val "$epoch_size_val" --epochs "$epochs" \
    --quantize --pretrained "$pretrained" --save_path "$save_path"
57 done