diff --git a/run_quantization_example.sh b/run_quantization_example.sh
new file mode 100755 (executable)
index 0000000..bc61f04
--- /dev/null
@@ -0,0 +1,54 @@
+#!/bin/bash
+
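+# This script runs each torchvision model below through three steps: float evaluation,
+# evaluation with quantization, and quantization aware training (QAT), all via
+# examples/quantization_example.py. Output is logged under ./data/checkpoints/quantization.
+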
+# ----------------------------------
+base_dir="./data/checkpoints/quantization"
+date_var=$(date '+%Y-%m-%d_%H-%M-%S')
+logdir="$base_dir/${date_var}_quantization"
+logfile="$logdir/run.log"
+echo "Logging the output to: $logfile"
+
+mkdir -p "$base_dir"
+mkdir -p "$logdir"
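+# mirror all subsequent stdout/stderr to both the console and the log file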
+exec &> >(tee -a "$logfile")
+# ----------------------------------
+
+# model names and pretrained paths from torchvision - add more as required
+declare -A model_pretrained=(
+  [mobilenet_v2]=https://download.pytorch.org/models/mobilenet_v2-b0353104.pth
+  [shufflenetv2_x1.0]=https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth
+  [resnet50]=https://download.pytorch.org/models/resnet50-19c8e357.pth
+)
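+# example of adding a model - illustrative entry only; verify the checkpoint URL
+# against the torchvision model zoo before enabling it:
+#   [resnet18]=https://download.pytorch.org/models/resnet18-5c106cde.pth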
+
+for model in "${!model_pretrained[@]}"; do
+  echo ==========================================================
+  pretrained="${model_pretrained[$model]}"
+
+  echo ----------------------------------------------------------
+  echo Estimating evaluation accuracy without quantization for $model
+  python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification --arch $model --batch-size 64 --evaluate --pretrained $pretrained
+
+  echo ----------------------------------------------------------
+  echo Estimating evaluation accuracy with quantization for $model
+  python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification --arch $model --batch-size 64 --evaluate --quantize --pretrained $pretrained
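+  # (assumption: --quantize together with --evaluate calibrates/quantizes the model before
+  # measuring accuracy - see examples/quantization_example.py for the exact behavior)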
+
+  echo ----------------------------------------------------------
+  echo Quantization Aware Training for $model
+  # note: to save time, this example uses only a part of each training epoch and only 10 such
+  # (partial) epochs during quantized training; it may be necessary to use the full training
+  # epoch if the accuracy is not satisfactory.
+  python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification --arch $model --batch-size 64 --lr 5e-5 --epoch-size 1000 --epochs 10 --quantize --pretrained $pretrained
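+  # for a longer run, a full-epoch variant might look like the following (assumption:
+  # omitting --epoch-size trains on the whole dataset each epoch; adjust --epochs as needed):
+  #   python -u ./examples/quantization_example.py ./data/datasets/image_folder_classification \
+  #     --arch $model --batch-size 64 --lr 5e-5 --epochs 10 --quantize --pretrained $pretrained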
+done
\ No newline at end of file