index 4ee691658fa99ca6a361b7ce36278fe2aba46895..519bb8e3f09fc8ef9261e9758a6d2045f2c7808a 100755 (executable)
lr=1e-5 # initial learning rate for quantization aware training - recommend to use 1e-5 (or at max 5e-5)
batch_size=64 # use a relatively smaller batch size as quantization aware training does not use multi-gpu
epochs=10 # number of epochs to train
-epoch_size=0.1 # artificially limit one training epoch to this many iterations - this argument is only used to limit the training time and may hurt accuracy - set to 0 to use the full training epoch
+epoch_size=0.1 # artificially limit one training epoch to this many iterations - this argument is only used to limit the training time and may hurt accuracy - set to 0 to use the full training epoch
epoch_size_val=0 # validation epoch size - set to 0 for full validation epoch