#!/bin/bash

# run_tdnn_1f.sh is like run_tdnn_1e.sh, but it uses 2 to 6 jobs and adds
# proportional-shrink 20.

#exp/chain_cleaned/tdnn1e_sp_bi/: num-iters=253 nj=2..12 num-params=7.0M dim=40+100->3597 combine=-0.095->-0.095 xent:train/valid[167,252,final]=(-1.37,-1.31,-1.31/-1.47,-1.44,-1.44) logprob:train/valid[167,252,final]=(-0.087,-0.078,-0.078/-0.102,-0.099,-0.099)
#exp/chain_cleaned/tdnn1f_sp_bi/: num-iters=444 nj=2..6 num-params=7.0M dim=40+100->3603 combine=-0.114->-0.113 xent:train/valid[295,443,final]=(-1.59,-1.51,-1.49/-1.58,-1.52,-1.50) logprob:train/valid[295,443,final]=(-0.112,-0.102,-0.098/-0.122,-0.113,-0.110)
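# (summaries in the format above can be regenerated with, e.g.:
#  steps/info/chain_dir_info.pl exp/chain_cleaned/tdnn1f_sp_bi)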

# local/chain/compare_wer_general.sh exp/chain_cleaned/tdnn1e_sp_bi exp/chain_cleaned/tdnn1f_sp_bi
# System                    tdnn1e_sp_bi  tdnn1f_sp_bi
# WER on dev(orig)               9.2           9.0
# WER on dev(rescored)           8.6           8.2
# WER on test(orig)              9.4           9.1
# WER on test(rescored)          8.9           8.7
# Final train prob            -0.0776       -0.0983
# Final valid prob            -0.0992       -0.1103
# Final train prob (xent)     -1.3110       -1.4893
# Final valid prob (xent)     -1.4353       -1.4951

## how you run this (note: this assumes that the run_tdnn.sh soft link points here;
## otherwise call it directly in its location).
# by default, with cleanup:
# local/chain/run_tdnn.sh

# without cleanup:
# local/chain/run_tdnn.sh --train-set train --gmm tri3 --nnet3-affix "" &

# note: if you have already run the corresponding non-chain nnet3 system
# (local/nnet3/run_tdnn.sh), you may want to run with --stage 14.
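# e.g.:
# local/chain/run_tdnn.sh --stage 14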

# This script is like run_tdnn_1a.sh except it uses an xconfig-based mechanism
# to get the configuration.

set -e -o pipefail

# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=30
decode_nj=30
min_seg_len=1.55
xent_regularize=0.1
train_set=train_cleaned
gmm=tri3_cleaned  # the gmm for the target data
num_threads_ubm=32
nnet3_affix=_cleaned  # cleanup affix for nnet3 and chain dirs, e.g. _cleaned

# The rest are configs specific to this script.  Most of the parameters
# are just hardcoded at this level, in the commands below.
train_stage=-10
tree_affix=  # affix for tree directory, e.g. "a" or "b", in case we change the configuration.
tdnn_affix=1f  # affix for TDNN directory, e.g. "a" or "b", in case we change the configuration.
common_egs_dir=  # you can set this to use previously dumped egs.

# End configuration section.
echo "$0 $@"  # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh
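
# note: utils/parse_options.sh (sourced above) turns command-line options into
# shell variables, so e.g. running this script with "--stage 17 --decode-nj 8"
# sets stage=17 and decode_nj=8.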

if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

local/nnet3/run_ivector_common.sh --stage $stage \
                                  --nj $nj \
                                  --min-seg-len $min_seg_len \
                                  --train-set $train_set \
                                  --gmm $gmm \
                                  --num-threads-ubm $num_threads_ubm \
                                  --nnet3-affix "$nnet3_affix"

gmm_dir=exp/$gmm
ali_dir=exp/${gmm}_ali_${train_set}_sp_comb
tree_dir=exp/chain${nnet3_affix}/tree_bi${tree_affix}
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_comb_lats
dir=exp/chain${nnet3_affix}/tdnn${tdnn_affix}_sp_bi
train_data_dir=data/${train_set}_sp_hires_comb
lores_train_data_dir=data/${train_set}_sp_comb
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires_comb

for f in $gmm_dir/final.mdl $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $ali_dir/ali.1.gz; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done

if [ $stage -le 14 ]; then
  echo "$0: creating lang directory with one state per phone."
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  if [ -d data/lang_chain ]; then
    if [ data/lang_chain/L.fst -nt data/lang/L.fst ]; then
      echo "$0: data/lang_chain already exists, not overwriting it; continuing"
    else
      echo "$0: data/lang_chain already exists and seems to be older than data/lang..."
      echo " ... not sure what to do.  Exiting."
      exit 1;
    fi
  else
    cp -r data/lang data/lang_chain
    silphonelist=$(cat data/lang_chain/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat data/lang_chain/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that later on we may have to tune this
    # topology.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >data/lang_chain/topo
  fi
fi
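
# (optional) sanity-check the new lang directory, e.g.:
# utils/validate_lang.pl data/lang_chain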

if [ $stage -le 15 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # use the same num-jobs as the alignments
  steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
    data/lang $gmm_dir $lat_dir
  rm $lat_dir/fsts.*.gz # save space
fi

if [ $stage -le 16 ]; then
  # Build a tree using our new topology.  We know we have alignments for the
  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
  # those.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 4000 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir
fi
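
# The number of leaves actually allocated (we asked for 4000 above) can be
# checked with the same command stage 17 below uses to size the output layers:
# tree-info $tree_dir/tree | grep num-pdfs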

if [ $stage -le 17 ]; then
  mkdir -p $dir

  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
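  # e.g. with the default xent_regularize=0.1 this comes out to
  # learning_rate_factor=5.0 (note: the "print ..." snippet assumes python2).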

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input

  # please note that it is important to have the input layer with name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of the short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here
  relu-batchnorm-layer name=tdnn1 dim=450 self-repair-scale=1.0e-04
  relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=450
  relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1,2) dim=450
  relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=450
  relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=450
  relu-batchnorm-layer name=tdnn6 input=Append(-6,-3,0) dim=450

  ## adding the layers for chain branch
  relu-batchnorm-layer name=prefinal-chain input=tdnn6 dim=450 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  we use
  # 0.5 / args.xent_regularize as the learning rate factor; this value
  # means the xent final-layer learns at a rate independent of the
  # regularization constant, and the 0.5 was tuned so as to make the
  # relative progress similar in the xent and regular final layers.
  relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=450 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5

EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
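
  # The parsed configs land in $dir/configs; to see what the xconfig expanded
  # to, look at e.g. $dir/configs/final.config.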
fi

if [ $stage -le 18 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
  fi
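
  # (The block above only applies to the CLSP grid: utils/create_split_dir.pl
  # spreads the egs over several filesystems, with $dir/egs/storage pointing
  # to them via symlinks.  Elsewhere it is a no-op.)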

  steps/nnet3/chain/train.py --stage $train_stage \
    --cmd "$decode_cmd" \
    --feat.online-ivector-dir $train_ivector_dir \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --egs.dir "$common_egs_dir" \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width 150 \
    --trainer.num-chunk-per-minibatch 128 \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs 4 \
    --trainer.optimization.proportional-shrink 20 \
    --trainer.optimization.num-jobs-initial 2 \
    --trainer.optimization.num-jobs-final 6 \
    --trainer.optimization.initial-effective-lrate 0.001 \
    --trainer.optimization.final-effective-lrate 0.0001 \
    --trainer.max-param-change 2.0 \
    --cleanup.remove-egs true \
    --feat-dir $train_data_dir \
    --tree-dir $tree_dir \
    --lat-dir $lat_dir \
    --dir $dir
fi
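
# Training progress can be followed in the logs; assuming the standard nnet3
# log layout, $dir/log/train.*.log covers the SGD jobs, and
# $dir/log/compute_prob_{train,valid}.*.log holds the objective values that
# the "Final train/valid prob" numbers at the top were taken from.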

if [ $stage -le 19 ]; then
  # Note: it might appear that this data/lang_chain directory is mismatched, and it is,
  # as far as the 'topo' is concerned, but this script doesn't read the 'topo' from
  # the lang directory.
  utils/mkgraph.sh --self-loop-scale 1.0 data/lang $dir $dir/graph
fi

if [ $stage -le 20 ]; then
  rm $dir/.error 2>/dev/null || true
  for dset in dev test; do
    (
      steps/nnet3/decode.sh --num-threads 4 --nj $decode_nj --cmd "$decode_cmd" \
        --acwt 1.0 --post-decode-acwt 10.0 \
        --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${dset}_hires \
        --scoring-opts "--min-lmwt 5" \
        $dir/graph data/${dset}_hires $dir/decode_${dset} || exit 1;
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" data/lang data/lang_rescore \
        data/${dset}_hires ${dir}/decode_${dset} ${dir}/decode_${dset}_rescore || exit 1
    ) || touch $dir/.error &
  done
  wait
  if [ -f $dir/.error ]; then
    echo "$0: something went wrong in decoding"
    exit 1
  fi
fi
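
# To summarize the WERs afterwards you can use, e.g.:
# for d in $dir/decode_*; do grep WER $d/wer_* | utils/best_wer.sh; done
# (utils/best_wer.sh just picks the line with the lowest WER.)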
exit 0