1 #!/bin/bash
3 set -e
5 # based on run_blstm_6h.sh in fisher_swbd recipe
7 # configs for 'chain'
8 affix=
9 stage=11 # assuming you already ran the xent systems
10 train_stage=-10
11 get_egs_stage=-10
12 dir=exp/chain/blstm_7b
13 decode_iter=
15 # training options
16 num_epochs=4
17 remove_egs=false
18 common_egs_dir=
19 num_data_reps=3
22 min_seg_len=
23 xent_regularize=0.1
24 chunk_width=150
25 chunk_left_context=40
26 chunk_right_context=40
27 # End configuration section.
28 echo "$0 $@" # Print the command line for logging
30 . ./cmd.sh
31 . ./path.sh
32 . ./utils/parse_options.sh
# 'chain' training needs a CUDA-enabled Kaldi build; bail out early with an
# explanatory message if the cuda-compiled binary reports otherwise.
if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi
# Append the optional affix (with a leading underscore) to the experiment dir.
dir=${dir}${affix:+_$affix}
ali_dir=exp/tri5a_rvb_ali
treedir=exp/chain/tri6_tree_11000
lang=data/lang_chain

# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.
# Fixed: pass the configurable $num_data_reps instead of a hard-coded 3, so the
# --num-data-reps option actually takes effect here (the default is still 3).
local/nnet3/run_ivector_common.sh --stage $stage --num-data-reps "$num_data_reps" || exit 1;
if [ $stage -le 7 ]; then
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  # Fixed: quote all expansions of $lang -- an unquoted variable handed to
  # 'rm -rf' is dangerous if it ever contains whitespace or glob characters.
  rm -rf "$lang"
  cp -r data/lang "$lang"
  silphonelist=$(cat "$lang/phones/silence.csl") || exit 1;
  nonsilphonelist=$(cat "$lang/phones/nonsilence.csl") || exit 1;
  # Use our special topology... note that later on may have to tune this
  # topology.  The phone lists are colon-separated single words, so quoting
  # them does not change how gen_topo.py receives its two arguments.
  steps/nnet3/chain/gen_topo.py "$nonsilphonelist" "$silphonelist" >"$lang/topo"
fi
if [ $stage -le 8 ]; then
  # Build a tree using our new topology.
  # we build the tree using clean features (data/train) rather than
  # the augmented features (data/train_rvb) to get better alignments
  # 11000 is the target number of tree leaves (matches the name of $treedir);
  # --frame-subsampling-factor 3 is the standard 'chain' frame rate reduction.
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
    --cmd "$train_cmd" 11000 data/train $lang exp/tri5a $treedir
fi
# Default minimum segment length (seconds): slightly longer than one training
# chunk, i.e. (chunk_width + 5) frames at 100 frames per second.
# Fixed: quoted the test operand ('[ -z $var ]' misbehaves when the value has
# spaces), and use function-call print() so this works under both python2 and
# python3 -- the printed value (e.g. "1.55") is identical on both.
if [ -z "$min_seg_len" ]; then
  min_seg_len=$(python -c "print(($chunk_width+5)/100.0)")
fi
if [ $stage -le 9 ]; then
  # Combine short segments of the reverberated hires data so that every
  # segment is at least $min_seg_len seconds long (chain egs need segments
  # at least one chunk wide).  Remove any stale output dir first.
  [ -d data/train_rvb_min${min_seg_len}_hires ] && rm -rf data/train_rvb_min${min_seg_len}_hires
  steps/cleanup/combine_short_segments.py --minimum-duration $min_seg_len \
    --input-data-dir data/train_rvb_hires \
    --output-data-dir data/train_rvb_min${min_seg_len}_hires

  #extract ivectors for the new data
  # Limiting to 2 utterances per speaker gives more speaker variety when
  # estimating the iVector extractor statistics.
  steps/online/nnet2/copy_data_dir.sh --utts-per-spk-max 2 \
    data/train_rvb_min${min_seg_len}_hires data/train_rvb_min${min_seg_len}_hires_max2
  ivectordir=exp/nnet3/ivectors_train_min${min_seg_len}
  if [[ $(hostname -f) == *.clsp.jhu.edu ]]; then # this shows how you can split across multiple file-systems.
    utils/create_split_dir.pl /export/b0{1,2,3,4}/$USER/kaldi-data/egs/aspire/s5/$ivectordir/storage $ivectordir/storage
  fi

  steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 200 \
    data/train_rvb_min${min_seg_len}_hires_max2 \
    exp/nnet3/extractor $ivectordir || exit 1;

  # combine the non-hires features for alignments/lattices
  # The placeholder prefix below is later rewritten to rev1_/rev2_/... in
  # stage 10 so the clean-data lattices can be reused for each reverberated copy.
  [ -d data/train_min${min_seg_len} ] && rm -r data/train_min${min_seg_len};
  utt_prefix="THISISUNIQUESTRING_"
  spk_prefix="THISISUNIQUESTRING_"
  utils/copy_data_dir.sh --spk-prefix "$spk_prefix" --utt-prefix "$utt_prefix" \
    data/train data/train_temp_for_lats
  steps/cleanup/combine_short_segments.py --minimum-duration $min_seg_len \
    --input-data-dir data/train_temp_for_lats \
    --output-data-dir data/train_min${min_seg_len}
fi
if [ $stage -le 10 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # use the same num-jobs as the alignments
  nj=200
  lat_dir=exp/tri5a_min${min_seg_len}_lats
  steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/train_min${min_seg_len} \
    data/lang exp/tri5a $lat_dir
  rm -f $lat_dir/fsts.*.gz # save space

  rvb_lat_dir=exp/tri5a_rvb_min${min_seg_len}_lats
  mkdir -p $rvb_lat_dir/temp/
  lattice-copy "ark:gunzip -c $lat_dir/lat.*.gz |" ark,scp:$rvb_lat_dir/temp/lats.ark,$rvb_lat_dir/temp/lats.scp

  # copy the lattices for the reverberated data: each reverberated copy
  # (rev1_..., rev2_..., ...) reuses the clean-data lattice; the placeholder
  # prefix added in stage 9 is rewritten to revN here.
  # Fixed: $(...) instead of deprecated backticks, and sed reads the scp file
  # directly instead of a useless 'cat | sed' pipeline.
  rm -f $rvb_lat_dir/temp/combined_lats.scp
  touch $rvb_lat_dir/temp/combined_lats.scp
  for i in $(seq 1 $num_data_reps); do
    sed -e "s/THISISUNIQUESTRING/rev${i}/g" $rvb_lat_dir/temp/lats.scp >> $rvb_lat_dir/temp/combined_lats.scp
  done
  sort -u $rvb_lat_dir/temp/combined_lats.scp > $rvb_lat_dir/temp/combined_lats_sorted.scp

  lattice-copy scp:$rvb_lat_dir/temp/combined_lats_sorted.scp "ark:|gzip -c >$rvb_lat_dir/lat.1.gz" || exit 1;
  # everything was merged into a single archive, so the lattice dir has 1 job
  echo "1" > $rvb_lat_dir/num_jobs

  # copy other files from original lattice dir
  for f in cmvn_opts final.mdl splice_opts tree; do
    cp $lat_dir/$f $rvb_lat_dir/$f
  done
fi
if [ $stage -le 11 ]; then
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
  # Fixed: quoted the operand so the test is well-formed even if tree-info fails.
  [ -z "$num_targets" ] && { echo "$0: error getting num-targets"; exit 1; }
  # Fixed: use 'python -c' with function-call print() so this works under both
  # python2 and python3 (output, e.g. "5.0", is identical on both).
  learning_rate_factor=$(python -c "print(0.5/$xent_regularize)")

  lstm_opts="decay-time=20"

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input

  # please note that it is important to have input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer to enable
  # the use of short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here

  # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
  fast-lstmp-layer name=blstm1-forward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
  fast-lstmp-layer name=blstm1-backward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts

  fast-lstmp-layer name=blstm2-forward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
  fast-lstmp-layer name=blstm2-backward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts

  fast-lstmp-layer name=blstm3-forward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 $lstm_opts
  fast-lstmp-layer name=blstm3-backward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3 $lstm_opts

  ## adding the layers for chain branch
  output-layer name=output input=Append(blstm3-forward, blstm3-backward) output-delay=0 include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model. we use
  # 0.5 / args.xent_regularize as the learning rate factor- the factor of
  # 0.5 / args.xent_regularize is suitable as it means the xent
  # final-layer learns at a rate independent of the regularization
  # constant; and the 0.5 was tuned so as to make the relative progress
  # similar in the xent and regular final layers.
  output-layer name=output-xent input=Append(blstm3-forward, blstm3-backward) output-delay=0 dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5

EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
if [ $stage -le 12 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{5,6,7,8}/$USER/kaldi-data/egs/aspire-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
  fi

  # Fixed: create the egs dir before touching the marker file -- previously
  # 'touch' died under 'set -e' on hosts where create_split_dir.pl did not run.
  mkdir -p $dir/egs
  touch $dir/egs/.nodelete # keep egs around when that run dies.

  # Fixed: --egs.dir was passed twice with the same value; pass it once.
  steps/nnet3/chain/train.py --stage $train_stage \
    --egs.dir "$common_egs_dir" \
    --cmd "$decode_cmd" \
    --feat.online-ivector-dir exp/nnet3/ivectors_train_min${min_seg_len} \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --trainer.num-chunk-per-minibatch 64 \
    --trainer.max-param-change 1.414 \
    --egs.stage $get_egs_stage \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width $chunk_width \
    --egs.chunk-left-context $chunk_left_context \
    --egs.chunk-right-context $chunk_right_context \
    --egs.chunk-left-context-initial=0 \
    --egs.chunk-right-context-final=0 \
    --trainer.frames-per-iter 1500000 \
    --trainer.num-epochs $num_epochs \
    --trainer.optimization.num-jobs-initial 3 \
    --trainer.optimization.num-jobs-final 16 \
    --trainer.optimization.initial-effective-lrate 0.001 \
    --trainer.optimization.final-effective-lrate 0.0001 \
    --trainer.optimization.shrink-value 0.99 \
    --trainer.optimization.momentum 0.0 \
    --trainer.deriv-truncate-margin 8 \
    --cleanup.remove-egs $remove_egs \
    --feat-dir data/train_rvb_min${min_seg_len}_hires \
    --tree-dir $treedir \
    --lat-dir exp/tri5a_rvb_min${min_seg_len}_lats \
    --dir $dir || exit 1;
fi
if [ $stage -le 13 ]; then
  # Build the decoding graph with the self-loop scale used for chain models.
  # Note: it might appear that this $lang directory is mismatched, and it is as
  # far as the 'topo' is concerned, but this script doesn't read the 'topo' from
  # the lang directory.
  utils/mkgraph.sh --self-loop-scale 1.0 data/lang_pp_test $dir $dir/graph_pp
fi
if [ $stage -le 14 ]; then
  # Decode with more acoustic context than was seen in training; the extra 10
  # frames on each side generally help (B)LSTM models at test time.
  # Fixed: replaced the deprecated $[...] arithmetic with standard $((...)).
  extra_left_context=$((chunk_left_context + 10))
  extra_right_context=$((chunk_right_context + 10))
  # %WER 25.5 | 2120 27212 | 81.0 11.9 7.1 6.5 25.5 75.0 | -1.022 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iterfinal_pp_fg/score_8/penalty_0.5/ctm.filt.filt.sys
  local/nnet3/prep_test_aspire.sh --stage 4 --decode-num-jobs 30 --affix "v7" \
    --extra-left-context $extra_left_context \
    --extra-right-context $extra_right_context \
    --extra-left-context-initial 0 \
    --extra-right-context-final 0 \
    --frames-per-chunk $chunk_width \
    --acwt 1.0 --post-decode-acwt 10.0 \
    --window 10 --overlap 5 \
    --sub-speaker-frames 6000 --max-count 75 --ivector-scale 0.75 \
    --pass2-decode-opts "--min-active 1000" \
    dev_aspire data/lang $dir/graph_pp $dir
fi
exit 0;
258 #online decoding is not yet supported with RNN AMs. See https://github.com/kaldi-asr/kaldi/issues/1091
260 # %WER 28.0 | 2120 27217 | 78.6 13.3 8.1 6.7 28.0 77.0 | -0.852 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iter600_pp_fg/score_9/penalty_0.25/ctm.filt.filt.sys
261 # %WER 27.1 | 2120 27217 | 78.9 13.1 7.9 6.0 27.1 75.8 | -0.944 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iter700_pp_fg/score_8/penalty_0.5/ctm.filt.filt.sys
262 # %WER 26.9 | 2120 27218 | 79.7 12.1 8.2 6.6 26.9 76.3 | -0.839 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iter1000_pp_fg/score_10/penalty_0.0/ctm.filt.filt.sys
263 # %WER 26.6 | 2120 27220 | 80.2 12.7 7.1 6.8 26.6 76.6 | -1.035 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iter1200_pp_fg/score_8/penalty_0.25/ctm.filt.filt.sys
264 # %WER 26.3 | 2120 27223 | 80.6 12.3 7.2 6.9 26.3 76.8 | -0.978 | exp/chain/blstm_asp2/decode_dev_aspire_whole_uniformsegmented_win10_over5_v7_iter1400_pp_fg/score_9/penalty_0.0/ctm.filt.filt.sys