 egs/wsj/s5/local/chain/run_tdnn.sh           |   2 +-
 egs/wsj/s5/local/chain/tuning/run_tdnn_1c.sh | 337 ++++++++++++++++++++++++++
 2 files changed, 338 insertions(+), 1 deletion(-)
diff --git a/egs/wsj/s5/local/chain/run_tdnn.sh b/egs/wsj/s5/local/chain/run_tdnn.sh
index 61f8f4991..d48449e28 120000
--- a/egs/wsj/s5/local/chain/run_tdnn.sh
+++ b/egs/wsj/s5/local/chain/run_tdnn.sh
@@ -1 +1 @@
-tuning/run_tdnn_1b.sh
\ No newline at end of file
+tuning/run_tdnn_1c.sh
\ No newline at end of file
diff --git a/egs/wsj/s5/local/chain/tuning/run_tdnn_1c.sh b/egs/wsj/s5/local/chain/tuning/run_tdnn_1c.sh
new file mode 100755
index 000000000..7dc30ecf8
--- /dev/null
+++ b/egs/wsj/s5/local/chain/tuning/run_tdnn_1c.sh
@@ -0,0 +1,337 @@
#!/bin/bash

# 1c is as 1b but using batchnorm instead of renorm
# 1b is as 1a but using --proportional-shrink=60.0

# local/chain/compare_wer.sh exp/chain/tdnn1a_sp exp/chain/tdnn1b_sp
# System                      tdnn1a_sp  tdnn1b_sp
# WER dev93 (tgpr)                 7.87       7.24
# WER dev93 (tg)                   7.61       6.95
# WER dev93 (big-dict,tgpr)        5.71       5.19
# WER dev93 (big-dict,fg)          5.10       4.52
# WER eval92 (tgpr)                5.23       5.09
# WER eval92 (tg)                  4.87       4.64
# WER eval92 (big-dict,tgpr)       3.24       2.91
# WER eval92 (big-dict,fg)         2.71       2.39
# Final train prob              -0.0414    -0.0570
# Final valid prob              -0.0634    -0.0680
# Final train prob (xent)       -0.8216    -0.9587
# Final valid prob (xent)       -0.9208    -1.0039


# steps/info/chain_dir_info.pl exp/chain/tdnn1b_sp
# exp/chain/tdnn1b_sp: num-iters=102 nj=2..5 num-params=7.6M dim=40+100->2889 combine=-0.066->-0.063 xent:train/valid[67,101,final]=(-1.12,-0.979,-0.959/-1.13,-1.03,-1.00) logprob:train/valid[67,101,final]=(-0.071,-0.058,-0.057/-0.077,-0.069,-0.068)
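# (reading the line above: num-iters is the number of training iterations,
# nj=2..5 means the number of parallel jobs ramps from 2 to 5, and
# dim=40+100->2889 means 40-dim MFCCs plus a 100-dim i-vector mapping to
# 2889 output pdfs.)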

set -e -o pipefail

# First the options that are passed through to run_ivector_common.sh
# (some of which are also used in this script directly).
stage=0
nj=30
train_set=train_si284
test_sets="test_dev93 test_eval92"
gmm=tri4b        # this is the source gmm-dir that we'll use for alignments; it
                 # should have alignments for the specified training data.
num_threads_ubm=32
nnet3_affix=     # affix for exp dirs, e.g. it was _cleaned in tedlium.

# Options which are not passed through to run_ivector_common.sh
affix=1c         # affix for the TDNN directory, e.g. "1a" or "1b", in case we
                 # change the configuration.
common_egs_dir=
reporting_email=

# chain options
train_stage=-10
xent_regularize=0.1

# training chunk-options
chunk_width=140,100,160
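# note: the first value (140) is the principal chunk width; 100 and 160 are
# alternative widths that the egs generation can use so that chunks fit
# utterance lengths with less padding.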
# we don't need extra left/right context for TDNN systems.
chunk_left_context=0
chunk_right_context=0

# training options
srand=0
remove_egs=true

# decode options
test_online_decoding=false  # if true, it will run the last decoding stage.

# End configuration section.
echo "$0 $@"  # Print the command line for logging


. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh


if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs but you have not compiled Kaldi with CUDA
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

local/nnet3/run_ivector_common.sh \
  --stage $stage --nj $nj \
  --train-set $train_set --gmm $gmm \
  --num-threads-ubm $num_threads_ubm \
  --nnet3-affix "$nnet3_affix"
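# run_ivector_common.sh handles speed perturbation, hi-res MFCC extraction and
# i-vector extractor training/extraction; the _sp and _sp_hires directories
# referenced below are created by that script.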


gmm_dir=exp/${gmm}
ali_dir=exp/${gmm}_ali_${train_set}_sp
lat_dir=exp/chain${nnet3_affix}/${gmm}_${train_set}_sp_lats
dir=exp/chain${nnet3_affix}/tdnn${affix}_sp
train_data_dir=data/${train_set}_sp_hires
train_ivector_dir=exp/nnet3${nnet3_affix}/ivectors_${train_set}_sp_hires
lores_train_data_dir=data/${train_set}_sp

# note: you don't necessarily have to change the treedir name
# each time you do a new experiment -- only if you change the
# configuration in a way that affects the tree.
tree_dir=exp/chain${nnet3_affix}/tree_a_sp
# the 'lang' directory is created by this script.
# If you create such a directory with a non-standard topology
# you should probably name it differently.
lang=data/lang_chain

for f in $train_data_dir/feats.scp $train_ivector_dir/ivector_online.scp \
    $lores_train_data_dir/feats.scp $gmm_dir/final.mdl \
    $ali_dir/ali.1.gz; do
  [ ! -f $f ] && echo "$0: expected file $f to exist" && exit 1
done


if [ $stage -le 12 ]; then
  echo "$0: creating lang directory $lang with chain-type topology"
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file.  [note, it really has two states.. the first one is only repeated
  # once, the second one has zero or more repeats.]
  if [ -d $lang ]; then
    if [ $lang/L.fst -nt data/lang/L.fst ]; then
      echo "$0: $lang already exists, not overwriting it; continuing"
    else
      echo "$0: $lang already exists and seems to be older than data/lang..."
      echo " ... not sure what to do.  Exiting."
      exit 1;
    fi
  else
    cp -r data/lang $lang
    silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
    nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
    # Use our special topology... note that we may later have to tune this
    # topology.
    steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
  fi
fi

if [ $stage -le 13 ]; then
  # Get the alignments as lattices (gives the chain training more freedom).
  # use the same num-jobs as the alignments
  steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \
    data/lang $gmm_dir $lat_dir
  rm $lat_dir/fsts.*.gz # save space
fi

if [ $stage -le 14 ]; then
  # Build a tree using our new topology.  We know we have alignments for the
  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
  # those.  The num-leaves is always somewhat less than the num-leaves from
  # the GMM baseline.
  if [ -f $tree_dir/final.mdl ]; then
    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
    exit 1;
  fi
  steps/nnet3/chain/build_tree.sh \
    --frame-subsampling-factor 3 \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
    $lang $ali_dir $tree_dir
fi
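
# once the tree is built, you can check the number of leaves with:
#   tree-info $tree_dir/tree | grep num-pdfs
# (stage 15 below uses the same command to set the output dimension).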


if [ $stage -le 15 ]; then
  mkdir -p $dir
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $tree_dir/tree | grep num-pdfs | awk '{print $2}')
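  # note: the next line assumes 'python' is Python 2 (it uses the print
  # statement); with Python 3 you would need something like:
  #   python -c "print(0.5/$xent_regularize)"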
  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
  input dim=100 name=ivector
  input dim=40 name=input

  # please note that it is important to have the input layer with the name=input
  # as the layer immediately preceding the fixed-affine-layer, to enable
  # the use of short notation for the descriptor
  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat

  # the first splicing is moved before the lda layer, so no splicing here
  relu-batchnorm-layer name=tdnn1 dim=512
  relu-batchnorm-layer name=tdnn2 dim=512 input=Append(-1,0,1)
  relu-batchnorm-layer name=tdnn3 dim=512 input=Append(-1,0,1)
  relu-batchnorm-layer name=tdnn4 dim=512 input=Append(-3,0,3)
  relu-batchnorm-layer name=tdnn5 dim=512 input=Append(-3,0,3)
  relu-batchnorm-layer name=tdnn6 dim=512 input=Append(-6,-3,0)

  ## adding the layers for chain branch
  relu-batchnorm-layer name=prefinal-chain dim=512 target-rms=0.5
  output-layer name=output include-log-softmax=false dim=$num_targets max-change=1.5

  # adding the layers for xent branch
  # This block prints the configs for a separate output that will be
  # trained with a cross-entropy objective in the 'chain' models... this
  # has the effect of regularizing the hidden parts of the model.  We use
  # 0.5 / args.xent_regularize as the learning-rate factor: this value
  # means the xent final layer learns at a rate independent of the
  # regularization constant, and the 0.5 was tuned so as to make the
  # relative progress similar in the xent and regular final layers.
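  # For example, with the default xent_regularize=0.1 the factor comes out
  # to 0.5/0.1 = 5.0.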
  relu-batchnorm-layer name=prefinal-xent input=tdnn6 dim=512 target-rms=0.5
  output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi
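
# if you are curious what the xconfig parser produced, the fully expanded
# nnet3 config is written to $dir/configs/final.config.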


if [ $stage -le 16 ]; then
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{3,4,5,6}/$USER/kaldi-data/egs/wsj-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
  fi

  steps/nnet3/chain/train.py --stage=$train_stage \
    --cmd="$decode_cmd" \
    --feat.online-ivector-dir=$train_ivector_dir \
    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
    --chain.xent-regularize=$xent_regularize \
    --chain.leaky-hmm-coefficient=0.1 \
    --chain.l2-regularize=0.00005 \
    --chain.apply-deriv-weights=false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --trainer.srand=$srand \
    --trainer.max-param-change=2.0 \
    --trainer.num-epochs=4 \
    --trainer.frames-per-iter=3000000 \
    --trainer.optimization.num-jobs-initial=2 \
    --trainer.optimization.num-jobs-final=5 \
    --trainer.optimization.initial-effective-lrate=0.001 \
    --trainer.optimization.final-effective-lrate=0.0001 \
    --trainer.optimization.shrink-value=1.0 \
    --trainer.optimization.proportional-shrink=60.0 \
    --trainer.num-chunk-per-minibatch=256,128,64 \
    --trainer.optimization.momentum=0.0 \
    --egs.chunk-width=$chunk_width \
    --egs.chunk-left-context=0 \
    --egs.chunk-right-context=0 \
    --egs.chunk-left-context-initial=0 \
    --egs.chunk-right-context-final=0 \
    --egs.dir="$common_egs_dir" \
    --egs.opts="--frames-overlap-per-eg 0" \
    --cleanup.remove-egs=$remove_egs \
    --use-gpu=true \
    --reporting.email="$reporting_email" \
    --feat-dir=$train_data_dir \
    --tree-dir=$tree_dir \
    --lat-dir=$lat_dir \
    --dir=$dir || exit 1;
fi
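
# if training is interrupted, it can be resumed without redoing the earlier
# stages, e.g. (the iteration number here is hypothetical):
#   local/chain/tuning/run_tdnn_1c.sh --stage 16 --train-stage 50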

if [ $stage -le 17 ]; then
  # The reason we are using data/lang here, instead of $lang, is just to
  # emphasize that it's not actually important to give mkgraph.sh the
  # lang directory with the matched topology (since it gets the
  # topology file from the model).  So you could give it a different
  # lang directory, one that contained a wordlist and LM of your choice,
  # as long as phones.txt was compatible.

  utils/lang/check_phones_compatible.sh \
    data/lang_test_tgpr/phones.txt $lang/phones.txt
  utils/mkgraph.sh \
    --self-loop-scale 1.0 data/lang_test_tgpr \
    $tree_dir $tree_dir/graph_tgpr || exit 1;

  utils/lang/check_phones_compatible.sh \
    data/lang_test_bd_tgpr/phones.txt $lang/phones.txt
  utils/mkgraph.sh \
    --self-loop-scale 1.0 data/lang_test_bd_tgpr \
    $tree_dir $tree_dir/graph_bd_tgpr || exit 1;
fi
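
# the graphs built above ($tree_dir/graph_tgpr and $tree_dir/graph_bd_tgpr)
# are shared by the offline decoding in stage 18 and the online decoding in
# stage 19.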

if [ $stage -le 18 ]; then
  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
  rm $dir/.error 2>/dev/null || true

  for data in $test_sets; do
    (
      data_affix=$(echo $data | sed s/test_//)
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      for lmtype in tgpr bd_tgpr; do
        steps/nnet3/decode.sh \
          --acwt 1.0 --post-decode-acwt 10.0 \
          --extra-left-context 0 --extra-right-context 0 \
          --extra-left-context-initial 0 \
          --extra-right-context-final 0 \
          --frames-per-chunk $frames_per_chunk \
          --nj $nspk --cmd "$decode_cmd" --num-threads 4 \
          --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
          $tree_dir/graph_${lmtype} data/${data}_hires ${dir}/decode_${lmtype}_${data_affix} || exit 1
      done
      steps/lmrescore.sh \
        --self-loop-scale 1.0 \
        --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
        data/${data}_hires ${dir}/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi
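
# after decoding, the WERs can be summarized with the usual Kaldi helper, e.g.:
#   for x in $dir/decode_*; do grep WER $x/wer_* | utils/best_wer.sh; done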

# Not testing the 'looped' decoding separately, because for
# TDNN systems it would give exactly the same results as the
# normal decoding.

if $test_online_decoding && [ $stage -le 19 ]; then
  # note: if the features change (e.g. you add pitch features), you will have to
  # change the options of the following command line.
  steps/online/nnet3/prepare_online_decoding.sh \
    --mfcc-config conf/mfcc_hires.conf \
    $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online

  rm $dir/.error 2>/dev/null || true

  for data in $test_sets; do
    (
      data_affix=$(echo $data | sed s/test_//)
      nspk=$(wc -l <data/${data}_hires/spk2utt)
      # note: we just give it "data/${data}" as it only uses the wav.scp, the
      # feature type does not matter.
      for lmtype in tgpr bd_tgpr; do
        steps/online/nnet3/decode.sh \
          --acwt 1.0 --post-decode-acwt 10.0 \
          --nj $nspk --cmd "$decode_cmd" \
          $tree_dir/graph_${lmtype} data/${data} ${dir}_online/decode_${lmtype}_${data_affix} || exit 1
      done
      steps/lmrescore.sh \
        --self-loop-scale 1.0 \
        --cmd "$decode_cmd" data/lang_test_{tgpr,tg} \
        data/${data}_hires ${dir}_online/decode_{tgpr,tg}_${data_affix} || exit 1
      steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
        data/lang_test_bd_{tgpr,fgconst} \
        data/${data}_hires ${dir}_online/decode_bd_tgpr_${data_affix}{,_fg} || exit 1
    ) || touch $dir/.error &
  done
  wait
  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
fi


exit 0;