# Copyright 2016    Vijayaditya Peddinti
#                   Vimal Manohar
# Apache 2.0.

from __future__ import division
import datetime
import re
import sys

import libs.common as common_lib


def parse_progress_logs_for_nonlinearity_stats(exp_dir):
    """ Parse progress logs for mean and std stats for non-linearities.

    e.g. for a line that is parsed from progress.*.log:
    exp/nnet3/lstm_self_repair_ld5_sp/log/progress.9.log:component name=Lstm3_i
    type=SigmoidComponent, dim=1280, self-repair-scale=1e-05, count=1.96e+05,
    value-avg=[percentiles(0,1,2,5 10,20,50,80,90
    95,98,99,100)=(0.05,0.09,0.11,0.15 0.19,0.27,0.50,0.72,0.83
    0.88,0.92,0.94,0.99), mean=0.502, stddev=0.23],
    deriv-avg=[percentiles(0,1,2,5 10,20,50,80,90
    95,98,99,100)=(0.009,0.04,0.05,0.06 0.08,0.10,0.14,0.17,0.18
    0.19,0.20,0.20,0.21), mean=0.134, stddev=0.0397]
    """

    progress_log_files = "%s/log/progress.*.log" % (exp_dir)

    stats_per_component_per_iter = {}

    progress_log_lines = common_lib.run_kaldi_command(
        'grep -e "value-avg.*deriv-avg" {0}'.format(progress_log_files))[0]

    parse_regex = re.compile(
        r".*progress.([0-9]+).log:component name=(.+) "
        r"type=(.*)Component,.*"
        r"value-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\].*"
        r"deriv-avg=\[.*mean=([0-9\.\-e]+), stddev=([0-9\.e\-]+)\]")

    for line in progress_log_lines.split("\n"):
        mat_obj = parse_regex.search(line)
        if mat_obj is None:
            continue
        # groups = ('9', 'Lstm3_i', 'Sigmoid', '0.502', '0.23',
        #           '0.134', '0.0397')
        groups = mat_obj.groups()
        iteration = int(groups[0])
        component_name = groups[1]
        component_type = groups[2]
        value_mean = float(groups[3])
        value_stddev = float(groups[4])
        deriv_mean = float(groups[5])
        deriv_stddev = float(groups[6])
        try:
            stats_per_component_per_iter[component_name][
                'stats'][iteration] = [value_mean, value_stddev,
                                       deriv_mean, deriv_stddev]
        except KeyError:
            stats_per_component_per_iter[component_name] = {}
            stats_per_component_per_iter[component_name][
                'type'] = component_type
            stats_per_component_per_iter[component_name]['stats'] = {}
            stats_per_component_per_iter[component_name][
                'stats'][iteration] = [value_mean, value_stddev,
                                       deriv_mean, deriv_stddev]

    return stats_per_component_per_iter
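

# Illustration of the structure returned above (values are taken from the
# docstring example and are only illustrative): each component name maps to
# its component type and to a per-iteration stats list of
# [value_mean, value_stddev, deriv_mean, deriv_stddev], e.g.
#     {'Lstm3_i': {'type': 'Sigmoid',
#                  'stats': {9: [0.502, 0.23, 0.134, 0.0397]}}}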


def parse_difference_string(string):
    difference_dict = {}
    for parts in string.split():
        sub_parts = parts.split(":")
        difference_dict[sub_parts[0]] = float(sub_parts[1])
    return difference_dict
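

# Example (the input string below is hypothetical, in the same form as the
# bracketed part of the progress-log lines parsed further down):
#     parse_difference_string("Final_affine:0.0321521 Lstm1_W_x:1.33e-07")
# returns
#     {'Final_affine': 0.0321521, 'Lstm1_W_x': 1.33e-07}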


class MalformedClippedProportionLineException(Exception):
    def __init__(self, line):
        Exception.__init__(self,
                           "Malformed line encountered while trying to "
                           "extract clipped-proportions.\n{0}".format(line))


def parse_progress_logs_for_clipped_proportion(exp_dir):
    """ Parse progress logs for clipped proportion stats.

    e.g. for a line that is parsed from progress.*.log:
    exp/chain/cwrnn_trial2_ld5_sp/log/progress.245.log:component
    name=BLstm1_forward_c type=ClipGradientComponent, dim=512,
    norm-based-clipping=true, clipping-threshold=30,
    clipped-proportion=0.000565527,
    self-repair-clipped-proportion-threshold=0.01, self-repair-target=0,
    self-repair-scale=1
    """

    progress_log_files = "%s/log/progress.*.log" % (exp_dir)
    progress_log_lines = common_lib.run_kaldi_command(
        'grep -e "{0}" {1}'.format(
            "clipped-proportion", progress_log_files))[0]
    parse_regex = re.compile(r".*progress\.([0-9]+)\.log:component "
                             r"name=(.*) type=.* "
                             r"clipped-proportion=([0-9\.e\-]+)")

    cp_per_component_per_iter = {}

    max_iteration = 0
    component_names = set([])
    for line in progress_log_lines.split("\n"):
        mat_obj = parse_regex.search(line)
        if mat_obj is None:
            if line.strip() == "":
                continue
            raise MalformedClippedProportionLineException(line)
        groups = mat_obj.groups()
        iteration = int(groups[0])
        max_iteration = max(max_iteration, iteration)
        name = groups[1]
        clipped_proportion = float(groups[2])
        if clipped_proportion > 1:
            raise MalformedClippedProportionLineException(line)
        if iteration not in cp_per_component_per_iter:
            cp_per_component_per_iter[iteration] = {}
        cp_per_component_per_iter[iteration][name] = clipped_proportion
        component_names.add(name)
    component_names = list(component_names)
    component_names.sort()

    # Rearranging the data into a table (one row per iteration)
    # and into cp_per_iter_per_component
    cp_per_iter_per_component = {}
    for component_name in component_names:
        cp_per_iter_per_component[component_name] = []
    data = []
    data.append(["iteration"] + component_names)
    for iter in range(max_iteration + 1):
        if iter not in cp_per_component_per_iter:
            continue
        comp_dict = cp_per_component_per_iter[iter]
        row = [iter]
        for component in component_names:
            try:
                row.append(comp_dict[component])
                cp_per_iter_per_component[component].append(
                    [iter, comp_dict[component]])
            except KeyError:
                # If the clipped proportion is not available for a particular
                # component, it is set to None. This usually happens during
                # layer-wise discriminative training.
                row.append(None)
        data.append(row)

    return {'table': data,
            'cp_per_component_per_iter': cp_per_component_per_iter,
            'cp_per_iter_per_component': cp_per_iter_per_component}
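

# The 'table' entry above is laid out for direct tabulation/plotting: the
# first row is the header ["iteration", <sorted component names>] and each
# following row is [iteration, cp_of_component_1, cp_of_component_2, ...],
# with None for components that have no clipped-proportion value at that
# iteration (e.g. during layer-wise discriminative training).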


def parse_progress_logs_for_param_diff(exp_dir, pattern, logger=None):
    """ Parse progress logs for per-component parameter differences.

    e.g. for a line that is parsed from progress.*.log:
    exp/chain/cwrnn_trial2_ld5_sp/log/progress.245.log:LOG
    (nnet3-show-progress:main():nnet3-show-progress.cc:144) Relative parameter
    differences per layer are [ Cwrnn1_T3_W_r:0.0171537
    Cwrnn1_T3_W_x:1.33338e-07 Cwrnn1_T2_W_r:0.048075 Cwrnn1_T2_W_x:1.34088e-07
    Cwrnn1_T1_W_r:0.0157277 Cwrnn1_T1_W_x:0.0212704 Final_affine:0.0321521
    Cwrnn2_T3_W_r:0.0212082 Cwrnn2_T3_W_x:1.33691e-07 Cwrnn2_T2_W_r:0.0212978
    Cwrnn2_T2_W_x:1.33401e-07 Cwrnn2_T1_W_r:0.014976 Cwrnn2_T1_W_x:0.0233588
    Cwrnn3_T3_W_r:0.0237165 Cwrnn3_T3_W_x:1.33184e-07 Cwrnn3_T2_W_r:0.0239754
    Cwrnn3_T2_W_x:1.3296e-07 Cwrnn3_T1_W_r:0.0194809 Cwrnn3_T1_W_x:0.0271934 ]
    """

    if pattern not in set(["Relative parameter differences",
                           "Parameter differences"]):
        raise Exception("Unknown value for pattern : {0}".format(pattern))

    progress_log_files = "%s/log/progress.*.log" % (exp_dir)
    progress_per_iter = {}
    component_names = set([])
    progress_log_lines = common_lib.run_kaldi_command(
        'grep -e "{0}" {1}'.format(pattern, progress_log_files))[0]
    parse_regex = re.compile(r".*progress\.([0-9]+)\.log:"
                             r"LOG.*{0}.*\[(.*)\]".format(pattern))
    for line in progress_log_lines.split("\n"):
        mat_obj = parse_regex.search(line)
        if mat_obj is None:
            continue
        groups = mat_obj.groups()
        iteration = groups[0]
        differences = parse_difference_string(groups[1])
        component_names = component_names.union(differences.keys())
        progress_per_iter[int(iteration)] = differences

    component_names = list(component_names)
    component_names.sort()
    # Rearranging the parameter differences available per iteration
    # into parameter differences per component
    progress_per_component = {}
    for cn in component_names:
        progress_per_component[cn] = {}

    max_iter = max(progress_per_iter.keys())
    total_missing_iterations = 0
    gave_user_warning = False
    for iter in range(max_iter + 1):
        try:
            component_dict = progress_per_iter[iter]
        except KeyError:
            continue

        for component_name in component_names:
            try:
                progress_per_component[component_name][iter] = component_dict[
                    component_name]
            except KeyError:
                total_missing_iterations += 1
                # The component was not found in this iteration, possibly
                # because of layer-wise discriminative training.
                pass
        if (total_missing_iterations/len(component_names) > 20
                and not gave_user_warning and logger is not None):
            logger.warning("There are more than {0} missing iterations per "
                           "component. Something might be wrong.".format(
                               total_missing_iterations/len(component_names)))
            gave_user_warning = True

    return {'progress_per_component': progress_per_component,
            'component_names': component_names,
            'max_iter': max_iter}
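

# The returned 'progress_per_component' dictionary maps each component name to
# a {iteration: parameter-difference} dictionary, e.g. (illustrative values
# taken from the docstring example above):
#     {'Final_affine': {245: 0.0321521}, 'Cwrnn1_T3_W_r': {245: 0.0171537}}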


def parse_train_logs(exp_dir):
    train_log_files = "%s/log/train.*.log" % (exp_dir)
    train_log_lines = common_lib.run_kaldi_command(
        'grep -e Accounting {0}'.format(train_log_files))[0]
    parse_regex = re.compile(r".*train\.([0-9]+)\.([0-9]+)\.log:# "
                             r"Accounting: time=([0-9]+) thread.*")

    train_times = {}
    for line in train_log_lines.split('\n'):
        mat_obj = parse_regex.search(line)
        if mat_obj is not None:
            groups = mat_obj.groups()
            try:
                train_times[int(groups[0])][int(groups[1])] = float(groups[2])
            except KeyError:
                train_times[int(groups[0])] = {}
                train_times[int(groups[0])][int(groups[1])] = float(groups[2])
    iters = train_times.keys()
    for iter in iters:
        values = train_times[iter].values()
        train_times[iter] = max(values)
    return train_times
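

# parse_train_logs returns a dictionary {iteration: training time in seconds},
# where the time of an iteration is the maximum over its parallel jobs
# (i.e. over the train.<iteration>.<job>.log files of that iteration).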


def parse_prob_logs(exp_dir, key='accuracy'):
    train_prob_files = "%s/log/compute_prob_train.*.log" % (exp_dir)
    valid_prob_files = "%s/log/compute_prob_valid.*.log" % (exp_dir)
    train_prob_strings = common_lib.run_kaldi_command(
        'grep -e {0} {1}'.format(key, train_prob_files), wait=True)[0]
    valid_prob_strings = common_lib.run_kaldi_command(
        'grep -e {0} {1}'.format(key, valid_prob_files))[0]

    # LOG
    # (nnet3-chain-compute-prob:PrintTotalStats():nnet-chain-diagnostics.cc:149)
    # Overall log-probability for 'output' is -0.399395 + -0.013437 = -0.412832
    # per frame, over 20000 frames.

    # LOG
    # (nnet3-chain-compute-prob:PrintTotalStats():nnet-chain-diagnostics.cc:144)
    # Overall log-probability for 'output' is -0.307255 per frame, over 20000
    # frames.

    parse_regex = re.compile(
        r".*compute_prob_.*\.([0-9]+).log:LOG "
        r".nnet3.*compute-prob:PrintTotalStats..:"
        r"nnet.*diagnostics.cc:[0-9]+. Overall ([a-zA-Z\-]+) for "
        r"'output'.*is ([0-9.\-e]+) .*per frame")

    train_loss = {}
    valid_loss = {}

    for line in train_prob_strings.split('\n'):
        mat_obj = parse_regex.search(line)
        if mat_obj is not None:
            groups = mat_obj.groups()
            if groups[1] == key:
                train_loss[int(groups[0])] = groups[2]
    for line in valid_prob_strings.split('\n'):
        mat_obj = parse_regex.search(line)
        if mat_obj is not None:
            groups = mat_obj.groups()
            if groups[1] == key:
                valid_loss[int(groups[0])] = groups[2]
    iters = list(set(valid_loss.keys()).intersection(train_loss.keys()))
    iters.sort()
    # Return a list (not a lazy map object) so that callers can iterate over
    # the result more than once under Python 3.
    return list(map(lambda x: (int(x), float(train_loss[x]),
                               float(valid_loss[x])), iters))
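

# parse_prob_logs returns a list of (iteration, train_objective,
# valid_objective) tuples, restricted to the iterations for which both the
# train and validation diagnostic logs could be parsed.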


def generate_accuracy_report(exp_dir, key="accuracy"):
    times = parse_train_logs(exp_dir)
    data = parse_prob_logs(exp_dir, key)
    report = []
    report.append("%Iter\tduration\ttrain_loss\tvalid_loss\tdifference")
    for x in data:
        try:
            report.append("%d\t%s\t%g\t%g\t%g" % (x[0], str(times[x[0]]),
                                                  x[1], x[2], x[2] - x[1]))
        except KeyError:
            continue

    total_time = 0
    for iter in times.keys():
        total_time += times[iter]
    report.append("Total training time is {0}\n".format(
        str(datetime.timedelta(seconds=total_time))))
    return ["\n".join(report), times, data]
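

# Example usage (a sketch; the experiment directory and key below are
# hypothetical):
#     report, times, data = generate_accuracy_report("exp/nnet3/tdnn_sp",
#                                                    key="log-likelihood")
#     print(report)
# This prints a tab-separated table with one row per iteration (duration,
# train objective, validation objective and their difference), followed by
# the total training time.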