summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 9adb98a)
raw | patch | inline | side by side (parent: 9adb98a)
author | Dan Povey <dpovey@gmail.com> | |
Sat, 2 Jun 2012 05:27:07 +0000 (05:27 +0000) | ||
committer | Dan Povey <dpovey@gmail.com> | |
Sat, 2 Jun 2012 05:27:07 +0000 (05:27 +0000) |
395 files changed:
diff --git a/src/NOTES b/src/NOTES
index b6a66a52fcde363b65f94b0907f2c2070c8c43d1..a14e351ed74c233b1c4fd38dff32e23ff47f9d22 100644 (file)
--- a/src/NOTES
+++ b/src/NOTES
--
Fixing commas with no spaces, and some other simple style violations.
-for x in */*.{h,cc}; do cp $x tmpf; cat tmpf | perl -ane 'if(!m/["'\'']/ && !m://: && !m/_\{/) { s/,(\S)/, $1/g; s/if\(/if (/; s/while\(/while (/; s/switch\(/switch (/; } print; ' > $x; done
\ No newline at end of file
+for x in */*.{h,cc}; do cp $x tmpf; cat tmpf | perl -ane 'if(!m/["'\'']/ && !m://: && !m/_\{/) { s/,(\S)/, $1/g; s/if\(/if (/; s/while\(/while (/; s/switch\(/switch (/; } print; ' > $x; done
+
+# add a space after comment-begin //
+for x in */*.{h,cc}; do cp $x tmpf; cat tmpf | perl -ane 's:(^|[^\:])//([^ \n/<!]):$1// $2:; print; ' > $x; done
+
+# make it so declarations with * and & have the * or & as part of the type:
+
+for x in */*.{h,cc}; do cp $x tmpf; cat tmpf | perl -ane ' if (! m:^\s*(//|/\*): && !m:determinized\*: && !m:([a-z]+ ){5}:) { for($x=1;$x<=10;$x++){ s|([^&/ ])([*&]) ([a-z])|$1 $2$3|g; }} print; ' > $x; done
+
+# Remove unnecessary parentheses in single-line "return" expressions.
+
+for x in */*.{h,cc}; do cp $x tmpf; cat tmpf | perl -ane ' s/return\s*\(([^()|?]+)\)\s*;/return $1;/; print; ' | diff - $x; done
\ No newline at end of file
index 2bebc91106d68c154a35bcfd8bbbf73cc1697935..9dcfcd609b6dd7c000bcd669dd1ecfb28753365c 100644 (file)
KALDI_ASSERT(std::abs(cos(exp(M_LOG_2PI)) - 1.0) < 1.0e-05);
}
-void UnitTestAssertFunc() { // Testing Assert*** functions
+void UnitTestAssertFunc() { // Testing Assert** *functions
using namespace kaldi;
for (int i = 1; i < 100; i++) {
float f1 = rand() % 10000 + 1, f2 = rand() % 20 + 1;
diff --git a/src/bin/acc-lda.cc b/src/bin/acc-lda.cc
index 20e53e86dd165766262e26ccaddd78515e52b6be..f3b6b91fe8023a89da63cc8f4387e8d7703218f6 100644 (file)
--- a/src/bin/acc-lda.cc
+++ b/src/bin/acc-lda.cc
lda.Write(ko.Stream(), binary);
KALDI_LOG << "Written statistics.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 026dabed59682a7a954a94fb36b4507949da09be..58f607c49fe4bbabe8d07232621f711184486aab 100644 (file)
DeleteBuildTreeStats(&stats);
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 56e8632a1176bc9adbe90c0cd563f505cf4730d5..6009768c53fc417fbb90f1c45f34908470c4c9fc 100644 (file)
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/ali-to-pdf.cc b/src/bin/ali-to-pdf.cc
index 663406c1aa3b6cab81629064b8148fcd8046fb81..e92971ddfb8a24d4e72d46f450b5b62c3443f57a 100644 (file)
--- a/src/bin/ali-to-pdf.cc
+++ b/src/bin/ali-to-pdf.cc
num_done++;
}
KALDI_LOG << "Converted " << num_done << " alignments to pdf sequences.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 53f3ebc619de6b1f09b6ee68a7d7ec52ba4b0767..e32f38412988325be3f824a78ace75128c29dc52 100644 (file)
--- a/src/bin/ali-to-phones.cc
+++ b/src/bin/ali-to-phones.cc
n_done++;
}
KALDI_LOG << "Done " << n_done << " utterances.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/ali-to-post.cc b/src/bin/ali-to-post.cc
index 4071fa1aec9b49857e0bde3e78dd2b53f4db6bac..9f09208b3e3cb2900bdc2bffcbedd593546dd280 100644 (file)
--- a/src/bin/ali-to-post.cc
+++ b/src/bin/ali-to-post.cc
posterior_writer.Write(alignment_reader.Key(), post);
}
KALDI_LOG << "ali-to-post: converted " << num_alignments << " alignments.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a0558821eeefbc74d611cb12c190803e452884ec..5b118a1982a804f99a3d76e1f584f1f23ccfc12c 100644 (file)
<< num_no_feat << ", other errors on " << num_other_error;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 867cef6ab1acf000ca54598c47740b6cc4218a7b..9784fa9864cb4cea257057dfac60ffc8a535d331 100644 (file)
}
if (done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/align-equal.cc b/src/bin/align-equal.cc
index 3da300a7bb6e00a1b9bd3a45d0a442f490033adf..0486d0ccefac48411392ea99920f8841fb25d586 100644 (file)
--- a/src/bin/align-equal.cc
+++ b/src/bin/align-equal.cc
}
if (done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 4d6b7fffc2d3ee6f8ac2c065996385a7ec48746f..bd315cdfc0b7bd24cfefd0b608b4a80758b0f47b 100644 (file)
--- a/src/bin/align-mapped.cc
+++ b/src/bin/align-mapped.cc
<< num_no_transcript << ", other errors on " << num_other_error;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 312b7732f126f47f063175a11147baecebe3d225..843dbed32dd51f9e52c1e98d68277719d6938df4 100644 (file)
std::cerr << "Wrote tree and mapping\n";
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/build-tree.cc b/src/bin/build-tree.cc
index ece9e06582c46d1a9132ee3add58d61a1cb646b2..2096be66148c9a2ad8468e1c6722570d03659b5a 100644 (file)
--- a/src/bin/build-tree.cc
+++ b/src/bin/build-tree.cc
KALDI_LOG << "Wrote tree\n";
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a6ae73af056f227b19259379974520086cd25b8a..82e6532be69908b9d4ef1dec071453f1ce5803ba 100644 (file)
KALDI_LOG << "Wrote questions to "<<phone_sets_wxfilename;
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 81eaec2c998406f5006d7d6687d2ff7aea4c0cf8..1c380be6bd388630a22c37568c34c6b13ddcae89 100644 (file)
WriteKaldiObject(qo, questions_out_filename, binary);
KALDI_LOG << "Wrote questions to "<<questions_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e8b02792f12821161fa8d07380e75f9b14ffd910..da30b692646a5ebb9d3b0e320d19724a51c68fd7 100644 (file)
KALDI_LOG << "compile-train-graphs: succeeded for " << num_succeed
<< " graphs, failed for " << num_fail;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2b401364331d5958fb1153fc1015de934e22bf18..f9a3895f6ebe34acd72cdc8fa81e18dc1e6bc4f4 100644 (file)
KALDI_LOG << "compile-train-graphs: succeeded for " << num_succeed
<< " graphs, failed for " << num_fail;
return (num_succeed != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 51acba3093a5ebf3a490d0640e71ba2d71afce40..ae6e83dacf4f4a29fe6c299b3de185f86c813c96 100644 (file)
<< (tot_sigmoid/num_scaled) << " over "
<< num_scaled << " utterance. [Note: should go down]";
return (num_scaled != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/compute-wer.cc b/src/bin/compute-wer.cc
index e180e24011a1266051da7890ce21ac1fdd0b5be6..1fc818c9f847721632500d5d2bbf45c3e22ed756 100644 (file)
--- a/src/bin/compute-wer.cc
+++ b/src/bin/compute-wer.cc
std::cout << "Scored " << num_sent << " sentences, "
<< num_absent_sents << " not present in hyp.\n";
return 0;
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/convert-ali.cc b/src/bin/convert-ali.cc
index a9c9fe92037534f0a2b2c74050fb9bb9fa44aca8..610c43e35b8beada2bc79a4a4f32ce9dffb3cb2f 100644 (file)
--- a/src/bin/convert-ali.cc
+++ b/src/bin/convert-ali.cc
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 4e938935270416aed305f21b019dd0b56a1ed77a..dc201c25e644f7317abed4931fdbaf0ddcfef0cf 100644 (file)
--- a/src/bin/copy-gselect.cc
+++ b/src/bin/copy-gselect.cc
<< " limiting sizes to " << num_gselect;
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 11a3ac92667de1ada656f699fe0618dfbebff6cc..1411480aa8b58c342b0a3d9cc998514499ce11ff 100644 (file)
KALDI_LOG << "Copied vector<vector<int32> > to " << vector_out_fn; */
return 0;
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 77c74dac4cccb0bd10576c2ebbaf277c18855e0f..771de4fa96b2ddf14cb6d717e2ef9d50b77afa8c 100644 (file)
KALDI_LOG << "Copied " << num_done << " vectors of int32.";
return (num_done != 0 ? 0 : 1);
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/copy-matrix.cc b/src/bin/copy-matrix.cc
index 6c52996ba025bb5f4ade1da2fc2df80c00b191e6..759affd5a9fcde900413f78e63bb3d53bc00228b 100644 (file)
--- a/src/bin/copy-matrix.cc
+++ b/src/bin/copy-matrix.cc
KALDI_LOG << "Copied " << num_done << " matrices.";
return (num_done != 0 ? 0 : 1);
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index bd36b4308de0514012951d781a34797469cdacac..b2896c05de21044b94a8c488ce85491d153169a3 100644 (file)
WriteKaldiObject(trans_model, transition_model_wxfilename, binary);
KALDI_LOG << "Copied transition model.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/copy-tree.cc b/src/bin/copy-tree.cc
index 405e31feddabd44e34d2e6252d01ed3614445cbb..820f4ce6507850317674c5f93482e4117acc8718 100644 (file)
--- a/src/bin/copy-tree.cc
+++ b/src/bin/copy-tree.cc
ReadKaldiObject(tree_in_filename, &ctx_dep);
WriteKaldiObject(ctx_dep, tree_out_filename, binary);
KALDI_LOG << "Copied tree";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/copy-vector.cc b/src/bin/copy-vector.cc
index c075337732ba393fb42860567dc6db54b14ba814..14449ba393e3670da346156fc083b52ab4bb8a3d 100644 (file)
--- a/src/bin/copy-vector.cc
+++ b/src/bin/copy-vector.cc
}
return (num_done != 0 ? 0 : 1);
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 97760b70ca96dad6cdffb7e92fa3f420ac06710b..99d88d5bff29744165c9e54da6a18ef0755a64a4 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 12a9d64a3e243999c84ce448cd0c2fecb7bd02b8..2b8d60652fc23e5ce6579706a8ab45224f624588 100644 (file)
--- a/src/bin/decode-faster.cc
+++ b/src/bin/decode-faster.cc
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/dot-weights.cc b/src/bin/dot-weights.cc
index dfee9cd0bb6ffec773f5c9a4dc4621b6513780b3..b08911a9ad70bd566992c524d6adcc6f2d1626c4 100644 (file)
--- a/src/bin/dot-weights.cc
+++ b/src/bin/dot-weights.cc
KALDI_LOG << "Done computing dot products of " << num_done
<< " weights; errors on " << num_err;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/draw-tree.cc b/src/bin/draw-tree.cc
index 84fdd0855fb59930028049feddacf2a1df792c29..0319ee929aae11726ffca27b1ca2a503fb00333b 100644 (file)
--- a/src/bin/draw-tree.cc
+++ b/src/bin/draw-tree.cc
if (renderer) delete renderer;
if (query) delete query;
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/est-lda.cc b/src/bin/est-lda.cc
index 330bfaef460cd7fef05edb6e90042f15cabc1883..13f8c7c39145cdf77d78228c6bf2fc8438a59d44 100644 (file)
--- a/src/bin/est-lda.cc
+++ b/src/bin/est-lda.cc
full_lda_mat.Write(ko.Stream(), binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/est-mllt.cc b/src/bin/est-mllt.cc
index 451e595521d8f8f4e5215115b120c8b4f226e9a0..d0937370d9d1c559919939df5594c6a236cc3ddd 100644 (file)
--- a/src/bin/est-mllt.cc
+++ b/src/bin/est-mllt.cc
mat.Write(ko.Stream(), binary);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index d154e81feee97763fbd06943651c9abe93103f07..67c71889db55fec06b7115cc78417d4f9d05f69b 100644 (file)
KALDI_LOG << "Average silence prob is " << (tot_sil_prob/tot_frames)
<< " over " << tot_frames << " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e6591fc425dc8651c964d2240001326f6c2b9c35..3f875a3a1f88cc40460be317ebfe0c79f4e95ff7 100644 (file)
if (word_syms) delete word_syms;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a7e10d02a6c9f4b28f3e54642bf9c8b781face84..cad073f828b68a40a7497d3a68bdb1b0d458b48e 100644 (file)
delete H;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e84a55173d95a8828680a832c6ed3dfea44a9d63..7e2483f50fd24f39275aa65a1ff9263495c5cc07 100644 (file)
: fst_out_filename);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 608eb72d26ff152391ad77bf3c3ca6530edf500d..eef853f53780f04863ac9976c2eac722a68e99da 100644 (file)
KALDI_ERR << "Error writing fst to "
<< (fst_out_filename == "" ? "standard output" : fst_out_filename);
delete fst;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 87f75207a948e2a8ab0509809fc13d447193409c..2410f210d6a622a161036948b8a62eba0fdaf122 100644 (file)
n_done++;
}
KALDI_LOG << "Done " << n_done << " utterances; " << n_err << " had errors.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 75e28d607e3307a0a8e3a4b1dedb98d0607dd851..056abf7efb1b3f284e63cdfe4aa99f0d9539874c 100644 (file)
--- a/src/bin/post-to-tacc.cc
+++ b/src/bin/post-to-tacc.cc
<< num_done << " utterances; wrote stats to "
<< accs_wxfilename;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c5451e084732df1b7ba5fdac2270cc4b65f225f2..996562823b4d22bdefa5428e684940ab171b3639 100644 (file)
}
KALDI_LOG << "Done converting " << num_done << " posteriors to weights.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index dd0169f7e6d8bd29cbd1f315d3873b171d67fad7..04b092f0fa1f5c9ead443b9da2fec68602c8f949 100644 (file)
n_done++;
}
KALDI_LOG << "Done " << n_done << " utterances; " << n_err << " had errors.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index edbd41ca73cdaab47597784083064126daf3eb6f..67dcc7473793b6adfb617748e6a25317e0b19bd9 100644 (file)
}
}
KALDI_LOG << "rand-prune-post: processed " << num_posteriors << " posteriors.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 06f954fac97837b7e767996ed80e774f3c26652b..695beed8bda858bc22fa4fdfab81ade19488f956 100644 (file)
if (reverse) KALDI_LOG << "Done reversing " << num_done << " weights.";
else KALDI_LOG << "Done copying " << num_done << " weights.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/scale-post.cc b/src/bin/scale-post.cc
index 28537a7e99b884309b76819932970803064b4969..ebb1b4dd13679baa01ea240c4e8ca6550a43e616 100644 (file)
--- a/src/bin/scale-post.cc
+++ b/src/bin/scale-post.cc
KALDI_LOG << "Done " << num_scaled << " posteriors; " << num_no_scale
<< " had no scales.";
return (num_scaled != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/scale-vecs.cc b/src/bin/scale-vecs.cc
index 287a14af4dfd47a458f7f7c281108a2789d94381..19cf6926271303ad30a8428372e1a875817c96e9 100644 (file)
--- a/src/bin/scale-vecs.cc
+++ b/src/bin/scale-vecs.cc
vec_writer.Write(vec_reader.Key(), vec);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 67baa2e754e12c13f5f894320cf6ce6c48626b96..bc51cf08ea79bbd68e8612788799ea06369304e7 100644 (file)
}
delete phones_symtab;
phones_symtab = NULL;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 561f3363fe5e436b9ba0f6a67eb264a1672893e8..67b0427a79b05e55ffa0004dbd54f93e8fbb7dcb 100644 (file)
(accumulator_filename != "" ? &occs : NULL));
delete syms;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index d86a9e7dd32b3f18d4ef94ae882aa927dcf2f6c4..17d634512b4a143740527757fd57cc7a9056c1ba 100644 (file)
--- a/src/bin/sum-matrices.cc
+++ b/src/bin/sum-matrices.cc
KALDI_LOG << "Summed " << (po.NumArgs()-1) << " matrices "
<< " of dimension " << mat.NumRows() << " by " << mat.NumCols();
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/sum-post.cc b/src/bin/sum-post.cc
index 2915512f67bb2e5ed3b1e79a50ff5720aae48992..58f348d83a616fe4e86c5981a5064ed0865db370 100644 (file)
--- a/src/bin/sum-post.cc
+++ b/src/bin/sum-post.cc
KALDI_LOG << "Done adding " << num_done << " posteriors; " << num_err
<< " with errors.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 25dbfc2240ffa3906790d97e8bd221e45fccc001..601b9bcef4ae0b5b485816b8884b459d3db2ead1 100644 (file)
KALDI_LOG << "Wrote summed accs ( " << stats.size() << " individual stats)";
DeleteBuildTreeStats(&stats);
return (stats.size() != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/thresh-post.cc b/src/bin/thresh-post.cc
index 19c1fd92273878aeb726531a1ff692c49ad4a7a4..e62f51366eb85b65987a00c4e4f8bd9363656350 100644 (file)
--- a/src/bin/thresh-post.cc
+++ b/src/bin/thresh-post.cc
KALDI_LOG << "thresh-post: thresholded " << num_posteriors
<< " posteriors, reduced them by a factor of "
<< (total_weight_out/total_weight_in) << " on average.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/bin/weight-post.cc b/src/bin/weight-post.cc
index ba9dec26a6b0b6cb3a99ce85104194ff8b2ed721..1ea2cb79ec54fecdb505c8084ed6993adcc88b67 100644 (file)
--- a/src/bin/weight-post.cc
+++ b/src/bin/weight-post.cc
}
KALDI_LOG << "Scaled " << num_done << " posteriors; errors on " << num_err;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e75d750490e90bc29fa6e4f7dc0eff6f84f9138e..72a394744e37f1f8fc0ac012c5516b4f6757fcf3 100644 (file)
}
KALDI_LOG << "Done " << num_posteriors << " posteriors.";
return (num_posteriors != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index baaff5bfa4394ba13438fe45ed74e18c9ae4a8ea..b813a23813157af1a5aff5f7fdd290447ff28e35 100644 (file)
}
-void CuDevice::AccuProfile(const std::string& key, double time) {
+void CuDevice::AccuProfile(const std::string &key, double time) {
if (profile_map_.find(key) == profile_map_.end()) {
profile_map_[key] = 0.0;
}
index 49f4727bd1c3cb5d0f5bf512db2eb92e9ef05850..58aff3cd05c284be65992b5eed4dbee7f5b50659 100644 (file)
private:
CuDevice();
CuDevice(CuDevice&);
- CuDevice& operator=(CuDevice&);
+ CuDevice &operator=(CuDevice&);
public:
~CuDevice();
}
/// Sum the IO time
- void AccuProfile(const std::string& key, double time);
+ void AccuProfile(const std::string &key, double time);
void PrintProfile();
void ResetProfile() {
index cd052e2de295f38b670cfdaf06a9048085fadab8..a09785db146b8e07aa58a3c8f840ae9a483d7cd0 100644 (file)
* CuMatrix
*/
void cudaF_set_const(dim3 Gr, dim3 Bl, float*mat, float value, MatrixDim d);
-void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d);
-void cudaF_scale_cols(dim3 Gr, dim3 Bl, float*mat, const float* scale, MatrixDim d);
-void cudaF_scale_rows(dim3 Gr, dim3 Bl, float*mat, const float* scale, MatrixDim d);
-void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float*mat, const float* vec_div, MatrixDim d);
-void cudaF_add_scaled(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float* dst, MatrixDim d);
-void cudaF_add_scaled_row(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d);
+void cudaF_apply_log(dim3 Gr, dim3 Bl, float *mat, MatrixDim d);
+void cudaF_scale_cols(dim3 Gr, dim3 Bl, float*mat, const float *scale, MatrixDim d);
+void cudaF_scale_rows(dim3 Gr, dim3 Bl, float*mat, const float *scale, MatrixDim d);
+void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float*mat, const float *vec_div, MatrixDim d);
+void cudaF_add_scaled(dim3 Gr, dim3 Bl, float alpha, const float* A, float beta, float *dst, MatrixDim d);
+void cudaF_add_scaled_row(dim3 Gr, dim3 Bl, float alpha, const float *row, float beta, float *dst, MatrixDim d);
void cudaF_mul_elem(dim3 Gr, dim3 Bl, float*mat, const float*A, MatrixDim d);
/*
* CuVector
*/
-void cudaF_add_col_sum(size_t Gr, size_t Bl, float alpha, const float* mat, float beta, float* vec, MatrixDim d);
-void cudaF_add_col_sum_reduce(dim3 Gr, dim3 Bl, float alpha, const float* mat, float beta, float* vec, MatrixDim d);
-void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d);
+void cudaF_add_col_sum(size_t Gr, size_t Bl, float alpha, const float *mat, float beta, float *vec, MatrixDim d);
+void cudaF_add_col_sum_reduce(dim3 Gr, dim3 Bl, float alpha, const float *mat, float beta, float *vec, MatrixDim d);
+void cudaF_invert_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim d);
/*
* cu::
void cudaF_softmax (size_t Gr, size_t Bl, float*y, const float*x, MatrixDim d);
void cudaF_softmax_reduce (dim3 Gr, dim3 Bl, float*y, const float*x, MatrixDim d);
void cudaF_sigmoid (dim3 Gr, dim3 Bl, float*y, const float*x, MatrixDim d);
-void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d);
+void cudaF_diff_sigmoid (dim3 Gr, dim3 Bl, float *eout, const float *e, const float *y, MatrixDim d);
-void cudaF_expand(dim3 Gr, dim3 Bl, float* y, const float* x, const int* off, MatrixDim d_out, MatrixDim d_in);
-void cudaF_rearrange(dim3 Gr, dim3 Bl, float* y, const float* x, const int* copy_from, MatrixDim d_out, MatrixDim d_in);
-void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int* copy_from, MatrixDim d_out, MatrixDim d_in);
+void cudaF_expand(dim3 Gr, dim3 Bl, float *y, const float *x, const int *off, MatrixDim d_out, MatrixDim d_in);
+void cudaF_rearrange(dim3 Gr, dim3 Bl, float *y, const float *x, const int *copy_from, MatrixDim d_out, MatrixDim d_in);
+void cudaF_randomize(dim3 Gr, dim3 Bl, float *y, const float *x, const int *copy_from, MatrixDim d_out, MatrixDim d_in);
-void cudaF_check_class(size_t Gr, size_t Bl, const float* out, const float* des, float* match, MatrixDim d);
-void cudaF_check_class_reduce(dim3 Gr, dim3 Bl, const float* out, const float* des, float* match, MatrixDim d);
+void cudaF_check_class(size_t Gr, size_t Bl, const float *out, const float *des, float *match, MatrixDim d);
+void cudaF_check_class_reduce(dim3 Gr, dim3 Bl, const float *out, const float *des, float *match, MatrixDim d);
-void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d);
+void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float *wei, float *grad, float l1, float lr, MatrixDim d);
-void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, int32_cuda voff, MatrixDim d);
+void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float *mat, float *vec_val, int32_cuda *vec_id, int32_cuda voff, MatrixDim d);
-void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d);
+void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda *vec_tgt, float *mat_net_out, float *vec_log_post, MatrixDim d);
-void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda* vec_ids, float* Y, MatrixDim d);
+void cudaF_softmax_part(dim3 Gr, dim3 Bl, const float* X, const int32_cuda *vec_ids, float* Y, MatrixDim d);
-void cudaF_sum_rows_vec(dim3 Gr, dim3 Bl, const float* mat, float* vec_sum, MatrixDim d);
+void cudaF_sum_rows_vec(dim3 Gr, dim3 Bl, const float *mat, float *vec_sum, MatrixDim d);
/*
index 58d242dc36be33ef5500791b7aac771be7af8a69..d82f7b5b89b3696a8cf9a7059bddaf39116f2f14 100644 (file)
} else
#endif
{
- MatrixBase<float>& y = Y->Mat();
- const MatrixBase<float>& x = X.Mat();
+ MatrixBase<float> &y = Y->Mat();
+ const MatrixBase<float> &x = X.Mat();
for(MatrixIndexT r=0; r<x.NumRows(); r++) {
for(MatrixIndexT c=0; c<x.NumCols(); c++) {
y(r, c) = 1.0/(1.0+exp(-x(r, c)));
} else
#endif
{
- MatrixBase<float>& eout = Eout->Mat();
- const MatrixBase<float>& ein = Ein.Mat();
- const MatrixBase<float>& y = Y.Mat();
+ MatrixBase<float> &eout = Eout->Mat();
+ const MatrixBase<float> &ein = Ein.Mat();
+ const MatrixBase<float> &y = Y.Mat();
for(MatrixIndexT r=0; r<eout.NumRows(); r++) {
for(MatrixIndexT c=0; c<eout.NumCols(); c++) {
eout(r, c) = ein(r, c) * y(r, c)*(1.0-y(r, c));
} else
#endif
{
- MatrixBase<float>& y = Y->Mat();
- const MatrixBase<float>& x = X.Mat();
+ MatrixBase<float> &y = Y->Mat();
+ const MatrixBase<float> &x = X.Mat();
y.CopyFromMat(x);
for(MatrixIndexT r=0; r<x.NumRows(); r++) {
y.Row(r).ApplySoftMax();
-void CheckClass(const CuMatrix<float>& out, const CuMatrix<float> &des, CuVector<float>* match) {
+void CheckClass(const CuMatrix<float> &out, const CuMatrix<float> &des, CuVector<float> *match) {
assert(out.NumCols() == des.NumCols());
assert(out.NumRows() == des.NumRows());
assert(out.Stride() == des.Stride());
@@ -160,9 +160,9 @@ void CheckClass(const CuMatrix<float>& out, const CuMatrix<float> &des, CuVector
} else
#endif
{
- const MatrixBase<float>& mout = out.Mat();
- const MatrixBase<float>& mdes = des.Mat();
- VectorBase<float>& vmatch = match->Vec();
+ const MatrixBase<float> &mout = out.Mat();
+ const MatrixBase<float> &mdes = des.Mat();
+ VectorBase<float> &vmatch = match->Vec();
vmatch.Set(0);
for(MatrixIndexT r=0; r<mout.NumRows(); r++) {
@@ -178,7 +178,7 @@ void CheckClass(const CuMatrix<float>& out, const CuMatrix<float> &des, CuVector
}
-void RegularizeL1(CuMatrix<float>* wei, CuMatrix<float>* grad, float l1, float lr) {
+void RegularizeL1(CuMatrix<float> *wei, CuMatrix<float> *grad, float l1, float lr) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
@@ -193,8 +193,8 @@ void RegularizeL1(CuMatrix<float>* wei, CuMatrix<float>* grad, float l1, float l
} else
#endif
{
- MatrixBase<float>& wei2 = wei->Mat();
- MatrixBase<float>& grad2 = grad->Mat();
+ MatrixBase<float> &wei2 = wei->Mat();
+ MatrixBase<float> &grad2 = grad->Mat();
for(MatrixIndexT r=0; r<wei2.NumRows(); r++) {
for(MatrixIndexT c=0; c<wei2.NumCols(); c++) {
@@ -218,7 +218,7 @@ void RegularizeL1(CuMatrix<float>* wei, CuMatrix<float>* grad, float l1, float l
}
-void FindRowMaxId(const CuMatrix<float>& mat, CuStlVector<int32>* id) {
+void FindRowMaxId(const CuMatrix<float> &mat, CuStlVector<int32> *id) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
}
-void DiffXent(const CuStlVector<int32>& tgt, CuMatrix<BaseFloat>* net_out_or_diff, CuVector<BaseFloat>* log_post_tgt) {
+void DiffXent(const CuStlVector<int32> &tgt, CuMatrix<BaseFloat> *net_out_or_diff, CuVector<BaseFloat> *log_post_tgt) {
assert(tgt.Dim() == net_out_or_diff->NumRows());
log_post_tgt->Resize(tgt.Dim());
@@ -301,7 +301,7 @@ void DiffXent(const CuStlVector<int32>& tgt, CuMatrix<BaseFloat>* net_out_or_dif
}
-void SumRowsVec(const CuMatrix<BaseFloat>& mat, CuVector<BaseFloat>* sum) {
+void SumRowsVec(const CuMatrix<BaseFloat> &mat, CuVector<BaseFloat> *sum) {
// initialize the sum vector
sum->Resize(mat.NumRows());
}
-void Randomize(const CuMatrix<BaseFloat>& src, const CuStlVector<int32>& copy_from_idx, CuMatrix<BaseFloat>* tgt) {
+void Randomize(const CuMatrix<BaseFloat> &src, const CuStlVector<int32> ©_from_idx, CuMatrix<BaseFloat> *tgt) {
assert(src.NumCols() == tgt->NumCols());
assert(src.NumRows() == tgt->NumRows());
@@ -373,9 +373,9 @@ void Randomize(const CuMatrix<BaseFloat>& src, const CuStlVector<int32>& copy_fr
#endif
{
// randomize in CPU
- const MatrixBase<BaseFloat>& srcmat = src.Mat();
- const std::vector<int32>& copy_from_idxvec = copy_from_idx.Vec();
- MatrixBase<BaseFloat>& tgtmat = tgt->Mat();
+ const MatrixBase<BaseFloat> &srcmat = src.Mat();
+ const std::vector<int32> ©_from_idxvec = copy_from_idx.Vec();
+ MatrixBase<BaseFloat> &tgtmat = tgt->Mat();
for(int i=0; i<copy_from_idx.Dim(); i++) {
tgtmat.Row(i).CopyFromVec(srcmat.Row(copy_from_idxvec[i]));
}
index e6fd29fefa07ff3b03427089bfc89270496c8db7..d60cbc91b6a19a392e2578703227df60fcadb8ed 100644 (file)
--- a/src/cudamatrix/cu-math.h
+++ b/src/cudamatrix/cu-math.h
void Softmax(const CuMatrix<float>& X, CuMatrix<float>* Y);
/// check match in the classification for Xentropy
- void CheckClass(const CuMatrix<float>& out, const CuMatrix<float> &des, CuVector<float>* match);
+ void CheckClass(const CuMatrix<float> &out, const CuMatrix<float> &des, CuVector<float> *match);
/// apply the L1 regularization, sets zero to wei and grad elements on zero-crossing
- void RegularizeL1(CuMatrix<float>* wei, CuMatrix<float>* grad, float l1, float lr);
+ void RegularizeL1(CuMatrix<float> *wei, CuMatrix<float> *grad, float l1, float lr);
/// Find the id of the maximal element for each row
- void FindRowMaxId(const CuMatrix<float>& mat, CuStlVector<int32>* id);
+ void FindRowMaxId(const CuMatrix<float> &mat, CuStlVector<int32> *id);
/// Differentiate cross-entropy+softmax coupling (subtract post - tgt)
/// extract per-frame cross-entropy to vector log_post_tgt_
- void DiffXent(const CuStlVector<int32>& tgt, CuMatrix<BaseFloat>* net_out_or_diff, CuVector<BaseFloat>* log_post_tgt_);
+ void DiffXent(const CuStlVector<int32> &tgt, CuMatrix<BaseFloat> *net_out_or_diff, CuVector<BaseFloat> *log_post_tgt_);
/// Sum each row of the matrix
- void SumRowsVec(const CuMatrix<BaseFloat>& mat, CuVector<BaseFloat>* sum);
+ void SumRowsVec(const CuMatrix<BaseFloat> &mat, CuVector<BaseFloat> *sum);
/// ie. switch rows according to copyFrom
- void Randomize(const CuMatrix<BaseFloat>& src, const CuStlVector<int32>& copy_from_idx, CuMatrix<BaseFloat>* tgt);
+ void Randomize(const CuMatrix<BaseFloat> &src, const CuStlVector<int32> ©_from_idx, CuMatrix<BaseFloat> *tgt);
/*
* Templated implementation to make it always compilable
}
template<typename _ElemT>
- void CheckClass(const CuMatrix<_ElemT>& out, const CuMatrix<_ElemT> &des, CuVector<float>& match) {
+ void CheckClass(const CuMatrix<_ElemT> &out, const CuMatrix<_ElemT> &des, CuVector<float> &match) {
KALDI_ERR << __func__ << " Not implemented";
}
template<typename _ElemT>
- void RegularizeL1(CuMatrix<_ElemT>* wei, CuMatrix<_ElemT>* grad, _ElemT l1, _ElemT lr) {
+ void RegularizeL1(CuMatrix<_ElemT> *wei, CuMatrix<_ElemT> *grad, _ElemT l1, _ElemT lr) {
KALDI_ERR << __func__ << " Not implemented";
}
template<typename _ElemT>
- void FindRowMaxId(const CuMatrix<float>& mat, CuStlVector<int32>* id) {
+ void FindRowMaxId(const CuMatrix<float> &mat, CuStlVector<int32> *id) {
KALDI_ERR << __func__ << " Not implemented";
}
template<typename _ElemT>
- void DiffXent(const CuStlVector<int32>& tgt, CuMatrix<_ElemT>* net_out_or_diff, CuVector<_ElemT>* log_post_tgt_) {
+ void DiffXent(const CuStlVector<int32> &tgt, CuMatrix<_ElemT> *net_out_or_diff, CuVector<_ElemT> *log_post_tgt_) {
KALDI_ERR << __func__ << " Not implemented";
}
template<typename _ElemT>
- void SumRowsVec(const CuMatrix<_ElemT>& mat, CuVector<_ElemT>* sum) {
+ void SumRowsVec(const CuMatrix<_ElemT> &mat, CuVector<_ElemT> *sum) {
KALDI_ERR << __func__ << " Not implemented";
}
template<typename _ElemT>
- void Randomize(const CuMatrix<_ElemT>& src, const CuStlVector<int32>& copy_from_idx, CuMatrix<_ElemT>* tgt) {
+ void Randomize(const CuMatrix<_ElemT> &src, const CuStlVector<int32> ©_from_idx, CuMatrix<_ElemT> *tgt) {
KALDI_ERR << __func__ << " Not implemented";
}
index 964160b6946abd4dcb28a1f5889b413705d1fd01..b76748552331b61c5f8e22e5aa819e6c1ed4c2a2 100644 (file)
template<typename _ElemT>
-CuMatrix<_ElemT>& CuMatrix<_ElemT>::CopyFromMat(const CuMatrix<_ElemT>& src) {
+CuMatrix<_ElemT>& CuMatrix<_ElemT>::CopyFromMat(const CuMatrix<_ElemT> &src) {
Resize(src.NumRows(), src.NumCols());
#if HAVE_CUDA==1
template<typename _ElemT>
-CuMatrix<_ElemT>& CuMatrix<_ElemT>::CopyFromMat(const Matrix<_ElemT>& src) {
+CuMatrix<_ElemT>& CuMatrix<_ElemT>::CopyFromMat(const Matrix<_ElemT> &src) {
Resize(src.NumRows(), src.NumCols());
#if HAVE_CUDA==1
template<typename _ElemT>
-void CuMatrix<_ElemT>::CopyToMat(Matrix<_ElemT>* dst) const {
+void CuMatrix<_ElemT>::CopyToMat(Matrix<_ElemT> *dst) const {
if (dst->NumRows() != NumRows() || dst->NumCols() != NumCols()) {
dst->Resize(NumRows(), NumCols());
}
template<typename _ElemT>
-void CuMatrix<_ElemT>::CopyRowsFromMat(int32 r, const CuMatrix<_ElemT>& src, int32 src_ro, int32 dst_ro) {
+void CuMatrix<_ElemT>::CopyRowsFromMat(int32 r, const CuMatrix<_ElemT> &src, int32 src_ro, int32 dst_ro) {
assert(r+src_ro <= src.NumRows());
assert(r+dst_ro <= NumRows());
@@ -203,8 +203,8 @@ void CuMatrix<_ElemT>::CopyRowsFromMat(int32 r, const CuMatrix<_ElemT>& src, int
MatrixIndexT src_pitch = src.Stride()*sizeof(_ElemT);
MatrixIndexT width = src.NumCols()*sizeof(_ElemT);
- const _ElemT* p_src = src.Data() + src_ro*src.Stride();
- _ElemT* p_dst = data_ + dst_ro*stride_;
+ const _ElemT *p_src = src.Data() + src_ro*src.Stride();
+ _ElemT *p_dst = data_ + dst_ro*stride_;
cuSafeCall(cudaMemcpy2D(p_dst, dst_pitch, p_src, src_pitch, width, r, cudaMemcpyDeviceToDevice));
@@ -219,7 +219,7 @@ void CuMatrix<_ElemT>::CopyRowsFromMat(int32 r, const CuMatrix<_ElemT>& src, int
template<typename _ElemT>
-void CuMatrix<_ElemT>::Read(std::istream& is, bool binary) {
+void CuMatrix<_ElemT>::Read(std::istream &is, bool binary) {
Matrix<BaseFloat> tmp;
tmp.Read(is, binary);
CopyFromMat(tmp);
template<typename _ElemT>
-void CuMatrix<_ElemT>::Write(std::ostream& os, bool binary) const {
+void CuMatrix<_ElemT>::Write(std::ostream &os, bool binary) const {
Matrix<BaseFloat> tmp;
CopyToMat(&tmp);
tmp.Write(os, binary);
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuMatrix<_ElemT>& mat) {
+std::ostream &operator << (std::ostream &out, const CuMatrix<_ElemT> &mat) {
Matrix<_ElemT> tmp;
mat.CopyToMat(&tmp);
out << tmp;
template<> void CuMatrix<float>::ApplyLog();
template<> void CuMatrix<float>::MulElements(const CuMatrix<float>& A);
-template<> void CuMatrix<float>::MulColsVec(const CuVector<float>& scale);
-template<> void CuMatrix<float>::MulRowsVec(const CuVector<float>& scale);
-template<> void CuMatrix<float>::DivRowsVec(const CuVector<float>& div);
+template<> void CuMatrix<float>::MulColsVec(const CuVector<float> &scale);
+template<> void CuMatrix<float>::MulRowsVec(const CuVector<float> &scale);
+template<> void CuMatrix<float>::DivRowsVec(const CuVector<float> &div);
template<> void CuMatrix<float>::AddMat(float alpha, const CuMatrix<float>& A, float beta);
-template<> void CuMatrix<float>::AddScaledRow(float alpha, const CuVector<float>& row, float beta);
+template<> void CuMatrix<float>::AddScaledRow(float alpha, const CuVector<float> &row, float beta);
template<> void CuMatrix<float>::AddMatMat(float alpha, const CuMatrix<float>& A, MatrixTransposeType transA, const CuMatrix<float>& B, MatrixTransposeType transB, float beta);
index c47fe5140432dc5dbe6576c6147890299c0bca46..d88df5a723d632ad0cc6e90cb7bee4111d758586 100644 (file)
template<>
-void CuMatrix<float>::MulColsVec(const CuVector<float>& scale) {
+void CuMatrix<float>::MulColsVec(const CuVector<float> &scale) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
template<>
-void CuMatrix<float>::MulRowsVec(const CuVector<float>& scale) {
+void CuMatrix<float>::MulRowsVec(const CuVector<float> &scale) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
template<>
-void CuMatrix<float>::DivRowsVec(const CuVector<float>& div) {
+void CuMatrix<float>::DivRowsVec(const CuVector<float> &div) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
template<>
-void CuMatrix<float>::AddScaledRow(float alpha, const CuVector<float>& row, float beta) {
+void CuMatrix<float>::AddScaledRow(float alpha, const CuVector<float> &row, float beta) {
if (row.Dim() != NumCols()) {
KALDI_ERR << "Non matching dimensions: Cols:" << NumCols() << " VectorDim:" << row.Dim();
index dbc6128a54bc51079e2096312def22fecf7a3f82..a99c09ab6e8441c8e1a9361ffcaa1d5e6f60c0a5 100644 (file)
void Destroy();
/// Copy functions (reallocates when needed)
- ThisType& CopyFromMat(const CuMatrix<_ElemT>& src);
- ThisType& CopyFromMat(const Matrix<_ElemT>& src);
- void CopyToMat(Matrix<_ElemT>* dst) const;
+ ThisType& CopyFromMat(const CuMatrix<_ElemT> &src);
+ ThisType& CopyFromMat(const Matrix<_ElemT> &src);
+ void CopyToMat(Matrix<_ElemT> *dst) const;
/// Copy row interval from matrix
/// @param r [in] number of rows to copy.
/// @param src [in] source matrix.
/// @param src_ro [in] source matrix row offset.
/// @param dst_ro [in] destination matrix row offset.
- void CopyRowsFromMat(int32 r, const CuMatrix<_ElemT>& src, int32 src_ro, int32 dst_ro);
+ void CopyRowsFromMat(int32 r, const CuMatrix<_ElemT> &src, int32 src_ro, int32 dst_ro);
/// I/O functions
- void Read(std::istream& is, bool binary);
- void Write(std::ostream& os, bool binary) const;
+ void Read(std::istream &is, bool binary);
+ void Write(std::ostream &os, bool binary) const;
// Math operations, some calling kernels
}
/// scale i'th column by scale[i]
- void MulColsVec(const CuVector<_ElemT>& scale) {
+ void MulColsVec(const CuVector<_ElemT> &scale) {
KALDI_ERR << "__func__ Not implemented";
}
/// scale i'th row by scale[i]
- void MulRowsVec(const CuVector<_ElemT>& scale) {
+ void MulRowsVec(const CuVector<_ElemT> &scale) {
KALDI_ERR << "__func__ Not implemented";
}
/// divide i'th row by scale[i]
- void DivRowsVec(const CuVector<_ElemT>& div) {
+ void DivRowsVec(const CuVector<_ElemT> &div) {
KALDI_ERR << "__func__ Not implemented";
}
}
/// B = aplha * row + beta * B
- void AddScaledRow(_ElemT alpha, const CuVector<_ElemT>& row, _ElemT beta=1.0) {
+ void AddScaledRow(_ElemT alpha, const CuVector<_ElemT> &row, _ElemT beta=1.0) {
KALDI_ERR << "__func__ Not implemented";
}
MatrixIndexT num_cols_;
MatrixIndexT stride_;
- _ElemT* data_; ///< GPU data pointer
+ _ElemT *data_; ///< GPU data pointer
Matrix<_ElemT> mat_; ///< non-GPU matrix as back-off
/// I/O
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuMatrix<_ElemT>& mat);
+std::ostream &operator << (std::ostream &out, const CuMatrix<_ElemT> &mat);
} // namespace
index 2cbd355d6db0aa09b3d73ac78fa8506dd30521cc..426633d9827e303508c66dd32056788b2eb96a12 100644 (file)
host_size_ = 0;
}
-template<typename T> void CuRand<T>::SeedBuffer(unsigned** tgt, MatrixIndexT state_size) {
+template<typename T> void CuRand<T>::SeedBuffer(unsigned **tgt, MatrixIndexT state_size) {
// optionally resize host buffer
if (state_size != host_size_) {
delete[] host_;
-template<> void CuRand<float>::RandUniform(CuMatrix<float>* tgt) {
+template<> void CuRand<float>::RandUniform(CuMatrix<float> *tgt) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
-template<> void CuRand<float>::RandGaussian(CuMatrix<float>* tgt) {
+template<> void CuRand<float>::RandGaussian(CuMatrix<float> *tgt) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
-template<> void CuRand<float>::BinarizeProbs(const CuMatrix<float>& probs, CuMatrix<float>* states) {
+template<> void CuRand<float>::BinarizeProbs(const CuMatrix<float> &probs, CuMatrix<float> *states) {
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
Timer tim;
@@ -166,7 +166,7 @@ template<> void CuRand<float>::BinarizeProbs(const CuMatrix<float>& probs, CuMat
-template<> void CuRand<float>::AddGaussNoise(CuMatrix<float>* tgt, float gscale) {
+template<> void CuRand<float>::AddGaussNoise(CuMatrix<float> *tgt, float gscale) {
tmp.Resize(tgt->NumRows(), tgt->NumCols());
RandGaussian(&tmp);
tgt->AddMat(gscale, tmp, 1.0);
index 9f7a5c4889da777cde8a2c7f9a33050cc330e6cf..247c38785db507e5e0a33d89648087e88a6b93a9 100644 (file)
--- a/src/cudamatrix/cu-rand.h
+++ b/src/cudamatrix/cu-rand.h
void SeedGpu(MatrixIndexT state_size);
/// fill with uniform random numbers (0.0-1.0)
- void RandUniform(CuMatrix<T>* tgt);
+ void RandUniform(CuMatrix<T> *tgt);
/// fill with normal random numbers
- void RandGaussian(CuMatrix<T>* tgt);
+ void RandGaussian(CuMatrix<T> *tgt);
/// align probabilities to discrete 0/1 states (use uniform samplig)
- void BinarizeProbs(const CuMatrix<T>& probs, CuMatrix<T>* states);
+ void BinarizeProbs(const CuMatrix<T> &probs, CuMatrix<T> *states);
/// add gaussian noise to each element
- void AddGaussNoise(CuMatrix<T>* tgt, T gscale = 1.0);
+ void AddGaussNoise(CuMatrix<T> *tgt, T gscale = 1.0);
private:
/// seed one buffer
- void SeedBuffer(unsigned** tgt, MatrixIndexT state_size);
+ void SeedBuffer(unsigned **tgt, MatrixIndexT state_size);
private:
// CuMatrix<unsigned> z1, z2, z3, z4; // cannot use CuMatrix
unsigned *z1_, *z2_, *z3_, *z4_; ///< raw rnd-generator inner state pointers
int32 state_size_; ///< size of the buffers
- unsigned* host_; ///< host bufer, used for initializing
+ unsigned *host_; ///< host buffer, used for initializing
int32 host_size_; ///< size of the host buffer
CuMatrix<T> tmp;
/// declare the BaseFloat specializations, that are in cu-rand.cc
-template<> void CuRand<float>::RandUniform(CuMatrix<float>* tgt);
-template<> void CuRand<float>::RandGaussian(CuMatrix<float>* tgt);
-template<> void CuRand<float>::BinarizeProbs(const CuMatrix<float>& probs, CuMatrix<float>* states);
-template<> void CuRand<float>::AddGaussNoise(CuMatrix<float>* tgt, float gscale);
+template<> void CuRand<float>::RandUniform(CuMatrix<float> *tgt);
+template<> void CuRand<float>::RandGaussian(CuMatrix<float> *tgt);
+template<> void CuRand<float>::BinarizeProbs(const CuMatrix<float> &probs, CuMatrix<float> *states);
+template<> void CuRand<float>::AddGaussNoise(CuMatrix<float> *tgt, float gscale);
/// also define the non-specialized versions, so the code always compies
-template<typename T> void CuRand<T>::RandUniform(CuMatrix<T>* tgt) {
+template<typename T> void CuRand<T>::RandUniform(CuMatrix<T> *tgt) {
KALDI_ERR << __func__ << " Not implemented";
}
-template<typename T> void CuRand<T>::RandGaussian(CuMatrix<T>* tgt) {
+template<typename T> void CuRand<T>::RandGaussian(CuMatrix<T> *tgt) {
KALDI_ERR << __func__ << " Not implemented";
}
-template<typename T> void CuRand<T>::BinarizeProbs(const CuMatrix<T>& probs, CuMatrix<T>* states) {
+template<typename T> void CuRand<T>::BinarizeProbs(const CuMatrix<T> &probs, CuMatrix<T> *states) {
KALDI_ERR << __func__ << " Not implemented";
}
-template<typename T> void CuRand<T>::AddGaussNoise(CuMatrix<T>* tgt, T gscale) {
+template<typename T> void CuRand<T>::AddGaussNoise(CuMatrix<T> *tgt, T gscale) {
KALDI_ERR << __func__ << " Not implemented";
}
index bb7aea8836d993bd2ef66789f2194035b9880f1d..bd2057ea4240c06cae28bde03731ee8e30eae781 100644 (file)
// **************
// float
//
- void cudaF_rand(dim3 Gr, dim3 Bl, float* mat, unsigned* z1, unsigned* z2, unsigned* z3, unsigned* z4, MatrixDim d);
- void cudaF_gauss_rand(dim3 Gr, dim3 Bl, float* mat, unsigned* z1, unsigned* z2, unsigned* z3, unsigned* z4, MatrixDim d);
- void cudaF_binarize_probs(dim3 Gr, dim3 Bl, float* states, const float* probs, float* rand, MatrixDim d);
+ void cudaF_rand(dim3 Gr, dim3 Bl, float *mat, unsigned *z1, unsigned *z2, unsigned *z3, unsigned *z4, MatrixDim d);
+ void cudaF_gauss_rand(dim3 Gr, dim3 Bl, float *mat, unsigned *z1, unsigned *z2, unsigned *z3, unsigned *z4, MatrixDim d);
+ void cudaF_binarize_probs(dim3 Gr, dim3 Bl, float *states, const float *probs, float *rand, MatrixDim d);
}
index 0f5c91c449fff1c52610ec9732617a12fe80c7df..be5a50a70df11cf6c4bdd96b7f443f1689b542e5 100644 (file)
template<typename _ElemT>
-CuStlVector<_ElemT>& CuStlVector<_ElemT>::CopyFromVec(const std::vector<_ElemT>& src) {
+CuStlVector<_ElemT>& CuStlVector<_ElemT>::CopyFromVec(const std::vector<_ElemT> &src) {
Resize(src.size());
#if HAVE_CUDA==1
@@ -101,7 +101,7 @@ CuStlVector<_ElemT>& CuStlVector<_ElemT>::CopyFromVec(const std::vector<_ElemT>&
template<typename _ElemT>
-void CuStlVector<_ElemT>::CopyToVec(std::vector<_ElemT>* dst) const {
+void CuStlVector<_ElemT>::CopyToVec(std::vector<_ElemT> *dst) const {
if (dst->size() != dim_) {
dst->resize(dim_);
}
/// Prints the vector to stream
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuStlVector<_ElemT>& vec) {
+std::ostream &operator << (std::ostream &out, const CuStlVector<_ElemT> &vec) {
std::vector<_ElemT> tmp;
vec.CopyToVec(&tmp);
out << "[";
index 8f04189875d0062318752d66fa5db793a8e4af62..8fb53a53e933666716ab2a8fd158a3db2840b15b 100644 (file)
void Destroy();
/// Copy functions (reallocates when needed)
- ThisType& CopyFromVec(const std::vector<_ElemT>& src);
- void CopyToVec(std::vector<_ElemT>* dst) const;
+ ThisType& CopyFromVec(const std::vector<_ElemT> &src);
+ void CopyToVec(std::vector<_ElemT> *dst) const;
// Math operations
//
private:
size_t dim_;
- _ElemT* data_; ///< GPU data pointer
+ _ElemT *data_; ///< GPU data pointer
std::vector<_ElemT> vec_; ///< non-GPU vector as back-off
};
/// I/O
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuStlVector<_ElemT>& vec);
+std::ostream &operator << (std::ostream &out, const CuStlVector<_ElemT> &vec);
} // namespace
index 6dc949c15131256d4963afd8f4fa3ed99f4c848a..9661b37546e7c94a33b5c7b7f2966fee2796bb98 100644 (file)
template<typename _ElemT>
-CuVector<_ElemT>& CuVector<_ElemT>::CopyFromVec(const CuVector<_ElemT>& src) {
+CuVector<_ElemT>& CuVector<_ElemT>::CopyFromVec(const CuVector<_ElemT> &src) {
Resize(src.Dim());
#if HAVE_CUDA==1
template<typename _ElemT>
-CuVector<_ElemT>& CuVector<_ElemT>::CopyFromVec(const Vector<_ElemT>& src) {
+CuVector<_ElemT>& CuVector<_ElemT>::CopyFromVec(const Vector<_ElemT> &src) {
Resize(src.Dim());
#if HAVE_CUDA==1
template<typename _ElemT>
-void CuVector<_ElemT>::CopyToVec(Vector<_ElemT>* dst) const {
+void CuVector<_ElemT>::CopyToVec(Vector<_ElemT> *dst) const {
if (dst->Dim() != dim_) {
dst->Resize(dim_);
}
template<typename _ElemT>
-void CuVector<_ElemT>::Read(std::istream& is, bool binary) {
+void CuVector<_ElemT>::Read(std::istream &is, bool binary) {
Vector<BaseFloat> tmp;
tmp.Read(is, binary);
CopyFromVec(tmp);
template<typename _ElemT>
-void CuVector<_ElemT>::Write(std::ostream& os, bool binary) const {
+void CuVector<_ElemT>::Write(std::ostream &os, bool binary) const {
Vector<BaseFloat> tmp;
CopyToVec(&tmp);
tmp.Write(os, binary);
/// Prints the vector to stream
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuVector<_ElemT>& vec) {
+std::ostream &operator << (std::ostream &out, const CuVector<_ElemT> &vec) {
Vector<_ElemT> tmp;
vec.CopyToVec(&tmp);
out << tmp;
* declare the float specialized methods
*/
template<> void CuVector<float>::Set(float value);
-template<> void CuVector<float>::AddVec(float alpha, const CuVector<float>& vec, float beta);
-template<> void CuVector<float>::AddColSum(float alpha, const CuMatrix<float>& mat, float beta);
+template<> void CuVector<float>::AddVec(float alpha, const CuVector<float> &vec, float beta);
+template<> void CuVector<float>::AddColSum(float alpha, const CuMatrix<float> &mat, float beta);
template<> void CuVector<float>::InvertElements();
} // namespace kaldi
index 0051894e3d0057f70392d992d28a1f16884c4785..70c4db88e809b5875dd7ed1be84e9191a9e27c57 100644 (file)
template<>
-void CuVector<float>::AddVec(float alpha, const CuVector<float>& vec, float beta) {
+void CuVector<float>::AddVec(float alpha, const CuVector<float> &vec, float beta) {
assert(vec.Dim() == Dim());
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
template<>
-void CuVector<float>::AddColSum(float alpha, const CuMatrix<float>& mat, float beta) {
+void CuVector<float>::AddColSum(float alpha, const CuMatrix<float> &mat, float beta) {
assert(mat.NumCols() == Dim());
#if HAVE_CUDA==1
if (CuDevice::Instantiate().Enabled()) {
index 2154bbb1bd54daa8e29b6626d0ec34f8e19c6578..8cbff35fdf0955b79d4ea1e701310c69768e3252 100644 (file)
void Destroy();
/// Copy functions (reallocates when needed)
- ThisType& CopyFromVec(const CuVector<_ElemT>& src);
- ThisType& CopyFromVec(const Vector<_ElemT>& src);
- void CopyToVec(Vector<_ElemT>* dst) const;
+ ThisType& CopyFromVec(const CuVector<_ElemT> &src);
+ ThisType& CopyFromVec(const Vector<_ElemT> &src);
+ void CopyToVec(Vector<_ElemT> *dst) const;
- void Read(std::istream& is, bool binary);
- void Write(std::ostream& is, bool binary) const;
+ void Read(std::istream &is, bool binary);
+ void Write(std::ostream &is, bool binary) const;
// Math operations
//
KALDI_ERR << __func__ << " Not implemented";
}
- void AddVec(_ElemT alpha, const CuVector<_ElemT>& vec, _ElemT beta=1.0) {
+ void AddVec(_ElemT alpha, const CuVector<_ElemT> &vec, _ElemT beta=1.0) {
KALDI_ERR << __func__ << " Not implemented";
}
- void AddColSum(_ElemT alpha, const CuMatrix<_ElemT>& mat, _ElemT beta=1.0) {
+ void AddColSum(_ElemT alpha, const CuMatrix<_ElemT> &mat, _ElemT beta=1.0) {
KALDI_ERR << __func__ << " Not implemented";
}
private:
size_t dim_;
- _ElemT* data_; ///< GPU data pointer
+ _ElemT *data_; ///< GPU data pointer
Vector<_ElemT> vec_; ///< non-GPU vector as back-off
};
/// I/O
template<typename _ElemT>
-std::ostream& operator << (std::ostream& out, const CuVector<_ElemT>& vec);
+std::ostream &operator << (std::ostream &out, const CuVector<_ElemT> &vec);
} // namespace
index f48c99aade610ba3e4647c2852f24cd3fe28b43d..47b650e834f07d49cd8e16385bc15802382e1097 100644 (file)
static void UnitTestNnet() {
try {
UnitTestSomething();
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index 5b957356cef06a0ce18e1d6200c59ed4e3ebef40..4e41721882af4c31ed22e55476640332663f0dd6 100644 (file)
KALDI_VLOG(3) << "For PDF index " << state << ": transforming means.";
int32 num_gauss = acoustic_model_.GetPdf(state).NumGauss(),
dim = acoustic_model_.Dim();
- const Vector<BaseFloat>& weights = acoustic_model_.GetPdf(state).weights();
- const Matrix<BaseFloat>& invvars = acoustic_model_.GetPdf(state).inv_vars();
+ const Vector<BaseFloat> &weights = acoustic_model_.GetPdf(state).weights();
+ const Matrix<BaseFloat> &invvars = acoustic_model_.GetPdf(state).inv_vars();
xformed_mean_invvars_[state] = new Matrix<BaseFloat>(num_gauss, dim);
mllr_xform_.GetTransformedMeans(regtree_, acoustic_model_, state,
xformed_mean_invvars_[state]);
previous_frame_ = frame;
}
- const Matrix<BaseFloat>& means_invvars = GetXformedMeanInvVars(state);
- const Vector<BaseFloat>& gconsts = GetXformedGconsts(state);
+ const Matrix<BaseFloat> &means_invvars = GetXformedMeanInvVars(state);
+ const Vector<BaseFloat> &gconsts = GetXformedGconsts(state);
Vector<BaseFloat> loglikes(gconsts); // need to recreate for each pdf
// loglikes += means * inv(vars) * data.
index 54b35830cd6839a88c28e65efea90e4bc762e76d..14de86356274c07f77470aae5f007fa4927dcab4 100644 (file)
private:
std::vector<int32> index_map_;
- DecodableInterface* decodable_;
+ DecodableInterface *decodable_;
KALDI_DISALLOW_COPY_AND_ASSIGN(DecodableMapped);
};
index 9a6d3ec70f28c713bc686f489057d413db07e238..eceaa9586a007dfc64e751b80955b6d78e185a1a 100644 (file)
using namespace fst;
std::vector<const VectorFst<StdArc>* > word_fsts(transcripts.size());
for (size_t i = 0; i < transcripts.size(); i++) {
- VectorFst<StdArc>* word_fst = new VectorFst<StdArc>();
+ VectorFst<StdArc> *word_fst = new VectorFst<StdArc>();
MakeLinearAcceptor(transcripts[i], word_fst);
word_fsts[i] = word_fst;
}
index c2bd2f6f1ac045ed41f14f349b9d6aad0dfac113..b652db575f224d2dec3100221da27c027f4ed98b 100644 (file)
UnitTestSimple();
UnitTestHTKCompare1();
std::cout << "Tests succeeded.\n";
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index 9f46757736ec52e04558e09c569fbd29ed82adaa..fa3795c5cf99b3d550e573e932ec80891186c92f 100644 (file)
}
}
-void InitIdftBases(size_t n_bases, size_t dimension, Matrix<BaseFloat>* mat_out) {
+void InitIdftBases(size_t n_bases, size_t dimension, Matrix<BaseFloat> *mat_out) {
float angle = M_PI / (float)(dimension - 1);
float scale = 1.0f / (2.0f * (dimension - 1));
index 09e66e90a35c46c9311e981f48701d2775893e2c..e996ef3ff764c0132849f27d98b1fef1823181eb 100644 (file)
Vector<BaseFloat> *ans);
-void InitIdftBases(size_t n_bases, size_t dimension, Matrix<BaseFloat>* mat_out);
+void InitIdftBases(size_t n_bases, size_t dimension, Matrix<BaseFloat> *mat_out);
// Compute LP coefficients from autocorrelation coefficients.
index 775a7ac86dcd94b071a6a739d746f3f24adc51ca..ab9b30a1c75b93f680fd9b548818ba4cdfbdf1a3 100644 (file)
UnitTestHTKCompare5();
UnitTestHTKCompare6();
std::cout << "Tests succeeded.\n";
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index cf8589654863c6366138d5a5fa57266a232f04f3..90f4c624b09da9b3eb019844fa2b9e7b28e7e20b 100644 (file)
UnitTestSimple();
UnitTestHTKCompare1();
std::cout << "Tests succeeded.\n";
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index d52e5be8c299d8ca05f06373f22b33e1ba2f154e..2de8ce005a91fe2bc2dfdc82a52742f7c042046f 100644 (file)
--- a/src/feat/feature-plp.cc
+++ b/src/feat/feature-plp.cc
template<typename Real>
-SubMatrix<Real> GetCols(MatrixBase<Real>& matrix, size_t co, size_t c = 1) {
+SubMatrix<Real> GetCols(MatrixBase<Real> &matrix, size_t co, size_t c = 1) {
assert(co+c <= matrix.NumCols());
return SubMatrix<Real>(matrix, 0, matrix.NumRows(), co, c);
}
index c4025521e36a3ffcc296cf6cf81ed8c9bb67eccc..3b39e384d8c0876677447f0ec38ca0459566d36a 100644 (file)
// pAC - autocorrelation coefficients [n + 1]
// pLP - linear prediction coefficients [n] (predicted_sn = sum_1^P{a[i] * s[n-i]}})
// F(z) = 1 / (1 - A(z)), 1 is not stored in the demoninator
-BaseFloat Durbin (int n, const BaseFloat* pAC, BaseFloat* pLP, BaseFloat* pTmp) {
+BaseFloat Durbin (int n, const BaseFloat *pAC, BaseFloat *pLP, BaseFloat *pTmp) {
BaseFloat ki; // reflection coefficient
int i;
int j;
}
-void Lpc2Cepstrum (int n, const BaseFloat* pLPC, BaseFloat* pCepst) {
+void Lpc2Cepstrum (int n, const BaseFloat *pLPC, BaseFloat *pCepst) {
int i;
for (i = 0; i < n; i++)
{
index 1b1dbc48ef2a7019ef1b30e747df4c7603bf14cd..858ad4d1f3a1fc20a692fdc4d72a4d2b13a458e6 100644 (file)
// pAC - autocorrelation coefficients [n + 1]
// pLP - linear prediction coefficients [n] (predicted_sn = sum_1^P{a[i] * s[n-i]}})
// F(z) = 1 / (1 - A(z)), 1 is not stored in the demoninator
-BaseFloat Durbin (int n, const BaseFloat* pAC, BaseFloat* pLP, BaseFloat* pTmp);
+BaseFloat Durbin (int n, const BaseFloat *pAC, BaseFloat *pLP, BaseFloat *pTmp);
-void Lpc2Cepstrum (int n, const BaseFloat* pLPC, BaseFloat* pCepst);
+void Lpc2Cepstrum (int n, const BaseFloat *pLPC, BaseFloat *pCepst);
/// @} End of "addtogroup feat"
} // namespace kaldi
index 2860d5f244bc7b9ed6f2f2ddf080f18aef758428..d35f12644add3dde5e18bcb1117eca47320ca320 100644 (file)
feat_writer.Write(key, new_feats);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index ee64839aefb5a6c83632e2c6a843bafba4148762..89d2b7d759d3bd6ee4b657840a391bb7770013c3 100644 (file)
}
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e62e563fbe09532d28b6a266503cb8072bc7dbec..36039f4c4c31b76e2639fe951014e2e8da576e59 100644 (file)
c.Write(ko.Stream(), binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8434a246aa939d01595b7e2b64d9779a7b5df490..32462a4dc944b920cedc39be24cd13c869ddfd75 100644 (file)
fstats.Write(ko.Stream(), binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 256c887891599612b91a387b9e1395ffba7d1910..46173954b1d21e6abfdffb11d6bd86913916ff20 100644 (file)
KALDI_LOG << " Done " << num_success << " out of " << num_utts
<< " utterances.";
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f3c357076f7ab7dfed3e03dc348efe1c6c41d4aa..ad5ed3c76f901a0391e298b84f1786a635cde0a2 100644 (file)
KALDI_LOG << " Done " << num_success << " out of " << num_utts
<< " utterances.";
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index d59cd5327de513a2c797963a7939d852b2bc38e4..fc1bbffa43d3d09d70c04bf136d3c9f1315ef08b 100644 (file)
KALDI_LOG << " Done " << num_success << " out of " << num_utts
<< " utterances.";
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 16f4d6cc05902aa8b3637b2a6e4fed4f92369112..ce82da7979e634a301d2b82b8f4a98735b8091e6 100644 (file)
kaldi_writer.Write(kaldi_reader.Key(), kaldi_reader.Value());
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index fb3522c85320af1e91f0477745a3d6405bb48fbe..fcb09b7e785f17fdb7415a294844a76ac0789c72 100644 (file)
<< num_lines << " in the segments file. ";
/* prints number of segments processed */
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index bddf4e1d0e96c165e39e8a1ab80a9a8d901cccfe..9c71e2edbd1f20dc1d2f4c93109a2b267d594a0d 100644 (file)
ko.Stream() << kaldi_reader.Value().NumCols() << "\n";
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 80640f17ff00d5683f619f7d975e29d8351f1423..7556ab8d48c117e6fe4f457640d87a5716fb3cb8 100644 (file)
length_writer.Write(kaldi_reader.Key(), kaldi_reader.Value().NumRows());
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 7a60c332b106732be4f061b9ff2e7688193e323f..28c0bb1b6e02d938bdce639bd95047ea0e22759e 100644 (file)
WriteKaldiObject(fmpe_stats, stats_wxfilename, binary);
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 56c3a5cd34101a3b4978949b72c06c374aa1020e..6eadfb189d1543fc931b7bfc00e68b2697a8c77c 100644 (file)
KALDI_LOG << " Done " << num_done << " utterances, " << num_err
<< " had errors.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 566d4a7dc5c1374e2f4c803af3507fdd71fd1b0a..61efbe700819face6099ac440600bcf43c562a2b 100644 (file)
--- a/src/featbin/fmpe-copy.cc
+++ b/src/featbin/fmpe-copy.cc
KALDI_LOG << "Copyied fMPE object to " << fmpe_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index cdb5769137c674695bd910d6d59ad00c71ffd807..dae229cbf747b19951d49485c9ebf4f245a16222 100644 (file)
--- a/src/featbin/fmpe-est.cc
+++ b/src/featbin/fmpe-est.cc
KALDI_LOG << "Updated fMPE object and wrote to "
<< fmpe_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index cf012e3bb174d6f895610df0cc952572ae5c1c0b..a45e375d705bcf6ba79de2005aee7f65a3b8ab5a 100644 (file)
--- a/src/featbin/fmpe-init.cc
+++ b/src/featbin/fmpe-init.cc
KALDI_LOG << "Initialized fMPE object and wrote to "
<< fmpe_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c3fb01e23bef26161595de88dd08083d413a3d93..23c4e22b386a65e0ea734eef299ae5598206a210 100644 (file)
KALDI_LOG << "Summed " << (po.NumArgs()-1) << " fMPE stats and wrote to "
<< stats_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 1e6bb4491c6ecf14dff3bf3b64b137177ed70a26..f94cadb7ce6e84e8a82ac659b0f4000a662d10fc 100644 (file)
feat_writer.Write(key, feats);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 86104b2ab5fce6f30e954b4d27e29da94517651c..9422df6c62270ec02a14f097474571f72d24e4f3 100644 (file)
kaldi_writer.Write(kaldi_reader.Key(), spliced);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 749c9e4ccffeca824e9de567d972d94344fbf5e6..9733c227c400410f8dc4804f99b774d2583b3ed7 100644 (file)
kaldi_writer.Write(kaldi_reader.Key(), kaldi_reader.Value());
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2f043aab2f2523b6a56b9c13218c1d9afc4a6521..84c1fea766bd8c2c00f38852882d75be51d37c0b 100644 (file)
<< " had errors.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/fgmmbin/fgmm-global-acc-stats-twofeats.cc b/src/fgmmbin/fgmm-global-acc-stats-twofeats.cc
index ceab8f2e3be62985c86d04d52665e4b4c756c19a..746e51e6b6ac47f2f39228fb4d3ca146a04c2bdf 100644 (file)
WriteKaldiObject(gmm_accs, accs_wxfilename, binary);
KALDI_LOG << "Written accs to " << accs_wxfilename;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2396d758957dadb8cc05f8e0aafdc55c1340376c..a404974b37652cdd1c07651900e48e88cda7f524 100644 (file)
WriteKaldiObject(fgmm_accs, accs_wxfilename, binary);
KALDI_LOG << "Written accs to " << accs_wxfilename;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index fccbca50963b3b8af93b7a72d3c1fabe22a34893..58b444d1c95639a18822eccccce4f2e63cda6a98 100644 (file)
WriteKaldiObject(fgmm, model_out_filename, binary_write);
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index fe2e4e15d8776bb2d3c4edacc799afceccacc586..3a517e571c06ade107688e35bbb8bd059286bdbe 100644 (file)
WriteKaldiObject(fgmm, model_out_filename, binary_write);
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
diff --git a/src/fgmmbin/fgmm-global-get-frame-likes.cc b/src/fgmmbin/fgmm-global-get-frame-likes.cc
index f387b71a11566c6a19e8d162e7a41d91c0d853f8..5f5407aa5c097318bbb9c4e8bf5574b0b98f5c0e 100644 (file)
<< "frame = " << (tot_like/tot_frames) << " over " << tot_frames
<< " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9a012e97103386bca6b1eef8f92e304ace1e665e..1983c4be2d0ad3f68af7537fdee9d0b7e001718f 100644 (file)
// Write out the model
WriteKaldiObject(fgmm, fgmm_out_filename, binary);
KALDI_LOG << "Written merged GMM to " << fgmm_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 35a2afe4779ffb9ade31200a54cdaf3523be2718..bfa286065d039e012172cc9f055a82d42f61295a 100644 (file)
}
KALDI_LOG << "Written stats to " << stats_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 0109d7c8cff6d4ccac07ddd4d1079cf8d465b094..c5ee9017925c31000bc244db79d408e9040fd6f9 100644 (file)
gmm.CopyFromFullGmm(fgmm);
WriteKaldiObject(gmm, gmm_wxfilename, binary);
KALDI_LOG << "Written diagonal GMM to " << gmm_wxfilename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 14d05994ad54435ac7fa66dccf027baf842ec290..7b2598d059d2c7cfbfe4255a0163c686f08aba3e 100644 (file)
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9a17a9d42095215c3f16e8932d0180d1209e2250..cae3c1ce534d7fc2e4b454fa255fab8ae6b8da02 100644 (file)
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 7bf43aa8a2c039c4f67fe461d4c2083770b7f0ed..7e278d14cce2bbd589bd6273bfe3c09cee0ce8d4 100644 (file)
WriteFstKaldi(*fst, fst_out_filename);
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8a639df0e461e27273490e9f4a535e6a9072c8dd..fdeb7e9777ab686b1dbf12c5513a6e70b853df4a 100644 (file)
WriteFstKaldi(composed_fst, fst_out_filename);
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/fstbin/fstcopy.cc b/src/fstbin/fstcopy.cc
index 2db698116bfdb672cb8c39348db62a4961d0dd3d..492cd71ffb084c0e77374bfb1884d3988422e932 100644 (file)
--- a/src/fstbin/fstcopy.cc
+++ b/src/fstbin/fstcopy.cc
KALDI_LOG << "Copied " << n_done << " FSTs.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a83f5c2c24761f33639bbf5184826cad7f4150aa..c4b00a9668acee869b17eb8edfb03a5cd5c2c9dd 100644 (file)
WriteFstKaldi(*fst, fst_out_filename);
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 101ca244a106bc888d2f6a7c2cb301ca81159abd..4a2573209f2e3858a81a1b8b69626402772b49bc 100644 (file)
}
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2d118ecfbb8ce240593c13348a0267c9bc4aa7cb..b2d76435b54a3043019e55fa653ffb7f7800e41b 100644 (file)
--- a/src/fstbin/fstfactor.cc
+++ b/src/fstbin/fstfactor.cc
WriteFstKaldi(fst1, fst1_out_filename);
WriteFstKaldi(fst2, fst2_out_filename);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 259cc1c3532d515b85278a452ed2863dfd02de24..2d4959df0d8147c4eb13bfa2ed142c92f4327f1f 100644 (file)
delete fst;
if (ans) return 0; // success;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 919fed276ba5d97d2f1252c0ee72af32aa43a0b0..fc2ae195e49c43fe4a8c52ddcecae5b0afc02c1f 100644 (file)
<< PrintableWxfilename(disambig_wxfilename);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 7ad66ee4e08a4d355a02d10411ff78207c1aa4cd..53e98d13abc4566c84be8f55ef53c79cee09728c 100644 (file)
delete clg_symtab;
delete phones_symtab;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 86007dc106abd0b8e7d837489b04ece34f3bb9c2..cb2036acd01b7a10ab790161389987d54147e43b 100644 (file)
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6153d442e471a60826a82b0d2c140604cf8676e0..af50db6b3e801415b6ab9ddea6c3b1e8883a40e3 100644 (file)
<< "ask the maintainers to implement it, or call this program "
<< "differently.";
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 3c41db44e6b8c0faf0360421203d514edafdb045..7a5e617929fffa2932259cde804001252ab18d13 100644 (file)
WriteFstKaldi(*fst, fst_out_str);
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/fstbin/fstrand.cc b/src/fstbin/fstrand.cc
index d84cf71cd8dcc30ce102bf580a66c895d844bd40..fa0ea61f7b2e12498782964f014cd8cdbd6cf2bb 100644 (file)
--- a/src/fstbin/fstrand.cc
+++ b/src/fstbin/fstrand.cc
WriteFstKaldi(*rand_fst, fst_out_filename);
delete rand_fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 76479649d5367f870d54f9995fde8980b6b3a4d8..d1c46155102ba5ab0bc24c077dabdb94b151b604 100644 (file)
WriteFstKaldi(*fst, fst_out_filename);
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 1611e5e13300b74bc541f643ee192e3d090d0610..752c2b2ad978660b2186486aede236ffde12874d 100644 (file)
delete fst;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 057fbf43881629ddd0d0c1b4d9bd607795a6f308..fbcb5e243c018f0564551ebfb8edc7b6a6e93143 100644 (file)
<< "ask the maintainers to implement it, or call this program "
<< "differently.";
}
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c30ee91aa831fd33b3fb7bfae68316020c9ac0ef..a75078ec1ae1b5a4975bd1d8453f0dc46ad83b21 100644 (file)
template<class Arc, class LabelT>
ContextFstImpl<Arc, LabelT>::ContextFstImpl(Label subsequential_symbol, // epsilon not allowed.
- const vector<LabelT>& phone_syms, // on output side of ifst.
- const vector<LabelT>& disambig_syms, // on output
+ const vector<LabelT> &phone_syms, // on output side of ifst.
+ const vector<LabelT> &disambig_syms, // on output
int N,
int P):
phone_syms_(phone_syms), disambig_syms_(disambig_syms), subsequential_symbol_(subsequential_symbol) ,
index ad4107eae535d40329f882ff47c85d2d7b52c811..4c67507b00129c246156f466e5b1ca16e6353d67 100644 (file)
--- a/src/fstext/context-fst.h
+++ b/src/fstext/context-fst.h
typedef typename VectorToLabelType::const_iterator VectorToLabelIter;
ContextFstImpl(Label subsequential_symbol, // epsilon not allowed.
- const vector<LabelT>& phones,
- const vector<LabelT>& disambig_syms,
+ const vector<LabelT> &phones,
+ const vector<LabelT> &disambig_syms,
int32 N, // size of ctx window
int32 P);
index ccba995bc458b9e94cfab277e8c15afd9d628a6b..5d3405451e7a6572a64192480036229ed458373b 100644 (file)
}
template<class Arc>
-void DeterministicOnDemandFstImpl<Arc>::AddSingleArc(StateId s, Label l, Arc& a) {
+void DeterministicOnDemandFstImpl<Arc>::AddSingleArc(StateId s, Label l, Arc &a) {
StateLabelPair sl = make_pair(s, l);
state_cache_map_[sl] = a;
}
@@ -162,7 +162,7 @@ void DeterministicOnDemandFstImpl<Arc>::InitArcIterator(StateId s, ArcIteratorDa
template<class Arc>
typename Arc::Weight DeterministicOnDemandFstImpl<Arc>::GetFinalFromNonDetFst(
- const Fst<Arc>* fst, StateId s) {
+ const Fst<Arc> *fst, StateId s) {
Weight w = fst->Final(s);
if (w != Weight::Zero()) return w;
SortedMatcher<Fst<Arc> > sm(*fst, MATCH_INPUT, 1); // enable matching epsilons as well
// helper method for GetArc()
template<class Arc>
bool DeterministicOnDemandFstImpl<Arc>::GetArcFromNonDetFst(
- const Fst<Arc>* fst, StateId s, Label ilabel, Arc *oarc,
+ const Fst<Arc> *fst, StateId s, Label ilabel, Arc *oarc,
typename Arc::Weight iweight) {
// use a SortedMatcher
index 6747ea0c75d4de966c3c87bc75ca341164b5a309..a593b3f68b1afcfe2266eb44c6dbff2ddd85f348 100644 (file)
blnReturn = false;
}
- return(blnReturn);
+ return blnReturn;
}
// Simplify writing
fst->AddState(); // state 5
fst->SetFinal(5, 0.6);
- return(fst);
+ return fst;
}
// what the resulting DeterministicOnDemand FST should be like
fst->AddState(); // state 5
fst->SetFinal(5, 0.6);
- return(fst);
+ return fst;
}
void DeleteTestFst(StdVectorFst *fst) {
// Follow paths from an input fst representing a string
// (poor man's composition)
-Weight WalkSinglePath(StdVectorFst* ifst, StdDeterministicOnDemandFst* dfst) {
- StdArc* oarc = new StdArc();
+Weight WalkSinglePath(StdVectorFst *ifst, StdDeterministicOnDemandFst *dfst) {
+ StdArc *oarc = new StdArc();
StateId isrc=ifst->Start();
StateId dsrc=dfst->Start();
Weight totalCost = Weight::One();
using namespace fst;
// Build from existing fst
cout << "Test with single generated backoff FST" << endl;
- StdVectorFst* nfst = CreateBackoffFst();
- StdVectorFst* rfst = CreateResultFst();
+ StdVectorFst *nfst = CreateBackoffFst();
+ StdVectorFst *rfst = CreateResultFst();
// before using, make sure that it is input sorted
ArcSort(nfst, StdILabelCompare());
- StdDeterministicOnDemandFst* dfst1 = new StdDeterministicOnDemandFst(*nfst);
+ StdDeterministicOnDemandFst *dfst1 = new StdDeterministicOnDemandFst(*nfst);
// Compare all arcs in dfst1 with expected result
for (StateIterator<StdVectorFst> riter(*rfst); !riter.Done(); riter.Next()) {
Weight cost2;
cout << "Single FST case: string with G'" << endl;
cout << " Reading Gp.fst" << endl << std::flush;
- StdVectorFst* gpfst = StdVectorFst::Read("Gp.fst");
+ StdVectorFst *gpfst = StdVectorFst::Read("Gp.fst");
cout << " Creating SDODF from Gp.fst" << endl << std::flush;
- StdDeterministicOnDemandFst* dfst2 = new StdDeterministicOnDemandFst(*gpfst);
- StdVectorFst* strfst = StdVectorFst::Read("str.fst");
+ StdDeterministicOnDemandFst *dfst2 = new StdDeterministicOnDemandFst(*gpfst);
+ StdVectorFst *strfst = StdVectorFst::Read("str.fst");
cost2 = WalkSinglePath(strfst, dfst2);
delete dfst2; delete strfst;
cout << "Composition case: string*bg with G^-1 * G'" << endl;
cout << " Reading Gm.fst" << endl << std::flush;
StdVectorFst *gmfst = StdVectorFst::Read("Gm.fst");
- StdDeterministicOnDemandFst* dfst3 = new StdDeterministicOnDemandFst(*gmfst, *gpfst);
+ StdDeterministicOnDemandFst *dfst3 = new StdDeterministicOnDemandFst(*gmfst, *gpfst);
StdVectorFst *strbgfst = StdVectorFst::Read("strbg.fst");
cost3 = WalkSinglePath(strbgfst, dfst3);
cout << "Cache had "<<dfst3->CacheCalls()<<" calls and "<<dfst3->CacheHits()<<" hits"<< endl;
index 02cf201bee4e34e69d8da8a3ad6b21190a7d16ea..99b01ec27cc567740805229ba2338431fedac3d0 100644 (file)
// there are 2 cases:
// - there is a single underlying FST (then fst2_ is NULL)
// - the composition case, where there are 2 underlying FSTs (then fst2_ is not NULL)
- const Fst<Arc>* fst1_;
- const Fst<Arc>* fst2_;
+ const Fst<Arc> *fst1_;
+ const Fst<Arc> *fst2_;
// private helper method for GetArc()
// Used to get first matching arc from one of the underlying non-deterministic FST's
// (recursively and cumulating the weights) until an arc with input ilabel is found.
// If called with input ilabel equal to epsilon, treats it as any other label
// (i.e. matches it only with epsilon labels).
- static bool GetArcFromNonDetFst(const Fst<Arc>* fst, StateId s, Label ilabel,
+ static bool GetArcFromNonDetFst(const Fst<Arc> *fst, StateId s, Label ilabel,
Arc *oarc, Weight iweight = Weight::One());
// private helper method for GetFinal(). If current state is final returns it;
// else follows epsilons recursively till it finds a final state and returns the
// first one it finds (or Zero() if none found).
- Weight GetFinalFromNonDetFst(const Fst<Arc>* fst, StateId s);
+ Weight GetFinalFromNonDetFst(const Fst<Arc> *fst, StateId s);
// state management for composition
typedef std::pair<StateId, StateId> StatePair;
typename StateLabelMap::iterator scm_it_;
bool HasArc(StateId s, Label l);
- void AddSingleArc(StateId s, Label l, Arc& a);
+ void AddSingleArc(StateId s, Label l, Arc &a);
void SetArc(StateId s, Label l);
bool GetCachedArc(StateId s, Label l, Arc *oarc);
index 0c2b84999c11d7c93cf260936e8347a05d6120fb..e91433708f730744db7225c16057846a93b32e32 100644 (file)
public:
class VectorKey { // Hash function object.
public:
- size_t operator()(const vector<Label>* vec) const {
+ size_t operator()(const vector<Label> *vec) const {
assert(vec != NULL);
size_t hash = 0, factor = 1;
for (typename vector<Label>::const_iterator it = vec->begin(); it != vec->end(); it++)
};
class VectorEqual { // Equality-operator function object.
public:
- size_t operator()(const vector<Label>* vec1, const vector<Label>* vec2) const {
+ size_t operator()(const vector<Label> *vec1, const vector<Label> *vec2) const {
return (*vec1 == *vec2);
}
};
// to the queue).
void ProcessSubset(const pair<vector<Element>*, OutputStateId> & pair) {
- const vector<Element>* subset = pair.first;
+ const vector<Element> *subset = pair.first;
OutputStateId state = pair.second;
vector<Element> closed_subset; // subset after epsilon closure.
index 0abf1e0d5e7b291c9fd85176dc5366c02b00a9ea..98615121c0e84e83396e4183bf80a48781dde2f3 100644 (file)
}
assert(RandEquivalent(*fst, ofst, 5/*paths*/, 0.01/*delta*/, rand()/*seed*/, 100/*path length, max*/));
} catch (...) {
- std::cout << "Failed to determinize* this FST (probably not determinizable)\n";
+ std::cout << "Failed to determinize *this FST (probably not determinizable)\n";
}
delete fst;
}
for(int i = 0; i < 10; i++) {
RandFstOptions opts;
opts.acyclic = true;
- VectorFst<Arc>* ifst = RandFst<Arc>(opts);
+ VectorFst<Arc> *ifst = RandFst<Arc>(opts);
VectorFst<Arc> ofst;
Determinize(*ifst, &ofst);
assert(RandEquivalent(*ifst, ofst, 5, 0.01, rand(), 100));
index 7d004407661d246a2de62e028094408202eff731..d07d55820cf941d692c8eb60ae29d3e4aac37aad 100644 (file)
// but more clearly], so we can check for equivalence.
template<class Arc>
VectorFst<Arc>* MakeLoopFstCompare(const vector<const ExpandedFst<Arc> *> &fsts) {
- VectorFst<Arc>* ans = new VectorFst<Arc>;
+ VectorFst<Arc> *ans = new VectorFst<Arc>;
typedef typename Arc::Label Label;
typedef typename Arc::StateId StateId;
typedef typename Arc::Weight Weight;
index f29debd2ec6afb3befc14afbf5c58e5637f055e1..8abb5a46791e200f141641cdd8b8067d27cbf943 100644 (file)
// It could either be a programming error or a deeper conceptual bug.
return NULL; // nothing was inserted.
} else {
- vector<T>* ret = (*S)[idx] = new vector<T>(m); // New copy of m.
+ vector<T> *ret = (*S)[idx] = new vector<T>(m); // New copy of m.
return ret; // was inserted.
}
}
index 35bfd6a2f99f45b0b8047fc4c06278251d8bb58b..688e3f994962bce5c55a3c211c7230db0ab599c8 100644 (file)
virtual ~TableMatcherImpl() {
assert(RefCount() == 0);
- vector<ArcId>* const empty = ((vector<ArcId>*)(NULL)) + 1; // special marker.
+ vector<ArcId> *const empty = ((vector<ArcId>*)(NULL)) + 1; // special marker.
for (size_t i = 0; i < tables_.size(); i++) {
if (tables_[i] != NULL && tables_[i] != empty)
delete tables_[i];
if (match_type_ == MATCH_NONE)
LOG(FATAL) << "TableMatcher: bad match type";
s_ = s;
- vector<ArcId>* const empty = ((vector<ArcId>*)(NULL)) + 1; // special marker.
+ vector<ArcId> *const empty = ((vector<ArcId>*)(NULL)) + 1; // special marker.
if (static_cast<size_t>(s) >= tables_.size()) {
assert(s>=0);
tables_.resize(s+1, NULL);
diff --git a/src/gmm/am-diag-gmm.cc b/src/gmm/am-diag-gmm.cc
index bf338ae113adf88e6031794c4a450e0e85ac3080..53ad23a09ab2411f5ab095b3a007ad6f5db5eaff 100644 (file)
--- a/src/gmm/am-diag-gmm.cc
+++ b/src/gmm/am-diag-gmm.cc
DeletePointers(&densities_);
}
-void AmDiagGmm::Init(const DiagGmm& proto, int32 num_pdfs) {
+void AmDiagGmm::Init(const DiagGmm &proto, int32 num_pdfs) {
if (densities_.size() != 0) {
KALDI_WARN << "Init() called on a non-empty object. Contents will be "
"overwritten";
}
}
-void AmDiagGmm::AddPdf(const DiagGmm& gmm) {
+void AmDiagGmm::AddPdf(const DiagGmm &gmm) {
if (densities_.size() != 0) // not the first gmm
assert(static_cast<int32>(gmm.Dim()) == dim_);
else
<< reduce_state_factor;
}
-void ClusterGaussiansToUbm(const AmDiagGmm& am,
+void ClusterGaussiansToUbm(const AmDiagGmm &am,
const Vector<BaseFloat> &state_occs,
UbmClusteringOptions opts,
DiagGmm *ubm_out) {
diff --git a/src/gmm/am-diag-gmm.h b/src/gmm/am-diag-gmm.h
index e3faddd39486e99d9b84f531f9001660a6322b51..38ee11ecbaa0967fb2ae1d12324486a219015c6d 100644 (file)
--- a/src/gmm/am-diag-gmm.h
+++ b/src/gmm/am-diag-gmm.h
/// Initializes with a single "prototype" GMM.
void Init(const DiagGmm &proto, int32 num_pdfs);
/// Adds a GMM to the model, and increments the total number of PDFs.
- void AddPdf(const DiagGmm& gmm);
+ void AddPdf(const DiagGmm &gmm);
/// Copies the parameters from another model. Allocates necessary memory.
void CopyFromAmDiagGmm(const AmDiagGmm &other);
* et al., "The subspace Gaussian mixture model - A structured model for speech
* recognition", In Computer Speech and Language, April 2011.
*/
-void ClusterGaussiansToUbm(const AmDiagGmm& am,
+void ClusterGaussiansToUbm(const AmDiagGmm &am,
const Vector<BaseFloat> &state_occs,
UbmClusteringOptions opts,
DiagGmm *ubm_out);
diff --git a/src/gmm/diag-gmm-inl.h b/src/gmm/diag-gmm-inl.h
index 2b661e02992cb213ae9b5f03a8b8c9759c7ca818..c621e5f1b794a73c7a26186fac4d9aee2a4aad9c 100644 (file)
--- a/src/gmm/diag-gmm-inl.h
+++ b/src/gmm/diag-gmm-inl.h
namespace kaldi {
template<class Real>
-void DiagGmm::SetWeights(const VectorBase<Real>& w) {
+void DiagGmm::SetWeights(const VectorBase<Real> &w) {
KALDI_ASSERT(weights_.Dim() == w.Dim());
weights_.CopyFromVec(w);
valid_gconsts_ = false;
template<class Real>
-void DiagGmm::SetMeans(const MatrixBase<Real>& m) {
+void DiagGmm::SetMeans(const MatrixBase<Real> &m) {
KALDI_ASSERT(means_invvars_.NumRows() == m.NumRows()
&& means_invvars_.NumCols() == m.NumCols());
means_invvars_.CopyFromMat(m);
}
template<class Real>
-void DiagGmm::SetComponentMean(int32 g, const VectorBase<Real>& in) {
+void DiagGmm::SetComponentMean(int32 g, const VectorBase<Real> &in) {
KALDI_ASSERT(g < NumGauss() && Dim() == in.Dim());
Vector<Real> tmp(Dim());
tmp.CopyRowFromMat(inv_vars_, g);
template<class Real>
-void DiagGmm::SetInvVarsAndMeans(const MatrixBase<Real>& invvars,
- const MatrixBase<Real>& means) {
+void DiagGmm::SetInvVarsAndMeans(const MatrixBase<Real> &invvars,
+ const MatrixBase<Real> &means) {
KALDI_ASSERT(means_invvars_.NumRows() == means.NumRows()
&& means_invvars_.NumCols() == means.NumCols()
&& inv_vars_.NumRows() == invvars.NumRows()
}
template<class Real>
-void DiagGmm::SetInvVars(const MatrixBase<Real>& v) {
+void DiagGmm::SetInvVars(const MatrixBase<Real> &v) {
KALDI_ASSERT(inv_vars_.NumRows() == v.NumRows()
&& inv_vars_.NumCols() == v.NumCols());
}
template<class Real>
-void DiagGmm::SetComponentInvVar(int32 g, const VectorBase<Real>& v) {
+void DiagGmm::SetComponentInvVar(int32 g, const VectorBase<Real> &v) {
KALDI_ASSERT(g < NumGauss() && v.Dim() == Dim());
int32 dim = Dim();
template<class Real>
-void DiagGmm::GetVars(Matrix<Real>* v) const {
+void DiagGmm::GetVars(Matrix<Real> *v) const {
assert(v != NULL);
v->Resize(NumGauss(), Dim());
v->CopyFromMat(inv_vars_);
template<class Real>
-void DiagGmm::GetComponentMean(int32 gauss, VectorBase<Real>* out) const {
+void DiagGmm::GetComponentMean(int32 gauss, VectorBase<Real> *out) const {
assert(gauss < NumGauss());
assert(static_cast<int32>(out->Dim()) == Dim());
Vector<Real> tmp(Dim());
}
template<class Real>
-void DiagGmm::GetComponentVariance(int32 gauss, VectorBase<Real>* out) const {
+void DiagGmm::GetComponentVariance(int32 gauss, VectorBase<Real> *out) const {
assert(gauss < NumGauss());
assert(static_cast<int32>(out->Dim()) == Dim());
out->CopyRowFromMat(inv_vars_, gauss);
index c6d46c87eac1e7467eb3993ba92e8796407c8207..f7c209c696fd219812ba60a264e413178e70b496 100644 (file)
--- a/src/gmm/diag-gmm-test.cc
+++ b/src/gmm/diag-gmm-test.cc
{
bool binary_in;
- DiagGmm* gmm2 = new DiagGmm();
+ DiagGmm *gmm2 = new DiagGmm();
Input ki("tmpf", &binary_in);
gmm2->Read(ki.Stream(), binary_in);
delete gmm2;
// binary read
- DiagGmm* gmm3;
+ DiagGmm *gmm3;
gmm3 = new DiagGmm();
Input ki2("tmpfb", &binary_in);
gmm3->Read(ki2.Stream(), binary_in);
diff --git a/src/gmm/diag-gmm.h b/src/gmm/diag-gmm.h
index 945d0bde38e6ea5fdca58ba571345cdcecf941f9..4daf19a40bc94edd957ff71c696b1c2d42b714ce 100644 (file)
--- a/src/gmm/diag-gmm.h
+++ b/src/gmm/diag-gmm.h
GmmFlagsType flags = kGmmAll);
/// Const accessors
- const Vector<BaseFloat>& gconsts() const {
+ const Vector<BaseFloat> &gconsts() const {
KALDI_ASSERT(valid_gconsts_);
return gconsts_;
}
- const Vector<BaseFloat>& weights() const { return weights_; }
- const Matrix<BaseFloat>& means_invvars() const { return means_invvars_; }
- const Matrix<BaseFloat>& inv_vars() const { return inv_vars_; }
+ const Vector<BaseFloat> &weights() const { return weights_; }
+ const Matrix<BaseFloat> &means_invvars() const { return means_invvars_; }
+ const Matrix<BaseFloat> &inv_vars() const { return inv_vars_; }
bool valid_gconsts() const { return valid_gconsts_; }
/// Removes single component from model
/// Mutators for both float or double
template<class Real>
- void SetWeights(const VectorBase<Real>& w); ///< Set mixure weights
+ void SetWeights(const VectorBase<Real> &w); ///< Set mixure weights
/// Use SetMeans to update only the Gaussian means (and not variances)
template<class Real>
- void SetMeans(const MatrixBase<Real>& m);
+ void SetMeans(const MatrixBase<Real> &m);
/// Use SetInvVarsAndMeans if updating both means and (inverse) variances
template<class Real>
- void SetInvVarsAndMeans(const MatrixBase<Real>& invvars,
- const MatrixBase<Real>& means);
+ void SetInvVarsAndMeans(const MatrixBase<Real> &invvars,
+ const MatrixBase<Real> &means);
/// Set the (inverse) variances and recompute means_invvars_
template<class Real>
- void SetInvVars(const MatrixBase<Real>& v);
+ void SetInvVars(const MatrixBase<Real> &v);
/// Accessor for covariances.
template<class Real>
- void GetVars(Matrix<Real>* v) const;
+ void GetVars(Matrix<Real> *v) const;
/// Accessor for means.
template<class Real>
void GetMeans(Matrix<Real> *m) const;
/// Mutators for single component, supports float or double
/// Set mean for a single component - internally multiplies with inv(var)
template<class Real>
- void SetComponentMean(int32 gauss, const VectorBase<Real>& in);
+ void SetComponentMean(int32 gauss, const VectorBase<Real> &in);
/// Set inv-var for single component (recommend to do this before
/// setting the mean, if doing both, for numerical reasons).
template<class Real>
- void SetComponentInvVar(int32 gauss, const VectorBase<Real>& in);
+ void SetComponentInvVar(int32 gauss, const VectorBase<Real> &in);
/// Set weight for single component.
inline void SetComponentWeight(int32 gauss, BaseFloat weight);
/// Accessor for single component mean
template<class Real>
- void GetComponentMean(int32 gauss, VectorBase<Real>* out) const;
+ void GetComponentMean(int32 gauss, VectorBase<Real> *out) const;
/// Accessor for single component variance.
template<class Real>
- void GetComponentVariance(int32 gauss, VectorBase<Real>* out) const;
+ void GetComponentVariance(int32 gauss, VectorBase<Real> *out) const;
private:
/// Equals log(weight) - 0.5 * (log det(var) + mean*mean*inv(var))
diff --git a/src/gmm/full-gmm-inl.h b/src/gmm/full-gmm-inl.h
index 162eb820bf1f8a61e87a573da06901cba461b90a..d59a24ba4924aee3b9d167305f1071fbbe5b2325 100644 (file)
--- a/src/gmm/full-gmm-inl.h
+++ b/src/gmm/full-gmm-inl.h
namespace kaldi {
template<class Real>
-void FullGmm::SetWeights(const Vector<Real>& w) {
+void FullGmm::SetWeights(const Vector<Real> &w) {
KALDI_ASSERT(weights_.Dim() == w.Dim());
weights_.CopyFromVec(w);
valid_gconsts_ = false;
}
template<class Real>
-void FullGmm::SetMeans(const Matrix<Real>& m) {
+void FullGmm::SetMeans(const Matrix<Real> &m) {
KALDI_ASSERT(means_invcovars_.NumRows() == m.NumRows()
&& means_invcovars_.NumCols() == m.NumCols());
size_t num_comp = NumGauss();
template<class Real>
void FullGmm::SetInvCovarsAndMeans(
- const std::vector<SpMatrix<Real> >& invcovars, const Matrix<Real>& means) {
+ const std::vector<SpMatrix<Real> > &invcovars, const Matrix<Real> &means) {
KALDI_ASSERT(means_invcovars_.NumRows() == means.NumRows()
&& means_invcovars_.NumCols() == means.NumCols()
&& inv_covars_.size() == invcovars.size());
template<class Real>
void FullGmm::SetInvCovarsAndMeansInvCovars(
- const std::vector<SpMatrix<Real> >& invcovars,
- const Matrix<Real>& means_invcovars) {
+ const std::vector<SpMatrix<Real> > &invcovars,
+ const Matrix<Real> &means_invcovars) {
KALDI_ASSERT(means_invcovars_.NumRows() == means_invcovars.NumRows()
&& means_invcovars_.NumCols() == means_invcovars.NumCols()
&& inv_covars_.size() == invcovars.size());
}
template<class Real>
-void FullGmm::SetInvCovars(const std::vector<SpMatrix<Real> >& v) {
+void FullGmm::SetInvCovars(const std::vector<SpMatrix<Real> > &v) {
KALDI_ASSERT(inv_covars_.size() == v.size());
size_t num_comp = NumGauss();
}
template<class Real>
-void FullGmm::GetCovarsAndMeans(std::vector< SpMatrix<Real> >* covars,
+void FullGmm::GetCovarsAndMeans(std::vector< SpMatrix<Real> > *covars,
Matrix<Real> *means) const {
assert(covars != NULL && means != NULL);
size_t dim = Dim();
index 2de4bd35f5ca97279fdfbcd7225fda78ff8e2c52..f15389ff9f71e80d0f472c7a1b7fa6904edec10b 100644 (file)
--- a/src/gmm/full-gmm-test.cc
+++ b/src/gmm/full-gmm-test.cc
{ // I/O tests
bool binary_in;
- FullGmm* gmm2 = new FullGmm();
+ FullGmm *gmm2 = new FullGmm();
Input ki("tmpf", &binary_in);
gmm2->Read(ki.Stream(), binary_in);
delete gmm2;
// binary read
- FullGmm* gmm3;
+ FullGmm *gmm3;
gmm3 = new FullGmm();
Input ki2("tmpfb", &binary_in);
diff --git a/src/gmm/full-gmm.h b/src/gmm/full-gmm.h
index 3604fc78b9cb8856b4a26e955f8d446b87ae60bc..618502c36fb476a71dd52df7bfe90d67ddd38895 100644 (file)
--- a/src/gmm/full-gmm.h
+++ b/src/gmm/full-gmm.h
/// Outputs the per-component contributions to the
/// log-likelihood
void LogLikelihoods(const VectorBase<BaseFloat> &data,
- Vector<BaseFloat>* loglikes) const;
+ Vector<BaseFloat> *loglikes) const;
/// Outputs the per-component log-likelihoods of a subset
/// of mixture components. Note: indices.size() will
GmmFlagsType flags = kGmmAll);
/// Const accessors
- const Vector<BaseFloat>& gconsts() const { return gconsts_; }
- const Vector<BaseFloat>& weights() const { return weights_; }
- const Matrix<BaseFloat>& means_invcovars() const { return means_invcovars_; }
- const std::vector<SpMatrix<BaseFloat> >& inv_covars() const {
+ const Vector<BaseFloat> &gconsts() const { return gconsts_; }
+ const Vector<BaseFloat> &weights() const { return weights_; }
+ const Matrix<BaseFloat> &means_invcovars() const { return means_invcovars_; }
+ const std::vector<SpMatrix<BaseFloat> > &inv_covars() const {
return inv_covars_; }
/// Non-const accessors
- Matrix<BaseFloat>& means_invcovars() { return means_invcovars_; }
- std::vector<SpMatrix<BaseFloat> >& inv_covars() { return inv_covars_; }
+ Matrix<BaseFloat> &means_invcovars() { return means_invcovars_; }
+ std::vector<SpMatrix<BaseFloat> > &inv_covars() { return inv_covars_; }
/// Mutators for both float or double
template<class Real>
- void SetWeights(const Vector<Real>& w); ///< Set mixure weights
+ void SetWeights(const Vector<Real> &w); ///< Set mixure weights
/// Use SetMeans to update only the Gaussian means (and not variances)
template<class Real>
- void SetMeans(const Matrix<Real>& m);
+ void SetMeans(const Matrix<Real> &m);
/// Use SetInvCovarsAndMeans if updating both means and (inverse) covariances
template<class Real>
- void SetInvCovarsAndMeans(const std::vector<SpMatrix<Real> >& invcovars,
- const Matrix<Real>& means);
+ void SetInvCovarsAndMeans(const std::vector<SpMatrix<Real> > &invcovars,
+ const Matrix<Real> &means);
/// Use this if setting both, in the class's native format.
template<class Real>
- void SetInvCovarsAndMeansInvCovars(const std::vector<SpMatrix<Real> >& invcovars,
- const Matrix<Real>& means_invcovars);
+ void SetInvCovarsAndMeansInvCovars(const std::vector<SpMatrix<Real> > &invcovars,
+ const Matrix<Real> &means_invcovars);
/// Set the (inverse) covariances and recompute means_invcovars_
template<class Real>
- void SetInvCovars(const std::vector<SpMatrix<Real> >& v);
+ void SetInvCovars(const std::vector<SpMatrix<Real> > &v);
/// Accessor for covariances.
template<class Real>
- void GetCovars(std::vector<SpMatrix<Real> >* v) const;
+ void GetCovars(std::vector<SpMatrix<Real> > *v) const;
/// Accessor for means.
template<class Real>
void GetMeans(Matrix<Real> *m) const;
/// Accessor for covariances and means
template<class Real>
- void GetCovarsAndMeans(std::vector< SpMatrix<Real> >* covars,
+ void GetCovarsAndMeans(std::vector< SpMatrix<Real> > *covars,
Matrix<Real> *means) const;
/// Mutators for single component, supports float or double
index 30d287c58f8eb5b76b35ee2a96681fca59de8674..c0c4f0a0d5e0736da94ce42c5efb9da8ba5966bf 100644 (file)
}
BaseFloat AccumAmDiagGmm::AccumulateForGmm(
- const AmDiagGmm &model, const VectorBase<BaseFloat>& data,
+ const AmDiagGmm &model, const VectorBase<BaseFloat> &data,
int32 gmm_index, BaseFloat weight) {
KALDI_ASSERT(static_cast<size_t>(gmm_index) < gmm_accumulators_.size());
BaseFloat log_like =
BaseFloat AccumAmDiagGmm::AccumulateForGmmTwofeats(
const AmDiagGmm &model,
- const VectorBase<BaseFloat>& data1,
- const VectorBase<BaseFloat>& data2,
+ const VectorBase<BaseFloat> &data1,
+ const VectorBase<BaseFloat> &data2,
int32 gmm_index,
BaseFloat weight) {
KALDI_ASSERT(static_cast<size_t>(gmm_index) < gmm_accumulators_.size());
void AccumAmDiagGmm::AccumulateFromPosteriors(
- const AmDiagGmm &model, const VectorBase<BaseFloat>& data,
- int32 gmm_index, const VectorBase<BaseFloat>& posteriors) {
+ const AmDiagGmm &model, const VectorBase<BaseFloat> &data,
+ int32 gmm_index, const VectorBase<BaseFloat> &posteriors) {
KALDI_ASSERT(gmm_index >= 0 && gmm_index < NumAccs());
gmm_accumulators_[gmm_index]->AccumulateFromPosteriors(data, posteriors);
total_frames_ += posteriors.Sum();
}
void AccumAmDiagGmm::AccumulateForGaussian(
- const AmDiagGmm &am, const VectorBase<BaseFloat>& data,
+ const AmDiagGmm &am, const VectorBase<BaseFloat> &data,
int32 gmm_index, int32 gauss_index, BaseFloat weight) {
KALDI_ASSERT(gmm_index >= 0 && gmm_index < NumAccs());
KALDI_ASSERT(gauss_index >= 0
gmm_accumulators_[gmm_index]->AccumulateForComponent(data, gauss_index, weight);
}
-void AccumAmDiagGmm::Read(std::istream& in_stream, bool binary,
+void AccumAmDiagGmm::Read(std::istream &in_stream, bool binary,
bool add) {
int32 num_pdfs;
ExpectToken(in_stream, binary, "<NUMPDFS>");
}
}
-void AccumAmDiagGmm::Write(std::ostream& out_stream, bool binary) const {
+void AccumAmDiagGmm::Write(std::ostream &out_stream, bool binary) const {
int32 num_pdfs = gmm_accumulators_.size();
WriteToken(out_stream, binary, "<NUMPDFS>");
WriteBasicType(out_stream, binary, num_pdfs);
index ae5d451fa65679c4ab1af0e4c2468e729fa9d2cf..d3b678f37bf3fbaaa6e66f61d09eb8aa420b5b32 100644 (file)
/// Accumulate stats for a single GMM in the model; returns log likelihood.
/// This does not work with multiple feature transforms.
BaseFloat AccumulateForGmm(const AmDiagGmm &model,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
int32 gmm_index, BaseFloat weight);
/// Accumulate stats for a single GMM in the model; uses data1 for
/// getting posteriors and data2 for stats. Returns log likelihood.
BaseFloat AccumulateForGmmTwofeats(const AmDiagGmm &model,
- const VectorBase<BaseFloat>& data1,
- const VectorBase<BaseFloat>& data2,
+ const VectorBase<BaseFloat> &data1,
+ const VectorBase<BaseFloat> &data2,
int32 gmm_index, BaseFloat weight);
/// Accumulates stats for a single GMM in the model using pre-computed
/// Gaussian posteriors.
void AccumulateFromPosteriors(const AmDiagGmm &model,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
int32 gmm_index,
- const VectorBase<BaseFloat>& posteriors);
+ const VectorBase<BaseFloat> &posteriors);
/// Accumulate stats for a single Gaussian component in the model.
void AccumulateForGaussian(const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
int32 gmm_index, int32 gauss_index,
BaseFloat weight);
index 53fea87243505b85891367203fcd79e6721bc767..f27bb963ddaddb8f2899c48946ec00e0813e7083 100644 (file)
--- a/src/gmm/mle-diag-gmm.cc
+++ b/src/gmm/mle-diag-gmm.cc
if (flags & kGmmVariances) variance_accumulator_.Scale(d);
}
-void AccumDiagGmm::AccumulateForComponent(const VectorBase<BaseFloat>& data,
+void AccumDiagGmm::AccumulateForComponent(const VectorBase<BaseFloat> &data,
int32 comp_index, BaseFloat weight) {
if (flags_ & kGmmMeans)
KALDI_ASSERT(data.Dim() == Dim());
void AccumDiagGmm::AccumulateFromPosteriors(
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& posteriors) {
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &posteriors) {
if (flags_ & kGmmMeans)
KALDI_ASSERT(static_cast<int32>(data.Dim()) == Dim());
KALDI_ASSERT(static_cast<int32>(posteriors.Dim()) == NumGauss());
}
BaseFloat AccumDiagGmm::AccumulateFromDiag(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat frame_posterior) {
KALDI_ASSERT(gmm.NumGauss() == NumGauss());
KALDI_ASSERT(gmm.Dim() == Dim());
// to each Gaussian in this acc.
// Careful: this wouldn't be valid if it were used to update the
// Gaussian weights.
-void AccumDiagGmm::SmoothWithAccum(BaseFloat tau, const AccumDiagGmm& src_acc) {
+void AccumDiagGmm::SmoothWithAccum(BaseFloat tau, const AccumDiagGmm &src_acc) {
KALDI_ASSERT(src_acc.NumGauss() == num_comp_ && src_acc.Dim() == dim_);
for (int32 i = 0; i < num_comp_; i++) {
if (src_acc.occupancy_(i) != 0.0) { // can only smooth if src was nonzero...
@@ -232,7 +232,7 @@ void AccumDiagGmm::SmoothWithAccum(BaseFloat tau, const AccumDiagGmm& src_acc) {
}
-void AccumDiagGmm::SmoothWithModel(BaseFloat tau, const DiagGmm& gmm) {
+void AccumDiagGmm::SmoothWithModel(BaseFloat tau, const DiagGmm &gmm) {
KALDI_ASSERT(gmm.NumGauss() == num_comp_ && gmm.Dim() == dim_);
Matrix<double> means(num_comp_, dim_);
Matrix<double> vars(num_comp_, dim_);
return ans;
}
-BaseFloat MlObjective(const DiagGmm& gmm,
+BaseFloat MlObjective(const DiagGmm &gmm,
const AccumDiagGmm &diaggmm_acc) {
GmmFlagsType acc_flags = diaggmm_acc.Flags();
Vector<BaseFloat> occ_bf(diaggmm_acc.occupancy());
diff --git a/src/gmm/mle-diag-gmm.h b/src/gmm/mle-diag-gmm.h
index b28bf63a17fc57aa878bf1694c43208f2ab9b8cb..42f4ea99db4ab0917aee8dfbf0a2acbb3f3ca212 100644 (file)
--- a/src/gmm/mle-diag-gmm.h
+++ b/src/gmm/mle-diag-gmm.h
void Scale(BaseFloat f, GmmFlagsType flags);
/// Accumulate for a single component, given the posterior
- void AccumulateForComponent(const VectorBase<BaseFloat>& data,
+ void AccumulateForComponent(const VectorBase<BaseFloat> &data,
int32 comp_index, BaseFloat weight);
/// Accumulate for all components, given the posteriors.
- void AccumulateFromPosteriors(const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& gauss_posteriors);
+ void AccumulateFromPosteriors(const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &gauss_posteriors);
/// Accumulate for all components given a diagonal-covariance GMM.
/// Computes posteriors and returns log-likelihood
BaseFloat AccumulateFromDiag(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat frame_posterior);
/// Increment the stats for this component by the specified amount
/// weighted sum of the current accumulator with the given one. An example use
/// for this is I-smoothing for MMI and MPE. Both accumulators must have the
/// same dimension and number of components.
- void SmoothWithAccum(BaseFloat tau, const AccumDiagGmm& src_acc);
+ void SmoothWithAccum(BaseFloat tau, const AccumDiagGmm &src_acc);
/// Smooths the accumulated counts using the parameters of a given model.
/// An example use of this is MAP-adaptation. The model must have the
/// same dimension and number of components as the current accumulator.
- void SmoothWithModel(BaseFloat tau, const DiagGmm& src_gmm);
+ void SmoothWithModel(BaseFloat tau, const DiagGmm &src_gmm);
// Const accessors
const GmmFlagsType Flags() const { return flags_; }
- const VectorBase<double>& occupancy() const { return occupancy_; }
- const MatrixBase<double>& mean_accumulator() const { return mean_accumulator_; }
- const MatrixBase<double>& variance_accumulator() const { return variance_accumulator_; }
+ const VectorBase<double> &occupancy() const { return occupancy_; }
+ const MatrixBase<double> &mean_accumulator() const { return mean_accumulator_; }
+ const MatrixBase<double> &variance_accumulator() const { return variance_accumulator_; }
private:
int32 dim_;
BaseFloat *count_out);
/// Calc using the DiagGMM exponential form
-BaseFloat MlObjective(const DiagGmm& gmm,
+BaseFloat MlObjective(const DiagGmm &gmm,
const AccumDiagGmm &diaggmm_acc);
int32 FloorVariance(const VectorBase<BaseFloat> &variance_floor_vector,
index 5a1b5dd26aabe7e429fe1ca6d60bcfe8bf2f2e85..0c1ad2aaedeed1ad0615f73f11570c75592e9cbc 100644 (file)
--- a/src/gmm/mle-full-gmm.cc
+++ b/src/gmm/mle-full-gmm.cc
}
void AccumFullGmm::AccumulateForComponent(
- const VectorBase<BaseFloat>& data, int32 comp_index, BaseFloat weight) {
+ const VectorBase<BaseFloat> &data, int32 comp_index, BaseFloat weight) {
assert(data.Dim() == Dim());
double wt = static_cast<double>(weight);
}
void AccumFullGmm::AccumulateFromPosteriors(
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& gauss_posteriors) {
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &gauss_posteriors) {
assert(gauss_posteriors.Dim() == NumGauss());
assert(data.Dim() == Dim());
Vector<double> data_d(data.Dim());
}
BaseFloat AccumFullGmm::AccumulateFromFull(const FullGmm &gmm,
- const VectorBase<BaseFloat>& data, BaseFloat frame_posterior) {
+ const VectorBase<BaseFloat> &data, BaseFloat frame_posterior) {
assert(gmm.NumGauss() == NumGauss());
assert(gmm.Dim() == Dim());
}
BaseFloat AccumFullGmm::AccumulateFromDiag(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data, BaseFloat frame_posterior) {
+ const VectorBase<BaseFloat> &data, BaseFloat frame_posterior) {
assert(gmm.NumGauss() == NumGauss());
assert(gmm.Dim() == Dim());
WriteToken(out_stream, binary, "</GMMACCS>");
}
-BaseFloat MlObjective(const FullGmm& gmm, const AccumFullGmm &fullgmm_acc) {
+BaseFloat MlObjective(const FullGmm &gmm, const AccumFullGmm &fullgmm_acc) {
GmmFlagsType flags = fullgmm_acc.Flags();
Vector<BaseFloat> occ_bf(fullgmm_acc.occupancy());
Matrix<BaseFloat> mean_accs_bf(fullgmm_acc.mean_accumulator());
diff --git a/src/gmm/mle-full-gmm.h b/src/gmm/mle-full-gmm.h
index bd215e8cd4989f18a24efbfc1e7d6e23a4c2401a..41b37a71295594c19431a74f2fb1fb2850034133 100644 (file)
--- a/src/gmm/mle-full-gmm.h
+++ b/src/gmm/mle-full-gmm.h
void Scale(BaseFloat f, GmmFlagsType flags); // scale stats.
/// Accumulate for a single component, given the posterior
- void AccumulateForComponent(const VectorBase<BaseFloat>& data,
+ void AccumulateForComponent(const VectorBase<BaseFloat> &data,
int32 comp_index, BaseFloat weight);
/// Accumulate for all components, given the posteriors.
- void AccumulateFromPosteriors(const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& gauss_posteriors);
+ void AccumulateFromPosteriors(const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &gauss_posteriors);
/// Accumulate for all components given a full-covariance GMM.
/// Computes posteriors and returns log-likelihood
BaseFloat AccumulateFromFull(const FullGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat frame_posterior);
/// Accumulate for all components given a diagonal-covariance GMM.
/// Computes posteriors and returns log-likelihood
BaseFloat AccumulateFromDiag(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat frame_posterior);
/// Accessors
const GmmFlagsType Flags() const { return flags_; }
- const Vector<double>& occupancy() const { return occupancy_; }
- const Matrix<double>& mean_accumulator() const { return mean_accumulator_; }
- const std::vector<SpMatrix<double> >& covariance_accumulator() const { return covariance_accumulator_; }
+ const Vector<double> &occupancy() const { return occupancy_; }
+ const Matrix<double> &mean_accumulator() const { return mean_accumulator_; }
+ const std::vector<SpMatrix<double> > &covariance_accumulator() const { return covariance_accumulator_; }
private:
int32 dim_;
BaseFloat *count_out);
/// Calc using the DiagGMM exponential form
-BaseFloat MlObjective(const FullGmm& gmm,
+BaseFloat MlObjective(const FullGmm &gmm,
const AccumFullGmm &fullgmm_acc);
} // End namespace kaldi
index 0ee66dee6e13f51078a4342392beda6ca18266bd..098b7b02f8db1ed72d4af1142cb20d1303a80556 100644 (file)
KALDI_LOG << "Written accs.";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 75c076a1df8a439258d1bba307c9eb73715d94a9..ff96f0717219b07a927eacec3603bca09462fdec 100644 (file)
KALDI_LOG << "Written accs.";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 22e958c4e7204c8536de7a6e157626ccd5e6bc57..d8c5a7bba78b3c57239ad88e56e81fb47705fd85 100644 (file)
return 0;
else
return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c911fe9e8a693da0cec7ae94360dc8a623cb70de..d32a4d194f67ddf69d10981cee99af25fc47dc18 100644 (file)
KALDI_LOG << "Written accs.";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b41be17be11173a266b90f9b94f2949302db54c6..66506d774024205757e756dc78369ca888b3c3d1 100644 (file)
KALDI_LOG << "Written accs.";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index ffc78a77013acbf16573ff584a91647a1e0b1356..bc569f0c065ac752e846dd27fd66b6073a2aaed6 100644 (file)
}
KALDI_LOG << "Written accs.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/gmmbin/gmm-align-compiled-plusphones.cc b/src/gmmbin/gmm-align-compiled-plusphones.cc
index 0983c8f1c46962634da4d6ac44cc7d34f045d2a7..f802ea26b32a92e5bb9869a2f8dd704e378a7853 100644 (file)
<< num_no_feat << ", other errors on " << num_other_error;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 44127bf47562d3f7a17dbf0e48260b855a26109c..dde574a765e9bf996736b83302f2dae73c6a4818 100644 (file)
<< num_no_feat << ", other errors on " << num_other_error;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9269bb0e3f14a5beeae6eb9221338164d0ecdc20..c23d345460426fe0ee41d38fb5a3c09868c13945 100644 (file)
--- a/src/gmmbin/gmm-align.cc
+++ b/src/gmmbin/gmm-align.cc
<< num_no_transcript << ", other errors on " << num_other_error;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b596f7cabe50176228a32ef25786a8f27d0171d1..9e397f061fd9b07b2f9411ff3b54257f2b7bb3f7 100644 (file)
}
KALDI_LOG << "Wrote model to " << model_wxfilename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 0678c548d0c1407202e197455c0801bb6184ca88..7e3625c76bc029939cfb76fa52b8c551e0735323 100644 (file)
KALDI_LOG << "gmm-compute-likes: computed likelihoods for " << num_done
<< " utterances.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 5382e00bbc99e65e3d307ddf2c4012a3b4ba12e6..29d8ea137e548bb83c85a0f5fdcb478fa413ad88 100644 (file)
Output ko(et_wxfilename, binary);
et.Write(ko.Stream(), binary);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/gmmbin/gmm-copy.cc b/src/gmmbin/gmm-copy.cc
index fd9d1149ce489e3e213e156c33c284426eebadc9..03d2242d57e807cc11de2fc6290d32c267debd2f 100644 (file)
--- a/src/gmmbin/gmm-copy.cc
+++ b/src/gmmbin/gmm-copy.cc
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 362bde2524dffd3d8d0c3e734d98a3c12262e9f0..41aa1e3a9167ec50032a0d12c696150123a7a592 100644 (file)
delete old_lm_fst;
delete new_lm_fst;
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/gmmbin/gmm-decode-faster-regtree-fmllr.cc b/src/gmmbin/gmm-decode-faster-regtree-fmllr.cc
index ce59e2afa18b99304201aa083c158ef7a0128c57..9869f9d6e9e9e4f946fe43f67f85c939e7e923b0 100644 (file)
else
return 1;
}
- catch(const std::exception& e) {
+ catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/gmmbin/gmm-decode-faster-regtree-mllr.cc b/src/gmmbin/gmm-decode-faster-regtree-mllr.cc
index 68c12aca1136572c38200dd1ab98abf120d0ebbb..cccf9ef3bc2e117e5e26ac56334cafdece223adb 100644 (file)
}
// If found, load the transforms for the current utterance.
- const RegtreeMllrDiagGmm& mllr = mllr_reader.Value(utt_or_spk);
+ const RegtreeMllrDiagGmm &mllr = mllr_reader.Value(utt_or_spk);
kaldi::DecodableAmDiagGmmRegtreeMllr gmm_decodable(am_gmm, trans_model,
features, mllr,
regtree,
else
return 1;
}
- catch(const std::exception& e) {
+ catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 4e2c2314335293c6d2de763fc5522000cc4ed503..66dd1df9b093441f67267a4a9da0b8cef00fd477 100644 (file)
if (word_syms) delete word_syms;
delete decode_fst;
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9d2756440d699832dcb8e4c1fc46deb9622627f9..c24ff761f4b910297016998a92b0c6669b6145ca 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 672bcbcecfa6dad78d681c5d47c2dce21fa38f3f..17e4845ec1b1efadeebe8df981f5e114a0d4a0f6 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 865412474c1f811125218c287a0e836288120fb2..d6ffcddedb88be19b3387c92248d80c506fd30f7 100644 (file)
--- a/src/gmmbin/gmm-est-et.cc
+++ b/src/gmmbin/gmm-est-et.cc
<< (tot_objf_impr / tot_count) << " over " << tot_count
<< " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8fee8769514b5a64e95934e58bc2c577845355e7..9c7e5d29a090b4fdc9ab10599028e6a61dffe7b3 100644 (file)
KALDI_LOG << "Overall fMLLR auxf impr per frame is "
<< (tot_impr / tot_t) << " over " << tot_t << " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f04d18011cecc9311a7f980e9fb5dbaf9cfcf339..9255d082ad3055b6374e2c56604718d6dd3f9113 100644 (file)
KALDI_LOG << "Overall fMLLR auxf impr per frame is "
<< (tot_impr / tot_t) << " over " << tot_t << " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 08e86527d421f6c64de35d594f501ac62c6a5a16..3e9670c2740fe09c42882594c79983b737d97ede 100644 (file)
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index fc1ec43128005c9abe2882010a570be1f3fb49a9..b5ee951437af85625cca87a8b5a407d2bdb4ca40 100644 (file)
am_gmm.Write(ko.Stream(), binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 4bae949c42d0e9380dc9820f6551be6943787bfc..7fff09c119c18cba1c18bcb1995254485f57006a 100644 (file)
KALDI_LOG << "Overall LVTLN auxf impr per frame is "
<< (tot_lvtln_impr / tot_t) << " over " << tot_t << " frames.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 864f3b3fe323ec6663464a3ecb72d3d90933bf25..5acffdb60357e9dee370d2bdf7259db7dbc984b6 100644 (file)
KALDI_LOG << "Overall acoustic like per frame = " << (tot_like / tot_t)
<< " over " << tot_t << " frames.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 1aa4cd81b1c916946535761a6d64398e104554d6..b2315305ae8af353809103acaf70a29a30a3ecf9 100644 (file)
KALDI_LOG << "Overall acoustic likelihood was " << (tot_like/tot_t)
<< " over " << tot_t << " frames.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 16857ea49afd56eb1d9b6294a2d38b9230627478..84639f64b2bd6f574b04b9cd5993d6dd00c594c5 100644 (file)
KALDI_LOG << "Overall acoustic likelihood was " << (tot_like/tot_t)
<< " over " << tot_t << " frames.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index da69e1817b1a268aa03e3cb0c5bbe06ec57513ba..5216cc2ae48b9a85dfa8060f836eee4b625f52b7 100644 (file)
KALDI_LOG << "Rescaled model and wrote to " << model_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 8941a6a2216133b6149e722c6eedc253e40f8296..8461c7e1c77967f910bfd83e1f55aa312ae7c06b 100644 (file)
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
diff --git a/src/gmmbin/gmm-est.cc b/src/gmmbin/gmm-est.cc
index ff4a4b96424a115b3e17bb0ce803e038f893f4fd..9d020c6fb680047f61ea521e6261678d3cf96675 100644 (file)
--- a/src/gmmbin/gmm-est.cc
+++ b/src/gmmbin/gmm-est.cc
KALDI_LOG << "Written model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index a98ba98e32357126a82a82fa622256dbe6ef754c..a12c08bd48ef819821b30d0f026e23dce7395210 100644 (file)
accs_a.Write(ko.Stream(), binary);
KALDI_LOG << "Written accs.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c3c447c92737d382e835e2cca380d03b7ced3f14..34ab642b062e8348555bde04d0a7ad50fa7ecbb2 100644 (file)
et.Write(ko.Stream(), binary);
KALDI_LOG << "Applied C transform and wrote ET object.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 37588f47c3ba4435e8181ccf1d545b2891dddf1e..478128391b886c2c596406b8e037d57cfc588e87 100644 (file)
WriteKaldiObject(et, et_wxfilename, binary);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 23d5213338e9b8c768ce0a7cbe55ef3ddc9df334..b3e261efef5819cbc284a6d7246de658f7e9bca0 100644 (file)
A.Write(ko.Stream(), binary);
KALDI_LOG << "Wrote A to " << a_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 3ffe30e500866ed6682fda894e0c3ff4ed06e235..c4b0941f7f89ae8db8d4968e19eea7768bff4362 100644 (file)
KALDI_LOG << "Wrote ET to " << et_wxfilename;
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 98e4e3f636b8b11a297bd3c2760107df5aa3839b..c6ce14450fab9e59bfc097072b77af87e72c9fe0 100644 (file)
fmpe_stats.Write(ko.Stream(), binary);
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index cc680ac633229738068ef16fb8f1b1603b65bdfc..b121adc4f143a800fcf38954b432165d19bcc3d3 100644 (file)
<< " with errors.";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index dcabecd02445226ab98d18e1c22d2ecb3fd87f28..0d69788faa5db62942cea8ac82be0d1fda03b4f9 100644 (file)
<< deriv_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
diff --git a/src/gmmbin/gmm-global-acc-stats-twofeats.cc b/src/gmmbin/gmm-global-acc-stats-twofeats.cc
index b668284681a142b7f16db386b665e0fff6397ad0..42cd4f55b043df8450064c5fc0ef5efd82ceab9a 100644 (file)
WriteKaldiObject(gmm_accs, accs_wxfilename, binary);
KALDI_LOG << "Written accs to " << accs_wxfilename;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6cb77fdd06e0883df4a93251195614b807c9a00c..1d180e5a36aaa7a07c65fb8d94fd7095dc77b2c4 100644 (file)
WriteKaldiObject(gmm_accs, accs_wxfilename, binary);
KALDI_LOG << "Written accs to " << accs_wxfilename;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b11099cadc89e52dbc43eaba4823334d58e89b10..2abad38b5894c4043cc722f1a743cc55e59d7e7c 100644 (file)
WriteKaldiObject(gmm, model_out_filename, binary_write);
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 14162f49e3b3b4e6d9c93979da98d166862bde9c..46f79ddb27bc9d70d1587d0b6e5a07855fc8f882 100644 (file)
KALDI_LOG << "Overall fMLLR auxf impr per frame is "
<< (tot_impr / tot_t) << " over " << tot_t << " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 516d717bd3a7a1517fe1b4e162f647870cbd3d4c..43f4014c7f18a8c0b8e03d3eac891613bea776ca 100644 (file)
WriteKaldiObject(gmm, model_out_filename, binary_write);
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index d7e24df1d98f838785a1544a3d38e3c844e3649e..6e88a7222b9b241c2a18029e98ec10807a63c002 100644 (file)
<< "frame = " << (tot_like/tot_frames) << " over " << tot_frames
<< " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 173955dc4cab4a424ff2d157408b0e9a318512d9..15f10bc1d7d70a25bd168c72d668bf315120d178 100644 (file)
}
KALDI_LOG << "Written stats to " << stats_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index d362dacf89f6da43c988ff03a493dcae94663c3c..ef4d0a680d91bdae4848ce336ca81a21045ed3da 100644 (file)
fgmm.CopyFromDiagGmm(gmm);
WriteKaldiObject(fgmm, fgmm_wxfilename, binary);
KALDI_LOG << "Written full GMM to " << fgmm_wxfilename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 952bff64bdfac8ad75b9a04c01958fb6cd78d2f5..540c84182c4e20c733cc830e5cc34b74dfc3c254 100644 (file)
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/gmmbin/gmm-info.cc b/src/gmmbin/gmm-info.cc
index 74c0c8cd76f07783dba9ad541187d233c728fd0a..5f50aa90a047768f5a532034b74841d4da0675a4 100644 (file)
--- a/src/gmmbin/gmm-info.cc
+++ b/src/gmmbin/gmm-info.cc
<< trans_model.NumTransitionStates() << '\n';
std::cout << "feature dimension " << am_gmm.Dim() << '\n';
std::cout << "number of gaussians " << am_gmm.NumGauss() << '\n';
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index c501a0703094ce55c8502e6348243ce26a99cda4..61132f184306625b420c8bbdca9d071f135c314f 100644 (file)
Output ko(et_wxfilename, binary);
et.Write(ko.Stream(), binary);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 08b5554ec1ac3e7e07c22ad766afe8e734c18b6d..5ff9f083b92f1ba3dea74ed19b0c58654b145bc4 100644 (file)
WriteKaldiObject(lvtln, lvtln_wxfilename, binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 28eeed1aef2564ffac21f7615cf03181669fafd0..f1bef43795db65b3b89968187db9d6cc2bde5cc2 100644 (file)
am_gmm.Write(ko.Stream(), binary);
}
KALDI_LOG << "Wrote model.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 88c9241dd92ee1d8a6623fa49eedb0ae175139aa..2234a9a274b28e6dc0665b51007638395f8c0b74 100644 (file)
KALDI_LOG << "Wrote model.";
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a1f16e7ada4087afbd223974838ab42d1d65b127..374ba5d551484cc78c76aa366435b234772553b2 100644 (file)
delete ctx_dep;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f6cd65c0a65da5ab05d2187450ca73ce236ce1a8..f22cfb7500549ed3baae11acf626cc55eefa9d0d 100644 (file)
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 4a13db2ea953dbf5b7d440528ea7cee60a9d199d..2d4d070ba23f492b0c4ea013b13cd3cb36e80d06 100644 (file)
dst_stats.Write(ko.Stream(), binary_write);
}
KALDI_LOG << "Smoothed stats with tau = " << tau;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index a2f92cfe5ec361a757a8f7bb3e40922106282858..4f3aa4cf411504fe8cbf609f4ac66db540f49ba7 100644 (file)
VectorFst<StdArc> *new_lm_fst = ReadFstKaldi(new_lm_fst_rxfilename);
- fst::DeterministicOnDemandFst<StdArc>* lm_diff_fst =
+ fst::DeterministicOnDemandFst<StdArc> *lm_diff_fst =
new fst::DeterministicOnDemandFst<StdArc>(*old_lm_fst, *new_lm_fst);
bool determinize = config.determinize_lattice;
if (word_syms) delete word_syms;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2d01c24ca5c3445cb0756a3ed68442c8211fdb49..f88ea2acf571522d134ecca9a877fb563b136fbf 100644 (file)
if (word_syms) delete word_syms;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a13e318fe6c276d3cf000048b943be5380a863d7..b93bd77bc378e5fda460c74b2a91bbc7a1fac87d 100644 (file)
if (word_syms) delete word_syms;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 64cbf0223d7eedc0bf7c531233fc1e76938290f9..6964b645ca558c48bfca1386c8a35f55dcbf3ddf 100644 (file)
}
KALDI_LOG << "Written regression tree to " << tree_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 6ac4604e57409eec0ad108f2df081ec8fe7dd6fe..55944eb7dde79b3555d049595fb7d631ecf529b9 100644 (file)
--- a/src/gmmbin/gmm-mixup.cc
+++ b/src/gmmbin/gmm-mixup.cc
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 9411ad736bfec52202c638aa3b841fb11610dc44..10130e1dad66dee5accd715b2f0f7b3fbc02d997 100644 (file)
KALDI_LOG << "Done converting post to gpost";
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8169f658782b4be01bc8ef27fdca00ba63f4ae0d..1a0b912b13c116ab1d25be4276a1e02816ee9ac2 100644 (file)
namespace kaldi {
-void LatticeAcousticRescore(const AmDiagGmm& am,
- const TransitionModel& trans_model,
- const MatrixBase<BaseFloat>& data,
+void LatticeAcousticRescore(const AmDiagGmm &am,
+ const TransitionModel &trans_model,
+ const MatrixBase<BaseFloat> &data,
const std::vector<int32> state_times,
Lattice *lat) {
kaldi::uint64 props = lat->Properties(fst::kFstProperties, false);
KALDI_LOG << "Done " << num_done << " lattices, #frames is " << num_frames;
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index aa281707ca9419aa8e8388b2df3da20430c8bff0..8044458abae10dc50a3093158630762900bd50be 100644 (file)
gmm_accs.Write(ko.Stream(), binary);
}
KALDI_LOG << "Scaled accs with scale " << scale;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 79f7bc61d3a92f142425d1a8a396db2fc87498bb..c173bbd4174e8d7c8e3a071a9252c02536c62b5c 100644 (file)
<< gmm_accs.TotCount() << ", avg like/frame "
<< (gmm_accs.TotLogLike() / gmm_accs.TotCount());
KALDI_LOG << "Written stats to " << stats_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 75d2176c3be1c371f94ee85e5fec2a4cb0378735..2a362400f1b77a20c18c053a588d03cfb952ad39 100644 (file)
lvtln.Write(ko.Stream(), binary);
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index cf8c927ed6efe3eacd85309b0958491a66960e4e..4c14664f2a47d92f9d6a117c0f9a8f1055cd1696 100644 (file)
}
KALDI_LOG << "Written model to " << model_out_wxfilename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index e2df88abc8fffefa41f7a312a9194d1c12a67a4c..826cc4bb26f784e18ad81100606ac930ab4d5df2 100644 (file)
else if (phone > other.phone) return false;
else if (hmm_state < other.hmm_state) return true;
else if (hmm_state > other.hmm_state) return false;
- else return (pdf < other.pdf);
+ else return pdf < other.pdf;
}
bool operator == (const Triple &other) const {
return (phone == other.phone && hmm_state == other.hmm_state
index a10256a0becad2351288649b04d3db4d665a0ee0..3aa4d0eada0685291262ae335b803fe46a3cfdc7 100644 (file)
int32 cur_time = (*times)[state];
for (fst::ArcIterator<Lattice> aiter(lat, state); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.ilabel != 0) { // Non-epsilon input label on arc
// next time instance
int32 cur_time = (*times)[state];
for (fst::ArcIterator<CompactLattice> aiter(lat, state); !aiter.Done();
aiter.Next()) {
- const CompactLatticeArc& arc = aiter.Value();
+ const CompactLatticeArc &arc = aiter.Value();
int32 arc_len = static_cast<int32>(arc.weight.String().size());
if ((*times)[arc.nextstate] == -1)
(*times)[arc.nextstate] = cur_time + arc_len;
int32 cur_time = state_times[state];
for (fst::ArcIterator<Lattice> aiter(lat, state); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.ilabel != 0) { // Non-epsilon arc
int32 phone = trans.TransitionIdToPhone(arc.ilabel);
if (!std::binary_search(silence_phones.begin(),
@@ -291,7 +291,7 @@ int32 LatticePhoneFrameAccuracy(const Lattice &hyp, const TransitionModel &trans
int32 cur_time = state_times_hyp[state];
for (fst::ArcIterator<Lattice> aiter(hyp, state); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.ilabel != 0) { // Non-epsilon arc
int32 phone = trans.TransitionIdToPhone(arc.ilabel);
(*arc_accs)[cur_time][phone] =
vector<double> *state_alphas) {
for (fst::ArcIterator<Lattice> aiter(lat, state); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
double graph_score = arc.weight.Value1(),
am_score = arc.weight.Value2(),
arc_loglike = (*state_alphas)[state] - am_score - graph_score;
if ((*st_it) < state) {
for (fst::ArcIterator<Lattice> aiter(lat, (*st_it)); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.nextstate == state) {
KALDI_ASSERT(arc.ilabel == 0);
double arc_loglike = (*state_betas)[state] - arc.weight.Value1()
st_it != active_states[prev_time].end(); ++st_it) {
for (fst::ArcIterator<Lattice> aiter(lat, (*st_it)); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.nextstate == state) {
int32 key = arc.ilabel;
KALDI_ASSERT(key != 0);
vector<double> *state_alphas_mpe) {
for (fst::ArcIterator<Lattice> aiter(lat, state); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
double graph_score = arc.weight.Value1(),
am_score = arc.weight.Value2(),
arc_loglike = am_score + graph_score;
if ((*st_it) < state) {
for (fst::ArcIterator<Lattice> aiter(lat, (*st_it)); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.nextstate == state) {
KALDI_ASSERT(arc.ilabel == 0);
double arc_loglike = arc.weight.Value1() + arc.weight.Value2();
st_it != active_states[prev_time].end(); ++st_it) {
for (fst::ArcIterator<Lattice> aiter(lat, (*st_it)); !aiter.Done();
aiter.Next()) {
- const LatticeArc& arc = aiter.Value();
+ const LatticeArc &arc = aiter.Value();
if (arc.nextstate == state) {
int32 key = arc.ilabel;
KALDI_ASSERT(key != 0);
diff --git a/src/lat/sausages.h b/src/lat/sausages.h
index b329408ae8807d4986726ff7102bfec4d36e0439..99c6b952e1aa7bd2dfad0d33d715be9288c97e2b 100644 (file)
--- a/src/lat/sausages.h
+++ b/src/lat/sausages.h
const std::pair<int32, BaseFloat> &b) const {
if (a.second > b.second) return true;
else if (a.second < b.second) return false;
- else return (a.first > b.first);
+ else return a.first > b.first;
}
};
};
index 7753367a4eef5bd5e8bd739d90491efeace7e925..b569158d7e60a5a7dab775d59bbe625082fec031 100644 (file)
KALDI_LOG << "Done converting " << n_done << " to best path, "
<< n_err << " had errors.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 145bdc424980f742839e303f4cbeef576d65949c..a4cb824a366078d3af608e6dbb964e08003b9903 100644 (file)
}
KALDI_LOG << "Done adding transition probabilities to " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 67dd03e9305c03708e39f28ee0c3412542e89ef8..fe2c9d4e3bdc372fac5750051787bc84f400f91f 100644 (file)
KALDI_LOG << "Successfully aligned " << num_done << " lattices; "
<< num_err << " had errors.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6de3244debb9d4a0012f74da9031c5c3ce721d7c..e0e35d738a12a72e45f7f06368f134a6c8dd1d6e 100644 (file)
if (word_syms) delete word_syms;
if (n_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 5a3d72d404170fdb4f6ef7e046cd987130f73749..81fe416c13a4281cd373e080575f5b48a9723e13 100644 (file)
KALDI_LOG << "Done " << n_done << " lattices, missing alignments for "
<< n_no_ali << ", other errors on " << n_err;
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8ff735822a7735befc0fe5a2584a7b3f1b7d0538..5057243333b1041ffba2f3088285821316cbb7c8 100644 (file)
KALDI_LOG << "Done " << n_done << " lattices; failed for "
<< n_fail;
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index d9c8e2cef2220c40a841d0602dc0ea6b5880681b..0c78b290351d2da1a3f9a88b3430ba70132c9898 100644 (file)
KALDI_LOG << "Done copying " << n_done << " lattices; backed off to 1st "
<< "archive for " << n_backed_off << " of those.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 20bc4c0b0d8e1e8a1c05cd215afaf2b0d7fe347b..42a00c533c8307f0eac07f0f1973d26d4b9750a3 100644 (file)
}
KALDI_LOG << "Done copying " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 7741562ea16427e0f3a27c43f6b4ba1291a8e32c..e14a6bbef9f7b135c91dd626422abc8e581e3c44 100644 (file)
<< "earlier than specified by the beam on " << n_warn
<< " of these.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c8e1e97bd5aa42d8e4c5e827af72fa2f32606f01..4396a674375853548b71b0955aaaf8b832884552 100644 (file)
KALDI_LOG << "Done " << n_done << " lattices, errors on " << n_error;
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index de9ab1ce1d36a3824497fe8ca7c0a640b528255a..d5b63d837b20cdc029526c0098993e8067f64166 100644 (file)
<< " lattices had empty difference; "
<< n_no_lat << " missing lattices in second archive ";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 3736523dc3a22d1e1677d889749915c1cdb08e94..dda59a1178322215feebc9e2cfedf3b4f694324b 100644 (file)
<< " were not; for " << n_no2nd << ", could not find 2nd lattice.";
return (n_inequivalent == 0);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 5e08c2330ca7c0dbee85e69e06aa245f92571924..cb422e693cf83719b377bc1432792b2721b82a5f 100644 (file)
<< " had empty composition; in " << n_no_2ndlat
<< ", had empty second lattice.";
return (n_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index d12b1f4f68f9acf7dcba64a60f147f40bde3256f..dded845ced01eb95e35297530db1b33177268ee5 100644 (file)
KALDI_LOG << "Done " << n_done << " lattices, failed for " << n_fail;
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b8331b569fcc6e866f0e16a04953fde42801f43a..77fed874167eb27e5d16abf17b0b7dfc31c724b0 100644 (file)
if (word_syms) delete word_syms;
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9ba7988b80390a06cacbea72b82c42a352f73760..100f57b0b543728b49991baf221bd3dd5ce98111 100644 (file)
// convert from Lattice to standard FST
// also maps wildcard symbols to epsilons
// then removes epsilons
-void ConvertLatticeToUnweightedAcceptor(const kaldi::Lattice& ilat,
+void ConvertLatticeToUnweightedAcceptor(const kaldi::Lattice &ilat,
const LabelSet &wildcards,
fst::StdVectorFst *ofst) {
// first convert from lattice to normal FST
unsigned int tot_errs = tot_subs + tot_del + tot_ins;
KALDI_LOG << "Overall %WER "<<(100.*tot_errs)/tot_words<<" [ "<<tot_errs<<" / "<<tot_words<<", "<<tot_ins<<" ins, "<<tot_del<<" del, "<<tot_subs<<" sub ]";
KALDI_LOG << "Scored " << n_done << " lattices, "<<n_fail<<" not present in hyp.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 5f8f21c83147cc284e50b0335495995d81720e04..035c3c7f69f0cdc3220e3423f4f7f2e2ed8d7092 100644 (file)
}
KALDI_LOG << "Done projecting " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index d7ebc5b4932fe14644d9e082552bc058579ece4a..a862c47a973d299a10e6860f9f90c30e4194b45b 100644 (file)
<< " utterances.";
KALDI_LOG << "Done " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a537a58bb47f3e584f792ec0f41da0dc40836339..925fd324673d46d1b7b379d42d44a8d3fbbef3d6 100644 (file)
}
KALDI_LOG << "Done removing alignments from " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b63c308f4578d29d36a4b2a7294402b43eb7f956..ce65bd40f6949f6cca64e8fd2054bdfc7457b0e6 100644 (file)
}
KALDI_LOG << "Done " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9c22d56faf96446d42288997537fa7e7b45cef6b..6d9b66e88ba600d3a666b571e88238a0b480dd8c 100644 (file)
}
KALDI_LOG << "Done converting " << n_done << " lattices to word-level FSTs";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 18fb57fd095f7093cac6adfc64e27763e1334b18..786376018f001d8a045a5fc4b6fbbde73fa76d75 100644 (file)
<< " frames.";
KALDI_LOG << "Done " << num_done << " lattices.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 43909bb415d6cc6aeb8d79e2f1ead413f3908244..596fa74e977684802d371cae5655eb5058824db7 100644 (file)
<< n << ", average actual #paths is "
<< (n_paths_out/(n_done+1.0e-20));
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index eb6dff16f49ed34fa8005aedf253217f51f308ae..beb95114626093900624d54ffb4870171f5ff7f5 100644 (file)
}
KALDI_LOG << "Done copying " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 8b7831d950c34100a802ff6b81e41ba0dacf60ee..ab7bc5a577c71eb9e1cd6119fedfa4f772206a7f 100644 (file)
<< (total_ac_like/total_time);
KALDI_LOG << "Done " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 21e91ad6116bff00df1089320b1970dd23c57b95..cdb2ec3f5f2f878efa48e14343c0e02953ce4fa2 100644 (file)
Lattice lat1 = lattice_reader1.Value();
lattice_reader1.FreeCurrent();
if (lattice_reader2.HasKey(key)) {
- const Lattice& lat2 = lattice_reader2.Value(key);
+ const Lattice &lat2 = lattice_reader2.Value(key);
Union(&lat1, lat2);
n_union++;
} else {
<< n_no_lat << " cases.";
KALDI_LOG << "Done " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 2282ded996fba6bcddd70c13e6b99930b9008c90..f18b2a176803618a5163942a75ed6616409a6e1e 100644 (file)
<< n_err_write << " in error but written anyway, "
<< n_err_nowrite << " in error and not written.";
return (n_ok != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 766a670bb8e3453679b28a04260dcc507c7351ee..eca6b03ee415674b2df66572b25d9995b23c7426 100644 (file)
KALDI_LOG << "Done " << n_done << " n-best entries ,"
<< n_err << " had errors.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9243e8db45fc0a220bd56d61b399e3d63371a055..daf18ca96ff64d211f54ed48f707a1ca708be759 100644 (file)
KALDI_LOG << "Converted " << n_done << " linear lattices to ctm format; "
<< n_err << " had errors.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f2e285ec5ac2775a958eb82f1da3aeb76bf703df..c20a1593b169bb1554a80a07d40629bdb3f0a346 100644 (file)
<< (n_nbest_done/static_cast<BaseFloat>(n_utt_done))
<< " N-best paths per utterance.";
return (n_utt_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 621cce4c1b1d667fbda5f0f278c631fa98075f1f..d7b91954a054dbc6d018d90cfa5982ebcc3216c1 100644 (file)
KALDI_LOG << "Done " << n_done << " n-best entries, "
<< n_err << " had errors.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/lm/arpa2fst.cc b/src/lm/arpa2fst.cc
index 49c3152aaeed36fcb60e6d4741c506bf0805f884..ffa108f299af2cdd920e78c847d4e0962043b1f4 100755 (executable)
--- a/src/lm/arpa2fst.cc
+++ b/src/lm/arpa2fst.cc
lm.Read(arpa_filename, kaldi::kArpaLm, NULL, natural_base);
lm.Write(fst_filename);
exit(0);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/lm/kaldi-lm.cc b/src/lm/kaldi-lm.cc
index 17e4130c6755f1bb42904d9efee36bc1a89413e5..c16de9c8c95f755a0ae12895a881dd5f5330a246 100644 (file)
--- a/src/lm/kaldi-lm.cc
+++ b/src/lm/kaldi-lm.cc
std::istream &strm,
const string &sourcename,
GrammarType gtype,
- fst::SymbolTable* pst,
+ fst::SymbolTable *pst,
bool useNaturalLog,
const string startSent,
const string endSent) {
if (gtype == kArpaLm || gtype == kTextString) {
// always allocate local symbol table so we know we always have to delete it
- fst::SymbolTable* psyms = new fst::SymbolTable("lmInputSymbols");
+ fst::SymbolTable *psyms = new fst::SymbolTable("lmInputSymbols");
// initialize FST and reserve initial state
// (we can retrieve it through Start())
diff --git a/src/lm/kaldi-lm.h b/src/lm/kaldi-lm.h
index b2b6ad567d8e1a8baf8a261f202ec6a7ec189a58..e7b8e7f26d51e2964f5c7ec5a7eb7d86a64d2f63 100644 (file)
--- a/src/lm/kaldi-lm.h
+++ b/src/lm/kaldi-lm.h
bool Read(std::istream &strm,
const string &sourcename,
GrammarType gtype,
- fst::SymbolTable* pst = NULL,
+ fst::SymbolTable *pst = NULL,
bool useNaturalLog = true,
const string startSent = "<s>",
const string endSent = "</s>") {
/// Reads from a named input file. Empty filename reads from standard input.
bool Read(const string &filename,
GrammarType gtype,
- fst::SymbolTable* pst = 0,
+ fst::SymbolTable *pst = 0,
bool useNaturalLog = true,
const string startSent = "<s>",
const string endSent = "</s>") {
}
private:
- fst::VectorFst<fst::StdArc>* pfst_;
+ fst::VectorFst<fst::StdArc> *pfst_;
fst::VectorFst<fst::StdArc>* ReadStream(std::istream &strm,
const string &sourcename,
GrammarType gtype,
- fst::SymbolTable* pst,
+ fst::SymbolTable *pst,
bool useNaturalLog,
const string startSent,
const string endSent);
index e336515fe465351e38ad59b7955ab645a0136a38..6d4346ab87cd199cba231db08b05577905810429 100644 (file)
--- a/src/lm/kaldi-lmtable.cc
+++ b/src/lm/kaldi-lmtable.cc
StateId LmFstConverter::AddStateFromSymb(
const std::vector<string> &ngramString,
int kstart, int kend,
- const char* sep,
- fst::StdVectorFst* pfst,
- fst::SymbolTable* psst,
- bool& newlyAdded) {
+ const char *sep,
+ fst::StdVectorFst *pfst,
+ fst::SymbolTable *psst,
+ bool &newlyAdded) {
int64 snum, lastAvailable;
fst::StdArc::StateId sid;
std::ostringstream oss("");
return sid;
}
-void LmFstConverter::ConnectUnusedStates(fst::StdVectorFst* pfst) {
+void LmFstConverter::ConnectUnusedStates(fst::StdVectorFst *pfst) {
// go through all states with a recorded backoff destination
// and find out any that has no output arcs and is not final
float logProb,
float logBow,
std::vector<string> &ngs,
- fst::StdVectorFst* pfst,
- fst::SymbolTable* psst,
+ fst::StdVectorFst *pfst,
+ fst::SymbolTable *psst,
const string startSent,
const string endSent) {
fst::StdArc::StateId src, dst, dbo;
#ifndef HAVE_IRSTLM
bool LmTable::ReadFstFromLmFile(std::istream &istrm,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
bool useNaturalOpt,
const string startSent,
const string endSent) {
// #ifdef HAVE_IRSTLM implementation
bool LmTable::ReadFstFromLmFile(std::istream &istrm,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
bool useNaturalOpt,
const string startSent,
const string endSent) {
// run through all nodes in table (as in dumplm)
void LmTable::DumpStart(ngram ng,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
const string startSent,
const string endSent) {
#ifdef KALDI_PARANOID
// run through given levels and positions in table
void LmTable::DumpContinue(ngram ng, int ilev, int elev,
table_entry_pos_t ipos, table_entry_pos_t epos,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
fst::SymbolTable *pStateSymbs,
const string startSent, const string endSent) {
LMT_TYPE ndt = tbltype[ilev];
diff --git a/src/lm/kaldi-lmtable.h b/src/lm/kaldi-lmtable.h
index 0aa25861467c53b29ef678394d5b5d5b3835aa3e..f804c2af8ac17312c91ab61e05cf4eeba4f291d7 100644 (file)
--- a/src/lm/kaldi-lmtable.h
+++ b/src/lm/kaldi-lmtable.h
float prob,
float bow,
std::vector<string> &ngs,
- fst::StdVectorFst* pfst,
- fst::SymbolTable* psst,
+ fst::StdVectorFst *pfst,
+ fst::SymbolTable *psst,
const string startSent,
const string endSent);
float convertArpaLogProbToWeight(float lp) {
if ( use_natural_log_ ) {
// convert from arpa base 10 log to natural base, then to cost
- return (-2.302585*lp);
+ return -2.302585*lp;
} else {
// keep original base but convert to cost
- return (-lp);
+ return -lp;
}
}
- bool IsFinal(fst::StdVectorFst* pfst,
+ bool IsFinal(fst::StdVectorFst *pfst,
StateId s) {
return(pfst->Final(s) != fst::StdArc::Weight::Zero());
}
- void ConnectUnusedStates(fst::StdVectorFst* pfst);
+ void ConnectUnusedStates(fst::StdVectorFst *pfst);
private:
StateId AddStateFromSymb(const std::vector<string> &ngramString,
int kstart,
int kend,
- const char* sep,
- fst::StdVectorFst* pfst,
- fst::SymbolTable* psst,
- bool& newlyAdded);
+ const char *sep,
+ fst::StdVectorFst *pfst,
+ fst::SymbolTable *psst,
+ bool &newlyAdded);
bool use_natural_log_;
BkStateMap bkState_;
~LmTable() { if (conv_) delete conv_; }
bool ReadFstFromLmFile(std::istream &istrm,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
bool useNaturalLog,
const string startSent,
const string endSent);
private:
- LmFstConverter* conv_;
+ LmFstConverter *conv_;
};
#else
/// table_entry_pos_t wdprune(float *thr, int aflag = 0);
/// void load(std::istream& inp, ...);
bool ReadFstFromLmFile(std::istream &istrm,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
bool useNaturalOpt,
const string startSent,
const string endSent);
/// Method specific to the IRSTLM implementation.
void DumpStart(ngram ng,
- fst::StdVectorFst* pfst,
+ fst::StdVectorFst *pfst,
const string startSent,
const string endSent);
/// Method specific to the IRSTLM implementation.
void DumpContinue(ngram ng,
int ilev, int elev,
table_entry_pos_t ipos, table_entry_pos_t epos,
- fst::StdVectorFst* pfst, fst::SymbolTable *pStateSymbs,
+ fst::StdVectorFst *pfst, fst::SymbolTable *pStateSymbs,
const string startSent, const string endSent);
- LmFstConverter* conv_;
+ LmFstConverter *conv_;
};
#endif
diff --git a/src/lm/lm-lib-test.cc b/src/lm/lm-lib-test.cc
index 2546e679de18da54d5a0f74d0eb9b60df6af3b90..cfbcf430bad56609699ed8b0b41d8889626f2db8 100644 (file)
--- a/src/lm/lm-lib-test.cc
+++ b/src/lm/lm-lib-test.cc
#define MAX_SENTENCE_LENGTH 1000
/// @brief Recursively prints all complete paths starting at s and their score.
-static LmWeight PrintCompletePath(fst::SymbolTable* pst,
- fst::StdVectorFst* pfst,
+static LmWeight PrintCompletePath(fst::SymbolTable *pst,
+ fst::StdVectorFst *pfst,
fst::StdArc::StateId s,
LmWeight score) {
fst::ArcIterator<fst::StdVectorFst> ai(*pfst, s);
}
/// @brief Recursively prints all complete paths starting from initial state.
-static LmWeight PrintCompletePaths(fst::SymbolTable* pst,
- fst::StdVectorFst* pfst) {
+static LmWeight PrintCompletePaths(fst::SymbolTable *pst,
+ fst::StdVectorFst *pfst) {
KALDI_ASSERT(pst);
KALDI_ASSERT(pfst);
KALDI_ASSERT(pfst->Start() >=0);
/// @brief Creates an FST that generates any sequence of symbols
/// taken from given symbol table.
/// This FST is then associated with given symbol table.
-static fst::StdVectorFst* CreateGenFst(fst::SymbolTable* pst) {
+static fst::StdVectorFst* CreateGenFst(fst::SymbolTable *pst) {
fst::StdArc::StateId initId, midId, finalId;
- fst::StdVectorFst* genFst = new fst::StdVectorFst;
+ fst::StdVectorFst *genFst = new fst::StdVectorFst;
pst->AddSymbol(epsilon); // added if not there
int64 boslab = pst->AddSymbol(startOfSentence); // added if not there
int64 eoslab = pst->AddSymbol(endOfSentence); // added if not there
}
/// @brief Randomly generates ntests paths with uniform distribution.
-static fst::StdVectorFst* CreateRandPathFst(int n, fst::StdVectorFst* genFst) {
+static fst::StdVectorFst* CreateRandPathFst(int n, fst::StdVectorFst *genFst) {
typedef fst::UniformArcSelector<fst::StdArc> UniformSelector;
int nTrials = 50;
MAX_SENTENCE_LENGTH, n);
for (int i = 0; i < nTrials; i++) {
- fst::StdVectorFst* tmpFst = new fst::StdVectorFst;
+ fst::StdVectorFst *tmpFst = new fst::StdVectorFst;
RandGen(*genFst, tmpFst, opts);
if (tmpFst->Properties(fst::kCoAccessible, true)) {
// std::cout << "Got valid random path after " << i << " tries" << '\n';
}
/// @brief Tests if all paths generated from genFst are included in testFst.
-static bool coverageTests(fst::StdVectorFst* genFst,
- fst::StdVectorFst* testFst,
+static bool coverageTests(fst::StdVectorFst *genFst,
+ fst::StdVectorFst *testFst,
int ntests) {
bool success = true;
#ifdef KALDI_PARANOID
if (!pathFst) return false;
// compose paths with language model fst
- fst::StdVectorFst* outFst = new fst::StdVectorFst;
+ fst::StdVectorFst *outFst = new fst::StdVectorFst;
// std::cout << "Path FST " << '\n';
// printFirstCompletePath(pst, pathFst, pathFst->Start());
// first create an FST that generates
// any sequence of symbols taken from symbol table
- fst::StdVectorFst* genFst = CreateGenFst(lm.GetFst()->MutableInputSymbols());
+ fst::StdVectorFst *genFst = CreateGenFst(lm.GetFst()->MutableInputSymbols());
// see if path generated in this FST are covered by the LM FST
std::cout << "For any sequence of symbols found in symbol table:" << '\n';
composedFst.Write("composed.fst");
// find best path score
- fst::StdVectorFst* bestFst = new fst::StdVectorFst;
+ fst::StdVectorFst *bestFst = new fst::StdVectorFst;
fst::ShortestPath(composedFst, bestFst, 1);
std::cout << "Best path has " << bestFst->NumStates() << " states" << '\n';
index e1a9b22d18eeb4fb6f9ee64ab7b2ed888396d0eb..66322964ee5ad9cebfefd2d715148f268457c57b 100644 (file)
template<>
template<>
-void MatrixBase<float>::AddVecVec(const float alpha, const VectorBase<float>& ra, const VectorBase<float>& rb);
+void MatrixBase<float>::AddVecVec(const float alpha, const VectorBase<float> &ra, const VectorBase<float> &rb);
template<>
template<>
-void MatrixBase<double>::AddVecVec(const double alpha, const VectorBase<double>& ra, const VectorBase<double>& rb);
+void MatrixBase<double>::AddVecVec(const double alpha, const VectorBase<double> &ra, const VectorBase<double> &rb);
template<>
index 7c1cfb4e61afafd52b6a239eeffb52fd65f23dca..9829b646fd7ad662b41179a83385902a499f497d 100644 (file)
bool inverse_needed) {
KALDI_ASSERT(num_rows_ == num_cols_);
#ifndef HAVE_ATLAS
- KaldiBlasInt* pivot = new KaldiBlasInt[num_rows_];
+ KaldiBlasInt *pivot = new KaldiBlasInt[num_rows_];
KaldiBlasInt M = num_rows_;
KaldiBlasInt N = num_cols_;
KaldiBlasInt LDA = stride_;
KaldiBlasInt result;
KaldiBlasInt l_work = std::max<KaldiBlasInt>(1, N);
- float* p_work = new float[l_work];
+ float *p_work = new float[l_work];
sgetrf_(&M, &N, data_, &LDA, pivot, &result);
const int pivot_offset = 1;
#else
- int* pivot = new int[num_rows_];
+ int *pivot = new int[num_rows_];
int result = clapack_sgetrf(CblasColMajor, num_rows_, num_cols_,
data_, stride_, pivot);
const int pivot_offset = 0;
bool inverse_needed) {
KALDI_ASSERT(num_rows_ == num_cols_);
#ifndef HAVE_ATLAS
- KaldiBlasInt* pivot = new KaldiBlasInt[num_rows_];
+ KaldiBlasInt *pivot = new KaldiBlasInt[num_rows_];
KaldiBlasInt M = num_rows_;
KaldiBlasInt N = num_cols_;
KaldiBlasInt LDA = stride_;
KaldiBlasInt result;
KaldiBlasInt l_work = std::max<KaldiBlasInt>(1, N);
- double* p_work = new double[l_work];
+ double *p_work = new double[l_work];
dgetrf_(&M, &N, data_, &LDA, pivot, &result);
const int pivot_offset = 1;
#else
- int* pivot = new int[num_rows_];
+ int *pivot = new int[num_rows_];
int result = clapack_dgetrf(CblasColMajor, num_rows_, num_cols_, data_,
stride_, pivot);
const int pivot_offset = 0;
template<>
template<>
void MatrixBase<float>::AddVecVec(const float alpha,
- const VectorBase<float>& a,
- const VectorBase<float>& rb) {
+ const VectorBase<float> &a,
+ const VectorBase<float> &rb) {
KALDI_ASSERT(a.Dim() == num_rows_ && rb.Dim() == num_cols_);
cblas_sger(CblasRowMajor, a.Dim(), rb.Dim(), alpha, a.Data(), 1, rb.Data(),
1, data_, stride_);
template<class Real>
template<class OtherReal>
void MatrixBase<Real>::AddVecVec(const Real alpha,
- const VectorBase<OtherReal>& a,
- const VectorBase<OtherReal>& b) {
+ const VectorBase<OtherReal> &a,
+ const VectorBase<OtherReal> &b) {
KALDI_ASSERT(a.Dim() == num_rows_ && b.Dim() == num_cols_);
const OtherReal *a_data = a.Data(), *b_data = b.Data();
Real *row_data = data_;
// instantiate the template above.
template
void MatrixBase<float>::AddVecVec(const float alpha,
- const VectorBase<double>& a,
- const VectorBase<double>& b);
+ const VectorBase<double> &a,
+ const VectorBase<double> &b);
template
void MatrixBase<double>::AddVecVec(const double alpha,
- const VectorBase<float>& a,
- const VectorBase<float>& b);
+ const VectorBase<float> &a,
+ const VectorBase<float> &b);
template<>
template<>
void MatrixBase<double>::AddVecVec(const double alpha,
- const VectorBase<double>& a,
- const VectorBase<double>& rb) {
+ const VectorBase<double> &a,
+ const VectorBase<double> &rb) {
KALDI_ASSERT(a.Dim() == num_rows_ && rb.Dim() == num_cols_);
cblas_dger(CblasRowMajor, a.Dim(), rb.Dim(), alpha, a.Data(), 1, rb.Data(),
1, data_, stride_);
@@ -446,7 +446,7 @@ void MatrixBase<float>::LapackGesvd(VectorBase<float> *s, MatrixBase<float> *U_i
&result);
l_work = static_cast<KaldiBlasInt>(work_query);
- float* p_work = new float[l_work];
+ float *p_work = new float[l_work];
// perform svd
sgesvd_(v_job, u_job,
@@ -517,7 +517,7 @@ void MatrixBase<double>::LapackGesvd(VectorBase<double> *s, MatrixBase<double> *
&result);
l_work = static_cast<KaldiBlasInt>(work_query);
- double* p_work = new double[l_work];
+ double *p_work = new double[l_work];
// perform svd
dgesvd_(v_job, u_job,
template<typename Real>
-void MatrixBase<Real>::MulElements(const MatrixBase<Real>& a) {
+void MatrixBase<Real>::MulElements(const MatrixBase<Real> &a) {
KALDI_ASSERT(a.NumRows() == num_rows_ && a.NumCols() == num_cols_);
MatrixIndexT i;
MatrixIndexT j;
}
template<typename Real>
-void MatrixBase<Real>::DivElements(const MatrixBase<Real>& a) {
+void MatrixBase<Real>::DivElements(const MatrixBase<Real> &a) {
KALDI_ASSERT(a.NumRows() == num_rows_ && a.NumCols() == num_cols_);
MatrixIndexT i;
MatrixIndexT j;
}
template<typename Real> // scales each row by scale[i].
-void MatrixBase<Real>::MulRowsVec(const VectorBase<Real>& scale) {
+void MatrixBase<Real>::MulRowsVec(const VectorBase<Real> &scale) {
KALDI_ASSERT(scale.Dim() == num_rows_);
MatrixIndexT M = num_rows_, N = num_cols_;
}
template<typename Real> // scales each column by scale[i].
-void MatrixBase<Real>::MulColsVec(const VectorBase<Real>& scale) {
+void MatrixBase<Real>::MulColsVec(const VectorBase<Real> &scale) {
KALDI_ASSERT(scale.Dim() == num_cols_);
for (MatrixIndexT i = 0; i < num_rows_; i++) {
for (MatrixIndexT j = 0; j < num_cols_; j++) {
// be quite complicated to implement a "const SubMatrix" class that
// would not allow its contents to be changed.
template<typename Real>
-SubMatrix<Real>::SubMatrix(const MatrixBase<Real>& rT,
+SubMatrix<Real>::SubMatrix(const MatrixBase<Real> &rT,
const MatrixIndexT ro,
const MatrixIndexT r,
const MatrixIndexT co,
@@ -1930,8 +1930,8 @@ double TraceMatMatMatMat(const MatrixBase<double> &A, MatrixTransposeType transA
const MatrixBase<double> &C, MatrixTransposeType transC,
const MatrixBase<double> &D, MatrixTransposeType transD);
-template<class Real> void SortSvd(VectorBase<Real>* rs, MatrixBase<Real>* rU,
- MatrixBase<Real>* rVt) {
+template<class Real> void SortSvd(VectorBase<Real> *rs, MatrixBase<Real> *rU,
+ MatrixBase<Real> *rVt) {
// Makes sure the Svd is sorted (from greatest to least element).
MatrixIndexT D = rs->Dim();
KALDI_ASSERT(rU->NumCols() == D && (!rVt || rVt->NumRows() == D) && rs->Min() >= 0);
@@ -1957,12 +1957,12 @@ template<class Real> void SortSvd(VectorBase<Real>* rs, MatrixBase<Real>* rU,
}
template
-void SortSvd(VectorBase<float>* rs, MatrixBase<float>* rU,
- MatrixBase<float>* rVt);
+void SortSvd(VectorBase<float> *rs, MatrixBase<float> *rU,
+ MatrixBase<float> *rVt);
template
-void SortSvd(VectorBase<double>* rs, MatrixBase<double>* rU,
- MatrixBase<double>* rVt);
+void SortSvd(VectorBase<double> *rs, MatrixBase<double> *rU,
+ MatrixBase<double> *rVt);
template<class Real>
void CreateEigenvalueMatrix(const VectorBase<Real> &re, const VectorBase<Real> &im,
index ff4b23f4a8716badcd3740b3a1bcf5e7bc634424..cb4176b67125e55c5259bef3b63619e06af95364 100644 (file)
Real Min() const;
/// Element by element multiplication with a given matrix.
- void MulElements(const MatrixBase<Real>& a);
+ void MulElements(const MatrixBase<Real> &a);
/// Divide each element by the corresponding element of a given matrix.
- void DivElements(const MatrixBase<Real>& a);
+ void DivElements(const MatrixBase<Real> &a);
/// Multiply each element with a scalar value.
void Scale(Real alpha);
/// *this += alpha * a * b^T
template<class OtherReal>
- void AddVecVec(const Real alpha, const VectorBase<OtherReal>& a,
- const VectorBase<OtherReal>& b);
+ void AddVecVec(const Real alpha, const VectorBase<OtherReal> &a,
+ const VectorBase<OtherReal> &b);
/// [each row of *this] += alpha * v
template<class OtherReal>
- void AddVecToRows(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVecToRows(const Real alpha, const VectorBase<OtherReal> &v);
/// [each col of *this] += alpha * v
template<class OtherReal>
- void AddVecToCols(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVecToCols(const Real alpha, const VectorBase<OtherReal> &v);
/// *this += alpha * M [or M^T]
void AddMat(const Real alpha, const MatrixBase<Real> &M,
protected:
/// Initializer, callable only from child.
- explicit MatrixBase(Real* data, MatrixIndexT cols, MatrixIndexT rows, MatrixIndexT stride) :
+ explicit MatrixBase(Real *data, MatrixIndexT cols, MatrixIndexT rows, MatrixIndexT stride) :
data_(data), num_cols_(cols), num_rows_(rows), stride_(stride) {
KALDI_ASSERT_IS_FLOATING_TYPE(Real);
}
index 97b7daa663739df2f8d249e5564c195f46afe6b6..6c173e1f7434215041c3e52908251859101b125b 100644 (file)
namespace kaldi {
template<typename Real>
-std::ostream & operator << (std::ostream& os, const VectorBase<Real>& rv) {
+std::ostream & operator << (std::ostream &os, const VectorBase<Real> &rv) {
rv.Write(os, false);
return os;
}
template<typename Real>
-std::istream &operator >> (std::istream& is, VectorBase<Real>& rv) {
+std::istream &operator >> (std::istream &is, VectorBase<Real> &rv) {
rv.Read(is, false);
return is;
}
template<typename Real>
-std::istream &operator >> (std::istream& is, Vector<Real>& rv) {
+std::istream &operator >> (std::istream &is, Vector<Real> &rv) {
rv.Read(is, false);
return is;
}
template<>
-float VecVec<>(const VectorBase<float>& ra, const VectorBase<float>& rb);
+float VecVec<>(const VectorBase<float> &ra, const VectorBase<float> &rb);
template<>
-double VecVec<>(const VectorBase<double>& ra, const VectorBase<double>& rb);
+double VecVec<>(const VectorBase<double> &ra, const VectorBase<double> &rb);
template<>
template<>
-void VectorBase<float>::AddVec(const float alpha, const VectorBase<float>& rv);
+void VectorBase<float>::AddVec(const float alpha, const VectorBase<float> &rv);
template<>
template<>
void VectorBase<double>::AddVec<double>(const double alpha,
- const VectorBase<double>& rv);
+ const VectorBase<double> &rv);
template<>
void VectorBase<float>::AddMatVec(const float alpha, const MatrixBase<float>& M,
MatrixTransposeType trans,
- const VectorBase<float>& v, const float beta);
+ const VectorBase<float> &v, const float beta);
template<>
void VectorBase<double>::AddMatVec(const double alpha,
const MatrixBase<double>& M,
MatrixTransposeType trans,
- const VectorBase<double>& v,
+ const VectorBase<double> &v,
const double beta);
template<>
void VectorBase<float>::AddSpVec(const float alpha, const SpMatrix<float>& M,
- const VectorBase<float>& v, const float beta);
+ const VectorBase<float> &v, const float beta);
template<>
void VectorBase<double>::AddSpVec(const double alpha, const SpMatrix<double>& M,
- const VectorBase<double>& v,
+ const VectorBase<double> &v,
const double beta);
template<>
index 0b97097cd449dea9a0cb51df659cdb0f5448d8f0..d44db50664640a8660b0f0a71586ce92aa9f26f7 100644 (file)
namespace kaldi {
-template<> float VecVec<>(const VectorBase<float>& ra,
- const VectorBase<float>& rb) {
+template<> float VecVec<>(const VectorBase<float> &ra,
+ const VectorBase<float> &rb) {
MatrixIndexT adim = ra.Dim();
KALDI_ASSERT(adim == rb.Dim());
return cblas_sdot(adim, ra.Data(), 1, rb.Data(), 1);
}
-template<> double VecVec<>(const VectorBase<double>& ra,
- const VectorBase<double>& rb) {
+template<> double VecVec<>(const VectorBase<double> &ra,
+ const VectorBase<double> &rb) {
MatrixIndexT adim = ra.Dim();
KALDI_ASSERT(adim == rb.Dim());
return cblas_ddot(adim, ra.Data(), 1, rb.Data(), 1);
template<class Real, class OtherReal>
-Real VecVec(const VectorBase<Real>& ra,
- const VectorBase<OtherReal>& rb) {
+Real VecVec(const VectorBase<Real> &ra,
+ const VectorBase<OtherReal> &rb) {
MatrixIndexT adim = ra.Dim();
KALDI_ASSERT(adim == rb.Dim());
const Real *a_data = ra.Data();
// instantiate the template above.
template
-float VecVec<>(const VectorBase<float>& ra,
- const VectorBase<double>& rb);
+float VecVec<>(const VectorBase<float> &ra,
+ const VectorBase<double> &rb);
template
-double VecVec<>(const VectorBase<double>& ra,
- const VectorBase<float>& rb);
+double VecVec<>(const VectorBase<double> &ra,
+ const VectorBase<float> &rb);
template<>
template<>
void VectorBase<float>::AddVec(const float alpha,
- const VectorBase<float>& rv) {
+ const VectorBase<float> &rv) {
KALDI_ASSERT(dim_ == rv.dim_);
KALDI_ASSERT(&rv != this);
cblas_saxpy(dim_, alpha, rv.Data(), 1, data_, 1);
template<>
template<>
void VectorBase<double>::AddVec(const double alpha,
- const VectorBase<double>& rv) {
+ const VectorBase<double> &rv) {
KALDI_ASSERT(dim_ == rv.dim_);
KALDI_ASSERT(&rv != this);
cblas_daxpy(dim_, alpha, rv.Data(), 1, data_, 1);
template<>
void VectorBase<float>::AddMatVec(const float alpha,
- const MatrixBase<float>& rM,
+ const MatrixBase<float> &rM,
MatrixTransposeType trans,
- const VectorBase<float>& rv,
+ const VectorBase<float> &rv,
const float beta) {
KALDI_ASSERT((trans == kNoTrans && rM.NumCols() == rv.dim_ && rM.NumRows() == dim_)
|| (trans == kTrans && rM.NumRows() == rv.dim_ && rM.NumCols() == dim_));
template<>
void VectorBase<double>::AddMatVec(const double alpha,
- const MatrixBase<double>& rM,
+ const MatrixBase<double> &rM,
MatrixTransposeType trans,
- const VectorBase<double>& rv,
+ const VectorBase<double> &rv,
const double beta) {
KALDI_ASSERT((trans == kNoTrans && rM.NumCols() == rv.dim_ && rM.NumRows() == dim_)
|| (trans == kTrans && rM.NumRows() == rv.dim_ && rM.NumCols() == dim_));
template<>
void VectorBase<float>::AddSpVec(const float alpha,
- const SpMatrix<float>& rM,
- const VectorBase<float>& rv,
+ const SpMatrix<float> &rM,
+ const VectorBase<float> &rv,
const float beta) {
KALDI_ASSERT(rM.NumRows() == rv.dim_ && dim_ == rv.dim_);
KALDI_ASSERT(&rv != this);
template<>
void VectorBase<double>::AddSpVec(const double alpha,
- const SpMatrix<double>& rM,
- const VectorBase<double>& rv,
+ const SpMatrix<double> &rM,
+ const VectorBase<double> &rv,
const double beta) {
KALDI_ASSERT(rM.NumRows() == rv.dim_ && dim_ == rv.dim_);
KALDI_ASSERT(&rv != this);
}
template<>
-void VectorBase<float>::MulTp(const TpMatrix<float>& rM,
+void VectorBase<float>::MulTp(const TpMatrix<float> &rM,
const MatrixTransposeType trans) {
KALDI_ASSERT(rM.NumRows() == dim_);
cblas_stpmv(CblasRowMajor, CblasLower, static_cast<CBLAS_TRANSPOSE>(trans),
}
template<>
-void VectorBase<double>::MulTp(const TpMatrix<double>& rM,
+void VectorBase<double>::MulTp(const TpMatrix<double> &rM,
const MatrixTransposeType trans) {
KALDI_ASSERT(rM.NumRows() == dim_);
cblas_dtpmv(CblasRowMajor, CblasLower, static_cast<CBLAS_TRANSPOSE>(trans),
return;
}
MatrixIndexT size;
- void* data;
- void* free_data;
+ void *data;
+ void *free_data;
// size = align<16>(dim * sizeof(Real));
size = dim * sizeof(Real);
/// Copy data from another vector
template<typename Real>
-void VectorBase<Real>::CopyFromVec(const VectorBase<Real>& rv) {
+void VectorBase<Real>::CopyFromVec(const VectorBase<Real> &rv) {
KALDI_ASSERT(Dim() == rv.Dim());
CopyFromPtr(rv.data_, rv.Dim());
}
}
template<typename Real>
-void VectorBase<Real>::CopyRowsFromMat(const MatrixBase<Real>& mat) {
+void VectorBase<Real>::CopyRowsFromMat(const MatrixBase<Real> &mat) {
KALDI_ASSERT(dim_ == mat.NumCols() * mat.NumRows());
- Real* inc_data = data_;
+ Real *inc_data = data_;
const MatrixIndexT cols = mat.NumCols(), rows = mat.NumRows();
if (mat.Stride() == mat.NumCols()) {
template<typename Real>
template<typename OtherReal>
-void VectorBase<Real>::CopyRowsFromMat(const MatrixBase<OtherReal>& mat) {
+void VectorBase<Real>::CopyRowsFromMat(const MatrixBase<OtherReal> &mat) {
KALDI_ASSERT(dim_ == mat.NumCols() * mat.NumRows());
- Real* vec_data = data_;
+ Real *vec_data = data_;
const MatrixIndexT cols = mat.NumCols(),
rows = mat.NumRows();
template<typename Real>
-void VectorBase<Real>::CopyColsFromMat(const MatrixBase<Real>& mat) {
+void VectorBase<Real>::CopyColsFromMat(const MatrixBase<Real> &mat) {
KALDI_ASSERT(dim_ == mat.NumCols() * mat.NumRows());
Real* inc_data = data_;
const MatrixIndexT cols = mat.NumCols(), rows = mat.NumRows(), stride = mat.Stride();
- const Real* mat_inc_data = mat.Data();
+ const Real *mat_inc_data = mat.Data();
for (MatrixIndexT i = 0; i < cols; i++) {
for (MatrixIndexT j = 0; j < rows; j++) {
}
template<typename Real>
-void VectorBase<Real>::AddRowSumMat(const MatrixBase<Real>& rM) {
+void VectorBase<Real>::AddRowSumMat(const MatrixBase<Real> &rM) {
// note the double accumulator
double sum;
KALDI_ASSERT(dim_ == rM.NumCols());
}
template<typename Real>
-void VectorBase<Real>::AddColSumMat(const MatrixBase<Real>& rM) {
+void VectorBase<Real>::AddColSumMat(const MatrixBase<Real> &rM) {
// note the double accumulator
double sum;
KALDI_ASSERT(dim_ == rM.NumRows());
}
template<typename Real>
-void VectorBase<Real>::ApplyLogAndCopy(const VectorBase<Real>& rv) {
+void VectorBase<Real>::ApplyLogAndCopy(const VectorBase<Real> &rv) {
KALDI_ASSERT(dim_ == rv.Dim());
for (MatrixIndexT i = 0; i < dim_; i++) {
data_[i] = log(rv(i));
void VectorBase<double>::MulElements(const VectorBase<float> &rv);
template<typename Real>
-void VectorBase<Real>::AddVecVec(Real alpha, const VectorBase<Real>& v,
- const VectorBase<Real>& r, Real beta) {
+void VectorBase<Real>::AddVecVec(Real alpha, const VectorBase<Real> &v,
+ const VectorBase<Real> &r, Real beta) {
KALDI_ASSERT((dim_ == v.dim_ && dim_ == r.dim_));
KALDI_ASSERT(this != &v && this != &r);
// remove __restrict__ if it causes compilation problems.
}
template<typename Real>
-void VectorBase<Real>::DivElemByElem(const VectorBase<Real>& rv) {
+void VectorBase<Real>::DivElemByElem(const VectorBase<Real> &rv) {
KALDI_ASSERT(dim_ == rv.dim_);
for (MatrixIndexT i = 0; i < dim_; i++) {
data_[i] /= rv.data_[i];
}
template<typename Real>
-void VectorBase<Real>::AddVecDivVec(Real alpha, const VectorBase<Real>& rv,
- const VectorBase<Real>& rr, Real beta) {
+void VectorBase<Real>::AddVecDivVec(Real alpha, const VectorBase<Real> &rv,
+ const VectorBase<Real> &rr, Real beta) {
KALDI_ASSERT((dim_ == rv.dim_ && dim_ == rr.dim_));
for (MatrixIndexT i = 0; i < dim_; i++) {
data_[i] = alpha * rv.data_[i]/rr.data_[i] + beta * data_[i] ;
template<typename Real>
template<typename OtherReal>
-void VectorBase<Real>::AddVec(const Real alpha, const VectorBase<OtherReal>& v) {
+void VectorBase<Real>::AddVec(const Real alpha, const VectorBase<OtherReal> &v) {
KALDI_ASSERT(dim_ == v.dim_);
// remove __restrict__ if it causes compilation problems.
register __restrict__ Real *data = data_;
@@ -739,13 +739,13 @@ void VectorBase<Real>::AddVec(const Real alpha, const VectorBase<OtherReal>& v)
}
template
-void VectorBase<float>::AddVec(const float alpha, const VectorBase<double>& v);
+void VectorBase<float>::AddVec(const float alpha, const VectorBase<double> &v);
template
-void VectorBase<double>::AddVec(const double alpha, const VectorBase<float>& v);
+void VectorBase<double>::AddVec(const double alpha, const VectorBase<float> &v);
template<typename Real>
template<typename OtherReal>
-void VectorBase<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal>& v) {
+void VectorBase<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal> &v) {
KALDI_ASSERT(dim_ == v.dim_);
// remove __restrict__ if it causes compilation problems.
register __restrict__ Real *data = data_;
@@ -760,9 +760,9 @@ void VectorBase<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal>& v)
}
template
-void VectorBase<float>::AddVec2(const float alpha, const VectorBase<double>& v);
+void VectorBase<float>::AddVec2(const float alpha, const VectorBase<double> &v);
template
-void VectorBase<double>::AddVec2(const double alpha, const VectorBase<float>& v);
+void VectorBase<double>::AddVec2(const double alpha, const VectorBase<float> &v);
template<typename Real>
template<class Real>
-void VectorBase<Real>::AddVec2(const Real alpha, const VectorBase<Real>& rv) {
+void VectorBase<Real>::AddVec2(const Real alpha, const VectorBase<Real> &rv) {
KALDI_ASSERT(dim_ == rv.dim_);
for (MatrixIndexT i = 0; i < dim_; i++) {
data_[i] += rv.data_[i]*rv.data_[i]*alpha;
// this <-- beta*this + alpha*M*v.
template<class Real>
-void VectorBase<Real>::AddTpVec(const Real alpha, const TpMatrix<Real>& rM,
+void VectorBase<Real>::AddTpVec(const Real alpha, const TpMatrix<Real> &rM,
const MatrixTransposeType trans,
- const VectorBase<Real>& rv,
+ const VectorBase<Real> &rv,
const Real beta) {
KALDI_ASSERT(dim_ == rv.dim_ && dim_ == rM.NumRows());
if (beta == 0.0) {
index caeeee0e83095f994695653c2b5c17283ca520ad..b1f5f65880cc0517af336894718c522110f29c0d 100644 (file)
}
/// Copy data from another vector (must match own size).
- void CopyFromVec(const VectorBase<Real>& v);
+ void CopyFromVec(const VectorBase<Real> &v);
/// Copy data from a SpMatrix or TpMatrix (must match own size).
template<typename OtherReal>
/// Copy data from another vector of different type (double vs. float)
template<typename OtherReal>
- void CopyFromVec(const VectorBase<OtherReal>& v);
+ void CopyFromVec(const VectorBase<OtherReal> &v);
/// Apply natural log to all elements. Throw if any element of
/// the vector is negative (but doesn't complain about zero; the
void ApplyLog();
/// Apply natural log to another vector and put result in *this.
- void ApplyLogAndCopy(const VectorBase<Real>& v);
+ void ApplyLogAndCopy(const VectorBase<Real> &v);
/// Apply exponential to each value in vector.
void ApplyExp();
/// Add vector : *this = *this + alpha * rv (with casting between floats and
/// doubles)
template<class OtherReal>
- void AddVec(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVec(const Real alpha, const VectorBase<OtherReal> &v);
/// Add vector : *this = *this + alpha * rv^2 [element-wise squaring].
- void AddVec2(const Real alpha, const VectorBase<Real>& v);
+ void AddVec2(const Real alpha, const VectorBase<Real> &v);
/// Add vector : *this = *this + alpha * rv^2 [element-wise squaring],
/// with casting between floats and doubles.
template<class OtherReal>
- void AddVec2(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVec2(const Real alpha, const VectorBase<OtherReal> &v);
/// Add matrix times vector : this <-- beta*this + alpha*M*v.
/// Calls BLAS GEMV.
void AddMatVec(const Real alpha, const MatrixBase<Real>&M,
- const MatrixTransposeType trans, const VectorBase<Real>& v,
+ const MatrixTransposeType trans, const VectorBase<Real> &v,
const Real beta = 0.0);
/// Add symmetric positive definite matrix times vector:
/// this <-- beta*this + alpha*M*v. Calls BLAS SPMV.
void AddSpVec(const Real alpha, const SpMatrix<Real>&M,
- const VectorBase<Real>& v, const Real beta = 0.0);
+ const VectorBase<Real> &v, const Real beta = 0.0);
/// Add triangular matrix times vector: this <-- beta*this + alpha*M*v.
/// Works even if rv == *this.
void AddTpVec(const Real alpha, const TpMatrix<Real>&M,
- const MatrixTransposeType trans, const VectorBase<Real>& v,
+ const MatrixTransposeType trans, const VectorBase<Real> &v,
const Real beta = 0.0);
/// Multiply element-by-element by another vector.
- void MulElements(const VectorBase<Real>& v);
+ void MulElements(const VectorBase<Real> &v);
/// Multiply element-by-element by another vector of different type.
template<typename OtherReal>
- void MulElements(const VectorBase<OtherReal>& v);
+ void MulElements(const VectorBase<OtherReal> &v);
/// Add a constant to each element of a vector.
void Add(Real c);
/// Add element-by-element product of vectors:
// this <-- alpha * v .* r + beta*this .
- void AddVecVec(Real alpha, const VectorBase<Real>& v,
+ void AddVecVec(Real alpha, const VectorBase<Real> &v,
const VectorBase<Real> &r, Real beta);
/// Divide element-by-element by a vector.
/// Add element-by-element quotient of two vectors.
/// this <---- alpha*v/r + beta*this
- void AddVecDivVec(Real alpha, const VectorBase<Real>& v,
- const VectorBase<Real>& r, Real beta);
+ void AddVecDivVec(Real alpha, const VectorBase<Real> &v,
+ const VectorBase<Real> &r, Real beta);
/// Multiplies all elements by this constant.
void Scale(Real alpha);
: VectorBase<Real>() { Resize(s, resize_type); }
/// Copy constructor. The need for this is controversial.
- Vector(const Vector<Real>& v) : VectorBase<Real>() { // (cannot be explicit)
+ Vector(const Vector<Real> &v) : VectorBase<Real>() { // (cannot be explicit)
Resize(v.Dim());
CopyFromVec(v);
}
/// Copy-constructor from base-class, needed to copy from SubVector.
- explicit Vector(const VectorBase<Real>& v) : VectorBase<Real>() {
+ explicit Vector(const VectorBase<Real> &v) : VectorBase<Real>() {
Resize(v.Dim());
CopyFromVec(v);
}
/// Type conversion constructor.
template<typename OtherReal>
- explicit Vector(const VectorBase<OtherReal>& v): VectorBase<Real>() {
+ explicit Vector(const VectorBase<OtherReal> &v): VectorBase<Real>() {
Resize(v.Dim());
CopyFromVec(v);
}
#ifdef KALDI_MEMALIGN_MANUAL
/// data to be freed (in case of manual memalignment use, see common.h)
- Real* free_data_;
+ Real *free_data_;
#endif
};
/// Constructor from a Vector or SubVector.
/// SubVectors are not const-safe and it's very hard to make them
/// so for now we just give up. This function contains const_cast.
- SubVector(const VectorBase<Real>& t, const MatrixIndexT origin,
+ SubVector(const VectorBase<Real> &t, const MatrixIndexT origin,
const MatrixIndexT length) : VectorBase<Real>() {
// following assert equiv to origin>=0 && length>=0 &&
// origin+length <= rt.dim_
/// Returns dot product between v1 and v2.
template<typename Real>
-Real VecVec(const VectorBase<Real>& v1, const VectorBase<Real>& v2);
+Real VecVec(const VectorBase<Real> &v1, const VectorBase<Real> &v2);
template<typename Real, typename OtherReal>
-Real VecVec(const VectorBase<Real>& v1, const VectorBase<OtherReal>& v2);
+Real VecVec(const VectorBase<Real> &v1, const VectorBase<OtherReal> &v2);
/// Returns \f$ v_1^T M v_2 \f$ .
index 2112e41206fb01f46208bba04e39287c93219d48..832970ffa8c296c89a89239572aeb08eea8dc0d2 100644 (file)
This equation is valid for k = 0...N/2-1, which is the range of the sequences B_k and
C_k. We don't use is for k = 0, which is a special case considered below. For
1 < k < N/2, it's convenient to consider the pair k, k', where k' = N/2 - k.
- Remember that C_k' = C_k^* and D_k' = D_k^* [where * is conjugation]. Also,
+ Remember that C_k' = C_k^* and D_k' = D_k^* [where * is conjugation]. Also,
1^(N/2 / N) = -1. So we have:
A_k' = C_k^* - 1^(k/N) D_k^* (a0b)
We do (a0) and (a0b) together.
index 8bd01e7d7721762b829cdd5a1d98eae2627cfaf9..3cabf1179555044e94b8c28f64810d7e2219f82b 100644 (file)
}
MatrixIndexT size = r * (r + 1) / 2 * sizeof(Real);
- void* data; // aligned memory block
+ void *data; // aligned memory block
void* free_data; // memory block to be really freed
if ((data = KALDI_MEMALIGN(16, size, &free_data)) != NULL) {
template<typename Real>
template<typename OtherReal>
-void PackedMatrix<Real>::CopyFromPacked(const PackedMatrix<OtherReal>& orig) {
+void PackedMatrix<Real>::CopyFromPacked(const PackedMatrix<OtherReal> &orig) {
KALDI_ASSERT(NumRows() == orig.NumRows());
if (sizeof(Real) == sizeof(OtherReal)) {
memcpy(data_, orig.Data(), SizeInBytes());
@@ -134,19 +134,19 @@ void PackedMatrix<Real>::CopyFromPacked(const PackedMatrix<OtherReal>& orig) {
// template instantiations.
template
-void PackedMatrix<float>::CopyFromPacked(const PackedMatrix<double>& orig);
+void PackedMatrix<float>::CopyFromPacked(const PackedMatrix<double> &orig);
template
-void PackedMatrix<double>::CopyFromPacked(const PackedMatrix<float>& orig);
+void PackedMatrix<double>::CopyFromPacked(const PackedMatrix<float> &orig);
template
-void PackedMatrix<double>::CopyFromPacked(const PackedMatrix<double>& orig);
+void PackedMatrix<double>::CopyFromPacked(const PackedMatrix<double> &orig);
template
-void PackedMatrix<float>::CopyFromPacked(const PackedMatrix<float>& orig);
+void PackedMatrix<float>::CopyFromPacked(const PackedMatrix<float> &orig);
template<typename Real>
template<typename OtherReal>
-void PackedMatrix<Real>::CopyFromVec(const SubVector<OtherReal>& vec) {
+void PackedMatrix<Real>::CopyFromVec(const SubVector<OtherReal> &vec) {
MatrixIndexT size = (NumRows()*(NumRows()+1)) / 2;
KALDI_ASSERT(vec.Dim() == size);
if (sizeof(Real) == sizeof(OtherReal)) {
// template instantiations.
template
-void PackedMatrix<float>::CopyFromVec(const SubVector<double>& orig);
+void PackedMatrix<float>::CopyFromVec(const SubVector<double> &orig);
template
-void PackedMatrix<double>::CopyFromVec(const SubVector<float>& orig);
+void PackedMatrix<double>::CopyFromVec(const SubVector<float> &orig);
template
-void PackedMatrix<double>::CopyFromVec(const SubVector<double>& orig);
+void PackedMatrix<double>::CopyFromVec(const SubVector<double> &orig);
template
-void PackedMatrix<float>::CopyFromVec(const SubVector<float>& orig);
+void PackedMatrix<float>::CopyFromVec(const SubVector<float> &orig);
template<>
-void PackedMatrix<float>::AddVec2(const float alpha, const Vector<float>& rv) {
+void PackedMatrix<float>::AddVec2(const float alpha, const Vector<float> &rv) {
KALDI_ASSERT(rv.Dim() == num_rows_);
cblas_sspr(CblasRowMajor, CblasLower, rv.Dim(), alpha, rv.Data(), 1, data_);
}
template<>
-void PackedMatrix<double>::AddVec2(const double alpha, const Vector<double>& rv) {
+void PackedMatrix<double>::AddVec2(const double alpha, const Vector<double> &rv) {
KALDI_ASSERT(rv.Dim() == num_rows_);
cblas_dspr(CblasRowMajor, CblasLower, rv.Dim(), alpha, rv.Data(), 1, data_);
}
index ee09543fdf24b902a3c3e87fdfab198f71100a42..40485f72f4c209fe56da6c1f2b62538c498313bd 100644 (file)
explicit PackedMatrix(MatrixIndexT r, MatrixResizeType resize_type = kSetZero):
data_(NULL) { Resize(r, resize_type); }
- explicit PackedMatrix(const PackedMatrix<Real>& orig) : data_(NULL) {
+ explicit PackedMatrix(const PackedMatrix<Real> &orig) : data_(NULL) {
Resize(orig.num_rows_);
CopyFromPacked(orig);
}
template<class OtherReal>
- explicit PackedMatrix(const PackedMatrix<OtherReal>& orig) : data_(NULL) {
+ explicit PackedMatrix(const PackedMatrix<OtherReal> &orig) : data_(NULL) {
Resize(orig.NumRows());
CopyFromPacked(orig);
}
void ScaleDiag(const Real alpha); // Scales diagonal by alpha.
template<class OtherReal>
- void CopyFromPacked(const PackedMatrix<OtherReal>& orig);
+ void CopyFromPacked(const PackedMatrix<OtherReal> &orig);
/// CopyFromVec just interprets the vector as having the same layout
/// as the packed matrix. Must have the same dimension, i.e.
/// orig.Dim() == (NumRows()*(NumRows()+1)) / 2;
template<class OtherReal>
- void CopyFromVec(const SubVector<OtherReal>& orig);
+ void CopyFromVec(const SubVector<OtherReal> &orig);
Real* Data() { return data_; }
const Real* Data() const { return data_; }
}
// This code is duplicated in child classes to avoid extra levels of calls.
- Real& operator() (MatrixIndexT r, MatrixIndexT c) {
+ Real &operator() (MatrixIndexT r, MatrixIndexT c) {
KALDI_ASSERT(static_cast<UnsignedMatrixIndexT>(r) <
static_cast<UnsignedMatrixIndexT>(num_rows_) &&
static_cast<UnsignedMatrixIndexT>(c) <
// *this <-- *this + alpha* rV * rV^T.
// The "2" in the name is because the argument is repeated.
- void AddVec2(const Real alpha, const Vector<Real>& rv);
+ void AddVec2(const Real alpha, const Vector<Real> &rv);
void Scale(Real c);
friend std::ostream & operator << <> (std::ostream & out,
protected:
// Will only be called from this class or derived classes.
void AddPacked(const Real alpha, const PackedMatrix<Real>& M);
- Real* data_;
+ Real *data_;
MatrixIndexT num_rows_;
private:
/// Init assumes the current contents of the class are is invalid (i.e. junk or
/// pointed to by data_ will be undefined.
void Init(MatrixIndexT dim);
#ifdef KALDI_MEMALIGN_MANUAL
- Real* free_data_;
+ Real *free_data_;
#endif
index 8bf5cb791c4366746fe5c8257852e567686b15f9..30649fc37bd7f0d1cba52b9fd32c40d5c4759928 100644 (file)
--- a/src/matrix/sp-matrix.cc
+++ b/src/matrix/sp-matrix.cc
@@ -187,7 +187,7 @@ void SpMatrix<Real>::CopyFromMat(const MatrixBase<Real> &M, SpCopyType copy_type
// diagonal update, this <-- this + diag(v)
template<class Real>
template<class OtherReal>
-void SpMatrix<Real>::AddVec(const Real alpha, const VectorBase<OtherReal>& v) {
+void SpMatrix<Real>::AddVec(const Real alpha, const VectorBase<OtherReal> &v) {
int32 num_rows = this->num_rows_;
KALDI_ASSERT(num_rows == v.Dim());
const OtherReal *src = v.Data();
@@ -202,22 +202,22 @@ void SpMatrix<Real>::AddVec(const Real alpha, const VectorBase<OtherReal>& v) {
}
// instantiate the template above.
template
-void SpMatrix<float>::AddVec(const float alpha, const VectorBase<double>& v);
+void SpMatrix<float>::AddVec(const float alpha, const VectorBase<double> &v);
template
-void SpMatrix<double>::AddVec(const double alpha, const VectorBase<float>& v);
+void SpMatrix<double>::AddVec(const double alpha, const VectorBase<float> &v);
template
-void SpMatrix<float>::AddVec(const float alpha, const VectorBase<float>& v);
+void SpMatrix<float>::AddVec(const float alpha, const VectorBase<float> &v);
template
-void SpMatrix<double>::AddVec(const double alpha, const VectorBase<double>& v);
+void SpMatrix<double>::AddVec(const double alpha, const VectorBase<double> &v);
template<>
template<>
-void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<float>& v);
+void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<float> &v);
template<>
template<>
-void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<double>& v);
+void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<double> &v);
#ifndef HAVE_ATLAS
template<>
@@ -815,7 +815,7 @@ SolveDoubleQuadraticMatrixProblem(const MatrixBase<float> &G, const SpMatrix<flo
// rank-one update, this <-- this + alpha V V'
template<>
template<>
-void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<float>& v) {
+void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<float> &v) {
KALDI_ASSERT(v.Dim() == this->NumRows());
cblas_sspr(CblasRowMajor, CblasLower, v.Dim(), alpha, v.Data(), 1,
this->data_);
// rank-one update, this <-- this + alpha V V'
template<>
template<>
-void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<double>& v) {
+void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<double> &v) {
KALDI_ASSERT(v.Dim() == num_rows_);
cblas_dspr(CblasRowMajor, CblasLower, v.Dim(), alpha, v.Data(), 1, data_);
}
template<class Real>
template<class OtherReal>
-void SpMatrix<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal>& v) {
+void SpMatrix<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal> &v) {
KALDI_ASSERT(v.Dim() == this->NumRows());
Real *data = this->data_;
const OtherReal *v_data = v.Data();
@@ -844,9 +844,9 @@ void SpMatrix<Real>::AddVec2(const Real alpha, const VectorBase<OtherReal>& v) {
// instantiate the template above.
template
-void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<double>& v);
+void SpMatrix<float>::AddVec2(const float alpha, const VectorBase<double> &v);
template
-void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<float>& v);
+void SpMatrix<double>::AddVec2(const double alpha, const VectorBase<float> &v);
double VecSpVec(const VectorBase<double> &v1, const SpMatrix<double> &M, const VectorBase<double> &v2) {
diff --git a/src/matrix/sp-matrix.h b/src/matrix/sp-matrix.h
index fda8c9293884f6d2fd5895f3de0435ea43ad21e2..a3bda3091b8f280fd1cb26f25984d2d0668a46bf 100644 (file)
--- a/src/matrix/sp-matrix.h
+++ b/src/matrix/sp-matrix.h
explicit SpMatrix(MatrixIndexT r, MatrixResizeType resize_type = kSetZero)
: PackedMatrix<Real>(r, resize_type) {}
- SpMatrix(const SpMatrix<Real>& orig)
+ SpMatrix(const SpMatrix<Real> &orig)
: PackedMatrix<Real>(orig) {}
template<class OtherReal>
- explicit SpMatrix(const SpMatrix<OtherReal>& orig)
+ explicit SpMatrix(const SpMatrix<OtherReal> &orig)
: PackedMatrix<Real>(orig) {}
// Duplicating code from PackedMatrix.h
}
- inline Real& operator() (MatrixIndexT r, MatrixIndexT c) {
+ inline Real &operator() (MatrixIndexT r, MatrixIndexT c) {
if (static_cast<UnsignedMatrixIndexT>(c) >
static_cast<UnsignedMatrixIndexT>(r))
std::swap(c, r);
/// rank-one update, this <-- this + alpha V V'
template<class OtherReal>
- void AddVec2(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVec2(const Real alpha, const VectorBase<OtherReal> &v);
/// diagonal update, this <-- this + diag(v)
template<class OtherReal>
- void AddVec(const Real alpha, const VectorBase<OtherReal>& v);
+ void AddVec(const Real alpha, const VectorBase<OtherReal> &v);
/// rank-N update:
/// if (transM == kNoTrans)
index 556de9f1adb009f635911cdbbf9939f8b4f13eaf..e06f011b8ee97796872a85892e461d444408d924 100644 (file)
--- a/src/matrix/tp-matrix.cc
+++ b/src/matrix/tp-matrix.cc
template<typename Real>
-void TpMatrix<Real>::Cholesky(const SpMatrix<Real>& rOrig) {
+void TpMatrix<Real>::Cholesky(const SpMatrix<Real> &rOrig) {
KALDI_ASSERT(rOrig.NumRows() == this->NumRows());
MatrixIndexT n = this->NumRows();
this->SetZero();
diff --git a/src/matrix/tp-matrix.h b/src/matrix/tp-matrix.h
index 51650a7ef70f37e67ffd19093fcd4db1c53b23a1..f387d3b748dec432fc33e9fb69c5bf4495484b21 100644 (file)
--- a/src/matrix/tp-matrix.h
+++ b/src/matrix/tp-matrix.h
// Duplicating code from PackedMatrix.h
}
- Real& operator() (MatrixIndexT r, MatrixIndexT c) {
+ Real &operator() (MatrixIndexT r, MatrixIndexT c) {
KALDI_ASSERT(static_cast<UnsignedMatrixIndexT>(r) <
static_cast<UnsignedMatrixIndexT>(this->num_rows_));
KALDI_ASSERT(static_cast<UnsignedMatrixIndexT>(c) <=
index 056a07aa8476f0fb9307ca545fc50b5c289c1666..10919447794a5fad000f28b268cefd4f3d4a5c12 100644 (file)
class Sigmoid : public Component {
public:
- Sigmoid(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ Sigmoid(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: Component(dim_in, dim_out, nnet)
{ }
~Sigmoid()
return kSigmoid;
}
- void PropagateFnc(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+ void PropagateFnc(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
// y = 1/(1+e^-x)
cu::Sigmoid(in, out);
}
- void BackpropagateFnc(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const CuMatrix<BaseFloat> &in_err, CuMatrix<BaseFloat> *out_err) {
// ey = y(1-y)ex
- const CuMatrix<BaseFloat>& y = nnet_->PropagateBuffer()[nnet_->IndexOfLayer(*this)+1];
+ const CuMatrix<BaseFloat> &y = nnet_->PropagateBuffer()[nnet_->IndexOfLayer(*this)+1];
cu::DiffSigmoid(in_err, y, out_err);
}
};
class Softmax : public Component {
public:
- Softmax(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ Softmax(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: Component(dim_in, dim_out, nnet)
{ }
~Softmax()
return kSoftmax;
}
- void PropagateFnc(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+ void PropagateFnc(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
// y = e^x_j/sum_j(e^x_j)
cu::Softmax(in, out);
}
- void BackpropagateFnc(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const CuMatrix<BaseFloat> &in_err, CuMatrix<BaseFloat> *out_err) {
// simply copy the error
// (ie. assume crossentropy error function,
// while in_err contains (net_output-target) :
index 5d11d9b9c607d07d633fd704e8aff8ac2c5cb5b9..af535fa7c949b7739e591250a1241babd2150b74 100644 (file)
class BiasedLinearity : public UpdatableComponent {
public:
- BiasedLinearity(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ BiasedLinearity(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: UpdatableComponent(dim_in, dim_out, nnet),
linearity_(dim_out, dim_in), bias_(dim_out),
linearity_corr_(dim_out, dim_in), bias_corr_(dim_out)
return kBiasedLinearity;
}
- void ReadData(std::istream& is, bool binary) {
+ void ReadData(std::istream &is, bool binary) {
linearity_.Read(is, binary);
bias_.Read(is, binary);
KALDI_ASSERT(bias_.Dim() == output_dim_);
}
- void WriteData(std::ostream& os, bool binary) const {
+ void WriteData(std::ostream &os, bool binary) const {
linearity_.Write(os, binary);
bias_.Write(os, binary);
}
- void PropagateFnc(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+ void PropagateFnc(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
// precopy bias
out->AddScaledRow(1.0, bias_, 0.0);
// multiply by weights^t
out->AddMatMat(1.0, in, kNoTrans, linearity_, kTrans, 1.0);
}
- void BackpropagateFnc(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const CuMatrix<BaseFloat> &in_err, CuMatrix<BaseFloat> *out_err) {
// multiply error by weights
out_err->AddMatMat(1.0, in_err, kNoTrans, linearity_, kNoTrans, 0.0);
}
- void Update(const CuMatrix<BaseFloat>& input, const CuMatrix<BaseFloat>& err) {
+ void Update(const CuMatrix<BaseFloat> &input, const CuMatrix<BaseFloat> &err) {
// compute gradient
linearity_corr_.AddMatMat(1.0, err, kTrans, input, kNoTrans, momentum_);
bias_corr_.AddColSum(1.0, err, momentum_);
index 29c9eb37d037ef4009907df12370b6b745dbf8e4..5a42bf1e8487b7facd5fc2236e6e603e03fd39e3 100644 (file)
-void CacheTgtMat::AddData(const CuMatrix<BaseFloat>& features, const CuMatrix<BaseFloat>& targets) {
+void CacheTgtMat::AddData(const CuMatrix<BaseFloat> &features, const CuMatrix<BaseFloat> &targets) {
if (state_ == FULL) {
KALDI_ERR << "Cannot add data, cache already full";
}
-void CacheTgtMat::GetBunch(CuMatrix<BaseFloat>* features, CuMatrix<BaseFloat>* targets) {
+void CacheTgtMat::GetBunch(CuMatrix<BaseFloat> *features, CuMatrix<BaseFloat> *targets) {
if (state_ == EMPTY) {
KALDI_ERR << "GetBunch on empty cache!!!";
}
index a3448b2e7a2d74e2337863fbc8a90a000fc14f5d..ae47be02423a84fce462d123ba810a980197896e 100644 (file)
void Init(int32 cachesize, int32 bunchsize);
/// Add data to cache
- void AddData(const CuMatrix<BaseFloat>& features, const CuMatrix<BaseFloat>& targets);
+ void AddData(const CuMatrix<BaseFloat> &features, const CuMatrix<BaseFloat> &targets);
/// Randomizes the cache
void Randomize();
/// Get the bunch of training data from cache
- void GetBunch(CuMatrix<BaseFloat>* features, CuMatrix<BaseFloat>* targets);
+ void GetBunch(CuMatrix<BaseFloat> *features, CuMatrix<BaseFloat> *targets);
/// Returns true if the cache was completely filled
diff --git a/src/nnet/nnet-cache.cc b/src/nnet/nnet-cache.cc
index 6bb73db68ad2332a66fee5bf3bcc0a857dec0fdb..896f01b45deee15ba9bd113e9af36695dc69c2f8 100644 (file)
--- a/src/nnet/nnet-cache.cc
+++ b/src/nnet/nnet-cache.cc
-void Cache::AddData(const CuMatrix<BaseFloat>& features, const std::vector<int32>& targets) {
+void Cache::AddData(const CuMatrix<BaseFloat> &features, const std::vector<int32> &targets) {
if (state_ == FULL) {
KALDI_ERR << "Cannot add data, cache already full";
}
-void Cache::GetBunch(CuMatrix<BaseFloat>* features, std::vector<int32>* targets) {
+void Cache::GetBunch(CuMatrix<BaseFloat> *features, std::vector<int32> *targets) {
if (state_ == EMPTY) {
KALDI_ERR << "GetBunch on empty cache!!!";
}
diff --git a/src/nnet/nnet-cache.h b/src/nnet/nnet-cache.h
index ce6196282b68e4bec6b4067143e23e7de3c3e222..602b13e513ce4cb71e1ef84cb7b06d1faf349bfc 100644 (file)
--- a/src/nnet/nnet-cache.h
+++ b/src/nnet/nnet-cache.h
void Init(int32 cachesize, int32 bunchsize);
/// Add data to cache
- void AddData(const CuMatrix<BaseFloat>& features, const std::vector<int32>& targets);
+ void AddData(const CuMatrix<BaseFloat> &features, const std::vector<int32> &targets);
/// Randomizes the cache
void Randomize();
/// Get the bunch of training data from cache
- void GetBunch(CuMatrix<BaseFloat>* features, std::vector<int32>* targets);
+ void GetBunch(CuMatrix<BaseFloat> *features, std::vector<int32> *targets);
/// Returns true if the cache was completely filled
index 7b17dc171cbf9c71344865c5b14a8bc467b553ce..629e7c31a827be64ca9be56a8dce034d388a78e1 100644 (file)
return NULL;
}
-Component::ComponentType Component::MarkerToType(const std::string& s) {
+Component::ComponentType Component::MarkerToType(const std::string &s) {
int32 N=sizeof(kMarkerMap)/sizeof(kMarkerMap[0]);
for(int i=0; i<N; i++) {
if (0 == strcmp(kMarkerMap[i].value, s.c_str()))
}
-Component* Component::Read(std::istream& is, bool binary, Nnet* nnet) {
+Component* Component::Read(std::istream &is, bool binary, Nnet *nnet) {
int32 dim_out, dim_in;
std::string token;
ReadBasicType(is, binary, &dim_out);
ReadBasicType(is, binary, &dim_in);
- Component* p_comp=NULL;
+ Component *p_comp=NULL;
switch (comp_type) {
case Component::kBiasedLinearity :
p_comp = new BiasedLinearity(dim_in, dim_out, nnet);
}
-void Component::Write(std::ostream& os, bool binary) const {
+void Component::Write(std::ostream &os, bool binary) const {
WriteToken(os, binary, Component::TypeToMarker(GetType()));
WriteBasicType(os, binary, OutputDim());
WriteBasicType(os, binary, InputDim());
index cd16c43d1cb4a6487debcf3a3b12afcdd7532d45..da7adf5ce3ae7c43dbc56ad284bf5c46335b33a2 100644 (file)
/// Pair of type and marker
struct key_value {
const Component::ComponentType key;
- const char* value;
+ const char *value;
};
/// Mapping of types and markers
static const struct key_value kMarkerMap[];
/// Convert component type to marker
static const char* TypeToMarker(ComponentType t);
/// Convert marker to component type
- static ComponentType MarkerToType(const std::string& s);
+ static ComponentType MarkerToType(const std::string &s);
- Component(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet* nnet)
+ Component(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet *nnet)
: input_dim_(input_dim), output_dim_(output_dim), nnet_(nnet) { }
virtual ~Component() { }
}
/// Perform forward pass propagateion Input->Output
- void Propagate(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out);
+ void Propagate(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out);
/// Perform backward pass propagateion ErrorInput->ErrorOutput
- void Backpropagate(const CuMatrix<BaseFloat>& in_err,
- CuMatrix<BaseFloat>* out_err);
+ void Backpropagate(const CuMatrix<BaseFloat> &in_err,
+ CuMatrix<BaseFloat> *out_err);
/// Read component from stream
- static Component* Read(std::istream& is, bool binary, Nnet* nnet);
+ static Component* Read(std::istream &is, bool binary, Nnet *nnet);
/// Write component to stream
- void Write(std::ostream& os, bool binary) const;
+ void Write(std::ostream &os, bool binary) const;
// abstract interface for propagation/backpropagation
protected:
/// Forward pass transformation (to be implemented by descendents...)
- virtual void PropagateFnc(const CuMatrix<BaseFloat>& in,
- CuMatrix<BaseFloat>* out) = 0;
+ virtual void PropagateFnc(const CuMatrix<BaseFloat> &in,
+ CuMatrix<BaseFloat> *out) = 0;
/// Backward pass transformation (to be implemented by descendents...)
- virtual void BackpropagateFnc(const CuMatrix<BaseFloat>& in_err,
- CuMatrix<BaseFloat>* out_err) = 0;
+ virtual void BackpropagateFnc(const CuMatrix<BaseFloat> &in_err,
+ CuMatrix<BaseFloat> *out_err) = 0;
/// Reads the component content
- virtual void ReadData(std::istream& is, bool binary) { }
+ virtual void ReadData(std::istream &is, bool binary) { }
/// Writes the component content
- virtual void WriteData(std::ostream& os, bool binary) const { }
+ virtual void WriteData(std::ostream &os, bool binary) const { }
// data members
MatrixIndexT input_dim_; ///< Size of input vectors
MatrixIndexT output_dim_; ///< Size of output vectors
- Nnet* nnet_; ///< Pointer to the whole network
+ Nnet *nnet_; ///< Pointer to the whole network
private:
KALDI_DISALLOW_COPY_AND_ASSIGN(Component);
};
*/
class UpdatableComponent : public Component {
public:
- UpdatableComponent(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet* nnet)
+ UpdatableComponent(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet *nnet)
: Component(input_dim, output_dim, nnet),
learn_rate_(0.0), momentum_(0.0), l2_penalty_(0.0), l1_penalty_(0.0) { }
virtual ~UpdatableComponent() { }
}
/// Compute gradient and update parameters
- virtual void Update(const CuMatrix<BaseFloat>& input,
- const CuMatrix<BaseFloat>& err) = 0;
+ virtual void Update(const CuMatrix<BaseFloat> &input,
+ const CuMatrix<BaseFloat> &err) = 0;
/// Sets the learning rate of gradient descent
void SetLearnRate(BaseFloat lrate) {
-inline void Component::Propagate(const CuMatrix<BaseFloat>& in,
- CuMatrix<BaseFloat>* out) {
+inline void Component::Propagate(const CuMatrix<BaseFloat> &in,
+ CuMatrix<BaseFloat> *out) {
if (input_dim_ != in.NumCols()) {
KALDI_ERR << "Nonmatching dims, component:" << input_dim_ << " data:" << in.NumCols();
}
}
-inline void Component::Backpropagate(const CuMatrix<BaseFloat>& in_err,
- CuMatrix<BaseFloat>* out_err) {
+inline void Component::Backpropagate(const CuMatrix<BaseFloat> &in_err,
+ CuMatrix<BaseFloat> *out_err) {
if (output_dim_ != in_err.NumCols()) {
KALDI_ERR << "Nonmatching dims, component:" << output_dim_
<< " data:" << in_err.NumCols();
diff --git a/src/nnet/nnet-loss.cc b/src/nnet/nnet-loss.cc
index 9caef6c76a8f3d8353adb38c2f635758a6a43ef7..8521a23c45bab01f589b79571fbd072e6eb234b3 100644 (file)
--- a/src/nnet/nnet-loss.cc
+++ b/src/nnet/nnet-loss.cc
namespace kaldi {
-void Xent::Eval(const CuMatrix<BaseFloat>& net_out, const CuMatrix<BaseFloat>& target, CuMatrix<BaseFloat>* diff) {
+void Xent::Eval(const CuMatrix<BaseFloat> &net_out, const CuMatrix<BaseFloat> &target, CuMatrix<BaseFloat> *diff) {
KALDI_ASSERT(net_out.NumCols() == target.NumCols());
KALDI_ASSERT(net_out.NumRows() == target.NumRows());
}
-void Xent::EvalVec(const CuMatrix<BaseFloat>& net_out, const std::vector<int32>& target, CuMatrix<BaseFloat>* diff) {
+void Xent::EvalVec(const CuMatrix<BaseFloat> &net_out, const std::vector<int32> &target, CuMatrix<BaseFloat> *diff) {
// evaluate the frame-level classification
int32 correct=0;
cu::FindRowMaxId(net_out, &max_id_);
-void Mse::Eval(const CuMatrix<BaseFloat>& net_out, const CuMatrix<BaseFloat>& target, CuMatrix<BaseFloat>* diff) {
+void Mse::Eval(const CuMatrix<BaseFloat> &net_out, const CuMatrix<BaseFloat> &target, CuMatrix<BaseFloat> *diff) {
KALDI_ASSERT(net_out.NumCols() == target.NumCols());
KALDI_ASSERT(net_out.NumRows() == target.NumRows());
diff->Resize(net_out.NumRows(), net_out.NumCols());
diff --git a/src/nnet/nnet-loss.h b/src/nnet/nnet-loss.h
index 77b6e469792304fecf2619c518753f3461fd26ae..26d9ff3ca8192b4cdabf989dd591cead3fe4fe53 100644 (file)
--- a/src/nnet/nnet-loss.h
+++ b/src/nnet/nnet-loss.h
~Xent() { }
/// Evaluate cross entropy from hard labels
- void Eval(const CuMatrix<BaseFloat>& net_out, const CuMatrix<BaseFloat>& target,
- CuMatrix<BaseFloat>* diff);
+ void Eval(const CuMatrix<BaseFloat> &net_out, const CuMatrix<BaseFloat> &target,
+ CuMatrix<BaseFloat> *diff);
/// Evaluate cross entropy from soft labels
- void EvalVec(const CuMatrix<BaseFloat>& net_out, const std::vector<int32>& target,
- CuMatrix<BaseFloat>* diff);
+ void EvalVec(const CuMatrix<BaseFloat> &net_out, const std::vector<int32> &target,
+ CuMatrix<BaseFloat> *diff);
/// Generate string with error report
std::string Report();
~Mse() { }
/// Evaluate mean square error from target values
- void Eval(const CuMatrix<BaseFloat>& net_out, const CuMatrix<BaseFloat>& target,
- CuMatrix<BaseFloat>* diff);
+ void Eval(const CuMatrix<BaseFloat> &net_out, const CuMatrix<BaseFloat> &target,
+ CuMatrix<BaseFloat> *diff);
/// Generate string with error report
std::string Report();
diff --git a/src/nnet/nnet-nnet.cc b/src/nnet/nnet-nnet.cc
index ebe01e2db2dc9dec83d5d675c8471c0e33a677a9..614af7fb94acfec93c507dfc370d182734460742 100644 (file)
--- a/src/nnet/nnet-nnet.cc
+++ b/src/nnet/nnet-nnet.cc
namespace kaldi {
-void Nnet::Propagate(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+void Nnet::Propagate(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
KALDI_ASSERT(NULL != out);
if (LayerCount() == 0) {
nnet_[i]->Propagate(propagate_buf_[i], &propagate_buf_[i+1]);
}
- CuMatrix<BaseFloat>& mat = propagate_buf_[nnet_.size()];
+ CuMatrix<BaseFloat> &mat = propagate_buf_[nnet_.size()];
out->Resize(mat.NumRows(), mat.NumCols());
out->CopyFromMat(mat);
}
-void Nnet::Backpropagate(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>* out_err) {
+void Nnet::Backpropagate(const CuMatrix<BaseFloat> &in_err, CuMatrix<BaseFloat> *out_err) {
if(LayerCount() == 0) { KALDI_ERR << "Cannot backpropagate on empty network"; }
// we need at least L+1 input bufers
// don't copy the in_err to buffers, use it as is...
int32 i = nnet_.size()-1;
if (nnet_[i]->IsUpdatable()) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
if (uc->GetLearnRate() > 0.0) {
uc->Update(propagate_buf_[i], in_err);
}
// backpropagate by using buffers
for(i--; i >= 1; i--) {
if (nnet_[i]->IsUpdatable()) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
if (uc->GetLearnRate() > 0.0) {
uc->Update(propagate_buf_[i], backpropagate_buf_[i]);
}
@@ -104,7 +104,7 @@ void Nnet::Backpropagate(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>*
// update first layer
if (nnet_[0]->IsUpdatable() && 0 >= backprop_stop) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[0]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[0]);
if (uc->GetLearnRate() > 0.0) {
uc->Update(propagate_buf_[0], backpropagate_buf_[0]);
}
@@ -120,7 +120,7 @@ void Nnet::Backpropagate(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>*
}
-void Nnet::Feedforward(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+void Nnet::Feedforward(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
KALDI_ASSERT(NULL != out);
if (LayerCount() == 0) {
}
-void Nnet::Read(std::istream& in, bool binary) {
+void Nnet::Read(std::istream &in, bool binary) {
// get the network layers from a factory
Component *comp;
while (NULL != (comp = Component::Read(in, binary, this))) {
}
-void Nnet::SetLearnRate(BaseFloat lrate, const char* lrate_factors) {
+void Nnet::SetLearnRate(BaseFloat lrate, const char *lrate_factors) {
// split lrate_factors to a vector
std::vector<BaseFloat> lrate_factor_vec;
if (NULL != lrate_factors) {
- char* copy = new char[strlen(lrate_factors)+1];
+ char *copy = new char[strlen(lrate_factors)+1];
strcpy(copy, lrate_factors);
- char* tok = NULL;
+ char *tok = NULL;
while(NULL != (tok = strtok((tok==NULL?copy:NULL),",:; "))) {
lrate_factor_vec.push_back(atof(tok));
}
diff --git a/src/nnet/nnet-nnet.h b/src/nnet/nnet-nnet.h
index 82196fcf2bba9c3c4a44476d2700ab143b0f824f..6ecf3279629903720770f00c567d113084c5641d 100644 (file)
--- a/src/nnet/nnet-nnet.h
+++ b/src/nnet/nnet-nnet.h
public:
/// Perform forward pass through the network
- void Propagate(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out);
+ void Propagate(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out);
/// Perform backward pass through the network
- void Backpropagate(const CuMatrix<BaseFloat>& in_err, CuMatrix<BaseFloat>* out_err);
+ void Backpropagate(const CuMatrix<BaseFloat> &in_err, CuMatrix<BaseFloat> *out_err);
/// Perform forward pass through the network, don't keep buffers (use it when not training)
- void Feedforward(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out);
+ void Feedforward(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out);
MatrixIndexT InputDim() const; ///< Dimensionality of the input features
MatrixIndexT OutputDim() const; ///< Dimensionality of the desired vectors
}
/// Read the MLP from file (can add layers to exisiting instance of Nnet)
- void Read(const std::string& file);
+ void Read(const std::string &file);
/// Read the MLP from stream (can add layers to exisiting instance of Nnet)
- void Read(std::istream& in, bool binary);
+ void Read(std::istream &in, bool binary);
/// Write MLP to file
- void Write(const std::string& file, bool binary);
+ void Write(const std::string &file, bool binary);
/// Write MLP to stream
- void Write(std::ostream& out, bool binary);
+ void Write(std::ostream &out, bool binary);
/// Set the learning rate values to trainable layers,
/// factors can disable training of individual layers
- void SetLearnRate(BaseFloat lrate, const char* lrate_factors);
+ void SetLearnRate(BaseFloat lrate, const char *lrate_factors);
/// Get the global learning rate value
BaseFloat GetLearnRate() {
return learn_rate_;
private:
/// Creates a component by reading from stream, return NULL if no more components
- static Component* ComponentFactory(std::istream& in, bool binary, Nnet* nnet);
+ static Component* ComponentFactory(std::istream &in, bool binary, Nnet *nnet);
/// Dumps individual component to stream
- static void ComponentDumper(std::ostream& out, bool binary, const Component& comp);
+ static void ComponentDumper(std::ostream &out, bool binary, const Component &comp);
typedef std::vector<Component*> NnetType;
}
-inline int32 Nnet::IndexOfLayer(const Component& comp) const {
+inline int32 Nnet::IndexOfLayer(const Component &comp) const {
for(int32 i=0; i<LayerCount(); i++) {
if (&comp == nnet_[i]) return i;
}
}
-inline void Nnet::Read(const std::string& file) {
+inline void Nnet::Read(const std::string &file) {
bool binary;
Input in(file, &binary);
Read(in.Stream(), binary);
}
-inline void Nnet::Write(const std::string& file, bool binary) {
+inline void Nnet::Write(const std::string &file, bool binary) {
Output out(file, binary, true);
Write(out.Stream(), binary);
out.Close();
}
-inline void Nnet::Write(std::ostream& out, bool binary) {
+inline void Nnet::Write(std::ostream &out, bool binary) {
for(int32 i=0; i<LayerCount(); i++) {
nnet_[i]->Write(out, binary);
}
diff --git a/src/nnet/nnet-rbm.h b/src/nnet/nnet-rbm.h
index fb32b8b6293bc0b6bb25430daf914e1c95e866ad..d93f6c49ef80d27c6cda3d9519cfac03552dd4fa 100644 (file)
--- a/src/nnet/nnet-rbm.h
+++ b/src/nnet/nnet-rbm.h
GAUSSIAN
} RbmNodeType;
- RbmBase(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ RbmBase(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: UpdatableComponent(dim_in, dim_out, nnet)
{ }
/*Is included in Component:: itf
virtual void Propagate(
- const CuMatrix<BaseFloat>& vis_probs,
- CuMatrix<BaseFloat>* hid_probs
+ const CuMatrix<BaseFloat> &vis_probs,
+ CuMatrix<BaseFloat> *hid_probs
) = 0;
*/
virtual void Reconstruct(
- const CuMatrix<BaseFloat>& hid_state,
- CuMatrix<BaseFloat>* vis_probs
+ const CuMatrix<BaseFloat> &hid_state,
+ CuMatrix<BaseFloat> *vis_probs
) = 0;
virtual void RbmUpdate(
- const CuMatrix<BaseFloat>& pos_vis,
- const CuMatrix<BaseFloat>& pos_hid,
- const CuMatrix<BaseFloat>& neg_vis,
- const CuMatrix<BaseFloat>& neg_hid
+ const CuMatrix<BaseFloat> &pos_vis,
+ const CuMatrix<BaseFloat> &pos_hid,
+ const CuMatrix<BaseFloat> &neg_vis,
+ const CuMatrix<BaseFloat> &neg_hid
) = 0;
virtual RbmNodeType VisType() = 0;
return kRbm;
}
- void ReadData(std::istream& is, bool binary) {
+ void ReadData(std::istream &is, bool binary) {
std::string vis_node_type, hid_node_type;
ReadToken(is, binary, &vis_node_type);
ReadToken(is, binary, &hid_node_type);
KALDI_ASSERT(hid_bias_.Dim() == output_dim_);
}
- void WriteData(std::ostream& os, bool binary) {
+ void WriteData(std::ostream &os, bool binary) {
switch (vis_type_) {
case BERNOULLI : WriteToken(os,binary,"BERN"); break;
case GAUSSIAN : WriteToken(os,binary,"GAUSS"); break;
// UpdatableComponent API
- void PropagateFnc(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+ void PropagateFnc(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
// precopy bias
out->AddScaledRow(1.0, hid_bias_, 0.0);
// multiply by weights^t
}
}
- void BackpropagateFnc(const CuMatrix<BaseFloat>& in, CuMatrix<BaseFloat>* out) {
+ void BackpropagateFnc(const CuMatrix<BaseFloat> &in, CuMatrix<BaseFloat> *out) {
KALDI_ERR << "Cannot backpropagate through RBM!"
<< "Better convert it to <BiasedLinearity>";
}
- virtual void Update(const CuMatrix<BaseFloat>& input,
- const CuMatrix<BaseFloat>& err) {
+ virtual void Update(const CuMatrix<BaseFloat> &input,
+ const CuMatrix<BaseFloat> &err) {
KALDI_ERR << "Cannot update RBM by backprop!"
<< "Better convert it to <BiasedLinearity>";
}
// RBM training API
- void Reconstruct(const CuMatrix<BaseFloat>& hid_state, CuMatrix<BaseFloat>* vis_probs) {
+ void Reconstruct(const CuMatrix<BaseFloat> &hid_state, CuMatrix<BaseFloat> *vis_probs) {
// check the dim
if (output_dim_ != hid_state.NumCols()) {
KALDI_ERR << "Nonmatching dims, component:" << output_dim_ << " data:" << hid_state.NumCols();
}
}
- void RbmUpdate(const CuMatrix<BaseFloat>& pos_vis, const CuMatrix<BaseFloat>& pos_hid, const CuMatrix<BaseFloat>& neg_vis, const CuMatrix<BaseFloat>& neg_hid) {
+ void RbmUpdate(const CuMatrix<BaseFloat> &pos_vis, const CuMatrix<BaseFloat> &pos_hid, const CuMatrix<BaseFloat> &neg_vis, const CuMatrix<BaseFloat> &neg_hid) {
assert(pos_vis.NumRows() == pos_hid.NumRows() &&
pos_vis.NumRows() == neg_vis.NumRows() &&
pos_vis.NumRows() == neg_hid.NumRows() &&
diff --git a/src/nnet/nnet-test.cc b/src/nnet/nnet-test.cc
index 21622c23f58c05ba1cd4c27355f5667a57cf4465..a6472370d43c1d2c0d0fa50a5b16c22653f142f7 100644 (file)
--- a/src/nnet/nnet-test.cc
+++ b/src/nnet/nnet-test.cc
static void UnitTestNnet() {
try {
UnitTestSomething();
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index 50dc7214e359462533f8f71740f87d4afc815105..c9cc8b3dabebeb5e5d2f5440b2605fbbd1d013b7 100644 (file)
class Sigmoid : public Component {
public:
- Sigmoid(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ Sigmoid(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: Component(dim_in, dim_out, nnet)
{ }
~Sigmoid()
return kSigmoid;
}
- void PropagateFnc(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out) {
+ void PropagateFnc(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out) {
// y = 1/(1+e^-x)
for(MatrixIndexT r=0; r<out->NumRows(); r++) {
for(MatrixIndexT c=0; c<out->NumCols(); c++) {
}
}
- void BackpropagateFnc(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const Matrix<BaseFloat> &in_err, Matrix<BaseFloat> *out_err) {
// ey = y(1-y)ex
- const Matrix<BaseFloat>& y = nnet_->PropagateBuffer()[nnet_->IndexOfLayer(*this)+1];
+ const Matrix<BaseFloat> &y = nnet_->PropagateBuffer()[nnet_->IndexOfLayer(*this)+1];
for(MatrixIndexT r=0; r<out_err->NumRows(); r++) {
for(MatrixIndexT c=0; c<out_err->NumCols(); c++) {
class Softmax : public Component {
public:
- Softmax(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ Softmax(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: Component(dim_in, dim_out, nnet)
{ }
~Softmax()
return kSoftmax;
}
- void PropagateFnc(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out) {
+ void PropagateFnc(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out) {
// y = e^x_j/sum_j(e^x_j)
out->CopyFromMat(in);
for(MatrixIndexT r=0; r<out->NumRows(); r++) {
}
}
- void BackpropagateFnc(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const Matrix<BaseFloat> &in_err, Matrix<BaseFloat> *out_err) {
// simply copy the error
// (ie. assume crossentropy error function,
// while in_err contains (net_output-target) :
index a86d19f8b31d576444f37fb3b6a1aca16a32bcc7..99d302492ab8d577f889f5c2275ae9ccb5fe31b8 100644 (file)
class BiasedLinearity : public UpdatableComponent {
public:
- BiasedLinearity(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet* nnet)
+ BiasedLinearity(MatrixIndexT dim_in, MatrixIndexT dim_out, Nnet *nnet)
: UpdatableComponent(dim_in, dim_out, nnet),
linearity_(dim_out, dim_in), bias_(dim_out),
linearity_corr_(dim_out, dim_in), bias_corr_(dim_out)
return kBiasedLinearity;
}
- void ReadData(std::istream& is, bool binary) {
+ void ReadData(std::istream &is, bool binary) {
linearity_.Read(is, binary);
bias_.Read(is, binary);
KALDI_ASSERT(bias_.Dim() == output_dim_);
}
- void WriteData(std::ostream& os, bool binary) const {
+ void WriteData(std::ostream &os, bool binary) const {
linearity_.Write(os, binary);
bias_.Write(os, binary);
}
- void PropagateFnc(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out) {
+ void PropagateFnc(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out) {
// precopy bias
for (MatrixIndexT i=0; i<out->NumRows(); i++) {
out->CopyRowFromVec(bias_, i);
out->AddMatMat(1.0, in, kNoTrans, linearity_, kTrans, 1.0);
}
- void BackpropagateFnc(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out_err) {
+ void BackpropagateFnc(const Matrix<BaseFloat> &in_err, Matrix<BaseFloat> *out_err) {
// multiply error by weights
out_err->AddMatMat(1.0, in_err, kNoTrans, linearity_, kNoTrans, 0.0);
}
- void Update(const Matrix<BaseFloat>& input, const Matrix<BaseFloat>& err) {
+ void Update(const Matrix<BaseFloat> &input, const Matrix<BaseFloat> &err) {
// compute gradient
linearity_corr_.AddMatMat(1.0, err, kTrans, input, kNoTrans, momentum_);
index 8bcf116b8dda589d407bb0d4c926f7d9e20d0a4f..7c9e6eaf79a3497d94eb939d3e5eb96f11aec1af 100644 (file)
return NULL;
}
-Component::ComponentType Component::TokenToType(const std::string& s) {
+Component::ComponentType Component::TokenToType(const std::string &s) {
int32 N=sizeof(kTokenMap)/sizeof(kTokenMap[0]);
for(int i=0; i<N; i++) {
if (0 == strcmp(kTokenMap[i].value, s.c_str()))
}
-Component* Component::Read(std::istream& is, bool binary, Nnet* nnet) {
+Component* Component::Read(std::istream &is, bool binary, Nnet *nnet) {
int32 dim_out, dim_in;
std::string token;
ReadBasicType(is, binary, &dim_out);
ReadBasicType(is, binary, &dim_in);
- Component* p_comp;
+ Component *p_comp;
switch (comp_type) {
case Component::kBiasedLinearity :
p_comp = new BiasedLinearity(dim_in, dim_out, nnet);
}
-void Component::Write(std::ostream& os, bool binary) const {
+void Component::Write(std::ostream &os, bool binary) const {
WriteToken(os, binary, Component::TypeToToken(GetType()));
WriteBasicType(os, binary, OutputDim());
WriteBasicType(os, binary, InputDim());
index a62f5cba658e18d9fc238d4680f10655a8e7cc90..10b7e42ddb6cec143d3bfd7059209052f6c8d4c2 100644 (file)
/// Pair of type and token
struct key_value {
const Component::ComponentType key;
- const char* value;
+ const char *value;
};
/// Mapping of types and tokens
static const struct key_value kTokenMap[];
/// Convert component type to token
static const char* TypeToToken(ComponentType t);
/// Convert token to component type
- static ComponentType TokenToType(const std::string& s);
+ static ComponentType TokenToType(const std::string &s);
- Component(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet* nnet)
+ Component(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet *nnet)
: input_dim_(input_dim), output_dim_(output_dim), nnet_(nnet) { }
virtual ~Component() { }
}
/// Perform forward pass propagateion Input->Output
- void Propagate(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out);
+ void Propagate(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out);
/// Perform backward pass propagateion ErrorInput->ErrorOutput
- void Backpropagate(const Matrix<BaseFloat>& in_err,
- Matrix<BaseFloat>* out_err);
+ void Backpropagate(const Matrix<BaseFloat> &in_err,
+ Matrix<BaseFloat> *out_err);
/// Read component from stream
- static Component* Read(std::istream& is, bool binary, Nnet* nnet);
+ static Component* Read(std::istream &is, bool binary, Nnet *nnet);
/// Write component to stream
- void Write(std::ostream& os, bool binary) const;
+ void Write(std::ostream &os, bool binary) const;
// abstract interface for propagation/backpropagation
protected:
/// Forward pass transformation (to be implemented by descendents...)
- virtual void PropagateFnc(const Matrix<BaseFloat>& in,
- Matrix<BaseFloat>* out) = 0;
+ virtual void PropagateFnc(const Matrix<BaseFloat> &in,
+ Matrix<BaseFloat> *out) = 0;
/// Backward pass transformation (to be implemented by descendents...)
- virtual void BackpropagateFnc(const Matrix<BaseFloat>& in_err,
- Matrix<BaseFloat>* out_err) = 0;
+ virtual void BackpropagateFnc(const Matrix<BaseFloat> &in_err,
+ Matrix<BaseFloat> *out_err) = 0;
/// Reads the component content
- virtual void ReadData(std::istream& is, bool binary) { }
+ virtual void ReadData(std::istream &is, bool binary) { }
/// Writes the component content
- virtual void WriteData(std::ostream& os, bool binary) const { }
+ virtual void WriteData(std::ostream &os, bool binary) const { }
// data members
MatrixIndexT input_dim_; ///< Size of input vectors
MatrixIndexT output_dim_; ///< Size of output vectors
- Nnet* nnet_; ///< Pointer to the whole network
+ Nnet *nnet_; ///< Pointer to the whole network
private:
KALDI_DISALLOW_COPY_AND_ASSIGN(Component);
};
*/
class UpdatableComponent : public Component {
public:
- UpdatableComponent(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet* nnet)
+ UpdatableComponent(MatrixIndexT input_dim, MatrixIndexT output_dim, Nnet *nnet)
: Component(input_dim, output_dim, nnet),
learn_rate_(0.0), momentum_(0.0), l2_penalty_(0.0), l1_penalty_(0.0) { }
virtual ~UpdatableComponent() { }
}
/// Compute gradient and update parameters
- virtual void Update(const Matrix<BaseFloat>& input,
- const Matrix<BaseFloat>& err) = 0;
+ virtual void Update(const Matrix<BaseFloat> &input,
+ const Matrix<BaseFloat> &err) = 0;
/// Sets the learning rate of gradient descent
void LearnRate(BaseFloat lrate) {
-inline void Component::Propagate(const Matrix<BaseFloat>& in,
- Matrix<BaseFloat>* out) {
+inline void Component::Propagate(const Matrix<BaseFloat> &in,
+ Matrix<BaseFloat> *out) {
if (input_dim_ != in.NumCols()) {
KALDI_ERR << "Nonmatching dims, component:" << input_dim_ << " data:" << in.NumCols();
}
}
-inline void Component::Backpropagate(const Matrix<BaseFloat>& in_err,
- Matrix<BaseFloat>* out_err) {
+inline void Component::Backpropagate(const Matrix<BaseFloat> &in_err,
+ Matrix<BaseFloat> *out_err) {
if (output_dim_ != in_err.NumCols()) {
KALDI_ERR << "Nonmatching dims, component:" << output_dim_
<< " data:" << in_err.NumCols();
index e8e90683e18ecc60499a1e1b186e9c53e4ae6111..138f878fa00c93fc4cc8d04d09e8de1803880026 100644 (file)
namespace kaldi {
-void Xent::Eval(const Matrix<BaseFloat>& net_out, const Matrix<BaseFloat>& target, Matrix<BaseFloat>* diff) {
+void Xent::Eval(const Matrix<BaseFloat> &net_out, const Matrix<BaseFloat> &target, Matrix<BaseFloat> *diff) {
KALDI_ASSERT(net_out.NumCols() == target.NumCols());
KALDI_ASSERT(net_out.NumRows() == target.NumRows());
diff->Resize(net_out.NumRows(), net_out.NumCols(), kUndefined);
}
-void Xent::Eval(const Matrix<BaseFloat>& net_out, const std::vector<int32>& target, Matrix<BaseFloat>* diff) {
+void Xent::Eval(const Matrix<BaseFloat> &net_out, const std::vector<int32> &target, Matrix<BaseFloat> *diff) {
KALDI_ASSERT(net_out.NumRows() == (int32)target.size());
// check the labels
}
-int32 Xent::Correct(const Matrix<BaseFloat>& net_out, const std::vector<int32>& target) {
+int32 Xent::Correct(const Matrix<BaseFloat> &net_out, const std::vector<int32> &target) {
int32 correct = 0;
for(int32 r=0; r<net_out.NumRows(); r++) {
BaseFloat max = -1;
}
-void Mse::Eval(const Matrix<BaseFloat>& net_out, const Matrix<BaseFloat>& target, Matrix<BaseFloat>* diff) {
+void Mse::Eval(const Matrix<BaseFloat> &net_out, const Matrix<BaseFloat> &target, Matrix<BaseFloat> *diff) {
KALDI_ASSERT(net_out.NumCols() == target.NumCols());
KALDI_ASSERT(net_out.NumRows() == target.NumRows());
diff->Resize(net_out.NumRows(), net_out.NumCols(), kUndefined);
index d54b2a9d02b479065c981561de4da2362df87520..ca445c06f32ead556e21858e78d3994f24e1a376 100644 (file)
--- a/src/nnet_cpu/nnet-loss.h
+++ b/src/nnet_cpu/nnet-loss.h
~Xent() { }
/// Evaluate cross entropy from hard labels
- void Eval(const Matrix<BaseFloat>& net_out, const Matrix<BaseFloat>& target,
- Matrix<BaseFloat>* diff);
+ void Eval(const Matrix<BaseFloat> &net_out, const Matrix<BaseFloat> &target,
+ Matrix<BaseFloat> *diff);
/// Evaluate cross entropy from soft labels
- void Eval(const Matrix<BaseFloat>& net_out, const std::vector<int32>& target,
- Matrix<BaseFloat>* diff);
+ void Eval(const Matrix<BaseFloat> &net_out, const std::vector<int32> &target,
+ Matrix<BaseFloat> *diff);
/// Generate string with error report
std::string Report();
private:
- int32 Correct(const Matrix<BaseFloat>& net_out,
- const std::vector<int32>& target);
+ int32 Correct(const Matrix<BaseFloat> &net_out,
+ const std::vector<int32> &target);
private:
int32 frames_;
~Mse() { }
/// Evaluate mean square error from target values
- void Eval(const Matrix<BaseFloat>& net_out, const Matrix<BaseFloat>& target,
- Matrix<BaseFloat>* diff);
+ void Eval(const Matrix<BaseFloat> &net_out, const Matrix<BaseFloat> &target,
+ Matrix<BaseFloat> *diff);
/// Generate string with error report
std::string Report();
index 2f6be79f2a544770d84812b9daa2f708d110f470..778aa9e08a9c9be5f85b9e7d33d45fd6dc96f80f 100644 (file)
namespace kaldi {
-void Nnet::Propagate(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out) {
+void Nnet::Propagate(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out) {
KALDI_ASSERT(NULL != out);
if (LayerCount() == 0) {
nnet_[i]->Propagate(propagate_buf_[i], &propagate_buf_[i+1]);
}
- Matrix<BaseFloat>& mat = propagate_buf_[nnet_.size()];
+ Matrix<BaseFloat> &mat = propagate_buf_[nnet_.size()];
out->Resize(mat.NumRows(), mat.NumCols(), kUndefined);
out->CopyFromMat(mat);
}
-void Nnet::Backpropagate(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out_err) {
+void Nnet::Backpropagate(const Matrix<BaseFloat> &in_err, Matrix<BaseFloat> *out_err) {
if(LayerCount() == 0) { KALDI_ERR << "Cannot backpropagate on empty network"; }
// we need at least L+1 input buffers
// don't copy the in_err to buffers, use it as is...
int32 i = nnet_.size()-1;
if (nnet_[i]->IsUpdatable()) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
if (uc->LearnRate() > 0.0) {
uc->Update(propagate_buf_[i], in_err);
}
// backpropagate by using buffers
for(i--; i >= 1; i--) {
if (nnet_[i]->IsUpdatable()) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[i]);
if (uc->LearnRate() > 0.0) {
uc->Update(propagate_buf_[i], backpropagate_buf_[i]);
}
@@ -104,7 +104,7 @@ void Nnet::Backpropagate(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out
// update first layer
if (nnet_[0]->IsUpdatable() && 0 >= backprop_stop) {
- UpdatableComponent* uc = dynamic_cast<UpdatableComponent*>(nnet_[0]);
+ UpdatableComponent *uc = dynamic_cast<UpdatableComponent*>(nnet_[0]);
if (uc->LearnRate() > 0.0) {
uc->Update(propagate_buf_[0], backpropagate_buf_[0]);
}
@@ -120,7 +120,7 @@ void Nnet::Backpropagate(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out
}
-void Nnet::Feedforward(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out) {
+void Nnet::Feedforward(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out) {
KALDI_ASSERT(NULL != out);
if (LayerCount() == 0) {
}
-void Nnet::Read(std::istream& in, bool binary) {
+void Nnet::Read(std::istream &in, bool binary) {
// get the network layers from a factory
Component *comp;
while (NULL != (comp = Component::Read(in, binary, this))) {
}
-void Nnet::LearnRate(BaseFloat lrate, const char* lrate_factors) {
+void Nnet::LearnRate(BaseFloat lrate, const char *lrate_factors) {
// split lrate_factors to a vector
std::vector<BaseFloat> lrate_factor_vec;
if (NULL != lrate_factors) {
- char* copy = new char[strlen(lrate_factors)+1];
+ char *copy = new char[strlen(lrate_factors)+1];
strcpy(copy, lrate_factors);
- char* tok = NULL;
+ char *tok = NULL;
while(NULL != (tok = strtok((tok==NULL?copy:NULL),",:; "))) {
lrate_factor_vec.push_back(atof(tok));
}
index b83fbeb9d01cb9b1cef5d684812bb64b8a951d4d..5d0e530dae8f7540a8e845813529a81523e9962d 100644 (file)
--- a/src/nnet_cpu/nnet-nnet.h
+++ b/src/nnet_cpu/nnet-nnet.h
public:
/// Perform forward pass through the network
- void Propagate(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out);
+ void Propagate(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out);
/// Perform backward pass through the network
- void Backpropagate(const Matrix<BaseFloat>& in_err, Matrix<BaseFloat>* out_err);
+ void Backpropagate(const Matrix<BaseFloat> &in_err, Matrix<BaseFloat> *out_err);
/// Perform forward pass through the network, don't keep buffers (use it when not training)
- void Feedforward(const Matrix<BaseFloat>& in, Matrix<BaseFloat>* out);
+ void Feedforward(const Matrix<BaseFloat> &in, Matrix<BaseFloat> *out);
MatrixIndexT InputDim() const; ///< Dimensionality of the input features
MatrixIndexT OutputDim() const; ///< Dimensionality of the desired vectors
}
/// Read the MLP from file (can add layers to existing instance of Nnet)
- void Read(const std::string& file);
+ void Read(const std::string &file);
/// Read the MLP from stream (can add layers to existing instance of Nnet)
- void Read(std::istream& in, bool binary);
+ void Read(std::istream &in, bool binary);
/// Write MLP to file
- void Write(const std::string& file, bool binary);
+ void Write(const std::string &file, bool binary);
/// Write MLP to stream
- void Write(std::ostream& out, bool binary);
+ void Write(std::ostream &out, bool binary);
/// Set the learning rate values to trainable layers,
/// factors can disable training of individual layers
- void LearnRate(BaseFloat lrate, const char* lrate_factors);
+ void LearnRate(BaseFloat lrate, const char *lrate_factors);
/// Get the global learning rate value
BaseFloat LearnRate() {
return learn_rate_;
private:
/// Creates a component by reading from stream, return NULL if no more components
- static Component* ComponentFactory(std::istream& in, bool binary, Nnet* nnet);
+ static Component* ComponentFactory(std::istream &in, bool binary, Nnet *nnet);
/// Dumps individual component to stream
- static void ComponentDumper(std::ostream& out, bool binary, const Component& comp);
+ static void ComponentDumper(std::ostream &out, bool binary, const Component &comp);
typedef std::vector<Component*> NnetType;
}
-inline int32 Nnet::IndexOfLayer(const Component& comp) const {
+inline int32 Nnet::IndexOfLayer(const Component &comp) const {
for(int32 i=0; i<LayerCount(); i++) {
if (&comp == nnet_[i]) return i;
}
}
-inline void Nnet::Read(const std::string& file) {
+inline void Nnet::Read(const std::string &file) {
bool binary;
Input in(file, &binary);
Read(in.Stream(), binary);
}
-inline void Nnet::Write(const std::string& file, bool binary) {
+inline void Nnet::Write(const std::string &file, bool binary) {
Output out(file, binary, true);
Write(out.Stream(), binary);
out.Close();
}
-inline void Nnet::Write(std::ostream& out, bool binary) {
+inline void Nnet::Write(std::ostream &out, bool binary) {
for(int32 i=0; i<LayerCount(); i++) {
nnet_[i]->Write(out, binary);
}
index 9a0007e69b7aaf173d0c5815e1ce54820b939ccc..35ceb97c9abec1a9b31e036b68d932d3523f4f60 100644 (file)
namespace kaldi {
template<typename Real>
-static void MyApplySoftMax(VectorBase<Real>& v) {
+static void MyApplySoftMax(VectorBase<Real> &v) {
Real max = v.Max();
v.Add(-max);
v.ApplyExp();
}
template<typename Real>
-static void MyApplySoftMax2(VectorBase<Real>& v) {
+static void MyApplySoftMax2(VectorBase<Real> &v) {
- Real* p=v.Data();
+ Real *p=v.Data();
Real max = -1e20;
for(int32 i=0; i<v.Dim(); i++) {
Real v = *p++;
}
-void Rnnlm::Propagate(const std::vector<int32>& in, Matrix<BaseFloat>* out) {
+void Rnnlm::Propagate(const std::vector<int32> &in, Matrix<BaseFloat> *out) {
// resize hidden buffer
h2_.Resize(in.size(), V1_.NumCols(), kSetZero);
}
-void Rnnlm::Backpropagate(const MatrixBase<BaseFloat>& in_err) {
+void Rnnlm::Backpropagate(const MatrixBase<BaseFloat> &in_err) {
// prepare error buffer
e2_.Resize(in_err.NumRows(), h2_.NumCols(), kSetZero);
}
-BaseFloat Rnnlm::Score(const std::vector<int32>& seq, const VectorBase<BaseFloat>* hid, int32 prev_wrd) {
+BaseFloat Rnnlm::Score(const std::vector<int32> &seq, const VectorBase<BaseFloat> *hid, int32 prev_wrd) {
std::vector<int32>in;
in.push_back(prev_wrd);
index bc9471f0b4e2e1b35af22b6a39559833bd64c828..28937424915146a7958d99591c36cc7a7939061f 100644 (file)
// Disable copy construction and assignment
private:
Rnnlm(Rnnlm&);
- Rnnlm& operator=(Rnnlm&);
+ Rnnlm &operator=(Rnnlm&);
//////////////////////////////////////////////////////////////
// Constructor & Destructor
* p(y_i|w_i), ie. first row contains the posteriors of the next word
* after the first observed word1
*/
- void Propagate(const std::vector<int32>& in, Matrix<BaseFloat>* out);
+ void Propagate(const std::vector<int32> &in, Matrix<BaseFloat> *out);
/// Perform backward pass through the network
- void Backpropagate(const MatrixBase<BaseFloat>& in_err);
+ void Backpropagate(const MatrixBase<BaseFloat> &in_err);
/// Score a sequence
- BaseFloat Score(const std::vector<int32>& seq, const VectorBase<BaseFloat>* hid = NULL, int32 prev_wrd = 1);
+ BaseFloat Score(const std::vector<int32> &seq, const VectorBase<BaseFloat> *hid = NULL, int32 prev_wrd = 1);
MatrixIndexT InputDim() const; ///< Dimensionality of the input features
MatrixIndexT OutputDimCls() const; ///< Dimensionality of the desired vectors
/// Get the last hidden layer state
const VectorBase<BaseFloat>& HidVector() const;
/// Set hidden layer state for next input sequence
- void HidVector(const VectorBase<BaseFloat>& v);
+ void HidVector(const VectorBase<BaseFloat> &v);
/// Get hidden layer states of last sequence
const Matrix<BaseFloat>& HidMatrix() const;
/// Read the MLP from file (can add layers to existing instance of Rnnlm)
- void Read(const std::string& file);
+ void Read(const std::string &file);
/// Read the MLP from stream (can add layers to existing instance of Rnnlm)
- void Read(std::istream& in, bool binary);
+ void Read(std::istream &in, bool binary);
/// Write MLP to file
- void Write(const std::string& file, bool binary);
+ void Write(const std::string &file, bool binary);
/// Write MLP to stream
- void Write(std::ostream& out, bool binary);
+ void Write(std::ostream &out, bool binary);
/// Set the learning rate values to trainable layers,
/// factors can disable training of individual layers
return h2_.Row(h2_.NumRows()-1);
}
-inline void Rnnlm::HidVector(const VectorBase<BaseFloat>& v) {
+inline void Rnnlm::HidVector(const VectorBase<BaseFloat> &v) {
h2_last_.CopyFromVec(v);
}
-inline void Rnnlm::Read(const std::string& file) {
+inline void Rnnlm::Read(const std::string &file) {
bool binary;
Input in(file, &binary);
Read(in.Stream(), binary);
in.Close();
}
-inline void Rnnlm::Read(std::istream& in, bool binary) {
+inline void Rnnlm::Read(std::istream &in, bool binary) {
ExpectToken(in,binary,"<rnnlm_v1.0>");
ExpectToken(in,binary,"<v1>");
in >> V1_;
}
-inline void Rnnlm::Write(const std::string& file, bool binary) {
+inline void Rnnlm::Write(const std::string &file, bool binary) {
Output out(file, binary, true);
Write(out.Stream(), binary);
out.Close();
}
-inline void Rnnlm::Write(std::ostream& out, bool binary) {
+inline void Rnnlm::Write(std::ostream &out, bool binary) {
WriteToken(out,binary,"<rnnlm_v1.0>");
WriteToken(out,binary,"<v1>");
out << V1_;
index 8f2ff167edcec9ef90b5cd361cc66289f67cbc8d..72794a3d23f159c23f6c69ca17646c08f5da86e6 100644 (file)
*/
class RnnlmAux {
public:
- static void ReadDict(const std::string& file, std::map<std::string, int32>* dict) {
+ static void ReadDict(const std::string &file, std::map<std::string, int32> *dict) {
bool binary;
Input in(file, &binary);
std::string word;
in.Close();
}
- static bool AddLine(std::istream& is, const std::map<std::string, int32>& dict, std::vector<int32>* seq) {
+ static bool AddLine(std::istream &is, const std::map<std::string, int32> &dict, std::vector<int32> *seq) {
char line[4096];
is.getline(line, 4096);
- const char* delim = " \t";
+ const char *delim = " \t";
std::vector<const char*> words;
// parse the line, check for OOVs
- const char* w = NULL;
+ const char *w = NULL;
while (NULL != (w = strtok((w==NULL?line:NULL), delim))) {
if(dict.find(w) == dict.end()) return false; // OOV
words.push_back(w);
index 685939fee7ecb2d913cadce17ca3eb9314734196..6f7538f2cef47a0cc99d927813ce095cba5bf3ad 100644 (file)
static void UnitTestNnet() {
try {
UnitTestSomething();
- } catch (const std::exception& e) {
+ } catch (const std::exception &e) {
std::cerr << e.what();
}
}
index d986f0a33d8b3a9f8539a5d69ce0b65212363cfa..0bf9d008fabea1de8e05ffb44694ae703fc8487e 100644 (file)
--- a/src/nnetbin/nnet-copy.cc
+++ b/src/nnetbin/nnet-copy.cc
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 06742355cb15bb4015325f630f0894b1920d4208..6d7aa5736ac6c84a65e4a4339b7b415cd30d48a2 100644 (file)
#endif
return ((num_done>0)?0:1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
KALDI_ERR << e.what();
return -1;
}
diff --git a/src/nnetbin/nnet-train-mse-tgtmat-frmshuff.cc b/src/nnetbin/nnet-train-mse-tgtmat-frmshuff.cc
index 455cf79eb96f81e3512130506612f9940e982c37..485c78dcc3e0f5a2cc9a25622c075a99597e7b37 100644 (file)
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/nnetbin/nnet-train-xent-hardlab-frmshuff.cc b/src/nnetbin/nnet-train-xent-hardlab-frmshuff.cc
index be6dcabae8bc9a79457d88b40be57a076f60ae22..b8521cb3056524f1ddeb3ddc8a1b027365e43f87 100644 (file)
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/nnetbin/nnet-train-xent-hardlab-perutt.cc b/src/nnetbin/nnet-train-xent-hardlab-perutt.cc
index c8e548bcc6572b813ff45a7c2cf6b402ddf0c343..82c75260f4aa0516b1fd3644afe26504ee769dae 100644 (file)
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a95d6ad013d5507f57b5b265dd0bd69faa5c6095..2326a01bd45122b75e7a6d09f17526a7fa9d2af8 100644 (file)
nnet.Read(model_filename);
KALDI_ASSERT(nnet.LayerCount()==1);
KALDI_ASSERT(nnet.Layer(0)->GetType() == Component::kRbm);
- RbmBase& rbm = dynamic_cast<RbmBase&>(*nnet.Layer(0));
+ RbmBase &rbm = dynamic_cast<RbmBase&>(*nnet.Layer(0));
rbm.SetLearnRate(learn_rate);
rbm.SetMomentum(momentum);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 1a019f6bf827d0a12d6fe4d8b2c62dac6558d5b0..6bc5beed28067b9d9cd837696cc0b8ac94fe8434 100644 (file)
KALDI_LOG << "Done " << num_done << " files";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
KALDI_ERR << e.what();
return -1;
}
diff --git a/src/nnetbin_cpu/nnet-train-xent-hardlab-perutt.cc b/src/nnetbin_cpu/nnet-train-xent-hardlab-perutt.cc
index b82797ba05988e1741a44a8160a17dcaa6c447c6..93a0a4df99af4f9a0e9b0573808579a2b028a279 100644 (file)
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 69b98866847a473b12ae4d4d43d939a60bb66390..fbcff7af798bc35113affa2177709391134f7b61 100644 (file)
KALDI_LOG << xent.Report();
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/sgmm/am-sgmm.cc b/src/sgmm/am-sgmm.cc
index f236dcfcf0dd8aa42f20c0c2480eb6edf64e7aec..c1c4313769311fb4d0f48dfbe15e5168b1ea5e45 100644 (file)
--- a/src/sgmm/am-sgmm.cc
+++ b/src/sgmm/am-sgmm.cc
}
-void AmSgmm::ComputePerFrameVars(const VectorBase<BaseFloat>& data,
+void AmSgmm::ComputePerFrameVars(const VectorBase<BaseFloat> &data,
const std::vector<int32> &gselect,
const SgmmPerSpkDerivedVars &spk_vars,
BaseFloat logdet_s,
}
-void AmSgmmFunctions::ComputeDistances(const AmSgmm& model,
+void AmSgmmFunctions::ComputeDistances(const AmSgmm &model,
const Vector<BaseFloat> &state_occs,
MatrixBase<BaseFloat> *dists) {
int32 num_states = model.NumPdfs(),
diff --git a/src/sgmm/am-sgmm.h b/src/sgmm/am-sgmm.h
index a87187cb497547e7ed7dd29519079045f253f150..40e3c598a1d8ded08656f36e043a5867fa7ca1a3 100644 (file)
--- a/src/sgmm/am-sgmm.h
+++ b/src/sgmm/am-sgmm.h
/// 'logdet_s' term is the log determinant of the FMLLR transform, or 0.0 if
/// no FMLLR is used or it's single-class fMLLR applied in the feature
/// extraction, and we're not keeping track of it here.
- void ComputePerFrameVars(const VectorBase<BaseFloat>& data,
+ void ComputePerFrameVars(const VectorBase<BaseFloat> &data,
const std::vector<int32> &gselect,
const SgmmPerSpkDerivedVars &spk_vars,
BaseFloat logdet_s,
/// "State-Level Data Borrowing for Low-Resource Speech Recognition based on
/// Subspace GMMs", by Yanmin Qian et. al, Interspeech 2011.
/// Model must have one substate per state.
- static void ComputeDistances(const AmSgmm& model,
+ static void ComputeDistances(const AmSgmm &model,
const Vector<BaseFloat> &state_occs,
MatrixBase<BaseFloat> *dists);
};
index 6ee8fdfa7e852bff269ea225e3ab3b8345500fd4..69eb37dce8ffea0d2697fcaa7bcef6ae69e1a1f8 100644 (file)
BaseFloat MleAmSgmmAccs::Accumulate(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const VectorBase<BaseFloat> &v_s, // may be empty
int32 j, BaseFloat weight,
SgmmUpdateFlagsType flags) {
BaseFloat MleAmSgmmAccs::AccumulateFromPosteriors(
const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const Matrix<BaseFloat> &posteriors,
const VectorBase<BaseFloat> &v_s, // may be empty
int32 j,
SgmmUpdateFlagsType flags) {
double tot_count = 0.0;
- const vector<int32>& gselect = frame_vars.gselect;
+ const vector<int32> &gselect = frame_vars.gselect;
// Intermediate variables
Vector<BaseFloat> gammat(gselect.size());
Vector<BaseFloat> xt_jmi(feature_dim_), mu_jmi(feature_dim_),
return tot_count;
}
-void MleAmSgmmAccs::CommitStatsForSpk(const AmSgmm& model,
+void MleAmSgmmAccs::CommitStatsForSpk(const AmSgmm &model,
const VectorBase<BaseFloat> &v_s) {
if (v_s.Dim() != 0 && spk_space_dim_ > 0 && gamma_s_.Dim() != 0) {
if (!v_s.IsZero())
BaseFloat
MleSgmmSpeakerAccs::Accumulate(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
int32 j,
BaseFloat weight) {
// Calculate Gaussian posteriors and collect statistics
BaseFloat
MleSgmmSpeakerAccs::AccumulateFromPosteriors(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const Matrix<BaseFloat> &posteriors,
int32 j) {
double tot_count = 0.0;
int32 feature_dim = model.FeatureDim(),
spk_space_dim = model.SpkSpaceDim();
KALDI_ASSERT(spk_space_dim != 0);
- const vector<int32>& gselect = frame_vars.gselect;
+ const vector<int32> &gselect = frame_vars.gselect;
// Intermediate variables
Vector<double> xt_jmi(feature_dim), mu_jmi(feature_dim),
index 06f219b27cda3f086d1765c63ac0786f4278c8ba..a03cf8159ef58673bd422baee327fd8deab09302 100644 (file)
/// Returns likelihood.
BaseFloat Accumulate(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const VectorBase<BaseFloat> &v_s, // spk-vec, may be empty
int32 state_index, BaseFloat weight,
SgmmUpdateFlagsType flags);
/// Returns count accumulated (may differ from posteriors.Sum()
/// due to weight pruning).
BaseFloat AccumulateFromPosteriors(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const Matrix<BaseFloat> &posteriors,
const VectorBase<BaseFloat> &v_s, // may be empty
int32 state_index,
/// Accumulates global stats for the current speaker (if applicable).
/// If flags contains kSgmmSpeakerProjections (N), must call
/// this after finishing the speaker's data.
- void CommitStatsForSpk(const AmSgmm& model,
+ void CommitStatsForSpk(const AmSgmm &model,
const VectorBase<BaseFloat> &v_s);
/// Accessors
/// Accumulate statistics. Returns per-frame log-likelihood.
BaseFloat Accumulate(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
int32 state_index, BaseFloat weight);
/// Accumulate statistics, given posteriors. Returns total
/// count accumulated, which may differ from posteriors.Sum()
/// due to randomized pruning.
BaseFloat AccumulateFromPosteriors(const AmSgmm &model,
- const SgmmPerFrameDerivedVars& frame_vars,
+ const SgmmPerFrameDerivedVars &frame_vars,
const Matrix<BaseFloat> &posteriors,
int32 state_index);
diff --git a/src/sgmm/fmllr-sgmm.cc b/src/sgmm/fmllr-sgmm.cc
index 33e422effea3cea1e470a35b2cb0088e5cffbbdb..e1324907e166423ba65a80450d1bc5fbd8298eea 100644 (file)
--- a/src/sgmm/fmllr-sgmm.cc
+++ b/src/sgmm/fmllr-sgmm.cc
fmllr_bases[b].CopyRowsFromVec(U.Row(b));
}
KALDI_LOG << "Estimated " << num_fmllr_bases << " fMLLR basis matrices.";
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
KALDI_WARN << "Not estimating FMLLR bases because of a thrown exception:\n"
<< e.what();
fmllr_bases.resize(0);
diff --git a/src/sgmm/fmllr-sgmm.h b/src/sgmm/fmllr-sgmm.h
index 3ab41a6e03a8857b196607446fbd73dfd288ddf2..a9af18e3fa17e98b990ae01f6182bbeccdc6f483 100644 (file)
--- a/src/sgmm/fmllr-sgmm.h
+++ b/src/sgmm/fmllr-sgmm.h
void AccumulateFromPosteriors(const AmSgmm &sgmm,
const SgmmPerSpkDerivedVars &spk,
const VectorBase<BaseFloat> &data,
- const std::vector<int32>& gauss_select,
+ const std::vector<int32> &gauss_select,
const Matrix<BaseFloat> &posteriors,
int32 state_index);
/// Accessors
int32 Dim() const { return dim_; }
- const AffineXformStats& stats() const { return stats_; }
+ const AffineXformStats &stats() const { return stats_; }
private:
AffineXformStats stats_; ///< Accumulated stats
index 320fb92b7448aa67808b31da0ddc67c307aa40e0..e2e1c962ad54c1dfe7698b72c17fc02188d0f957 100644 (file)
/// Note: the pdf-index j, relating to the original SGMM
/// in sgmm_, is only needed to select the right vector to
/// compute Gaussian-level alignments with.
- void Accumulate(const SgmmPerFrameDerivedVars& frame_vars,
+ void Accumulate(const SgmmPerFrameDerivedVars &frame_vars,
int32 j,
BaseFloat weight);
index 5bec7923321dc4eb15dba04f752a03b50363681d..23da499f9dcc89643223d0fce75a06d263dc7c34 100644 (file)
--- a/src/sgmmbin/init-ubm.cc
+++ b/src/sgmmbin/init-ubm.cc
}
KALDI_LOG << "Written UBM to " << gmm_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 73b58b0cc4151ed7060e30dcb8cb2ed5fba14e51..49e99c696633291709b1491ba805cc37a5ed6f63 100644 (file)
KALDI_LOG << "Written accs to: " << accs_wxfilename;
}
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 73de0472fd35a76fd2770c94521b21f9377c1979..0e12e13beef133765738590a7b3d8fbd97801c4c 100644 (file)
}
KALDI_LOG << "Written accs.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index ea2e47875345ca99b4d2f4e86905c21e082f063e..d4c8dbe5f130364ffab6794bc39cf12f4c50dc99 100644 (file)
}
KALDI_LOG << "Written accs.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index caac94809fccbcb79aff1232918d3a98ba713eec..fa1e38ebf5b1dd6be920844c61fe0dabf6adc5a5 100644 (file)
}
KALDI_LOG << "Written accs.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 843e2e6f17f8b18a72fc097e108aefb7c8be203d..4ff53cb44141b78f808a5e6687f8f640b9b35305 100644 (file)
}
KALDI_LOG << "Written accs.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 0a881a6478132b100ba71c1798df98f922096d96..03e7e0fb3bd5648ac5f66e0f418e07bb932947ff 100644 (file)
<< stats.size();
DeleteBuildTreeStats(&stats);
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index bdeb2959a69e67cd8ddb23b84d65e7e8125dbab0..fd038fd026f929b6c9f93743948fd969c540110e 100644 (file)
KALDI_LOG << "Overall log-likelihood per frame is " << (tot_like/frame_count)
<< " over " << frame_count << " frames.";
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 0e84e283386cb39a22921aaa4674ef08add3012d..4ec2b1fe6c51b45803261fa08e9400accae2c430 100644 (file)
KALDI_LOG << "Wrote tree\n";
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 38164f1ee8aefba4eecc4fcfdf93fc2fc50c95d9..5dd67ded38483d37ac852138fc3c0193737fdf39 100644 (file)
dists.Write(ko.Stream(), binary);
KALDI_LOG << "Wrote distances to " << distances_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 73b4acc8609fee7a3f01f6ef70c42805af7fb98d..89e55158562af3396a6afb8f484b00370e136ad8 100644 (file)
KALDI_LOG << "Wrote questions to "<<phone_sets_wxfilename;
DeleteBuildTreeStats(&stats);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f70f40f675dfe4467900766086d7a604359faab1..ec7d709fe254c5c9d8434c8d3f1c6350535fc1b1 100644 (file)
}
KALDI_LOG << "Written model to " << sgmm_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 785ca24f2ef0caedd5d67131ca81af47a43f4eb5..59806e6bbd106195f4bddb733bc262cfae78a3c8 100644 (file)
--- a/src/sgmmbin/sgmm-copy.cc
+++ b/src/sgmmbin/sgmm-copy.cc
KALDI_LOG << "Written model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index ffe6da87d317729ce3c7adbf7e022c4b61765f53..9f39ebf7200be05b3754277023b518960e9adf6b 100644 (file)
if (word_syms) delete word_syms;
delete decode_fst;
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 77b7dc3d77948475053463bc0f5e0dfb75fb7ba7..0f280e7b4d75fa678c2361c426951e944cf38e84 100644 (file)
KALDI_LOG << "Wrote model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 0c1bd48f9787ef18c3ec3c83142851ed6514387a..9cc500278f7f46583dc618353a13aadb2bc2fbca 100644 (file)
KALDI_LOG << "Num frames " << tot_t << ", auxf impr per frame is "
<< (tot_impr / tot_t);
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index dc32fa6d63ec314a88e8cd5499a394c02baa5374..a310be71a6296565ab3f76832dddc1a1f5f0bd81 100644 (file)
KALDI_LOG << "Num frames " << tot_t << ", auxf impr per frame is "
<< (tot_impr / tot_t);
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index c0de7e930f3a4624e9ea8dc4e6c1b17483b163d7..1e3cb027176350916f921dc6f9d029232de89f77 100644 (file)
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 59b49a54465800b922d9cb21f5c5551461b64bf5..0411f227440ed24e0d5d3909856c91f2623c6ceb 100644 (file)
KALDI_LOG << "Overall auxf impr per frame is " << (tot_impr / tot_t)
<< " over " << tot_t << " frames.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 51d1db18a6e43ad55926408ce0b2c0cca93ff511..c233ab4091e0ff560d3ede7cc01bbb6521d4fa70 100644 (file)
KALDI_LOG << "Done " << num_done << " files, " << num_no_post
<< " with no posts, " << num_other_error << " with other errors.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 576cb4f0820791e9f341297e75f15319d0469bb1..c77dc573dc4617433523d576d9618f8b69e447b3 100644 (file)
--- a/src/sgmmbin/sgmm-est.cc
+++ b/src/sgmmbin/sgmm-est.cc
KALDI_LOG << "Written model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6b2d4f7fb6981803997178bc9cd16b93ba659050..40865f02d41a72322525edd1677c03c5b4c377e1 100644 (file)
if (num_done != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6687a1cf0297d4640df571b68616811bab98c767..1aecaf5e608965038d1fb85b01e9c493e5782539 100644 (file)
--- a/src/sgmmbin/sgmm-info.cc
+++ b/src/sgmmbin/sgmm-info.cc
}
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index bc63dd0c623aed6fae4af720d6d23d241fa76f8c..f3726c4183fad41cff18e75c0ddf00175673e7af 100644 (file)
sgmm_out_filename, binary);
KALDI_LOG << "Written model to " << sgmm_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index c6915cd735e48d01af70aeb125bd5d1e19b5165e..f2431bae065b884bc705aa905bb82386e5e51ff1 100644 (file)
--- a/src/sgmmbin/sgmm-init.cc
+++ b/src/sgmmbin/sgmm-init.cc
}
KALDI_LOG << "Written model to " << sgmm_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 9e206789ee65e65329eb8d4ae7778f5577a21bf2..bb807e253da8e60fdbd96832bc9d2d722d0ab329 100644 (file)
if (word_syms) delete word_syms;
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f29d775d31751d28d7cf839972c1308f30af1abf..2d6581353bfacf35d1cc20a4127bf45721029980 100644 (file)
if (word_syms) delete word_syms;
delete decode_fst;
return (num_success != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 3f669cfcc4dd8b609d3f369a5dbb64166e176fe9..549474f040f78edad690271e46877cfa22841a8e 100644 (file)
KALDI_LOG << "Written model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index e792854fc28a9d79a48d868b20a5472d60a6cf50..dc8c52c602ac40b6104b785f1509c5bf476a1456 100644 (file)
KALDI_LOG << "Written model to " << model_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 5bdfe588f98a6fb560c27f6871ea56f3311751e0..0a768c6f09914251945be7f47ef5ca1456230bd6 100644 (file)
<< " with other errors.";
return (num_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 15a898a2af5a2feb1bda0481fd7e4e4a2bf37137..e12363253ca979c34b864efde49bc898f0731d98 100644 (file)
namespace kaldi {
-void LatticeAcousticRescore(const AmSgmm& am,
- const TransitionModel& trans_model,
- const MatrixBase<BaseFloat>& data,
+void LatticeAcousticRescore(const AmSgmm &am,
+ const TransitionModel &trans_model,
+ const MatrixBase<BaseFloat> &data,
const SgmmPerSpkDerivedVars &spk_vars,
const std::vector<std::vector<int32> > &gselect,
const SgmmGselectConfig &sgmm_config,
KALDI_LOG << "Done " << n_done << " lattices.";
return (n_done != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index b11bdab26fc7e1cac3e0ad2eff0eef7f33ad3185..b3d92f5a168963b2ee81597dca7e0e128279f18f 100644 (file)
}
KALDI_LOG << "Written stats to " << stats_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 446ff9328b70fa8759d1c0630b6c6066e1d93fff..e060f239fcd0f2f8008316667f57f9e6e822fec5 100644 (file)
<< stats.size();
DeleteBuildTreeStats(&stats);
return (stats.size() != 0 ? 0 : 1);
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index df6ce20cce6f1c52d9f8db7b052e92da3d5f6bef..81f4abe15738a0e6cb73fac539f1f370cecac9a2 100644 (file)
KALDI_LOG << "Written UBM to " << ubm_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index a2dd1ebd908a966f75fa56f9b93efe3ff3012114..73304084fb178e72d54e576cc3c63d1e30826973 100644 (file)
DeletePointers(&tied_densities_);
}
-void AmTiedDiagGmm::Init(const DiagGmm& proto) {
+void AmTiedDiagGmm::Init(const DiagGmm &proto) {
if (densities_.size() != 0 || tied_densities_.size() != 0) {
KALDI_WARN << "Init() called on a non-empty object. Contents will be "
"overwritten";
index 12244b6e8a3cc5b20370bbb8ebdf8017ad3f5065..43f6d5fdfcd96bd3a5bf926f354d3ac41f57e294 100644 (file)
DeletePointers(&tied_densities_);
}
-void AmTiedFullGmm::Init(const FullGmm& proto) {
+void AmTiedFullGmm::Init(const FullGmm &proto) {
if (densities_.size() != 0 || tied_densities_.size() != 0) {
KALDI_WARN << "Init() called on a non-empty object. Contents will be "
"overwritten";
index 0ea4e3388274cb6c7f542d0e5c199acb12a9c4e2..b4bfeeb918d28c3d2e876883ab33ba8d7ba18335 100644 (file)
AccumulateFromPosteriors(posteriors);
}
-void AccumAmTiedDiagGmm::Read(std::istream& in_stream, bool binary, bool add) {
+void AccumAmTiedDiagGmm::Read(std::istream &in_stream, bool binary, bool add) {
int32 num_pdfs, num_tied;
ExpectToken(in_stream, binary, "<NUMPDFS>");
ReadBasicType(in_stream, binary, &num_pdfs);
}
}
-void AccumAmTiedDiagGmm::Write(std::ostream& out_stream, bool binary) const {
+void AccumAmTiedDiagGmm::Write(std::ostream &out_stream, bool binary) const {
int32 num_pdfs = gmm_accumulators_.size();
int32 num_tied = tied_gmm_accumulators_.size();
WriteToken(out_stream, binary, "<NUMPDFS>");
index ece58fe26d6e420bde7b536fd253b87bd74ebc4e..aa6464c61001f218a5e1b94c8a3e7a24dd157c45 100644 (file)
AccumulateFromPosteriors(posteriors);
}
-void AccumAmTiedFullGmm::Read(std::istream& in_stream, bool binary, bool add) {
+void AccumAmTiedFullGmm::Read(std::istream &in_stream, bool binary, bool add) {
int32 num_pdfs, num_tied;
ExpectToken(in_stream, binary, "<NUMPDFS>");
ReadBasicType(in_stream, binary, &num_pdfs);
}
}
-void AccumAmTiedFullGmm::Write(std::ostream& out_stream, bool binary) const {
+void AccumAmTiedFullGmm::Write(std::ostream &out_stream, bool binary) const {
int32 num_pdfs = gmm_accumulators_.size();
int32 num_tied = tied_gmm_accumulators_.size();
WriteToken(out_stream, binary, "<NUMPDFS>");
index cb780de261f93eff9b70bf22b4ec3df679657bb2..7210470aa0a8d94515d5f76bc1a1cc07e87eae46 100644 (file)
--- a/src/tied/mle-tied-gmm.h
+++ b/src/tied/mle-tied-gmm.h
void AccumulateForComponent(int32 comp_index, BaseFloat weight);
/// Accumulate for all components, given the posteriors.
- void AccumulateFromPosteriors(const VectorBase<BaseFloat>& gauss_posteriors);
+ void AccumulateFromPosteriors(const VectorBase<BaseFloat> &gauss_posteriors);
/// Propagate the sufficient statistics to the target accumulator
void Propagate(AccumTiedGmm *target) const;
// Accessors
const GmmFlagsType Flags() const { return flags_; }
- const Vector<double>& occupancy() const { return occupancy_; }
+ const Vector<double> &occupancy() const { return occupancy_; }
private:
int32 num_comp_;
index 6ceb2a17e970cfc15d4aa05629b4a2332ddd47b2..14ec3e9668bc84c123960729d7b084fccd65150a 100644 (file)
--- a/src/tied/tied-gmm-inl.h
+++ b/src/tied/tied-gmm-inl.h
namespace kaldi {
template<class Real>
-void TiedGmm::SetWeights(const VectorBase<Real>& w) {
+void TiedGmm::SetWeights(const VectorBase<Real> &w) {
KALDI_ASSERT(weights_.Dim() == w.Dim());
weights_.CopyFromVec(w);
}
diff --git a/src/tied/tied-gmm.h b/src/tied/tied-gmm.h
index 05d1564b338823d26d896595608292d4d72c132d..2cc3169bb1f1c038c00bcd803c81f5f215f07401 100644 (file)
--- a/src/tied/tied-gmm.h
+++ b/src/tied/tied-gmm.h
void Read(std::istream &rIn, bool binary);
/// Const accessors
- const Vector<BaseFloat>& weights() const { return weights_; }
- const int32& codebook_index() const { return codebook_index_; }
+ const Vector<BaseFloat> &weights() const { return weights_; }
+ const int32 &codebook_index() const { return codebook_index_; }
/// Mutators for both float or double
template<class Real>
- void SetWeights(const VectorBase<Real>& w); ///< Set mixure weights
+ void SetWeights(const VectorBase<Real> &w); ///< Set mixure weights
/// Mutators for single component, supports float or double
/// Set weight for single component.
index c60651f58d3ec6cb91c40820121a91705427ae5a..7285fc55ec67c4f7f647270b3134a0dea2f92321 100644 (file)
diag.Write(ou1.Stream(), binary);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 77586664060b404c2c660166568cfc8cffe08cc6..9798cefc4a8aa1bd7976ecf4a3042645ef93edd3 100644 (file)
DeleteBuildTreeStats(&stats);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 05318a0356d46c4bb7ba1b12621120f7c023060c..48599d30dd0839e1458b2d18ffb6f6c7a0d1e436 100644 (file)
KALDI_LOG << "Wrote " << acc_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 6f0e612ab7bcab8ab1087f909d1c02e9a46860a1..0a7b94f6bb7944ec74c1d948f75fcc64ebe1f627 100644 (file)
KALDI_LOG << "Wrote " << acc_out_filename;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-diag-gmm-acc-stats-ali.cc b/src/tiedbin/tied-diag-gmm-acc-stats-ali.cc
index 2064646339d3ea5ad29bfbeefa3de8c1fd2f0dab..8c66c63afc84c2760923ed41666ca459cb93ceb9 100644 (file)
return 0;
else
return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-diag-gmm-align-compiled.cc b/src/tiedbin/tied-diag-gmm-align-compiled.cc
index 99ffdcf23150190a261a2c10aac44afad68ec53c..2cc308ac691a453e5fb42e10bc63a8772e4b7fef 100644 (file)
return 0;
else
return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-diag-gmm-decode-faster.cc b/src/tiedbin/tied-diag-gmm-decode-faster.cc
index 4280cb88a60c28b4d9c30c3c63a5ec08d19193f2..695e48ec8c416c53643a39ebab74a1ad27b5bd31 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-diag-gmm-decode-simple.cc b/src/tiedbin/tied-diag-gmm-decode-simple.cc
index 4ae2a958111014566bfe86fffe152667ffc1ed3e..e56793222f234dbf8c03bf17a9eb40a4125e15db 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 7b845159e51feb8a889a49f79a6bb726bf322d5e..6ecff9de589ad50900c27da04a1eff220869d27f 100644 (file)
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index e7fd25081e6cfcf80a38f0f98e189914385997dd..deea21ad59faebb22c9cfc216eea493ba3e878b3 100644 (file)
KALDI_LOG << "Wrote tree and model.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index f633a5c582828dfa25623318e58ffc65a80dc0fd..da42fbb68b6478c1fc934a77dea62e872e6fddf8 100644 (file)
delete ctx_dep;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-full-gmm-acc-stats-ali.cc b/src/tiedbin/tied-full-gmm-acc-stats-ali.cc
index 29228ef9e6d14ec74aa10489fdc2362d3e421e37..fe0cb7b3e93c466ed72d32d6c654d43ecb8119e5 100644 (file)
return 0;
else
return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-full-gmm-align-compiled.cc b/src/tiedbin/tied-full-gmm-align-compiled.cc
index 9484504e831b1204cbde79ba45668f61d2aa255c..798e569189ae89157b6552f37b8b70f9f9688fb3 100644 (file)
return 0;
else
return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-full-gmm-decode-faster.cc b/src/tiedbin/tied-full-gmm-decode-faster.cc
index a37b1061efa508fb3121a4358bb1bd8360e8e318..179b1ec03ce40b0a275f7dfeda022db45ec2ee2f 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
diff --git a/src/tiedbin/tied-full-gmm-decode-simple.cc b/src/tiedbin/tied-full-gmm-decode-simple.cc
index b8091bf23d225487a19959d9c0946754b34b8e64..d1a8c2f746fd93959487bda5a56017307ab09b7b 100644 (file)
delete decode_fst;
if (num_success != 0) return 0;
else return 1;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 77cf51cf5cfb1c5210dd7c9f957d25bbf9809c86..a3a4a2c90beb71757579eac26fee61d69fe8515d 100644 (file)
}
KALDI_LOG << "Written model to " << model_out_filename;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what() << '\n';
return -1;
}
index 0b0636396854bbc3acbbdbbbcfc5173f12226f27..bcca02e495aef25e4fa9bc0c20ed98c2b327193d 100644 (file)
KALDI_LOG << "Wrote tree and model.";
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 45068f0d1dcc3b2afe254021ff9ee54e584c9334..a0dd343dacaaad1f9a6054e8cac362f4e17c81ba 100644 (file)
delete ctx_dep;
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 47232a70ec2e02e2409a4e2537ee574c7bf22cf4..898366500b65500580c712dd35e9af6d86b45d64 100644 (file)
--- a/src/tiedbin/tied-lbg.cc
+++ b/src/tiedbin/tied-lbg.cc
DeletePointers(&pdfs);
return 0;
- } catch(const std::exception& e) {
+ } catch(const std::exception &e) {
std::cerr << e.what();
return -1;
}
index 9709ee3e793441510789a82bd79f7313e7915133..9f4b2f528a3e9d77817f12be994667407d410374 100644 (file)
void
FmllrDiagGmmAccs::
AccumulateFromPosteriors(const DiagGmm &pdf,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
const VectorBase<BaseFloat> &posterior) {
size_t num_comp = static_cast<int32>(pdf.NumGauss());
size_t dim = static_cast<size_t>(dim_);
BaseFloat
FmllrDiagGmmAccs::AccumulateForGmm(const DiagGmm &pdf,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat weight) {
size_t num_comp = static_cast<int32>(pdf.NumGauss());
Vector<BaseFloat> posterior(num_comp);
BaseFloat ComputeFmllrMatrixDiagGmm(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
std::string fmllr_type, // "none", "offset", "diag", "full"
int32 num_iters,
MatrixBase<BaseFloat> *out_xform) {
BaseFloat ComputeFmllrMatrixDiagGmmFull(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
int32 num_iters,
MatrixBase<BaseFloat> *out_xform) {
size_t dim = stats.G_.size();
}
BaseFloat ComputeFmllrMatrixDiagGmmDiagonal(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *out_xform) {
// The "Diagonal" here means a diagonal fMLLR matrix, i.e. like W = [ A; b] where
// A is diagonal.
@@ -310,7 +310,7 @@ BaseFloat ComputeFmllrMatrixDiagGmmDiagonal(const MatrixBase<BaseFloat> &in_xfor
}
BaseFloat ComputeFmllrMatrixDiagGmmOffset(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *out_xform) {
int32 dim = stats.G_.size();
KALDI_ASSERT(in_xform.NumRows() == dim && in_xform.NumCols() == dim+1);
}
float FmllrAuxFuncDiagGmm(const MatrixBase<float> &xform,
- const AffineXformStats& stats) {
+ const AffineXformStats &stats) {
size_t dim = stats.G_.size();
Matrix<double> xform_d(xform);
Vector<double> xform_row_g(dim + 1);
}
double FmllrAuxFuncDiagGmm(const MatrixBase<double> &xform,
- const AffineXformStats& stats) {
+ const AffineXformStats &stats) {
size_t dim = stats.G_.size();
Vector<double> xform_row_g(dim + 1);
SubMatrix<double> A(xform, 0, dim, 0, dim);
BaseFloat FmllrAuxfGradient(const MatrixBase<BaseFloat> &xform,
// if this is changed back to Matrix<double>
// un-comment the Resize() below.
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *grad_out) {
size_t dim = stats.G_.size();
Matrix<double> xform_d(xform);
index 0f11b452a70d3d05ec590a098a0d8ae3ea5fdf08..dca1afed87feafd89e840690216150e636e496c6 100644 (file)
/// Accumulate stats for a single GMM in the model; returns log likelihood.
BaseFloat AccumulateForGmm(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat weight);
/// Accumulate stats for a GMM, given supplied posteriors.
void AccumulateFromPosteriors(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
const VectorBase<BaseFloat> &posteriors);
void Update(const FmllrOptions &opts,
// It starts the optimization from the current value of the matrix (e.g. use
// InitFmllr to get this).
// Returns auxf improvement.
-BaseFloat ComputeFmllrDiagGmm(const FmllrDiagGmmAccs& accs,
+BaseFloat ComputeFmllrDiagGmm(const FmllrDiagGmmAccs &accs,
const FmllrOptions &opts,
Matrix<BaseFloat> *out_fmllr,
BaseFloat *logdet); // add this to likelihoods
/// Uses full fMLLR matrix (no structure). Returns the
/// objective function improvement, not normalized by number of frames.
BaseFloat ComputeFmllrMatrixDiagGmmFull(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
int32 num_iters,
MatrixBase<BaseFloat> *out_xform);
@@ -128,23 +128,23 @@ BaseFloat ComputeFmllrMatrixDiagGmmFull(const MatrixBase<BaseFloat> &in_xform,
/// course, these statistics are unnecessarily large for this case. Returns the
/// objective function improvement, not normalized by number of frames.
BaseFloat ComputeFmllrMatrixDiagGmmDiagonal(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *out_xform);
// Simpler implementation I am testing.
BaseFloat ComputeFmllrMatrixDiagGmmDiagonal2(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *out_xform);
/// This does offset-only fMLLR, i.e. it only estimates an offset.
BaseFloat ComputeFmllrMatrixDiagGmmOffset(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *out_xform);
/// This function internally calls ComputeFmllrMatrixDiagGmm{Full, Diagonal, Offset},
/// depending on "fmllr_type".
BaseFloat ComputeFmllrMatrixDiagGmm(const MatrixBase<BaseFloat> &in_xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
std::string fmllr_type, // "none", "offset", "diag", "full"
int32 num_iters,
MatrixBase<BaseFloat> *out_xform);
/// Returns the (diagonal-GMM) FMLLR auxiliary function value given the transform
/// and the stats.
float FmllrAuxFuncDiagGmm(const MatrixBase<float> &xform,
- const AffineXformStats& stats);
+ const AffineXformStats &stats);
double FmllrAuxFuncDiagGmm(const MatrixBase<double> &xform,
- const AffineXformStats& stats);
+ const AffineXformStats &stats);
/// Returns the (diagonal-GMM) FMLLR auxiliary function value given the transform
/// and the stats.
BaseFloat FmllrAuxfGradient(const MatrixBase<BaseFloat> &xform,
- const AffineXformStats& stats,
+ const AffineXformStats &stats,
MatrixBase<BaseFloat> *grad_out);
diff --git a/src/transform/hlda.cc b/src/transform/hlda.cc
index cd8e3e0ae0fe254a9122e0e041056e6d1fac5c4f..2e5dcbcbddde3ac1fdba7fb1de1ce754127ab872 100644 (file)
--- a/src/transform/hlda.cc
+++ b/src/transform/hlda.cc
HldaAccsDiagGmm::
AccumulateFromPosteriors(int32 pdf_id,
const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& posteriors) {
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &posteriors) {
Vector<double> data_dbl(data);
KALDI_ASSERT(static_cast<size_t>(pdf_id) < occs_.size()
&& occs_[pdf_id].Dim() == posteriors.Dim());
diff --git a/src/transform/hlda.h b/src/transform/hlda.h
index 2ef10733eb643f3d8d78b142b14866c84b025b0c..87f27b9d2526d41537a07716e2c351545a5ef62a 100644 (file)
--- a/src/transform/hlda.h
+++ b/src/transform/hlda.h
/// Accumulates stats (you have to first work out the posteriors yourself).
void AccumulateFromPosteriors(int32 pdf_id,
const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& posteriors);
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &posteriors);
private:
diff --git a/src/transform/mllt.cc b/src/transform/mllt.cc
index f7a9279668699c22fbec0da41d2f94a9eaee5ea7..3483b72cf68db45990ccd2b52973ac65d0ae3644 100644 (file)
--- a/src/transform/mllt.cc
+++ b/src/transform/mllt.cc
}
void MlltAccs::AccumulateFromPosteriors(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& posteriors) {
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &posteriors) {
KALDI_ASSERT(data.Dim() == gmm.Dim());
KALDI_ASSERT(data.Dim() == Dim());
KALDI_ASSERT(posteriors.Dim() == gmm.NumGauss());
- const Matrix<BaseFloat>& means_invvars = gmm.means_invvars();
- const Matrix<BaseFloat>& inv_vars = gmm.inv_vars();
+ const Matrix<BaseFloat> &means_invvars = gmm.means_invvars();
+ const Matrix<BaseFloat> &inv_vars = gmm.inv_vars();
Vector<BaseFloat> mean(data.Dim());
SpMatrix<double> tmp(data.Dim());
Vector<double> offset_dbl(data.Dim());
}
BaseFloat MlltAccs::AccumulateFromGmm(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat weight) { // e.g. weight = 1.0
Vector<BaseFloat> posteriors(gmm.NumGauss());
BaseFloat ans = gmm.ComponentPosteriors(data, &posteriors);
diff --git a/src/transform/mllt.h b/src/transform/mllt.h
index bc86e41ebdc7046affdf97c50b161df9754632d0..3dea4d466ffc237f5a8500fd7eb9205c7169921c 100644 (file)
--- a/src/transform/mllt.h
+++ b/src/transform/mllt.h
void AccumulateFromPosteriors(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
- const VectorBase<BaseFloat>& posteriors);
+ const VectorBase<BaseFloat> &data,
+ const VectorBase<BaseFloat> &posteriors);
// Returns GMM likelihood.
BaseFloat AccumulateFromGmm(const DiagGmm &gmm,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
BaseFloat weight); // e.g. weight = 1.0
// premultiplies the means of the model by M. typically called
index 0b89a4959828b8c701bed4abba1bf255e55ce7ed..83b7173d05d320b79bab3ddeadbfe2bc72830491 100644 (file)
BaseFloat RegtreeFmllrDiagGmmAccs::AccumulateForGmm(
const RegressionTree ®tree, const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data, size_t pdf_index, BaseFloat weight) {
+ const VectorBase<BaseFloat> &data, size_t pdf_index, BaseFloat weight) {
const DiagGmm &pdf = am.GetPdf(pdf_index);
int32 num_comp = pdf.NumGauss();
Vector<BaseFloat> posterior(num_comp);
void RegtreeFmllrDiagGmmAccs::AccumulateForGaussian(
const RegressionTree ®tree, const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data, size_t pdf_index, size_t gauss_index,
+ const VectorBase<BaseFloat> &data, size_t pdf_index, size_t gauss_index,
BaseFloat weight) {
const DiagGmm &pdf = am.GetPdf(pdf_index);
size_t dim = static_cast<size_t>(dim_);
index cfa8df725118e8521cd24110094c889c93093217..41951e9cfc29d578c90b96f2b946251a9f6a2425 100644 (file)
class RegtreeFmllrDiagGmm {
public:
RegtreeFmllrDiagGmm() : dim_(-1), num_xforms_(-1), valid_logdet_(false) {}
- explicit RegtreeFmllrDiagGmm(const RegtreeFmllrDiagGmm& other)
+ explicit RegtreeFmllrDiagGmm(const RegtreeFmllrDiagGmm &other)
: dim_(other.dim_), num_xforms_(other.num_xforms_),
xform_matrices_(other.xform_matrices_), logdet_(other.logdet_),
valid_logdet_(other.valid_logdet_),
/// Mutators
void SetParameters(const MatrixBase<BaseFloat> &mat, size_t regclass);
- void set_bclass2xforms(const std::vector<int32>& in) { bclass2xforms_ = in; }
+ void set_bclass2xforms(const std::vector<int32> &in) { bclass2xforms_ = in; }
private:
int32 dim_; ///< Dimension of feature vectors
/// This does not work with multiple feature transforms.
BaseFloat AccumulateForGmm(const RegressionTree ®tree,
const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
size_t pdf_index, BaseFloat weight);
/// Accumulate stats for a single Gaussian component in the model.
void AccumulateForGaussian(const RegressionTree ®tree,
const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
size_t pdf_index, size_t gauss_index,
BaseFloat weight);
/// Accessors
int32 Dim() const { return dim_; }
int32 NumBaseClasses() const { return num_baseclasses_; }
- const std::vector<AffineXformStats*>& baseclass_stats() const {
+ const std::vector<AffineXformStats*> &baseclass_stats() const {
return baseclass_stats_;
}
index eb03c69d3385667f17608a0ea166d6324dc1985b..63efa280d88b3513102ac2db5ac9edcd619392c8 100644 (file)
void RegtreeMllrDiagGmm::GetTransformedMeans(const RegressionTree ®tree,
const AmDiagGmm &am,
int32 pdf_index,
- MatrixBase<BaseFloat>* out) const {
+ MatrixBase<BaseFloat> *out) const {
KALDI_ASSERT(static_cast<int32>(bclass2xforms_.size()) ==
regtree.NumBaseclasses());
int32 num_gauss = am.GetPdf(pdf_index).NumGauss();
BaseFloat RegtreeMllrDiagGmmAccs::AccumulateForGmm(
const RegressionTree ®tree, const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data, int32 pdf_index, BaseFloat weight) {
+ const VectorBase<BaseFloat> &data, int32 pdf_index, BaseFloat weight) {
const DiagGmm &pdf = am.GetPdf(pdf_index);
int32 num_comp = static_cast<int32>(pdf.NumGauss());
Vector<BaseFloat> posterior(num_comp);
void RegtreeMllrDiagGmmAccs::AccumulateForGaussian(
const RegressionTree ®tree, const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data, int32 pdf_index, int32 gauss_index,
+ const VectorBase<BaseFloat> &data, int32 pdf_index, int32 gauss_index,
BaseFloat weight) {
const DiagGmm &pdf = am.GetPdf(pdf_index);
Vector<double> data_d(data);
}
static BaseFloat MllrAuxFunction(const Matrix<BaseFloat> &xform,
- const AffineXformStats& stats) {
+ const AffineXformStats &stats) {
int32 dim = stats.G_.size();
Matrix<double> xform_d(xform);
Vector<double> xform_row_g(dim + 1);
index f1c271f91ca06e7093008359b0abea6eed3e7c2a..34c48cf8a189796c35b4d0584a7b91f0a2025dd5 100644 (file)
/// Get all the transformed means for a given pdf.
void GetTransformedMeans(const RegressionTree ®tree, const AmDiagGmm &am,
- int32 pdf_index, MatrixBase<BaseFloat>* out) const;
+ int32 pdf_index, MatrixBase<BaseFloat> *out) const;
void Write(std::ostream &out_stream, bool binary) const;
void Read(std::istream &in_stream, bool binary);
/// Mutators
void SetParameters(const MatrixBase<BaseFloat> &mat, int32 regclass);
- void set_bclass2xforms(const std::vector<int32>& in) { bclass2xforms_ = in; }
+ void set_bclass2xforms(const std::vector<int32> &in) { bclass2xforms_ = in; }
/// Accessors
const std::vector< Matrix<BaseFloat> > xform_matrices() const {
/// This does not work with multiple feature transforms.
BaseFloat AccumulateForGmm(const RegressionTree ®tree,
const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
int32 pdf_index, BaseFloat weight);
/// Accumulate stats for a single Gaussian component in the model.
void AccumulateForGaussian(const RegressionTree ®tree,
const AmDiagGmm &am,
- const VectorBase<BaseFloat>& data,
+ const VectorBase<BaseFloat> &data,
int32 pdf_index, int32 gauss_index,
BaseFloat weight);
/// Accessors
int32 Dim() const { return dim_; }
int32 NumBaseClasses() const { return num_baseclasses_; }
- const std::vector<AffineXformStats*>& baseclass_stats() const {
+ const std::vector<AffineXformStats*> &baseclass_stats() const {
return baseclass_stats_;
}
index 9efe9b64c2f54bd78fc58bb595242ea70dc56378..76116d643b0733f1e18a39fcae89af250883271c 100644 (file)
std::set<EventKeyType> keys_all_saw_set;
// Now we have two distinct sets of keys keys_all and keys_some.
// We now create the Clusterable* stuff.
- BuildTreeStatsType dummy_stats; // dummy because the Clusterable* pointers are actually NULL.
+ BuildTreeStatsType dummy_stats; // dummy because the Clusterable *pointers are actually NULL.
size_t n_stats = rand() % 100;
// make sure we sometimes have empty or just one stats: may find extra bugs.
if (n_stats > 90) n_stats = 0;
std::set<EventKeyType> keys_all_saw_set;
// Now we have two distinct sets of keys keys_all and keys_some.
// We now create the Clusterable* stuff.
- BuildTreeStatsType stats; // dummy because the Clusterable* pointers are actually NULL.
+ BuildTreeStatsType stats; // dummy because the Clusterable *pointers are actually NULL.
size_t n_stats = rand() % 100;
// make sure we sometimes have empty or just one stats: may find extra bugs.
if (n_stats > 90) n_stats = 0;
diff --git a/src/tree/event-map.h b/src/tree/event-map.h
index bc1c4898057e7a9f3b3071d13425ba085368687b..d3c7c88fac0fd8405eed44adc30cb74c3bb0dd80 100644 (file)
--- a/src/tree/event-map.h
+++ b/src/tree/event-map.h
struct EventMapVectorHash { // Hashing object for EventMapVector. Works for both pointers and references.
// Not used in event-map.{h, cc}
size_t operator () (const EventType &vec);
- size_t operator () (const EventType* ptr) { return (*this)(*ptr); }
+ size_t operator () (const EventType *ptr) { return (*this)(*ptr); }
};
struct EventMapVectorEqual { // Equality object for EventType pointers-- test equality of underlying vector.
// Not used in event-map.{h, cc}
- size_t operator () (const EventType* p1, const EventType *p2) { return (*p1 == *p2); }
+ size_t operator () (const EventType *p1, const EventType *p2) { return (*p1 == *p2); }
};
index 36f2481f93d9291e65c7303b6ce2a8180a1c38a3..6929277dcba052020ac298979a93956b949d60c1 100644 (file)
KALDI_WARN << "BasicVectorHolder::Read, error reading line " << (is.eof() ? "[eof]" : "");
return false; // probably eof. fail in any case.
}
- const char* white_chars = " \t\n\r\f\v";
+ const char *white_chars = " \t\n\r\f\v";
SplitStringToVector(line, white_chars, &t_, true); // true== omit empty strings e.g.
// between spaces.
return true;
void Clear() { feats_.Resize(0, 0); }
// Writes Sphinx-format features
- static bool Write(std::ostream& os, bool binary, const T& m) {
+ static bool Write(std::ostream &os, bool binary, const T &m) {
if (!binary) {
KALDI_WARN << "SphinxMatrixHolder can't write Sphinx features in text ";
return false;
index e6f6f9eec9bbd39195660ca7bd2300c686ea74bc..23d6b8684e1915e827c08c4808828348ce8a03ab 100644 (file)
void UnitTestParseOptions() {
int argc = 4;
std::string str;
- const char* argv[4] = { "--i=boo", "a", "b", "c" };
+ const char *argv[4] = { "--i=boo", "a", "b", "c" };
ParseOptions po("my usage msg");
po.Register("i", &str, "My variable");
po.Read(argc, argv);
index 6b2455108f19ff08ebe99b214d604b744cd1297d..0f4fd82df6a9132d5ef1b66e0106edc21bac544c 100644 (file)
-void ParseOptions::Register(const std::string& name, bool* b,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, bool *b,
+ const std::string &doc) {
KALDI_ASSERT(b != NULL);
std::string idx = name;
NormalizeArgName(&idx);
+ ((*b)? "true)" : "false)"));
}
-void ParseOptions::Register(const std::string& name, int32* i,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, int32 *i,
+ const std::string &doc) {
KALDI_ASSERT(i != NULL);
std::string idx = name;
NormalizeArgName(&idx);
doc_map_[idx] = DocInfo(name, ss.str());
}
-void ParseOptions::Register(const std::string& name, uint32* u,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, uint32 *u,
+ const std::string &doc) {
KALDI_ASSERT(u != NULL);
std::string idx = name;
NormalizeArgName(&idx);
doc_map_[idx] = DocInfo(name, ss.str());
}
-void ParseOptions::Register(const std::string& name, float* f,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, float *f,
+ const std::string &doc) {
KALDI_ASSERT(f != NULL);
std::string idx = name;
NormalizeArgName(&idx);
doc_map_[idx] = DocInfo(name, ss.str());
}
-void ParseOptions::Register(const std::string& name, double* f,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, double *f,
+ const std::string &doc) {
KALDI_ASSERT(f != NULL);
std::string idx = name;
NormalizeArgName(&idx);
doc_map_[idx] = DocInfo(name, ss.str());
}
-void ParseOptions::Register(const std::string& name, std::string* s,
- const std::string& doc) {
+void ParseOptions::Register(const std::string &name, std::string *s,
+ const std::string &doc) {
KALDI_ASSERT(s != NULL);
std::string idx = name;
NormalizeArgName(&idx);
-int ParseOptions::Read(int argc, const char* const argv[]) {
+int ParseOptions::Read(int argc, const char *const argv[]) {
argc_ = argc;
argv_ = argv;
std::string key, value;
}
}
-void ParseOptions::PrintConfig(std::ostream& os) {
+void ParseOptions::PrintConfig(std::ostream &os) {
os << '\n' << "[[ Configuration of UI-Registered options ]]"
<< '\n';
std::string key;
}
-void ParseOptions::ReadConfigFile(const std::string& filename) {
+void ParseOptions::ReadConfigFile(const std::string &filename) {
std::ifstream is(filename.c_str(), std::ifstream::in);
if (!is.good()) {
std::cerr << "Cannot open config file "<<filename <<'\n';
-void ParseOptions::SplitLongArg(std::string in, std::string* key,
- std::string* value) {
+void ParseOptions::SplitLongArg(std::string in, std::string *key,
+ std::string *value) {
assert(in.substr(0, 2) == "--"); // precondition.
size_t pos = in.find_first_of('=', 0);
if (pos == std::string::npos) {
}
-void ParseOptions::NormalizeArgName(std::string* str) {
+void ParseOptions::NormalizeArgName(std::string *str) {
std::string out;
std::string::iterator it;
-bool ParseOptions::SetOption(const std::string& key, const std::string& value) {
+bool ParseOptions::SetOption(const std::string &key, const std::string &value) {
if (bool_map_.end() != bool_map_.find(key)) {
*(bool_map_[key]) = ToBool(value);
} else if (int_map_.end() != int_map_.find(key)) {
int32 ParseOptions::ToInt(std::string str) {
- char* end_pos;
+ char *end_pos;
// strtol is cheaper than stringstream...
// strtol accepts decimal 438143, hexa 0x1f2d3 and octal 067123
int32 ret = std::strtol(str.c_str(), &end_pos, 0);
}
uint32 ParseOptions::ToUInt(std::string str) {
- char* end_pos;
+ char *end_pos;
// strtol is cheaper than stringstream...
// strtol accepts decimal 438143, hexa 0x1f2d3 and octal 067123
uint32 ret = std::strtoul(str.c_str(), &end_pos, 0);
float ParseOptions::ToFloat(std::string str) {
- char* end_pos;
+ char *end_pos;
// strtod is cheaper than stringstream...
float ret = std::strtod(str.c_str(), &end_pos);
if (str.c_str() == end_pos) {
}
double ParseOptions::ToDouble(std::string str) {
- char* end_pos;
+ char *end_pos;
// strtod is cheaper than stringstream...
double ret = std::strtod(str.c_str(), &end_pos);
if (str.c_str() == end_pos) {
index 33518f19363c75c5495302ff0a36cf49737630a5..38f9f51e1c08bc037ae9c2e6669a6241927677b0 100644 (file)
--- a/src/util/parse-options.h
+++ b/src/util/parse-options.h
public:
/// Register boolean variable
- void Register(const std::string& name, bool* b, const std::string& doc);
+ void Register(const std::string &name, bool *b, const std::string &doc);
/// Register int32 variable
- void Register(const std::string& name, int32 *i, const std::string& doc);
+ void Register(const std::string &name, int32 *i, const std::string &doc);
  /// Register unsigned int32 variable
- void Register(const std::string& name, uint32* u,
- const std::string& doc);
+ void Register(const std::string &name, uint32 *u,
+ const std::string &doc);
/// Register float variable
- void Register(const std::string& name, float* f, const std::string& doc);
+ void Register(const std::string &name, float *f, const std::string &doc);
/// Register double variable [useful as we change BaseFloat type].
- void Register(const std::string& name, double* f, const std::string& doc);
+ void Register(const std::string &name, double *f, const std::string &doc);
/// Register string variable
- void Register(const std::string& name, std::string* s,
- const std::string& doc);
+ void Register(const std::string &name, std::string *s,
+ const std::string &doc);
/**
* Parses the command line options and fills the ParseOptions-registered
* Returns the first position in argv that was not used.
* [typically not useful: use NumParams() and GetParam(). ]
*/
- int Read(int argc, const char*const* argv);
+  int Read(int argc, const char *const *argv);
/// Prints the usage documentation [provided in the constructor].
void PrintUsage(bool print_command_line = false);
/// Prints the actual configuration of all the registered variables
- void PrintConfig(std::ostream& os);
+ void PrintConfig(std::ostream &os);
/// Number of positional parameters (c.f. argc-1).
int NumArgs();
private:
/// Reads the options values from a config file
- void ReadConfigFile(const std::string& filename);
+ void ReadConfigFile(const std::string &filename);
- void SplitLongArg(std::string in, std::string* key, std::string* value);
- void NormalizeArgName(std::string* str);
+ void SplitLongArg(std::string in, std::string *key, std::string *value);
+ void NormalizeArgName(std::string *str);
- bool SetOption(const std::string& key, const std::string& value);
+ bool SetOption(const std::string &key, const std::string &value);
bool ToBool(std::string str);
int32 ToInt(std::string str);
*/
struct DocInfo {
DocInfo() {}
- DocInfo(const std::string& name, const std::string& usemsg)
+ DocInfo(const std::string &name, const std::string &usemsg)
: name_(name), use_msg_(usemsg) {}
std::string name_;
std::vector<std::string> positional_args_;
const char *usage_;
int argc_;
- const char*const* argv_;
+  const char *const *argv_;
};
} // namespace kaldi
diff --git a/src/util/stl-utils.h b/src/util/stl-utils.h
index a2a77d9bba8b7c3e4778acb64e46e105a1b4c686..5c21cd0101cb2d09568ff3c61596fad141f7b4ea 100644 (file)
--- a/src/util/stl-utils.h
+++ b/src/util/stl-utils.h
struct CompareFirstMemberOfPair {
inline bool operator() (const std::pair<A, B> &p1,
const std::pair<A, B> &p2) {
- return (p1.first < p2.first);
+ return p1.first < p2.first;
}
};
index 83fc57736a6a1b10d0997f0f5ce477647c43e84a..2f38b497c5abff756699b3ecfde26351e53f637d 100644 (file)
return static_cast<char>(32 + rand() % 95); // between ' ' and '~'
}
-const char* ws_delim = " \t\n\r";
+const char *ws_delim = " \t\n\r";
char GetRandDelim() {
if (rand() % 2 == 0)
return static_cast<char>(33 + rand() % 94); // between '!' and '~';
diff --git a/src/util/text-utils.cc b/src/util/text-utils.cc
index 6195f68ade392a7328f7d52aacdd617c7c1c4834..1a5d7aa13d3f2a246782ef6c516a9711ce9f2950 100644 (file)
--- a/src/util/text-utils.cc
+++ b/src/util/text-utils.cc
void Trim(std::string *str) {
- const char* white_chars = " \t\n\r\f\v";
+ const char *white_chars = " \t\n\r\f\v";
std::string::size_type pos = str->find_last_not_of(white_chars);
if (pos != std::string::npos) {
void SplitStringOnFirstSpace(const std::string &str,
std::string *first,
std::string *rest) {
- const char* white_chars = " \t\n\r\f\v";
+ const char *white_chars = " \t\n\r\f\v";
typedef std::string::size_type I;
const I npos = std::string::npos;
I first_nonwhite = str.find_first_not_of(white_chars);