kaldi_thchs30_2 run.sh
作者:互联网
run.sh
#[ -d $thchs ] || mkdir -p $thchs || exit 1
#echo "downloading THCHS30 at $thchs ..."
#local/download_and_untar.sh $thchs http://www.openslr.org/resources/18 data_thchs30 || exit 1
#local/download_and_untar.sh $thchs http://www.openslr.org/resources/18 resource || exit 1
#local/download_and_untar.sh $thchs http://www.openslr.org/resources/18 test-noise || exit 1
取消注释的话,可以通过以上命令下载数据集
#generate text, wav.scp, utt2spk, spk2utt
# Create the Kaldi-format data directories (train/dev/test) from the
# raw THCHS-30 corpus: transcription text, wav path list, and the
# utterance<->speaker maps used by every later stage.
local/thchs-30_data_prep.sh $H $thchs/data_thchs30 || exit 1;
前面有具体解释过程:创建训练、验证、测试集,读取文件名和文件绝对路径写到 wav.scp,说话人对应关系写到 utt2spk,word 和 phone 写到对应的文本中
#produce MFCC features
# Recreate data/mfcc from scratch and copy the four data sets into it,
# so feature-related files live under data/mfcc/* and the originals
# under data/* stay untouched.
rm -rf data/mfcc && mkdir -p data/mfcc && cp -R data/{train,dev,test,test_phone} data/mfcc || exit 1;
删除之前mfcc,建立新mfcc 文件,将data下数据复制到mfcc路径下
# Extract MFCC features and cepstral mean/variance normalization (CMVN)
# statistics for each data set.
for part in train dev test; do
  # MFCC extraction
  steps/make_mfcc.sh --nj $n --cmd "$train_cmd" data/mfcc/$part exp/make_mfcc/$part mfcc/$part || exit 1;
  # CMVN statistics
  steps/compute_cmvn_stats.sh data/mfcc/$part exp/mfcc_cmvn/$part mfcc/$part || exit 1;
done
#copy feats and cmvn to test.ph, avoid duplicated mfcc & cmvn
# test_phone uses the same audio as test, so reuse its feature and CMVN
# lists instead of recomputing them.
cp data/mfcc/test/feats.scp data/mfcc/test_phone && cp data/mfcc/test/cmvn.scp data/mfcc/test_phone || exit 1;
计算mfcc特征,然后计算cmvn:倒谱均值和方差的归一化,复制到test_phone文件夹下
#prepare language stuff
#build a large lexicon (dictionary) that involves words in both the training and decoding.
# Word-level language resources. Runs in a subshell so the `cd $H`
# does not affect the rest of the script.
(
echo "make word graph ..."
# Merge the resource lexicon with the word-LM lexicon, drop the
# sentence markers <s>/</s>, sort and deduplicate.
cd $H; mkdir -p data/{dict,lang,graph} && \
cp $thchs/resource/dict/{extra_questions.txt,nonsilence_phones.txt,optional_silence.txt,silence_phones.txt} data/dict && \
cat $thchs/resource/dict/lexicon.txt $thchs/data_thchs30/lm_word/lexicon.txt | \
grep -v '<s>' | grep -v '</s>' | sort -u > data/dict/lexicon.txt || exit 1;
# Build data/lang (L.fst etc.); phones are position-independent here.
utils/prepare_lang.sh --position_dependent_phones false data/dict "<SPOKEN_NOISE>" data/local/lang data/lang || exit 1;
# Compress the word 3-gram ARPA LM (format_lm.sh expects a .gz file).
gzip -c $thchs/data_thchs30/lm_word/word.3gram.lm > data/graph/word.3gram.lm.gz || exit 1;
# Convert the ARPA LM into G.fst, producing data/graph/lang.
utils/format_lm.sh data/lang data/graph/word.3gram.lm.gz $thchs/data_thchs30/lm_word/lexicon.txt data/graph/lang || exit 1;
)
构建一个包含训练和解码用到的词的词典,使用词级三元语言模型
cat 将一个或多个文件的内容拼接后输出到标准输出,可重定向或通过管道传给下一个命令
grep 搜索文本中是否含有特定字符串,-v 表示输出不匹配的
创建三个文件夹,复制thchs/resource中的内容到data/dict文件夹下,cat查看lexicon下的字符串,使用grep搜索,排除开始符号和结尾符号 ,sort排序-u去重
其实就是,生成字典以及相关的所有文件,包括L.fst文件,用于生成HCLG.fst(HCLG是解码时的重要组成部分。HCLG.fst是由4个fst经过一系列算法(组合、确定化和最小化等)组合而成的。4个fst分别是H.fst、C.fst、L.fst和G.fst,分别是HMM模型、上下文环境、词典和语言模型对应的fst)
gzip -c,对language model打包并保留原文件
调用format_lm.sh 转换语言模型格式
#make_phone_graph
# Phone-level language resources (for phone decoding); same recipe as
# the word graph above, but built from lm_phone.
(
echo "make phone graph ..."
# Build the phone lexicon: drop <eps>, deduplicate, then append the
# <SPOKEN_NOISE> entry mapped to silence.
cd $H; mkdir -p data/{dict_phone,graph_phone,lang_phone} && \
cp $thchs/resource/dict/{extra_questions.txt,nonsilence_phones.txt,optional_silence.txt,silence_phones.txt} data/dict_phone && \
cat $thchs/data_thchs30/lm_phone/lexicon.txt | grep -v '<eps>' | sort -u > data/dict_phone/lexicon.txt && \
echo "<SPOKEN_NOISE> sil " >> data/dict_phone/lexicon.txt || exit 1;
# Build data/lang_phone, gzip the phone 3-gram LM, and convert it to
# G.fst under data/graph_phone/lang.
utils/prepare_lang.sh --position_dependent_phones false data/dict_phone "<SPOKEN_NOISE>" data/local/lang_phone data/lang_phone || exit 1;
gzip -c $thchs/data_thchs30/lm_phone/phone.3gram.lm > data/graph_phone/phone.3gram.lm.gz || exit 1;
utils/format_lm.sh data/lang_phone data/graph_phone/phone.3gram.lm.gz $thchs/data_thchs30/lm_phone/lexicon.txt \
data/graph_phone/lang || exit 1;
)
准备音素图,使用音素三元语言模型
和前面词图的准备过程同理
#monophone
# Train a monophone GMM system on the MFCC training data.
steps/train_mono.sh --boost-silence 1.25 --nj $n --cmd "$train_cmd" data/mfcc/train data/lang exp/mono || exit 1;
#test monophone model
# Decode in the background (&) so the next training stage can proceed.
local/thchs-30_decode.sh --mono true --nj $n "steps/decode.sh" exp/mono data/mfcc &
#monophone_ali
# Align the training data with the monophone model; the alignments
# seed the next (triphone) stage.
steps/align_si.sh --boost-silence 1.25 --nj $n --cmd "$train_cmd" data/mfcc/train data/lang exp/mono exp/mono_ali || exit 1;
训练单音素模型脚本,迭代40次,每次迭代进行一次对齐
测试单音素模型
单音素对齐
#triphone
# Context-dependent triphone system (delta features): 2000 leaves,
# 10000 Gaussians, trained from the monophone alignments.
steps/train_deltas.sh --boost-silence 1.25 --cmd "$train_cmd" 2000 10000 data/mfcc/train data/lang exp/mono_ali exp/tri1 || exit 1;
#test tri1 model
local/thchs-30_decode.sh --nj $n "steps/decode.sh" exp/tri1 data/mfcc &
#triphone_ali
# Re-align the training data with tri1 for the next stage.
steps/align_si.sh --nj $n --cmd "$train_cmd" data/mfcc/train data/lang exp/tri1 exp/tri1_ali || exit 1;
训练上下文相关的三音素模型
#lda_mllt
# LDA+MLLT system: splice +-3 frames of context, 2500 leaves,
# 15000 Gaussians, trained from the tri1 alignments.
steps/train_lda_mllt.sh --cmd "$train_cmd" --splice-opts "--left-context=3 --right-context=3" 2500 15000 data/mfcc/train data/lang exp/tri1_ali exp/tri2b || exit 1;
#test tri2b model
local/thchs-30_decode.sh --nj $n "steps/decode.sh" exp/tri2b data/mfcc &
#lda_mllt_ali
# --use-graphs true reuses the training graphs from exp/tri2b.
steps/align_si.sh --nj $n --cmd "$train_cmd" --use-graphs true data/mfcc/train data/lang exp/tri2b exp/tri2b_ali || exit 1;
进行线性判别分析和最大似然线性转换
#sat
# Speaker-adapted training (SAT) on top of LDA+MLLT, using fMLLR.
steps/train_sat.sh --cmd "$train_cmd" 2500 15000 data/mfcc/train data/lang exp/tri2b_ali exp/tri3b || exit 1;
#test tri3b model (decoding uses fMLLR speaker adaptation)
local/thchs-30_decode.sh --nj $n "steps/decode_fmllr.sh" exp/tri3b data/mfcc &
#sat_ali
# fMLLR alignment of the training data for the next stage.
steps/align_fmllr.sh --nj $n --cmd "$train_cmd" data/mfcc/train data/lang exp/tri3b exp/tri3b_ali || exit 1;
训练发音人自适应,基于特征空间最大似然线性回归
#quick
# "Quick" training: initializes a larger model (4200 leaves, 40000
# Gaussians) from the existing tri3b model instead of training from
# scratch.
steps/train_quick.sh --cmd "$train_cmd" 4200 40000 data/mfcc/train data/lang exp/tri3b_ali exp/tri4b || exit 1;
#test tri4b model
local/thchs-30_decode.sh --nj $n "steps/decode_fmllr.sh" exp/tri4b data/mfcc &
#quick_ali
# Alignments of the training set, consumed by DNN training below.
steps/align_fmllr.sh --nj $n --cmd "$train_cmd" data/mfcc/train data/lang exp/tri4b exp/tri4b_ali || exit 1;
#quick_ali_cv
# Alignments of the dev set, used as cross-validation for DNN training.
steps/align_fmllr.sh --nj $n --cmd "$train_cmd" data/mfcc/dev data/lang exp/tri4b exp/tri4b_ali_cv || exit 1;
用现有特征进行模型训练
对于当前模型中在树构建之后的每个状态,它基于树统计中的计数的重叠判断的相似性来选择旧模型中最接近的状态。
#train dnn model
# DNN training on top of the tri4b alignments (train + cross-validation).
local/nnet/run_dnn.sh --stage 0 --nj $n exp/tri4b exp/tri4b_ali exp/tri4b_ali_cv || exit 1;
#train dae model
#python2.6 or above is required for noisy data generation.
#To speed up the process, pyximport for python is recommended.
# Deep autoencoder (DAE) recipe for noise-robust features.
local/dae/run_dae.sh $thchs || exit 1;
训练DNN,包括xent(交叉熵)训练和MPE区分性训练
标签:run,mfcc,--,train,thchs30,sh,exp,data 来源: https://blog.csdn.net/weixin_42035898/article/details/112469895