sp is added to the model.
parent b1b1942fa0
commit 41d4fa5ff9
Binary file not shown.
@@ -378,17 +378,22 @@ def ipa2htk(ipa):
     return ''.join(htk_splitted)
 
 
-def performance_on_stimmen(stimmen_dir, hmmdefs):
-    #hmmdefs = r'c:\OneDrive\Research\rug\experiments\acoustic_model\fame\htk\model_\hmm1\iter20\hmmdefs'
-    #stimmen_dir = r'c:\OneDrive\Research\rug\experiments\acoustic_model\fame\htk\stimmen'
+def performance_on_stimmen(config_dir, stimmen_dir, hmmdefs):
     lattice_file = os.path.join(stimmen_dir, 'word_lattice.ltc')
     hvite_scp = os.path.join(stimmen_dir, 'hvite.scp')
     #fh.make_filelist(os.path.join(stimmen_dir, 'mfc'), hvite_scp, file_type='mfc')
     hresult_scp = os.path.join(stimmen_dir, 'hresult.scp')
     #fh.make_filelist(os.path.join(stimmen_dir, 'mfc'), hresult_scp, file_type='rec')
     lexicon_file = os.path.join(stimmen_dir, 'lexicon_recognition.dic')
-    chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_file)
 
+    # get feature_size from hmmdefs.
+    with open(hmmdefs) as f:
+        line = f.readline()
+        line = f.readline().strip()
+        feature_size = int(line.split(' ')[2])
+
+    chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_file, feature_size)
+
     result = chtk.recognition(
         lattice_file,
         hmmdefs,
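
Note: the feature vector size is now read from the hmmdefs header instead of being hard-coded. The two readline calls suggest that the files written by this pipeline start with the global options header sketched below; read_feature_size is a hypothetical helper restating that parsing logic, not a function from the repository.

    # Hypothetical helper mirroring the parsing above. It assumes hmmdefs starts with
    # the global options, e.g.
    #     ~o
    #     <STREAMINFO> 1 39
    # so the third space-separated token of the second line is the vector size.
    def read_feature_size(hmmdefs_path):
        with open(hmmdefs_path) as f:
            f.readline()                   # first header line (e.g. '~o')
            header = f.readline().strip()  # e.g. '<STREAMINFO> 1 39'
        return int(header.split(' ')[2])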

@@ -26,7 +26,7 @@ make_mlf = 0
 extract_features = 0
 flat_start = 0
 train_model_without_sp = 0
-add_sp = 0
+add_sp = 1
 train_model_with_sp = 0
 train_model_with_sp_align_mlf = 0
 train_triphone = 0
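
Note: these integers are stage switches for the training script; this commit enables only the sp-insertion stage. A minimal illustration of the gating idiom (the flag names are from the script, the bodies are placeholders only):

    # Placeholders only: each flag guards one `if <flag>:` block in the script.
    flat_start = 0
    train_model_without_sp = 0
    add_sp = 1                     # the stage enabled by this commit
    train_model_with_sp = 0

    if flat_start:
        pass  # HCompV flat start
    if train_model_without_sp:
        pass  # embedded re-estimation of the monophone models
    if add_sp:
        pass  # insert the short-pause (sp) model, see the hunk near the end of this diff
    if train_model_with_sp:
        pass  # continue training with sp in the model set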

@@ -35,6 +35,9 @@ train_triphone = 0
 
 # pre-defined values.
 dataset_list = ['devel', 'test', 'train']
+feature_size = 39
+improvement_threshold = 0.5
+
 hmmdefs_name = 'hmmdefs'
 proto_name = 'proto'
 
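
Note: both new constants are consumed later in this diff. feature_size is handed to pyhtk.HTK (39 is the usual dimensionality for 13 MFCCs including the 0th coefficient, plus deltas and delta-deltas), and improvement_threshold is the stopping criterion passed to chtk.re_estimation_until_saturated. A rough sketch of such a stopping rule, with hypothetical callables standing in for the re-estimation and evaluation steps:

    def train_until_saturated(evaluate_model, one_reestimation_pass,
                              improvement_threshold=0.5, niter_max=10):
        """Hypothetical sketch: repeat re-estimation until the accuracy gain
        between two iterations drops below improvement_threshold."""
        accuracy_prev = evaluate_model(0)
        for niter in range(1, niter_max + 1):
            one_reestimation_pass(niter)
            accuracy = evaluate_model(niter)
            if accuracy - accuracy_prev < improvement_threshold:
                return niter
            accuracy_prev = accuracy
        return niter_max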

@@ -47,7 +50,9 @@ sil_hed = os.path.join(config_dir, 'sil.hed')
 prototype = os.path.join(config_dir, proto_name)
 
 model_dir = os.path.join(default.htk_dir, 'model')
-
+model0_dir = os.path.join(model_dir, 'hmm0')
+model1_dir = os.path.join(model_dir, 'hmm1')
+model1sp_dir = os.path.join(model_dir, 'hmm1sp')
 
 # directories / files to be made.
 lexicon_dir = os.path.join(default.htk_dir, 'lexicon')

@@ -55,9 +60,6 @@ lexicon_htk_asr = os.path.join(lexicon_dir, 'lex.htk_asr')
 lexicon_htk_oov = os.path.join(lexicon_dir, 'lex.htk_oov')
 lexicon_htk = os.path.join(lexicon_dir, 'lex.htk')
 
-
-#model1_dir = os.path.join(model_dir, 'hmm1')
-
 feature_dir = os.path.join(default.htk_dir, 'mfc')
 fh.make_new_directory(feature_dir, existing_dir='leave')
 tmp_dir = os.path.join(default.htk_dir, 'tmp')

@@ -65,11 +67,17 @@ fh.make_new_directory(tmp_dir, existing_dir='leave')
 label_dir = os.path.join(default.htk_dir, 'label')
 fh.make_new_directory(label_dir, existing_dir='leave')
 
 
+
 ## training
 hcompv_scp_train = os.path.join(tmp_dir, 'train.scp')
 mlf_file_train = os.path.join(label_dir, 'train_phone.mlf')
 mlf_file_train_aligned = os.path.join(label_dir, 'train_phone_aligned.mlf')
+
+## testing
+htk_stimmen_dir = os.path.join(default.htk_dir, 'stimmen')
+
+
 ## train without sp
 niter_max = 10
 
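
Note: the new ## testing block points the script at the stimmen evaluation data; the file names below are the ones the later recognition and re_estimation_until_saturated calls expect under that directory. A small hypothetical pre-flight check (not part of the repository) illustrating the expected layout:

    import os

    # Hypothetical check: the paths listed are the ones this diff passes to
    # chtk.recognition / chtk.re_estimation_until_saturated.
    def check_stimmen_dir(htk_stimmen_dir):
        expected = [
            os.path.join(htk_stimmen_dir, 'mfc'),                      # extracted features
            os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),         # recognition lattice
            os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),  # recognition lexicon
        ]
        missing = [p for p in expected if not os.path.exists(p)]
        if missing:
            raise FileNotFoundError('stimmen evaluation data missing: {}'.format(missing))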

@@ -100,7 +108,7 @@ if make_lexicon:
 
 
 ## intialize the instance for HTK.
-chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_htk)
+chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_htk, feature_size)
 
 
 ## ======================= make label files =======================
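
Note: with feature_size passed to the constructor, the wrapper can carry the vector size as state instead of each call supplying it, which is why the flat_start call in the next hunk drops its feature_size argument. The pyhtk internals are not shown in this commit; the class below is only an illustrative guess at the shape of that change, based on the call sites visible in this diff.

    # Illustrative sketch only, not the real pyhtk.HTK class.
    class HTK:
        def __init__(self, config_dir, phoneset, lexicon_file, feature_size):
            self.config_dir = config_dir
            self.phoneset = phoneset
            self.lexicon_file = lexicon_file
            self.feature_size = feature_size  # e.g. 39 for 13 MFCCs + deltas + delta-deltas

        def flat_start(self, hcompv_scp, model_dir):
            # would use self.feature_size when writing the prototype HMM
            ...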

@@ -223,11 +231,14 @@ if extract_features:
 if flat_start:
     timer_start = time.time()
     print('==== flat start ====')
-    feature_size = 39
-    model0_dir = os.path.join(model_dir, 'hmm0')
     fh.make_new_directory(model0_dir, existing_dir='leave')
 
-    chtk.flat_start(hcompv_scp_train, model0_dir, feature_size)
+    chtk.flat_start(hcompv_scp_train, model0_dir)
+
+    # create macros.
+    vFloors = os.path.join(model0_dir, 'vFloors')
+    if os.path.exists(vFloors):
+        chtk.create_macros(vFloors)
 
     # allocate mean & variance to all phones in the phone list
     print('>>> allocating mean & variance to all phones in the phone list...')
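
Note: the new macros step follows the standard HTK flat-start recipe: HCompV writes a prototype and, when variance flooring is requested, a vFloors file, and a macros file combining the global options header with vFloors is needed before HERest. chtk.create_macros presumably does something equivalent; the stand-alone sketch below is only illustrative.

    import os

    # Sketch of the usual HTK flat-start bookkeeping, assuming HCompV has already
    # written 'proto' and 'vFloors' into model0_dir. Not the pyhtk implementation.
    def create_macros_sketch(model0_dir, proto_name='proto', macros_name='macros'):
        proto_file = os.path.join(model0_dir, proto_name)
        vfloors_file = os.path.join(model0_dir, 'vFloors')
        macros_file = os.path.join(model0_dir, macros_name)

        # take the global options header (the lines before the first '~h') from proto
        with open(proto_file) as f:
            header = []
            for line in f:
                if line.startswith('~h'):
                    break
                header.append(line)

        # macros = global options header + variance floors
        with open(vfloors_file) as f:
            vfloors = f.read()
        with open(macros_file, 'w') as f:
            f.writelines(header)
            f.write(vfloors)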

@@ -241,69 +252,38 @@ if flat_start:
 
 ## ======================= train model without short pause =======================
 if train_model_without_sp:
-    fh.make_new_directory(model1_dir)
 
     print('==== train model without sp ====')
-    if not os.path.exists(os.path.join(model1_dir, 'iter0')):
-        shutil.copytree(model0_dir, os.path.join(model1_dir, 'iter0'))
-    for niter in range(1, niter_max):
-        timer_start = time.time()
-        hmm_n = 'iter' + str(niter)
-        hmm_n_pre = 'iter' + str(niter-1)
-        modeln_dir = os.path.join(model1_dir, hmm_n)
-        modeln_dir_pre = os.path.join(model1_dir, hmm_n_pre)
-
-        # re-estimation
-        fh.make_new_directory(modeln_dir)
-        pyhtk.re_estimation(
-            config_train,
-            os.path.join(modeln_dir_pre, hmmdefs_name),
-            modeln_dir,
-            hcompv_scp_train, phonelist_txt,
-            mlf_file=mlf_file_train,
-            macros=os.path.join(modeln_dir_pre, 'macros'))
-        print("elapsed time: {}".format(time.time() - timer_start))
+    timer_start = time.time()
+    niter = chtk.re_estimation_until_saturated(
+        model1_dir,
+        model0_dir, improvement_threshold, hcompv_scp_train,
+        os.path.join(htk_stimmen_dir, 'mfc'),
+        'mfc',
+        os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
+        mlf_file=mlf_file_train,
+        lexicon_file=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic')
+        )
+
+    print("elapsed time: {}".format(time.time() - timer_start))
 
 
 ## ======================= adding sp to the model =======================
 if add_sp:
     print('==== adding sp to the model ====')
+    # reference:
+    # http://www.f.waseda.jp/yusukekondo/htk.html#flat_start_estimation
 
     # make model with sp.
-    print('>>> modifying the last model in the previous step...')
-    modeln_dir_pre = os.path.join(model1_dir, 'iter'+str(niter_max-1))
-    modeln_dir = modeln_dir_pre.replace('iter' + str(niter_max-1), 'iter' + str(niter_max))
-    fh.make_new_directory(modeln_dir)
-    shutil.copy(
-        os.path.join(modeln_dir_pre, 'macros'),
-        os.path.join(modeln_dir, 'macros'))
-    shutil.copy(
-        os.path.join(modeln_dir_pre, hmmdefs_name),
-        os.path.join(modeln_dir, hmmdefs_name))
-
-    ## =======================
-    ## manually make changes to modeln_dir/hmmdefs
-    ## =======================
-    # add states 'sil'.
-    # http://www.f.waseda.jp/yusukekondo/htk.html#flat_start_estimation
-    #shutil.copy(
-    # os.path.join(model_dir, 'hmmdefs.txt'),
-    # os.path.join(modeln_dir, hmmdefs_name))
-
-    #hmmdefs_file_pre = os.path.join(modeln_dir_pre, hmmdefs_name)
-    hmmdefs_file = os.path.join(modeln_dir, hmmdefs_name)
-    macros_file = os.path.join(modeln_dir, 'macros')
-    #with open(hmmdefs_file_pre) as f:
-    # lines = f.read()
-    #lines_ = lines.split('~h ')
-    #sil_model = [line for line in lines_ if line.split('\n')[0].replace('"', '') == 'sil'][0]
-
-    # update hmmdefs and macros.
-    print('>>> updating hmmdefs and macros...')
-    modeln_dir_pre = modeln_dir
-    modeln_dir = modeln_dir.replace('iter' + str(niter_max), 'iter' + str(niter_max+1))
-    fh.make_new_directory(modeln_dir)
-    pyhtk.include_sil_in_hmmdefs(macros_file, hmmdefs_file, modeln_dir, sil_hed, phonelist_txt)
+    niter = 7
+    print('>>> adding sp state to the last model in the previous step...')
+    fh.make_new_directory(model1sp_dir, existing_dir='leave')
+    modeln_dir_pre = os.path.join(model1_dir, 'iter'+str(niter))
+
+    ## update hmmdefs and macros.
+    print('>>> adding sp to the model...')
+    modeln_dir = os.path.join(model1sp_dir, 'iter0')
+    chtk.add_sp(modeln_dir_pre, modeln_dir)
 
 
 ## ======================= train model with short pause =======================
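
Note: chtk.add_sp replaces the previously manual sp insertion. In the usual HTK Book recipe this means appending a one-emitting-state sp model to hmmdefs (a copy of the centre state of sil) and then running HHEd with a sil.hed edit script that adds backward-skip transitions and ties sp's state to sil's centre state. The sketch below shows that recipe via subprocess, assuming HHEd is on the PATH; it is an assumption about what add_sp does, not the pyhtk implementation, and the sil.hed commands quoted in the comment are the ones from the HTK tutorial.

    import os
    import subprocess

    # Illustrative sketch (not pyhtk.HTK.add_sp). It assumes hmmdefs already contains
    # an 'sp' model copied from the centre state of 'sil', and that sil_hed holds the
    # usual edit script, e.g.:
    #     AT 2 4 0.2 {sil.transP}
    #     AT 4 2 0.2 {sil.transP}
    #     AT 1 3 0.3 {sp.transP}
    #     TI silst {sil.state[3],sp.state[2]}
    def add_sp_sketch(modeln_dir_pre, modeln_dir, sil_hed, phonelist_txt):
        os.makedirs(modeln_dir, exist_ok=True)
        subprocess.check_call([
            'HHEd',
            '-H', os.path.join(modeln_dir_pre, 'macros'),
            '-H', os.path.join(modeln_dir_pre, 'hmmdefs'),
            '-M', modeln_dir,
            sil_hed, phonelist_txt])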