Compare commits
No commits in common. "b444b70af94852eed3a9892d3457c24479e0a959" and "fa81b70b27f1909b807c56decdc8af0485169ae8" have entirely different histories.
b444b70af9...fa81b70b27
Binary file not shown.
@@ -51,9 +51,6 @@
     <Compile Include="fame_hmm.py" />
     <Compile Include="phoneset\fame_asr.py" />
     <Compile Include="phoneset\fame_ipa.py" />
-    <Compile Include="phoneset\fame_phonetics.py">
-      <SubType>Code</SubType>
-    </Compile>
     <Compile Include="stimmen_functions.py" />
     <Compile Include="stimmen_test.py" />
   </ItemGroup>
@@ -345,7 +345,6 @@ def fix_lexicon(lexicon_file):
-
     for i in lex[lex['word'].str.startswith('\'')].index.values:
         lex.iat[i, 0] = lex.iat[i, 0].replace('\'', '\\\'')
 
     # to_csv does not work with space seperator. therefore all tabs should manually be replaced.
     #lex.to_csv(lexicon_file, index=False, header=False, encoding="utf-8", sep=' ', quoting=csv.QUOTE_NONE, escapechar='\\')
     lex.to_csv(lexicon_file, index=False, header=False, sep='\t', encoding='utf-8')
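Note: the comment kept in the hunk above refers to pandas' to_csv not producing an unquoted space-separated lexicon. A minimal sketch of the tab-replacement workaround it describes (a hypothetical helper, not code from either commit):

import pandas as pd

def write_space_separated_lexicon(lex: pd.DataFrame, lexicon_file: str) -> None:
    # to_csv with sep=' ' and QUOTE_NONE can fail or escape fields that
    # themselves contain spaces (e.g. multi-phone pronunciations), so write
    # tab-separated output first ...
    lex.to_csv(lexicon_file, index=False, header=False, sep='\t', encoding='utf-8')
    with open(lexicon_file, encoding='utf-8') as f:
        text = f.read()
    # ... and then replace the tabs manually, as the comment suggests.
    with open(lexicon_file, 'w', encoding='utf-8') as f:
        f.write(text.replace('\t', ' '))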
@@ -370,7 +369,6 @@ def ipa2asr(ipa):
 def ipa2htk(ipa):
     curr_dir = os.path.dirname(os.path.abspath(__file__))
     translation_key_ipa2asr = np.load(os.path.join(curr_dir, 'phoneset', 'fame_ipa2asr.npy')).item(0)
-    #translation_key_ipa2asr = np.load(r'c:\Users\Aki\source\repos\acoustic_model\acoustic_model\phoneset\fame_ipa2asr.npy').item(0)
 
     ipa_splitted = convert_phoneset.split_word(ipa, fame_ipa.multi_character_phones)
     ipa_splitted = fame_ipa.phone_reduction(ipa_splitted)
@@ -11,7 +11,7 @@ import numpy as np
 import pandas as pd
 
 import fame_functions
-from phoneset import fame_ipa, fame_asr, fame_phonetics
+from phoneset import fame_ipa, fame_asr
 import defaultfiles as default
 sys.path.append(default.toolbox_dir)
 import file_handling as fh
@@ -25,11 +25,11 @@ make_label = 0 # it takes roughly 4800 sec on Surface pro 2.
 make_mlf = 0
 extract_features = 0
 flat_start = 0
-train_monophone_without_sp = 0
+train_model_without_sp = 0
 add_sp = 0
-train_monophone_with_re_aligned_mlf = 0
+train_model_with_re_aligned_mlf = 1
 train_triphone = 0
-train_triphone_tied = 1
+
 
 
 # pre-defined values.
@@ -44,23 +44,21 @@ lexicon_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
 lexicon_oov = os.path.join(default.fame_dir, 'lexicon', 'lex.oov')
 
 config_dir = os.path.join(default.htk_dir, 'config')
-phonelist_full_txt = os.path.join(config_dir, 'phonelist_full.txt')
-tree_hed = os.path.join(config_dir, 'tree.hed')
-quest_hed = os.path.join(config_dir, 'quests.hed')
+sil_hed = os.path.join(config_dir, 'sil.hed')
+prototype = os.path.join(config_dir, proto_name)
 
 model_dir = os.path.join(default.htk_dir, 'model')
-model_mono0_dir = os.path.join(model_dir, 'mono0')
-model_mono1_dir = os.path.join(model_dir, 'mono1')
-model_mono1sp_dir = os.path.join(model_dir, 'mono1sp')
-model_mono1sp2_dir = os.path.join(model_dir, 'mono1sp2')
-model_tri1_dir = os.path.join(model_dir, 'tri1')
+model0_dir = os.path.join(model_dir, 'hmm0')
+model1_dir = os.path.join(model_dir, 'hmm1')
+model1sp_dir = os.path.join(model_dir, 'hmm1sp')
+model1sp2_dir = os.path.join(model_dir, 'hmm1sp2')
 
 # directories / files to be made.
 lexicon_dir = os.path.join(default.htk_dir, 'lexicon')
 lexicon_htk_asr = os.path.join(lexicon_dir, 'lex.htk_asr')
 lexicon_htk_oov = os.path.join(lexicon_dir, 'lex.htk_oov')
 lexicon_htk = os.path.join(lexicon_dir, 'lex.htk')
-lexicon_htk_triphone = os.path.join(lexicon_dir, 'lex_triphone.htk')
 
 feature_dir = os.path.join(default.htk_dir, 'mfc')
 fh.make_new_directory(feature_dir, existing_dir='leave')
@@ -73,9 +71,7 @@ fh.make_new_directory(label_dir, existing_dir='leave')
 ## training
 hcompv_scp_train = os.path.join(tmp_dir, 'train.scp')
 mlf_file_train = os.path.join(label_dir, 'train_phone.mlf')
-mlf_file_train_with_sp = os.path.join(label_dir, 'train_phone_with_sp.mlf')
 mlf_file_train_aligned = os.path.join(label_dir, 'train_phone_aligned.mlf')
-hcompv_scp_train_updated = hcompv_scp_train.replace('.scp', '_updated.scp')
 
 ## testing
 htk_stimmen_dir = os.path.join(default.htk_dir, 'stimmen')
@@ -103,15 +99,6 @@ if make_lexicon:
     # http://electroblaze.blogspot.nl/2013/03/understanding-htk-error-messages.html
     print('>>> fixing the lexicon...')
     fame_functions.fix_lexicon(lexicon_htk)
 
-    ## add sp to the end of each line.
-    #print('>>> adding sp...')
-    #with open(lexicon_htk) as f:
-    #    lines = f.read().split('\n')
-    #lines = [line + ' sp' for line in lines]
-    #with open(lexicon_htk_with_sp, 'wb') as f:
-    #    f.write(bytes('\n'.join(lines), 'ascii'))
-
     print("elapsed time: {}".format(time.time() - timer_start))
 
-
@@ -179,14 +166,11 @@ if make_mlf:
         label_dir_ = os.path.join(label_dir, dataset)
         mlf_word = os.path.join(label_dir, dataset + '_word.mlf')
         mlf_phone = os.path.join(label_dir, dataset + '_phone.mlf')
-        mlf_phone_with_sp = os.path.join(label_dir, dataset + '_phone_with_sp.mlf')
 
         print(">>> generating a word level mlf file for {}...".format(dataset))
         chtk.label2mlf(label_dir_, mlf_word)
         print(">>> generating a phone level mlf file for {}...".format(dataset))
-        chtk.mlf_word2phone(mlf_phone, mlf_word, with_sp=False)
-        chtk.mlf_word2phone(mlf_phone_with_sp, mlf_word, with_sp=True)
+        chtk.mlf_word2phone(mlf_phone, mlf_word)
 
-
     print("elapsed time: {}".format(time.time() - timer_start))
 
@@ -242,38 +226,38 @@ if extract_features:
 if flat_start:
     timer_start = time.time()
     print('==== flat start ====')
-    fh.make_new_directory(model_mono0_dir, existing_dir='leave')
+    fh.make_new_directory(model0_dir, existing_dir='leave')
 
-    chtk.flat_start(hcompv_scp_train, model_mono0_dir)
+    chtk.flat_start(hcompv_scp_train, model0_dir)
 
     # create macros.
-    vFloors = os.path.join(model_mono0_dir, 'vFloors')
+    vFloors = os.path.join(model0_dir, 'vFloors')
     if os.path.exists(vFloors):
         chtk.create_macros(vFloors)
 
     # allocate mean & variance to all phones in the phone list
     print('>>> allocating mean & variance to all phones in the phone list...')
     chtk.create_hmmdefs(
-        os.path.join(model_mono0_dir, proto_name),
-        os.path.join(model_mono0_dir, 'hmmdefs')
+        os.path.join(model0_dir, proto_name),
+        os.path.join(model0_dir, 'hmmdefs')
     )
 
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
 ## ======================= train model without short pause =======================
-if train_monophone_without_sp:
-    print('==== train monophone without sp ====')
+if train_model_without_sp:
+    print('==== train model without sp ====')
 
     timer_start = time.time()
     niter = chtk.re_estimation_until_saturated(
-        model_mono1_dir,
-        model_mono0_dir, improvement_threshold, hcompv_scp_train,
+        model1_dir,
+        model0_dir, improvement_threshold, hcompv_scp_train,
        os.path.join(htk_stimmen_dir, 'mfc'),
         'mfc',
         os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
         mlf_file=mlf_file_train,
-        lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic')
+        lexicon_file=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic')
     )
 
     print("elapsed time: {}".format(time.time() - timer_start))
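Note: chtk.flat_start in the hunk above presumably wraps HTK's flat-start initialisation. A hedged sketch of the underlying HCompV call (the config and prototype file names are assumptions, not taken from this repository):

import subprocess

def flat_start(config_file, proto_file, scp_file, out_dir):
    # HCompV computes the global mean and variance of the training features,
    # copies them into the prototype HMM and writes vFloors into out_dir,
    # which a later step turns into the 'macros' file (cf. chtk.create_macros).
    subprocess.run(['HCompV', '-C', config_file, '-f', '0.01', '-m',
                    '-S', scp_file, '-M', out_dir, proto_file], check=True)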
@@ -288,62 +272,54 @@ if add_sp:
 
     # make model with sp.
     print('>>> adding sp state to the last model in the previous step...')
-    fh.make_new_directory(model_mono1sp_dir, existing_dir='leave')
-    niter = chtk.get_niter_max(model_mono1_dir)
-    modeln_dir_pre = os.path.join(model_mono1_dir, 'iter'+str(niter))
-    modeln_dir = os.path.join(model_mono1sp_dir, 'iter0')
+    fh.make_new_directory(model1sp_dir, existing_dir='leave')
+    niter = chtk.get_niter_max(model1_dir)
+    modeln_dir_pre = os.path.join(model1_dir, 'iter'+str(niter))
+    modeln_dir = os.path.join(model1sp_dir, 'iter0')
 
     chtk.add_sp(modeln_dir_pre, modeln_dir)
+    print("elapsed time: {}".format(time.time() - timer_start))
 
-    print('>>> re-estimation...')
     niter = chtk.re_estimation_until_saturated(
-        model_mono1sp_dir, modeln_dir, improvement_threshold, hcompv_scp_train,
+        model1sp_dir, modeln_dir, improvement_threshold, hcompv_scp_train,
         os.path.join(htk_stimmen_dir, 'mfc'),
         'mfc',
         os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
-        mlf_file=mlf_file_train_with_sp,
-        lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
+        mlf_file=mlf_file_train,
+        lexicon_file=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
         model_type='monophone_with_sp'
     )
-    print("elapsed time: {}".format(time.time() - timer_start))
 
 
 ## ======================= train model with re-aligned mlf =======================
-if train_monophone_with_re_aligned_mlf:
-    print('==== traina monophone with re-aligned mlf ====')
-    timer_start = time.time()
+if train_model_with_re_aligned_mlf:
+    print('==== traina model with re-aligned mlf ====')
 
     print('>>> re-aligning the training data... ')
-    niter = chtk.get_niter_max(model_mono1sp_dir)
-    modeln_dir = os.path.join(model_mono1sp_dir, 'iter'+str(niter))
+    timer_start = time.time()
+    niter = chtk.get_niter_max(model1sp_dir)
+    modeln_dir = os.path.join(model1sp_dir, 'iter'+str(niter))
     chtk.make_aligned_label(
         os.path.join(modeln_dir, 'macros'),
         os.path.join(modeln_dir, 'hmmdefs'),
         mlf_file_train_aligned,
         os.path.join(label_dir, 'train_word.mlf'),
         hcompv_scp_train)
-
-    print('>>> updating the script file... ')
-    chtk.update_script_file(
-        mlf_file_train_aligned,
-        mlf_file_train_with_sp,
-        hcompv_scp_train,
-        hcompv_scp_train_updated)
+    print("elapsed time: {}".format(time.time() - timer_start))
 
     print('>>> re-estimation... ')
     timer_start = time.time()
-    fh.make_new_directory(model_mono1sp2_dir, existing_dir='leave')
-    niter = chtk.get_niter_max(model_mono1sp_dir)
+    fh.make_new_directory(model1sp2_dir, existing_dir='leave')
+    niter = chtk.get_niter_max(model1sp_dir)
     niter = chtk.re_estimation_until_saturated(
-        model_mono1sp2_dir,
-        os.path.join(model_mono1sp_dir, 'iter'+str(niter)),
+        model1sp2_dir,
+        os.path.join(model1sp_dir, 'iter'+str(niter)),
         improvement_threshold,
-        hcompv_scp_train_updated,
+        hcompv_scp_train,
         os.path.join(htk_stimmen_dir, 'mfc'),
         'mfc',
         os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
-        mlf_file=mlf_file_train_aligned,
-        lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
+        mlf_file=mlf_file_train,
+        lexicon_file=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
         model_type='monophone_with_sp'
     )
     print("elapsed time: {}".format(time.time() - timer_start))
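Note: chtk.make_aligned_label in the hunk above presumably performs HTK forced alignment of the training data. A hedged sketch of the usual HVite invocation behind such a step (all names below are placeholders, not taken from this repository):

import subprocess

def make_aligned_label(macros, hmmdefs, aligned_mlf, word_mlf, scp_file,
                       dictionary, phonelist):
    # HVite in alignment mode (-a) expands the word-level MLF with the
    # dictionary and writes a time-aligned phone-level MLF (-i), which then
    # replaces the original phone MLF for the next round of re-estimation.
    subprocess.run(['HVite', '-a', '-m', '-o', 'SWT', '-b', 'silence',
                    '-H', macros, '-H', hmmdefs,
                    '-i', aligned_mlf, '-I', word_mlf,
                    '-t', '250.0', '-S', scp_file,
                    dictionary, phonelist], check=True)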
@@ -351,81 +327,19 @@ if train_monophone_with_re_aligned_mlf:
 
 ## ======================= train triphone =======================
 if train_triphone:
-    print('==== traina triphone model ====')
-    timer_start = time.time()
-
-    triphonelist_txt = os.path.join(config_dir, 'triphonelist.txt')
     triphone_mlf = os.path.join(default.htk_dir, 'label', 'train_triphone.mlf')
-
-    print('>>> making triphone list... ')
-    chtk.make_triphonelist(
-        triphonelist_txt,
-        triphone_mlf,
-        mlf_file_train_aligned)
-
-    print('>>> making triphone header... ')
-    chtk.make_tri_hed(
-        os.path.join(config_dir, 'mktri.hed')
-    )
-
-    print('>>> init triphone model... ')
-    niter = chtk.get_niter_max(model_mono1sp2_dir)
-    fh.make_new_directory(os.path.join(model_tri1_dir, 'iter0'), existing_dir='leave')
-    chtk.init_triphone(
-        os.path.join(model_mono1sp2_dir, 'iter'+str(niter)),
-        os.path.join(model_tri1_dir, 'iter0')
-    )
-
-    print('>>> re-estimation... ')
-    # I wanted to train until satulated:
-    # #niter = chtk.re_estimation_until_saturated(
-    #    model_tri1_dir,
-    #    os.path.join(model_tri1_dir, 'iter0'),
-    #    improvement_threshold,
-    #    hcompv_scp_train_updated,
-    #    os.path.join(htk_stimmen_dir, 'mfc'),
-    #    'mfc',
-    #    os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
-    #    mlf_file=triphone_mlf,
-    #    lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
-    #    model_type='triphone'
-    # )
-    #
-    # but because the data size is limited, some triphone cannot be trained and received the error:
-    # ERROR [+8231] GetHCIModel: Cannot find hmm [i:-]r[+???]
-    # therefore only two times re-estimation is performed.
-    output_dir = model_tri1_dir
-
-    for niter in range(1, 4):
-        hmm_n = 'iter' + str(niter)
-        hmm_n_pre = 'iter' + str(niter-1)
-        _modeln_dir = os.path.join(output_dir, hmm_n)
-        _modeln_dir_pre = os.path.join(output_dir, hmm_n_pre)
-
-        fh.make_new_directory(_modeln_dir, 'leave')
-        chtk.re_estimation(
-            os.path.join(_modeln_dir_pre, 'hmmdefs'),
-            _modeln_dir,
-            hcompv_scp_train_updated,
-            mlf_file=triphone_mlf,
-            macros=os.path.join(_modeln_dir_pre, 'macros'),
-            model_type='triphone')
-
-    print("elapsed time: {}".format(time.time() - timer_start))
-
-
-## ======================= train triphone =======================
-if train_triphone_tied:
-    print('==== traina tied-state triphone ====')
-    timer_start = time.time()
-
-    print('>>> making lexicon for triphone... ')
-    chtk.make_triphone_full(phonelist_full_txt, lexicon_htk_triphone)
-
-    print('>>> making headers... ')
-    chtk.make_tree_header(tree_hed)
-    fame_phonetics.make_quests_hed(quest_hed)
-
-    print("elapsed time: {}".format(time.time() - timer_start))
-
+    macros = os.path.join(model_dir, 'hmm1_tri', 'iter0', 'macros')
+    hmmdefs = os.path.join(model_dir, 'hmm1_tri', 'iter0', 'hmmdefs')
+    model_out_dir = os.path.join(model_dir, 'hmm1_tri', 'iter1')
+    run_command([
+        'HERest', '-B',
+        '-C', config_train,
+        '-I', triphone_mlf,
+        '-t', '250.0', '150.0', '1000.0',
+        '-s', 'stats'
+        '-S', hcompv_scp_train,
+        '-H', macros,
+        '-H', hmmdefs,
+        '-M', model_out_dir,
+        os.path.join(config_dir, 'triphonelist.txt')
+    ])
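Note: the removed tied-state block above prepares tree.hed and quests.hed for decision-tree state tying. A hedged sketch of the HHEd call that such a tree.hed normally drives (directory names are placeholders, not taken from this repository):

import subprocess

def tie_states(macros, hmmdefs, tree_hed, triphonelist, out_dir):
    # HHEd executes the QS/TB commands in tree.hed to cluster triphone states
    # and writes the tied-state model set into out_dir.
    subprocess.run(['HHEd', '-B', '-H', macros, '-H', hmmdefs,
                    '-M', out_dir, tree_hed, triphonelist], check=True)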
@@ -109,30 +109,30 @@ np.save(os.path.join('phoneset', 'fame_ipa2asr.npy'), translation_key_ipa2asr)
 
 
 ## check which letters are not coded in ascii.
-#print('asr phones which cannot be coded in ascii:\n')
-#for i in fame_asr.phoneset_short:
-# try:
-# i_encoded = i.encode("ascii")
-# #print("{0} --> {1}".format(i, i.encode("ascii")))
-# except UnicodeEncodeError:
-# print(">>> {}".format(i))
+print('asr phones which cannot be coded in ascii:\n')
+for i in fame_asr.phoneset_short:
+    try:
+        i_encoded = i.encode("ascii")
+        #print("{0} --> {1}".format(i, i.encode("ascii")))
+    except UnicodeEncodeError:
+        print(">>> {}".format(i))
 
-#print("letters in the scripts which is not coded in ascii:\n")
-#for dataset in ['train', 'devel', 'test']:
-# timer_start = time.time()
+print("letters in the scripts which is not coded in ascii:\n")
+for dataset in ['train', 'devel', 'test']:
+    timer_start = time.time()
 
-# script_list = os.path.join(default.fame_dir, 'data', dataset, 'text')
-# with open(script_list, "rt", encoding="utf-8") as fin:
-# scripts = fin.read().split('\n')
+    script_list = os.path.join(default.fame_dir, 'data', dataset, 'text')
+    with open(script_list, "rt", encoding="utf-8") as fin:
+        scripts = fin.read().split('\n')
 
-# for line in scripts:
-# sentence = ' '.join(line.split(' ')[1:])
-# sentence_htk = fame_functions.word2htk(sentence)
+    for line in scripts:
+        sentence = ' '.join(line.split(' ')[1:])
+        sentence_htk = fame_functions.word2htk(sentence)
 
-# #if len(re.findall(r'[âêôûč\'àéèúćäëïöü]', sentence))==0:
-# try:
-# sentence_htk = bytes(sentence_htk, 'ascii')
-# except UnicodeEncodeError:
-# print(sentence)
-# print(sentence_htk)
+        #if len(re.findall(r'[âêôûč\'àéèúćäëïöü]', sentence))==0:
+        try:
+            sentence_htk = bytes(sentence_htk, 'ascii')
+        except UnicodeEncodeError:
+            print(sentence)
+            print(sentence_htk)
 
@@ -80,11 +80,8 @@ def phone_reduction(phones):
     Args:
         phones (list): list of phones.
     """
-    if sum([phone in phones for phone in phones_to_be_removed]) != 0:
-        print('input includes phone(s) which is not defined in fame_asr.')
-        print('those phone(s) are removed.')
     return [reduction_key.get(i, i) for i in phones
-            if i not in phones_to_be_removed]
+            if not i in phones_to_be_removed]
 
 phoneset_short = list(set(phone_reduction(phoneset)))
 phoneset_short.sort()
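Note: phone_reduction above maps every phone through reduction_key and drops the phones listed in phones_to_be_removed. A toy illustration with made-up tables (not the real mappings from fame_asr):

reduction_key = {'i̯': 'i'}          # hypothetical mapping
phones_to_be_removed = ['ʔ']         # hypothetical removal list

phones = ['i̯', 'p', 'ʔ', 'a:']
reduced = [reduction_key.get(i, i) for i in phones if i not in phones_to_be_removed]
print(reduced)  # ['i', 'p', 'a:']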
@@ -99,7 +96,7 @@ translation_key_asr2htk = {
     'ṷ': 'u_',
 
     # on the analogy of German umlaut, 'e' is used.
-    'ö': 'oe', 'ö:': 'oe:', ''
+    'ö': 'oe', 'ö:': 'oe:',
     'ü': 'ue', 'ü:': 'ue:',
 
     # on the analogy of Chinese...
@@ -61,7 +61,7 @@ phoneset = [
     'ɔⁿ',
     'ɔ:',
     'ɔ:ⁿ',
-    'ɔ̈', # not included in lex.ipa
+    #'ɔ̈', # not included in lex.ipa
     'ɔ̈.',
     'ɔ̈:',
 
@@ -1,197 +0,0 @@
-import sys
-import os
-os.chdir(r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model')
-
-import fame_functions
-from phoneset import fame_ipa, fame_asr
-import convert_phoneset
-
-
-## general
-stop = 'p, b, t, d, k, g'
-nasal = 'm, n, ŋ'
-fricative = 's, z, f, v, h, x, j'
-liquid = 'l, r'
-vowel = 'a, a:, e:, i, i:, i̯, o, o:, u, u:, ṷ, ö, ö:, ü, ü:, ɔ, ɔ:, ɔ̈, ə, ɛ, ɛ:, ɪ, ɪ:'
-
-## consonant
-c_front = 'p, b, m, f, v'
-c_central = 't, d, n, s, z, l, r'
-c_back = 'k, g, ŋ, h, x, j'
-
-fortis = 'p, t, k, f, s'
-lenis = 'b, d, g, v, z, j'
-neither_fortis_nor_lenis = 'm, n, ŋ, h, l, r, x'
-
-coronal = 't, d, n, s, z, l, r, j'
-non_coronal = 'p, b, m, k, g, ŋ, f, v, h, x'
-
-anterior = 'p, b, m, t, d, n, f, v, s, z, l'
-non_anterior = 'k, g, ŋ, h, x, j, r'
-
-continuent = 'm, n, ŋ, f, v, s, z, h, l, r'
-non_continuent = 'p, b, t, d, k, g, x, j'
-
-strident = 's, z, j'
-non_strident = 'f, v, h'
-unstrident = 'p, b, t, d, m, n, ŋ, k, g, r, x'
-
-glide = 'h, l, r'
-syllabic = 'm, l, ŋ'
-
-unvoiced = 'p, t, k, s, f, x, h'
-voiced = 'b, d, g, z, v, m, n, ŋ, l, r, j'
-
-#affricate: ???
-non_affricate = 's, z, f, v'
-
-voiced_stop = 'b, d, g'
-unvoiced_stop = 'p, t, k'
-front_stop = 'p, b'
-central_stop = 't, d'
-back_stop = 'k, g'
-
-voiced_fricative = 'z, v'
-unvoiced_fricative = 's, f'
-front_fricative = 'f, v'
-central_fricative = 's, z'
-back_fricative = 'j'
-
-
-## vowel
-v_front = 'i, i:, i̯, ɪ, ɪ:, e:, ə, ɛ, ɛ:, a, a:'
-v_central = 'ə, ɛ, ɛ:, a, a:'
-v_back = 'u, u:, ü, ü:, ṷ, ɔ, ɔ:, ɔ̈, ö, ö:, o, o:'
-
-long = 'a:, e:, i:, o:, u:, ö:, ü:, ɔ:, ɛ:, ɪ:'
-short = 'a, i, i̯, o, u, ṷ, ö, ü, ɔ, ɔ̈, ə, ɛ, ɪ'
-
-#Dipthong: ???
-#Front-Start: ???
-#Fronting: ???
-
-high = 'i, i:, i̯, ɪ, ɪ: u, u:, ṷ, ə, e:, o, o:, ö, ö:, ü, ü:'
-medium = 'e:, ə, ɛ, ɛ:, ɔ, ɔ:, ɔ̈, o, o:, ö, ö:'
-low = 'a, a:, ɛ, ɛ:, ɔ, ɔ:, ɔ̈'
-
-rounded = 'a, a:, o, o:, u, u:, ṷ, ö, ö:, ü, ü:, ɔ, ɔ:, ɔ̈'
-unrounded = 'i, i:, i̯, e:, ə, ɛ, ɛ:, ɪ, ɪ:'
-
-i_vowel = 'i, i:, i̯, ɪ, ɪ:'
-e_vowel = 'e:,ə, ɛ, ɛ:'
-a_vowel = 'a, a:'
-o_vowel = 'o, o:, ö, ö:, ɔ, ɔ:, ɔ̈'
-u_vowel = 'u, u:, ṷ, ü, ü:'
-
-## htk phoneset
-phoneset = fame_asr.phoneset_htk
-
-## convert ipa group to htk format for quests.hed.
-def _ipa2quest(R_or_L, ipa_text):
-    assert R_or_L in ['R', 'L'], print('the first argument should be either R or L.')
-    ipa_list = ipa_text.replace(' ', '').split(',')
-    if R_or_L == 'R':
-        quests_list = ['*+' + fame_functions.ipa2htk(ipa) for ipa in ipa_list]
-    else:
-        quests_list = [fame_functions.ipa2htk(ipa) + '-*' for ipa in ipa_list]
-    return ','.join(quests_list)
-
-
-def make_quests_hed(quest_hed):
-    def _add_quests_item(R_or_L, item_name_, ipa_text):
-        assert R_or_L in ['R', 'L'], print('the first argument should be either R or L.')
-        item_name = R_or_L + '_' + item_name_
-        with open(quest_hed, 'ab') as f:
-            f.write(bytes('QS "' + item_name + '"\t{ ' + _ipa2quest(R_or_L, ipa_text) + ' }\n', 'ascii'))
-
-    if os.path.exists(quest_hed):
-        os.remove(quest_hed)
-
-    for R_or_L in ['R', 'L']:
-        _add_quests_item(R_or_L, 'NonBoundary', '*')
-        _add_quests_item(R_or_L, 'Silence', 'sil')
-
-        _add_quests_item(R_or_L, 'Stop', stop)
-        _add_quests_item(R_or_L, 'Nasal', nasal)
-        _add_quests_item(R_or_L, 'Fricative', fricative)
-        _add_quests_item(R_or_L, 'Liquid', liquid)
-        _add_quests_item(R_or_L, 'Vowel', vowel)
-
-        _add_quests_item(R_or_L, 'C-Front', c_front)
-        _add_quests_item(R_or_L, 'C-Central', c_central)
-        _add_quests_item(R_or_L, 'C-Back', c_back)
-
-        _add_quests_item(R_or_L, 'V-Front', v_front)
-        _add_quests_item(R_or_L, 'V-Central', v_central)
-        _add_quests_item(R_or_L, 'V-Back', v_back)
-
-        _add_quests_item(R_or_L, 'Front', c_front + v_front)
-        _add_quests_item(R_or_L, 'Central', c_central + v_central)
-        _add_quests_item(R_or_L, 'Back', c_front + v_back)
-
-        _add_quests_item(R_or_L, 'Fortis', fortis)
-        _add_quests_item(R_or_L, 'Lenis', lenis)
-        _add_quests_item(R_or_L, 'UnFortLenis', neither_fortis_nor_lenis)
-
-        _add_quests_item(R_or_L, 'Coronal', coronal)
-        _add_quests_item(R_or_L, 'NonCoronal', non_coronal)
-
-        _add_quests_item(R_or_L, 'Anterior', anterior)
-        _add_quests_item(R_or_L, 'NonAnterior', non_anterior)
-
-        _add_quests_item(R_or_L, 'Continuent', continuent)
-        _add_quests_item(R_or_L, 'NonContinuent', non_continuent)
-
-        _add_quests_item(R_or_L, 'Strident', strident)
-        _add_quests_item(R_or_L, 'NonStrident', non_strident)
-        _add_quests_item(R_or_L, 'UnStrident', unstrident)
-
-        _add_quests_item(R_or_L, 'Glide', glide)
-        _add_quests_item(R_or_L, 'Syllabic', syllabic)
-
-        _add_quests_item(R_or_L, 'Unvoiced-Cons', unvoiced)
-        _add_quests_item(R_or_L, 'Voiced-Cons', voiced)
-        _add_quests_item(R_or_L, 'Unvoiced-All', unvoiced + ', sil')
-
-        _add_quests_item(R_or_L, 'Long', long)
-        _add_quests_item(R_or_L, 'Short', short)
-
-        #_add_quests_item(R_or_L, 'Dipthong', xxx)
-        #_add_quests_item(R_or_L, 'Front-Start', xxx)
-        #_add_quests_item(R_or_L, 'Fronting', xxx)
-
-        _add_quests_item(R_or_L, 'High', high)
-        _add_quests_item(R_or_L, 'Medium', medium)
-        _add_quests_item(R_or_L, 'Low', low)
-
-        _add_quests_item(R_or_L, 'Rounded', rounded)
-        _add_quests_item(R_or_L, 'UnRounded', unrounded)
-
-        #_add_quests_item(R_or_L, 'Affricative', rounded)
-        _add_quests_item(R_or_L, 'NonAffricative', non_affricate)
-
-        _add_quests_item(R_or_L, 'IVowel', i_vowel)
-        _add_quests_item(R_or_L, 'EVowel', e_vowel)
-        _add_quests_item(R_or_L, 'AVowel', a_vowel)
-        _add_quests_item(R_or_L, 'OVowel', o_vowel)
-        _add_quests_item(R_or_L, 'UVowel', u_vowel)
-
-        _add_quests_item(R_or_L, 'Voiced-Stop', voiced_stop)
-        _add_quests_item(R_or_L, 'UnVoiced-Stop', unvoiced_stop)
-
-        _add_quests_item(R_or_L, 'Front-Stop', front_stop)
-        _add_quests_item(R_or_L, 'Central-Stop', central_stop)
-        _add_quests_item(R_or_L, 'Back-Stop', back_stop)
-
-        _add_quests_item(R_or_L, 'Voiced-Fric', voiced_fricative)
-        _add_quests_item(R_or_L, 'UnVoiced-Fric', unvoiced_fricative)
-
-        _add_quests_item(R_or_L, 'Front-Fric', front_fricative)
-        _add_quests_item(R_or_L, 'Central-Fric', central_fricative)
-        _add_quests_item(R_or_L, 'Back-Fric', back_fricative)
-
-        for p in phoneset:
-            _add_quests_item(R_or_L, p, p)
-
-    return