label alignment using HVite is added.

yemaozi88 2019-02-14 00:21:28 +01:00
parent 8f89f60538
commit c185072d5b
11 changed files with 527 additions and 339 deletions

Binary file not shown.

View File

@@ -4,7 +4,7 @@
     <SchemaVersion>2.0</SchemaVersion>
     <ProjectGuid>4d8c8573-32f0-4a62-9e62-3ce5cc680390</ProjectGuid>
     <ProjectHome>.</ProjectHome>
-    <StartupFile>fame_hmm.py</StartupFile>
+    <StartupFile>htk_vs_kaldi.py</StartupFile>
     <SearchPath>
     </SearchPath>
     <WorkingDirectory>.</WorkingDirectory>

View File

@@ -38,3 +38,9 @@ def convert_phoneset(word_list, translation_key):
         translation_key (dict):
     """
     return [translation_key.get(phone, phone) for phone in word_list]
+
+
+def phone_reduction(phones, reduction_key, phones_to_be_removed=None):
+    """ replace phones using reduction_key; phones listed in phones_to_be_removed are dropped. """
+    phones_to_be_removed = phones_to_be_removed or []
+    return [reduction_key.get(i, i) for i in phones
+            if i not in phones_to_be_removed]
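For illustration only (not part of the commit), a minimal sketch of how this generic helper could be called, assuming the signature above and assuming the phoneset package exposes reduction_key and phones_to_be_removed as in the fame_asr hunks further down:

    from phoneset import fame_asr
    import convert_phoneset

    phones = ['s', 'ɔ:', 'ɡ', 's:']
    print(convert_phoneset.phone_reduction(
        phones, fame_asr.reduction_key, fame_asr.phones_to_be_removed))
    # -> ['s', 'ɔ:', 'g']   ('ɡ' is rewritten, 's:' is dropped, the rest pass through)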

View File

@@ -17,6 +17,7 @@ novo_api_dir = os.path.join(WSL_dir, 'python-novo-api', 'novoapi')
 rug_dir = r'c:\OneDrive\Research\rug'
 experiments_dir = os.path.join(rug_dir, 'experiments')
 htk_dir = os.path.join(experiments_dir, 'acoustic_model', 'fame', 'htk')
+kaldi_dir = os.path.join(WSL_dir, 'kaldi-trunk', 'egs', '_stimmen')
 stimmen_dir = os.path.join(experiments_dir, 'stimmen')

 # data

View File

@@ -321,9 +321,11 @@ def combine_lexicon(lexicon_file1, lexicon_file2, lexicon_out):
     lex.to_csv(lexicon_out, index=False, header=False, sep='\t', encoding='utf-8')

-def fix_single_quote(lexicon_file):
-    """ add '\' before all single quote at the beginning of words.
-    convert special characters to ascii compatible characters.
+def fix_lexicon(lexicon_file):
+    """ fix lexicon
+    - add '\' before all single quote at the beginning of words.
+    - convert special characters to ascii compatible characters.
+    - add silence.

     Args:
         lexicon_file (path): lexicon file, which will be overwritten.
@@ -331,6 +333,12 @@ def fix_single_quote(lexicon_file):
     """
     lex = load_lexicon(lexicon_file)
     lex = lex.dropna() # remove N/A.
+
+    # add 'sil'
+    row = pd.Series(['SILENCE', 'sil'], index=lex.columns)
+    lex = lex.append(row, ignore_index=True)
+    lex = lex.sort_values(by='word', ascending=True)
+
     for i in lex[lex['word'].str.startswith('\'')].index.values:
         lex.iat[i, 0] = lex.iat[i, 0].replace('\'', '\\\'')
     # to_csv does not work with space separator. therefore all tabs should manually be replaced.
@@ -346,10 +354,11 @@ def word2htk(word):
 def ipa2asr(ipa):
     curr_dir = os.path.dirname(os.path.abspath(__file__))
     translation_key_ipa2asr = np.load(os.path.join(curr_dir, 'phoneset', 'fame_ipa2asr.npy')).item(0)
+    #ipa_ = fame_asr.phone_reduction(ipa)
     ipa_splitted = convert_phoneset.split_word(ipa, fame_ipa.multi_character_phones)
     ipa_splitted = fame_ipa.phone_reduction(ipa_splitted)
     asr_splitted = convert_phoneset.convert_phoneset(ipa_splitted, translation_key_ipa2asr)
+    asr_splitted = fame_asr.phone_reduction(asr_splitted)
     return ''.join(asr_splitted)
@@ -360,5 +369,6 @@ def ipa2htk(ipa):
     ipa_splitted = convert_phoneset.split_word(ipa, fame_ipa.multi_character_phones)
     ipa_splitted = fame_ipa.phone_reduction(ipa_splitted)
     asr_splitted = convert_phoneset.convert_phoneset(ipa_splitted, translation_key_ipa2asr)
+    asr_splitted = fame_asr.phone_reduction(asr_splitted)
     htk_splitted = convert_phoneset.convert_phoneset(asr_splitted, fame_asr.translation_key_asr2htk)
     return ''.join(htk_splitted)
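To make the conversion chain in ipa2htk concrete, here is a tiny self-contained sketch (not part of the commit); the translation tables below are toy stand-ins, not the real FAME/stimmen tables loaded from fame_ipa2asr.npy and fame_asr:

    # toy stand-ins for the tables actually used in ipa2asr / ipa2htk
    translation_key_ipa2asr = {'aː': 'a:', 'ʃ': 'sj'}
    translation_key_asr2htk = {'a:': 'a2', 'sj': 'S'}

    ipa_splitted = ['p', 'aː', 'ʃ']          # what split_word would return for a made-up word
    asr_splitted = [translation_key_ipa2asr.get(p, p) for p in ipa_splitted]
    htk_splitted = [translation_key_asr2htk.get(p, p) for p in asr_splitted]
    print(''.join(htk_splitted))             # 'pa2S'

The newly added fame_asr.phone_reduction call sits between the two lookups, so rare ASR phones are merged before the HTK spelling is produced.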

View File

@@ -27,7 +27,8 @@ extract_features = 0
 flat_start = 0
 train_model_without_sp = 0
 add_sp = 0
-train_model_with_sp = 1
+train_model_with_sp = 0
+train_model_with_sp_align_mlf = 1
@@ -75,6 +76,7 @@ if not os.path.exists(label_dir):
 ## training
 hcompv_scp_train = os.path.join(tmp_dir, 'train.scp')
 mlf_file_train = os.path.join(label_dir, 'train_phone.mlf')
+mlf_file_train_aligned = os.path.join(label_dir, 'train_phone_aligned.mlf')

 ## train without sp
 niter_max = 10
@@ -102,7 +104,8 @@ if make_lexicon:
     # (1) Replace all tabs with single space;
     # (2) Put a '\' before any dictionary entry beginning with single quote
     #http://electroblaze.blogspot.nl/2013/03/understanding-htk-error-messages.html
-    fame_functions.fix_single_quote(lexicon_htk)
+    print('>>> fixing the lexicon...')
+    fame_functions.fix_lexicon(lexicon_htk)
     print("elapsed time: {}".format(time.time() - timer_start))
@@ -269,11 +272,11 @@ if train_model_without_sp:
         fh.make_new_directory(modeln_dir)
         pyhtk.re_estimation(
             config_train,
-            os.path.join(modeln_dir_pre, 'macros'),
             os.path.join(modeln_dir_pre, hmmdefs_name),
             modeln_dir,
             hcompv_scp_train, phonelist_txt,
-            mlf_file=mlf_file_train)
+            mlf_file=mlf_file_train,
+            macros=os.path.join(modeln_dir_pre, 'macros'))
         print("elapsed time: {}".format(time.time() - timer_start))
@@ -321,7 +324,6 @@ if add_sp:
 ## ======================= train model with short pause =======================
 if train_model_with_sp:
     print('==== train model with sp ====')
-    #for niter in range(niter_max+1, niter_max*2+1):
     for niter in range(20, 50):
         timer_start = time.time()
         hmm_n = 'iter' + str(niter)
@@ -333,9 +335,31 @@ if train_model_with_sp:
         fh.make_new_directory(modeln_dir)
         pyhtk.re_estimation(
             config_train,
-            os.path.join(modeln_dir_pre, 'macros'),
             os.path.join(modeln_dir_pre, hmmdefs_name),
             modeln_dir,
             hcompv_scp_train, phonelist_txt,
-            mlf_file=mlf_file_train)
+            mlf_file=mlf_file_train,
+            macros=os.path.join(modeln_dir_pre, 'macros'))
+        print("elapsed time: {}".format(time.time() - timer_start))
+
+
+## ======================= train model with short pause =======================
+if train_model_with_sp_align_mlf:
+    print('==== train model with sp with align.mlf ====')
+    for niter in range(50, 60):
+        timer_start = time.time()
+
+        hmm_n = 'iter' + str(niter)
+        hmm_n_pre = 'iter' + str(niter-1)
+        modeln_dir = os.path.join(model1_dir, hmm_n)
+        modeln_dir_pre = os.path.join(model1_dir, hmm_n_pre)
+
+        # re-estimation
+        fh.make_new_directory(modeln_dir)
+        pyhtk.re_estimation(
+            config_train,
+            os.path.join(modeln_dir_pre, hmmdefs_name),
+            modeln_dir,
+            hcompv_scp_train, phonelist_txt,
+            mlf_file=mlf_file_train_aligned,
+            macros=os.path.join(modeln_dir_pre, 'macros'))
         print("elapsed time: {}".format(time.time() - timer_start))

View File

@@ -11,6 +11,7 @@ import glob
 import numpy as np
 import pandas as pd
+from collections import Counter
 #import matplotlib.pyplot as plt
 #from sklearn.metrics import confusion_matrix
@@ -50,11 +51,14 @@ from htk import pyhtk
 #lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
 #lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk')

-## procedure
+# procedure
+make_dic_file = 0
+make_HTK_files = 1
+extract_features = 0
 #make_htk_dict_files = 0
 #do_forced_alignment_htk = 0
 #eval_forced_alignment_htk = 0
-#make_kaldi_data_files = 0
+make_kaldi_files = 0
 #make_kaldi_lexicon_txt = 0
 #load_forced_alignment_kaldi = 1
 #eval_forced_alignment_kaldi = 1
@@ -66,13 +70,34 @@ from htk import pyhtk
 #sys.path.append(os.path.join(default.repo_dir, 'toolbox'))
 #from evaluation import plot_confusion_matrix

+## HTK related files.
 config_dir = os.path.join(default.htk_dir, 'config')
 model_dir = os.path.join(default.htk_dir, 'model')
+feature_dir = os.path.join(default.htk_dir, 'mfc', 'stimmen')
+config_hcopy = os.path.join(config_dir, 'config.HCopy')
+
+# files to be made.
 lattice_file = os.path.join(config_dir, 'stimmen.ltc')
-#pyhtk.create_word_lattice_file(
-#    os.path.join(config_dir, 'stimmen.net'),
-#    lattice_file)
-hvite_scp = os.path.join(default.htk_dir, 'tmp', 'stimmen_test.scp')
+phonelist_txt = os.path.join(config_dir, 'phonelist.txt')
+stimmen_dic = os.path.join(default.htk_dir, 'lexicon', 'stimmen_recognition.dic')
+hcopy_scp = os.path.join(default.htk_dir, 'tmp', 'stimmen_test_hcopy.scp')
+hvite_scp = os.path.join(default.htk_dir, 'tmp', 'stimmen_test_hvite.scp')
+hresult_scp = os.path.join(default.htk_dir, 'tmp', 'stimmen_test_result.scp')
+
+## Kaldi related files.
+kaldi_data_dir = os.path.join(default.kaldi_dir, 'data')
+
+# files to be made.
+wav_scp = os.path.join(kaldi_data_dir, 'test', 'wav.scp')
+text_file = os.path.join(kaldi_data_dir, 'test', 'text')
+utt2spk = os.path.join(kaldi_data_dir, 'test', 'utt2spk')
+corpus_txt = os.path.join(kaldi_data_dir, 'local', 'corpus.txt')
+lexicon_txt = os.path.join(kaldi_data_dir, 'local', 'dict', 'lexicon.txt')
+nonsilence_phones_txt = os.path.join(kaldi_data_dir, 'local', 'dict', 'nonsilence_phones.txt')
+silence_phones_txt = os.path.join(kaldi_data_dir, 'local', 'dict', 'silence_phones.txt')
+optional_silence_txt = os.path.join(kaldi_data_dir, 'local', 'dict', 'optional_silence.txt')

 ## ======================= load test data ======================
@@ -85,36 +110,159 @@ df = stimmen_functions.add_row_htk(df)
 word_list = [i for i in list(set(df['word'])) if not pd.isnull(i)]
 word_list = sorted(word_list)

-# pronunciation variants
-for word in word_list:
-    df_ = df[df['word']==word]
-    print('{0} has {1} variants'.format(word, len(np.unique(df_['htk']))))
-
-#fh.make_filelist(stimmen_test_dir, hvite_scp, file_type='wav')
-
-#output = pyhtk.recognition(
-#    os.path.join(default.htk_dir, 'config', 'config.rec',
-#    lattice_file,
-#    os.path.join(model_dir, 'hmm1', 'iter13'),
-#    dictionary_file,
-#    os.path.join(config_dir, 'phonelist.txt'),
-#    hvite_scp)
-
-#pyhtk.create_label_file(
-#    row['word'],
-#    os.path.join(stimmen_test_dir, filename.replace('.wav', '.lab')))
-
-## ======================= make a HTK dic file ======================
-#if make_htk_dic_file:
-#    output_type = 3
-dictionary_txt = os.path.join(default.htk_dir, 'lexicon', 'stimmen.dic')
-#for word in word_list:
-word = word_list[2]
-
-# pronunciation variant of the target word.
-pronunciations = df_test['asr'][df_test['word'].str.match(word)]
-
-# make dic file.
-#am_func.make_htk_dict(word, pronvar_, htk_dict_file, output_type)
+## ======================= make dic file to check pronunciation variants ======================
+# dic file should be manually modified depending on the task: recognition / forced-alignment.
+if make_dic_file:
+    # for HTK.
+    with open(stimmen_dic, mode='wb') as f:
+        for word in word_list:
+            df_ = df[df['word']==word]
+            pronunciations = list(np.unique(df_['htk']))
+            pronunciations_ = [word.upper() + ' sil ' + ' '.join(convert_phoneset.split_word(
+                htk, fame_asr.multi_character_phones_htk)) + ' sil'
+                for htk in pronunciations]
+            f.write(bytes('\n'.join(pronunciations_) + '\n', 'ascii'))
+        f.write(bytes('SILENCE sil\n', 'ascii'))
+
+    # for Kaldi.
+    fh.make_new_directory(os.path.join(kaldi_data_dir, 'local', 'dict'))
+    with open(lexicon_txt, mode='wb') as f:
+        f.write(bytes('!SIL sil\n', 'utf-8'))
+        f.write(bytes('<UNK> spn\n', 'utf-8'))
+        for word in word_list:
+            df_ = df[df['word']==word]
+            pronunciations = list(np.unique(df_['asr']))
+            pronunciations_ = [word.lower() + ' ' + ' '.join(convert_phoneset.split_word(
+                asr, fame_asr.multi_character_phones))
+                for asr in pronunciations]
+            f.write(bytes('\n'.join(pronunciations_) + '\n', 'utf-8'))
+
+
+## ======================= test data for recognition ======================
+# only target pronunciation variants.
+df_rec = pd.DataFrame(index=[], columns=list(df.keys()))
+for word in word_list:
+    variants = [htk.replace(' ', '')
+                for htk in stimmen_functions.load_pronunciations(word.upper(), stimmen_dic)]
+    df_ = df[df['word'] == word]
+    for index, row in df_.iterrows():
+        if row['htk'] in variants:
+            df_rec = df_rec.append(row, ignore_index=True)
+
+
+## ======================= make files required for HTK ======================
+if make_HTK_files:
+    # make a word lattice file.
+    pyhtk.create_word_lattice_file(
+        os.path.join(config_dir, 'stimmen.net'),
+        lattice_file)
+
+    # extract features.
+    with open(hcopy_scp, 'wb') as f:
+        filelist = [os.path.join(stimmen_test_dir, filename) + '\t'
+                    + os.path.join(feature_dir, os.path.basename(filename).replace('.wav', '.mfc'))
+                    for filename in df['filename']]
+        f.write(bytes('\n'.join(filelist), 'ascii'))
+    pyhtk.wav2mfc(config_hcopy, hcopy_scp)
+
+    # make label files.
+    for index, row in df.iterrows():
+        filename = row['filename'].replace('.wav', '.lab')
+        label_file = os.path.join(feature_dir, filename)
+        with open(label_file, 'wb') as f:
+            label_string = 'START\n' + row['word'].upper() + '\nEND\n'
+            f.write(bytes(label_string, 'ascii'))
+
+
+## ======================= make files required for Kaldi =======================
+if make_kaldi_files:
+    fh.make_new_directory(os.path.join(kaldi_data_dir, 'test'))
+    fh.make_new_directory(os.path.join(kaldi_data_dir, 'test', 'local'))
+    fh.make_new_directory(os.path.join(kaldi_data_dir, 'conf'))
+
+    # remove previous files.
+    if os.path.exists(wav_scp):
+        os.remove(wav_scp)
+    if os.path.exists(text_file):
+        os.remove(text_file)
+    if os.path.exists(utt2spk):
+        os.remove(utt2spk)
+
+    f_wav_scp = open(wav_scp, 'a', encoding="utf-8", newline='\n')
+    f_text_file = open(text_file, 'a', encoding="utf-8", newline='\n')
+    f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n')
+
+    # make wav.scp, text, and utt2spk files.
+    for i, row in df_rec.iterrows():
+        filename = row['filename']
+        print('=== {0}: {1} ==='.format(i, filename))
+
+        wav_file = os.path.join(stimmen_test_dir, filename)
+        #if os.path.exists(wav_file):
+        speaker_id = 'speaker_' + str(i).zfill(4)
+        utterance_id = filename.replace('.wav', '')
+        utterance_id = utterance_id.replace(' ', '_')
+        utterance_id = speaker_id + '-' + utterance_id
+
+        # output
+        f_wav_scp.write('{0} {1}\n'.format(
+            utterance_id,
+            wav_file.replace('c:/', '/mnt/c/').replace('\\', '/')))  # convert path to unix format.
+        f_text_file.write('{0}\t{1}\n'.format(utterance_id, df_rec['word'][i].lower()))
+        f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id))
+
+    f_wav_scp.close()
+    f_text_file.close()
+    f_utt2spk.close()
+
+    with open(corpus_txt, 'wb') as f:
+        f.write(bytes('\n'.join([word.lower() for word in word_list]) + '\n', 'utf-8'))
+
+    with open(nonsilence_phones_txt, 'wb') as f:
+        f.write(bytes('\n'.join(fame_asr.phoneset_short) + '\n', 'utf-8'))
+
+    with open(silence_phones_txt, 'wb') as f:
+        f.write(bytes('sil\nspn\n', 'utf-8'))
+
+    with open(optional_silence_txt, 'wb') as f:
+        f.write(bytes('sil\n', 'utf-8'))
+
+    with open(os.path.join(kaldi_data_dir, 'conf', 'decode.config'), 'wb') as f:
+        f.write(bytes('first_beam=10.0\n', 'utf-8'))
+        f.write(bytes('beam=13.0\n', 'utf-8'))
+        f.write(bytes('lattice_beam=6.0\n', 'utf-8'))
+
+    with open(os.path.join(kaldi_data_dir, 'conf', 'mfcc.conf'), 'wb') as f:
+        f.write(bytes('--use-energy=false', 'utf-8'))
+
+
+## ======================= recognition ======================
+listdir = glob.glob(os.path.join(feature_dir, '*.mfc'))
+with open(hvite_scp, 'wb') as f:
+    f.write(bytes('\n'.join(listdir), 'ascii'))
+with open(hresult_scp, 'wb') as f:
+    f.write(bytes('\n'.join(listdir).replace('.mfc', '.rec'), 'ascii'))
+
+# calculate result
+performance = np.zeros((1, 2))
+for niter in range(1, 50):
+    output = pyhtk.recognition(
+        os.path.join(config_dir, 'config.rec'),
+        lattice_file,
+        os.path.join(default.htk_dir, 'model', 'hmm1', 'iter' + str(niter), 'hmmdefs'),
+        stimmen_dic, phonelist_txt, hvite_scp)
+
+    output = pyhtk.calc_recognition_performance(
+        stimmen_dic, hresult_scp)
+    per_sentence, per_word = pyhtk.load_recognition_output_all(output)
+    performance_ = np.array([niter, per_sentence['accuracy']]).reshape(1, 2)
+    performance = np.r_[performance, performance_]
+    print('{0}: {1}[%]'.format(niter, per_sentence['accuracy']))

 ## ======================= forced alignment using HTK =======================
@@ -168,54 +316,7 @@ if do_forced_alignment_htk:
             predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))

-## ======================= make files which is used for forced alignment by Kaldi =======================
-if make_kaldi_data_files:
-    wav_scp = os.path.join(kaldi_data_dir, 'wav.scp')
-    text_file = os.path.join(kaldi_data_dir, 'text')
-    utt2spk = os.path.join(kaldi_data_dir, 'utt2spk')
-
-    # remove previous files.
-    if os.path.exists(wav_scp):
-        os.remove(wav_scp)
-    if os.path.exists(text_file):
-        os.remove(text_file)
-    if os.path.exists(utt2spk):
-        os.remove(utt2spk)
-
-    f_wav_scp = open(wav_scp, 'a', encoding="utf-8", newline='\n')
-    f_text_file = open(text_file, 'a', encoding="utf-8", newline='\n')
-    f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n')
-
-    # make wav.scp, text, and utt2spk files.
-    for i in df.index:
-        filename = df['filename'][i]
-        print('=== {0}: {1} ==='.format(i, filename))
-
-        #if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):
-        wav_file = os.path.join(wav_dir, filename)
-        if os.path.exists(wav_file):
-            speaker_id = 'speaker_' + str(i).zfill(4)
-            utterance_id = filename.replace('.wav', '')
-            utterance_id = utterance_id.replace(' ', '_')
-            utterance_id = speaker_id + '-' + utterance_id
-
-            # wav.scp file
-            wav_file_unix = wav_file.replace('\\', '/')
-            wav_file_unix = wav_file_unix.replace('c:/', '/mnt/c/')
-            f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix))
-
-            # text file
-            word = df['word'][i].lower()
-            f_text_file.write('{0}\t{1}\n'.format(utterance_id, word))
-
-            # utt2spk
-            f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id))
-
-    f_wav_scp.close()
-    f_text_file.close()
-    f_utt2spk.close()
-
 ## ======================= make lexicon txt which is used by Kaldi =======================
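For context (not part of the commit), the files written by make_kaldi_files follow Kaldi's standard data-directory layout; with made-up ids, words, and paths they would look roughly like this:

    # wav.scp : <utterance-id> <wav path>
    speaker_0001-word_0001 /mnt/c/stimmen/test/word_0001.wav
    # text    : <utterance-id> <transcription>
    speaker_0001-word_0001 paard
    # utt2spk : <utterance-id> <speaker-id>
    speaker_0001-word_0001 speaker_0001
    # local/dict/lexicon.txt
    !SIL sil
    <UNK> spn
    paard p a: t

Kaldi recipes normally run utils/fix_data_dir.sh and utils/validate_data_dir.sh on such a directory afterwards to sort the files and generate spk2utt.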

View File

@@ -68,14 +68,21 @@ phoneset = [
 # the phones which seldom occur are replaced with another more popular phones.
 # replacements are based on the advice from Martijn Wieling.
 reduction_key = {
-    'y':'i:', 'e':'e:', 'ə:':'ɛ:', 'r:':'r', 'ɡ':'g'
+    'y':'i:', 'e':'e:', 'ə:':'ɛ:', 'r:':'r', 'ɡ':'g',
+    # aki added because this is used in stimmen_project.
+    'ɔ̈:':'ɔ:'
 }
 # already removed beforehand in phoneset. Just to be sure.
-phones_to_be_removed = ['ú', 's:', 'ɔ̈:']
+phones_to_be_removed = ['ú', 's:']

 def phone_reduction(phones):
+    """
+    Args:
+        phones (list): list of phones.
+    """
     return [reduction_key.get(i, i) for i in phones
             if not i in phones_to_be_removed]

 phoneset_short = list(set(phone_reduction(phoneset)))
 phoneset_short.sort()

@@ -97,6 +104,7 @@ translation_key_asr2htk = {
     # refer to Xsampa.
     'ɔ': 'O', 'ɔ:': 'O:', 'ɔ̈': 'Oe',
+    #'ɔ̈:': 'O:', # does not appear in FAME, but used in stimmen.
     'ɛ': 'E', 'ɛ:': 'E:',
     'ɪ': 'I', 'ɪ:': 'I:',
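A quick illustration of the behaviour change (a sketch, not part of the commit; it assumes the reduction_key and phones_to_be_removed defined above are in scope):

    phones = ['p', 'ɔ̈:', 't']
    print([reduction_key.get(p, p) for p in phones if p not in phones_to_be_removed])
    # before this commit: ['p', 't']        ('ɔ̈:' was in phones_to_be_removed and simply vanished)
    # after this commit:  ['p', 'ɔ:', 't']  ('ɔ̈:' is now rewritten to 'ɔ:' via reduction_key)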

View File

@@ -81,3 +81,25 @@ def add_row_asr(df):
     for index, row in df.iterrows():
         asr.append(fame_functions.ipa2asr(row['ipa']))
     return df.assign(asr=asr)
+
+
+def load_pronunciations(WORD, htk_dic):
+    """ load pronunciation variants from HTK dic file.
+
+    Args:
+        WORD (str): word in capital letters.
+        htk_dic (path): HTK dict file.
+
+    Returns:
+        (pronunciations) (list): pronunciation variants of WORD.
+
+    Notes:
+        Because this function loads all contents from the htk_dic file,
+        it is not recommended for a large lexicon.
+    """
+    with open(htk_dic) as f:
+        lines = f.read().replace(' sil', '')
+        lines = lines.split('\n')
+    return [' '.join(line.split(' ')[1:])
+            for line in lines if line.split(' ')[0]==WORD]
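A minimal usage sketch (not part of the commit); the dictionary content, word, and phone labels below are made up, but the format matches what make_dic_file writes (word, then the phones wrapped in sil):

    # stimmen_recognition.dic might contain lines such as:
    #   PAARD sil p a2 t sil
    #   PAARD sil p a2 r t sil
    #   SILENCE sil
    print(load_pronunciations('PAARD', 'stimmen_recognition.dic'))
    # -> ['p a2 t', 'p a2 r t']   (the ' sil' markers are stripped while reading)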

View File

@@ -2,8 +2,9 @@ import os
 os.chdir(r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model')
 import sys
 import shutil
+from collections import Counter

-#import numpy as np
+import numpy as np
 import pandas as pd
 import defaultfiles as default

@@ -62,3 +63,18 @@ for ipa in df['ipa']:
     if ':' in ipa_splitted:
         print(ipa_splitted)
+
+
+## check pronunciation variants
+df_clean = stimmen_functions.load_transcriptions_clean(stimmen_test_dir)
+df_clean = stimmen_functions.add_row_asr(df_clean)
+df_clean = stimmen_functions.add_row_htk(df_clean)
+
+for word in word_list:
+    #word = word_list[1]
+    df_ = df_clean[df_clean['word']==word]
+    c = Counter(df_['htk'])
+    pronunciations = dict()
+    for key, value in zip(c.keys(), c.values()):
+        if value > 3:
+            pronunciations[key] = value
+    print(pronunciations)
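Side note (not part of the commit): Counter already yields (key, count) pairs directly, so the same filtering could be written more compactly as:

    pronunciations = {htk: n for htk, n in Counter(df_['htk']).items() if n > 3}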