@@ -16,63 +16,50 @@ import acoustic_model_functions as am_func
import convert_xsampa2ipa
import defaultfiles as default

from forced_alignment import pyhtk

## ======================= user define =======================
#curr_dir = r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model'
#config_ini = 'config.ini'
#repo_dir = r'C:\Users\Aki\source\repos'
#forced_alignment_module = repo_dir + '\\forced_alignment'
#forced_alignment_module_old = repo_dir + '\\aki_tools'
#ipa_xsampa_converter_dir = repo_dir + '\\ipa-xsama-converter'
#accent_classification_dir = repo_dir + '\\accent_classification\accent_classification'

excel_file = os.path.join(default.experiments_dir, 'stimmen', 'data', 'Frisian Variants Picture Task Stimmen.xlsx')
data_dir = os.path.join(default.experiments_dir, 'stimmen', 'data')
#experiments_dir = r'C:\OneDrive\Research\rug\experiments'
#csvfile = data_dir + '\\Frisian Variants Picture Task Stimmen.csv'

#wav_dir = os.path.join(default.experiments_dir, 'stimmen', 'wav_44k') # 44.1k
wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k

acoustic_model_dir = os.path.join(default.experiments_dir, 'friesian', 'acoustic_model', 'model')
htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')
fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k')
result_dir = os.path.join(default.experiments_dir, 'stimmen', 'result')

kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme')
kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict')
lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')

#cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
#lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
#lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk')

# procedure
make_htk_dict_files = 0
do_forced_alignment_htk = 0
eval_forced_alignment_htk = 0
make_kaldi_data_files = 0
make_kaldi_lexicon_txt = 0
load_forced_alignment_kaldi = 1
eval_forced_alignment_kaldi = 1

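# Each flag above switches one stage of the pipeline on (1) or off (0); the
# stages are executed in the order in which they appear below.
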
## ======================= add paths =======================
sys.path.append(os.path.join(default.repo_dir, 'forced_alignment'))
from forced_alignment import convert_phone_set
from forced_alignment import pyhtk

sys.path.append(os.path.join(default.repo_dir, 'toolbox'))
#import pyHTK
from evaluation import plot_confusion_matrix


## ======================= convert phones ======================
mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', default.ipa_xsampa_converter_dir)

xls = pd.ExcelFile(excel_file)

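# (The Excel sheets are read into the DataFrame df in code elided between the
# hunks below; df is assumed to provide the columns filename, word, xsampa,
# ipa and famehtk used throughout the rest of the script.)
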
@@ -128,7 +115,7 @@ word_list = np.unique(df['word'])


## ======================= make dict files used for HTK. ======================
if make_htk_dict_files:
    output_type = 3

    for word in word_list:
@@ -138,14 +125,12 @@ if make_dic_files:
        pronvar_ = df['famehtk'][df['word'].str.match(word)]

        # make dic file.
        am_func.make_htk_dict(word, pronvar_, htk_dict_file, output_type)

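# am_func.make_htk_dict is expected to write one HTK pronunciation dictionary
# per word (htk_dict_file, e.g. <word>.dic under htk_dict_dir) built from the
# famehtk variants selected above; htk_dict_file itself is assigned in code
# elided from this hunk.
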
## ======================= forced alignment using HTK =======================
if do_forced_alignment_htk:
    #hmm_num = 2
    #for hmm_num in [1]:
    #for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
    for hmm_num in [256, 512, 1024]:
        hmm_num_str = str(hmm_num)
@@ -178,26 +163,19 @@ if do_forced_alignment_htk:
                        default.phonelist, acoustic_model)
                    os.remove(label_file)

                    prediction = am_func.read_fileFA(fa_file)
                    #predictions.append(prediction)
                    print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction))
                else:
                    prediction = ''
                    #predictions.append('')
                    print('!!!!! file not found.')

                line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i)
                predictions = predictions.append(line)
            else:
                prediction = ''
                #predictions.append('')
                print('!!!!! invalid entry.')

        #predictions = np.array(predictions)
        #np.save(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'), predictions)
        predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))

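# For every HMM size in the list above, the per-utterance alignments are
# collected in the predictions DataFrame (built in code elided from this hunk)
# and pickled to result_dir/htk/predictions_hmm<N>.pkl for later evaluation.
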
@@ -208,9 +186,6 @@ if make_kaldi_data_files:
    text_file = os.path.join(kaldi_data_dir, 'text')
    utt2spk = os.path.join(kaldi_data_dir, 'utt2spk')

    # remove previous files.
    if os.path.exists(wav_scp):
        os.remove(wav_scp)
@@ -224,42 +199,30 @@ if make_kaldi_data_files:
    f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n')

    # make wav.scp, text, and utt2spk files.
    for i in df.index:
        filename = df['filename'][i]
        print('=== {0}: {1} ==='.format(i, filename))

        wav_file = os.path.join(wav_dir, filename)
        if os.path.exists(wav_file):
            speaker_id = 'speaker_' + str(i).zfill(4)
            utterance_id = filename.replace('.wav', '')
            utterance_id = utterance_id.replace(' ', '_')
            utterance_id = speaker_id + '-' + utterance_id

            # wav.scp file
            wav_file_unix = wav_file.replace('\\', '/')
            wav_file_unix = wav_file_unix.replace('c:/', '/mnt/c/')
            f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix))

            # text file
            word = df['word'][i].lower()
            f_text_file.write('{0}\t{1}\n'.format(utterance_id, word))

            # utt2spk
            f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id))

    f_wav_scp.close()
    f_text_file.close()

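# The three files written above follow the usual Kaldi data-directory layout:
#   wav.scp : <utterance-id> <path to the wav file (as a /mnt/c/... WSL path)>
#   text    : <utterance-id> <transcription (the prompted word, lower-cased)>
#   utt2spk : <utterance-id> <speaker-id>
# Every utterance is given its own pseudo speaker id (speaker_XXXX).
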
@@ -268,8 +231,7 @@ if make_kaldi_data_files:


## ======================= make lexicon txt which is used by Kaldi =======================
if make_kaldi_lexicon_txt:
    option_num = 6

    # remove previous file.
    if os.path.exists(lexicon_txt):
@@ -277,35 +239,6 @@ if make_kaldi_lexicon_txt:
    lexiconp_txt = lexicon_txt.replace('lexicon.txt', 'lexiconp.txt')
    if os.path.exists(lexiconp_txt):
        os.remove(lexiconp_txt)

    # output lexicon.txt
    f_lexicon_txt = open(lexicon_txt, 'a', encoding="utf-8", newline='\n')

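# In the loop below, the option_num most frequent ipa transcription variants of
# every word are kept and expanded with am_func.fame_pronunciation_variant into
# the spellings used by the acoustic model, before being written to lexicon.txt.
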
@@ -313,52 +246,29 @@ if make_kaldi_lexicon_txt:
    for word in word_list:
        # pronunciation variants of the target word.
        #pronvar_ = pronunciations[words == word]
        pronunciation_variants = df['ipa'][df['word'].str.match(word)]
        #pronunciation_variants = np.unique(pronunciation_variants)
        # remove ''
        #pronvar_ = np.delete(pronvar_, np.where(pronvar_==''))

        c = Counter(pronunciation_variants)
        total_num = sum(c.values())

        #with open(result_dir + '\\' + word + '.csv', 'a', encoding="utf-8", newline='\n') as f:
        #    for key in c.keys():
        #        f.write("{0},{1}\n".format(key,c[key]))

        for key, value in c.most_common(option_num):
            #print('{0}\t{1}\t{2}\t{3}'.format(word, key, value, total_num))

            # make possible pronunciation variant list.
            pronvar_list = am_func.fame_pronunciation_variant(key)

            for pronvar_ in pronvar_list:
                split_ipa = convert_phone_set.split_fame_ipa(pronvar_)
                pronvar_out = ' '.join(split_ipa)
                pronvar_list_all.append([word, pronvar_out])

    # output
    pronvar_list_all = np.array(pronvar_list_all)
    pronvar_list_all = np.unique(pronvar_list_all, axis=0)

    f_lexicon_txt.write('<UNK>\tSPN\n')
    for line in pronvar_list_all:
        f_lexicon_txt.write('{0}\t{1}\n'.format(line[0].lower(), line[1]))

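# lexicon.txt is written in the Kaldi dictionary format: one tab-separated
# "<word> <phone sequence>" entry per line, plus the special "<UNK> SPN" entry
# for out-of-vocabulary items.
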
@@ -368,9 +278,8 @@ if make_kaldi_lexicon_txt:


## ======================= load kaldi forced alignment result =======================
if load_forced_alignment_kaldi:
    phones_txt = os.path.join(default.kaldi_dir, 'data', 'lang', 'phones.txt')
    merged_alignment_txt = os.path.join(default.kaldi_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')

    #filenames = np.load(data_dir + '\\filenames.npy')
    #words = np.load(data_dir + '\\words.npy')

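# The block below parses merged_alignment.txt, which is assumed to hold one
# phone entry per line with five space-separated fields: field 1 is the
# utterance id and field 5 the integer phone id defined in phones.txt.
# Position suffixes (_B, _I, _E, _S) are stripped and SIL is skipped when the
# aligned pronunciation of an utterance is reassembled.
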
@@ -380,98 +289,72 @@ if load_forced_alignment_kaldi:
    # load the mapping between phones and ids.
    with open(phones_txt, 'r', encoding="utf-8") as f:
        mapping_phone2id = f.read().split('\n')

    phones = []
    phone_ids = [] # ID of phones
    for m in mapping_phone2id:
        m = m.split(' ')
        if len(m) > 1:
            phones.append(m[0])
            phone_ids.append(int(m[1]))

    # load the result of FA.
    with open(merged_alignment_txt, 'r') as f:
        lines = f.read()
        lines = lines.split('\n')

    predictions = pd.DataFrame({'filename': [''],
                                'word': [''],
                                'xsampa': [''],
                                'ipa': [''],
                                'famehtk': [''],
                                'prediction': ['']})
    #fa_filenames = []
    #fa_pronunciations = []
    utterance_id_ = ''
    pronunciation = []
    for line in lines:
        line = line.split(' ')
        if len(line) == 5:
            utterance_id = line[0]
            if utterance_id == utterance_id_:
                phone_id = int(line[4])
                #if not phone_id == 1:
                phone_ = phones[phone_ids.index(phone_id)]
                phone = re.sub(r'_[A-Z]', '', phone_)
                if not phone == 'SIL':
                    pronunciation.append(phone)
            else:
                filename = re.sub(r'speaker_[0-9]{4}-', '', utterance_id_)
                prediction = ''.join(pronunciation)
                df_ = df[df['filename'].str.match(filename)]
                df_idx = df_.index[0]
                prediction_ = pd.Series([#filename,
                                         #df_['word'][df_idx],
                                         #df_['xsampa'][df_idx],
                                         #df_['ipa'][df_idx],
                                         #df_['famehtk'][df_idx],
                                         df_.iloc[0,1],
                                         df_.iloc[0,3],
                                         df_.iloc[0,4],
                                         df_.iloc[0,2],
                                         df_.iloc[0,0],
                                         prediction],
                                        index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'],
                                        name=df_idx)
                predictions = predictions.append(prediction_)
                #fa_filenames.append()
                #fa_pronunciations.append(' '.join(pronunciation))
                pronunciation = []

            utterance_id_ = utterance_id

    predictions.to_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))

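# The resulting DataFrame pairs every reference transcription from the Excel
# sheet (xsampa / ipa / famehtk) with the pronunciation Kaldi actually aligned,
# and is pickled to result_dir/kaldi/predictions.pkl for the evaluation below.
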
## ======================= evaluate the result of forced alignment =======================
if eval_forced_alignment_htk:
    htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')

    compare_hmm_num = 1
@@ -524,3 +407,81 @@ if eval_forced_alignment:
    if compare_hmm_num:
        f_result.close()


## ======================= evaluate the result of forced alignment of kaldi =======================
if eval_forced_alignment_kaldi:
    result = pd.read_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))

    f_result = open(os.path.join(result_dir, 'result.csv'), 'w')
    f_result.write("word,total,valid,match,[%]\n")

    # load pronunciation variants
    with open(lexicon_txt, 'r', encoding="utf-8", newline='\n') as f:
        lines = f.read().split('\n')[:-1]
        pronunciation_variants_all = [line.split('\t') for line in lines]

    word_list = np.delete(word_list, [0], 0) # remove 'Oog'
    for word in word_list:

        # load pronunciation variants of the word.
        pronunciation_variants = []
        for line in pronunciation_variants_all:
            if line[0] == word.lower():
                pronunciation_variants.append(line[1].replace(' ', ''))

        # see only words which appear in the top 3.
        result_ = result[result['word'].str.match(word)]
        result_tolerant = pd.DataFrame({
            'filename': [''],
            'word': [''],
            'xsampa': [''],
            'ipa': [''],
            'prediction': [''],
            'match': ['']})

        for i in range(0, len(result_)):
            line = result_.iloc[i]

            # make a list of all possible pronunciation variants of the ipa description,
            # i.e. the possible answers from forced alignment.
            ipa = line['ipa']
            pronvar_list = [ipa]
            pronvar_list_ = am_func.fame_pronunciation_variant(ipa)
            if pronvar_list_ is not None:
                pronvar_list += list(pronvar_list_)

            # only focus on pronunciations which can be estimated from ipa.
            if len(set(pronvar_list) & set(pronunciation_variants)) > 0:
                if line['prediction'] in pronvar_list:
                    ismatch = True
                else:
                    ismatch = False

                line_df = pd.DataFrame(result_.iloc[i]).T
                df_idx = line_df.index[0]
                result_tolerant_ = pd.Series([line_df.loc[df_idx, 'filename'],
                                              line_df.loc[df_idx, 'word'],
                                              line_df.loc[df_idx, 'xsampa'],
                                              line_df.loc[df_idx, 'ipa'],
                                              line_df.loc[df_idx, 'prediction'],
                                              ismatch],
                                             index=['filename', 'word', 'xsampa', 'ipa', 'prediction', 'match'],
                                             name=df_idx)
                result_tolerant = result_tolerant.append(result_tolerant_)

        # remove the first entry (dummy)
        result_tolerant = result_tolerant.drop(0, axis=0)

        total_num = len(result_)
        valid_num = len(result_tolerant)
        match_num = np.sum(result_tolerant['match'])

        print("word '{0}': {1}/{2} ({3:.2f} %) originally {4}".format(word, match_num, valid_num, match_num/valid_num*100, total_num))
        f_result.write("{0},{1},{2},{3},{4}\n".format(word, total_num, valid_num, match_num, match_num/valid_num*100))

    f_result.close()

    ## output confusion matrix
    #cm = confusion_matrix(result_['ipa'], result_['prediction'])
    #plt.figure()
    #plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False)
    #plt.savefig(result_dir + '\\cm_' + word + '.png')
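# result.csv columns: word; total = number of utterances of the word; valid =
# utterances whose ipa-derived variants occur in lexicon.txt; match =
# utterances whose Kaldi alignment equals one of those variants; [%] =
# match / valid * 100.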