Compare commits


No commits in common. "0777735979c245a70a93fee8be2506c45df60f99" and "3a98e184fea13fd784fdf56a689926d833ea3b70" have entirely different histories.

7 changed files with 213 additions and 316 deletions

Binary file not shown.

View File

@@ -11,7 +11,6 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
 		..\forced_alignment\forced_alignment\convert_phone_set.py = ..\forced_alignment\forced_alignment\convert_phone_set.py
 		..\toolbox\evaluation.py = ..\toolbox\evaluation.py
 		..\forced_alignment\forced_alignment\forced_alignment.pyproj = ..\forced_alignment\forced_alignment\forced_alignment.pyproj
-		..\forced_alignment\forced_alignment\htk_dict.py = ..\forced_alignment\forced_alignment\htk_dict.py
 		..\forced_alignment\forced_alignment\lexicon.py = ..\forced_alignment\forced_alignment\lexicon.py
 		..\forced_alignment\forced_alignment\mlf.py = ..\forced_alignment\forced_alignment\mlf.py
 		..\forced_alignment\forced_alignment\pronunciations.py = ..\forced_alignment\forced_alignment\pronunciations.py

View File

@@ -38,7 +38,7 @@ def make_filelist(input_dir, output_txt):
         fout.write(input_dir + '\\' + filename + '\n')

-def make_htk_dict(word, pronvar_, fileDic, output_type):
+def make_dic(word, pronvar_, fileDic, output_type):
     """
     make dict files which can be used for HTK.
     param word: target word.
@@ -98,8 +98,8 @@ def find_phone(lexicon_file, phone):
     for line in lines:
         line = line.split('\t')
         if len(line) > 1:
-            pronunciation = line[1]
-            if phone in pronunciation:
+            pron = line[1]
+            if phone in pron:
                 extracted.append(line)
     return extracted
@@ -149,54 +149,3 @@ def read_fileFA(fileFA):
         phones.append(line_split[2])
     return ' '.join(phones)

-def fame_pronunciation_variant(ipa):
-    ipa = ipa.replace('æ', 'ɛ')
-    ipa = ipa.replace('ɐ', 'a')
-    ipa = ipa.replace('ɑ', 'a')
-    ipa = ipa.replace('ɾ', 'r')
-    ipa = ipa.replace('ɹ', 'r') # ???
-    ipa = ipa.replace('ʁ', 'r')
-    ipa = ipa.replace('ʀ', 'r') # ???
-    ipa = ipa.replace('ʊ', 'u')
-    ipa = ipa.replace('χ', 'x')
-
-    pronvar_list = [ipa]
-    while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list):
-        pronvar_list_ = []
-        for p in pronvar_list:
-            if 'ø:' in p:
-                pronvar_list_.append(p.replace('ø:', 'ö'))
-                pronvar_list_.append(p.replace('ø:', 'ö:'))
-            if 'œ' in p:
-                pronvar_list_.append(p.replace('œ', 'ɔ̈'))
-                pronvar_list_.append(p.replace('œ', 'ɔ̈:'))
-            if 'ɒ' in p:
-                pronvar_list_.append(p.replace('ɒ', 'ɔ̈'))
-                pronvar_list_.append(p.replace('ɒ', 'ɔ̈:'))
-        pronvar_list = np.unique(pronvar_list_)
-    return pronvar_list
-
-def make_fame2ipa_variants(fame):
-    fame = 'rɛös'
-    ipa = [fame]
-    ipa.append(fame.replace('ɛ', 'æ'))
-    ipa.append(fame.replace('a', 'ɐ'))
-    ipa.append(fame.replace('a', 'ɑ'))
-    ipa.append(fame.replace('r', 'ɾ'))
-    ipa.append(fame.replace('r', 'ɹ'))
-    ipa.append(fame.replace('r', 'ʁ'))
-    ipa.append(fame.replace('r', 'ʀ'))
-    ipa.append(fame.replace('u', 'ʊ'))
-    ipa.append(fame.replace('x', 'χ'))
-    ipa.append(fame.replace('ö', 'ø:'))
-    ipa.append(fame.replace('ö:', 'ø:'))
-    ipa.append(fame.replace('ɔ̈', 'œ'))
-    ipa.append(fame.replace('ɔ̈:', 'œ'))
-    ipa.append(fame.replace('ɔ̈', 'ɒ'))
-    ipa.append(fame.replace('ɔ̈:', 'ɒ'))
-    return ipa
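
Note: the two helpers deleted above enumerate pronunciation variants by repeatedly applying one-to-many phone rewrites until no rewritable symbol remains. A minimal self-contained sketch of that expansion idea; the rule table and function name below are illustrative, not part of the repo:

# Illustrative rule table; the repo's actual mappings live in the deleted
# helpers above and depend on the acoustic model's phone set.
REWRITES = {'ø:': ['ö', 'ö:'], 'œ': ['ɔ̈', 'ɔ̈:'], 'ɒ': ['ɔ̈', 'ɔ̈:']}

def expand_variants(ipa):
    """Expand every combination of the one-to-many rewrites in REWRITES."""
    variants = [ipa]
    while any(src in v for v in variants for src in REWRITES):
        expanded = []
        for v in variants:
            src = next((s for s in REWRITES if s in v), None)
            if src is None:
                expanded.append(v)   # nothing left to rewrite
            else:
                # branch on the first occurrence of the rewritable symbol
                expanded.extend(v.replace(src, t, 1) for t in REWRITES[src])
        variants = sorted(set(expanded))
    return variants

print(expand_variants('rø:s'))   # ['rö:s', 'rös']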

View File

@@ -3,7 +3,6 @@ import os
 #default_hvite_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'htk', 'config.HVite')
 cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
-kaldi_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
 #config_hcopy = os.path.join(cygwin_dir, 'config', 'config.HCopy')
 #config_train = os.path.join(cygwin_dir, 'config', 'config.train')
 config_hvite = os.path.join(cygwin_dir, 'config', 'config.HVite')
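
Note: defaultfiles serves as the repo's central path/config module. A sketch of how its entries are consumed elsewhere (only the attributes visible in this diff are shown):

import defaultfiles as default

print(default.cygwin_dir)     # r'C:\cygwin64\home\Aki\acoustic_model'
print(default.config_hvite)   # ...\config\config.HVite
# default.kaldi_dir is removed by this diff, so any caller still referencing
# it (e.g. os.path.join(default.kaldi_dir, 'data', 'alignme')) now raises
# AttributeError.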

View File

@@ -10,56 +10,60 @@ import re
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
-from sklearn.metrics import confusion_matrix
+#from sklearn.metrics import confusion_matrix

 import acoustic_model_functions as am_func
 import convert_xsampa2ipa
 import defaultfiles as default
-from forced_alignment import pyhtk

 ## ======================= user define =======================
+#curr_dir = r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model'
+#config_ini = 'config.ini'
+#repo_dir = r'C:\Users\Aki\source\repos'
+#forced_alignment_module = repo_dir + '\\forced_alignment'
+#forced_alignment_module_old = repo_dir + '\\aki_tools'
+#ipa_xsampa_converter_dir = repo_dir + '\\ipa-xsama-converter'
+#accent_classification_dir = repo_dir + '\\accent_classification\accent_classification'
 excel_file = os.path.join(default.experiments_dir, 'stimmen', 'data', 'Frisian Variants Picture Task Stimmen.xlsx')
+#experiments_dir = r'C:\OneDrive\Research\rug\experiments'
 data_dir = os.path.join(default.experiments_dir, 'stimmen', 'data')
+#csvfile = data_dir + '\\Frisian Variants Picture Task Stimmen.csv'
-wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k
+wav_dir = os.path.join(default.experiments_dir, 'stimmen', 'wav')
 acoustic_model_dir = os.path.join(default.experiments_dir, 'friesian', 'acoustic_model', 'model')
 htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')
-fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k')
+fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA')
-result_dir = os.path.join(default.experiments_dir, 'stimmen', 'result')
-kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme')
-kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict')
-lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')
+#cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
 #lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
 #lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk')

 # procedure
-make_htk_dict_files = 0
+make_dic_files = 0
-do_forced_alignment_htk = 0
+do_forced_alignment_htk = 1
-eval_forced_alignment_htk = 0
 make_kaldi_data_files = 0
 make_kaldi_lexicon_txt = 0
-load_forced_alignment_kaldi = 1
+load_forced_alignment_kaldi = 0
-eval_forced_alignment_kaldi = 1
+eval_forced_alignment = 0

 ## ======================= add paths =======================
 sys.path.append(os.path.join(default.repo_dir, 'forced_alignment'))
 from forced_alignment import convert_phone_set
 from forced_alignment import pyhtk

 sys.path.append(os.path.join(default.repo_dir, 'toolbox'))
+#import pyHTK
 from evaluation import plot_confusion_matrix

 ## ======================= convert phones ======================
 mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', default.ipa_xsampa_converter_dir)

 xls = pd.ExcelFile(excel_file)
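
Note: the hunks below assume a DataFrame df with columns filename, word, xsampa, ipa and famehtk built from the Stimmen spreadsheet. A hedged sketch of that construction; only 'Filename' and the lower-case column names are visible in this diff, the other raw column names and the sheet choice are guesses:

import pandas as pd

xls = pd.ExcelFile(excel_file)
raw = pd.read_excel(xls, sheet_name=0)          # sheet choice is an assumption
df = pd.DataFrame({'filename': raw['Filename'],
                   'word':     raw['Word'],
                   'xsampa':   raw['XSAMPA'],
                   'ipa':      raw['IPA'],
                   'famehtk':  raw['FAME_HTK']})
df = df[~df['famehtk'].isin(['/', ''])]         # cleansing step from the diff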
@@ -111,11 +115,11 @@ df = pd.DataFrame({'filename': df['Filename'],
 # cleansing.
 df = df[~df['famehtk'].isin(['/', ''])]

-word_list = np.unique(df['word'])

 ## ======================= make dict files used for HTK. ======================
-if make_htk_dict_files:
+if make_dic_files:
+    word_list = np.unique(df['word'])
     output_type = 3
     for word in word_list:
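
Note: make_dic (formerly make_htk_dict) writes one small HTK dictionary per target word. Judging by how the evaluation code later reads these files back (line.split('\t')[1]), each line is a tab-separated word/pronunciation pair; a hedged sketch of the writer, with illustrative words and pronunciations:

def write_htk_dic(word, pronvar_, dic_path):
    """Write one pronunciation variant per line as 'WORD<TAB>pronunciation'."""
    with open(dic_path, 'w') as f:
        for pron in pronvar_:
            f.write('{0}\t{1}\n'.format(word.upper(), pron))

write_htk_dic('reus', ['røs', 'røys'], 'reus.dic')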
@@ -125,67 +129,67 @@ if make_htk_dict_files:
         pronvar_ = df['famehtk'][df['word'].str.match(word)]

         # make dic file.
-        am_func.make_htk_dict(word, pronvar_, htk_dict_file, output_type)
+        am_func.make_dic(word, pronvar_, htk_dict_file, output_type)

 ## ======================= forced alignment using HTK =======================
 if do_forced_alignment_htk:
-    #for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
-    for hmm_num in [256, 512, 1024]:
+    #hmm_num = 2
+    for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
         hmm_num_str = str(hmm_num)
         acoustic_model = os.path.join(acoustic_model_dir, 'hmm' + hmm_num_str + r'-2\hmmdefs')

-        predictions = pd.DataFrame({'filename': [''],
-                                    'word': [''],
-                                    'xsampa': [''],
-                                    'ipa': [''],
-                                    'famehtk': [''],
-                                    'prediction': ['']})
+        predictions = []
         for i, filename in enumerate(df['filename']):
             print('=== {0}/{1} ==='.format(i, len(df)))
-            if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):
             wav_file = os.path.join(wav_dir, filename)
-            if os.path.exists(wav_file):
+            if os.path.exists(wav_file) and i in df['filename'].keys():
                 word = df['word'][i]
                 WORD = word.upper()
+                fa_file = os.path.join(fa_dir, filename.replace('.wav', '.txt') + hmm_num_str)
+                #if not os.path.exists(fa_file):

                 # make label file.
                 label_file = os.path.join(wav_dir, filename.replace('.wav', '.lab'))
                 with open(label_file, 'w') as f:
                     lines = f.write(WORD)

                 htk_dict_file = os.path.join(htk_dict_dir, word + '.dic')
-                fa_file = os.path.join(fa_dir, filename.replace('.wav', '.txt') + hmm_num_str)
-                pyhtk.doHVite(wav_file, label_file, htk_dict_file, fa_file, default.config_hvite,
-                              default.phonelist, acoustic_model)
-                os.remove(label_file)
+                pyhtk.doHVite(wav_file, label_file, htk_dict_file, fa_file, default.config_hvite, default.phonelist, acoustic_model)
                 prediction = am_func.read_fileFA(fa_file)
+                predictions.append(prediction)
+                os.remove(label_file)

                 print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction))
             else:
-                prediction = ''
+                predictions.append('')
                 print('!!!!! file not found.')
-
-                line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i)
-                predictions = predictions.append(line)
-            else:
-                prediction = ''
-                print('!!!!! invalid entry.')
-
-        predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))
+        predictions = np.array(predictions)
+        #match = np.c_[words[predictions != ''], pronunciations[predictions != ''], predictions[predictions != '']]
+        np.save(os.path.join(data_dir, 'predictions_hmm' + hmm_num_str + '.npy'), predictions)

 ## ======================= make files which is used for forced alignment by Kaldi =======================
 if make_kaldi_data_files:
+    wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen'
+    kaldi_work_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
+    kaldi_data_dir = os.path.join(kaldi_work_dir, 'data', 'alignme')
+    kaldi_dict_dir = os.path.join(kaldi_work_dir, 'data', 'local', 'dict')
+    htk_dict_dir = os.path.join(experiments_dir, 'stimmen', 'dic_top3')

     wav_scp = os.path.join(kaldi_data_dir, 'wav.scp')
     text_file = os.path.join(kaldi_data_dir, 'text')
     utt2spk = os.path.join(kaldi_data_dir, 'utt2spk')
+    lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')

+    predictions = []
+    file_num_max = len(filenames)

     # remove previous files.
     if os.path.exists(wav_scp):
         os.remove(wav_scp)
@@ -199,12 +203,12 @@ if make_kaldi_data_files:
     f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n')

     # make wav.scp, text, and utt2spk files.
-    for i in df.index:
-        filename = df['filename'][i]
-        print('=== {0}: {1} ==='.format(i, filename))
+    for i in range(0, file_num_max):
+    #for i in range(400, 410):
+        print('=== {0}/{1} ==='.format(i+1, file_num_max))
+        filename = filenames[i]
+        wav_file = wav_dir + '\\' + filename
-        #if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):
-        wav_file = os.path.join(wav_dir, filename)
         if os.path.exists(wav_file):
             speaker_id = 'speaker_' + str(i).zfill(4)
             utterance_id = filename.replace('.wav', '')
@@ -218,7 +222,7 @@ if make_kaldi_data_files:
             f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix))

             # text file
-            word = df['word'][i].lower()
+            word = words[i].lower()
             f_text_file.write('{0}\t{1}\n'.format(utterance_id, word))

             # utt2spk
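
Note: wav.scp, text and utt2spk are Kaldi's standard data-directory files, one utterance per line. A minimal sketch of the entries this block produces (ids and paths illustrative):

utterance_id = 'fy000123'                       # filename without .wav
speaker_id = 'speaker_0001'
wav_file_unix = '/home/kaldi/corpus/stimmen/fy000123.wav'

with open('wav.scp', 'a', encoding='utf-8', newline='\n') as f:
    f.write('{0} {1}\n'.format(utterance_id, wav_file_unix))   # utt-id -> audio path
with open('text', 'a', encoding='utf-8', newline='\n') as f:
    f.write('{0}\t{1}\n'.format(utterance_id, 'reus'))         # utt-id -> transcript
with open('utt2spk', 'a', encoding='utf-8', newline='\n') as f:
    f.write('{0} {1}\n'.format(utterance_id, speaker_id))      # utt-id -> speaker-id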
@@ -231,257 +235,203 @@ if make_kaldi_data_files:
 ## ======================= make lexicon txt which is used by Kaldi =======================
 if make_kaldi_lexicon_txt:
-    option_num = 6
+    kaldi_work_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
+    kaldi_dict_dir = os.path.join(kaldi_work_dir, 'data', 'local', 'dict')
+    lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')
+    option_num = 5

     # remove previous file.
     if os.path.exists(lexicon_txt):
         os.remove(lexicon_txt)
-    lexiconp_txt = lexicon_txt.replace('lexicon.txt', 'lexiconp.txt')
-    if os.path.exists(lexiconp_txt):
-        os.remove(lexiconp_txt)

+    mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', ipa_xsampa_converter_dir)
+    with open(csvfile, encoding="utf-8") as fin:
+        lines = csv.reader(fin, delimiter=';', lineterminator="\n", skipinitialspace=True)
+        next(lines, None)  # skip the headers
+
+        filenames = []
+        words = []
+        pronunciations = []
+        p = []
+        for line in lines:
+            if line[1] is not '' and len(line) > 5:
+                filenames.append(line[0])
+                words.append(line[1])
+                pron_xsampa = line[3]
+                pron_ipa = convert_xsampa2ipa.conversion('xsampa', 'ipa', mapping, pron_xsampa)
+                pron_ipa = pron_ipa.replace('ː', ':')
+
+                # adjust to phones used in the acoustic model.
+                pronunciations.append(pron_ipa)
+
+    # check if all phones are in the phonelist of the acoustic model.
+    #'y', 'b', 'ɾ', 'u', 'ɔ:', 'ø', 't', 'œ', 'n', 'ɒ', 'ɐ', 'f', 'o', 'k', 'x', 'ɡ', 'v', 's', 'ɛ:', 'ɪ:', 'ɑ', 'ɛ', 'a', 'd', 'z', 'ɪ', 'ɔ', 'l', 'i:', 'm', 'p', 'a:', 'i', 'e', 'j', 'o:', 'ʁ', 'h', ':', 'e:', 'ə', 'æ', 'χ', 'w', 'r', 'ə:', 'sp', 'ʊ', 'u:', 'ŋ'
+
+    filenames = np.array(filenames)
+    words = np.array(words)
+    wordlist = np.unique(words)
+    pronunciations = np.array(pronunciations)

     # output lexicon.txt
-    f_lexicon_txt = open(lexicon_txt, 'a', encoding="utf-8", newline='\n')
+    #f_lexicon_txt = open(lexicon_txt, 'a', encoding="utf-8", newline='\n')
     pronvar_list_all = []
     for word in word_list:

         # pronunciation variant of the target word.
-        pronunciation_variants = df['ipa'][df['word'].str.match(word)]
+        pronvar_ = pronunciations[words == word]
+        # remove ''
+        pronvar_ = np.delete(pronvar_, np.where(pronvar_==''))

-        c = Counter(pronunciation_variants)
+        c = Counter(pronvar_)
         total_num = sum(c.values())
+        #with open(result_dir + '\\' + word + '.csv', 'a', encoding="utf-8", newline='\n') as f:
+        #    for key in c.keys():
+        #        f.write("{0},{1}\n".format(key,c[key]))

         for key, value in c.most_common(option_num):
+            #print('{0}\t{1}\t{2}\t{3}'.format(word, key, value, total_num))
+            key = key.replace('æ', 'ɛ')
+            key = key.replace('ɐ', 'a')
+            key = key.replace('ɑ', 'a')
+            key = key.replace('ɾ', 'r')
+            key = key.replace('ʁ', 'r')
+            key = key.replace('ʊ', 'u')
+            key = key.replace('χ', 'x')
+            #print('-->{0}\t{1}\t{2}\t{3}\n'.format(word, key, value, total_num))

             # make possible pronounciation variant list.
-            pronvar_list = am_func.fame_pronunciation_variant(key)
+            pronvar_list = [key]
+            while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list):
+                pronvar_list_ = []
+                for p in pronvar_list:
+                    if 'ø:' in p:
+                        pronvar_list_.append(p.replace('ø:', 'ö'))
+                        pronvar_list_.append(p.replace('ø:', 'ö:'))
+                    if 'œ' in p:
+                        pronvar_list_.append(p.replace('œ', 'ɔ̈'))
+                        pronvar_list_.append(p.replace('œ', 'ɔ̈:'))
+                    if 'ɒ' in p:
+                        pronvar_list_.append(p.replace('ɒ', 'ɔ̈'))
+                        pronvar_list_.append(p.replace('ɒ', 'ɔ̈:'))
+                pronvar_list = np.unique(pronvar_list_)

             for pronvar_ in pronvar_list:
                 split_ipa = convert_phone_set.split_fame_ipa(pronvar_)
                 pronvar_out = ' '.join(split_ipa)
                 pronvar_list_all.append([word, pronvar_out])

+    # output
     pronvar_list_all = np.array(pronvar_list_all)
     pronvar_list_all = np.unique(pronvar_list_all, axis=0)
+    #f_lexicon_txt.write('<UNK>\tSPN\n')
+    #for line in pronvar_list_all:
+    #    f_lexicon_txt.write('{0}\t{1}\n'.format(line[0].lower(), line[1]))
+    #f_lexicon_txt.close()
-    # output
-    f_lexicon_txt.write('<UNK>\tSPN\n')
-    for line in pronvar_list_all:
-        f_lexicon_txt.write('{0}\t{1}\n'.format(line[0].lower(), line[1]))
-    f_lexicon_txt.close()
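
Note: lexicon.txt follows Kaldi's standard lexicon layout, which is what the writer above produces: one tab-separated "word<TAB>space-separated phones" line per pronunciation variant, plus an <UNK> entry mapped to the spoken-noise phone SPN. Illustrative contents (the word entries are made up):

<UNK>	SPN
reus	r ø s
reus	r ö: s
reuzenrad	r ö z ə n r a t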
 ## ======================= load kaldi forced alignment result =======================
 if load_forced_alignment_kaldi:
-    phones_txt = os.path.join(default.kaldi_dir, 'data', 'lang', 'phones.txt')
-    merged_alignment_txt = os.path.join(default.kaldi_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')
+    kaldi_work_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
+    phones_txt = kaldi_work_dir + '\\data\\lang\\phones.txt'
+    merged_alignment_txt = kaldi_work_dir + '\\exp\\tri1_alignme\\merged_alignment.txt'

-    #filenames = np.load(data_dir + '\\filenames.npy')
-    #words = np.load(data_dir + '\\words.npy')
-    #pronunciations = np.load(data_dir + '\\pronunciations_ipa.npy')
-    #pronvar_list_all = np.load(data_dir + '\\pronvar_list_all.npy')
-    #word_list = np.unique(words)
+    filenames = np.load(data_dir + '\\filenames.npy')
+    words = np.load(data_dir + '\\words.npy')
+    pronunciations = np.load(data_dir + '\\pronunciations_ipa.npy')
+    pronvar_list_all = np.load(data_dir + '\\pronvar_list_all.npy')
+    word_list = np.unique(words)

     # load the mapping between phones and ids.
     with open(phones_txt, 'r', encoding="utf-8") as f:
-        mapping_phone2id = f.read().split('\n')
+        mappings = f.read().split('\n')

     phones = []
-    phone_ids = [] # ID of phones
-    for m in mapping_phone2id:
+    phone_ids = []
+    for m in mappings:
         m = m.split(' ')
         if len(m) > 1:
             phones.append(m[0])
             phone_ids.append(int(m[1]))

-    # load the result of FA.
     with open(merged_alignment_txt, 'r') as f:
         lines = f.read()
         lines = lines.split('\n')

-    predictions = pd.DataFrame({'filename': [''],
-                                'word': [''],
-                                'xsampa': [''],
-                                'ipa': [''],
-                                'famehtk': [''],
-                                'prediction': ['']})
-    #fa_filenames = []
-    #fa_pronunciations = []
-    utterance_id_ = ''
-    pronunciation = []
+    fa_filenames = []
+    fa_pronunciations = []
+    filename_ = ''
+    pron = []
     for line in lines:
         line = line.split(' ')
         if len(line) == 5:
-            utterance_id = line[0]
-            if utterance_id == utterance_id_:
+            filename = line[0]
+            if filename == filename_:
                 phone_id = int(line[4])
                 #if not phone_id == 1:
-                phone_ = phones[phone_ids.index(phone_id)]
-                phone = re.sub(r'_[A-Z]', '', phone_)
-                if not phone == 'SIL':
-                    pronunciation.append(phone)
+                phone = phones[phone_ids.index(phone_id)]
+                pron_ = re.sub(r'_[A-Z]', '', phone)
+                if not pron_ == 'SIL':
+                    pron.append(pron_)
             else:
-                filename = re.sub(r'speaker_[0-9]{4}-', '', utterance_id_)
-                prediction = ''.join(pronunciation)
-                df_ = df[df['filename'].str.match(filename)]
-                df_idx = df_.index[0]
-                prediction_ = pd.Series([#filename,
-                                         #df_['word'][df_idx],
-                                         #df_['xsampa'][df_idx],
-                                         #df_['ipa'][df_idx],
-                                         #df_['famehtk'][df_idx],
-                                         df_.iloc[0,1],
-                                         df_.iloc[0,3],
-                                         df_.iloc[0,4],
-                                         df_.iloc[0,2],
-                                         df_.iloc[0,0],
-                                         prediction],
-                                        index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'],
-                                        name=df_idx)
-                predictions = predictions.append(prediction_)
-                #fa_filenames.append()
-                #fa_pronunciations.append(' '.join(pronunciation))
-                pronunciation = []
-
-                utterance_id_ = utterance_id
-
-    predictions.to_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))
+                fa_filenames.append(re.sub(r'speaker_[0-9]{4}-', '', filename))
+                fa_pronunciations.append(' '.join(pron))
+                pron = []
+
+                filename_ = filename
+
+    # correct or not.
+    #for filename, fa_pronunciation in zip(fa_filenames, fa_pronunciations):
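
Note: phones.txt is Kaldi's phone symbol table, one "symbol id" pair per line (e.g. 'SIL 1', 'a_B 5'), and merged_alignment.txt carries one aligned phone per row with the utterance id in field 0 and the phone id in field 4. The list-based lookup above (phones[phone_ids.index(phone_id)]) is O(n) per phone; a dict makes the same lookup O(1). A sketch, with illustrative id and value:

import re

id2phone = {}
with open(phones_txt, 'r', encoding='utf-8') as f:
    for line in f:
        fields = line.strip().split(' ')
        if len(fields) == 2:
            symbol, phone_id = fields
            id2phone[int(phone_id)] = symbol

phone = id2phone[5]                     # e.g. 'a_B'
phone = re.sub(r'_[A-Z]', '', phone)    # strip Kaldi's position suffix -> 'a'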
 ## ======================= evaluate the result of forced alignment =======================
-if eval_forced_alignment_htk:
-    htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')
-
-    compare_hmm_num = 1
-    if compare_hmm_num:
-        f_result = open(os.path.join(result_dir, 'result.csv'), 'w')
-        f_result.write("nmix,Oog,Oog,Oor,Oor,Pauw,Pauw,Reus,Reus,Reuzenrad,Reuzenrad,Roeiboot,Roeiboot,Rozen,Rozen\n")
-
-    for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
-    #for hmm_num in [256]:
+if eval_forced_alignment:
+    match_num = []
+    #hmm_num = 256
+    for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256]:
         hmm_num_str = str(hmm_num)
-
-        if compare_hmm_num:
-            f_result.write("{},".format(hmm_num_str))
-
-        #match = np.load(data_dir + '\\match_hmm' + hmm_num_str + '.npy')
-        #prediction = np.load(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'))
-        #prediction = pd.Series(prediction, index=df.index, name='prediction')
-        #result = pd.concat([df, prediction], axis=1)
-        result = pd.read_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))
-
-        # load pronunciation variants
+        match = np.load(data_dir + '\\match_hmm' + hmm_num_str + '.npy')
+
+        # use dic_short?
+        if 1:
+            pronunciation_variants = np.array(['WORD', 'pronunciation']).reshape(1, 2)
             for word in word_list:
-            htk_dict_file = os.path.join(htk_dict_dir, word + '.dic')
-            with open(htk_dict_file, 'r') as f:
-                lines = f.read().split('\n')[:-1]
-                pronunciation_variants = [line.split('\t')[1] for line in lines]
-
-            # see only words which appears in top 3.
-            result_ = result[result['word'].str.match(word)]
-            result_ = result_[result_['famehtk'].isin(pronunciation_variants)]
-
-            match_num = sum(result_['famehtk'] == result_['prediction'])
-            total_num = len(result_)
-
-            print("word '{0}': {1}/{2} ({3:.2f} %)".format(word, match_num, total_num, match_num/total_num*100))
-            if compare_hmm_num:
-                f_result.write("{0},{1},".format(match_num, total_num))
-            else:
-                # output confusion matrix
-                cm = confusion_matrix(result_['famehtk'], result_['prediction'])
-
-                plt.figure()
-                plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False)
-                plt.savefig(result_dir + '\\cm_' + word + '.png')
-
-    if compare_hmm_num:
-        f_result.write('\n')
-
-    if compare_hmm_num:
-        f_result.close()
+                fileDic = experiments_dir + r'\stimmen\dic_top3' + '\\' + word + '.dic'
+                pronunciation_variants = np.r_[pronunciation_variants, pyHTK.loadHTKdic(fileDic)]
+
+            # see only words which appears in top 3.
+            match_short = []
+            for line in match:
+                word = line[0]
+                WORD = word.upper()
+                pronvar = pronunciation_variants[pronunciation_variants[:, 0] == word.upper(), 1]
+
+                if line[1] in pronvar:
+                    match_short.append(line)
+
+            match_short = np.array(match_short)
+            match = np.copy(match_short)
+
+        # number of match
+        total_match = sum(match[:, 1] == match[:, 2])
+        print("{}: {}/{}".format(hmm_num_str, total_match, match.shape[0]))
+        match_num.append([hmm_num, total_match, match.shape[0]])
+    # number of mixtures vs accuracy
+    match_num = np.array(match_num)
+    plt.xscale("log")
+    plt.plot(match_num[:, 0], match_num[:, 1]/match_num[0, 2], 'o-')
+    plt.xlabel('number of mixtures', fontsize=14, fontweight='bold')
+    plt.ylabel('accuracy', fontsize=14, fontweight='bold')
+    plt.show()
+
+    # confusion matrix
+    #dir_out = r'C:\OneDrive\Research\rug\experiments\stimmen\result'
+    #word_list = np.unique(match[:, 0])
+    #for word in word_list:
+    #    match_ = match[match[:, 0] == word, :]
+    #    cm = confusion_matrix(match_[:, 1], match_[:, 2])
+    #    pronvar = pronunciation_variants[pronunciation_variants[:, 0] == word.upper(), 1]
-## ======================= evaluate the result of forced alignment of kaldi =======================
-if eval_forced_alignment_kaldi:
-    result = pd.read_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))
-
-    f_result = open(os.path.join(result_dir, 'result.csv'), 'w')
-    f_result.write("word,total,valid,match,[%]\n")
-
-    # load pronunciation variants
-    with open(lexicon_txt, 'r', encoding="utf-8", newline='\n') as f:
-        lines = f.read().split('\n')[:-1]
-        pronunciation_variants_all = [line.split('\t') for line in lines]
-
-    word_list = np.delete(word_list, [0], 0) # remove 'Oog'
-    for word in word_list:
-
-        # load pronunciation variant of the word.
-        pronunciation_variants = []
-        for line in pronunciation_variants_all:
-            if line[0] == word.lower():
-                pronunciation_variants.append(line[1].replace(' ', ''))
-
-        # see only words which appears in top 3.
-        result_ = result[result['word'].str.match(word)]
-        result_tolerant = pd.DataFrame({
-            'filename': [''],
-            'word': [''],
-            'xsampa': [''],
-            'ipa': [''],
-            'prediction': [''],
-            'match': ['']})
-
-        for i in range(0, len(result_)):
-            line = result_.iloc[i]
-
-            # make a list of all possible pronunciation variants of ipa description.
-            # i.e. possible answers from forced alignment.
-            ipa = line['ipa']
-            pronvar_list = [ipa]
-            pronvar_list_ = am_func.fame_pronunciation_variant(ipa)
-            if not pronvar_list_ is None:
-                pronvar_list += list(pronvar_list_)
-
-            # only focus on pronunciations which can be estimated from ipa.
-            if len(set(pronvar_list) & set(pronunciation_variants)) > 0:
-                if line['prediction'] in pronvar_list:
-                    ismatch = True
-                else:
-                    ismatch = False
-
-                line_df = pd.DataFrame(result_.iloc[i]).T
-                df_idx = line_df.index[0]
-                result_tolerant_ = pd.Series([line_df.loc[df_idx, 'filename'],
-                                              line_df.loc[df_idx, 'word'],
-                                              line_df.loc[df_idx, 'xsampa'],
-                                              line_df.loc[df_idx, 'ipa'],
-                                              line_df.loc[df_idx, 'prediction'],
-                                              ismatch],
-                                             index=['filename', 'word', 'xsampa', 'ipa', 'prediction', 'match'],
-                                             name=df_idx)
-                result_tolerant = result_tolerant.append(result_tolerant_)
-
-        # remove the first entry (dummy)
-        result_tolerant = result_tolerant.drop(0, axis=0)
-
-        total_num = len(result_)
-        valid_num = len(result_tolerant)
-        match_num = np.sum(result_tolerant['match'])
-
-        print("word '{0}': {1}/{2} ({3:.2f} %) originally {4}".format(word, match_num, valid_num, match_num/valid_num*100, total_num))
-        f_result.write("{0},{1},{2},{3},{4}\n".format(word, total_num, valid_num, match_num, match_num/valid_num*100))
-
-    f_result.close()
-
-    ## output confusion matrix
-    #cm = confusion_matrix(result_['ipa'], result_['prediction'])
     # plt.figure()
-    #plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False)
-    #plt.savefig(result_dir + '\\cm_' + word + '.png')
+    # plot_confusion_matrix(cm, classes=pronvar, normalize=True)
+    # plt.savefig(dir_out + '\\cm_' + word + '.png')
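
Note: both evaluation variants above reduce to the same per-condition score: the fraction of utterances whose forced-alignment choice equals the reference pronunciation. A minimal sketch of that computation (toy data):

import numpy as np

def accuracy(reference, prediction):
    """Fraction of utterances where FA picked the reference pronunciation."""
    reference = np.asarray(reference)
    prediction = np.asarray(prediction)
    return float(np.mean(reference == prediction))

print(accuracy(['røs', 'røys'], ['røs', 'røs']))   # 0.5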