Code is cleaned up.

yemaozi88 2018-09-16 23:33:31 +02:00
parent ea30b5c503
commit 0777735979
7 changed files with 225 additions and 211 deletions

Binary file not shown.

View File

@@ -11,6 +11,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
..\forced_alignment\forced_alignment\convert_phone_set.py = ..\forced_alignment\forced_alignment\convert_phone_set.py
..\toolbox\evaluation.py = ..\toolbox\evaluation.py
..\forced_alignment\forced_alignment\forced_alignment.pyproj = ..\forced_alignment\forced_alignment\forced_alignment.pyproj
+..\forced_alignment\forced_alignment\htk_dict.py = ..\forced_alignment\forced_alignment\htk_dict.py
..\forced_alignment\forced_alignment\lexicon.py = ..\forced_alignment\forced_alignment\lexicon.py
..\forced_alignment\forced_alignment\mlf.py = ..\forced_alignment\forced_alignment\mlf.py
..\forced_alignment\forced_alignment\pronunciations.py = ..\forced_alignment\forced_alignment\pronunciations.py

View File

@@ -38,7 +38,7 @@ def make_filelist(input_dir, output_txt):
            fout.write(input_dir + '\\' + filename + '\n')

-def make_dic(word, pronvar_, fileDic, output_type):
+def make_htk_dict(word, pronvar_, fileDic, output_type):
    """
    make dict files which can be used for HTK.
    param word: target word.
@@ -98,8 +98,8 @@ def find_phone(lexicon_file, phone):
    for line in lines:
        line = line.split('\t')
        if len(line) > 1:
-            pron = line[1]
-            if phone in pron:
+            pronunciation = line[1]
+            if phone in pronunciation:
                extracted.append(line)
    return extracted
@@ -149,3 +149,54 @@ def read_fileFA(fileFA):
            phones.append(line_split[2])

    return ' '.join(phones)

+
+def fame_pronunciation_variant(ipa):
+    ipa = ipa.replace('æ', 'ɛ')
+    ipa = ipa.replace('ɐ', 'a')
+    ipa = ipa.replace('ɑ', 'a')
+    ipa = ipa.replace('ɾ', 'r')
+    ipa = ipa.replace('ɹ', 'r')  # ???
+    ipa = ipa.replace('ʁ', 'r')
+    ipa = ipa.replace('ʀ', 'r')  # ???
+    ipa = ipa.replace('ʊ', 'u')
+    ipa = ipa.replace('χ', 'x')
+
+    pronvar_list = [ipa]
+    while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list):
+        pronvar_list_ = []
+        for p in pronvar_list:
+            if 'ø:' in p:
+                pronvar_list_.append(p.replace('ø:', 'ö'))
+                pronvar_list_.append(p.replace('ø:', 'ö:'))
+            if 'œ' in p:
+                pronvar_list_.append(p.replace('œ', 'ɔ̈'))
+                pronvar_list_.append(p.replace('œ', 'ɔ̈:'))
+            if 'ɒ' in p:
+                pronvar_list_.append(p.replace('ɒ', 'ɔ̈'))
+                pronvar_list_.append(p.replace('ɒ', 'ɔ̈:'))
+        pronvar_list = np.unique(pronvar_list_)
+    return pronvar_list
+
+
+def make_fame2ipa_variants(fame):
+    ipa = [fame]
+    ipa.append(fame.replace('ɛ', 'æ'))
+    ipa.append(fame.replace('a', 'ɐ'))
+    ipa.append(fame.replace('a', 'ɑ'))
+    ipa.append(fame.replace('r', 'ɾ'))
+    ipa.append(fame.replace('r', 'ɹ'))
+    ipa.append(fame.replace('r', 'ʁ'))
+    ipa.append(fame.replace('r', 'ʀ'))
+    ipa.append(fame.replace('u', 'ʊ'))
+    ipa.append(fame.replace('x', 'χ'))
+    ipa.append(fame.replace('ö', 'ø:'))
+    ipa.append(fame.replace('ö:', 'ø:'))
+    ipa.append(fame.replace('ɔ̈', 'œ'))
+    ipa.append(fame.replace('ɔ̈:', 'œ'))
+    ipa.append(fame.replace('ɔ̈', 'ɒ'))
+    ipa.append(fame.replace('ɔ̈:', 'ɒ'))
+    return ipa
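
A quick usage sketch of the two helpers added above. The input strings are invented examples, not entries from the Stimmen data, and the outputs shown in the comments are only indicative:

    import acoustic_model_functions as am_func

    # Normalise a transcription towards the acoustic-model phone set and expand
    # the ambiguous vowels ('ø:', 'œ', 'ɒ') into all possible variants.
    variants = am_func.fame_pronunciation_variant('rø:s')   # -> array(['rö:s', 'rös'], ...)

    # The other direction: candidate IPA spellings behind a FAME-style form,
    # one substitution per list entry (unchanged duplicates included).
    candidates = am_func.make_fame2ipa_variants('rɛös')     # -> ['rɛös', 'ræös', ..., 'rɛø:s', ...]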

View File

@@ -3,6 +3,7 @@ import os
#default_hvite_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'htk', 'config.HVite')

cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
+kaldi_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
#config_hcopy = os.path.join(cygwin_dir, 'config', 'config.HCopy')
#config_train = os.path.join(cygwin_dir, 'config', 'config.train')
config_hvite = os.path.join(cygwin_dir, 'config', 'config.HVite')
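
The new kaldi_dir entry is what the experiment script below resolves its Kaldi paths against, e.g. (these lines appear unchanged in that script):

    import os
    import defaultfiles as default

    kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme')
    kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict')
    lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')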

View File

@@ -16,25 +16,15 @@ import acoustic_model_functions as am_func
import convert_xsampa2ipa
import defaultfiles as default
+from forced_alignment import pyhtk

## ======================= user define =======================
-#curr_dir = r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model'
-#config_ini = 'config.ini'
-#repo_dir = r'C:\Users\Aki\source\repos'
-#forced_alignment_module = repo_dir + '\\forced_alignment'
-#forced_alignment_module_old = repo_dir + '\\aki_tools'
-#ipa_xsampa_converter_dir = repo_dir + '\\ipa-xsama-converter'
-#accent_classification_dir = repo_dir + '\\accent_classification\accent_classification'
excel_file = os.path.join(default.experiments_dir, 'stimmen', 'data', 'Frisian Variants Picture Task Stimmen.xlsx')
-#experiments_dir = r'C:\OneDrive\Research\rug\experiments'
data_dir = os.path.join(default.experiments_dir, 'stimmen', 'data')
-#csvfile = data_dir + '\\Frisian Variants Picture Task Stimmen.csv'
-#wav_dir = os.path.join(default.experiments_dir, 'stimmen', 'wav_44k') # 44.1k
wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k
-#wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k
acoustic_model_dir = os.path.join(default.experiments_dir, 'friesian', 'acoustic_model', 'model')
htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')
fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k')
@@ -44,35 +34,32 @@ kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme')
kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict')
lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')

-#cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
#lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
#lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk')

-from forced_alignment import pyhtk

# procedure
-make_dic_files = 0
+make_htk_dict_files = 0
do_forced_alignment_htk = 0
+eval_forced_alignment_htk = 0
make_kaldi_data_files = 0
make_kaldi_lexicon_txt = 0
load_forced_alignment_kaldi = 1
-eval_forced_alignment = 0
+eval_forced_alignment_kaldi = 1


## ======================= add paths =======================
sys.path.append(os.path.join(default.repo_dir, 'forced_alignment'))
from forced_alignment import convert_phone_set
from forced_alignment import pyhtk

sys.path.append(os.path.join(default.repo_dir, 'toolbox'))
-#import pyHTK
from evaluation import plot_confusion_matrix


## ======================= convert phones ======================
mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', default.ipa_xsampa_converter_dir)

xls = pd.ExcelFile(excel_file)
@@ -128,7 +115,7 @@ word_list = np.unique(df['word'])

## ======================= make dict files used for HTK. ======================
-if make_dic_files:
+if make_htk_dict_files:
    output_type = 3
    for word in word_list:
@@ -138,14 +125,12 @@ if make_dic_files:
        pronvar_ = df['famehtk'][df['word'].str.match(word)]

        # make dic file.
-        am_func.make_dic(word, pronvar_, htk_dict_file, output_type)
+        am_func.make_htk_dict(word, pronvar_, htk_dict_file, output_type)


## ======================= forced alignment using HTK =======================
if do_forced_alignment_htk:
-    #hmm_num = 2
-    #for hmm_num in [1]:
    #for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]:
    for hmm_num in [256, 512, 1024]:
        hmm_num_str = str(hmm_num)
@@ -178,26 +163,19 @@ if do_forced_alignment_htk:
                                  default.phonelist, acoustic_model)
                    os.remove(label_file)

                    prediction = am_func.read_fileFA(fa_file)
-                    #predictions.append(prediction)
                    print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction))
                else:
                    prediction = ''
-                    #predictions.append('')
                    print('!!!!! file not found.')

                line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i)
                predictions = predictions.append(line)
            else:
                prediction = ''
-                #predictions.append('')
                print('!!!!! invalid entry.')

-        #predictions = np.array(predictions)
-        #np.save(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'), predictions)
        predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))
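
For context, the per-file call pattern that produces `prediction` in the loop above matches the block kept (commented out) further down in this file; a condensed sketch, assuming `wav_file`, `word`, `WORD`, `fa_file` and `acoustic_model` have already been set for the current row and HMM size:

    # condensed sketch of one HTK forced-alignment pass (see the assumptions above)
    label_file = os.path.join(wav_dir, filename.replace('.wav', '.lab'))
    with open(label_file, 'w') as f:
        f.write(WORD)                                   # single-word label for HVite

    htk_dict_file = os.path.join(htk_dict_dir, word + '.dic')
    pyhtk.doHVite(wav_file, label_file, htk_dict_file, fa_file, default.config_hvite,
                  default.phonelist, acoustic_model)
    os.remove(label_file)

    prediction = am_func.read_fileFA(fa_file)           # space-joined phone string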
@@ -208,9 +186,6 @@ if make_kaldi_data_files:
    text_file = os.path.join(kaldi_data_dir, 'text')
    utt2spk = os.path.join(kaldi_data_dir, 'utt2spk')

-    #predictions = []
-    #file_num_max = len(filenames)

    # remove previous files.
    if os.path.exists(wav_scp):
        os.remove(wav_scp)
@@ -224,22 +199,11 @@ if make_kaldi_data_files:
    f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n')

    # make wav.scp, text, and utt2spk files.
-    predictions = pd.DataFrame({'filename': [''],
-                                'word': [''],
-                                'xsampa': [''],
-                                'ipa': [''],
-                                'famehtk': [''],
-                                'prediction': ['']})
-    #for i in range(0, file_num_max):
-    #for i in range(400, 410):
-    for i, filename in enumerate(df['filename']):
-        #print('=== {0}/{1} ==='.format(i+1, file_num_max))
-        #filename = filenames[i]
-        print('=== {0}/{1} ==='.format(i, len(df)))
-        wav_file = wav_dir + '\\' + filename
-        if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):
+    for i in df.index:
+        filename = df['filename'][i]
+        print('=== {0}: {1} ==='.format(i, filename))
+        #if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):

        wav_file = os.path.join(wav_dir, filename)
        if os.path.exists(wav_file):
            speaker_id = 'speaker_' + str(i).zfill(4)
@@ -254,7 +218,6 @@ if make_kaldi_data_files:
                f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix))

            # text file
-            #word = words[i].lower()
            word = df['word'][i].lower()
            f_text_file.write('{0}\t{1}\n'.format(utterance_id, word))
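
Each iteration adds one row per wav file to wav.scp, text and utt2spk. A minimal per-row sketch with invented values, assuming the standard Kaldi `<utterance_id> <speaker_id>` layout for utt2spk (the utt2spk write itself falls outside the lines shown here), and an utterance-ID scheme of `speaker_NNNN-<filename>` as implied by the regex used later when loading the alignments:

    # illustrative only: the ID scheme and values are assumptions, not project data
    speaker_id = 'speaker_' + str(i).zfill(4)                          # e.g. 'speaker_0000'
    utterance_id = speaker_id + '-' + filename                         # assumed ID format
    f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix))   # wav.scp: ID + wav path
    f_text_file.write('{0}\t{1}\n'.format(utterance_id, word))         # text:    ID + transcript
    f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id))      # utt2spk: ID + speaker ID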
@@ -268,8 +231,7 @@ if make_kaldi_data_files:

## ======================= make lexicon txt which is used by Kaldi =======================
if make_kaldi_lexicon_txt:
-    #lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')
-    option_num = 5
+    option_num = 6

    # remove previous file.
    if os.path.exists(lexicon_txt):
@@ -278,87 +240,35 @@ if make_kaldi_lexicon_txt:
    if os.path.exists(lexiconp_txt):
        os.remove(lexiconp_txt)

-    #mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', ipa_xsampa_converter_dir)
-    #with open(csvfile, encoding="utf-8") as fin:
-    # lines = csv.reader(fin, delimiter=';', lineterminator="\n", skipinitialspace=True)
-    # next(lines, None) # skip the headers
-    # filenames = []
-    # words = []
-    # pronunciations = []
-    # p = []
-    # for line in lines:
-    # if line[1] is not '' and len(line) > 5:
-    # filenames.append(line[0])
-    # words.append(line[1])
-    # pron_xsampa = line[3]
-    # pron_ipa = convert_xsampa2ipa.conversion('xsampa', 'ipa', mapping, pron_xsampa)
-    # pron_ipa = pron_ipa.replace('ː', ':')
-    # # adjust to phones used in the acoustic model.
-    # pronunciations.append(pron_ipa)
-    ## check if all phones are in the phonelist of the acoustic model.
-    ##'y', 'b', 'ɾ', 'u', 'ɔ:', 'ø', 't', 'œ', 'n', 'ɒ', 'ɐ', 'f', 'o', 'k', 'x', 'ɡ', 'v', 's', 'ɛ:', 'ɪ:', 'ɑ', 'ɛ', 'a', 'd', 'z', 'ɪ', 'ɔ', 'l', 'i:', 'm', 'p', 'a:', 'i', 'e', 'j', 'o:', 'ʁ', 'h', ':', 'e:', 'ə', 'æ', 'χ', 'w', 'r', 'ə:', 'sp', 'ʊ', 'u:', 'ŋ'
-    #filenames = np.array(filenames)
-    #words = np.array(words)
-    #wordlist = np.unique(words)
-    #pronunciations = np.array(pronunciations)

    # output lexicon.txt
    f_lexicon_txt = open(lexicon_txt, 'a', encoding="utf-8", newline='\n')
    pronvar_list_all = []
    for word in word_list:

        # pronunciation variant of the target word.
-        #pronvar_ = pronunciations[words == word]
        pronunciation_variants = df['ipa'][df['word'].str.match(word)]
-        #pronunciation_variants = np.unique(pronunciation_variants)
-        # remove ''
-        #pronvar_ = np.delete(pronvar_, np.where(pronvar_==''))

        c = Counter(pronunciation_variants)
        total_num = sum(c.values())

-        for key, value in c.most_common(option_num):
-            #print('{0}\t{1}\t{2}\t{3}'.format(word, key, value, total_num))
-            key = key.replace('æ', 'ɛ')
-            key = key.replace('ɐ', 'a')
-            key = key.replace('ɑ', 'a')
-            key = key.replace('ɾ', 'r')
-            key = key.replace('ɹ', 'r') # ???
-            key = key.replace('ʁ', 'r')
-            key = key.replace('ʀ', 'r') # ???
-            key = key.replace('ʊ', 'u')
-            key = key.replace('χ', 'x')
-            #print('-->{0}\t{1}\t{2}\t{3}\n'.format(word, key, value, total_num))
+        #with open(result_dir + '\\' + word + '.csv', 'a', encoding="utf-8", newline='\n') as f:
+        #    for key in c.keys():
+        #        f.write("{0},{1}\n".format(key,c[key]))

+        for key, value in c.most_common(option_num):
            # make possible pronounciation variant list.
-            pronvar_list = [key]
-            while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list):
-                pronvar_list_ = []
-                for p in pronvar_list:
-                    if 'ø:' in p:
-                        pronvar_list_.append(p.replace('ø:', 'ö'))
-                        pronvar_list_.append(p.replace('ø:', 'ö:'))
-                    if 'œ' in p:
-                        pronvar_list_.append(p.replace('œ', 'ɔ̈'))
-                        pronvar_list_.append(p.replace('œ', 'ɔ̈:'))
-                    if 'ɒ' in p:
-                        pronvar_list_.append(p.replace('ɒ', 'ɔ̈'))
-                        pronvar_list_.append(p.replace('ɒ', 'ɔ̈:'))
-                pronvar_list = np.unique(pronvar_list_)
+            pronvar_list = am_func.fame_pronunciation_variant(key)

            for pronvar_ in pronvar_list:
                split_ipa = convert_phone_set.split_fame_ipa(pronvar_)
                pronvar_out = ' '.join(split_ipa)
                pronvar_list_all.append([word, pronvar_out])

-    # output
    pronvar_list_all = np.array(pronvar_list_all)
    pronvar_list_all = np.unique(pronvar_list_all, axis=0)

+    # output
    f_lexicon_txt.write('<UNK>\tSPN\n')
    for line in pronvar_list_all:
        f_lexicon_txt.write('{0}\t{1}\n'.format(line[0].lower(), line[1]))
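
The resulting lexicon.txt holds one tab-separated entry per kept pronunciation variant, the word lower-cased and the phones space-separated by split_fame_ipa. An invented excerpt for illustration (the pronunciations shown are not actual Stimmen entries):

    <UNK>	SPN
    oog	o: x
    oog	o: ɡ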
@@ -368,9 +278,8 @@ if make_kaldi_lexicon_txt:

## ======================= load kaldi forced alignment result =======================
if load_forced_alignment_kaldi:
-    kaldi_work_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
-    phones_txt = os.path.join(kaldi_work_dir, 'data', 'lang', 'phones.txt')
-    merged_alignment_txt = os.path.join(kaldi_work_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')
+    phones_txt = os.path.join(default.kaldi_dir, 'data', 'lang', 'phones.txt')
+    merged_alignment_txt = os.path.join(default.kaldi_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')

    #filenames = np.load(data_dir + '\\filenames.npy')
    #words = np.load(data_dir + '\\words.npy')
@@ -380,98 +289,72 @@ if load_forced_alignment_kaldi:
    # load the mapping between phones and ids.
    with open(phones_txt, 'r', encoding="utf-8") as f:
-        mappings = f.read().split('\n')
+        mapping_phone2id = f.read().split('\n')

    phones = []
-    phone_ids = []
-    for m in mappings:
+    phone_ids = [] # ID of phones
+    for m in mapping_phone2id:
        m = m.split(' ')
        if len(m) > 1:
            phones.append(m[0])
            phone_ids.append(int(m[1]))

+    # load the result of FA.
    with open(merged_alignment_txt, 'r') as f:
        lines = f.read()
        lines = lines.split('\n')

-    fa_filenames = []
-    fa_pronunciations = []
-    filename_ = ''
-    pron = []
+    predictions = pd.DataFrame({'filename': [''],
+                                'word': [''],
+                                'xsampa': [''],
+                                'ipa': [''],
+                                'famehtk': [''],
+                                'prediction': ['']})
+    #fa_filenames = []
+    #fa_pronunciations = []
+    utterance_id_ = ''
+    pronunciation = []
    for line in lines:
        line = line.split(' ')
        if len(line) == 5:
-            filename = line[0]
-            if filename == filename_:
+            utterance_id = line[0]
+            if utterance_id == utterance_id_:
                phone_id = int(line[4])
                #if not phone_id == 1:
-                phone = phones[phone_ids.index(phone_id)]
-                pron_ = re.sub(r'_[A-Z]', '', phone)
-                if not pron_ == 'SIL':
-                    pron.append(pron_)
+                phone_ = phones[phone_ids.index(phone_id)]
+                phone = re.sub(r'_[A-Z]', '', phone_)
+                if not phone == 'SIL':
+                    pronunciation.append(phone)
            else:
-                fa_filenames.append(re.sub(r'speaker_[0-9]{4}-', '', filename))
-                fa_pronunciations.append(' '.join(pron))
-                pron = []
+                filename = re.sub(r'speaker_[0-9]{4}-', '', utterance_id_)
+                prediction = ''.join(pronunciation)
+                df_ = df[df['filename'].str.match(filename)]
+                df_idx = df_.index[0]
+                prediction_ = pd.Series([#filename,
+                                         #df_['word'][df_idx],
+                                         #df_['xsampa'][df_idx],
+                                         #df_['ipa'][df_idx],
+                                         #df_['famehtk'][df_idx],
+                                         df_.iloc[0,1],
+                                         df_.iloc[0,3],
+                                         df_.iloc[0,4],
+                                         df_.iloc[0,2],
+                                         df_.iloc[0,0],
+                                         prediction],
+                                        index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'],
+                                        name=df_idx)
+                predictions = predictions.append(prediction_)
+                #fa_filenames.append()
+                #fa_pronunciations.append(' '.join(pronunciation))
+                pronunciation = []

-            filename_ = filename
+            utterance_id_ = utterance_id

+    predictions.to_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))

-    # correct or not.
-    #for filename, fa_pronunciation in zip(fa_filenames, fa_pronunciations):
-    # predictions = pd.DataFrame({'filename': [''],
-    # 'word': [''],
-    # 'xsampa': [''],
-    # 'ipa': [''],
-    # 'famehtk': [''],
-    # 'prediction': ['']})
-    # for i, filename in enumerate(df['filename']):
-    # print('=== {0}/{1} ==='.format(i, len(df)))
-    # if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)):
-    # wav_file = os.path.join(wav_dir, filename)
-    # if os.path.exists(wav_file):
-    # word = df['word'][i]
-    # WORD = word.upper()
-    # fa_file = os.path.join(fa_dir, filename.replace('.wav', '.txt') + hmm_num_str)
-    # #if not os.path.exists(fa_file):
-    # # make label file.
-    # label_file = os.path.join(wav_dir, filename.replace('.wav', '.lab'))
-    # with open(label_file, 'w') as f:
-    # lines = f.write(WORD)
-    # htk_dict_file = os.path.join(htk_dict_dir, word + '.dic')
-    # pyhtk.doHVite(wav_file, label_file, htk_dict_file, fa_file, default.config_hvite,
-    # default.phonelist, acoustic_model)
-    # os.remove(label_file)
-    # prediction = am_func.read_fileFA(fa_file)
-    # #predictions.append(prediction)
-    # print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction))
-    # else:
-    # prediction = ''
-    # #predictions.append('')
-    # print('!!!!! file not found.')
-    # line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i)
-    # predictions = predictions.append(line)
-    # else:
-    # prediction = ''
-    # #predictions.append('')
-    # print('!!!!! invalid entry.')
-    # #predictions = np.array(predictions)
-    # #np.save(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'), predictions)
-    # predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl'))


## ======================= evaluate the result of forced alignment =======================
-if eval_forced_alignment:
+if eval_forced_alignment_htk:
    htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short')

    compare_hmm_num = 1
@@ -524,3 +407,81 @@ if eval_forced_alignment:
    if compare_hmm_num:
        f_result.close()

+
+## ======================= evaluate the result of forced alignment of kaldi =======================
+if eval_forced_alignment_kaldi:
+    result = pd.read_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl'))
+
+    f_result = open(os.path.join(result_dir, 'result.csv'), 'w')
+    f_result.write("word,total,valid,match,[%]\n")
+
+    # load pronunciation variants
+    with open(lexicon_txt, 'r', encoding="utf-8", newline='\n') as f:
+        lines = f.read().split('\n')[:-1]
+        pronunciation_variants_all = [line.split('\t') for line in lines]
+
+    word_list = np.delete(word_list, [0], 0) # remove 'Oog'
+    for word in word_list:
+
+        # load pronunciation variant of the word.
+        pronunciation_variants = []
+        for line in pronunciation_variants_all:
+            if line[0] == word.lower():
+                pronunciation_variants.append(line[1].replace(' ', ''))
+
+        # see only words which appears in top 3.
+        result_ = result[result['word'].str.match(word)]
+        result_tolerant = pd.DataFrame({
+            'filename': [''],
+            'word': [''],
+            'xsampa': [''],
+            'ipa': [''],
+            'prediction': [''],
+            'match': ['']})
+
+        for i in range(0, len(result_)):
+            line = result_.iloc[i]
+
+            # make a list of all possible pronunciation variants of ipa description.
+            # i.e. possible answers from forced alignment.
+            ipa = line['ipa']
+            pronvar_list = [ipa]
+            pronvar_list_ = am_func.fame_pronunciation_variant(ipa)
+            if not pronvar_list_ is None:
+                pronvar_list += list(pronvar_list_)
+
+            # only focus on pronunciations which can be estimated from ipa.
+            if len(set(pronvar_list) & set(pronunciation_variants)) > 0:
+                if line['prediction'] in pronvar_list:
+                    ismatch = True
+                else:
+                    ismatch = False
+
+                line_df = pd.DataFrame(result_.iloc[i]).T
+                df_idx = line_df.index[0]
+                result_tolerant_ = pd.Series([line_df.loc[df_idx, 'filename'],
+                                              line_df.loc[df_idx, 'word'],
+                                              line_df.loc[df_idx, 'xsampa'],
+                                              line_df.loc[df_idx, 'ipa'],
+                                              line_df.loc[df_idx, 'prediction'],
+                                              ismatch],
+                                             index=['filename', 'word', 'xsampa', 'ipa', 'prediction', 'match'],
+                                             name=df_idx)
+                result_tolerant = result_tolerant.append(result_tolerant_)
+
+        # remove the first entry (dummy)
+        result_tolerant = result_tolerant.drop(0, axis=0)
+
+        total_num = len(result_)
+        valid_num = len(result_tolerant)
+        match_num = np.sum(result_tolerant['match'])
+
+        print("word '{0}': {1}/{2} ({3:.2f} %) originally {4}".format(word, match_num, valid_num, match_num/valid_num*100, total_num))
+        f_result.write("{0},{1},{2},{3},{4}\n".format(word, total_num, valid_num, match_num, match_num/valid_num*100))
+
+    f_result.close()
+
+    ## output confusion matrix
+    #cm = confusion_matrix(result_['ipa'], result_['prediction'])
+    #plt.figure()
+    #plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False)
+    #plt.savefig(result_dir + '\\cm_' + word + '.png')
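
If the confusion-matrix block above gets re-enabled, a minimal sketch would look like the following; it assumes sklearn and matplotlib are available in this environment and reuses the plot_confusion_matrix helper imported from the toolbox earlier in the script:

    # sketch only: the sklearn/matplotlib imports are assumptions of this example
    from sklearn.metrics import confusion_matrix
    import matplotlib.pyplot as plt

    cm = confusion_matrix(result_['ipa'], result_['prediction'], labels=pronunciation_variants)
    plt.figure()
    plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False)
    plt.savefig(os.path.join(result_dir, 'cm_' + word + '.png'))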