Compare commits

..

2 Commits

Author SHA1 Message Date
yemaozi88
beff33fdf9 revert "load novo phoneset" to continue working. 2018-12-30 23:19:18 +01:00
yemaozi88
af785e51cf commit to clean up 2018-12-30 23:14:26 +01:00
6 changed files with 28 additions and 21 deletions

3
.gitignore vendored
View File

@ -1,9 +1,6 @@
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
## important ##
.acoustic_model/forced_alignment_novo.py
# User-specific files
*.suo
*.user

Binary file not shown.

View File

@ -25,12 +25,12 @@ mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', default.ipa_xsampa_
stimmen_transcription_ = pd.ExcelFile(default.stimmen_transcription_xlsx)
phonelist_novo70_ = pd.ExcelFile(default.phonelist_novo70_xlsx)
df = pd.read_excel(phonelist_novo70_, 'list')
## novo phoneset
#translation_key = dict()
translation_key = dict()
#phonelist_novo70_ = pd.ExcelFile(default.phonelist_novo70_xlsx)
#df = pd.read_excel(phonelist_novo70_, 'list')
## *_simple includes columns which has only one phone in.
#for ipa, novo70 in zip(df['IPA_simple'], df['novo70_simple']):
# if not pd.isnull(ipa):
@ -38,3 +38,18 @@ df = pd.read_excel(phonelist_novo70_, 'list')
# translation_key[ipa] = novo70
#phonelist_novo70 = np.unique(list(df['novo70_simple']))
## Build the novo70 <-> IPA phone lists from the cmu69 phoneset file.
## Each useful line is tab-separated: <novo70 label> \t <IPA symbol> ...;
## lines without a tab (blank/comment lines) are skipped.
phoneset_ipa = []
phoneset_novo70 = []
with open(default.cmu69_phoneset, "rt", encoding="utf-8") as fin:
    lines = fin.read()
    lines = lines.split('\n')
    for line in lines:
        words = line.split('\t')
        if len(words) > 1:
            novo70 = words[0]
            ipa = words[1]
            phoneset_ipa.append(ipa)
            phoneset_novo70.append(novo70)
            # also record the IPA -> novo70 mapping for later lookups
            translation_key[ipa] = novo70
# de-duplicate (np.unique also sorts and returns an ndarray)
phoneset_ipa = np.unique(phoneset_ipa)
# BUG FIX: original read `np.unique(phonset_novo70)` (misspelled name),
# which raises NameError at runtime.
phoneset_novo70 = np.unique(phoneset_novo70)

View File

@ -3,7 +3,7 @@ import os
#default_hvite_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'htk', 'config.HVite')
cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model'
kaldi_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5'
#config_hcopy = os.path.join(cygwin_dir, 'config', 'config.HCopy')
#config_train = os.path.join(cygwin_dir, 'config', 'config.train')
config_hvite = os.path.join(cygwin_dir, 'config', 'config.HVite')
@ -30,16 +30,11 @@ repo_dir = r'C:\Users\Aki\source\repos'
ipa_xsampa_converter_dir = os.path.join(repo_dir, 'ipa-xsama-converter')
forced_alignment_module_dir = os.path.join(repo_dir, 'forced_alignment')
WSL_dir = r'C:\OneDrive\WSL'
fame_dir = os.path.join(WSL_dir, 'kaldi-trunk', 'egs', 'fame')
fame_s5_dir = os.path.join(fame_dir, 's5')
fame_corpus_dir = os.path.join(fame_dir, 'corpus')
fame_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus'
experiments_dir = r'c:\OneDrive\Research\rug\experiments'
stimmen_transcription_xlsx = os.path.join(experiments_dir, 'stimmen', 'data', 'Frisian Variants Picture Task Stimmen.xlsx')
stimmen_data_dir = os.path.join(experiments_dir, 'stimmen', 'data')
phonelist_friesian_txt = os.path.join(experiments_dir, 'friesian', 'acoustic_model', 'config', 'phonelist_friesian.txt')
novo_api_dir = os.path.join(WSL_dir, 'python-novo-api')
cmu69_phoneset = os.path.join(novo_api_dir, 'novoapi', 'asr', 'phoneset', 'en', 'cmu69.phoneset')
phonelist_novo70_xlsx = os.path.join(experiments_dir, 'Nederlandse phonesets_aki.xlsx')

View File

@ -30,12 +30,12 @@ htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short
fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k')
result_dir = os.path.join(default.experiments_dir, 'stimmen', 'result')
kaldi_data_dir = os.path.join(default.fame_s5_dir, 'data', 'alignme')
kaldi_dict_dir = os.path.join(default.fame_s5_dir, 'data', 'local', 'dict')
kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme')
kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict')
lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt')
#lex_asr = os.path.join(default.fame_corpus_dir, 'lexicon', 'lex.asr')
#lex_asr_htk = os.path.join(default.fame_corpus_dir, 'lexicon', 'lex.asr_htk')
#lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
#lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk')
# procedure
@ -278,8 +278,8 @@ if make_kaldi_lexicon_txt:
## ======================= load kaldi forced alignment result =======================
if load_forced_alignment_kaldi:
phones_txt = os.path.join(default.fame_s5_dir, 'data', 'lang', 'phones.txt')
merged_alignment_txt = os.path.join(default.fame_s5_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')
phones_txt = os.path.join(default.kaldi_dir, 'data', 'lang', 'phones.txt')
merged_alignment_txt = os.path.join(default.kaldi_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt')
#filenames = np.load(data_dir + '\\filenames.npy')
#words = np.load(data_dir + '\\words.npy')