acoustic_model/acoustic_model/acoustic_model.py


import os
import sys
import tempfile
import configparser
import subprocess
import pandas as pd
## ======================= user define =======================
repo_dir = 'C:\\Users\\Aki\\source\\repos\\acoustic_model'
curr_dir = repo_dir + '\\acoustic_model'
config_ini = curr_dir + '\\config.ini'
output_dir = 'd:\\OneDrive\\Research\\rug\\experiments\\friesian\\acoustic_model'
forced_alignment_module = 'C:\\Users\\Aki\\source\\repos\\forced-alignment'
sys.path.append(curr_dir)
sys.path.append(forced_alignment_module)
from forced_alignment import convert_phone_set
import acoustic_model_functions as am_func
## ======================= load variables =======================
config = configparser.ConfigParser()
config.read(config_ini)
config_hcopy = config['Settings']['config_hcopy']
config_train = config['Settings']['config_train']
FAME_dir = config['Settings']['FAME_dir']
lexicon_file = FAME_dir + '\\lexicon\\lex.asr'
dataset_list = ['devel', 'test', 'train']
## ======================= extract features =======================
##dataset = dataset_list[0]
#for dataset in dataset_list:
#    print(dataset)
#
#    ## make a script file for HCopy
#    hcopy_scp = tempfile.NamedTemporaryFile(mode='w', delete=False)
#    hcopy_scp.close()
#    ## using the filelist in the FAME! corpus
#    feature_dir = output_dir + '\\mfc\\' + dataset
#    am_func.make_hcopy_scp_from_filelist_in_fame(FAME_dir, dataset, feature_dir, hcopy_scp.name)
#
#    ## extract features
#    subprocessStr = 'HCopy -C ' + config_hcopy + ' -S ' + hcopy_scp.name
#    subprocess.call(subprocessStr, shell=True)
#    os.remove(hcopy_scp.name)
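## Note: each line of the HCopy scp pairs a source audio file with its target
## feature file (e.g. "...\utt1.wav  ...\utt1.mfc"); HCopy reads the list via
## the -S option and the feature configuration via -C.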
## ======================= make a list of features =======================
##dataset = dataset_list[2]
#for dataset in dataset_list:
#    print(dataset)
#    feature_dir = output_dir + '\\mfc\\' + dataset
#    hcompv_scp = output_dir + '\\scp\\' + dataset + '.scp'
#    am_func.make_filelist(feature_dir, hcompv_scp)
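## The resulting scp is a plain list of feature files, one per line, as
## consumed by HTK tools such as HCompV via the -S option.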
## ======================= check the phonemes used in the lexicon =======================
phonelist = am_func.get_phonelist(lexicon_file)  # 49 phones appear in the lexicon
phonelist_list = list(phonelist)
#lines_g1 = am_func.find_phone(lexicon_file, 'g')
#lines_g2 = am_func.find_phone(lexicon_file, 'ɡ')
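## Note: 'g' (U+0067, Latin small letter g) and 'ɡ' (U+0261, IPA script g) are
## distinct code points; the two find_phone calls above check which of the two
## variants the lexicon actually uses.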
## ======================= make label file =======================
dataset = 'train'
hcompv_scp = output_dir + '\\scp\\' + dataset + '.scp'
script_list = FAME_dir + '\\data\\' + dataset + '\\text'
lexicon = pd.read_table(lexicon_file, names=['word', 'pronunciation'])
with open(hcompv_scp) as fin:
    features = fin.read().strip().split('\n')
with open(script_list, "rt", encoding="utf-8") as fin:
    scripts = pd.Series(fin.read().strip().split('\n'))
feature = features[0]  # as a trial, process only the first utterance
file_basename = os.path.basename(feature).replace('.mfc', '')
# get words from scripts.
script = scripts[scripts.str.contains(file_basename)]
script_id = script.index[0]
script_txt = script.get(script_id)
script_words = script_txt.split(' ')
del script_words[0]  # drop the utterance id (first field of the line)
# make the label file.
SCRIPT_WORDS = []
script_prons = []
all_prons_found = 1  # stays nonzero only if every word is found in the lexicon
for word in script_words:
    SCRIPT_WORDS.append(word.upper())
    extracted = lexicon[lexicon['word'] == word]
    script_prons.append(extracted)
    all_prons_found *= len(extracted)
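# A minimal sketch (not in the original) of writing the word-level HTK label
# file for this utterance; the .lab path next to the feature file and the
# one-word-per-line format are assumptions based on HTK conventions.
if all_prons_found:
    label_file = os.path.join(os.path.dirname(feature), file_basename + '.lab')
    with open(label_file, 'w', encoding='utf-8') as fout:
        for WORD in SCRIPT_WORDS:
            fout.write(WORD + '\n')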
# make the dict file.
phonelist_fame = convert_phone_set.ipa2fame(phonelist_list)  # map the phone list to the FAME phone set
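# A minimal sketch (not in the original) of writing an HTK-style pronunciation
# dictionary from the loaded lexicon; the output filename 'lexicon.htk' is an
# assumption, and any phone-set conversion of the pronunciation field is left out.
dict_file = output_dir + '\\lexicon.htk'
with open(dict_file, 'w', encoding='utf-8') as fout:
    for _, row in lexicon.sort_values(by='word').iterrows():
        # HTK dictionaries expect: WORD <whitespace> phone sequence
        fout.write('{}\t{}\n'.format(str(row['word']).upper(), row['pronunciation']))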