2018-12-26 23:49:28 +01:00
|
|
|
|
## ===== setup: working directory, imports, module search paths =====
import os

# NOTE(review): hard-coded developer path — consider making this configurable.
os.chdir(r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model')

# standard library
import sys
import csv
import random
import shutil
from collections import Counter

# third-party
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import novoapi

# project-local
import defaultfiles as default

# forced_alignment lives outside this package; extend sys.path before importing.
sys.path.append(default.forced_alignment_module_dir)
from forced_alignment import convert_phone_set
#import acoustic_model_functions as am_func
import convert_xsampa2ipa
import novoapi_functions
import stimmen_functions

# output_confusion_matrix lives in the accent-classification project.
sys.path.append(default.accent_classification_dir)
import output_confusion_matrix
|
2018-12-26 23:49:28 +01:00
|
|
|
|
|
2019-01-15 11:30:49 +01:00
|
|
|
|
## ===== procedure switches =====
# When True, run forced alignment with the novo70 phoneset;
# when False, load previously saved results from Excel instead.
forced_alignment_novo70 = True
|
2018-12-26 23:49:28 +01:00
|
|
|
|
|
|
|
|
|
|
2019-01-12 23:29:56 +01:00
|
|
|
|
## ===== load novo phoneset =====
# Returns the IPA and novo70 phone inventories plus translation tables in
# both directions (IPA -> novo70 and novo70 -> IPA).
phoneset_ipa, phoneset_novo70, translation_key_ipa2novo70, translation_key_novo702ipa = novoapi_functions.load_novo70_phoneset()
|
2019-01-12 23:29:56 +01:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
## ===== extract pronunciations written in novo70 only (not_in_novo70) =====

## read pronunciation variants.
#stimmen_transcription_ = pd.ExcelFile(default.stimmen_transcription_xlsx)
#df = pd.read_excel(stimmen_transcription_, 'frequency')
#transcription_ipa = list(df['IPA'])

# NOTE(review): hard-coded data path — verify this location exists on the target machine.
stimmen_test_dir = r'c:\OneDrive\Research\rug\_data\stimmen_test'

# Transcriptions restricted to those expressible in the novo70 phoneset.
df = stimmen_functions.load_transcriptions_novo70(stimmen_test_dir)
|
2018-12-31 13:04:33 +01:00
|
|
|
|
|
2019-04-22 02:03:50 +02:00
|
|
|
|
|
2018-12-31 13:04:33 +01:00
|
|
|
|
|
2019-04-22 02:03:50 +02:00
|
|
|
|
## transcription mistake?
#transcription_ipa = [ipa.replace(';', 'ː') for ipa in transcription_ipa if not ipa=='pypɪl' and not pd.isnull(ipa)]
#transcription_ipa = [ipa.replace('ˑ', '') for ipa in transcription_ipa] # only one case.

# Scan every transcription and collect phones that fall outside the novo70
# phoneset (not_in_novo70) vs. transcriptions fully covered by it (all_in_novo70).
#not_in_novo70 = []
#all_in_novo70 = []
#for ipa in transcription_ipa:
#    ipa = ipa.replace(':', 'ː')
#    ipa = convert_phone_set.split_ipa(ipa)

#    # list of phones not in novo70 phoneset.
#    not_in_novo70_ = [phone for phone in ipa
#                      if not phone in phoneset_ipa and not phone in david_suggestion]
#    not_in_novo70_ = [phone.replace('sp', '') for phone in not_in_novo70_]
#    not_in_novo70_ = [phone.replace(':', '') for phone in not_in_novo70_]
#    not_in_novo70_ = [phone.replace('ː', '') for phone in not_in_novo70_]

#    if len(not_in_novo70_) == 0:
#        all_in_novo70.append(''.join(ipa))

#    #translation_key.get(phone, phone)
#    not_in_novo70.extend(not_in_novo70_)
#not_in_novo70_list = list(set(not_in_novo70))
|
2018-12-31 13:04:33 +01:00
|
|
|
|
|
2019-01-12 23:29:56 +01:00
|
|
|
|
|
|
|
|
|
## check which phones used in stimmen but not in novo70
# 'ʀ', 'ʁ',
# 'ɒ', 'ɐ',
# 'o', 'a' (o:, a:?)
# [e] 'nyːver mɑntsjə' (1)
# [ɾ] 'ɪːɾ'(1)
# [ɹ] 'iːjəɹ' (1), 'ɪ:ɹ' (1)
# [ø] 'gʀøtəpi:r'(1), 'grøtəpi:r'(1)
# [æ] 'røːzəʀæt'(2), 'røːzəræt'(1)
# [ʊ] 'ʊ'(1) --> can be ʏ (uh)??
# [χ] --> can be x??

# Helper (disabled): return the transcriptions whose split-IPA contains phone x
# without a following length mark.
#def search_phone_ipa(x, phone_list):
#    x_in_item = []
#    for ipa in phone_list:
#        ipa_original = ipa
#        ipa = ipa.replace(':', 'ː')
#        ipa = convert_phone_set.split_ipa(ipa)
#        if x in ipa and not x+':' in ipa:
#            x_in_item.append(ipa_original)
#    return x_in_item

#search_phone_ipa('ø', transcription_ipa)
|
|
|
|
|
|
|
|
|
|
|
2019-01-12 23:29:56 +01:00
|
|
|
|
## ===== load all transcriptions (df) =====
#df = stimmen_functions.load_transcriptions()

# Unique, non-null words occurring in the transcriptions, sorted alphabetically.
word_list = sorted(i for i in set(df['word']) if not pd.isnull(i))
|
2019-01-07 11:50:24 +01:00
|
|
|
|
|
|
|
|
|
|
2019-01-12 23:29:56 +01:00
|
|
|
|
## check frequency of each pronunciation variants
#cols = ['word', 'ipa', 'frequency']
#df_samples = pd.DataFrame(index=[], columns=cols)
#for ipa in all_in_novo70:
#    ipa = ipa.replace('ː', ':')
#    samples = df[df['ipa'] == ipa]
#    word = list(set(samples['word']))[0]
#    samples_Series = pd.Series([word, ipa, len(samples)], index=df_samples.columns)
#    df_samples = df_samples.append(samples_Series, ignore_index=True)

# each word
#df_per_word = pd.DataFrame(index=[], columns=df_samples.keys())

#for word in word_list:
# Ad-hoc inspection of a single word (index 2 of the sorted word list).
word = word_list[2]
df_ = df[df['word'] == word]
# NOTE(review): result of np.unique is discarded — presumably left over from
# interactive inspection; assign or print it if the variants are needed.
np.unique(list(df_['ipa']))
#df_samples_ = df_samples_[df_samples_['frequency']>2]
#df_per_word = df_per_word.append(df_samples_, ignore_index=True)

#df_per_word.to_excel(os.path.join(default.stimmen_dir, 'pronunciation_variants_novo70.xlsx'), encoding="utf-8")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
## ===== forced alignment =====
# Destination for wav copies of the 'Rozen' samples (for Martijn).
rozen_dir = r'c:\Users\Aki\source\repos\acoustic_model\rozen-test'

if forced_alignment_novo70:
    # Accumulator for alignment results over all processed words.
    Results = pd.DataFrame(index=[],
        columns=['filename', 'word', 'xsampa', 'ipa', 'result_ipa', 'result_novo70', 'llh'])

    #for word in word_list:
    for word in ['Rozen']:
        # pronunciation variants top 3
        # NOTE(review): df_per_word is only built in the commented-out section
        # above — this line raises NameError unless that section is re-enabled.
        df_per_word_ = df_per_word[df_per_word['word'] == word]
        df_per_word_ = df_per_word_.sort_values('frequency', ascending=False)

        if len(df_per_word_) < 3: # pauw, rozen
            pronunciation_ipa = list(df_per_word_['ipa'])
        elif word == 'Reuzenrad':
            # Reuzenrad keeps its top 4 variants.
            pronunciation_ipa = [
                df_per_word_.iloc[0]['ipa'],
                df_per_word_.iloc[1]['ipa'],
                df_per_word_.iloc[2]['ipa'],
                df_per_word_.iloc[3]['ipa']]
        else:
            # oog, oor, reus, roeiboot
            pronunciation_ipa = [
                df_per_word_.iloc[0]['ipa'],
                df_per_word_.iloc[1]['ipa'],
                df_per_word_.iloc[2]['ipa']]
        #print("{0}: {1}".format(word, pronunciation_ipa))

        # samples for the word
        df_ = df[df['word'] == word]

        # samples in which all pronunciations are written in novo70.
        samples = df_.query("ipa in @pronunciation_ipa")

        # Per-word result accumulator.
        results = pd.DataFrame(index=[],
            columns=['filename', 'word', 'xsampa', 'ipa', 'result_ipa', 'result_novo70', 'llh'])

        for i in range(0, len(samples)):
            sample = samples.iloc[i]
            filename = sample['filename']
            wav_file = os.path.join(default.stimmen_wav_dir, filename)
            if os.path.exists(wav_file):
                # for Martijn
                shutil.copy(wav_file, os.path.join(rozen_dir, filename))

                # Disabled: forced alignment via the novoapi and result collection.
                # pronunciation_ipa_ = [ipa.replace(':', 'ː') for ipa in pronunciation_ipa]
                # result = novoapi_functions.forced_alignment(wav_file, word, pronunciation_ipa_)
                # result_ipa, result_novo70, llh = novoapi_functions.result2pronunciation(result, word)
                # result_ = pd.Series([
                #     sample['filename'],
                #     sample['word'],
                #     sample['xsampa'],
                #     sample['ipa'],
                #     ' '.join(result_ipa),
                #     ' '.join(result_novo70),
                #     llh
                #     ], index=results.columns)
                # results = results.append(result_, ignore_index = True)
                # print('{0}/{1}: answer {2} - prediction {3}'.format(
                #     i+1, len(samples), result_['ipa'], result_['result_ipa']))
                # #results.to_excel(os.path.join(default.stimmen_dir, 'results.xlsx'), encoding="utf-8")

        #if len(results) > 0:
        #    Results = Results.append(results, ignore_index = True)
    #Results.to_excel(os.path.join(default.stimmen_result_novoapi_dir, 'Results.xlsx'), encoding="utf-8")
else:
    # Load previously saved alignment results instead of recomputing them.
    Results_xlsx = pd.ExcelFile(os.path.join(default.stimmen_result_novoapi_dir, 'Results.xlsx'), encoding="utf-8")
    Results = pd.read_excel(Results_xlsx, 'Sheet1')
|
|
|
|
2019-01-15 11:30:49 +01:00
|
|
|
|
|
|
|
|
|
## ===== analysis =====
# Disabled: per-word confusion matrix between answered and predicted
# pronunciation variants, saved as a PNG per word.
#for word in word_list:
#    if not word == 'Oog':
#        Results_ = Results[Results['word'] == word]
#        y_true = list(Results_['ipa'])
#        y_pred_ = [ipa.replace(' ', '') for ipa in list(Results_['result_ipa'])]
#        y_pred = [ipa.replace('ː', ':') for ipa in y_pred_]
#        pronunciation_variants = list(set(y_true))
#        cm = confusion_matrix(y_true, y_pred, labels=pronunciation_variants)

#        plt.figure()
#        output_confusion_matrix.plot_confusion_matrix(cm, pronunciation_variants, normalize=False)
#        #plt.show()
#        plt.savefig(os.path.join(default.stimmen_result_novoapi_dir, word + '.png'))
|