From 0777735979c245a70a93fee8be2506c45df60f99 Mon Sep 17 00:00:00 2001
From: yemaozi88 <428968@gmail.com>
Date: Sun, 16 Sep 2018 23:33:31 +0200
Subject: [PATCH] Code is cleaned up.

---
 .vs/acoustic_model/v15/.suo | Bin 67072 -> 61440 bytes
 acoustic_model.sln | 1 +
 .../acoustic_model_functions.cpython-36.pyc | Bin 4615 -> 5904 bytes
 .../__pycache__/defaultfiles.cpython-36.pyc | Bin 612 -> 670 bytes
 acoustic_model/acoustic_model_functions.py | 57 ++-
 acoustic_model/defaultfiles.py | 3 +-
 acoustic_model/performance_check.py | 375 ++++++++----------
 7 files changed, 225 insertions(+), 211 deletions(-)

diff --git a/.vs/acoustic_model/v15/.suo b/.vs/acoustic_model/v15/.suo
index d7f39204db0df761287424f7fe769c9c8ce6ea6d..a6c86bcbd5d82e63ac0ff5d4b84527bb75ee0296 100644
Binary files a/.vs/acoustic_model/v15/.suo and b/.vs/acoustic_model/v15/.suo differ
diff --git a/acoustic_model.sln b/acoustic_model.sln
index ecdeaf9..37a1335 100644
--- a/acoustic_model.sln
+++ b/acoustic_model.sln
@@ -11,6 +11,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
 		..\forced_alignment\forced_alignment\convert_phone_set.py = ..\forced_alignment\forced_alignment\convert_phone_set.py
 		..\toolbox\evaluation.py = ..\toolbox\evaluation.py
 		..\forced_alignment\forced_alignment\forced_alignment.pyproj = ..\forced_alignment\forced_alignment\forced_alignment.pyproj
+		..\forced_alignment\forced_alignment\htk_dict.py = ..\forced_alignment\forced_alignment\htk_dict.py
 		..\forced_alignment\forced_alignment\lexicon.py = ..\forced_alignment\forced_alignment\lexicon.py
 		..\forced_alignment\forced_alignment\mlf.py = ..\forced_alignment\forced_alignment\mlf.py
 		..\forced_alignment\forced_alignment\pronunciations.py = ..\forced_alignment\forced_alignment\pronunciations.py
diff --git a/acoustic_model/__pycache__/acoustic_model_functions.cpython-36.pyc b/acoustic_model/__pycache__/acoustic_model_functions.cpython-36.pyc
index 3ff968422062f223c00c69ce5bffdb0b149c2db2..067e7892520d9d435867822f74bf3799b629dda9 100644
Binary files a/acoustic_model/__pycache__/acoustic_model_functions.cpython-36.pyc and b/acoustic_model/__pycache__/acoustic_model_functions.cpython-36.pyc differ
diff --git a/acoustic_model/__pycache__/defaultfiles.cpython-36.pyc b/acoustic_model/__pycache__/defaultfiles.cpython-36.pyc
index 5dbcde73aa1d335c81a17b3bf8821acb23eb041b..4fac91d60b21523d760ee59f078bcbf31736281c 100644
Binary files a/acoustic_model/__pycache__/defaultfiles.cpython-36.pyc and b/acoustic_model/__pycache__/defaultfiles.cpython-36.pyc differ
diff --git a/acoustic_model/acoustic_model_functions.py b/acoustic_model/acoustic_model_functions.py
--- a/acoustic_model/acoustic_model_functions.py
+++ b/acoustic_model/acoustic_model_functions.py
@@ ... @@
-            pron = line[1]
-            if phone in pron:
+            pronunciation = line[1]
+            if phone in pronunciation:
                 extracted.append(line)
     return extracted
@@ -149,3 +149,54 @@ def read_fileFA(fileFA):
             phones.append(line_split[2])
 
     return ' '.join(phones)
+
+
+def fame_pronunciation_variant(ipa):
+    ipa = ipa.replace('æ', 'ɛ')
+    ipa = ipa.replace('ɐ', 'a')
+    ipa = ipa.replace('ɑ', 'a')
+    ipa = ipa.replace('ɾ', 'r')
+    ipa = ipa.replace('ɹ', 'r') # ???
+    ipa = ipa.replace('ʁ', 'r')
+    ipa = ipa.replace('ʀ', 'r') # ???
+ ipa = ipa.replace('ʊ', 'u') + ipa = ipa.replace('χ', 'x') + + pronvar_list = [ipa] + while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list): + pronvar_list_ = [] + for p in pronvar_list: + if 'ø:' in p: + pronvar_list_.append(p.replace('ø:', 'ö')) + pronvar_list_.append(p.replace('ø:', 'ö:')) + if 'œ' in p: + pronvar_list_.append(p.replace('œ', 'ɔ̈')) + pronvar_list_.append(p.replace('œ', 'ɔ̈:')) + if 'ɒ' in p: + pronvar_list_.append(p.replace('ɒ', 'ɔ̈')) + pronvar_list_.append(p.replace('ɒ', 'ɔ̈:')) + pronvar_list = np.unique(pronvar_list_) + return pronvar_list + + +def make_fame2ipa_variants(fame): + fame = 'rɛös' + ipa = [fame] + ipa.append(fame.replace('ɛ', 'æ')) + ipa.append(fame.replace('a', 'ɐ')) + ipa.append(fame.replace('a', 'ɑ')) + ipa.append(fame.replace('r', 'ɾ')) + ipa.append(fame.replace('r', 'ɹ')) + ipa.append(fame.replace('r', 'ʁ')) + ipa.append(fame.replace('r', 'ʀ')) + ipa.append(fame.replace('u', 'ʊ')) + ipa.append(fame.replace('x', 'χ')) + + ipa.append(fame.replace('ö', 'ø:')) + ipa.append(fame.replace('ö:', 'ø:')) + ipa.append(fame.replace('ɔ̈', 'œ')) + ipa.append(fame.replace('ɔ̈:', 'œ')) + ipa.append(fame.replace('ɔ̈', 'ɒ')) + ipa.append(fame.replace('ɔ̈:', 'ɒ')) + + return ipa \ No newline at end of file diff --git a/acoustic_model/defaultfiles.py b/acoustic_model/defaultfiles.py index 7437cd9..74d9a9b 100644 --- a/acoustic_model/defaultfiles.py +++ b/acoustic_model/defaultfiles.py @@ -2,7 +2,8 @@ import os #default_hvite_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'htk', 'config.HVite') -cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model' +cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model' +kaldi_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5' #config_hcopy = os.path.join(cygwin_dir, 'config', 'config.HCopy') #config_train = os.path.join(cygwin_dir, 'config', 'config.train') config_hvite = os.path.join(cygwin_dir, 'config', 'config.HVite') diff --git a/acoustic_model/performance_check.py b/acoustic_model/performance_check.py index bb43c20..f4dd82a 100644 --- a/acoustic_model/performance_check.py +++ b/acoustic_model/performance_check.py @@ -16,63 +16,50 @@ import acoustic_model_functions as am_func import convert_xsampa2ipa import defaultfiles as default +from forced_alignment import pyhtk + ## ======================= user define ======================= -#curr_dir = r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model' -#config_ini = 'config.ini' -#repo_dir = r'C:\Users\Aki\source\repos' -#forced_alignment_module = repo_dir + '\\forced_alignment' -#forced_alignment_module_old = repo_dir + '\\aki_tools' -#ipa_xsampa_converter_dir = repo_dir + '\\ipa-xsama-converter' -#accent_classification_dir = repo_dir + '\\accent_classification\accent_classification' excel_file = os.path.join(default.experiments_dir, 'stimmen', 'data', 'Frisian Variants Picture Task Stimmen.xlsx') +data_dir = os.path.join(default.experiments_dir, 'stimmen', 'data') - -#experiments_dir = r'C:\OneDrive\Research\rug\experiments' -data_dir = os.path.join(default.experiments_dir, 'stimmen', 'data') -#csvfile = data_dir + '\\Frisian Variants Picture Task Stimmen.csv' -#wav_dir = os.path.join(default.experiments_dir, 'stimmen', 'wav_44k') # 44.1k wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k -#wav_dir = r'c:\OneDrive\WSL\kaldi-trunk\egs\fame\s5\corpus\stimmen' # 16k acoustic_model_dir = os.path.join(default.experiments_dir, 'friesian', 'acoustic_model', 'model') -htk_dict_dir = 
os.path.join(default.experiments_dir, 'stimmen', 'dic_short') -fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k') -result_dir = os.path.join(default.experiments_dir, 'stimmen', 'result') +htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short') +fa_dir = os.path.join(default.experiments_dir, 'stimmen', 'FA_44k') +result_dir = os.path.join(default.experiments_dir, 'stimmen', 'result') kaldi_data_dir = os.path.join(default.kaldi_dir, 'data', 'alignme') kaldi_dict_dir = os.path.join(default.kaldi_dir, 'data', 'local', 'dict') -lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt') +lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt') -#cygwin_dir = r'C:\cygwin64\home\Aki\acoustic_model' -#lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr') +#lex_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr') #lex_asr_htk = os.path.join(default.fame_dir, 'lexicon', 'lex.asr_htk') -from forced_alignment import pyhtk # procedure -make_dic_files = 0 +make_htk_dict_files = 0 do_forced_alignment_htk = 0 +eval_forced_alignment_htk = 0 make_kaldi_data_files = 0 make_kaldi_lexicon_txt = 0 load_forced_alignment_kaldi = 1 -eval_forced_alignment = 0 +eval_forced_alignment_kaldi = 1 + ## ======================= add paths ======================= - sys.path.append(os.path.join(default.repo_dir, 'forced_alignment')) from forced_alignment import convert_phone_set from forced_alignment import pyhtk sys.path.append(os.path.join(default.repo_dir, 'toolbox')) -#import pyHTK from evaluation import plot_confusion_matrix ## ======================= convert phones ====================== - mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', default.ipa_xsampa_converter_dir) xls = pd.ExcelFile(excel_file) @@ -128,7 +115,7 @@ word_list = np.unique(df['word']) ## ======================= make dict files used for HTK. ====================== -if make_dic_files: +if make_htk_dict_files: output_type = 3 for word in word_list: @@ -138,14 +125,12 @@ if make_dic_files: pronvar_ = df['famehtk'][df['word'].str.match(word)] # make dic file. - am_func.make_dic(word, pronvar_, htk_dict_file, output_type) + am_func.make_htk_dict(word, pronvar_, htk_dict_file, output_type) ## ======================= forced alignment using HTK ======================= if do_forced_alignment_htk: - #hmm_num = 2 - #for hmm_num in [1]: #for hmm_num in [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]: for hmm_num in [256, 512, 1024]: hmm_num_str = str(hmm_num) @@ -178,26 +163,19 @@ if do_forced_alignment_htk: default.phonelist, acoustic_model) os.remove(label_file) - prediction = am_func.read_fileFA(fa_file) - #predictions.append(prediction) print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction)) else: prediction = '' - #predictions.append('') print('!!!!! file not found.') line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i) predictions = predictions.append(line) else: prediction = '' - #predictions.append('') print('!!!!! 
invalid entry.') - - #predictions = np.array(predictions) - #np.save(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'), predictions) predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl')) @@ -208,9 +186,6 @@ if make_kaldi_data_files: text_file = os.path.join(kaldi_data_dir, 'text') utt2spk = os.path.join(kaldi_data_dir, 'utt2spk') - #predictions = [] - #file_num_max = len(filenames) - # remove previous files. if os.path.exists(wav_scp): os.remove(wav_scp) @@ -224,42 +199,30 @@ if make_kaldi_data_files: f_utt2spk = open(utt2spk, 'a', encoding="utf-8", newline='\n') # make wav.scp, text, and utt2spk files. - predictions = pd.DataFrame({'filename': [''], - 'word': [''], - 'xsampa': [''], - 'ipa': [''], - 'famehtk': [''], - 'prediction': ['']}) - #for i in range(0, file_num_max): - #for i in range(400, 410): - for i, filename in enumerate(df['filename']): + for i in df.index: + filename = df['filename'][i] + print('=== {0}: {1} ==='.format(i, filename)) - #print('=== {0}/{1} ==='.format(i+1, file_num_max)) - #filename = filenames[i] + #if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)): + wav_file = os.path.join(wav_dir, filename) + if os.path.exists(wav_file): + speaker_id = 'speaker_' + str(i).zfill(4) + utterance_id = filename.replace('.wav', '') + utterance_id = utterance_id.replace(' ', '_') + utterance_id = speaker_id + '-' + utterance_id - print('=== {0}/{1} ==='.format(i, len(df))) - wav_file = wav_dir + '\\' + filename - if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)): - wav_file = os.path.join(wav_dir, filename) - if os.path.exists(wav_file): - speaker_id = 'speaker_' + str(i).zfill(4) - utterance_id = filename.replace('.wav', '') - utterance_id = utterance_id.replace(' ', '_') - utterance_id = speaker_id + '-' + utterance_id + # wav.scp file + wav_file_unix = wav_file.replace('\\', '/') + wav_file_unix = wav_file_unix.replace('c:/', '/mnt/c/') - # wav.scp file - wav_file_unix = wav_file.replace('\\', '/') - wav_file_unix = wav_file_unix.replace('c:/', '/mnt/c/') + f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix)) - f_wav_scp.write('{0} {1}\n'.format(utterance_id, wav_file_unix)) + # text file + word = df['word'][i].lower() + f_text_file.write('{0}\t{1}\n'.format(utterance_id, word)) - # text file - #word = words[i].lower() - word = df['word'][i].lower() - f_text_file.write('{0}\t{1}\n'.format(utterance_id, word)) - - # utt2spk - f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id)) + # utt2spk + f_utt2spk.write('{0} {1}\n'.format(utterance_id, speaker_id)) f_wav_scp.close() f_text_file.close() @@ -268,8 +231,7 @@ if make_kaldi_data_files: ## ======================= make lexicon txt which is used by Kaldi ======================= if make_kaldi_lexicon_txt: - #lexicon_txt = os.path.join(kaldi_dict_dir, 'lexicon.txt') - option_num = 5 + option_num = 6 # remove previous file. 
if os.path.exists(lexicon_txt): @@ -277,35 +239,6 @@ if make_kaldi_lexicon_txt: lexiconp_txt = lexicon_txt.replace('lexicon.txt', 'lexiconp.txt') if os.path.exists(lexiconp_txt): os.remove(lexiconp_txt) - - #mapping = convert_xsampa2ipa.load_converter('xsampa', 'ipa', ipa_xsampa_converter_dir) - - #with open(csvfile, encoding="utf-8") as fin: - # lines = csv.reader(fin, delimiter=';', lineterminator="\n", skipinitialspace=True) - # next(lines, None) # skip the headers - - # filenames = [] - # words = [] - # pronunciations = [] - # p = [] - # for line in lines: - # if line[1] is not '' and len(line) > 5: - # filenames.append(line[0]) - # words.append(line[1]) - # pron_xsampa = line[3] - # pron_ipa = convert_xsampa2ipa.conversion('xsampa', 'ipa', mapping, pron_xsampa) - # pron_ipa = pron_ipa.replace('ː', ':') - - # # adjust to phones used in the acoustic model. - # pronunciations.append(pron_ipa) - - ## check if all phones are in the phonelist of the acoustic model. - ##'y', 'b', 'ɾ', 'u', 'ɔ:', 'ø', 't', 'œ', 'n', 'ɒ', 'ɐ', 'f', 'o', 'k', 'x', 'ɡ', 'v', 's', 'ɛ:', 'ɪ:', 'ɑ', 'ɛ', 'a', 'd', 'z', 'ɪ', 'ɔ', 'l', 'i:', 'm', 'p', 'a:', 'i', 'e', 'j', 'o:', 'ʁ', 'h', ':', 'e:', 'ə', 'æ', 'χ', 'w', 'r', 'ə:', 'sp', 'ʊ', 'u:', 'ŋ' - - #filenames = np.array(filenames) - #words = np.array(words) - #wordlist = np.unique(words) - #pronunciations = np.array(pronunciations) # output lexicon.txt f_lexicon_txt = open(lexicon_txt, 'a', encoding="utf-8", newline='\n') @@ -313,52 +246,29 @@ if make_kaldi_lexicon_txt: for word in word_list: # pronunciation variant of the target word. - #pronvar_ = pronunciations[words == word] pronunciation_variants = df['ipa'][df['word'].str.match(word)] - #pronunciation_variants = np.unique(pronunciation_variants) - # remove '' - #pronvar_ = np.delete(pronvar_, np.where(pronvar_=='')) c = Counter(pronunciation_variants) total_num = sum(c.values()) + #with open(result_dir + '\\' + word + '.csv', 'a', encoding="utf-8", newline='\n') as f: + # for key in c.keys(): + # f.write("{0},{1}\n".format(key,c[key])) + for key, value in c.most_common(option_num): - #print('{0}\t{1}\t{2}\t{3}'.format(word, key, value, total_num)) - key = key.replace('æ', 'ɛ') - key = key.replace('ɐ', 'a') - key = key.replace('ɑ', 'a') - key = key.replace('ɾ', 'r') - key = key.replace('ɹ', 'r') # ??? - key = key.replace('ʁ', 'r') - key = key.replace('ʀ', 'r') # ??? - key = key.replace('ʊ', 'u') - key = key.replace('χ', 'x') - #print('-->{0}\t{1}\t{2}\t{3}\n'.format(word, key, value, total_num)) - # make possible pronounciation variant list. 
- pronvar_list = [key] - while 'ø:' in ' '.join(pronvar_list) or 'œ' in ' '.join(pronvar_list) or 'ɒ' in ' '.join(pronvar_list): - pronvar_list_ = [] - for p in pronvar_list: - if 'ø:' in p: - pronvar_list_.append(p.replace('ø:', 'ö')) - pronvar_list_.append(p.replace('ø:', 'ö:')) - if 'œ' in p: - pronvar_list_.append(p.replace('œ', 'ɔ̈')) - pronvar_list_.append(p.replace('œ', 'ɔ̈:')) - if 'ɒ' in p: - pronvar_list_.append(p.replace('ɒ', 'ɔ̈')) - pronvar_list_.append(p.replace('ɒ', 'ɔ̈:')) - pronvar_list = np.unique(pronvar_list_) + pronvar_list = am_func.fame_pronunciation_variant(key) for pronvar_ in pronvar_list: split_ipa = convert_phone_set.split_fame_ipa(pronvar_) pronvar_out = ' '.join(split_ipa) pronvar_list_all.append([word, pronvar_out]) - - # output + pronvar_list_all = np.array(pronvar_list_all) pronvar_list_all = np.unique(pronvar_list_all, axis=0) + + + # output f_lexicon_txt.write('\tSPN\n') for line in pronvar_list_all: f_lexicon_txt.write('{0}\t{1}\n'.format(line[0].lower(), line[1])) @@ -368,9 +278,8 @@ if make_kaldi_lexicon_txt: ## ======================= load kaldi forced alignment result ======================= if load_forced_alignment_kaldi: - kaldi_work_dir = r'C:\OneDrive\WSL\kaldi-trunk\egs\fame\s5' - phones_txt = os.path.join(kaldi_work_dir, 'data', 'lang', 'phones.txt') - merged_alignment_txt = os.path.join(kaldi_work_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt') + phones_txt = os.path.join(default.kaldi_dir, 'data', 'lang', 'phones.txt') + merged_alignment_txt = os.path.join(default.kaldi_dir, 'exp', 'tri1_alignme', 'merged_alignment.txt') #filenames = np.load(data_dir + '\\filenames.npy') #words = np.load(data_dir + '\\words.npy') @@ -380,98 +289,72 @@ if load_forced_alignment_kaldi: # load the mapping between phones and ids. with open(phones_txt, 'r', encoding="utf-8") as f: - mappings = f.read().split('\n') + mapping_phone2id = f.read().split('\n') phones = [] - phone_ids = [] - for m in mappings: + phone_ids = [] # ID of phones + for m in mapping_phone2id: m = m.split(' ') if len(m) > 1: phones.append(m[0]) phone_ids.append(int(m[1])) + + # load the result of FA. 
with open(merged_alignment_txt, 'r') as f: lines = f.read() lines = lines.split('\n') - fa_filenames = [] - fa_pronunciations = [] - filename_ = '' - pron = [] + predictions = pd.DataFrame({'filename': [''], + 'word': [''], + 'xsampa': [''], + 'ipa': [''], + 'famehtk': [''], + 'prediction': ['']}) + #fa_filenames = [] + #fa_pronunciations = [] + utterance_id_ = '' + pronunciation = [] for line in lines: line = line.split(' ') if len(line) == 5: - filename = line[0] - if filename == filename_: + utterance_id = line[0] + if utterance_id == utterance_id_: phone_id = int(line[4]) #if not phone_id == 1: - phone = phones[phone_ids.index(phone_id)] - pron_ = re.sub(r'_[A-Z]', '', phone) - if not pron_ == 'SIL': - pron.append(pron_) + phone_ = phones[phone_ids.index(phone_id)] + phone = re.sub(r'_[A-Z]', '', phone_) + if not phone == 'SIL': + pronunciation.append(phone) else: - fa_filenames.append(re.sub(r'speaker_[0-9]{4}-', '', filename)) - fa_pronunciations.append(' '.join(pron)) - pron = [] + filename = re.sub(r'speaker_[0-9]{4}-', '', utterance_id_) + prediction = ''.join(pronunciation) + df_ = df[df['filename'].str.match(filename)] + df_idx = df_.index[0] + prediction_ = pd.Series([#filename, + #df_['word'][df_idx], + #df_['xsampa'][df_idx], + #df_['ipa'][df_idx], + #df_['famehtk'][df_idx], + df_.iloc[0,1], + df_.iloc[0,3], + df_.iloc[0,4], + df_.iloc[0,2], + df_.iloc[0,0], + prediction], + index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], + name=df_idx) + predictions = predictions.append(prediction_) + #fa_filenames.append() + #fa_pronunciations.append(' '.join(pronunciation)) + pronunciation = [] - filename_ = filename - - # correct or not. - #for filename, fa_pronunciation in zip(fa_filenames, fa_pronunciations): - - - # predictions = pd.DataFrame({'filename': [''], - # 'word': [''], - # 'xsampa': [''], - # 'ipa': [''], - # 'famehtk': [''], - # 'prediction': ['']}) - # for i, filename in enumerate(df['filename']): - # print('=== {0}/{1} ==='.format(i, len(df))) - # if (i in df['filename'].keys()) and (isinstance(df['filename'][i], str)): - # wav_file = os.path.join(wav_dir, filename) - # if os.path.exists(wav_file): - # word = df['word'][i] - # WORD = word.upper() - # fa_file = os.path.join(fa_dir, filename.replace('.wav', '.txt') + hmm_num_str) - - # #if not os.path.exists(fa_file): - # # make label file. - # label_file = os.path.join(wav_dir, filename.replace('.wav', '.lab')) - # with open(label_file, 'w') as f: - # lines = f.write(WORD) - - # htk_dict_file = os.path.join(htk_dict_dir, word + '.dic') - - # pyhtk.doHVite(wav_file, label_file, htk_dict_file, fa_file, default.config_hvite, - # default.phonelist, acoustic_model) - # os.remove(label_file) - - - # prediction = am_func.read_fileFA(fa_file) - # #predictions.append(prediction) - - # print('{0}: {1} -> {2}'.format(WORD, df['famehtk'][i], prediction)) - # else: - # prediction = '' - # #predictions.append('') - # print('!!!!! file not found.') - - # line = pd.Series([df['filename'][i], df['word'][i], df['xsampa'][i], df['ipa'][i], df['famehtk'][i], prediction], index=['filename', 'word', 'xsampa', 'ipa', 'famehtk', 'prediction'], name=i) - # predictions = predictions.append(line) - # else: - # prediction = '' - # #predictions.append('') - # print('!!!!! 
invalid entry.') - - - # #predictions = np.array(predictions) - # #np.save(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.npy'), predictions) - # predictions.to_pickle(os.path.join(result_dir, 'htk', 'predictions_hmm' + hmm_num_str + '.pkl')) + utterance_id_ = utterance_id + predictions.to_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl')) ## ======================= evaluate the result of forced alignment ======================= -if eval_forced_alignment: +if eval_forced_alignment_htk: htk_dict_dir = os.path.join(default.experiments_dir, 'stimmen', 'dic_short') compare_hmm_num = 1 @@ -524,3 +407,81 @@ if eval_forced_alignment: if compare_hmm_num: f_result.close() + +## ======================= evaluate the result of forced alignment of kaldi ======================= +if eval_forced_alignment_kaldi: + result = pd.read_pickle(os.path.join(result_dir, 'kaldi', 'predictions.pkl')) + + f_result = open(os.path.join(result_dir, 'result.csv'), 'w') + f_result.write("word,total,valid,match,[%]\n") + + # load pronunciation variants + with open(lexicon_txt, 'r', encoding="utf-8", newline='\n') as f: + lines = f.read().split('\n')[:-1] + pronunciation_variants_all = [line.split('\t') for line in lines] + + word_list = np.delete(word_list, [0], 0) # remove 'Oog' + for word in word_list: + + # load pronunciation variant of the word. + pronunciation_variants = [] + for line in pronunciation_variants_all: + if line[0] == word.lower(): + pronunciation_variants.append(line[1].replace(' ', '')) + + # see only words which appears in top 3. + result_ = result[result['word'].str.match(word)] + result_tolerant = pd.DataFrame({ + 'filename': [''], + 'word': [''], + 'xsampa': [''], + 'ipa': [''], + 'prediction': [''], + 'match': ['']}) + + for i in range(0, len(result_)): + line = result_.iloc[i] + + # make a list of all possible pronunciation variants of ipa description. + # i.e. possible answers from forced alignment. + ipa = line['ipa'] + pronvar_list = [ipa] + pronvar_list_ = am_func.fame_pronunciation_variant(ipa) + if not pronvar_list_ is None: + pronvar_list += list(pronvar_list_) + + # only focus on pronunciations which can be estimated from ipa. + if len(set(pronvar_list) & set(pronunciation_variants)) > 0: + if line['prediction'] in pronvar_list: + ismatch = True + else: + ismatch = False + + line_df = pd.DataFrame(result_.iloc[i]).T + df_idx = line_df.index[0] + result_tolerant_ = pd.Series([line_df.loc[df_idx, 'filename'], + line_df.loc[df_idx, 'word'], + line_df.loc[df_idx, 'xsampa'], + line_df.loc[df_idx, 'ipa'], + line_df.loc[df_idx, 'prediction'], + ismatch], + index=['filename', 'word', 'xsampa', 'ipa', 'prediction', 'match'], + name=df_idx) + result_tolerant = result_tolerant.append(result_tolerant_) + # remove the first entry (dummy) + result_tolerant = result_tolerant.drop(0, axis=0) + + total_num = len(result_) + valid_num = len(result_tolerant) + match_num = np.sum(result_tolerant['match']) + + print("word '{0}': {1}/{2} ({3:.2f} %) originally {4}".format(word, match_num, valid_num, match_num/valid_num*100, total_num)) + f_result.write("{0},{1},{2},{3},{4}\n".format(word, total_num, valid_num, match_num, match_num/valid_num*100)) + + f_result.close() + ## output confusion matrix + #cm = confusion_matrix(result_['ipa'], result_['prediction']) + + #plt.figure() + #plot_confusion_matrix(cm, classes=pronunciation_variants, normalize=False) + #plt.savefig(result_dir + '\\cm_' + word + '.png')
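
Note on the new helpers in acoustic_model_functions.py: make_fame2ipa_variants() still starts with the leftover debug assignment fame = 'rɛös', so it ignores its argument. Below is a minimal corrected sketch of the intended behaviour; the function name and the substitution table are taken from the patch, while the argument handling and the example are illustrative, not the committed code. Like the patch, it applies each substitution to the original string independently, so combined substitutions (e.g. both r->ɾ and ö->ø:) are not generated.

def make_fame2ipa_variants(fame):
    """Return IPA spellings that fame_pronunciation_variant() could have
    normalized into the given FAME!-style string (duplicates included)."""
    # Inverse of the substitutions applied in fame_pronunciation_variant().
    substitutions = [
        ('ɛ', 'æ'), ('a', 'ɐ'), ('a', 'ɑ'),
        ('r', 'ɾ'), ('r', 'ɹ'), ('r', 'ʁ'), ('r', 'ʀ'),
        ('u', 'ʊ'), ('x', 'χ'),
        ('ö', 'ø:'), ('ö:', 'ø:'),
        ('ɔ̈', 'œ'), ('ɔ̈:', 'œ'), ('ɔ̈', 'ɒ'), ('ɔ̈:', 'ɒ'),
    ]
    ipa = [fame]
    for fame_symbol, ipa_symbol in substitutions:
        # Each variant substitutes one symbol throughout the original string.
        ipa.append(fame.replace(fame_symbol, ipa_symbol))
    return ipa

# Hypothetical example: make_fame2ipa_variants('rös') returns 16 strings such
# as 'ɾös' and 'rø:s'; callers can deduplicate them with np.unique() if needed.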
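
The load_forced_alignment_kaldi block in performance_check.py assumes that merged_alignment.txt lists one phone per line with five space-separated fields, the first being the utterance id and the fifth the integer phone id defined in phones.txt, and that phone symbols carry Kaldi word-position suffixes (_B, _I, _E, _S). The sketch below expresses the same parsing under those assumptions; the helper names load_phone_map and read_kaldi_alignment are illustrative rather than part of the patch, and the suffix regex is anchored at the end of the symbol.

import re

def load_phone_map(phones_txt):
    """Read Kaldi's phones.txt ('symbol id' per line) into {id: symbol}."""
    id2phone = {}
    with open(phones_txt, encoding='utf-8') as f:
        for line in f:
            parts = line.split()
            if len(parts) == 2:
                id2phone[int(parts[1])] = parts[0]
    return id2phone

def read_kaldi_alignment(merged_alignment_txt, id2phone):
    """Return {utterance_id: phone string} with SIL and the word-position
    suffixes removed, mirroring the load_forced_alignment_kaldi block."""
    per_utterance = {}
    with open(merged_alignment_txt, encoding='utf-8') as f:
        for line in f:
            fields = line.split()
            if len(fields) != 5:
                continue
            utterance_id = fields[0]
            phone = re.sub(r'_[A-Z]$', '', id2phone[int(fields[4])])
            if phone != 'SIL':
                per_utterance.setdefault(utterance_id, []).append(phone)
    return {utt: ''.join(phones) for utt, phones in per_utterance.items()}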