From 2004399179dd9eb0473da8fb4931e2a9714f5a4c Mon Sep 17 00:00:00 2001
From: yemaozi88 <428968@gmail.com>
Date: Mon, 22 Apr 2019 00:59:53 +0200
Subject: [PATCH] novoapi_functions.py is adjusted to use convert_phoneset.py.

---
 .vs/acoustic_model/v15/.suo         | Bin 99328 -> 89600 bytes
 acoustic_model/convert_phoneset.py  |  18 +-
 acoustic_model/fame_functions.py    |   3 +
 acoustic_model/fame_hmm.py          | 257 +++++++++++++++++++++-------
 acoustic_model/novoapi_functions.py | 137 +++++++--------
 5 files changed, 283 insertions(+), 132 deletions(-)

diff --git a/.vs/acoustic_model/v15/.suo b/.vs/acoustic_model/v15/.suo
index 7b01e602d9215f0fe51c4dd3c97b53655594e653..76815aa7195203ac4d36571c539e36db2df85334 100644
Binary files a/.vs/acoustic_model/v15/.suo and b/.vs/acoustic_model/v15/.suo differ
zuNCq*q$vvbY_Q1|t!Xc!nMJT&wkr3<;W&lzjgTuKZ@me(RnW(xPi4rTgY1U<9r2pB z4EbdVnl=+=-h{R9Iq3Ib)1L%=8T#CS?`!80k-#7fw0Q^RQ4GSzsBk0|*O|~)WTG+T zUxz$^hHk^w-vPtkw4-sz`wKCkC|`4nre&i1lM&vN;-FDz6Cd1#g?>2n0i~KIJ2G7) z^yOIFJIhVPsxjapD*P7mHdOdx9Oee}ODeqg0fkUbgMJjGl=~f1n(HyZbsjf3dv#aM zPr}C?r=>Yk3eq!e8B1Sf`=&MBue^~{4|kym4+HEs$^h5Kdx{(>>DlQ8h%;krYZ{!h zYv;v`a8}Q)Uy|y`FUU*F&Ks0f=*S$DmXe-6C@&=?bx?XHO5~+MDcOa2msvdDd}$;7 zL9b&kCjWgj&p#i@A!oa>enD@#LB8E6-N4ry;@geV%@C;ZXaDdJZX?Y!XaDB|Mf*=3 z%)I56b1m~~SDJ7s5zf*5`>Jil6u}2J^}dGtt=zTsHEdJgZS4*hX8&jxs`XuHC<03U zH66$L@{;>YsOPm!Ry&F*Mowa$Ewt@h3pbg_&KFuosg}{?qVHq2wOd!u;m5x{m1D~o zb;3kheXcwmzOB&XDiu`MPE;6Pjp^@wdbZ5=rBrz_J%*L4tI|#MW>>jZ_dW?N5-+ub z4|b<0QY}5ugS`GdRa7F)+SHFu@!qpx;W^ki)!}}19{xkq$@4EL9IIoCDXH75A>^>I@9U?h0z;kMFTC=E-CQJQvH93nCRoD<} zRQh0Y$zUwz!Gpu`*>FXv6fL7TQoU@V4cEu7Zdc$JFFTiul?%_0U-tQ?w0szst{$$S zCO)*%q|OgTzuw59&9qXwbcdQZ3|?KH3x~s@g=$tV1_P-K6_iHUVh*gMmX^H|P5K`Xe_P+_q;x(^a+)-(~pqZ67edf8g$+_tmx! znAVABhZ)5mD!2z_8K1|*mkq_WXH^Or)fgKUsW*mGg3Pq-GN#_?#?h~asmYZTDgjq* z&xdD*JK%%nQmVPuEWA~XNyI;=4moHmh$+5NYtwKkR=mOt2bHLY^M#4N>FwZ)TcV(j zP;C>aK^-Ze{s+&GBr~bdR78hqCHSDLly<1r655V5rg|^lGTo}qmQc{alrkzb$TV7@ zcK?jb>cz2$kuQq3vw85{tJL7Jv=IOB2~#u1k)^{7{0|#PS@m88<*Jn_6!o2Ofo?5J zq9oOrhNddUQ=HPr(RyF}@!PBP3X1cxbH4gyJoP?^xm!tUs*T3*>H$%zZUP3%FcBa9 zY$Q3P>sI 1] + multi_character_phones.sort(key=len, reverse=True) + return multi_character_phones \ No newline at end of file diff --git a/acoustic_model/fame_functions.py b/acoustic_model/fame_functions.py index 77fd931..dbb8487 100644 --- a/acoustic_model/fame_functions.py +++ b/acoustic_model/fame_functions.py @@ -352,6 +352,9 @@ def fix_lexicon(lexicon_file): return +#def add_sp_to_lexicon(lexicon_file): + + def word2htk(word): return ''.join([fame_asr.translation_key_word2htk.get(i, i) for i in word]) diff --git a/acoustic_model/fame_hmm.py b/acoustic_model/fame_hmm.py index 723448d..6420999 100644 --- a/acoustic_model/fame_hmm.py +++ b/acoustic_model/fame_hmm.py @@ -16,50 +16,53 @@ import defaultfiles as default sys.path.append(default.toolbox_dir) import file_handling as fh from htk import pyhtk +#from scripts import run_command ## ======================= user define ======================= # procedure +combine_all = 1 + make_lexicon = 0 make_label = 0 # it takes roughly 4800 sec on Surface pro 2. make_mlf = 0 extract_features = 0 -flat_start = 0 -train_monophone_without_sp = 0 -add_sp = 0 -train_monophone_with_re_aligned_mlf = 0 +flat_start = 1 +train_monophone_without_sp = 1 +add_sp = 1 +train_monophone_with_re_aligned_mlf = 1 +increase_mixture = 1 train_triphone = 0 -train_triphone_tied = 1 +train_triphone_tied = 0 # pre-defined values. 
 dataset_list = ['devel', 'test', 'train']
-feature_size = 39
+feature_size = 30
 improvement_threshold = 0.3
 
-hmmdefs_name = 'hmmdefs'
-proto_name = 'proto'
-
 lexicon_asr = os.path.join(default.fame_dir, 'lexicon', 'lex.asr')
 lexicon_oov = os.path.join(default.fame_dir, 'lexicon', 'lex.oov')
 
 config_dir = os.path.join(default.htk_dir, 'config')
 phonelist_full_txt = os.path.join(config_dir, 'phonelist_full.txt')
-tree_hed = os.path.join(config_dir, 'tree.hed')
-quest_hed = os.path.join(config_dir, 'quests.hed')
+tree_hed = os.path.join(config_dir, 'tree.hed')
+quests_hed = os.path.join(config_dir, 'quests.hed')
 
 model_dir = os.path.join(default.htk_dir, 'model')
 model_mono0_dir = os.path.join(model_dir, 'mono0')
 model_mono1_dir = os.path.join(model_dir, 'mono1')
 model_mono1sp_dir = os.path.join(model_dir, 'mono1sp')
 model_mono1sp2_dir = os.path.join(model_dir, 'mono1sp2')
-model_tri1_dir = os.path.join(model_dir, 'tri1')
+model_tri1_dir = os.path.join(model_dir, 'tri1')
+model_tri1tied_dir = os.path.join(model_dir, 'tri1tied')
 
 # directories / files to be made.
 lexicon_dir = os.path.join(default.htk_dir, 'lexicon')
 lexicon_htk_asr = os.path.join(lexicon_dir, 'lex.htk_asr')
 lexicon_htk_oov = os.path.join(lexicon_dir, 'lex.htk_oov')
 lexicon_htk = os.path.join(lexicon_dir, 'lex.htk')
+lexicon_htk_with_sp = os.path.join(lexicon_dir, 'lex_with_sp.htk')
 lexicon_htk_triphone = os.path.join(lexicon_dir, 'lex_triphone.htk')
 
 feature_dir = os.path.join(default.htk_dir, 'mfc')
@@ -71,10 +74,20 @@ fh.make_new_directory(label_dir, existing_dir='leave')
 
 ## training
-hcompv_scp_train = os.path.join(tmp_dir, 'train.scp')
-mlf_file_train = os.path.join(label_dir, 'train_phone.mlf')
-mlf_file_train_with_sp = os.path.join(label_dir, 'train_phone_with_sp.mlf')
-mlf_file_train_aligned = os.path.join(label_dir, 'train_phone_aligned.mlf')
+if combine_all:
+    hcompv_scp_train = os.path.join(tmp_dir, 'all.scp')
+    mlf_file_train = os.path.join(label_dir, 'all_phone.mlf')
+    mlf_file_train_word = os.path.join(label_dir, 'all_word.mlf')
+    mlf_file_train_with_sp = os.path.join(label_dir, 'all_phone_with_sp.mlf')
+    mlf_file_train_aligned = os.path.join(label_dir, 'all_phone_aligned.mlf')
+    triphone_mlf = os.path.join(label_dir, 'all_triphone.mlf')
+else:
+    hcompv_scp_train = os.path.join(tmp_dir, 'train.scp')
+    mlf_file_train = os.path.join(label_dir, 'train_phone.mlf')
+    mlf_file_train_word = os.path.join(label_dir, 'train_word.mlf')
+    mlf_file_train_with_sp = os.path.join(label_dir, 'train_phone_with_sp.mlf')
+    mlf_file_train_aligned = os.path.join(label_dir, 'train_phone_aligned.mlf')
+    triphone_mlf = os.path.join(label_dir, 'train_triphone.mlf')
 hcompv_scp_train_updated = hcompv_scp_train.replace('.scp', '_updated.scp')
 
 ## testing
@@ -104,19 +117,18 @@ if make_lexicon:
     print('>>> fixing the lexicon...')
     fame_functions.fix_lexicon(lexicon_htk)
 
-    ## add sp to the end of each line.
-    #print('>>> adding sp...')
-    #with open(lexicon_htk) as f:
-    #    lines = f.read().split('\n')
-    #lines = [line + ' sp' for line in lines]
-    #with open(lexicon_htk_with_sp, 'wb') as f:
-    #    f.write(bytes('\n'.join(lines), 'ascii'))
+    ## adding sp to the lexicon for HTK.
+    print('>>> adding sp to the lexicon...')
+    with open(lexicon_htk) as f:
+        lines = f.read().split('\n')
+    with open(lexicon_htk_with_sp, 'wb') as f:
+        f.write(bytes(' sp\n'.join(lines), 'ascii'))
 
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
 ## intialize the instance for HTK.
-chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_htk, feature_size)
+chtk = pyhtk.HTK(config_dir, fame_asr.phoneset_htk, lexicon_htk_with_sp, feature_size)
 
 
 ## ======================= make label files =======================
@@ -152,7 +164,7 @@ if make_label:
                 shutil.move(dictionary_file, os.path.join(label_dir_, filename + '.dic'))
 
                 label_file = os.path.join(label_dir_, filename + '.lab')
-                chtk.create_label_file(sentence_htk, label_file)
+                chtk.make_label_file(sentence_htk, label_file)
             else:
                 os.remove(dictionary_file)
 
@@ -174,7 +186,6 @@ if make_mlf:
         os.remove(empty_dic_file)
 
     for dataset in dataset_list:
-        #wav_dir_ = os.path.join(default.fame_dir, 'fame', 'wav', dataset)
         feature_dir_ = os.path.join(feature_dir, dataset)
         label_dir_ = os.path.join(label_dir, dataset)
         mlf_word = os.path.join(label_dir, dataset + '_word.mlf')
@@ -183,11 +194,11 @@ if make_mlf:
         print(">>> generating a word level mlf file for {}...".format(dataset))
         chtk.label2mlf(label_dir_, mlf_word)
 
+
         print(">>> generating a phone level mlf file for {}...".format(dataset))
         chtk.mlf_word2phone(mlf_phone, mlf_word, with_sp=False)
         chtk.mlf_word2phone(mlf_phone_with_sp, mlf_word, with_sp=True)
 
-
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
@@ -197,7 +208,7 @@ if extract_features:
         timer_start = time.time()
 
         print('==== extract features on dataset {} ===='.format(dataset))
-        wav_dir_ = os.path.join(default.fame_dir, 'fame', 'wav', dataset)
+        wav_dir_ = os.path.join(default.fame_dir, 'fame', 'wav', dataset)
         label_dir_ = os.path.join(label_dir, dataset)
         feature_dir_ = os.path.join(feature_dir, dataset)
         fh.make_new_directory(feature_dir_, existing_dir='delete')
@@ -217,8 +228,8 @@ if extract_features:
                         + os.path.join(feature_dir_, os.path.basename(lab_file).replace('.lab', '.mfc'))
                         for lab_file in lab_list]
 
-        if os.path.exists(empty_mfc_file):
-            os.remove(empty_mfc_file)
+        #if os.path.exists(empty_mfc_file):
+        #    os.remove(empty_mfc_file)
         with open(hcopy_scp.name, 'wb') as f:
             f.write(bytes('\n'.join(feature_list), 'ascii'))
 
@@ -235,9 +246,64 @@ if extract_features:
         with open(hcompv_scp, 'wb') as f:
             f.write(bytes('\n'.join(mfc_list) + '\n', 'ascii'))
 
+    print(">>> extracting features on stimmen...")
+    chtk.wav2mfc(os.path.join(htk_stimmen_dir, 'hcopy.scp'))
+
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
+## ======================= combine datasets =======================
+if combine_all:
+    # script files.
+    fh.concatenate(
+        os.path.join(tmp_dir, 'devel.scp'),
+        os.path.join(tmp_dir, 'test.scp'),
+        hcompv_scp_train
+        )
+    fh.concatenate(
+        hcompv_scp_train,
+        os.path.join(tmp_dir, 'train.scp'),
+        hcompv_scp_train
+        )
+
+    # phone level mlfs.
+    fh.concatenate(
+        os.path.join(label_dir, 'devel_phone.mlf'),
+        os.path.join(label_dir, 'test_phone.mlf'),
+        mlf_file_train
+        )
+    fh.concatenate(
+        mlf_file_train,
+        os.path.join(label_dir, 'train_phone.mlf'),
+        mlf_file_train
+        )
+
+    # phone level mlfs with sp.
+    fh.concatenate(
+        os.path.join(label_dir, 'devel_phone_with_sp.mlf'),
+        os.path.join(label_dir, 'test_phone_with_sp.mlf'),
+        mlf_file_train_with_sp
+        )
+    fh.concatenate(
+        mlf_file_train_with_sp,
+        os.path.join(label_dir, 'train_phone_with_sp.mlf'),
+        mlf_file_train_with_sp
+        )
+
+
+    # word level mlfs.
+    fh.concatenate(
+        os.path.join(label_dir, 'devel_word.mlf'),
+        os.path.join(label_dir, 'test_word.mlf'),
+        mlf_file_train_word
+        )
+    fh.concatenate(
+        mlf_file_train_word,
+        os.path.join(label_dir, 'train_word.mlf'),
+        mlf_file_train_word
+        )
+
+
 ## ======================= flat start monophones =======================
 if flat_start:
     timer_start = time.time()
@@ -246,17 +312,14 @@ if flat_start:
 
     chtk.flat_start(hcompv_scp_train, model_mono0_dir)
 
-    # create macros.
+    # make macros.
     vFloors = os.path.join(model_mono0_dir, 'vFloors')
     if os.path.exists(vFloors):
-        chtk.create_macros(vFloors)
+        chtk.make_macros(vFloors)
 
     # allocate mean & variance to all phones in the phone list
     print('>>> allocating mean & variance to all phones in the phone list...')
-    chtk.create_hmmdefs(
-        os.path.join(model_mono0_dir, proto_name),
-        os.path.join(model_mono0_dir, 'hmmdefs')
-        )
+    chtk.make_hmmdefs(model_mono0_dir)
 
     print("elapsed time: {}".format(time.time() - timer_start))
 
@@ -320,8 +383,9 @@ if train_monophone_with_re_aligned_mlf:
             os.path.join(modeln_dir, 'macros'),
             os.path.join(modeln_dir, 'hmmdefs'),
             mlf_file_train_aligned,
-            os.path.join(label_dir, 'train_word.mlf'),
+            mlf_file_train_word,
             hcompv_scp_train)
+        chtk.fix_mlf(mlf_file_train_aligned)
 
     print('>>> updating the script file... ')
     chtk.update_script_file(
@@ -349,24 +413,55 @@
 
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
-## ======================= train triphone =======================
-if train_triphone:
-    print('==== traina triphone model ====')
+## ======================= increase mixture =======================
+if increase_mixture:
+    print('==== increase mixture ====')
     timer_start = time.time()
+    for nmix in [2, 4, 8, 16]:
+        if nmix == 2:
+            modeln_dir_ = model_mono1sp2_dir
+        else:
+            modeln_dir_ = os.path.join(model_dir, 'mono'+str(nmix_))
+        modeln_dir = os.path.join(model_dir, 'mono'+str(nmix))
 
-    triphonelist_txt = os.path.join(config_dir, 'triphonelist.txt')
-    triphone_mlf = os.path.join(default.htk_dir, 'label', 'train_triphone.mlf')
+        print('mixture: {}'.format(nmix))
+        fh.make_new_directory(modeln_dir, existing_dir='delete')
+        niter = chtk.get_niter_max(modeln_dir_)
+        chtk.increase_mixture(
+            os.path.join(modeln_dir_, 'iter'+str(niter), 'hmmdefs'),
+            nmix,
+            os.path.join(modeln_dir, 'iter0'),
+            model_type='monophone_with_sp')
+        shutil.copy2(os.path.join(modeln_dir_, 'iter'+str(niter), 'macros'),
+                     os.path.join(modeln_dir, 'iter0', 'macros'))
 
-    print('>>> making triphone list... ')
-    chtk.make_triphonelist(
-        triphonelist_txt,
-        triphone_mlf,
-        mlf_file_train_aligned)
+        #improvement_threshold = -10
+        niter = chtk.re_estimation_until_saturated(
+            modeln_dir,
+            os.path.join(modeln_dir_, 'iter0'),
+            improvement_threshold,
+            hcompv_scp_train_updated,
+            os.path.join(htk_stimmen_dir, 'mfc'),
+            'mfc',
+            os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
+            mlf_file=mlf_file_train_aligned,
+            lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
+            model_type='monophone_with_sp'
+            )
+        nmix_ = nmix
 
-    print('>>> making triphone header... ')
-    chtk.make_tri_hed(
-        os.path.join(config_dir, 'mktri.hed')
-        )
+    print("elapsed time: {}".format(time.time() - timer_start))
+
+
+## ======================= train triphone =======================
+print('>>> making triphone list... ')
+chtk.make_triphonelist(
+    mlf_file_train_aligned,
+    triphone_mlf)
+
+if train_triphone:
+    print('==== train triphone model ====')
+    timer_start = time.time()
 
     print('>>> init triphone model... ')
     niter = chtk.get_niter_max(model_mono1sp2_dir)
@@ -377,8 +472,8 @@ if train_triphone:
         )
 
     print('>>> re-estimation... ')
-    # I wanted to train until satulated:
-    # #niter = chtk.re_estimation_until_saturated(
+    ## I wanted to train until saturated:
+    #niter = chtk.re_estimation_until_saturated(
     #    model_tri1_dir,
     #    os.path.join(model_tri1_dir, 'iter0'),
     #    improvement_threshold,
     #    hcompv_scp_train_updated,
     #    os.path.join(htk_stimmen_dir, 'mfc'),
     #    'mfc',
     #    os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
     #    mlf_file=mlf_file_train_aligned,
     #    lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
     #    model_type='triphone'
     #    )
     #
     # but because the data size is limited, some triphone cannot be trained and received the error:
     # ERROR [+8231] GetHCIModel: Cannot find hmm [i:-]r[+???]
     # therefore only two times re-estimation is performed.
     output_dir = model_tri1_dir
-
     for niter in range(1, 4):
         hmm_n = 'iter' + str(niter)
         hmm_n_pre = 'iter' + str(niter-1)
@@ -414,18 +508,59 @@
     print("elapsed time: {}".format(time.time() - timer_start))
 
 
-## ======================= train triphone =======================
+## ======================= train tied-state triphones =======================
 if train_triphone_tied:
-    print('==== traina tied-state triphone ====')
+    print('==== train tied-state triphones ====')
     timer_start = time.time()
 
     print('>>> making lexicon for triphone... ')
-    chtk.make_triphone_full(phonelist_full_txt, lexicon_htk_triphone)
+    chtk.make_lexicon_triphone(phonelist_full_txt, lexicon_htk_triphone)
+    chtk.combine_phonelists(phonelist_full_txt)
 
-    print('>>> making headers... ')
-    chtk.make_tree_header(tree_hed)
-    fame_phonetics.make_quests_hed(quest_hed)
+    print('>>> making a tree header... ')
+    fame_phonetics.make_quests_hed(quests_hed)
+    stats = os.path.join(r'c:\OneDrive\Research\rug\experiments\acoustic_model\fame\htk\model\tri1\iter3', 'stats')
+    chtk.make_tree_header(tree_hed, quests_hed, stats, config_dir)
 
-    print("elapsed time: {}".format(time.time() - timer_start))
+    print('>>> init triphone model... ')
+    niter = chtk.get_niter_max(model_tri1_dir)
+    fh.make_new_directory(os.path.join(model_tri1tied_dir, 'iter0'), existing_dir='leave')
+    chtk.init_triphone(
+        os.path.join(model_tri1_dir, 'iter'+str(niter)),
+        os.path.join(model_tri1tied_dir, 'iter0'),
+        tied=True)
 
+    # I wanted to train until saturated:
+    #niter = chtk.re_estimation_until_saturated(
+    #    model_tri1tied_dir,
+    #    os.path.join(model_tri1tied_dir, 'iter0'),
+    #    improvement_threshold,
+    #    hcompv_scp_train_updated,
+    #    os.path.join(htk_stimmen_dir, 'mfc'),
+    #    'mfc',
+    #    os.path.join(htk_stimmen_dir, 'word_lattice.ltc'),
+    #    mlf_file=triphone_mlf,
+    #    lexicon=os.path.join(htk_stimmen_dir, 'lexicon_recognition.dic'),
+    #    model_type='triphone'
+    #    )
+    #
+    # but because the data size is limited, some triphones cannot be trained and the error is received:
+    # ERROR [+8231] GetHCIModel: Cannot find hmm [i:-]r[+???]
+    # therefore re-estimation is performed only 3 times.
+    output_dir = model_tri1tied_dir
+    for niter in range(1, 4):
+        hmm_n = 'iter' + str(niter)
+        hmm_n_pre = 'iter' + str(niter-1)
+        _modeln_dir = os.path.join(output_dir, hmm_n)
+        _modeln_dir_pre = os.path.join(output_dir, hmm_n_pre)
+
+        fh.make_new_directory(_modeln_dir, 'leave')
+        chtk.re_estimation(
+            os.path.join(_modeln_dir_pre, 'hmmdefs'),
+            _modeln_dir,
+            hcompv_scp_train_updated,
+            mlf_file=triphone_mlf,
+            macros=os.path.join(_modeln_dir_pre, 'macros'),
+            model_type='triphone')
+
+    print("elapsed time: {}".format(time.time() - timer_start))
\ No newline at end of file
diff --git a/acoustic_model/novoapi_functions.py b/acoustic_model/novoapi_functions.py
index 3cd502e..120413e 100644
--- a/acoustic_model/novoapi_functions.py
+++ b/acoustic_model/novoapi_functions.py
@@ -1,20 +1,19 @@
 ## this script should be used only by Aki Kunikoshi.
+import os
+
 import numpy as np
+import pandas as pd
 import argparse
 import json
 
 from novoapi.backend import session
 
-import os
-#os.chdir(r'C:\Users\Aki\source\repos\acoustic_model\acoustic_model')
 import defaultfiles as default
+import convert_phoneset
 
 
-def load_phonset():
-    translation_key_ipa2novo70 = dict()
-    translation_key_novo702ipa = dict()
-
+def load_novo70_phoneset():
     #phonelist_novo70_ = pd.ExcelFile(default.phonelist_novo70_xlsx)
     #df = pd.read_excel(phonelist_novo70_, 'list')
     ## *_simple includes columns which has only one phone in.
@@ -23,21 +22,23 @@
     #    print('{0}:{1}'.format(ipa, novo70))
     #    translation_key[ipa] = novo70
     #phonelist_novo70 = np.unique(list(df['novo70_simple']))
+    novo70_phoneset = pd.read_csv(default.novo70_phoneset, delimiter='\t', header=None, encoding="utf-8")
+    novo70_phoneset.rename(columns={0: 'novo70', 1: 'ipa', 2: 'description'}, inplace=True)
 
-    phoneset_ipa = []
-    phoneset_novo70 = []
-    with open(default.novo70_phoneset, "rt", encoding="utf-8") as fin:
-        lines = fin.read()
-        lines = lines.split('\n')
-        for line in lines:
-            words = line.split('\t')
-            if len(words) > 1:
-                novo70 = words[0]
-                ipa = words[1]
-                phoneset_ipa.append(ipa)
-                phoneset_novo70.append(novo70)
-                translation_key_ipa2novo70[ipa] = novo70
-                translation_key_novo702ipa[novo70] = ipa
+    #phoneset_ipa = []
+    #phoneset_novo70 = []
+    #with open(default.novo70_phoneset, "rt", encoding="utf-8") as fin:
+    #    lines = fin.read()
+    #    lines = lines.split('\n')
+    #    for line in lines:
+    #        words = line.split('\t')
+    #        if len(words) > 1:
+    #            novo70 = words[0]
+    #            ipa = words[1]
+    #            phoneset_ipa.append(ipa)
+    #            phoneset_novo70.append(novo70)
+    #            translation_key_ipa2novo70[ipa] = novo70
+    #            translation_key_novo702ipa[novo70] = ipa
 
     # As per Nederlandse phoneset_aki.xlsx recieved from David
     # [ɔː] oh / ohr
     # from ipa->novo70, only oh is used.
@@ -47,15 +48,26 @@
     # [ɛː] eh
     # [w] wv in IPA written as ʋ.
     extra_ipa = ['ɔː', 'ɪː', 'iː', 'œː', 'ɛː', 'ʋ']
-    extra_novo70 = ['oh', 'ih', 'iy', 'uh', 'eh', 'wv']
-    for ipa, novo70 in zip(extra_ipa, extra_novo70):
-        phoneset_ipa.append(ipa)
-        phoneset_novo70.append(novo70)
+    extra_novo70 = ['oh', 'ih', 'iy', 'uh', 'eh', 'wv']
+
+    phoneset_ipa = list(novo70_phoneset['ipa'])
+    phoneset_ipa.extend(extra_ipa)
+    phoneset_ipa = [i.replace('ː', ':') for i in phoneset_ipa]
+
+    phoneset_novo70 = list(novo70_phoneset['novo70'])
+    phoneset_novo70.extend(extra_novo70)
+    phoneset_novo70 = [i.replace('ː', ':') for i in phoneset_novo70]
+
+    translation_key_ipa2novo70 = dict()
+    translation_key_novo702ipa = dict()
+    for ipa, novo70 in zip(phoneset_ipa, phoneset_novo70):
+        #phoneset_ipa.append(ipa)
+        #phoneset_novo70.append(novo70)
         translation_key_ipa2novo70[ipa] = novo70
         translation_key_novo702ipa[novo70] = ipa
-    translation_key_novo702ipa['ohr'] = 'ɔː'
-    translation_key_novo702ipa['ihr'] = 'ɪː'
+    translation_key_novo702ipa['ohr'] = 'ɔ:'
+    translation_key_novo702ipa['ihr'] = 'ɪ:'
 
     phoneset_ipa = np.unique(phoneset_ipa)
     phoneset_novo70 = np.unique(phoneset_novo70)
@@ -63,25 +75,6 @@
     return phoneset_ipa, phoneset_novo70, translation_key_ipa2novo70, translation_key_novo702ipa
 
 
-def multi_character_tokenize(line, multi_character_tokens):
-    """
-    Tries to match one of the tokens in multi_character_tokens at each position of line,
-    starting at position 0,
-    if so tokenizes and eats that token. Otherwise tokenizes a single character.
-
-    Copied from forced_alignment.convert_phone_set.py
-    """
-    while line != '':
-        for token in multi_character_tokens:
-            if line.startswith(token) and len(token) > 0:
-                yield token
-                line = line[len(token):]
-                break
-        else:
-            yield line[:1]
-            line = line[1:]
-
-
 def split_ipa(line):
     """
     Split a line by IPA phones.
@@ -89,13 +82,16 @@ def split_ipa(line):
     :param string line: one line written in IPA.
     :return string lineSeperated: the line splitted in IPA phone.
     """
+    phoneset_ipa, _, _, _ = load_novo70_phoneset()
+    #multi_character_phones = [i for i in phoneset_ipa if len(i) > 1]
+    #multi_character_phones.sort(key=len, reverse=True)
+    #multi_character_phones = [
+    #    # IPAs in CGN.
+    #    u'ʌu', u'ɛi', u'œy', u'aː', u'eː', u'iː', u'oː', u'øː', u'ɛː', u'œː', u'ɔː', u'ɛ̃ː', u'ɑ̃ː', u'ɔ̃ː', u'œ̃', u'ɪː'
+    #    ]
+    #return [phone for phone in multi_character_tokenize(line.strip(), multi_character_phones)]
 
-    multi_character_phones = [
-        # IPAs in CGN.
-        u'ʌu', u'ɛi', u'œy', u'aː', u'eː', u'iː', u'oː', u'øː', u'ɛː', u'œː', u'ɔː', u'ɛ̃ː', u'ɑ̃ː', u'ɔ̃ː', u'œ̃', u'ɪː'
-        ]
-
-    return [phone for phone in multi_character_tokenize(line.strip(), multi_character_phones)]
+    return convert_phoneset.split_word(line, phoneset_ipa)
 
 
 def split_novo70(line):
@@ -104,30 +100,33 @@ def split_novo70(line):
     :param string line: one line written in novo70.
     :return string lineSeperated: the line splitted by novo70 phones.
     """
-    _, phoneset_novo70, _, _ = load_phonset()
-    multi_character_phones = [p for p in phoneset_novo70 if len(p) > 1]
-    multi_character_phones = sorted(multi_character_phones, key=len, reverse=True)
+    _, phoneset_novo70, _, _ = load_novo70_phoneset()
+    #multi_character_phones = [p for p in phoneset_novo70 if len(p) > 1]
+    #multi_character_phones = sorted(multi_character_phones, key=len, reverse=True)
+    multi_character_phones = convert_phoneset.extract_multi_character_phones(phoneset_novo70)
 
     return ['sp' if phone == ' ' else phone
-            for phone in multi_character_tokenize(line.strip(), multi_character_phones)]
+            for phone in convert_phoneset.multi_character_tokenize(line.strip(), multi_character_phones)]
 
 
-def novo702ipa(tokens):
-    pronunciation = []
-    _, _, _, translation_key = load_phonset()
-    for phone in split_novo70(tokens):
-        pronunciation.append(translation_key.get(phone, phone))
-    return ' '.join(pronunciation)
+def novo702ipa(line):
+    #pronunciation = []
+    _, _, _, translation_key = load_novo70_phoneset()
+    #for phone in split_novo70(tokens):
+    #    pronunciation.append(translation_key.get(phone, phone))
+    #return ' '.join(pronunciation)
+    return ' '.join(convert_phoneset.convert_phoneset(split_novo70(line), translation_key))
 
 
 # numbering of novo70 should be checked.
-def ipa2novo70(tokens):
-    pronunciation = []
-    _, _, translation_key, _ = load_phonset()
-    for phone in split_ipa(tokens):
-        pronunciation.append(translation_key.get(phone, phone))
-    return ' '.join(pronunciation)
-
+def ipa2novo70(line):
+    #pronunciation = []
+    _, _, translation_key, _ = load_novo70_phoneset()
+    #for phone in split_ipa(tokens):
+    #    pronunciation.append(translation_key.get(phone, phone))
+    #return ' '.join(pronunciation)
+    return ' '.join(convert_phoneset.convert_phoneset(split_ipa(line), translation_key))
+
 
 def make_grammar(word, pronunciation_ipa):
     """
@@ -174,6 +173,7 @@ def forced_alignment(wav_file, word, pronunciation_ipa):
     p = argparse.ArgumentParser()
     p.add_argument("--user", default='martijn.wieling')
     p.add_argument("--password", default='xxxxxx')
+    args = p.parse_args()
 
     rec = session.Recognizer(grammar_version="1.0", lang="nl", snodeid=101, user=args.user, password=args.password, keepopen=True) # , modeldir=modeldir)
 
@@ -196,4 +196,5 @@ def result2pronunciation(result, word):
 
 if __name__ == 'main':
     pronunciation_ipa = ['rø:s', 'mɑn', 'mɑntsjə']
-    grammar = make_grammar('reus', pronunciation_ipa)
\ No newline at end of file
+    #grammar = make_grammar('reus', pronunciation_ipa)
+    phoneset_ipa, phoneset_novo70, translation_key_ipa2novo70, translation_key_novo702ipa = load_novo70_phoneset()
\ No newline at end of file
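
A minimal usage sketch of the reworked converters (illustration only, not part of the patch), assuming the repository modules are importable and default.novo70_phoneset points at the tab-separated novo70 phone table that load_novo70_phoneset() reads with pd.read_csv:

    import novoapi_functions

    # IPA -> novo70: split_ipa() tokenizes the string with the phone set from
    # load_novo70_phoneset(), then convert_phoneset.convert_phoneset() applies
    # translation_key_ipa2novo70 phone by phone.
    novo70 = novoapi_functions.ipa2novo70('mɑntsjə')

    # novo70 -> IPA: the inverse mapping via translation_key_novo702ipa; note
    # that split_novo70() maps spaces to the 'sp' (short pause) symbol.
    ipa = novoapi_functions.novo702ipa(novo70)

    print(novo70, '->', ipa)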