From 487b8bee22c3b60c0f6c6d54eea13ae24b9390df Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Tue, 23 Sep 2025 12:51:45 +0800 Subject: [PATCH 01/16] update EC-RAG test case Signed-off-by: Yongbozzz --- EdgeCraftRAG/assets/img/kbadmin_index.png | Bin 0 -> 44330 bytes EdgeCraftRAG/assets/img/kbadmin_kb.png | Bin 0 -> 21912 bytes EdgeCraftRAG/assets/img/kbadmin_type.png | Bin 0 -> 25996 bytes .../docker_compose/intel/gpu/arc/README.md | 88 ++-- .../docker_compose/intel/gpu/arc/compose.yaml | 2 - .../intel/gpu/arc/compose_vllm.yaml | 5 +- .../intel/gpu/arc/compose_vllm_b60.yaml | 186 +++++++++ .../intel/gpu/arc/multi-arc-yaml-generator.sh | 5 +- EdgeCraftRAG/docs/API_Guide.md | 15 + EdgeCraftRAG/docs/Advanced_Setup.md | 21 +- EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md | 20 + EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py | 17 + EdgeCraftRAG/edgecraftrag/api/v1/data.py | 10 +- .../edgecraftrag/api/v1/knowledge_base.py | 294 ++++++++++--- EdgeCraftRAG/edgecraftrag/api/v1/model.py | 7 +- EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py | 66 ++- EdgeCraftRAG/edgecraftrag/api_schema.py | 11 +- EdgeCraftRAG/edgecraftrag/base.py | 19 +- .../edgecraftrag/components/benchmark.py | 4 +- .../edgecraftrag/components/generator.py | 71 +++- .../edgecraftrag/components/indexer.py | 48 ++- .../edgecraftrag/components/knowledge_base.py | 110 ++++- .../edgecraftrag/components/node_parser.py | 20 + .../edgecraftrag/components/pipeline.py | 186 +++++---- .../edgecraftrag/components/postprocessor.py | 2 +- .../components/query_preprocess.py | 59 +-- .../edgecraftrag/components/retriever.py | 152 ++++++- .../edgecraftrag/controllers/compmgr.py | 24 ++ .../controllers/knowledge_basemgr.py | 54 ++- .../edgecraftrag/controllers/pipelinemgr.py | 4 +- EdgeCraftRAG/edgecraftrag/requirements.txt | 12 +- EdgeCraftRAG/edgecraftrag/utils.py | 7 +- EdgeCraftRAG/nginx/nginx-conf-generator.sh | 2 +- EdgeCraftRAG/tests/configs/test_data.json | 3 - .../configs/test_pipeline_ipex_vllm.json | 45 -- 
.../configs/test_pipeline_local_llm.json | 44 -- .../tests/test_compose_vllm_on_arc_b60.sh | 173 ++++++++ EdgeCraftRAG/tools/quick_start.sh | 156 ++++++- EdgeCraftRAG/ui/vue/.env.development | 4 +- EdgeCraftRAG/ui/vue/components.d.ts | 7 +- EdgeCraftRAG/ui/vue/index.html | 1 + EdgeCraftRAG/ui/vue/nginx.conf | 2 +- EdgeCraftRAG/ui/vue/package.json | 1 - EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts | 4 +- .../ui/vue/src/api/knowledgeBase/index.ts | 92 ++++- EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts | 5 +- EdgeCraftRAG/ui/vue/src/api/request.ts | 18 +- .../ui/vue/src/assets/iconFont/iconfont.css | 16 +- .../ui/vue/src/assets/iconFont/iconfont.js | 69 +--- .../ui/vue/src/assets/iconFont/iconfont.json | 14 + .../ui/vue/src/assets/iconFont/iconfont.ttf | Bin 14368 -> 14948 bytes .../ui/vue/src/assets/iconFont/iconfont.woff | Bin 9284 -> 9692 bytes .../ui/vue/src/assets/iconFont/iconfont.woff2 | Bin 7836 -> 8208 bytes EdgeCraftRAG/ui/vue/src/auto-imports.d.ts | 168 ++++---- EdgeCraftRAG/ui/vue/src/components.d.ts | 45 +- .../ui/vue/src/components/PartialLoading.vue | 69 ++++ EdgeCraftRAG/ui/vue/src/i18n/en.ts | 152 +++++-- EdgeCraftRAG/ui/vue/src/i18n/zh.ts | 131 ++++-- EdgeCraftRAG/ui/vue/src/layout/Main.vue | 5 +- EdgeCraftRAG/ui/vue/src/main.ts | 5 +- EdgeCraftRAG/ui/vue/src/router/routes.ts | 30 +- EdgeCraftRAG/ui/vue/src/store/chatbot.ts | 1 + EdgeCraftRAG/ui/vue/src/theme/common.less | 28 +- EdgeCraftRAG/ui/vue/src/theme/variables.less | 3 +- EdgeCraftRAG/ui/vue/src/types/global.d.ts | 13 +- EdgeCraftRAG/ui/vue/src/utils/common.ts | 19 +- EdgeCraftRAG/ui/vue/src/utils/notification.ts | 26 +- EdgeCraftRAG/ui/vue/src/utils/other.ts | 4 +- EdgeCraftRAG/ui/vue/src/utils/validate.ts | 9 +- .../views/chatbot/components/Chatbot/Chat.vue | 116 +++++- .../components/Chatbot/ConfigDrawer.vue | 6 +- .../components/Chatbot/MessageItem.vue | 4 +- .../chatbot/components/Chatbot/SseService.ts | 57 ++- .../KnowledgeBase/DetailComponent.vue | 92 +++++ 
.../Experience/ExperienceDetail.vue | 225 ++++++++++ .../KnowledgeBase/Experience/ImportDialog.vue | 100 +++++ .../KnowledgeBase/Experience/UpdateDialog.vue | 387 ++++++++++++++++++ .../KnowledgeBase/Experience/index.ts | 7 + .../KnowledgeBase/KnowledgeDetail.vue | 298 ++++++++++++++ .../KnowledgeBase/KnowledgeDetial.vue | 302 -------------- .../KnowledgeBase/SelectTypeDialog.vue | 134 ++++++ .../components/KnowledgeBase/UpdateDialog.vue | 127 +++++- .../chatbot/components/KnowledgeBase/index.ts | 5 +- .../components/KnowledgeBase/index.vue | 172 ++++++-- .../vue/src/views/chatbot/components/index.ts | 4 +- .../ui/vue/src/views/chatbot/index.vue | 18 +- EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts | 1 + EdgeCraftRAG/ui/vue/src/views/error/404.vue | 11 +- EdgeCraftRAG/ui/vue/src/views/main/index.vue | 6 +- .../pipeline/components/DetailDrawer.vue | 31 +- .../pipeline/components/ImportDialog.vue | 12 + .../views/pipeline/components/QuickStart.vue | 2 +- .../src/views/pipeline/components/Table.vue | 11 +- .../components/UpdateDialog/Activated.vue | 2 +- .../components/UpdateDialog/Basic.vue | 26 +- .../components/UpdateDialog/CreateDialog.vue | 21 +- .../components/UpdateDialog/EditDialog.vue | 6 +- .../components/UpdateDialog/Generator.vue | 182 ++++---- .../components/UpdateDialog/Indexer.vue | 262 +++++++++--- .../components/UpdateDialog/NodeParser.vue | 95 ++++- .../components/UpdateDialog/PostProcessor.vue | 37 +- .../components/UpdateDialog/Retriever.vue | 135 ++++-- .../ui/vue/src/views/pipeline/enum.ts | 15 + .../ui/vue/src/views/pipeline/index.vue | 14 +- .../ui/vue/src/views/pipeline/type.ts | 3 +- EdgeCraftRAG/ui/vue/vite.config.ts | 5 +- 106 files changed, 4488 insertions(+), 1326 deletions(-) create mode 100644 EdgeCraftRAG/assets/img/kbadmin_index.png create mode 100644 EdgeCraftRAG/assets/img/kbadmin_kb.png create mode 100644 EdgeCraftRAG/assets/img/kbadmin_type.png create mode 100644 EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml mode 
change 100644 => 100755 EdgeCraftRAG/edgecraftrag/requirements.txt delete mode 100644 EdgeCraftRAG/tests/configs/test_data.json delete mode 100644 EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json delete mode 100644 EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json create mode 100755 EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh create mode 100644 EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue delete mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue create mode 100644 EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue diff --git a/EdgeCraftRAG/assets/img/kbadmin_index.png b/EdgeCraftRAG/assets/img/kbadmin_index.png new file mode 100644 index 0000000000000000000000000000000000000000..7383a01c796d84f0d3d49a112d87e1e71a21efcc GIT binary patch literal 44330 zcmce-2T+sU*Ds0%PcM?g13J3nHgt5yZk=ENS{zg)8h~F%JPkDN(G~Sxzykjqw^!CyrlTu~V%~p35Bz`9 z(($tya*?b?n~tG+N)Z<@~JrJXBvL*f-%zsP;*qW>2t00;+s z@)ECTM2-z+Yp%9%SpU=dc?=0d_9WbFV~RY_+7@Z!@!SOC{!Y{axo>KmxW*H zLOA@k?wyTykcFG6C2PP+0~1h^6UTcW%8^#oWq!n(cMM5dE|d-2^-!SgZ-GyE;HwuW zMjHq+E&skf!Aj-9> zxa!VsL-5m6HnQ&1E|pU+!o(~px)Y@xpJFI@-_^avRA0C_l{AW+n2Q3rrIM!>zkQbs zsL!>6QM`$7Rxn5qry)f>ob!o$-Nt*Kb4m~j&f510DomomLDb}%$B@_R#4sAV*&zf6 z*ZR0wPQ7@-dpP=iwYK%O2C`dFQ!Frk@>5Mk)a=|Mh*k}^s-Mugv!NQWI}^MbfFY-N 
zuZ#@@Nu1)5E$378A6u=&;PHJN+DWqBVnOoW%eMT`-E<0N4aSQ9v$@#7y)Nqt;FO8Y zd9*M}C2DLvQ*n9Ht*}-%XPzwlQvXEeFM(ta{qgy(_{m?5Z^5oyh>SBk2m^Smb5V6G zX&XC-uLU;Jbs$46oAO=tQgEF2-h6y5w!FJIa9&i_b)tOGP1(I+F9)&_W?de_CR8&C z+n?h{dvWKIuMV{p<~{34tQJ>(0);e1@$*%BVGWX`Y~Ba(nfFORu$r>o%SNNC*B-)M z-Ku67OjFf|3S^^v8ayHrRnNkkBY728O1s28yZq*3O{ZRz-$82A)`LNxC(p|u(1*(w zt#vHH^2F1t6_VBFrq&sadmFDPABGqD?8e+2f(Qhv)&B&wsY|FIVC zX2dJ+@#0iRYb`>KnolUqf#_|gN3@(nm|*ma&G)-1^k5GE>IlhD2x2w}pxnBMS$#c1M~ z+u_V!VviE4Z@N z%Ncq(9is8?jT_l}CUEolxb%xEYA+71A0tt)(s#{jvH?=doPc^`59n5(A#J(p$Lf9d zs}l$}dDlutzL!6^Wt@J$;f5EFSl|jq?>-5Kfkoyrqm*92JX5$@ZDKz6*3%X)6{@_q2L;D zqoSH+kx{34K&4bBxi^f9iSi;)1K98E9v|*;!sE@q3XkmqT4JWP&^h{El0@JPb5THa z?ha_H`zE?vKETNCTEBtll>rj&xh)ZocC#=h`W))ri^&cbV>Fe+IV?CmfoojbrPUOb zXQ*?##k|6WYf8lNWBSdTA*{0dk{-YAPJVy#Hc4H~N>xnKi$O>`U@k&#LLlC9c6r1h zQOjt1V^F{HGUD`L+N;?bT#=`kmJ)5RkI^t|L067c@g?scZ;8zyx4zqp z7syex-KckOYi9G*!b5)+47U&If>s&WlZVFxmq&}&;y&uA@ky{OGi2#qb6M<7u7Fbf zIR>5EGG}|!mIt$_{4Mg($B6Ui!Si0mJsjRx{q^Mp?1a1M|utZ!axK>^9NBekNm#b@)ztvwM%)GY%-&Z= zj#)aSo#&RPFYApv$8zeEzT6e|1!Iq`W`1hF>y)NMJ*r_lTVlMjVFx~#@!csQeYSL& zdndv@uvvR+xEMM-Y^jskz6o%Tu-qx6kJ^KCw9yKx} zYP}cAtZ>dN|9qr`*S8U2rz2UC4<|_(Vd5gm(Ec=K0uf0=Yn#TraI8Hv2+I|=E&iTj z(IGP7`kE){FD;9BwXmC3m0k5?a`4|?dU8cQT=*25f&O~h<3+-xyRZGWK64UaN^6QV zFjPV%IzL4Xu8NvazZ7O(llSOb?@?*n_S2P0`bSZX-0g;P9O?#)a)N!LtBPFvK_sVM}^^K=7V`3N_zF?EtrQtTTcAh>l#tB({u9% z7Y#k6r`(U?c-g=G^dLrXTu5o-r0=1*d=(fc{^4X{d-?DxzIM&$l+aUGu0Nq0Me?=KR%!nAwzP&NI0b@Tw` z0w1&gz@#hx_VkMLFKGMu;lIU8{~UI3H~c?{OZ7kF{J-i-WUR`MJn-ue-l219FNSV- z07O=f91QTFifN95!9V{GXlzBaiY& z9`)+0la;_MQG5sf$!daqN(tZ%(AzwecIf{hj@*AiM&&B8hbx0C{8XcXjel1MDc@|6-on=jITvE0M5nLbcuZc zJYgs&w6ly0O=O)=!lEJ~Os#1}o#T?v+L*{Yg#oK?Bz8)MlIZ6$l*U|!XmwNa$)F!f zqc=E%zkU*Z63*g5-H3vX0Vd3aLfUb&%+*Q$W{rdrI&`5o=9O8Oi$K8l1ORS#s7GFK zOjhvqnFGLu&0Jelr37rR%cnyizUuj1ZPHR^)XGN0&aVOZYT1|Tj|)wUQ+OC{?~$jw6K<{!n_43P8^>s)DeX2j)(fHqk~>Ws zX5Vn3#NSCFqNm@j)on#Ob3G?vb8C&xc@Om_E4WToJ;z5B@TZd))whGF0GJ!cqieyO zo=qo(sy3=SK~Y=13Q_z$v7Z4SnEZxvacT%8=6{oIDlCmx<)i+}kcAuCK&u 
zije||A}mm_L_JQ3&|lqn|5o+sR$(tHpxEtrWL-Qk?$j-v^m22C3df;V-ratt47{Q9 z8xUarD_kdKeJvP~2ObVEd_14a5@rU#<`w|h0Y4UkTU1sY;N}s))$`@a(>cJMs}XY3 z9;qr!?;*?yUV%9J)xH=(?b0>KT0mgrr7QBPR;9Kf6hb9Z%=JlZxcMpQq5`HQMKNH| zAmE0P=Afwk9Z03|X4ZHSn*0fSa{=ohUH}tYws^3C_?9-no;`>eC zWA@JB-z7h9w`^Q$<;sWfU*&9k$b7_e4RqBs?CE^eadZAt~l z%5n0>?~equPverv&l>J5>pd9`T)a z%uQY>vTuS>>u6MRwpu10i zjsL>qhNSoq=WM&bNcn8_$W2ZloL5_U+^^Sz?)<~AG$mu-^=>*aN{N)m!}s#n5#Q)y zz}^cj=g>2suVTea2-hK<^q5NqGBs-X*V_fiuDxpu^vdBEDui-+5UY)1cej&eZCd}~ z_$lnLH?^o_n$wb7zWkUdDLrjzieQIr zo&!aioB$r z)|Coo4m@qLXLSXXx(|58KF0^ZOLx}s(M37Z$;pEM~q^K)jM&Rc@Fi_@lW?a#$PNM{O0bV(V--N9}}`j>13 zx|~WQFO=>{_!QR&v6C%n;y!gn-(I#r8&DBPFR))5ZCqqGwVa`*n^n4jJ#@`@6@1Ez zA*;^#T5Kw9-(Ip}b9Q&CpP1Z0bYM5U?R2WZ^-G7|a9o|!nM@B=vPuQ?*A_T#5;bvm z$4W#KVF%sL2fk& zoOjxb$1Z+}Jhol$kIjy+nH=U~@LVxfINh3+2JMqWdl@=jE_zzz&sJde(dm6&eg!G@ zy`d*)so9b~S&w{;cNFAvfKPe6z;gxnKqA{~p-groKBVYT)_B{X$d9LHOr+r70@id0 zw~1ga9Os41XJaBBak##Uowud{msIAXDOHrKroAi zi>kCYf=)-iU7Gr&w&h?YmU-j#!(yv?%dTH8-p-4)*DJAiZ|TP$W#2Tun=uk#y+6zG zwnRXz7A4TJWpDk^7^jamC}$Pp>D(*By%eGSL@f2A;nnI1m3G zT#PdBxQNt)R2qL*@qm9@axQi|KD@#Dq6RXIDL((0G&oR0BD3}I6kR!852`^f z^9KPDrg%K#m&8z~0OQfE0^@t|r`;8k#V}d9Dl3;3QJ=mhB|uk4ygkFUjH0Sz*P2oyQa9CY5@vV`O68XrXHKde`<$S%a!GUrio zId@|!@5qr#J_}6JlYu@78g(>4!Lm&i{0kTVxEafila16izn1^l?e1 zKlM_={T;Bt0LBsu3x}o(k*t{4m^%m>H0r~0-lnZw(y9=a&$|57j)DdpvtN26pBBTs zZDEP5rQkB(TDX?k&S>9A)R?@R%E7*O>MPzi1XCe`Ct+w5O!kn#M10{Uj}S@e#9Gt< zM0HZ6ZbfU_8L@K{e#2{m3IuGAS9TisqJb#iq)#I~M-|9{4IwyOXa1i0<9^GUr#wqP zwcxHf=H|r60MV*+^!`3E8Wuru&V@AblN}QV)+9H&b}#`05Vae7e1xuWd~gWi8Xbv4ZM~-s|c7qG9L7UR=ZCiji)*iaVbbc!Hphmm50wJ@a_M zj7y|VRv6b(m)cxl7tfBxFfwK$y@jTSdI{2Ao32MgI+lX7_zEv?{boxhttUDcJ%^`l zbF*H!)qkp{=y^@CNUjEo_1>!H&1c;n{O>y)QuT(+A$p|I2e`C`>>1ca@BGOLT40!% zc5d0zcd&OMRsoQ^IH-0mqaqOlg313$nqr4!yqbJ*8R)T)buloyfXE+AS5|7uW=Qr?@~V-CC@TAmADfSBa( zf5T&NafS!28XK8_hz?>;4TXw!{7iTaZc?&;+2C5AX%M+ z#bbAt2D8Hg0*CebiBT-_l(nx$#md{Y-C+YHu#B>iU9mVqa z>M%8LF|UzdSRe^B><&T~pT9D*rmbDL7N8g`x!OmnUoT6ge47doHN8fMawK}+t$o_U 
zRJm3TfV-;7r`sBNHDRj9W{qx8z!}FJ=%wU^3BZWH`o_??B`9KPGgX5daK&eu*wDGh z93zI5YZ$+O+3H zFai{el|eOJqRNaoXN~HnKBy_`yA5H*D$#DU$GMC{k)!rst(+k&`|x;wA4v$pwZyTPK{g`f3$cyFew z4$18e$CoPIr68SCl-8T(%IBQrF<(vG1!*b+hBv!B(%5n&93W>@*#?caev3k0Iwmaz z=KzzbDi{o|Z0O&<^u@_KK{22r0N%`BWFE}pR@K;p13mK2XWaq3lX$4qE_R<)Zh*1C zitOP`XT$no2ALu~!ICJnU!3Fq@B$v6 z4V&7rk*>2|Oq!F}&7n{uyH<#E!4tk9Ocz5XWU5V;7)iN9xy#xbtbf*j#oC2PRUb_pmmy9+34`AFi6;-Rx(v88OwfPieu=Z* zO-7S-9^lSb)+IIAv7*VxcqZYyz1gr5%yK}rl@NKr74j{Hw}OK&*|jk8TY0KtMa|0% z@M=1bfAz>MF0nR?%0cP`^=!Z@5mbWkn-)<&SC=Oc9{QvpJIqNWD}z|8WFxJsrouyE zUO+BxH(RGdNSAP+!tK56zMB!mx$WuE*Kd{hGZ~NTC)BMW{t?sc+D;Xn>3afys{|{* z4$<)^X`x<+CVwUK3fNhJld!_x}+YSB$1`Z3rVkI+X*F+fULY2U*$F z>lnmhZkZPmO4gm_9{-^8#lkEEVz2?)n=?1HF+tdhgxs~pV(3nx588V)mh5Sg+cd&Y z%6FJtzS)L^A<#R1=cgrp1FrP*p&*NHM{^|1_VReCw*@(QTE)DAa7-JQACw>IH^|Fl z`@&@gq|m%Zub2~QaZYevwCT$^6g~E{q0V!K`=#jstS_1mSNdMzGCo@)fjyNOcXI${ z`e}fpME9c12c_L+>rWYSQTO<1L20KotOvP^B!{vwZ9M+V+fx0WXTxG$Y2 z9RFUG=-6M!K97u!3A)Ig%UZO~Dy3VTY^72km(jHzWuVKbGqWzd-^i0QkG-8mKFbsw z?39s5Q4G`*$R`*}kf`hEEBm0^PoN6BZ!hWgX-4s%^529ai+tZeNH&5*P#1gRGu(vv}xr z2O(m5#Jvl;w1VAdQT|)+V#vbvd)xGByvPFgm6~T0<&JpNywiDZ-i6*|+`(z%>#^Wc z8aLVG&1NmuV!5~PoOlwFk84XJ2+^|PmnJl%6C7T0QsvYJR|~DSn;(JlwrwoQ*|d*E z{O4yY-;PMt_4#zYPk3Tt;pg@#H+JZ)c6T(&(Rzx|6r|=?xh@*ey;;SyMOm;hb*Ko)7e2ryC8m z+f!&sb+q3u;}1HT9(uh^2m3VDnaeUbbL|=gvk&Bii^(dNxX`&HV*VHtLap}p^fs+Z zWc@U4k6q~&L8Z#tPXy3yJIQ(cvGVs%g5J|gutBhYoYCbeN=rrUH6s;$mlfrJ#MtXx z>dQRgo=iVL4T`c;64>3CiZR+y3p-N1kkdx`F$!>$L!F{_$GJ;$ynsnbeh(5zs! 
zEd?#hB|`hIJzgIQdYfy6q_IRE0b~PA3knTx z9q&?AgT`kW4r6Xnf87q0cU$F{F&PLk`du%GSOq|AxwYNde_W=j1|ZXSh*O>?L7v;W zfdvK({yp1Dtd-ouX^^`DQu|VAu!xLQmDqH0MKoT#C`-ar*j!y}|0v|<`bVsy>W{hq#*N-`J=g!H^9{t^wlvrIG_&TI+HI^*j# zgDln&55pnh)agQ6@`5R?NtKqR8Xr}9?mhOHtbAU6^1<0j=^W%pO5DM=LALo%(B>{C zht}ngL|ft{)?Uz&I*5O_t^B}vl_p&7R*E59y;Cvit|mlw zGqMZDr}x|Lty!1cZi#P8)x)eL(-X?lpxsglVj44NHayLo(0)6^rjtN8>X%ZN5#uV5 zcW1%N_=3^y-l?P?2f?Y>0->`xeVn|Or1P(-TcZ9RD~yUh-WTW`xJdf>eYvVpbD**& zID^;Yob{GUs}IiLGBxmsv{BAN^1{ZJsTzO-Q;wc=4d9htLJYd|^WR6kbI?q%E$7ej z=S#pxWvtzb*lIMan$|4EK(TztiXr=W0{d{*I{Yl^&E2==2pnDYW#zXOrWtM*I*bQ;pi%0eN!v~_(y`s}pC*>rx35ld_wBZdrZ6`vd03yRqIXx_^|0%^u;dd^DFxba; zO)RvQx`!F>2gM#grxr2Hm|Fr~uUBwc?M@wA!Kn;rR#IijuaAset1?@aM~Gr8q+ux( z|F=AbPn)e>Hk*Fc40ZZ@8x|%u6MniIG3VHR^K__Jlv40|*ukzlS(!rp@vZG{fZskH z*|MHOXlmA-ypSxDf4NK9AUD}3k<6h*%P3$;q&_hgE>IrbQF$nKcSkqXJY-3WJC*|7 z$`Cs`x2rQ4Wj;Xk6OsR-e@0uI2*lH>`cl@BpS*X-L%}lxFw~CO12}5@Y7NAu?V9}I*d@&G#81l#M!0XV6dCOrir5fEN(;%ptq}wjQa$!|$sK9bHM(|` zo_#=^zFMnK?n2R0WhM2*Fn@MR?}^;L@21v}VJbk4nRJJ4CH+eI^w?}Gi?p(dz7<$l zZ&a}5`#^iYbpVI&^N-Q8!#H~<-=p__9X6 zAmFv3h45JZY zzCF)@aAs-^`RpkX5)b~F(V1m(Ve%l2ZP5_F&QSpU@g8$Q0=Zur(EO4bCTn)D2%7Kj zSs13h%a|Q!F$#VB>>g+;1xj)}2;HG80)Y_LL*(}zI|2{~n1-Bo>i4G6=Q>Km7Y6Fg z)$L%%mw&19*UXF>lfQ=U4PxdZ!wXA2QlmAqSv-~m%I>~lt^_YU@8%}mS z1@<;dO{_2_)!`T)bFWa=#f98sE8Cr^bl2;BQY-mkW^h5Di9R=O8*2)7Z6z{Qr~IbI zH%WzT+p}v!Z)rW^!}!f!OP=-B5ZUdSSw|S*!%oAC(g>^NPv^_7h`Mf9Wmn|x`eD3W zLC(Xasgzw@Pbdd+(2d%|G)fZ8oWCeWiTURChGZ|hf5m82MhAQqC1% zX}#Y*X__3Ogj5A_LVVRaeluF%ph^vwUaZqnOi`+Bjw>0;p`yoK@jyS=@YmQJ?|ei(J# zSEo-f3`!@ZY@tI8>eZ=b>6c$eC~=^UYQd;ZDeLkMv=cDovQ$Rf=}$`mw}9iQI^>Yl z`$g)?st>%2DbgfI%t_$g(8|QUq-WIqy&{M7L=REO8U~3J9n0EqdXpn}_p(z)j%R;r z;AThTmrWqt;%rzB${R#^w73j#cvoSPp%-x4xiAfBcth6q{0#QkRZFZjKi=tV(aaf@A?ViClJ7U2VFcW zMUr1q(QGukh5C?kNjYOE|71D!8j3L$x*e1o)j&v;9rq8xSntdvJMllLEOxSRYjdc|A@u84{9U&B)1YiizI#>wSgYD_!OC@A#@)QwDxvDM4!Pb&)2HHw*&*M_NI9C+Tr_*F+Q4LComtOtmVI`9q~ zm@syb(Ajaz33}L963~q>S%5i1tg#MQMK}@%Y>|BeHLGV`G3!u(n&}j{4T*L$s{sux zKJ8i{n(U4~ 
zG^odv!-Vy=))rNZ^xT=EWN7{jabj##;5uT#_ad#RydZ|BZ7 zqb~4wV)V)_B^D9S49Yu6K@}3Cp4es7gqm_!cD&Ux)i}=y!ugwl)|e&XD5GMuz5ue^ zkI#H_OMmO<5~Lt=Jzp2nDH9R_)z2h!RZFVNc9#{><^;%vM-%R!o@3gbzB7Q=Mrr0+ z$t7*>S8h_vd#kf#gQPN!(L=1Bakr=W4Djo~s9UM7lNGM`3iGo|{Cbqae0kmsP@dPT zeAs;jZ;=o=<6AqL)+B=<-2fKb*r~fkZZRY_))6JTGaNyGBCjm{% zCl1L4^07JENfQSoFx+^VP}_UNi6{R=U&)|p1;tD+f!Oo$Y)xNkgoK(o$4i=^lx3^h zUz(84Z=ZoWmyShX+k@(Y~;2){jTPUeKRNOBtk0u47()iZsC_2ZTl!A;%V=m5hOC> z*e5wm;LtE95I=m&!-$eb7>qXL_9Ztp-E*FBU1ctlmR+~=Fr;ywJHW9 zBrA+O|BD+KZ?-?{LCi1neLTP%1t=nEcK>oQzCLy4Ds$|LJbZ|b?ygU{`{a$P^B~L( z!HU1!mDB<4lYb};%>SoMmH$rm|BK7;f3d6Y?idZ9-|c7q5<6~Pw;<#ITneTCGyr^p z+|}K=B({GU+CM}7E*JwpY`=t8(f{+`2A0W6q5I3- zO;#4eAC+AGm;R$y21F2^))H5GD3Z@G4G>v?#KxoO?Mm~-6l zkjnIGA`LL=CVGJhv62hxvn_<;Asx?n;bMzQH(3}p&;rQn^BfS30mXm-B3H4IJn~)+ zk}`)|ns*8yemDf}FqN^jo~v+orx*Hqdg>3wsr{Hn&qReIuQjKq$AZ?RmEJG$?6kc0 zpc?}le-5zsz-`5v@r^dQD2Z^#TMp4O^=nF#6h!ejqujK^DH6duz6QQg12QbgzX2#} z-+c2M4UwGyq25KoXXQkWNctoZPA)%5PUfMT>;thz#$~Ker<0-^i@dxBE*7YPH#pVNKlF_aa#o->O zGhRJykG>=>X;$k4=gECMd%)l`TrnAPZjcP(PSi~gio24iquh(iHv~V*%&2J&X7xQ1 zel$AarhU!&Vy9@a*O2J0esucxwqQ{xU(kh_e${JZJEPY8Qw0~`Ne+s46yK+Wz2T`l_fT$=^JVzN|Z_g>pT z4Q81ZTV>1GH#GzN(k?_zJ`0l-Dnm`o!T@pnVQXc7N9&(;jV>-d+lrDrZAzwjnXu|W4R%kx zmOKAhcvGSzyli1Z%&W8ne<=3v!f%N1d`BiOqkdC#RFnPjL9TaAYVIwgk4~nBJdlQi z=Az9T^JKy$UKC#b!Sw9hn-A{UFL3b6xRc(}n45if*rp1g1&n1+ni+^+n}PzGGvF_c z-aVS>GByRzyV*OV5BYQL8s4(uXOflg=jNHLa#%B}!JkTtYt3|)^(oMtXgHmk>wB2x=hf*VX7M`WUNlfRgVHX>?%z1)0r0A=J+@HCy)VfF(JwIb7u6tt z;~%CoVI$4OG3H0E)oq}oN)i9gD)WHih|9}XkK$VuZ;Rx`i7N)(7rFHfZ-jyzigdB| zUrU!aDQ2_z`e;0$_>A*BBn}s)ov-POIX*u(<;9K_s^!F=g;E0A9)e?xM(uz~nIbps zjHdd-FD?@GZvI>J!hrf$f43j~YX$zlQ$61TI0B%g@%?2-e*LdD_Rnlf5^t}GB~e;y zE1s^(PHxS-eti8NzU&%z6N*aA4qUQo!gc-Vru zepxF4F&jb4zd12}vL}iee^7`;N9R4zThBj^!Hm1Wz7XS9sgIEnBp(=V0-N<~dOtc= zQeC+_VvhD0v|Fh}a9{LgZap*(uL3ux3Po2wE}kFD`e@x%c(*lJs$?WwKaBIuA2#h~ z(u*VW$2w1L>bpD%clw#LrCFj@wh~Mt}UkZ^By#V>sb-K=vusx(Z*#{-u ztlenkL8sw>IumJa@xM?6(Fv6QxB~PlS;xVv05x#Y1>ng5WO`4zrJ7c(l6Iz}r*`AO 
z)5aa{Vf$SP*T&yGZ!8W)pkwc2e%6=bdPo$90+i@SsY(z)W0l&w1A~?w(5;_!f0=Q! z_C|zE`{9mYC2C9af{`7VD@~az>3iw+b-w$f#PKE4}<$duhImHifqs6}Q zivHUk@gaFTnx!T;XBT(6I&;ix!4CbS|zRfd{8r~74h&c|E(Y#ksFAd&D$u>FrtW6AycB2>n80yPa(X*||Ci5_{0<`4G9 zjhF#tMsF=b>UTyg-0rJ&^_P~yM}nkgE()5v+W|ExK0vfCKIa9KBR~#bEE4^F8ZGCu zGB!x3aZveU>eVh+4;ZtL0IhYP2bVh($f~rPCIgUR(JTk-#+@|`2D~-`Ph$2(tvLQ} z|CqhyoCq*%tG2%80!4C@H=+P0u{&#>X!o&IFfJTtanFXT3XG}*6jGVMN4ULR`FX!fwqs?25 z%40u#lIY3wzrL)VHS|!!qTz#k$RG3^i3O-4G5}W_+}gvRn=4BB7@8`_5xM7VEGJ<6 z!bE?{_H{VZB2cL*TV;H&QbuXGH-p0#fRl~zPruZ?Z3308b-VH-pXnb?5G5G7_$8z4YQA%fQ^~H0 z#7vqRI!;hGWu+)Uq(Zbnm(40pg5i$?IGZ_mxxr@za1N*JP|3d2*!QaYcg)o@*tUKi z9HESx=dsd5OtwaMMpMPju_JN~8>fKFn=0B}h`-PDq$w9^@+0F-Lyhi`aNXWsAIxf@ zMeo;~#jjmE?DklqiM_&~#n@N|iaNl6KE#s%zBL%{H5uZ-tt|=&-%d}j;jOdCxy?n@ zw2<6;|LgqWgzvU&9ZmkQ_?nT=V|3Uf1E`TL8c_SfZF(IbF3OZgPrB9i$dtENNZGVj z89M*#fW7*buVt0yUv!|C4@xo7CcH^N?yVLF#6vvowa*S$>4$x>#Lmw)e=#5b*Q5O{ z5hMzv>uMnY$G%ciy!vOaJns4dGyZ^K>6Mz((emO-`M*ZrEJoMwh}a*cNLTt-yIxcW z>WtdtTx}JI&~yKKPJ24HUvIt%WV7HL0!mN`L|<9P%0JU;Fw{w_yNLODzV)BoZbw3} zpF913{bt$s&<-mSmqQ=_*`;>VPR9QjKHi;GDxuwz`2dI_W>N3JbMs&})Rz zkJ^;{Zw4P~Kc3s`U@s&M60iDGmw-|e87btJT29P5FU9n<#z+2Il^iIX>t-txGxHp) zO7VB|kIWz669Gy`Tm1jvm8+?q zZ^n9a%Ap$1z@Mqou%P24)xUv>qO{ZHZvhMaeqSZDcwh|s=VKF*8LqYRTDBvo`71mrU z-v&pGA>so^@O99~l7DtF53b|LZrQq-v3x<=~ux;A0B&$m(|HPt?q>xH$XJ-cg)>oJCiN^5r(x6v=F1YFWu zsbQPMDYwy6I2dSJr{1pu&g!<9lkKyWR_pR(Z7AzC*NZ;CexoF|sUklC9BixUm*Bw? 
zrHyp?bCx+yiYKkt(#k9vykCp?&-&DEYSzCA=v+oE91mC;ai941;EGFI5$lRjR6XaE z;WSVh!Okz4%Pry~$xAzO>u{;_3QPG~!vmhidE@&B-Kt4pjbCg8@9e?cPAJU%HX)3G zF%@kow89?Bq(b9ZsW|5zn}xy zdN;yLd+%gN@G5OS_b6QM97(ul!Jh++Ias8m{A7C8uZjMtW)7&L?g+9kOSiw|9(;Z= z>U^qLg6tjb@>|~nl(;KBoFsc}e1b>3cP%aK6jrBa1h!7?QYJgXm}?i!N@h!-3M6lF zaMWWnD1_}iN(^iYdRJ>GaP7^7ZbKhl%^YpBa=B8algwv?zC1JbFf}UI8R%hV<6zOD zO$eIulGXNHFTjJ&ff*2=4yKV-V<#7GRz1+WJSA9VJdNubslh)C`ReIU{$*|)dz`Hg zHa!}{^#p`39en@HBQ6a)my;MFlHg7l)x0Ob`#DuEJ68J|T=!;E(N}Ugx^@DcJhJIH z=bCrwxH%piW+|zCE<1#c(!~vNZKrmD?Y$viSM>q&sWwOSBHJeOtwY z^{-7wLYZ$uOY6PrrR!u-Q_hArz=iK{)liiVPw0$9djFFxkPB&$cu zrc?~Bym60%^l0o<4w?2LQmgIawG4fbw82k7BA>-ivrou{ zT325CI4Ya4JSJi3!9Fg|5q_saNhvVttp8VLbtg;&p6B2MzA9*U;v6a@cMy6>u8nZp zNQQX3dW4Ur2fW~HdD;@HX!ovvu9)VKbWT89>6QTNk%{qf@S8$MOVZxv%mX3ftw~qo zgl_^%6>MHYTm@G)^)AkM4Q#cmves%bu6okWIB{*V1~k@8L^U6|d$te?Qc^o@xbj9< zJ(Rz)bV&DF!-)|&*vnNqSrF5U9o8CyLm1JZeBaI@_8gg zB`a;XI||7@-n(jUL7Zi-TsbG^tOMG4_scxwHq0#Hr2=Eu*25H=Jrng1>&eXm8>Gij zafvX=C zUC|(mAOG+gH+a(_Lwk)MuXA>aEAD#q{WPs5{qCTD=I?zz9?FUz_Fmjh|7dcLJ$lba z6kG6H3z0$OedR|?I{a4WIQ8JrMs!QC2F};8jCo7ZuIf#(^tNq z&~}{AMj#EV&Q^Rv2q+ z)>yHp-BjR=;et)zwgOVlgm;`8Fku@mj7={3ZZm`W>1|ff>CM|I*?$5zh_TYtLuOqT zDMddt_Z60Asu8aC{^uSyV)}&nw&i)p*9M&kf01#h()SarZb>zc1@Qrq6DBA5JXgD~ ziiO@VXryGI>5^9YZstY&4!g=I<|lq?^y^7hE-&9ht8FU?=-}~OM5VLD-naVo&cSy< z{_cAt^nkfbWt);;7&+NzUM3jfvh6Fu()#j^B7tX;JcNk#D)l3oo>4;C(5H-%FoqWI)WO7ql|E{Oogq zS`;&H;L>N+LC1$j(`?Riu=`jFdubIcehFE|NCb*q!SEJmDHvq59j={G@wfL z!R^AH`3yv`tVZXPL%dQEeJfp1T#^Flm!uaDA$s-1}w#jfbz{o`ly)?Xc8ROZ&!mKzsZ=gcwJ{B&flynC2M)*&R4y zQMLg|+FJ&>HpS5F7om$#i}I(*inL$El}}rpm0voP5VGrNP+-rgWUBexn+Jnc+eK%5 z2<0|13cX2Bi*OIHW-m}ZR~>?8bN#4W51d-BdR=$uud`#RLy!CVOcWU92I(ll* zp6V>I)ouro*ZhP9sXDhO;`eQ?C#{vb|4Qlqbeq@g0PBhvqY*|n_sC)tlx7mub8NY0 z{2W4Qb|pl|}omTJiV*&AT`x(0_z3tBxE2SvnPUqd0D#;~*t#?YoY5Y37fk+-2^% z`iOSw>83gJEA7mid4efrUV0j(E8ZuH`~?xc@`4t66=8>C#xkEZJI}Q5VZ!IXu3FS6 zyT;e5B{O(IJtq5=i(NB5%TQgC#K)jjq=t9DLTg9zRx}{j;>s4DXF+G=oK7nhs6uihCOW zT!hc(kPX*XiQbp~!-wTJ55T;|KZ*KHl=90S^00f?*9OY 
z|5Zc#-!wV@+kDilRA|>Li-#s4YdY^f*?V2(Q>>6xu;(#JQ*Iw`VQ;AhlxH9im;?am zjD8fbk#3^ylR5LoYVWmMUtXGb6j?X9yan7YAbTwTR^U00N|n-A87;;4Pmq2!E#{w@ z%Q$9$op=oj(;5W;rut?|d9fLcnf3$CD&{GLFok8gpDETTOv#$&Zs8toP(##y^3f4-*??XxXceM zK=TEZg$SWb79XXyqc zAQeA3eFg6!1lDb5^BOR|nnUmz7xtWMi#$UYw)CsvN5DV$~3R zJ!f#fw>F&B3B-dgaSQtyT6Di*qInGS!4<=oFO5wEhkAtrWH=Fpb_C_y-|eo3&@!sp z^zEl)AD0w6p8SWeX#E&skSB~a>F{YHu<_A zt=N6ZDp+LgJ^!yaMNRk~=A_32ivCjsnt=e1mKvW@$G%=POV$#^H!V)~jS~(}<>clo zeQEuXSM-@%J0{)t_oky%zx+tFzJ^xph^u1!Fs{R&YjB6DROynJli$rXD07Gx_*UE5 zrbfsHqNyS0VU{4AP@*XR!ecC(Z|{su_z2z*$k8L};eD?xu85jFD=E5s%pn(U3PB+7 zk{vm%5ATMnW&;Q5m9h)DW?!479Laa-Yw^be{6;sgEKTyBlZtWZukItkxWxNL!<-S* zeZj(Ec;)oemvSkJs#?-dY!5~~hWw~^NkiSGUO{`bO*00rX7nhV$6kJAT{Tlh4!1wGU+AbZ_!Il%!3Xh3d z)geWXe@y6E-p>#@qaEN5TAalO>M(G$l(0&;-4fVZn#%ck%vCl_tA4$6??=a!pXw#A zYZL3o%iZ(CZ*YcQW$zNUqx=!F;X!qlxWI1crDt+Si0j*PwK55fShzFt*~-s?ht(iU z02dMD#zdvPlrI;~+nqt>MmK1QubLP0U^!f+K)OD}^9J@zPwjPfpEo{u&04cnj8%MD z;FL%0DRK&N+NXR^Ra41McPG>`WVJIr6fWS5)x6g?mK-lcB}uS;-1=i=pUT?OlzSqewv>at60QJ%JjRR=sIF_}|7gHNv1lW#3} zgN25)wP(biN2=|g2nfwt9aZ-o`~2%ijO5)}%T6{fk7!nO6lEu$W?v*QwH1J6XS5jj z5ZWGHnhVc7aXCEm9Jeh~yxP40LS%5_1m_`a_~ipQPE7&K&s<#Ne$n1 zsWebPceBi97B2S4SZB13GfKYIV((Ed2r#9o;`0&T57}GB-Dpw`d?JSK81E6|E5|kN zq$kVni0~(sM`Q=Xw_v=Jea~jY3Yb#ye5C7l!sosq`q@}k=y6HBiYQ;0_I@U`5taMv zt>t;n)u+=DR-Ak)}wym0d(e zG_ia13IY>*imbyczGkQ%W%xWu3&mTvs-jFIq)Qs_ZA+1*hs%ULHoM?AOZIao z0v8XaUVF=kdvswm?P;ET$dQ^YVm87Fskk)MBEpeqo6`zhjDJhaipXwV;(rjctM{_6 zZPDj`05;d*1(AHMXDY8uNjK^n7l(#vgwqY)Z={`xEMn>}8hcxthBYp4#k)-~Z)4MeH-5VT6CKDRDzCr)(e){mUvF!xCcJ`mSP zx{#d8ec_~lkByHUNyQ-ZDj(1Oi#oyOST#O6blNyF-5h*g=i&OL-D`xP5C`}UJz%Ng z+0!v6enRkL3Gk3#8u(vB9A_-l78MN;SXi!1l$3IQe!=yo0K4P_jZe~oEYME@o}{-l zP7LBWZ9^E*3yOn@o|Cs_Px&0@m2IdmG3-&zMs#KHW20glO+p7Z)%DMkOU4zjw;`N+ ze!7Yfl@{`hgJ#mT^=EqO!A$q5uN{R}4cGULQT5l40YO?HM@Nr5HFm9#w`U8Ka}mnw zCi-unHnt= zG$QSZ@Uf$j1iBflu~53B25im6x%qf*#sIwB+6N>OiEy7XUnIJqIU#tcWwafoL{h(H zIjBi4=&V6_(5?0y-I~~UsI6mwD5A+iB$rj_5Y9)j!%ktn(#N1nC-*I5f@~!m5WJ#K 
zfqrGpKuvw^n>fe<5hB2m*N277_Qlb4XX%FQWC}c&ZP+T8P5=1#>6Xt7*rSnIrjN!N z?`OtAt!E!}Xu|(z#mD7Qt@f86<>qgvE}*?mei!|E=PfoogR=8okW{I4;$3$0z851u zSCsD2-DDin>(c-QN2C#9R>I*@x=3p3vuzY6u;keDU z&vO(5)edQhi4xv8AwNriCH|tf?cXx8NascoKz!B!L@}Q%UL9+yg|?=m;IdOq72q&X zj_2&By7DOjGoFHOoAtsTY_BZ;8EsU5Y<)c0Y;L^Vh8dmR`V+)c0bEL)Vj3HV5=t8Q z)tfZKjpC#g<~SQ?!!u?2oE?FYc4?@9kexo)7Gd#A6+5ZO>FrhqhC5d;LFM=pKO>iv zWVfM`c<}<11_KYxME85-nO1p}%k(u}YkZtd%BK60##5U&N=uI36u~M9-jp=MWl&Nm zu12&9;Y^TIPd+d9^O<&@ZCl3TXNjrZ7D^*XhayIIZfP<^y1AVrvAT7y#2;t}(X^x^3XbrA`EztZYbOys)9^kQ;gL`$P-I9kJ^K4yn2Mwrcak&zDJ-pkh8Z?o{l@)Ha^oK zFGAM*3Vxwuu5Hy;^6fZw7z8K;-mAhRckjnw-zbb{e@hcW8)5nqgtMX|M`qp*Ja_s; z>u8{BGXH{b@(^+|#699`FL&-kPqn-Pf1c5gADAOzW6U_vF>;Vfp?yQFJ%>B8|r+Vc-w%vOT-PX>_ z6#aG%ky$)X2$b~oVI7Al+m_wF7>S_c`r!o(rgLX_j{K5kYkH_QN=V6RUCl|? z;5LkrL+iuD;$O z6Ba)X`}WAZk%Hkm4l19E2_l@myhXguU!6yEq!oH_MKX)p&PxmC;Tu`Z46H*z{p zy2iao^FxXrQ?{KqC!IhZMlx@p|CF(RwyC)#-KHl#N;85#z}yTS3hA6YFJEem>v_u`bpgn<+M-GqyQu z=MyMzsIYui!^4M4tT=%n;#I6#4GB%Vwb_Q=Qdebk-}EuPWa(n}D_CPWV>oe41k0IV zQeWVN@+C*kN=KzL-(&(wpzSQEULA9>zM};0E9?CP6cG-c!BdnO>T zEY714Q@aaR5%hk_!IBw@@_UBpPadyjYSV;x)WU>oDpZYetRUf%o({y^XDIu>A`ceG9qytUW0KaPB=w43gE zeyELJN@!0(?BoQbX1Qke9-$wV1pOTrf(AQEoy>9}MDb`6cqLr?rXCmHnfVU2LRiC3 z>9-UL1Lrt`)T&gsQX{412enQAq=y#rv4Sqe)6O3G8Zy6b&JuwP*a^Xc@MK#P^TEBI z@)^V6^Dl&0g)ruFW-iii{zD~c3ucrF@aUA0?iJZ=g3HdI$oYmyuw>ufD&%cQTPaZ#MRia1-J%SMMPi;v%x$Ww%o&AY%}-m3cb21A$*P zjwc&SuSiREgghasabF>rljk8w3Y0#v1=O-+7O``AX@XrUqG_X8dGH={gm6nvd4)3wBt9{0$-TPx?Mr#JrcS&Wwbl=l| zjnQ_ObWe6~290b^FLti7d4yrcRlgwQ1KcbFN{a*hrni2JPYAomWkWO31$MFeeGtg8 ze^Q>$rl$l8a>(5Ig*XVpP=(@-|MEG~!zC2;AKImXGWoJ^OPj>Z|xyoCafV@%frLM^W?@v-?GYa<2r3Dp4r1XozL7vi4 zG3Vqe&;VPFH7rjG4tTJ8hp_6maU8~aMrP#)LDbW_aL`zerBGNVdcMu}4n)ZNdn%pD zdG&o9C&$XsvZ|2Oh6vxCa17imA3?-QXX!sBtM%Al>)Owsl$#wX?L&BcaLx6f`Bbf7F)S zLOknmZ8)PKM)+2cT51ZCjtUJ6eofLW_6`6aju+z97W9sL{u1ngm_Gqs7Y|1y9W}`w-rgXIN`j4{+5S!DCw@HBhYCFNSG!qVw_EnU^$^R3gT8OwbW`8ySrU|mSJR!8f>eAHIkald!H-&+Z zi2dZGc^?NABi*KrK~Bi`%oZ;n<`x$bv)GJ&SWl=Oyfji)1^yD7s>#21c{{f{oa?MW 
zw7Urg8ppea`8I~o`PutlJ`-rNxiBN6d1p(49WiwS%QQE@;(o1#b@Tcs2r+#NF>IfX z`+NcH8zo|KI6;-mE5b}i#}a{K9iw*BK$vb*Sx93bgDIo2gu;RvT`wX@?#MT>M^aYJ z)vkep77HjzzQp4s_R8a?kT)J+&qk-iDLo?-G#nG7hBE!_)!wR*((F%8otK>ahL>NZ z&GtF*K9tmSSGlA^_S_^ruY(o;4}j>2K%?O-wntNXX|_ZYZk9kcDpk8k{7tJOy%4AU zY0+XY6iHq?KZ_d^8FX%_VKs*OL2;ax=6EDz?i)bZE4Y(ve;SV1Xe=k&5-)SG^ar!W zWV40iHMzusXU(;Waq7>e4rd$@A{|b$oE{-|waTL_XsUV~K#T(1;f&kW=%AfU=rzry zB+E$E)M*04+5(+rmZM5>`=hZYORYDeEa-~XLL!=5r8ITH<*=rHt{e`1M%O)0pnXb{ zB?+bka7YG_8hEl5c6(k67uDkGpwF1J+nXxVIZc>z=j5BNr6tQ--)OZew9QaYGM(I~ zP@O!LrJ1T^lmr-OX(EDg_Eos^yAE+mKK}fs7=R#m33_s_G5_=hY6>pH=;*s3$d5jv ztR~;2jSWczXnI=-xq|JuFBtSWBTZX&?L=yh4-#f@?&OM9uE<0D8TqE*H0`VXE)Bft zZKK8QP=C4EIvi0={f-;3J>#Xoq3N7jr>%d2SGJ97S@W6}i?u}SDkq&*!*SmSr*YBc zP)GF|#0Uwx{kqm!$Y#t@M0UqVYe$OvNWZ-WVcJz>^{AH}jVV~dTmr>3PCsRYDebV zs*i{52^7fbkF>4Y68LFsWNinPAKYbEOZs&(%N>7ABL7RtOFJSIO~;G_NI-3PJ5V+4K{sQkt>H8idV1_n-+6+!rV2S%P+l&>FAlsY{8 z0y27jz>Cl{U-JykEnOz&>hqT|bQRzH#5@iK$)68MT{Z`sWB%=0!0qxt&+gWHyN-Nm zh~l5HPNcK#00`7w@!&QKoEa@pzoWa<$%*5Ivp-;nTuz?W@0e?0m_Ym~764NJ3Ggho zYp(~X`jOC&{D4~8JxI$C+r3qWNU!+0_)B^&?zWh{0`8OGg&c(~e%LRo;;^~Oeyu7- z`thxT>R@|>CIWF%TcB3$f)_~msNYKr2J_8u^AXN(3mcXG@_KWj0RH8#**cnACK8;> zPo;4{`<$uEZD4ck0Q7OYt`kweD`WLoerc-TDTIN)1jw*17&_7`h~@Mowe$$-0|ucN zIc5?77`yM`A}~JNCXU}>h0H^T;(&^!Hcx{wHytpv#@!nLHMi++GVUfRjm;^tu0z*i z1or$qxb+Nu*J}~Dx%jJV5vE;KjO!cU4qO;*tAkrH?CAMi)TsmH zkMUc+^-~&)KaZkaAe(aZvps?URiz^Fz(4&NTg3V@ZRO@{z8Y)V6Jh}h`+_(sOJ8p| z2xR1;mg?BrJqGK=Yt1Kn%g;kp;Ws@4!~+8Ws3Cx&eG#|toDQ!6SS>?I0lo-#GYRI^ z9GZgX0f$w9=9&ZWva`C=ho2_*rSZ(DI}df>1&7c2bC1~ z_GXQ&vU@nRKNG0Tvn=pM;Z+im=fn@_(0>h}qVZOZ2m~2p>=IP5Pn&VJYRa2`pvF>n zWmgxHBR45@w%t3Z5^NPEUMnujoljMk?8Q}+s8P<|kZKKiqpj!5paDSw!TF+Ih}cIX zrOW7!^2`-F%WTT{p*8_<$-vI?Tm(infs3T2b!I?j&KT7q?}*nyeY*r^6vR9ska3Z9 z&!-m`&M~?+etBgqEV4iECpF=-R8r}pw)EjP~Y-@nxfkQZk-hl>$v;|qaD_PZhz!F|tKX}ky4 zgl8v}(UR3WQvkxgScd*3qD_{ST-z{+lN^5G+KXRXXenI4a~HSZAMw$@voZZ`+9E`s zr?&2F76@a;h4RXw#v<}EA7ZB=HTF9 zf6vId9zop?r$iQ(TXS@Ch1zbec5{b@=DPx~dkdQF^tCeIDiNpHXqNqc$e9d21Q&5d 
z1I%+OYmg(ZvKizu&CIP@S!pNivp`MfIbQ8Fb`3+oIFm6y&@4Covw&L$p9tl7*hs2;-$+In2NZ(q4QdbsKh!e(RX592i87ITY_w0lwQU(YVTh**r0 zZ0(=Nr{xPVN4DH6K7e0%K|nd@4}WW-WKGwQFOZc76Y334v7iULG=Wv-Q_H@*Uvd&& z>1*BJPt$g*6Y;VewA#EzGA%I53xx9FO|q|T=WSt+at1T<9;m`xGUW@d z%IKrx6g#=o*jL`9VX${wXJFLHwREaXyRjL0nHc=Rz=}hP@@7Wr10kk7xXD>z_4z7{ z)i`P68BBt0E=uC-BQ2J86ix?;pzE1tel_s@C}a;G!gA}r-*tAhB(F+( z(CDP4YhXI{=w4=%w*f_P&XfbU2U$%c%)lhlr=i=9dy$iaHV^eb)NjXjG3%&e2JWI( z`8R)RfBU*D-3R{RI(?^eikzKJeR?kHKAy5$odSo4cC3KsFz&&$PUlas?(K7otH{3{ zM1^P1y>OVJUAPM-Ai-9|!Km#mG+L-j8(#43J%xGq5EY)zk9)aZZXVZepE0RL)rDG2 z58fS>BZwT&iPs~`rQ*!jN%qC|kB&;Wb0-N^AHjO|Sq;!kqjHz@3gSe!LP|70acuOB zF)xQ!P={#?I!rq^&Y3ZFoQk@Y{D_F6Nt!s+d>K8e=nusQ)L=!;oXBxFTP^X-65hr) zE48B41Evy^Npifm4&%~btsya~v*BX}IgVq{o&^ZfXF^Wft)!r9do6_04C!0Gro@BV(dL5blSk&aa=ji=X$soSxe zMjN-B{jrWFq?#|Tx`gT(@x9WV)Y`8i%3bGFkM8e!$1l;fh7jh`eLw9r*(Io**w-sZ z#|&%l39w%_3C0NX889$!G?-}zdeUnCv}Oq%FD%hi-jA)oci&$*Hv_m=>O{tM8Ego_ z-M8;-1A6*cj8i(N+9!vk^Ai8 zNAmvUCH9P=7*lL>JZ7H5rRF{ zo&8Kg+!czxoXj8nIzc1fjATOb{mbW8Ia`SLe8onzt|r??;%)>7;3v*d2jmbsl@;|X zOW2S9M)IGjPj8gyE3-NL7%_+w@^;wRXgPT-GK@u{qw06@zP`-qVQ`P~%y}S>{0DyM zx^YN#W`c}OoPeXd-KpIo9Ipo!#V6Frgv$I*PWlAZr2?PxqpWvWgulA=I4B0ha0ZIS zZ-SxjrMlO5+L|wivnLyL#Uq-cSZI-J6T$k7L1*xqj`iL4S0!0Ly~B@7_9nHObLYN9 zjkdNv4If!H_MN}k10KGg9_N4=ry`2W^54OHugsoAjtX?_sTx-6%QJ3^UDg#ZA@K;; zX=;x+>$Z(_Eyd*r-`#H!V%La^7DVsgw$a_rwP|3Sf*ORBQ}WuU3h-3VbhLRpCut#8 zOAs&jNP`{E{fFy4Cni}&+7qECw=8Dw3JT3t9OOjP_tJOi%6#$Ke}i-s(ifpThxs4Y z$Z$PJ62|4{mHAmbFmR<)?M#VvZ8tKm-zGM;1{4Bh{StGYkLKm(c!y(o6pDGjHmDF3gYSAY`L0ot9K*myx? 
zdZU;}rOx`QjmNPfkn$j-UBZq_if<`Xq$i%unTKB5JJnTWU4D}FKqIBKT}LoIPo7hM zwaD?UNweRq*g`LWN@iocxUJVJW6yqOOXcXZT<1brB+hSJI+NW#C^2e(sji}b;e6Sd zIvdHWL4s#UIX`>aNrDZe5$S*JyNy}K)5B20f062A@E-%J>3CLnYO}$ z-nv*jMaz89{mizAEVYmVL4f*xWaq11V{f}-c;!;U1F&7Qm1W9d;$f($?ELM+Sf}|5 zs`0{Hsh7((1$C6==WJQ@@{QwO`lw>Q<9#+&Is~7LcyK)D?}Bl8aF zqg^(DC%gRs=W&G3*rouoa-Lr?Nrx$|@4h8AS{6PIvc!MQHl=BCGXpVAd8grcTa~A~ z1gJ$p6f`RG8Wb13t9Iqg#Z7lw7T{YEFg;Fs4|4huPp-X<^5zS7Vt^EydkF3>*0raT@{PbbI;Ag6>D zN2)P^M#=^eXq9T)LcbJucVkYs`Gnp36ao1^!r13Q{@*)?APR}bQP&ucRyotQ@Of}y zq%Bh&&bApmmi^A~ z+|XNz$(NW*>oxJJ@s#R{0SqV z_svNU^A84Sqzq1sx((IM9`^y&!}ipXg|@}R9poLxxltn?*0Ua*XEc>L1J7^HcnPFN z3kHqMi1_BLp_3AguH7*sZAK@e{=mb0QjC`>Z^f|ys@}M4Tml^svLlQip><414e;*A zUTw>&&=IHFLwA*U2K_0=E*A4GjeAc2L5BowT;*|0gzAn^!qNVCcFny*={=5kv%Yj9 zIrnptTSBX3^`yQ&k69i2#Syx$>gUHuuiZyWw$N-dF9FZ*6;t>I<0n~mw(1O+$b&|hIE_iCdo-Amav;k-Yp{!xi(_G2rLvB{?& zkqh?7@J-gEqLSY*UH7otJu2nJWmEhb=}KMYK#=7ys@)H%caArTF%MH$tp==)botR0 zkEp2NRnC8c38f7Qlph%8bx&UV#6xvcC#ys}i|4Un;Qgj!>2)n#VT)7!Q;z!2N&@i1 z_e*tkW0;~wk?*{dGmuIqZ(XdUSJtV`ate*s8u&h)od@~QOzsjbt3eR7LORhjZ2+|7 zthHOSQ{Gba>CQkWNo=Q`Y3N`=6Uo^<^v)z$U0IM>M^SNnh7!GXmzQd45m#X5lWC>| zIdlKx>ef253s>mqI8h(wZ(Yf@@ut(svI-}V^O|9;kP*h6pE3&9<5Je=avxhg(6y{O zcUMx5*PJfdreaJe;gv-lzUXw|cuVDctFq$-!~DxEfkHkT!kjNw2n^K+&u&#VgQRNj zG!18^mOxrFp;MRi&txJ_{|rPIWG#{y1+qu1<|-`d9_*=C!ui_5lz$Kt6#DE5eI&gC znY7nli!sjj^IgTZ+P0F+`>D)z=JiB0Nf$!e?f+#seGDvGr^dkD)Jle7i@{yTFm;)e6dt;rvVt1W^OTcbb8S7q|(NpGzRT z2j)|{s6NiYfCGe&Y3?mR)yB7zV5g6I+$JnV1_P=QFABEtBKO`j22q3Mt=1-I=LG}-+0&t>q^aZ95bK8w0pD%WAdVV|IC{9{pYmNGG|~CJJ74AWW&PwU zyp}|*40f*Taez5f8zBRwLwS{JG$89|0%Famwy=H;&jcXO77?(P1ZM?}ZpA;r<>|9R z8a%60b{mm-?V-zeHwp^}kk$r>YGS^?O{feewQ3%kj(~um;|>`nS`79`=kPcckTDtS zrvpz~{)R+)`E~z-X^F0NeXy&#%AVS+hVhsckT3;tJHu*beE{8p0}F7h(4&!mq*~me zcVR3%7DU1Mfl9_ZK0rm*OPx`toDSP_2`DeqB)CXd38$E=hwYu2{&!l=j~-8SGXF*7 z(S1QGW0;2%5}BE=L?!JNvZZLz@K(FP{Nrpa^K^pz88IenKAi%E$ftgVhGXj z;;EWok}|eu3Oq?AuOoI<-e+SX%lXhZmF1t|M@9K;Q@)M1Wma^Y1NiFC!&Ex*P6YZ# 
z=a}0_)|35K22rJ%=AXo`pI)GKmpgh^*AIiKwkyXR5k?Bl@*XM;0%Vk(@;eLgHGD-J(2hAs6 zpQ=lBiKE9X0kXisD_k-2SHhd6iPTSWsnBft%#y8THjC#kMG>75qjrI*B*NTov8`l- z5`V(>R8kefZox`h!%I5Hh^g(2p+G@+KuZyr;|~z>S8Nd-J$?Dm74Rr-KNfyo>0Z)X zX-D=u`VsU4CIYK(>%Wd8Svt}eWTb*&g!rYGu$bU)UOg)6SPZdWT0|1h?RPQxhwo3a z`duU+aGi&s7Bc);QV+6G-S`P$uG~gavVNdC&II!39C{jV@^lbJd6)S6`~O2rc7I#h zc0%Da|JwZx$C~^*_FIb}obG=ut5IeS6FnLE=kpG*sFi$v&+sQ?@V1RJe&Kg%^<@qgVuEe5;P zOT;Gw(}s?2he3&)%zPV5&!cw@+0$XHB+z|y$LGP0(-F9Ld%G`3haZ5HIL+vO*2t<3 zyzjR+_a?Qx-q7_`gO}#wZ$q$u{XtAVeN&saEyPgs(=TU-&Sk} zvSo~5E9yhF%hag9F#iA>{H>3XY*r6osWbu$tJVXJk8-;xV_`$aG%kXqv9xFM)@kJF zmp%`D4cD#yuT*McC(ELYk)4k76|lZwtaK1qWW+gM0~dfqnIoh`3Y07!g9^o}`1 zI%~W^R1uxC6z$3<&46~@j+uPN1Ayc}vP{R{dynNnBXAQSZcPI0b=-8wV{i8MHcL{YA7IG5CIWw33L^vmr`#h6%o?kA;U-X}x)c)pOSDgG<`sQNhAqy)Cc! zj^yJ0o%$_1>!)&YZsdVQdL7P}8qNc6A30jVQP-10ib#Cy~I} zZkWk@6V_*UlWTO-HcWV|z*V|qq0A!nuz(t7y)usvhZ)aIH=OxnhKiiuA?Gt?5(zltVH zYBK@&ZeeDnvZm8nc2uFdDeMSH6JKlu_zNm7BQjt##em&=A=uvBzZI-i0&QI>j8c@F zp?neht9%_Aqt5a|*;lIV>Z=B#J1ncrxsB*`O28!AB3ck*Vq?TEa<$ zV}auIl5>$msmh@g`ThEw#|q>MFq=y3uc&%F6y`BiSg9m9({(_RSh z$;3`BoY?FRwQKAJy69PJZLM*Zs!fm}7A)404(fz95)2!2#>B_itJ&vHmyiVB-6?J+ z`@U?roXIn~-JA!f2mDxx+yw20u=?F>am5!Y+p@EBPlp;*d7$IoD4L|tzYtlTykw^u z8I;p_GzzXnQ=<2xK`E9VKZ2g_3m5Xn+WB{kBZTf+3xEOvueL4*PluzD`4#s`REkOl zoHR;+WfGJ2O?JnlojX_$+@~-rmFMN>PA#0%4ZKW^+pYC2WjIqnmRCY2iLEz`lU6sb zv#mrwBt|7B9j&K-!oe4(qAAH}*2Dg^llzclb%*Rp-BUncx%0XRn+R1a>z4IwDdKL| z0~GsOpI?F$k7q9H{t^bv4gWKC8@e`56&mlSe1yYLp9Ch^?|% zIYOXKuF{S`HA?Ku%kOV_zidVVyP5cI^?bj(U)lGYh2@-|a`YH`s9gI{9JotU8Fg?<(yvj1s{<#}# zOBCA2jA;1V;5NNxy}v72ZfW1Rz7m)Kc*GP}MM$E6x>1293vqb0h z+Z{l2A+ssy>FJ|-bTJic@@1Uv=yIG|$FH(gM27e8oXChe&oOm8@&u{=^us&+Sgcxn z?dx6VxUq7pVu7Kc%PiUnjcDz>=R%^+o;0qN9BTo!nGu08|BW)+epO- z@z7z6BJWRb1w@~C4dIHsFi=FR=jDcM49Nun!aUuuEmJcYF5>P6)`1U6h7J@QqsaIZ zAwApa>)6;p@+ymai)^Pi`Yo-_?kD~3pA_l#OqqsV9fPzL*pYPyw^mUI)IB-XO}R_d zps1NS7M61nxd zDSq*bEiYg1Eo{Ewc>Zb8TctD(&%LiBy3rqYL7N__RXaW(xvB^UH;i|xTCzx0dV zk~$KPIOH4Y4xCUJOS;pHw$O^nz)PEVfwSfoXxSG5#emADArHV1$7Xa`n>RE=L(GP? 
z$#h#>ZL%C+NhlZodWgv6x{Pz~Y{{jrE~&3c5*%N$Z5m>Mzv_Y8Ihk*E`#pU}raCJ$ z@(C3a;0W$Ia`xJFWR5i`@+ZZQ3S9TANL1a9R85Z7*sIMV6PoqK$zdz-`d`?nRY}Ae zl17Y7RNPT&o1A3RF<)9c@BuPc70V;0)ysN2fq!Y3Y%%myOW%Ur630Xx z;UxdFA~J1W%z!010rpe`)y|jHa4AD^sf#v=A$<4`sxiR_*1;Eh%X5kS5;mSD08lEX z&zOd8vhVsn;*dk}%jN`xBwKyxT_4n6VWwDwhijD88e(Nae3Xc~RT2{{V}4+P=F&cfH7^sa!5r@lm}5 zL6APP-q-@sViY}_4{<-DeC_rLA0W`%l$}v|{!TYL)s6W(n^oU4pNpgI!tQ6mBP7jd z@7!AR&2VV(vy&A?he*+n&nmq;Gh`Zq10}jLE@kV+>eq?>ogqH=fD=b(wwm3&RK4SN zA_t?!sxAL*;5yPDiF%*UCVP1xwmniNMpOJl$9WC1gB`##Lf%avPyVwZYNYf zD2@Y=Wl7q{ra9+E&BtodkXygwZ@Tpw0GnqUWc0g}N9ma1V9wvm9X}21LZ1t5sY$_~ zG;#eTQUc)_?%BIq*Cs2P!2EpCb9vz5gp=>ubBTlPh~V)}`@a%Al5Yzh5!?72m$2BK zm$#U6kihbD4i4H+iG)+V^{135TA{^oY2s3$SoAos`C?j=N_SpdXYfLf?j!%FHq*4} z(dP#og<dnZZ`xnmc@MaW!)<_DJifk z<@A@x-SR!a)t?72`Q){epohF$yZ_^smNloAW&9>%YCxIU#+d%%J6iQ?KpOX^8y*Ht zE6ZZHeLDMQd)6T z*@{vE**5lxD6bEgBYN3{?0Tu{l2XHp<>`jk=RNjgQj~=+Y71mJ8ry{pnx)$C#&Y%* zEEyE*Y8A6Rwt3yMi2(0|&plLWJH>3f=gi#ZZSHw5FVRlCS%DnB8$ksN@ONz!;KGHfN=A}^e9z5*E4)MwuQipKpu*^K*p+N zt5vR0cmbL6y$rltw#cS+cHg$k~5cJZ;}GzVYqr;N#=jCcb0i|4?kc zU>vkV7e3eO&8cv95(0{*2#MbZDaa@vx}rogi=I~@PXLC=Y>PHSI?lGB$%RLEhQz1w zX@S)Mg^kY9&OkeJuPz(r8+K>?PP-f}$6F@M=N0Tg&RuP)SxBbZ+J0B@$d@yAZ0Q>c zYotri)N_Nd_p(Mso;zcMwKjCyRr~(R`+qWV?hv)P$^)I;Q-*gs za)Urv%2|WB(Dj~H)+@PP&kt}6P3x^Yf!9^Lk3$<52<~Rs`*fw_3JaZ{q4b5;gTkh^Bk%= z-rX^J{0ghv=*~2o4;U*u-|2$%%;AEcI_@C|D0ZULyRx8q!YF#x@(6jDRLLr@5cQ46 z$Iljr>m4C4wWEm2);LX(&@5NyPoKUuFW1T_;eZ9oIrDpoUBclE->>a#r!>ibUfP@q zpPcVVL_A_r8k#4P1N43E?E7&Tb}EohcKdMWKa;msJn^H0%{mAAI<~lM=kS z0rk{q@z9nX-;k}|V-LHphaG)e`;LB*PGbw0<(`;3{=Lt&NfIz}VbQ8R=)UIcS523G zuU`)?w8J|)nf$Q7KcaG#yEiAmktJ_URQHtla)(S6NJw`UDhbn%-G58QGH4&K6X<6j zkJo1VhlCUp{u;?75ni2OK2FF?*V8n+1eTNncXIF40)zSD4l09r)7iF_rF(|rE4K4r zs7x>oTFNX{zmYSRyt%x9bZoPq63ZR@?W5f9Sj(w?1@d`8W(N#v#nv zqzCslK~iYDR>u|t%}kUr)_`05c>`K)o~H7^UZ;wQlC&D-XP1wu{)X!J3y0K9(DhqE zI!iyfSw2{M)*(H&pK-FqmvaSi%)Rw2Ve0+%^21L19d#RP!c=b!Cl#9UvyDw~JkS}r zro%zs0!q{+`E4a?j%+zaXThc7#l9QdvMKe5M(bBmlbd;ne>5_E#qsrq+dn4$ 
zoP0!?YNe7a#A`7Iy$|wf(m}=!Otc-#^O7A|ZBp)r;~t-A$SstQZIrz^;N)ca&`r36 z=64r?e#d8BdIzQ-EK*)ykop^jWR!xIwV;?Uk6Hc&%(Css*5=R;hXkkIyOY&c`^;7R z1Rfv={I}muQ(ps$^Cl(ylDx~$GB7;4Tq#`{aR2@u%{(;G=80IsZdD6qaR*1X1hL2B zZECF@IgF)lc_I_VZSOutz)TbmTzajMMA!~(;#mB@`O>?*HKCOVd;| znSW|`Z&AaVkJLGuCfCn%)oXP#3*mDKu09mWE@$owR>?Ecrl>eHCZ6&OI>Ej(F2%7i z>xu0reW`8%SIwSht3lr}ng>fX1CUPpv2lR|>l5+B3)~Ziq9-SOZW9k3dUyZFV_U8y zX5|urn~?N53c_C=q??samGG{AcZ%OexQh@wG&DqPY0)f!fA>>x|JukUZ`y2A7%0fg z%WDhcyQ~nMxsB!Ur;BXf-|{}f4>{S>UyUY=W_g)6!X0Y^1)H{~r)L+k%?M+bRG+^% z@;NDrowdz)kUWeXm?w*H$@4fIj?xl-Az9{qaIC%W&lji`&sv5HGN!7-`r$lq z4n{+`O`^Dj1UX`>)foOxOxtUQQBXMz)7Y?BY_?wRep3>X(R?*WfPMTtk}~M0z(FEF zK3paJB;pB)Z#i+A#1=^_kVq-N&@Cc~iP8f8f=uWz>5;QMv-O9`MMdY(DAX2}=VIgN zs0Mu9)~^h1|H^;+v;W(FcpYqewxBcEI{p{`(eeK$z9SB`X_>HfVM0pkT6ZmZUD>)? zZ-4Z!IC`)paBf`+B<}0?g^7Jz7wYCQSnc4f){vWCC>4_xK!~A?5cjH`lmmauN@6`o$l!9(8+C|eJE(3^yNi< z^cjQ=! z-RHh69`gfd^y>F59;>9XDC`IgLa7}rQV*38Jkn=@ASVxbhGP6iE{D zAgrQ)Z0GxH?_%E}l*XHs;JfKqMT#R9!gNF5U>)H+mS!_?^!=Zo3Kd+uBWs=q3yEgU zh2Vu&{ZPeFL`iUrd#*k}l&+TBh>_N_i1{ZmElEB!NYWem(SiS~u`7*gB1@z0OSek1 zGmU@;`ve09h=_pcBs3ZrwqOJat3jehb{#|z2+?Nnu(UWd1Vl7p3xOa=lx7_^B`%E- zNP?^;3PMX*B1;lNAek!YK6B>Z{H*g{y?W=oTi?A^-~HbEvngUL0l57C-qP$L>&yI} zQT{Z3?PmDNcpN=q-j?8rIKLjI0L>~NNzyD5{Q<2*67N0udmxWMP=9*b_yoQ7c1)o7 z%2kqkRYnUIMAc`ph7LNrsR3PI`K3t8Mr76j1hB>a$8Rlf#;mSbam@p_TRG8WlXiOY z#B=!3yzK9h$3CQR8DZ|q>S?9niTpO8duPee%GsoA9rN1yI`Kl|IK0i&FS(Ekn_hd= zy2QqcirnnePM5{N9dHxo}hySISQGW#Oq&*hSi!xCX^%>@tDzy zL6j|NKI!7AeiONhz8`lFqB@@pie>SKns^l{5d?@mA!ZGZoQ85z$HH(amRp*p&&Y>!Ic;kB4CX@`{2p5VkAS6>isgB42o(uMwuVKl z_O$v|s9WW+VLhpX%x4w!K5f?83MUFvQ@Em$ETOBMTV`V^_#JSP7yTg!Z&;wzM}e1B zsZ_f9y+@LOi4~{7)GB`18*(+jd@qqNr6*H{l~bmX4u2qX`DOtHdfysH`!CL2?|Ph@ zxQ!a2p1o~4G>v3t!_Leh5d7!p7KCS^I&B;=8Xu=8Uok8QQVuNUO3P{ih>a-w4OP(K z8=sbstJkJ4#kd8IB6$1hi4>f7#bTcZ&C-cl3HO%1ibpBBZ1{w=2?`^h&7EGGH+f4* zmcKzUZ7}xQqs5k}na;37z*GgPMeR?IBL6LGHuW18-C%sVH|G&go}ust)#RvtJlSg7 z1HY}_I}r=b_G!@erCQ=Eeeyyr?%=|Ow-V6;mj-h-Oa3BTF4UkuQeEzM`Mpuu{Y`;3 
zq#>_{oR6w+-xC!|^@y>-((-7dg8T-_tjC~A+>8m)?($4O19@KPPEjqRy1>oZ1+55m zl&5&Cjnsj_yeqmH$`(&bIutLSlITMUcA$ZL0XX=om>!wdYR$0qXP^{N9_5h>%iu*LFgVZ#Nhz^2E`mq=84La?ADBgCvWA^JE7Pi{9W z-*1<0;ID~lN7l?6MhAcWa5xmDUmarXuMA@E45w6wflIHs_P$2+_ZhmvHQ?%-BvIY= zOZ*;0W?5YsDngy6!POrdBT3x_w7=OkD2l~>g0egNh)$6%ET{ZNt)!9XNQ)CXbUau& zam)F}=%BIXyyoj=z0n-gpyn$w=%wDGtdnqDQUwdP72uNsBKl1y4tqH6y;z_3F)1Y>6HwA z)~SPcja)9G6>An8^P;BlW;uHpy}Dq*L5BAUs_*P4nqKr*HsbYrVfrH8v+L&AvtpAb zpXv)6W8WfoxktK+Vc1idWmRR)GlM7!#tZtdJtWo5;TF@#4Al_Lr>1rLGr>g zlcOV=veP(Gb1W9tPB6*}_Il9LLhapd{&Tj$Y2j-ys9v=XJ^!Z5@UcTeL_`F@Uw2!N zfng}bdQ)ip#anfNm#rT7ve=|t?4NoC2(>Qv41*uqVF_>u`~)~$C8Yj7-uQ8ewJ#8$ z9c&;>-3S^i&?nR*G!kh3cxHXf2MD>g8`1b#$o$*-&+P-3vfTa()hvbne{WA)A~82O zDE^6gQaU>wp1}s7bnBFKrr;e`Qgnh5$wtGS;l8ObK=Xk_RjXc1-BuV4-^KB}eh>75 z9?Zra_FkQ+w!$N(VBJIA1{}Zr4FGpe#_IhK0b*XKk!TMgo(6&KH8HrycH!OK@5@7} zeQ^a$e}1-w2;isA62&wv??jYm*gC(6Z!&hxYgJ-Kvl4w5it*S1x4|MUjnvtB&Ihar z7aOkhmkI=eJoGPBrw?5leEN#Vk{a^!W%wT9svZ-9VGAoYi5o|l#xSE=&$14{+h@84 zrd2M+?&!7tmx6{i_|(`s$L;=+_sWtbQpL_C8MliFd%XhZOcWa|Z>XJwq^RMXv#FP@ zcIzOTvQezaeZTG@hW{2I+&K53vooXaLa4hN06`-iq9*a!&`%yQzM!6TG!hRz;>{zW zb5VW%lo}{Q^M&{KwxzQM+gMci=CW?pTj&A*3>ImD{SVB|&0!XVo0Vb)A%J1)D1B4s zZI1?amYvMqm`;czptl)7{z|8peQr}9S>MXHBAoxHbvXFgE&J+#JfiGzX4NLHb~)~V z%S9B*@JJ&DD#*7C4wwxFMZm|en0Hz?-FLtTWaZ}RdtX#wA0;jUX^RIyrV_wz3lGR~ zddJ4b(lx-W^4BR-FTgt&2pC(LJfKo4^U!6$HpyuXh!Rzli_h<{u^YR33V(b&>slI+ zBh{0$f~!W>H!2Fm{=Z#esK=?S4`z|m(qSRqOLW&xIHdh@xgCGoW0Jegpait;oatqK z$7ocM+fwhV$;hI%(- W!up5Z<<~*}-QtSDp00HY`T0K|gRg!7 literal 0 HcmV?d00001 diff --git a/EdgeCraftRAG/assets/img/kbadmin_kb.png b/EdgeCraftRAG/assets/img/kbadmin_kb.png new file mode 100644 index 0000000000000000000000000000000000000000..40f6909a9b52f74256db955aea6a099709af7ecb GIT binary patch literal 21912 zcmdqJXH-+|x-M)%0hKDfs`Orz-b56Hi1ZdZNbdsDL8K`qC?$aO5~PJ1LI()~3Q7qO zItUmM0)!&Hof+SEt#y8!^X>KRvG*9~oF9ykIcH{h%Kh9|x#t`0r)p%^n66#8aDhzy z@k8AU7cRS9xNvcY$9X;W(MKNt&!WeudB;Oa=j&*^rPvzbB90v 
z^J9G)sgKI`&+dOpQ2wnl>O@2HP?hqXCf%@g`o&1bD-r`%`ufj!3MeHfIY}cgn5dHw z9pRRXo(8q(j+u{{mnzPlo}8x6`r_PuH`YDpWI^@8>n@fq6kP1b6TPS+^<<4fPW2>C z^>5-~XD6OtxYAN`;21QTIyyB}s+zDvSLsgTyZ+dz>FF-fGrT!IFfCjY*qn64*c2bE z`V?dXnyzfwV9I^`XQNS{179Oq(00BU}%g zpFR=|Zz}BW4fwoUlz) zKzW26PovR2R$*)VH**gj8n*`rIF$m2-t&1zHDw3~(+Vv2vUor7$C=JE39NYdcQK=a z=5}&6*FD05v<)!4nIfs9C@HS;irYklnF^DNU)_(W?CQ z{1~=tmS@{(U5pM{I^fzDUe;$jO6PY^et%FRyUFwPhUH( zJd|E3N=6OH;mrac$sBwLJNmGhoO{A~a& z=13f+MFrMr77l3-c{sfmemQ@-;n!7|sK{iR0-UOKb1N}S{z zPmaG$a0qXn;Mw~8nNaJ=c7KiF*#`cs@^|d>d!Z)>o1GM)qG#G=Wx1|YBa2Cst@iV? z zQg8A~L0SeCk0U*TmzcfSA7x!Of66HES=O~bUAeiSHU#DWVBjn`FI$l7vhRqWAOP22EA_YZ>ZTQYt6klKZx5Pq`jEYd_vtK}wthbA zJKinYv*ibTR!`bzb~93D{N_y1EC{+|`-P#4sM$jt`o^x+aBwT|K|j;7_q1C>ci*X3 zFDrdC1-nI{uV#EjUb=k1^Wf~aJ<=#s_a?5& ze$B>VRpxGSD;DeRkC!Pv*`;i;pC9z1(UfdqCtI7pN3>#u8fL_wyuVk9@X_=M;}WIT z0LD)n!zXEqzI3`#8u6x3A~`me+za;tfB)Ld#s~Afcqi0*L^QK_ltM4mpmef80dhIj zF0@+(?xrlXXe9Bah6N6w_c-t{V^3L*1+Q?bV7WmrluCRj;UpVFd1Ez^U72>oa~ft zN*6?-%xmz%r5hO;4+Nj{>wa?q4z6w1EG)L2B;5K#B8}f)ZAP%4+#*E#I9gYWptHl} zr9Ic6g(&)->t4WYjv<5uay7PqXTb^A#I4;k-H>mDq~fhF3Ax7G*)Ud(dAH_hH#rEh zo2zvEhKcMNv*~Y^o_L>i9CVA=lv9TSA--EQ$|scQ3ID#-oWE`3*%8h*W4P>dQ0tE* zkd5aZ>5jenss!_sGay=ZuajOvJfM=DAQsAOB}cI+dru*pwstj0lID*v4U6?Q(U1Dl zD}mr~k4l@7{_4PPG_(8bg=L^$67g~4mF-8XvB^XFrn$IkpA|G395(XVpvsxW!W)1Q5}Bw|_Q2ui9$ov+Yxe`oxnPEPnMDjkZkH&Y+Ug*K{!s2$l$| z*0g0|60>8kFpzN^Ib`T6kqRtx>t$D@LI=63bwuzDLxyvyRcv2pdO2C0DwsVYUvcra zo}c0=Ox@}Y%pkKbriGAKhwmhVMT)g@*-Y>0?yH3AektC5;@6&L5y&T8RXn$}`z%ff zNph3>$?NH_Su%CK(3@cH75C2pBW)?mu39B`XlMo#F>t?*1cW@*D-xqKqNoCo4Dx8I-k=$N>cxv7Yp4?XtRlqLq& z(cPy3o=fjQR-hvxMB7h^sy9k5r0Q77`QV)i)H_N;v|k1c`u4Zg4DAv`H}4``;H&YM z=G>#v4>l#%!N$(&tLkmNxGy+h3GojedWF{WQ)p(;ozx<+>c%uJ9#TDt#k~^FA?jX~ z*=SsXc-fGl7{?=XTPaW|;g&V(h|5ceUr@CMeejh@HHnJBb|=jOyDDSV6h@Nh>-54m zyq5D$Hj>@ z@SsGg->Is3dF1i_%gf}ilYbS7x##U}cd?&&x{?)RQ;uUdP0x18A|<#-tiQ6ev%I{2 zkWUd(0=hsno{tXLc?^FJL~5oiptpttRqT@8-IuexpE|fa_RtP+$YNrU`qWshLD8H% zj8X;B356&f4JR%}sB1cI{c_(&^z}f!nlrM<{Y~l+wXFM0BZKiTMdQ+7 
z$8ei$nlt`vfu-eG-VgUcwnDh71Q!Eg&9fv?NCM{Ly)F(RSC+zwwwl*a+YRMcOxB6p zFl++d;OHIA{-5;in^6)2nQBbuK+ab*$M@om;EU8XkJ8F%pmXRL_;;RE-=`PtxiH*! z{}HWE2B&l|5Kr-5NhGK)U?KZCv8^1H;qKDiCvV%JJASX7Gt=t9WA$W5ZS|iMKFQ_L zmEk_iDhj>j#LEvo3tf3{&INh&oOy*t@?W}%k5hk)J`@`}gQZz8nKFnZw{tEBYeQ1I!NR>0i|PTLyC&caNNUr*DU zPr@7jTfZgXyS87!aiELu225TrY4_`c#1(989F>|KV6Jwn95+VJrqHG+mR zhhh4Ye@;Wk>3U6p2JT|Awy4~*ngTG{EOUlc0?`GokG|2D6tSfw$2E3HIl$q)gchvXsqahhKaX4 zq<YSs`}g&Zf~)P5EEzBRO}{!q@gtOwcrd>}Otvs)@Zf~~|X?OwHp zzERi3QNne*G5KImwybF&#i3K7%TL(?!ii^Sn)!=KCi6?AHl?r+!39)(LQ&vXG(r{% z)~{$$j29!n_ikl;j;t{c-tv)E98bf4=7=9ctLJx`YvlFPfWi+r|3W+*lx_h7I3a8;nS zuz}ZHAMI95JN`y3s+@$nIVCf_!SM7nhHvCvB_OKQa;Zq{(4xKL$jdI{>VA=qR&N^y z%85`Q<@hwD>i4)OlmNT1x+LWv#ww(y?IS4eV_Zc z74=Oh0UTUT1rD(iyM4{_{x94Bjj(C!e$awd^WHNy|DW#U-NV|!y)I5 zkP9I@WVmbp#L3C7KTU)JKwPd263+S){!0lU(3qv-UIKdz`?uCAj*8)0Ol_dG1nYS# z0>aFyh&UfuND^T)1M>nm3N9M@nJy{88orKAvm^&HWs}5kvWl)|*DfS@WNospYc2@S z>fXFkj83ZOxQeu-o^UkB-LB4GVrRtIF(cvh)*(l~S7zHq=8gdj#}HO26zu8wdsr*F{-CUR%ib$Hj&w*Rm`>a3-OJ$UZ!j;LYgvINqTMFd+H^r* z)OoULDqFYv2A=;=W|(ov4Y}%??o8KHEo9LgSo>}+Pmb&OEieH?fXm#A$G~>SMHH9= z)47t=)tpzTO~|HeDfSE!?+!?Fe9e}Vn(^!+v;L6%f?voy;pLNC+(TpNZpf}k+i!I9 zJaKOu7HW!WKi)6}i+c_|=qcjx@vHbz#~!M8aM+Oenj*BSf@`A-gQSGWZ-2gc+!@EI z?yRUX4c^mLdu%+UGmPZ{gH3Jy1uPY5L~nA|J=~)`wz90h^LiMp&RcG%uK0UMr3+ve zSSp)W%h1%cV(_qflRKo&4(I*9MMLoGixKlChNQqxfEituo$n@Ox}O(;3YidGez3XxuXJ+KkfFdrJ!2%tCA*NnSM48j&4%5Rxp(%ZCZiIBbY z<8}-SS_b1XsIVV@%vZ&*Pgkc@jJL*J-+R{{*+Q47uxFeuZl{qr%h)jGTwd)sTp)Z` zU-tz-@wo^lI?qn>r?SEWKlqyAz$^gd-g21Q19DoD+~b*L+5T*oPxVX9FyGacfk~2; zxVnDU&1g8N1714iGPY-MeGX3`&-zPF0<%0(q}J!B`*>Z|6t`=3^-vDwmG>@{D`xng znH~@&e3>X+>IQNJAG>?d@#`O_ADudGPM-_3PDuqdb%`0BiUG`!i+l56JFR1e5j+9Ch7H-D zr8kA%Z@7=l2U5r=I{hbc6FJxJ+O}Mk>w~-`X6I0i=T&eK>?=CBGN({K1go8X-Y*0Kokc@iz(cb7`mK_W zh^{ry54%D7PDZqp+InPN`j3_b{9M|$_S7`~?z=J82ek0S)Jx1@Q`4n+6`9KG@V@S$ z5p$XC{NB-0y)HUaSE^i|6_0$g!+DGbU~(7Mn--mL9IP2u4m91DEc!*r0&NOSlWo*WF6j0`=6ZXgl9}Jw=EEIhtJ!j%D^I zfBi{?&0uk2+`KANilYZ=VJyltn>DDr0eU5={~Y$pS?8WY6FW#cO!a@qRx 
zq-+KCD63#B0`=({z)~lrVtSKoK4f2X>z&%-psKp7)ZI%@|$C3{UB!L|>D{H#X_9Vf$B+Z-IPytPoZxZWfjt zQ~LmSX`bxS6@rR!`rNN9*iAE} zu-d^>0q_u1(1f}pICGHFe^ajCKf%}7@75K`p()ikO|5*NyxYOqJClCVWZdzkvNELp zZV?zUFZ0WpK$3tbj0W&zIx?%$;<6A6<)>fB4Exj{xNB?x`+nfR{e?%);mxz`IiEMK zbUn2&_dwGAMq$KDa^%p}W*Nof4$Ii^we=wPS*iim7;7Qc^E@BMFRU;{T$o3*(Xqy0tMPr{aeF;EH3gLhg!7PQco0+ zp`+1fV*nxogTXi4oE0BTogD5gvG*KoEfJ)qMLYqph2>LOA8uo<0VuJxub#}^O*x*c z>sS|5^gi~Zq{BcK1E%;%2DQ=`QG)(h+M&Y(bJt)^ZRYniT&ji83r*jb?>mKNZtu`fKiGwV1N&1=L`^TFO}#I(CC#nxlr9~|t=0gc%t zijeqN*0OPJ(4)=w#3Zpr7SnOGS`Hf2kNYMyFAu~y#jUL#sdh2pObHjvTPG72s67Hg z07;nB{dsmJD>n}h5uwufag)SdVsRDI%QrIRWe^n|p(`eCstHWAdjJjWNl*(W>m1Uc zWo*Hd3xqYBAdFi0FGQhrOYum653lYQZaJ{q1ZGvN?Dtq&0Te;CL?`w6XYU$^-MWj| zMT4YfDCrQ0Pq~gxZV3zU-~c~z#e?sn)~HC&F$1GiXo*FkS|T|hl}Hd^JK62gi=;M# z@;gt0^^+_Jna9%6WfM@4mRoF@^I?WVG=U5h-44ND$E_js{O$JEs<*?=cq;b4c|?ox zFmIBKxR1-NZ%R|J*$6-ugf_>?gV(qGW}x4tG8P? z^~**Je(q3KxZ`A7dH@F~#ALaLUmZ|!BJB)mb$EsSnY#Wx?DQf*LRMq(dgy<&@?mL* zpnaJE8kcn!nN<8>GJ@@xp#FkDL3=|g6jAHQ>B`>~iauc{Be`cZgHEfXr7&1IPhMVA z=4StM?mG)7mix`)QGnXnfWWMZX>=?7%9gDhKZ?TWaHDxWwgfF>LX@^X-Gn*ZM!X=} z1pq`%okVnHWVe?#w#q1JZoDs50FY+o-TjtxAWf%x(|$pfJ_15eW`5R8U#PG-6P<{K z8Mqb}+>Gk@H}>@m~CIsqL=?=Jbr(Ft9fcCFJ4_83E!b7uDf67&@Ij8|kNXD+Cw z;*j|XHhtsnT$tA}xDP(%o66~w=YO)*YkT^t*{x=yH%zK@e|vtu9a*~z%tRw#vB`(Q zx}nJSTj_42?*i0V1D+X&PB{T+tL3^Ymc*Hu)kj-;DgLg_qY9=TdND}cmZ`iupmK}) zZOk;9Q%Nm9qBp0pAIM@Hx1A^YwBK=lD&}Y~4eevYu|5BMJG=K*N{y!9^4PrN1~+E~ z!E(T5uA}@k)wSoNPPD##K!;P03T8oJJ9NZfhQE$N@xW?1U6{WmvJ*jm%%l_=989)` z%L&?ev%uFzj?ySAN&}^OzH*cmUWSf7-$E z{OI|^n8e&xLziyFD@ZcQiCch%%VvJIRU}BFz2cwZ)i!<+i%+b*U7bR#N$EP_08TVC z8HjMH?;@A`gl+8w#%3kxP#<*`;6XGTh`hYq8K#ex@n$8}+HLpKwl8IoA+|W*$vtN| zFCGce1vS;U9fsUiN8j%X+bJO~rcl3`Qle^8HId$} z!nmIV3{4<|4_H=O-Fb1VU-s_We=R9bb#uG*TYmH7WwBc+4ZNKCKT8|8NkPtAHs;{R zB|mgc>}B9w-7hpc>6LLW1TtP`cqI{Wdp;u? 
z>D3`qYC_8vkX=%0L{E`7R|#cvNwxPAB*%Si_in>G7>@gUbiGf*;jO!GY`4{o0zJNG zt+tV~M9_&`GTH)fuF!|qvGk*rFcngEeJRFQv##UmPkv-gdcedrC))jIzzsli$`s=s ze_syO2n(xk-7oUbg?Z;0wE5>2xyeY4KDT@fEEU7o9__20Wu_^<{_zL;M}+6Y(*QIw z{uzU-qOb)d#fs)#jObqL4xvTGi-pv0$I^)^KEjjFavwHdV2hOJZ|xpZ-Q<_Ns29LN z<2hoe(*5p&S~<9;g|kENO9(I^G(bn}@S9G}VFMX&b=W9l{XO)Z^{y!7Ti&IewXw^L z6`Bu#?rB~(ViM>*LO}JMQ*RAz-6EmUkttp@b8BU-+MyurtkZ=x<#=ly5XbJu!C!t) z=H_-?kS_I2IuZ zaBM!>YuCOE8ZCdjUcS*Lg%B~HvI|gfT)5+z9kMq`+mV^D?Kdw3myhc6`#7xeaQyxrh`!dQpR8Bi zd}<=cSI;gJO1Y<;*0Lz33u|HGpG>)H9pO7!JYil5^%haPRmf~vcJM)MtLx?HbCs7) zDS)!gdA0_kj|5j<#(RjCVcYf}3GGJ#O!MTAFUdHcRO=!~Lem{0s;m;u zv<7F|VXlOA<3tQ?Uv!`3Ld#HxsAhN4JFCH-h1lv7e<#KT1o*XJ2l?0> z`=E(px{n_Pru?tYhsct%M;jek1?AmE*m3(NXDNk+I(?Z3qhj-8F^#Ie=9~zz96-hT z6>Z|vd4=BRDfI1TFhF^F1)Fd1%844dtPYSg-!86v9a(4-1^xK0`Iw8~c4#4@LS5~Q z38V2BMM5x12rW-}qySsPYg=*Bhp01Br&5Vhpl^HAxxkuzy0Us*O~DKu(- z+SUpAo?6@mrwU}?@_YC|KH*9QR#rbSEQRc5xxT=1cph|rj=p||9$@Azqmf^W zE*O0JbeZ7F0s1XLN~N#|5uU+S1u&cweQA)+h`whWJQa?TC^i17MSnJKPxWN+A=IP! z<$K%-AC*C1FL^2{+^@NG%!2=om+O@RcdR+!gJ*H`Dn|WHX-mez zsE0<4i-)=<1T^aeA2U}NNx?g@_UOUfpR>qa(XlAMcM-(ps@C*!>4WE&fGuB=EHzoS zi@puBFL3hqLgV45MRaYy&knxqBersg%fNF%mZO0{NGg&)#7;TCKRdB`K*)`yPM*5~ zpX#~||Dgur%v8+E{&^k-$P`8SQOyFr6KD30dLx_JdH^pjqCk)-wpC~Ab>=gt=m}}G z<-N9NUy{&wfz$JOjGC8r?MbO*zApw$W|CqeU_=RlwN~%DNyy1Zt&a(*vxF16XmiGU zkZ}e$SBlVT*{Dfr%g>KP#l;0$n*iBktG6@L{cc>KP&A24JjS|Q@~8njR865YxEaP#r{Lia&f|IADKIsb;r@iz(8ry1XaYy zA^?Wa2d?kj?u#+uAvliWnWcmgNF)@xSA|3(0j>joJUv82?*suUOe`AWLQe z$Wv|r(t@tj8>aPz7-{bHb=rBoc2L_Tgq*lj0j}q7-9%l7!7leB zr`?$Wl9Rr}#|(U(>2zP>#oEMy^)TQvumpNf*ZW4Pm>VI}P8YR)&w{m8_V+IQZyIB87 z&;+0{E$wh-6Y}**J0*pHU->bp7t?M=frjkO_*1}U5W9TOcXbr-{q>g3gaJSyzAj6h zvhitE&@2uGr17R;7hfZu%0~}C4qGG7o3B6{K0>Ry+H(Ol1zkr8kl0-ixs8cKKE);T z*d>QP%c{s7aMN?U>oek>Xabk_?;rpR*C>CW zi<(o&`Vx&&AlVjL<5pXX-8EkcRzrU{axM_ZP^_2X^^_&?t+~F%UiU7#T@AuAi48_e zcDv}e()()*1FL>W!}!|`WcM*?YUL6@tk^Qr_KVFXuP}K*)^GqDZn+e)=X0LA#Nsbv z*1wG#$qZ@;W*w+Mun6?V7l_yyglg^m-ZyBcD8i*}sGq?XLyAx9I>2Y`vCDdZr2&_v 
z0y{2i36d_|lQLalOX0-AM0_XOon2h63keCSc`QDcHUEkd^G%|2uO(|vP6XiK!4x8%3)DX$Lj_75PNgGZK+Vwxuf5b zmk5^29sjXCH7*-K*P%$}(b||{$0wVt^zjJhqe1|Bq<7aO(d(sN@sl^V#h2e`8dexn z2|$RVG)fp?eT=j38Lq2EL2L61QZWm%Hy;o%oNgr3UII-hT6Qh2&k5BIICFE1GXx+F zg^Stz4)u#*&jEev$#H^d9T+A}{^XDJ6?22PzYKUUE(18TN4E0=`w{pr9e9=tw~?TembV`639fiSU;51Dn;F^Wwf@>7t~1a zUmeQDQtnSC!)*r^9N)%$jyTmBcr-)rHy`46LU|jm_BiCnJ2efL3d8=>an?TQtt3ci zhzY)}zZq-ygjcI?-W$H}ZrK-mU-bfL9Py!}3PFJc9e+Q)!LHZ}uooS~18>In0D3|= zy!v%e$mRwPIluULk=VzHY$*$104qEJ4WfI~@RJw%_|7eALz6@eRjOlv z@l!R2ySiMa738;!47V}n1=l`Puh0a-s<5(^|~}VY~6H3V20Qw z3K^|Ay?{8PX__hu<-A7L{t1XLiAw1cV^u#BHmo#VXWBMDRG{~cpr=R;J4F2#-<;M; zaIE9$;(MBT_UURj9ie?oiB{DN1KG5|4fDQ)eXt&n3B zKINRXA4t9477k7Ss7U&XG>oDfr$csJ2!s1q{9bXYc~nPNKi5Ud9`7j#A$`S%ZY?=}q(hD#rE9ub_SClq4m7nhY<^{*=KHw^+`aReDwvE+wOObF%`wDNiP zPAToO25&h+X&}}6=gduh@@}ix1j#AhGc&+tev)xnG4<)l>CRaRV4dfGD5E_oU|4eh z3bb0MLR;sett=klaY^~wwF<4nw1yYA>o;ixV0pTau`=t0hFDrxec5>%E~%Aupefy3 z$1X9WW9ZZmn|$eOC!jdxrfpffB&03lZHTY#(YulPc8yuNqzMW+x5>cfg z z6sjqFi8lQPZO-spdNo`uo=SQ@PdHywek>8(k&lXSrVWC0OVDd0Um`gDKl*DNMY|8RrgL|6A7ax0ObR96Am)eZb5Ck{Jppa$fYRWq zpzwkZR>P(#d>5NBH72S0&e}5^2u8{94|E&CG^FX+bBes|x{3Q6H_Uof z4Ub&f&I38>`NsnB+f4Yo3G+6=40e;#K@FH%n|Z{1e658xyh$G}V_uf<=)*Fe_NmVNTwUi90;_PYZ{YLy>J zsn4FrGI@3A=Ho%0Ams{Omn+LgYdt;9`ey-y-G;)Uwed?vtHSRAM@i9??-O_s>GG#0 z1OBdUySBD-?NGBKyV-hvUq5^;nQa*_2wWW)gvWplfhE{@@U*9W{O7z8?D*xfpw_+t zyt$>S4{r^m&Nh!DCHYhN@@PJE@wAE|%Y$2)aF#*q^)4sm7>*(;j31uXPw`W-Hw0RHJI*rMi@B$xY9NyN4|$6elNdPwh! z?j=cT+8rAyxw*@A6STeBI;BOgo@#3WG#A>gszfT)g>r^>X^uz=Y$$EijPlwbU3&kk z&nT-kLYzBA;rwK0OJR#;<+LWWzVcn30GFMvVh~ra-5qNE7RZw#i-pEqkn7CM9PK+-- z*#GF&B+{r9Ys=z}Iku;4SJ0yIOyWasXyA!E0+IXM;8OR??ogSS--;vSH&Y5h)ZvM5 zGxpL3IyxLeUFrCPTRmuc?~0Y1mgP~&m-xfH6rP-Z<2mj2QYPZ;6LTBf6?*!yQ<2vL z1p>z`B`{05!wsfLi1Y+J$#$@-ZVB81H$^;%pV>)zfR{6odBQ1!R! 
zxGe@rSL5Y*Ev+!i)IDWw{th+gPKA#~#YMPIFB8=ant6|FGp!aZ#Kx65LB@m`r*B!Q z@K$G`P$EG%tIh3Vzj;`w*n(9uN=GcO^n7^rA;_qnQDUrvBm}WIw%xsu#K{%fgybZ< z6MT@wO$rp}Inw}OYwQhO=C+s<#1;UTE^&aHarKjQ^P;hEuNZ*S>c5Bq5UrOW)Hvm> z3<5<<_FsJ${wlTWLmMTAno94cpOXD$*S!1hLbv})*XI8}%>Eyu%Kv9?t+?0(puVD#eq(RS&x+&cufK+3hMJaj;@Is+b#fY zE{T)5{=5F(v48c-zi;rpN{5(y<21F21u?q0RJjbcIXOt3Kj^$Y@uqxHNfHOFNq~P*OM48S;MU5S(NU|)zhjB| zV-8YjQ|reP^!%rahdm- z`xAp?^Fz$d32K4`YM4GU)oG+y;)JPsUUgpNGOg_dort-@%KY;_3vPvb8J2XH@eH!TSU1^KgsB^YcspSnZR^loek1DpT%a56Vq zmwWbao}u4?n;#kiex`_!-K(jGK#8;{fX?{{s@PIct8u^(}QN4f$iLC4K zvffC?bW91pBdV`Ew+l>f-+!^yi5D!71ng+x?!4#UKuJYC3_khmFlKj>>tOJr76O6n z*)(i#mDB&ZJdn-22awa%fCK!~1&6eTcH7m^;$6JJUgrcd(2J}Jpr!mZn~=9YmDU`v zAvGKgI7(7&P3q|yoR5J}!=!Q5-0}a*6s(Xx&5|0}qqSZ?Zc$^S^|m}x>|T7L21U<^ zS=j0B);_cBr8|j;EW_-@1Lz_i`^zS^M{Vo-{-Rf(rQFG8R~Nx1qh+z&n&gwU?G`63 zH6RA>!e6HoP#m<`c_m`q&c`Xx9rH(whX#>8~hZvQi2s$h_G_Mj+)$cqaq z0Yx>*%Yt~)RKZJAXUBNyyh20)_QOI}AM8KDyHI!nLb6o14(Hsn3bW)9+g-+KZ(U=L@G2W1Bm# zGW(cf9*ymNlOZ^-E&Td&T?Tib^e!}O?`2({T`@LXQM57T-y-m^<0<2SDJ07iuqXJD z63rBjn|W(4i=mtSc5Skj@2fVw+#gu_r%(B^s1ke7TFtKtEEXWvy1)p6l=j zk5ZHBBl!D2O@XD*pNs2!d3v+Dq}tCYd`SYFG@-?$)g`6{aHPWU7$b9EgNi)5#Lgpq zN`Zv~{l|=528naXx2d*&*>uav?3BBu3}QPaxbM^U3f9g6VUV@I@ZhYmhyCO`F=#N* z&}}~J@;F5NhrG1vR_Fq_#=PEeAkw8*;s>~Rze~dQcr@_wW($yYcL52(SilVo7YU(2 zVC!(I`s|?3+{&Z%^c~v6rECveA2JsAoNsPEhV_N9F~U)2d7cT|RsYzI6yxOKfj@Eo zXn*=`{K!uLC|8Ck%M|}_P01}{Zl$#;&8pl!9|R0HKk{Jpzv`81dgzEvOlf-E$o%*y&&MvT7 z&)WEZgqpff23V#v2yzp>{mPleIqn4Hk*X))St|10Lq%3HpHn=fK$mbTS}y&d4F!G-Nz{nJ63cph$&gO zqE&3`k#pyFWdVB2J+qT%?NWUamudYKzlj)%k1#M537d?8J7Y^+>3S_-YS*sj(T3lj zx)GAGTT>!cr>)EPlL?x3W&C~RtJ2*&218O)j)ezxB7I$oLhrjNAEn#Qj1XM%rXG!z zX2&_;PV}i!Xzuo{uRK2cIEondMi)m`xw79C<$j@iv7RVD`%ig2|EPNS_V(3X_}fwe zKJ#0Lc^AK(WL9DEhQO*Q6j2-WOy?LCCDuB2CqYUh6}cGk_8~UYN)49KXIiLj0y_T5 za&RwKgSBKxEIN8iWA}v`r$0@vMXJ&-VW?u8NA%WGZ!aTfX~`blpJI^m(f6Mv1Vtif zl1O~>z8-|OtMWoDHI_X;QK|VD9+4U&{I>!F;!OO|r)qJUhsO)=dUS_ZG3<&*l}+GQ~vOpM4wO{xk5P%B)k-z{>{G4?c5j 
zIcSwMZhi4vEzpNt6JOv?&?$KEgiGEx1a}>x4qG4ZIP;Z{S!^uVKsW70-Us}P=zxJ# z&nz02J_9hu2zbjzyj_oReqar-Rwo8(%M^E)x0>E*#Q4}g8Lx$km($vT9^1}$9Degy z6tDF5lO6|y^_-i|f6cvrrL5zuW0BeJJM();7P-GP{Fg6TYvyuy71R+kAIyOVbfb!DuFMfZF2&9Q`@pK5DDE4BvfHqFp z!AGK;rx9RSs@)WXtXtKZ>vfeYCFx2h5`a0fr`lIx%!cr{07#6j{XqUPz9O-k4)0%b zE}Zd3^Y^801Ym)+WQA?PEFh6|T*w?pb0hZdty*EYM%!UdOW@Fhz5G)@ayZUlgL{${ zX+3R^s15J92p*2L%K|sX{VP}ku28PXJI!Bw$EhFWftGShd~_BHY@Rff-P?#XOwt$A zq7}2ynxE7s>kPk-I*X+Z3U%01YTkm`<^?}7ttsf08ls=SXr9#^eCzUMP9sQaF~wN*rz&f+W=fr&*w~K2k*Tj_wUH!Q0AB|oarLG z24kWv8nfKq4CIb66m4xBjbiQa;+J!5<3OR88#l_MCaaO0@J{v>Y2^E;X*aP&_l_Qqq`u^WQb^Vg7CWpCYR!$N1cQAQG>a192J2u z1wjkLrl`c~9zseeSsn!-A3nbiSRhR?&p zJFVu2aY3kxn4eKmAj7a-ppK{qarTwk$rxDESNs01kp$&``^yDQcry-=)&5skPyUzU z@V_t$aFgn~++fM+v~#JyDm)>;D>5E{KyiQ9VE*Ss_x}e{-!)4FjwWftbE&&aY zLEM`qwHO7z_tfN*=k*J@6jN{n!lRWfp^(*4fOCoeUX`B0oQ6~YZ%PY0~e?Ubv@NP z_DNvsCd`f^HY&=!^WXbjE4k9Qf=@z%pwKdr_I)M4ND*^?+w_1rrE%74=-p~NxAj6S zTxz`Jovx$z6T&1aD|poV5o4TUCpZGBh0{+nOuUbHzVS^0%=~9Ml)bJD_IY!!T7u0^ zV^X)L-p0J;p(H)ld{6>x`u73q!p}3_T8kFA@-2bK>MEIdVxIpQ>!o+>xt^h^;H)V^ zc4p;)7=xuB+8ckm-QZI?o5LHkAl~V~o)0omc-TL7sQ-Jl$g7>^6ko63q>xsa**qrJ z0O@ax+U`++LU*gSJ}~wFX^&!4m+_;$LvnsG_)4ym#<}f;d-T3E)_1zWI?Y)z_+ob5wmN@o z^~+&g7xRl8^ty!g3BxQ!|9HNOU44zm{^__wvrmZKEN249*2Jq#M(U(v;;45!2Z_xB zDt)KU0y;$9zkWVQ-X&i02V7Uckb1w^Y&l4t=^=YN-qhonle#Za@noV12`Fq9V-gTH zXzE826MDERZ0)>|MBl~6-Xd(*Yf<`&Jx}!%gdJUreBvBF@89pLaHNlXA_!eDf=se+1#MB5vdxLy+koBH?zIWBX@c8s~j7dTARbZ z@WOS$|Ix^~hcms$ar|`Zh$$&0S#;q<7YQY~w5R2^XYP?$E^CpwQ<5b+x$PmXNJO>e z3|-LXZZbQGq*5A@P1fw8wARAdO54u&r_*_!^WXXR_xzsk^ZdTQ-{m2PuTYiii{9_x%+Q8}0#8>5Er?rs-b zZ6M$xByxUR-NxBoPsKT#550Ds`~Yg-F44o=9T}eX@Zxh*9dO*``5v~xC=)TKYP>|( z`jxMERfFZuUC;JRi>J%i^W~k3rc7$QGXnID9=9|pWFySiB(H!|wW%7B=Iiq2O{+Azu?J4Rgb27)RL56M<=7kwtI+V^h?cO%L=9@Iq!D5` zECOGicjyd_cPvhuq5SfmT|`kV7f$FQx@}rB4>xTd#LknbsrMTka~8J3y|!N$o@GJ_ zTwT=8sB0*1m4*$v9Yy*(2mWy_AyJV)Lq`o{A7;KUu2#2;WqRXL{*$w^ROQnb@lP`7 z#;!FE2Q>uZI*|ls7sH&=f4lBAaUo?y!U8+hUGd0*NWBZE$5K@AZiZsK_2uLHd8|E5 
z94|*$wUj0OHp{ndJ86%ZIFU6t=2dmCKH^zPpFK2Q45Eaok9= zp*OfC)HIhCW%G(^i!LHh7Jk~}==39MZ*GW%SEq=jQM~xKdP~c&w6DHOzGCFTnA~d1 zqKEDvq2qd$UvIs&HCa_AG7d7`p7KnzS7OOCn#YAdXKH#6tKj>PTGMX^`Nr^malJ%fRvfrxRIk`uroFIt|`_%&F*N$XpUn6&=eND66HDfFs zSI?SEG88_p=H0$uvUrgoZ&G<72#M(?!m7LFl6Wv$%YM>$UTA>}GYV%v_d}m0aTbhFCh3}2Lu(vn3oS(Vh|(ec0SpEcfydWY zKtWTG_Szd%nXBt#5+&^>;8QhU1nkFw~j&0nNx3@xt&)ne<&AX{E zp(eOFMl8CQ@b3+_$5lhwP=HL9=wGa>$cswq!$gMoU%YaV35T3f;*|Eb!~`Y^X+R$7-bf+zi6r=wi-9ytTV z0bzLMn_%|aEfxMzGn{+clt8_XPcBrg5POx0fI^xm4ECl0j8P~LU*+CWDIzgGf8l7l z13TIeBO26MEI%(SEg@w!P7pPN{bI&a5V)7S?5Sz_rG2w=IJ(I!t+2MI~w zH~(UGm_c^kee8@n&ACulYGvz40Y5TT4_tNvwJi&@Ia27O0Dd^L;5M5ZK<=5axd02O zS7v4qPSSY%7658G01L)Xpr+|W0~Wv@V8E-h%>j&M!I7)U^tye^YtI_1WB^0BZ6{zi zfQ#nS0pESZoXq@$r$nC-faKI)`vc{6t*a ZctBP*e#%tG1<+J~K{bvR1`v0sZynP5D5XP zB2DQe1Td5cNJ&EP1Oj)(z3;g9j&bfdd!IATkBl`|d)Hg%eCB+gC#Z*pIv37yojY;j z#DxdCn#LzioQ9t`@z)IF8Q{vVbe10AaPqma&ixY={X8V#i z5trlm*xtQMV2oos*5nk;3A~KR(SH{f5ukTfX2t(%?QJ`zGa9oVw|kF+M}Ja!m4bG@ z^zUbkHxesH5z9@Z@Tbc)qwpY@DrNVe{CXFE=wYvEb=yVdJl|26e4q4yN{uQ|u@gQ} z@VC#$M+b%5qqeq-e*sqpH6jy)?k77YSr#mw1kTk5QWkJoN9aL%L$1BLzS?}SqK}UP zPEH`E-hMD}9iTt8crUV^e(AA50R8m4<{G`Mx2Ru#-ePS%51gsre8~wM{?R`T0uCoW zFan);;=~>1M*87@@?!GIU*)w^ddYBv3w4(NIOR!i3WSn2@MR%0O_e$nS8(c*%I&80 zPHwH1sZ0i-xGPLll>k)y9;LEHmG{}#lO*7!0IzQ?z$1kN!Sm9j(4&P=E5k#E_&q~r z*LMGMzckP0z$zY(#`Vsvsi0N-8$%Li+ykrz6poDxJ)HG9SgswV?o=Gzz62D1dvMET zZKGFuAdj|7RO3bNO_Np{Rz1#I3iw3qcE<4&3tM{x<5b?qs!|tyzMT~wrv7Hn*Rxk_ z-`M+Mgv1(ue*9%F3EcMP0=TrY8DZsKgO5Zum8y+M4uKwlN zoghm3)OHcPOAHOQ3YdP4OKv%w^ZDtR%$_gm(Y!zJ)33OY*C#hF-Y0EDbkwMwLOiWn zD>TZ2Q*fIX6@S0=S;+9`fSopISgC)?tU{Xug+92Xy7x$`H-cF(avi!Ms6#3Eo}PavDN^-;Y|f3Y0pw2g)|f+F{%*@LjkJX`GP2$8hGO-^ z%1uv8Y!0Y}T6s2YV{4nY%V%g`ItH^fqFB_$DjU5KwCz!x+R=I(K^Z7pT}O4h*eL%J za_v>dMdR*4p-z<7^TqgIb3;1(5_<8P-&tOO!^C-u<@TswPhHAu+98xWpTDWs74|{e ziNig2sd_-wCEh?zbZY$hz+D`#{{*BF8mTT=ux)$1ZQJoos?R8tiX$Cv4tN$5HU~8> z#vku}anH{ZlyeC)MKF`_u#+(2LyII)rqOUC*OtD(n1QW+`m zFZ#z@g=&2mac>EEprMQz7ico3C+!xm=z`F^+RL(( zh0x;zB5ByfNP^mQu+mU--K2hp@HFZcYAzd}k8 
zBOl(Jm!DUjmqor}gm)A3m8XI?`*Ma2dNq8cRv|O<1EELH!F%xzUjQqnF6jOm)O58z zO!X`r9<*FTsIIVn^1%@c(c#5ocfbib5wAIf_lIsTuT8f_9bGp(N{0FUoVobGNOk}5 zkSAa-jJVy=5Ux9qb=Q^ng(LUp>@Byqya9XiPwa3wsEPMaD^%BBTw;sMIIBc1 z|J;4cg_NIM(c~)W!tAem&M^YwH||~wLy(ILBkgi-fFs_sdkN=X+4=bDqMq_fox@PS zJ`?ULjMf;diT^0F&k74;2AXIK@qQd7fhoE$)V#qlU=}4=@7!}oQlsLt3>e+O(rThO z0I?S#K+iJw`aorvY}hXdLH}K?{-QVkceMmWBHmwxTqtOC^fro6zpiH@L(b9c;Pz~M z*SwjrnisM=DGviyoK|9{!aBdA_hX26Ua1nbu|I;W4~Y@2XS8YFsi~|vm!!nfBUN8g zNApEbV!wy0Ze_pOg>?GZm?gFSlf0P*b+i%j8dZb8B`X z9)S}m7A`NRG)S&C`^kF=jS=H~U9hNCja$oNoZ8_Suzqiu3+3x~O8H23j z!v$+Dw-^@Ap|z)`p425LHESk1)?i!ysM#R0Z4UFr3y9PKz#-E0cFFb6<6_|=H0FF~frls-@u9a*Ct{p{^(8r92tTbp#HZhD#KfPi zW%_}%1Ppje)@+y|2+%q;iVBu}_=VVm_kVPxkISep1uSaH><|(p}SuMA3Aw&Z{J~qgl1c1z=v@mZymL z6qCC@P2!z|{$L`6G4xf!l*s@Spd9>HRmJlBuA> ztG6hY{~lNUw}34Ex{=WbrRp6X;Di70$vl1%PXK@|00rNcXw#4X6{!1fKmMPb4W|+uN_74t!j=z)jU&z6$jBL#C^D@)qd}I)`EqVl^|@v^xdH z6&e|Nw$S|6ZT`UEE%bM80sn}@>)sIoQ`S`R02F9{#M*P4oJ>cw^BzqZI$M@&ETZu& zlEAsTXOxP7H)?87xD||E$jPIR#~}2KFDm(8AoolI;6qHM=$Lr^%)dvCTIK}<&i`pO z{LiuS)jQP>mIEsTXJh1DffgBrgiI5vGo{xn9dJ+(Lbb^zXwp905zDL-V;EFs8r1hu z-mPev^R_<%nD@8@Q4?(FVOMBgwnjJy4IWC{-l8Lk0!Z zn|WHCFm!-h&$wp`tgO^-DsaIdO8m3#ABV8aqRFO#2IR zpFA;L^>F%9Az=70Bb-)I@gIU#o9Ir{z-8n253QC&C|uDU0J|;#ob=p19o|am7OT_` zrBQ5g)8YJ?=GN{tIYeNQW_CtjjsJr&0dQEg$e`G?Kc7Ydkh(a=;Jo-Jbt`+65Ox%B z7-#^6jlM~l1OR*%9qr8op1uvZ952Gt$|mf=)NXSQzmIlV5Y)y?s~$jJxGfUyX-Jwx z-LIE_VhJh$_F!SRnZ#cvBPEZX#i>%AQvpPV1&|0lFJr5T?-INq8k~FaO4dz}I)~J? 
zCIhT}%@ir40SonS5{JnKKdzr1K6$w@f#~qD*HaCj^(q;_w#t)$6LtY`giF9*TYy0; zusx=Zl9H@FL@O-lZ8f3;?d8cSX?Fse9#`Z^)J|YgR%&cQTu1;bSnLB3lJZAC{{(3HeXU%Q~v|eo=J*V z_uiq~^yT>F^RH}eRO&YSm7|s)hJAJXksOI?%0WxJ6Flw19ZPTxl-B1baX{KH37pjxQs%{)}*H)gLTcs77<~ zdaYRYWj>YkR^y`R``r~0D4Nl`WW>TD#%Ihf+&8E`rk%Z%+oSF}ukSF#<%dvm**34r z=f6SL%So9E!A({Jx3BZo<2pzik_=aC)@Mj-eYiTQ@q9c5R5yWyTW#0rq)x52zRhEJ zl?+t>fpu(#d5~H4_m?iqF&N!0-rILV9$#jlol*=Ct<>*gPpj+pAH2tqzdce~fvfJR zVr6UGvW>gDV?q5nfHY5UU#jZj>oA+%$hpr}+zbFX9N-Hq8e>DI7}WM!X9ArPS`Gj( zZb8Ah73U{)H>-W__ciiO8)82%fqqzOEai@xeb*z|P`2Zmq>z2|T}Rb8DACBe3*J;^ zhz+Tl(o{bLK7+wm1vmi^ki(@g!%q+RSrPn+A<8y7ZIk{)Si+~`+8;O5wx~a@H-QWj zfrU!m>QXzZs@og9>NPZ@cD$*ElN)!#<*()2j3N+|Tp|m|S9b#-=5_HiB?F|wtjrx^ z=yA@$NNuwjPed>f5g3K@A)FC2DB;NXlnt7z|JRcYY+6_)!6RoN!~{jHH;h!)H4nPrm(5sbQ}U!?Oylg#-t7T%)XUl6^nmu&@pyZh(#Cf|?g}To zL#3jnFbM`~IrK)%B|=>*m4xr7_GhZ-JmRM4hxOroW$JU|RO{>BsywV!^UA)dq_jhi zZj0cTV6{aR>>nPB}7aC636$T z3qCb*^E9n1-bmFFear%dkHGvVXI%ApE+1}Y9hY5bosb-fQy7{6aBkH0V{G1o8hkF4{D%{P;Ec#rF7G04cAiFnFz%q4vh4 z+WrrtNVN%nAbH>e1PmM<<+;tyvKK%NW6yLPz)a6ZZQuFb-GAxxo76X%tz$c611lQm zFc^1N70>rC%yS40fszTbX~@Npo^cyV{vAHCbZ@f!kI+iTHrRIelFS1gX`OIyno6vpmTUgDK zGHzopR>Gdx-EggDRL)_E+krRwa_kqVsYdjzy6tY`f9N3>!QD-@v%IoGt*Pz-BJ(02UXc@PmYk{~$tQ3H>(qA)(v;G5%C0 zb>=0cy^M-vvgNRhdX-D27}HLg<=V~b^;BlCw|1glI z`niuf=>c-gfcb%Sm^_uf^M;x!9aW5!=gMz=dqw)qAM=2+jL^68EiCXDoy8UL$AJAu z3*{YY|zk-y`JFZuwv4Z#%z-mX{oRvzo#qRg2{hzd3`EcX1}us zo5_G|37kl&Jq6N*nn8cW&fg_;qrAHGbt_eZ9SO4Yvx8nP9%6AONq%RCL~LxDM7evS zjX_?!sTD;WKjYkubjG8}w})!XPGgYVrkURp#~Z2fb>7O*40x}J43~{zE;9XdfUjc3 zvQ71vZO4O7`zQ%>ws)}=^~7S%Y#^NoYTCl`tWsuvE2%1TIhDIUZ4wQgt$Tlk0S)Ps zhwB`BPezY(cX{&9b|6eEH0G4dT%U%Bz6wtXM@aF{EhG}GyT(%sk0eK4FV^D}9!HzR z5?Q;6x%JW9G`a2Zw^xKqa@rkE-y3RvsL~k(+rQAgEsPx9sI5REzuXmVpl;`y8Wt+K z`Kl7E_`xwr8or0r{fL1Q526zF@N(s>w0%fSpbMp8OULUo7pGosc{Sr|H&(>`Z&`QL z98Z2mjzLjH!sQ~{=Ix#q8_a;}fm`V`S9uSwL21*no=VcJ7L<0O$M;4O#Am%xX}SM! 
zlyI=rdb8QVCeQUo$pmuxyse1jw@M(MuW2stsFR9`O`zC>Nmey?Qn@+hxM;fsYD`Yl zpcLjg1XfKQ?^rcC>aV&eL$yuDNpuwxGc`GB_oKOS&S-u~d}Vmd$UCZC4r1id%>dF% zXN%k9_)a};n9UGMVALx-+!;cPRbCOn4p@6O$J^N;rTPRkhEtF2YX7$h`ZxuCf=-0 zrtPR4VSb(pw@*#ZujWqyxy!kh+owJ)EGB(pH3H`z6Sm`rib2hU!^bh~k+;e7=Ow{~ z#AK{eS{Hg$&$>JT`#XimPn5Anr5u2`(k_+Z%Q$?V`f3f9x6LgZI{EK7JItz?wAkSkds& z%&%)0PRI=CFOtYKr}FB+$2dCVL@`gRXa{6y zixpd|MAT82oC0}LC+NDPV0zVZR3O(~ginvkYpUPpoE0B^;Wwy#O81NHiED;Qxwjym z6Yp*!lP#R=CsC*RKikp}CYS@}7B9B-xHJFE*2bM#bVvz+x!;i9X@xl6>U1wfK!PaX zhW|$Sc@uQknOD$zQm@(h2at{T(R+O(3kWVRX8khANlHQTu zwtE)TO{}dCo~@15ZYmvrtMGu9b-bUI^A5KmU&PlI-ft*2>c8#N|Y3ZyoQ3 z9@hqdc9MrcC7X%v+=&OR{YRsEr(DuTc)eCQj$g-WrioX@pgMiH-B)L>l$Ng*uu*J+ z&A;$RAKR3*%}E)FBHJwF@9zVah^@ueEq*4@ZNXlRZB^FwBc|LXnG)hQJ5w9G{Orr! z7|ES4KuOzqxwL0NT|W6#J9{pe=yjRPXW>es=3k49w_-elbH}`7$pX$OYEG3`RvQ8G9d<*D?PTp^DyVq;%z0TRU|5kYf#4Q{}GnB9PXOQa20ZLvO3z z9YjyUgSJMoZ4TOnte2*gX)9%dxUuro&JZVWrJbDlGmSUl-dgpycY65w5?A(?ue&U@ zW*Iqj@nm#{AhPR?#Z7QOg=Gz%XET#R2ktgrkt_%#J;*gKgUQ?an2>#zcl@35idif4 z4~_&zTK~qWX4#Z3Hc>AI6`rjLRFImV9p*KGJoo+9L?W1XUBqGMb|pR-s!$q8D6v>@ zp+`EFc1fmkKsmHJy>^In+&>9ghLpXBrSFdOgX3#GQTmBXZ4?2mWduYC%rgi+;_OM`mywlSr zbj=oYzg1znY-ND#`y>Q)>Q{<+SysNq?#zuFBMn!Yt2YTB0&^^RQo43N+7!%o#)!pw zl?Y#+^UI?WV;w#!C`!~`ms;^?SRUDE@G(Dp;&{cv!&cAB&8z}+eI&+$pB$HEe_#w_ z$ccJWN@>xFuHGK8_IC{t2fyif04e5CL;Ej54b?C|@$R zZ3`f0_cHD~72=&6Sa31EWz_K)F4>ZRXqL!j(o(2I?}c&n7^fe)SFehH!Y-T_i#%9{ zPRM?~rN`CPaj?_=ImX>^|2SrjZ@ij}U_M$4S7U24TMgiDOQPsFp=Fl?cqy>MaC<@( zU;C$gAf?dA=+^DqERmsDAXb}!KfWLQga@SkOvZw~jL0vvr9N9+C6GBRAHU1d)OSnI zxnd^+zZx1V!fXuY(%nu>t_;9$B!tqTyB>KDk${Yfg3hh$AJ&Ghz2ax;&RP^9v}&qLIGY6WV%;@mnZ)t@J@_nfvksN6^+#pkp^i;$qM*-{km#{+ zO)24+SXwt})YCyjd?CdzIN7){c7RVe$H!#p>C9<)y<6=Lhw_xrHbv&1TNUK49WhC( zgzjxL(_`?Qr#bw}g_zjd(($MYeKB+m3>8%}r^L(YJsWDilEzKy4lN%Zak|7Y_cy9P z;_dwL(dsc)aUosWnv?{j<}l-#55*70RuW(66xUxkJLhStX5-(EDc~yTP?gLaHB!dN zI*wf#Ct1XG@4JAVVo3yoUlL{ikq{7|>4F3eqCe%9uP77u4cCQZUj5LGnRUIlbv%73 z)M7#_PkHC3Q_c+F<`;gG!pzu12I5sD$M 
zm^9&FsBg`K)TVARSNnHOBw@{3+zzJ-Tm0+^+XIu%=6EF9dG@#+!q%3z3di_?7OuN|z}IQ?P9HOgL1*Rtkw3;`6KyE< zNZGYE9!_f$#PT&n>pKi6DZ%Q&%U`!9MM+DZnJIn>UcpMcFXVA#?y`(nh2XXv&F-Zn z{Vbc>ZKlejFQ>4_9%|wex656W3S$kpmv?CBwn{$z%Jy9fxzSTW#a&KUBYEtnyb^Pa z^;U@`@lxuW)4-u=7&Us2!cmz)S)iZ+{0Q)@ZyagrEhaD?0uwK%&KA{KM7q_ z$zk-{ek_mi@>0Ldkb-wfU^r)n@sH_1-rA7|3dxF!VsJ<8<_zoc-1hY+a?IQ)ZhX%* ztt~Q|Mn*mR+2Q1;%~OXD57|~~Xg0kR>@a1o+H<=~jBdHfM&0h(>5n@z6zgvR```{- zQsq1CuS;FMb`(5rC7bbl^uOtR9`at$@J$bknN&usYJ+L$P${CLrR3;F_R)l6-yoOqF0lyIc9UBk&(C z{{k{dk{Ee_48hOs@qFg5;1JQ2lD)o}P`JbeE?C(qh(V(YgLr6eCS#*L7yh=y%eP~r ziFOa%r0u!y0DO%cP1`b^D%FmidO^S-QQ3*_9EnZ3lBlilk4i1a5j zc&8djvH9=oJ!%>oGC$sOtob+=fq|Mf>(Avj+eGtkpX7#JFV0fU7s4^Cxi%Ykhj6+# zE(JT!?n+LUA1G&c$W2Pn9^(px<6@0;;0I4!Ee4PGU0g603&l>UrFik`$5sW4%u^3Q zqqR=PXQa2MEN+)Klis*ANRS(*tHV{cf%(YiKQTpW2gT@Fo9UG``p%k>_7t?oo^|2A zN9KvdnLyL*nBAOyYUsSY1)tw3_vXJkK3w4uT2Hl2vu=kS5zZM1uqgt0Yb>2&BIAP1vm%bzPo!uE z2^>08u88b_s`$G6%7jC6$3nHf8yhw>Z#}Wh7bOJVI`B0I?4VM^GLPG75bQEH+CtGm zgQpnA6QC`q&fMJY&lvgg*B{oIBnYqgOp z^Q%FXQ#ti6bz&>-yae&%7ii1jd6(4r_2gQ^Ok%2xDs1oBvX=baV|(`ytH)EOQ=>D9 z;ALtzp<$@{(CJ@{(IL8GpJ#e7T>i_Y}ub1bsKwc2qcmyQT z;lrom;v+CmO{B1C`Zzmv+HT2wxV$-J|3ypFO^KI|H-t? 
zKR#gTlC0{blq!Uc!Q1_EA^3>B$s8H8h|=~i=-TDTz*w*#_a_mc7DQw();atM{7m*Mwr5W^i#L}#PyOYe3b7k!a9!{GPv|S+ctd$SxFHm9Cbb9qX6&p*Y6{_}f?zmw9pE@;e_Yi8HVI z_$T^Z{S1&X&5!FnCpVSxUh>!swuZ zPubluDz|rf(eu*|xOatF$vY~)+GVKqB!k)VM8nCano_^bv-m%ZcyO(huP5hkUsRCh zwc=hGEaWH`YrP}DyG|60eE4`o2BaI_x%}Z0U$TdXg-XGuIXo!f`L8=i-7;hygC5Fn zY21if=;we+2hRdZr%a!YduD93oPDQeuA~9cvu!Uka3SPl1nV#7@5&X69rvCDK4KT* z5b*iwR^1nQEABen))#N41sPM&jTjWFDDpKu3@}Vk-WJ(`WnO8Q6VjVD?Kr8E{xQKH zCIT1_jMLm^U8<$r2!}Am>l=iREm`=x*WU^k1@zXxaM}j1y9wV!ON%2V3xM_wCDlUc4hPA>;8U< zIww)$C@q6j(qm`X@i~_Ky2`^8?AN}~z9aD0*k>sM)gfs^p_qxcp4G$WhN|-X^~hOh zTmzY#F?L%51FLE_D`iL=uj?<1Tc7NHc1N>iPtxm%|Qb?7iYSQL|8!3Q4rfwrHp;4iUz zcGwBzKA`+uU3D3>%02`8FYSMqQXTU5j(yplXrTrfbsV_dQBjYV|IVt)5-UAjv&)@0 z{-z>Kx(XKY9_BSI*;(ALQO1v_Jq^~TRapa>y_H@mSgxJ@ht9uiVv{K9a(SdjjTA@S zg@>~Fd?NNzliocNTCZMu8M786CeGf7r0N_ipiqfT@9GGR04F5>Qt+=+HLBH7i4rBk z_)%hX0-E6M83ltb@E!GW6${Ytg6apgeIQ9-rhBJPcMD2FO9?6(SqPm zIZ&7@pHb8Ar?5KbIno;J*YPQv=6S##ldgTurb)ub^b`s!zJ46H3`LJKkG~G!GrN|1 zZ_@e1Az*f#z0S8|xR7GC2301KwG368(HV|5kP5ysV6aZ2Ug0-RNca^olar>Ou@%Ox z+aD?n6~ToniDx=KRb@nqjYv%@td)Ai+go!GMKLet7Vw_@8r!suM~>G##;Iuv3XzVy z2OULL+}CY&E--f1KRj*--)OQJuvg@8dfK77f;R?WG^bDssTX!vFBY(s_hz%-*foa(ix^c{S^tCp+PwrGv^m zZMh%nvBhmR+Clcqo6wfA>bkG!M)Tw;86U>PPL@!tX>x_R?%uR|uGy~Lh$ux~_xR_| z6qRP-v+(-s6-zJ#DPK!8iP6jtU`))owx(ki?hmAF{u0W6Wdvk$rMn7n!e-wR2@eka z!uda7T4?P)i!s$LwN#H>oDtU1Wi-k}x%K=h%TOlx{b5jShSO3+X*Sq-^{dy|C%cZ= zPr2g^8iHQNmnkr(HqJ9ib#+^{Ei{R{$HAK+GdW@EHb0|F6Nf6aBcdQ)cdkNJq3|z? 
z*f?zifNC`^zO2&eYB0I7@y$keu+2XuE%xUWHA&Sb zMt5t9?y%*b&f&5QyS)rkj~M^0qfvC-Fg^DM72xE#5_OW>k5(eZDnv>c@eH>iHH&mA zHv!lnw~`E&dd3L<$UT1}AdpfKg}dmaI^w;X)iKJKCV!Gg5$HO1_f>Q zxFR?&)K?=tSOwt9Xr-MHtEjUdy5t>&U8q{=@Dt{Wj*GewvoZmVjJi07@^&i}xW-XG z2=k)yRO@k7RYu{>|_C?nJknkU5B?JV{|KxNX06u$y+Ig-= z0v42Hl=Uwf{_|62G4$*I84v&e1ZC@g*vS9o-ueG~QORy;tjdMX$^|v9-7lxJ_|?14 zn67Pfa(n(^@zZCz%i-QSu#I+i@7F6AoPoUiW(e@uKU3I+)}H8w6}{sl06lkAC`^a3 zCa9Ab1QrQEZZ%Q|c;;h`>LvKRrQK_p$3KLgRw_9GTxoGWBjsiE3^V<<8X4fYFN;;^ znuIfn^0mF~-~|1ZLOpV~%ytf$$Cp)Z5w#-+))7!O%^D#CN$1W^{`>&aU^w^RWdtP{9FUW#@(4!=%r(a<4Q zr4OX|F^xJ~CN3{80U!Cp8Rc`Z0(d>3$fGJZa$*)YF*p42;V89N z{7oEk9WL6r52SJ@TqfsiUU3L%?F16-Khp9N-W zw3LMn-w3Z#;&q^Az~kd?#8RYHVHOgRwrPn(ZZ{-s1_BJCnysf`MgHYJ4bf(keZ{$e zAz7;_JW>koi;(i5tzM*eHYBTNeWTL|X>2N%s_N%v^?O0h$P?!MU|osoKVtQ?JdwUv z=QCpI)MqY`%SHnTf;(HKwzQHf%`%9sMSz#8oDWc>Dn*)@VJd3B-)dmLzY~0_(Z^Lm z1~y7{4TO(LYi*Z|fE*_vJm@SotdyX$RH!TdEon(`1Zl=Mn*YMz;bN|4WjRE;CL+o1 zzJ3r&C`s3((6D^lCDS}1sTf%asCQJ{(L>th1)%^zi9K%4KiVacC`ARl&nhdilnr;Z zk=$a_NHm+GGUmVlVWbs!b?yie8zU_!gF2ZUW&0WwD( z@&PC^WP6k4UOQmSEy8Y^F)JmQy`0(14B7cfC;s54+~t5RQ9|_-sY}cOG9{5F$gY8q zmFG38eMy#2KGEN-tDekb89T>_ET(_j<3e$a6zie%QY ziv*bMSb!>LOQ8#DXhy&w-*KDoomp>k%6uTb(xTQEyp1=fDT^6y>V=5cPI%W`Pt|%R z+-Q2GR7|2x6NP;_2szL`P zBMxaf<4gn|qu$?w*yoo$VYAbBN^CU}kFN^}NXP^r5}-=Sx9*{frV!vP2GFKlhH1;O z?eQ(X1x<7x3zD9ki_0&1Z!8fAVQmK^Y8zNL#zM7&Db`zo}lr%}F&7BtA#=p)9gIM<{e=7$P zJT3h6e(QACLh#J!Mk!V~?1Md*I_8E=O}z+N_qxEq&$ zX3M^3>RWtkTog4RR=OT`r4r&>SdvmpmiQ#txZf$?JXFFP&*%qi12CD$eI0)X=ufK6 zuCcXhheD!I2AijLwXq%67gIx#Lg^pXJ@i_}V_*7Fe|st(v_$Tc8DhI`jNnDpd~7 zg))|s@=!Edzw6L!pQ-WljQ+NAXCuJeQo8W!U94$adWv}sglK`GQ&l0K^C5!ukQ%X< z0NU&hM7`I<6NTnQlMMb%GDBxb|8K*a54)-JzsmiM5l-5AA*#|qk?Q6esrCEw z1}-o5jZ=l;O!gsMjhbT{2+}rzPR4#z zfoUI1;IOURvA;|LTyY1~1W|6>J!d381!!O>S)>Kuu+r>eI;_gboAsNJs?`f=T#abFZOJvUv@}xX`rb zy5;)&bV!!cqqVn@)-svgzaMHV0@r!f0KWA8md@rdUv-qVc5*ftO=7`Eh5ezb|33AO zRTF+$KrMpD?)GBMFQJmFi8Xh!K{YNXh z(sF!+Ud)N&fC1T}bvP4i+m75Hoy9-M>{20kXAE~TprA~j*#`*MnQ}iL$D;ub3TGwQ 
zq(J`xU7%7Ayc;=67dvsux|*&x$}`omfavsUI{L%Dx}<7Bk#4AR=qjLVLFppY8r8e2 zfR)n)i1Q)qmO$&&S0nFVbzJnO(~AMa?1&i@(rgI;y*Vpo=*hA85MRW9d4~Sj!rgVTYVv{J0$xPGYOdz_@ zn0>h01c>ZFx>hNNdRU| z?>>F8+LQc^n*VqK&@f-)qT&r20GssBgC>G2i|GI+gknw91GKX6A**!&Uzm!Ii5Qx` z0j=-t&Bk%AcHACv2h>O;ZsMMH&uzf@2GETkn#q3~4=uE%k)ap3XHbEEK+$x++VSBQ zUB3#X?rT%xybn`Gb7PfCCY7aM0|BG}fZK1<@li)WVY;`T(lQ!n^;!-Xm_crHOUFbR zdg#5F8#h9gi5eYJn_9g9s5$j|#H;m}%^U!!m0DEAU8$H9bB|L>0mW}5x0iDpf%!^m zAksX*1dbQIXNA)rs>G=awg&;c^h;LXQrEr!N(|@AMdGTyfbudAfXVe&YI!6(4@dxF z=dgL!I^&BsR9;Tdj^(jbv3*)z6M0QIQj#uemYD6j4HmD`^x|LNp+lZ>ReylPrw35- z?U>u?6V{V127x3l0;i{ zF0!7~PC%fHCC%`z>VSd-iU9N=k*<2z@5eH#kfXp_cQ~dWG>I%5vy0g>jY7Cab!iKo zhn1hcozo(vxhCEMzPosVE;RJY9G=3IK+r`z7e+Ab&6>FwI=6k~Hf}9tKW&ty z+O$)mn1}L%cX#@Mr7|)_2y|R{%3y-oIm35ElrP8{9l|AKCJ9)gnVWJj_StrT=SiOd zasg)@1#6jK0uoQ=OifhMXWP@lUbQ(znQZQb3Hg>K*?OjT0cCA<AaUZW2yq5FJO{1>_u&T?pcSzkg)KJMZ)q?cB8aE%Zs=#<05Q4<`tU-;$ljFpXNz z^USkoOHR1?G*3970W^ygcvd{+t@JIy*-d^b%1)qYA5brhxVGxlnIx5o7P8mo#oEnQ zXaFBlU*(dr*CYUuQ{+MrFw$-~YF{q}_+UO1`{YnF)v}Nk<2(Ah=G0G<`X(AMP(?fmYyb zIR>dBBV*Fp2MP8fB(u0)#^PhGb5t%*TTlmx8aAH*R#( zGXw@e^yfL$B7c^SeE@&b&4 zrC-~4X)LRArnJ%*f_ixum{l6cueAG;cznL|$ra|(D5}`r# z;(M0`gd-Y1n)WfD`3ye~pTGLv*wTJc+PW+)GiblhXT@w+ZWnm$#JttJOMkq2#NjpX zu-1=IZeR=3+^pPZm=i+>7>H>)KD&dnzfbR`fF_Fcv+epEGQp6{HX4MJn-sZfwQhW!3Raw0z?S+lm9@RS^#aGF_9Uh zw1NZH@d@xWc+k-A|1a2Tfd|r~vnxI`D_TQuJM|M>#_{nxuoOi$=y;WWt-PI@!w!*|jtI!22eLPYLJ%t z22T74DP{qP~n`+=cZUb=|s{86k|3l+Sa@e?LLpMoYEF8eOzQ(W8pwA0=Eed!|CM=QgX zFOymOo0Q5iGg#86yMR(ex<|C70kc>#viJ4VR;fetTcq#LK9|Euz-41F`01{E;R=z4 zNo>CX&740(H{aGX(R?vHCt<+Q0sGU$wUR#0e0a~7$!f$cbt-P}EJ^vGZ|Vtv(Y?F> z9%XlL93Dr^%HoP*)DjHc5iRm%jg;x<{ucBpWI3Q4^MFWG81}H z-kHUz zB9tnQ*27XpmcR6lbb`UY8O}1bf$5Ose%HHofqRxXZeJHC^0ua6NmrhRr>6`uq^G^F z%{<)Fw>OLs)EBwRO7l>wlVxXIbKB2CcuBY#Cwc!r=Yg&cN}eqeh``r+8uC^gJv}0fWY6Rt!#W!c~k0S`K}to zRMGYEBpzCHH_fE)db^`_!g34G`&hmsU9QPR&Bp%M!s*FaNZ)Lte=mx4ZN6cuH+70{ z7G&6^X5KiN&Ct&uuU**mbXLx{D_WPeD|3W=BWNRkHnL#?&GdZaJDTdtIe-*Ub+O^{ 
zou$@HHzydX)X2G6-Qf)}|G(P1@^C2Ezu&1}QOJo(nd0Q65=NHD-mxUcke!hs4%vo; z?36mPt1N>I&M<>vh>U$`L)pq~YBbg?LkyX*jO~5KIoCPw`^S4-=lxy3zuv2VpLy9{wIox ze|i2R5BYR@2rf?A@K(TQO4eH9d|ZFoDkAqGh;~cotS)r}rO;L_a>2Aw&1v=H6R$Mr zAU{Vc@{&s=tl9mz>gJ?ZB)x|F^u4Mnc_TUR(UmlmEvS%k;#+dQiW1`*2uwd>(&>~3 zt`f4yMKC}g)vR4{CG8$8)*1}63JKUz*5^3d%!z0?nDB1V`iNEV=VF8 zl{uS%mIcyRL>s`^(4skOOMFd2u82P-xrvf&=8jr>Y5XAB7&1F}O%2r`;ruUQ>at5N zHpDw{Pa8@xAkzY`pOmsi;DJGae;&%>Bl!36<1Ro&HP)OG{Qrr<-8pH@SQ2F z<1b%KOsVln$@(|JVMH$-1)u%CSZsNRIYiT+%8r1Bw@RzzM0P(=30MvHYL2VtFUalF z*?yc+N5^ygqba}=x{Z_Hwbwp<=tYG0SN%wnvP?v|rhQ)T8NP2B;p zF*<0QbqHnx2hCqLlhtke@O^H9k|)SSp*vIL2$Ink{Nvc_v4bIoTwKX`Oi}vg8nRdgfg;=nPx~i~4|h zXFEG^(Ns$g-)chEM??<_nAFkHBC+yq(exrOb)9Rn6Qt`k?gsRUCFt;Tkql_0E6k)^ zj(q6?E;CM@yA<3Vw&qTvLxtOG2W1qWb8!$_{>A3`4kK}eDLUJvcQ{~~q@NNq_=)o- z+(K~s<7%yYkvD+BLrf8IKUjLgLv9Lifa!Z$0qTebN35aG_kWq1DfbWh`g>;qRMNhZ zV|a3=1T_6sP*i_qjV z@fy{6NvvbP8>m=Oh z&txGXI*6HADOW#f2|Dy66?go|lD)vMXzMHZ;y!S&Gz4}Slaq^dK5k~+?3#1*HM?R= zv!lj)A;5o%Q=y#{YkysG#GI_NuLlke`{&&@hW$FamsIUM)CvqW`^W)Gct#>lmN0eF zlX}$``jqMbQ{j^i^sx1MG7CP51he#tnhcE-)%l(2*ZrWmZ4nr8Qpaows!05!Ji+@4 zAxsV%z#uZemY%1;jZsoA=k{P#Yt>ByKPp4U_r(Yp?oxq73|A|MM-$HtN}MmJT(pz% zW~&2241xkn|0<-l)(IA#U`SHsLuFMqZjQo+JtQ4N2(=x29!|uVt>G%{o|uhwmC^nZ z@_s&6-eiIQ5M6WA08e?l$Y;&ktqfo$^gh38Os{NipfaMzj=t(QGlC3)2+KEdzrP4} z^Ju$o_LRO~BC*3q5f*^MGj*O`UkRX52)V(nK5@d97!|a}K$;p!QkzEUo@tZ! 
z7I_nE?Y-AS3AzE1$*!wFrk3^saYvzq)eyCG@j|+Swtt` zLZ+@G=-UlTWdF*Ri;VE$aVAG?*d$GN)3s@gyG{bU1XaXFdqJ_LL|z7XooPo(b?k$S zU|EJTu{8&bK3%!;`L+g^x{vvyuk*wcntF^a2;`UwHRL?bYNi_4Jcco!+|KT<^DIh1 z#(Dg9K+&%}{p%R`5}EX7*{}U`YD2l%@zx4@!eD?MB6zi8(C9sPK45m@Q~9Vtgo>zS zxv;&|Bd`1wZe7Ssd*qeqc(VxE-T%ye^ve zMJevEZorkxM8!Cr!<(KOoY>h!5qagIs480hr^MG;R||$*=Yqb7)kBwBtA^c;v|aQx zf=ZICG_bdo7<`F?dL@inreY_zlLbW59}4g!j?dN}J)jbr?tGO=G>i7m2x{~nQ#P#s zx-JHtt!#CLA=)wCtb?|jBL-1j4JtEt7rhWSUgkA#wtRwQ07LrV3OMNaKV98+!%3E2 zpq#!Ktk83KqIe!$if`F~$DSW0gQ*`XBLXAZHEANygCyOCP~d`kQ8cAF%OsV7QJzCTY4%vaK<7d> z#~}f^(3hM6kA&_7KG}l+4-vo9B;HZ#{(q~b1D*cA#|wO)G9F4o!g*&KQ zUJ~}heQRDU_#f9>`W~uf+AQU%>j5E6)4A)UlfpWvttc@Q5Vf>KuNC|N zbbXHf80HL&VA>&YTs?%?s>R15k4#9WX+3Sd{c6ICDSZ^&x(cK-E($ridd@LkB4!{} zC9|wB^y(fUwFhE~8O`PCXt_4hn}JR)-0TXRAC!#5(H=fqgvoMd)I$sQDpc?GjapD= z8u9t1w|*-CQMrAn=*3P=QhiKxaML7vb0=@uQ~fP3!cTsb^cdiKbDSChDjdEw!wQL( z;J}D!%ftm~leW)y`s}=UDB8k^u{=j(LF9xm1a@`7XZdYw1uQlS06abSdK?HJPn@8L=Awtapti|Ph)j)5{6)ZrrMX+ zcXOWI9k{<-Ij>DXLF2@{q0^ymgV#J{ZqNR|TJ0=uU>))I4PoeMSd^SLIAp%fj*qiI2Xg?{7iy*629po+#wp!8F=L*;%s zq-BH74qD%1UP85ld0$3eW+DP@#+!82S#f9vvTb-XG<;iC_k2_H)|%-YBV$>6o@9%7 zb3I^OH)(6pUe`B%+08XvQ34pB;2m9V@2Uz1GeC~kj4I|qjK`x@n=rIopCzFM6s~E}(sZv0&Wucq8Y>h>NaR1+awFUz=*Va* zy2CW)(2nHv7!4_ay2igmNVirGa#v}s5t&qXW2|$7{4N6X{zO3Zp{7W-zASb{q3&AK zWNHi*ORC7*x>*k^-vO`etMXw*%Ao?=8D1Yn^vU&YmuhJB)%>=q@QC)>_bUoA2h>i4 z^bM16NJF`1VtJ3g7>bC^x zBhum&#`v@V`KHd+@3rKkk~j|3i!Mth_phhQ(eKJQg$%eE-W5T-%Gh+vQ<#7Di^jxC zeX-#~@S{3UnEF=u;2YZz?=uweUKl?e=cX4W55#agbG%1d~3#$cD| zFFhry4n&)H-j{Ws;lCAW9zKgYj-c)OlhH#1MLi1@0{$>C>0G-A#(@SyY!dGelT>6bW^oL zND)#C55p;r(AphaoVKZzye!pYwdKr%Wq72n^F5p4kjpuarE2hECx24Bj#72 zV;8??+i75ku+CN_J&L07oGO>|?UhgaTuG5Om8*A9z;}&ki-Do>+3j)Q4`p}U_Msbf zLibx>N(HUyqQM_tO$=;5KL#)2-B!@kYj$ZRZcOnN5?5+@_?ND;IyEr7xm$lFr1pqa z-Ci=hS#eM12*&t$Y2;``j3+hJ?{j}?3nnQ3i9-L|`Fpx+Ru=BYfA-XM-YB4ZYHcdQ z8Sj6dekVZ{esKLWR(|U9DZR{wq+9WW7M+N)vpO7Ci|{i|p>U!NrzqW7PNJwc`<}@H z>GbZ*n2qJau{E^g$pOm^nR!oZ$;u4Obrl@1Hs5;+Vl9hHY!+#sD=dOi1KZX=Wld^H 
z#W$|z@+G=RbB&G^)lf|1J+#rqJ?~qq^%pf_QJ%L#W>ezC#*y`CiK~B<^L?Z!@q0VI z?xC;k0G8%pO1Jd~NjPSPE+nAMj{`m3Mi4T=#7C}ZP@ zX!+`7SBDAO&6L@#u~HNnutkIgjIrE(R|ge&Q9w}Hq5^#dq%>i=gQLP+yt`=kxhxyM zKg??IcIcN+>`YOxIYgoQmIY~i98{4TC$Ct$JU?<&hwq1&wFZlK1lWfSr6M`_QD%!M3H7c(??AFmx9H5dokG*xHduf&}sE zAFRX11q?HBV6HCDME{6%mEJ=Xh}IDYfM=J@bEg10T)Z~V<1?Ak>a3+n=C{`0mtRWTUewH=!=X2`Nl BaseComponent: if idx in self.components: return self.components[idx] diff --git a/EdgeCraftRAG/edgecraftrag/components/benchmark.py b/EdgeCraftRAG/edgecraftrag/components/benchmark.py index fc3801b5d3..df66ef0e6f 100644 --- a/EdgeCraftRAG/edgecraftrag/components/benchmark.py +++ b/EdgeCraftRAG/edgecraftrag/components/benchmark.py @@ -49,7 +49,7 @@ def cal_input_token_size(self, input_text_list): return input_token_size def init_benchmark_data(self): - pipeline_comp = [CompType.RETRIEVER, CompType.POSTPROCESSOR, CompType.GENERATOR] + pipeline_comp = [CompType.NODEPARSER, CompType.CHUNK_NUM, CompType.RETRIEVER, CompType.POSTPROCESSOR, CompType.QUERYSEARCH, CompType.GENERATOR] if self.is_enabled(): with self._idx_lock: self.last_idx += 1 @@ -58,6 +58,8 @@ def init_benchmark_data(self): data["idx"] = idx for comp in pipeline_comp: data[comp] = "" + data[CompType.NODEPARSER] = 0 + data[CompType.CHUNK_NUM] = 0 return idx, data def update_benchmark_data(self, idx, comp_type, start, end): diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index cb170fcd10..d72f1cf87e 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -6,7 +6,9 @@ import json import os import urllib.request +import requests from urllib.parse import urlparse +from fastapi import HTTPException, status from edgecraftrag.base import BaseComponent, CompType, GeneratorType, InferenceType, NodeParserType from edgecraftrag.utils import concat_history, get_prompt_template, save_history @@ -84,10 +86,38 @@ def 
extract_unstructured_eles(retrieved_nodes=[], text_gen_context=""): return unstructured_str +def build_stream_response(status=None, content=None, error=None): + response = {"status": status, "contentType": "text"} + if content is not None: + response["content"] = content + if error is not None: + response["error"] = error + return response + + async def local_stream_generator(lock, llm, prompt_str, unstructured_str): async with lock: response = llm.stream_complete(prompt_str) collected_data = [] + try: + for r in response: + collected_data.append(r.delta) + yield r.delta + await asyncio.sleep(0) + if unstructured_str: + collected_data.append(unstructured_str) + yield unstructured_str + res = "".join(collected_data) + save_history(res) + except Exception as e: + start_idx = str(e).find("message") + len("message") + result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" + +async def stream_generator(llm, prompt_str, unstructured_str): + response = llm.stream_complete(prompt_str) + collected_data = [] + try: for r in response: collected_data.append(r.delta) yield r.delta @@ -97,20 +127,10 @@ async def local_stream_generator(lock, llm, prompt_str, unstructured_str): yield unstructured_str res = "".join(collected_data) save_history(res) - - -async def stream_generator(llm, prompt_str, unstructured_str): - response = llm.stream_complete(prompt_str) - collected_data = [] - for r in response: - collected_data.append(r.delta) - yield r.delta - await asyncio.sleep(0) - if unstructured_str: - collected_data.append(unstructured_str) - yield unstructured_str - res = "".join(collected_data) - save_history(res) + except Exception as e: + start_idx = str(e).find("message") + len("message") + result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" class QnAGenerator(BaseComponent): @@ -130,13 +150,20 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin self.llm = llm_model if isinstance(llm_model, str): self.model_id = 
llm_model + self.model_path = llm_model else: - self.model_id = llm_model().model_id + llm_instance = llm_model() + if llm_instance.model_path is None or llm_instance.model_path == "": + self.model_id = llm_instance.model_id + self.model_path = os.path.join("/home/user/models/",os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) + else: + self.model_id = llm_instance.model_id + self.model_path = llm_instance.model_path if self.inference_type == InferenceType.LOCAL: self.lock = asyncio.Lock() self.prompt_content = prompt_content self.prompt_template_file = prompt_template_file - self.prompt = self.init_prompt(self.model_id, self.prompt_content, self.prompt_template_file) + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) self.llm = llm_model if isinstance(llm_model, str): @@ -151,15 +178,15 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin vllm_endpoint = os.getenv("vLLM_ENDPOINT", "http://localhost:8086") self.vllm_endpoint = vllm_endpoint - def init_prompt(self, model_id, prompt_content=None, prompt_template_file=None, enable_think=False): + def init_prompt(self, model_path, prompt_content=None, prompt_template_file=None, enable_think=False): # using the prompt template enhancement strategy(only tested on Qwen2-7B-Instruction) if template_enhance_on is true template_enhance_on = True if "Qwen2" in self.model_id else False if prompt_content: self.set_prompt(prompt_content) - return get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) + return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) elif prompt_template_file is None: print("There is no template file, using the default template.") - prompt_template = get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) + prompt_template = get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) return ( 
DocumentedContextRagPromptTemplate.from_template(prompt_template) if template_enhance_on @@ -175,7 +202,7 @@ def init_prompt(self, model_id, prompt_content=None, prompt_template_file=None, if template_enhance_on: return DocumentedContextRagPromptTemplate.from_file(prompt_template_file) else: - return get_prompt_template(model_id, prompt_content, prompt_template_file, enable_think) + return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) def set_prompt(self, prompt): if "{context}" not in prompt: @@ -206,7 +233,7 @@ def query_transform(self, chat_request, retrieved_nodes, sub_questions=None): :return: Generated text_gen_context and prompt_str.""" text_gen_context = "" for n in retrieved_nodes: - origin_text = n.node.get_text() + origin_text = n.node.text text_gen_context += self.clean_string(origin_text.strip()) query = chat_request.messages chat_history = concat_history(chat_request.messages) @@ -214,7 +241,7 @@ def query_transform(self, chat_request, retrieved_nodes, sub_questions=None): if chat_request.chat_template_kwargs: if self.enable_think != chat_request.chat_template_kwargs["enable_thinking"]: self.prompt = self.init_prompt( - self.model_id, + self.model_path, self.prompt_content, self.prompt_template_file, chat_request.chat_template_kwargs["enable_thinking"], diff --git a/EdgeCraftRAG/edgecraftrag/components/indexer.py b/EdgeCraftRAG/edgecraftrag/components/indexer.py index 842122964f..00e0f3f4d6 100644 --- a/EdgeCraftRAG/edgecraftrag/components/indexer.py +++ b/EdgeCraftRAG/edgecraftrag/components/indexer.py @@ -13,8 +13,7 @@ class VectorIndexer(BaseComponent, VectorStoreIndex): - - def __init__(self, embed_model, vector_type, milvus_uri="http://localhost:19530", kb_name="default_kb"): + def __init__(self, embed_model, vector_type, vector_url="http://localhost:19530", kb_name="default_kb"): BaseComponent.__init__( self, comp_type=CompType.INDEXER, @@ -26,10 +25,10 @@ def __init__(self, embed_model, vector_type, 
milvus_uri="http://localhost:19530" from llama_index.core import Settings Settings.embed_model = None - self.milvus_uri = milvus_uri - self._initialize_indexer(embed_model, vector_type, milvus_uri, kb_name) + self.vector_url = vector_url + self._initialize_indexer(embed_model, vector_type, vector_url, kb_name) - def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): + def _initialize_indexer(self, embed_model, vector_type, vector_url, kb_name): # get active name pl = ctx.get_pipeline_mgr().get_active_pipeline() plname = pl.name if pl else "" @@ -46,7 +45,7 @@ def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): VectorStoreIndex.__init__(self, embed_model=embed_model, nodes=[], storage_context=faiss_store) case IndexerType.MILVUS_VECTOR: milvus_vector_store = MilvusVectorStore( - uri=milvus_uri, + uri=vector_url, dim=self.d, collection_name=kb_name + plname + str(self.d), overwrite=False, @@ -55,14 +54,14 @@ def _initialize_indexer(self, embed_model, vector_type, milvus_uri, kb_name): VectorStoreIndex.__init__(self, embed_model=embed_model, nodes=[], storage_context=milvus_store) def reinitialize_indexer(self, kb_name="default_kb"): - self._initialize_indexer(self.model, self.comp_subtype, self.milvus_uri, kb_name) + self._initialize_indexer(self.model, self.comp_subtype, self.vector_url, kb_name) def clear_milvus_collection(self, kb_name="default_kb"): # get active name pl = ctx.get_pipeline_mgr().get_active_pipeline() plname = pl.name if pl else "" milvus_vector_store = MilvusVectorStore( - uri=self.milvus_uri, + uri=self.vector_url, collection_name=kb_name + plname + str(self.d), overwrite=False, ) @@ -75,3 +74,36 @@ def run(self, **kwargs) -> Any: def ser_model(self): set = {"idx": self.idx, "indexer_type": self.comp_subtype, "model": self.model} return set + + +class KBADMINIndexer(BaseComponent): + # Handled in the kbadmin project + def __init__(self, embed_model, vector_type, kbadmin_embedding_url, 
vector_url="http://localhost:29530"): + BaseComponent.__init__( + self, + comp_type=CompType.INDEXER, + comp_subtype=IndexerType.KBADMIN_INDEXER, + ) + self.embed_model = embed_model + self.kbadmin_embedding_url = kbadmin_embedding_url + self.vector_url = vector_url + + def insert_nodes(self, nodes): + return None + + def _index_struct(self, nodes): + return None + + def run(self, **kwargs) -> Any: + return None + + def reinitialize_indexer(self, kb_name="default_kb"): + return None + + def clear_milvus_collection(self, **kwargs): + return None + + @model_serializer + def ser_model(self): + set = {"idx": self.idx, "indexer_type": self.comp_subtype, "model": {"model_id": self.embed_model}, "kbadmin_embedding_url": self.kbadmin_embedding_url, "vector_url":self.vector_url} + return set \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py index 259c4a463f..d3a050ab4c 100644 --- a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py @@ -1,8 +1,8 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import os -from typing import Any, List, Optional +import os, json +from typing import Any, List, Optional, Dict, Union from edgecraftrag.base import BaseComponent from pydantic import model_serializer @@ -12,8 +12,12 @@ class Knowledge(BaseComponent): file_paths: Optional[List[str]] = [] file_map: Optional[List[str]] = {} description: Optional[str] = "None" - comp_type: str = "knowledge" + comp_type: Optional[str] = "knowledge" + comp_subtype: Optional[str] = "origin_kb" + experience_active: Optional[bool] = False if comp_type == "knowledge" else True active: bool + if comp_type == "experience": + comp_subtype = None def _update_file_names(self) -> None: self.file_map = {os.path.basename(path): path for path in self.file_paths if path is not None} @@ -35,6 +39,103 @@ def 
remove_file_path(self, file_path: str) -> bool: def get_file_paths(self) -> List[str]: return self.file_paths + def ensure_file_exists(self): + dir_path = os.path.dirname(self.file_paths[0]) + os.makedirs(dir_path, exist_ok=True) + if not os.path.exists(self.file_paths[0]): + with open(self.file_paths[0], 'w', encoding='utf-8') as f: + json.dump([], f, ensure_ascii=False, indent=4) + + def get_all_experience(self) -> List[Dict]: + experinence_file = "/home/user/ui_cache/configs/experience_dir/experience.json" + if experinence_file not in self.file_paths: + self.file_paths.append(experinence_file) + if not os.path.isfile(self.file_paths[0]): + self.ensure_file_exists() + with open(self.file_paths[0], 'r', encoding='utf-8') as f: + return json.load(f) + + def get_experience_by_question(self, question: str) -> Optional[Dict]: + for item in self.get_all_experience(): + if item.get('question') == question: + return item + return None + + def add_multiple_experiences(self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True) -> List[Dict]: + all_experiences = self.get_all_experience() + result = [] + for exp in experiences: + question = exp.get('question') + if not question: + raise ValueError("Must exist when uploading question") + content = exp.get('content', []) + found = False + for item in all_experiences: + if item['question'] == question: + if flag: + item['content'].extend([c for c in content if c not in item['content']]) + else: + item['content'] = content + result.append(item) + found = True + break + if not found: + new_item = {'question': question, 'content': content} + all_experiences.append(new_item) + result.append(new_item) + with open(self.file_paths[0], 'w', encoding='utf-8') as f: + json.dump(all_experiences, f, ensure_ascii=False, indent=4) + return result + + def delete_experience(self, question: str) -> bool: + items = self.get_all_experience() + remaining_items = [item for item in items if item.get('question') != question] + if 
len(remaining_items) == len(items): + return False + with open(self.file_paths[0], 'w', encoding='utf-8') as f: + json.dump(remaining_items, f, ensure_ascii=False, indent=4) + return True + + def clear_experiences(self) -> bool: + all_experiences = self.get_all_experience() + with open(self.file_paths[0], 'w', encoding='utf-8') as f: + json.dump([], f, ensure_ascii=False, indent=4) + return True + + def update_experience(self, question: str, content: List[str]) -> Optional[Dict]: + items = self.get_all_experience() + for i, item in enumerate(items): + if item.get('question') == question: + updated_item = {'question': question, 'content': content} + items[i] = updated_item + with open(self.file_paths[0], 'w', encoding='utf-8') as f: + json.dump(items, f, ensure_ascii=False, indent=4) + return updated_item + return None + + def add_experiences_from_file(self, file_path: str, flag: bool = False) -> List[Dict]: + if not file_path.endswith('.json'): + raise ValueError("File upload type error") + try: + with open(file_path, 'r', encoding='utf-8') as f: + experiences = json.load(f) + if not isinstance(experiences, list): + raise ValueError("The contents of the file must be a list") + return self.add_multiple_experiences(experiences=experiences, flag=flag) + except json.JSONDecodeError as e: + raise ValueError(f"File parsing failure") + except Exception as e: + raise RuntimeError(f"File Error") + + def calculate_totals(self): + if self.comp_type == "knowledge": + total = len(self.file_paths) + elif self.comp_type == "experience": + total = len(self.get_all_experience()) + else: + total = None + return total + def run(self, **kwargs) -> Any: pass @@ -44,8 +145,11 @@ def ser_model(self): "idx": self.idx, "name": self.name, "comp_type": self.comp_type, + "comp_subtype": self.comp_subtype, "file_map": self.file_map, "description": self.description, "active": self.active, + "experience_active": self.experience_active, + "total": self.calculate_totals() } return set diff --git 
a/EdgeCraftRAG/edgecraftrag/components/node_parser.py b/EdgeCraftRAG/edgecraftrag/components/node_parser.py index 0f386bc61f..2491cbf9dd 100644 --- a/EdgeCraftRAG/edgecraftrag/components/node_parser.py +++ b/EdgeCraftRAG/edgecraftrag/components/node_parser.py @@ -168,3 +168,23 @@ def ser_model(self): "chunk_overlap": self.chunk_overlap, } return set + +class KBADMINParser(BaseComponent): + # Handled in the kbadmin project + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.comp_type = CompType.NODEPARSER + self.comp_subtype = NodeParserType.KBADMINPARSER + + def run(self, **kwargs) -> Any: + return None + + def insert_nodes(self): + return None + @model_serializer + def ser_model(self): + set = { + "idx": self.idx, + "parser_type": self.comp_subtype, + } + return set diff --git a/EdgeCraftRAG/edgecraftrag/components/pipeline.py b/EdgeCraftRAG/edgecraftrag/components/pipeline.py index 41780ef88f..8ec80a8bc4 100644 --- a/EdgeCraftRAG/edgecraftrag/components/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/components/pipeline.py @@ -97,12 +97,10 @@ def check_active(self, nodelist, kb_name): # TODO: update doc changes # TODO: more operations needed, add, del, modify def update_nodes(self, nodes): - print(f"Updating {len(nodes)} nodes ...") if self.indexer is not None: self.indexer.insert_nodes(nodes) def update_indexer_to_retriever(self): - print("Updating indexer to retriever ...") if self.indexer is not None and self.retriever is not None: old_retriever = self.retriever retriever_type = old_retriever.comp_subtype @@ -122,7 +120,6 @@ def update_indexer_to_retriever(self): # Implement abstract run function # callback dispatcher def run(self, **kwargs) -> Any: - print(kwargs) if "cbtype" in kwargs: if kwargs["cbtype"] == CallbackType.DATAPREP: if "docs" in kwargs: @@ -183,9 +180,18 @@ def model_existed(self, model_id: str) -> bool: # Test callback to retrieve nodes from query def run_retrieve(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: + 
benchmark_data = {} query = chat_request.messages + top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k contexts = {} - retri_res = pl.retriever.run(query=query) + start = 0 + if pl.enable_benchmark: + _, benchmark_data = pl.benchmark.init_benchmark_data() + start = time.perf_counter() + retri_res = pl.retriever.run(query=query, top_k=top_k) + if pl.enable_benchmark: + benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start + pl.benchmark.insert_benchmark_data(benchmark_data) contexts[CompType.RETRIEVER] = retri_res query_bundle = QueryBundle(query) if pl.postprocessor: @@ -201,10 +207,18 @@ def run_retrieve(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: def run_simple_doc(pl: Pipeline, docs: List[Document]) -> Any: + start = 0 + benchmark_data = {} + if pl.enable_benchmark: + _, benchmark_data = pl.benchmark.init_benchmark_data() + start = time.perf_counter() n = pl.node_parser.run(docs=docs) if pl.indexer is not None: pl.indexer.insert_nodes(n) - print(pl.indexer._index_struct) + if pl.enable_benchmark: + benchmark_data[CompType.NODEPARSER] += (time.perf_counter() - start) + benchmark_data[CompType.CHUNK_NUM] += len(n) + pl.benchmark.insert_benchmark_data(benchmark_data) return n @@ -228,43 +242,57 @@ async def timing_wrapper(): def run_generator_ben(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() contexts = {} - start = time.perf_counter() + retri_res = [] + active_kb = chat_request.user if chat_request.user else None + enable_rag_retrieval = chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) if chat_request.chat_template_kwargs else True + if not active_kb: + enable_rag_retrieval = False + elif pl.retriever.comp_subtype == "kbadmin_retriever" and active_kb.comp_subtype == "origin_kb": + enable_rag_retrieval = False + elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype 
== "kbadmin_kb": + enable_rag_retrieval = False query = chat_request.messages - if pl.generator.inference_type == InferenceType.VLLM: - UI_DIRECTORY = os.getenv("TMPFILE_PATH", "/home/user/ui_cache") - search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") - search_dir = os.path.join(UI_DIRECTORY, "configs/search_dir") - - def run_async_query_search(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) - finally: - loop.close() - - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(run_async_query_search) - top1_issue, sub_questionss_result = future.result() - if sub_questionss_result: - query = query + sub_questionss_result - - retri_res = pl.retriever.run(query=query) - query_bundle = QueryBundle(query) - benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start - contexts[CompType.RETRIEVER] = retri_res - - start = time.perf_counter() - if pl.postprocessor: - for processor in pl.postprocessor: - if ( - isinstance(processor, RerankProcessor) - and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default - ): - processor.top_n = chat_request.top_n - retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) - contexts[CompType.POSTPROCESSOR] = retri_res - benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start + sub_questionss_result = None + experience_status = True if chat_request.tool_choice == 'auto' else False + if enable_rag_retrieval: + start = time.perf_counter() + if pl.generator.inference_type == InferenceType.VLLM and experience_status: + UI_DIRECTORY ="/home/user/ui_cache" + search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") + search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") + + def run_async_query_search(): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + 
return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) + finally: + loop.close() + + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(run_async_query_search) + top1_issue, sub_questionss_result = future.result() + if sub_questionss_result: + query = query + sub_questionss_result + benchmark_data[CompType.QUERYSEARCH] = time.perf_counter() - start + start = time.perf_counter() + top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k + retri_res = pl.retriever.run(query=query, top_k=top_k) + query_bundle = QueryBundle(query) + benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start + contexts[CompType.RETRIEVER] = retri_res + + start = time.perf_counter() + if pl.postprocessor: + for processor in pl.postprocessor: + if ( + isinstance(processor, RerankProcessor) + and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default + ): + processor.top_n = chat_request.top_n + retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) + contexts[CompType.POSTPROCESSOR] = retri_res + benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start if pl.generator is None: raise ValueError("No Generator Specified") @@ -294,37 +322,51 @@ def run_async_query_search(): def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: query = chat_request.messages contexts = {} - if pl.generator.inference_type == InferenceType.VLLM: - UI_DIRECTORY = os.getenv("TMPFILE_PATH", "/home/user/ui_cache") - search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") - search_dir = os.path.join(UI_DIRECTORY, "configs/search_dir") - - def run_async_query_search(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) - finally: - loop.close() - - with ThreadPoolExecutor(max_workers=1) as executor: - 
future = executor.submit(run_async_query_search) - top1_issue, sub_questionss_result = future.result() - if sub_questionss_result: - query = query + sub_questionss_result - retri_res = pl.retriever.run(query=query) - contexts[CompType.RETRIEVER] = retri_res - query_bundle = QueryBundle(query) - - if pl.postprocessor: - for processor in pl.postprocessor: - if ( - isinstance(processor, RerankProcessor) - and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default - ): - processor.top_n = chat_request.top_n - retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) - contexts[CompType.POSTPROCESSOR] = retri_res + retri_res = [] + active_kb = chat_request.user if chat_request.user else None + enable_rag_retrieval = chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) if chat_request.chat_template_kwargs else True + if not active_kb: + enable_rag_retrieval = False + elif pl.retriever.comp_subtype == "kbadmin_retriever" and active_kb.comp_subtype == "origin_kb": + enable_rag_retrieval = False + elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": + enable_rag_retrieval = False + query = chat_request.messages + sub_questionss_result = None + experience_status = True if chat_request.tool_choice == 'auto' else False + if enable_rag_retrieval: + if pl.generator.inference_type == InferenceType.VLLM and experience_status: + UI_DIRECTORY ="/home/user/ui_cache" + search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") + search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") + + def run_async_query_search(): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) + finally: + loop.close() + + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(run_async_query_search) + top1_issue, sub_questionss_result = 
future.result() + if sub_questionss_result: + query = query + sub_questionss_result + top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k + retri_res = pl.retriever.run(query=query, top_k=top_k) + contexts[CompType.RETRIEVER] = retri_res + query_bundle = QueryBundle(query) + + if pl.postprocessor: + for processor in pl.postprocessor: + if ( + isinstance(processor, RerankProcessor) + and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default + ): + processor.top_n = chat_request.top_n + retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) + contexts[CompType.POSTPROCESSOR] = retri_res if pl.generator is None: raise ValueError("No Generator Specified") diff --git a/EdgeCraftRAG/edgecraftrag/components/postprocessor.py b/EdgeCraftRAG/edgecraftrag/components/postprocessor.py index bb59cc3d21..cbd387f59e 100644 --- a/EdgeCraftRAG/edgecraftrag/components/postprocessor.py +++ b/EdgeCraftRAG/edgecraftrag/components/postprocessor.py @@ -60,5 +60,5 @@ def run(self, **kwargs) -> Any: @model_serializer def ser_model(self): - set = {"idx": self.idx, "processor_type": self.comp_subtype, "model": None, "top_n": None} + set = {"idx": self.idx, "processor_type": self.comp_subtype, "top_n": None} return set diff --git a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py index 124014a038..ed43a99911 100644 --- a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py +++ b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py @@ -172,40 +172,43 @@ async def compute_score(self, input_pair): return await self._calculate_logits_score(*input_pair) -def read_json_files(directory: str) -> dict: +def read_json_files(file_path: str) -> dict: result = {} - for filename in os.listdir(directory): - if filename.endswith(".json"): - file_path = os.path.join(directory, filename) - if os.path.isfile(file_path): - try: - with open(file_path, 
"r", encoding="utf-8") as file: - data = json.load(file) - result.update(data) - except Exception: - continue + if os.path.isfile(file_path): + with open(file_path, 'r', encoding='utf-8') as f: + result = json.load(f) return result async def query_search(user_input, search_config_path, search_dir, pl): - top1_issue = None - sub_questionss_result = None - if not os.path.exists(search_dir): - return top1_issue, sub_questionss_result + sub_questions_result = None model_id = pl.generator.model_id vllm_endpoint = pl.generator.vllm_endpoint - cfg = OmegaConf.load(search_config_path) - cfg.query_matcher.model_id = model_id - cfg.query_matcher.API_BASE = os.path.join(vllm_endpoint, "v1/completions") - query_matcher = LogitsEstimatorJSON(**cfg.query_matcher) maintenance_data = read_json_files(search_dir) - issues = list(maintenance_data.keys()) + issues = [] + for i in range(len(maintenance_data)): + issues.append(maintenance_data[i]["question"]) if not issues: - return top1_issue, sub_questionss_result - + return top1_issue, sub_questions_result + + cfg = {} + if not os.path.exists(search_config_path): + cfg["query_matcher"] = { + "instructions": "You're an expert in TCB Bonder, your task is to decide the semantic similarity of two queries.\n If they are expressing similar idea, mark as High.\n If they are totally different, mark as Low.\n If some parts of them are similar, some are not, mark as Medium.\n", + "input_template": " {} \n {} \n", + "output_template": "output from {json_levels}.\n", + "json_key": "similarity", + "json_levels": ["Low", "Medium", "High"], + "temperature": 1 + } + else: + cfg = OmegaConf.load(search_config_path) + cfg["query_matcher"]["model_id"] = model_id + cfg["query_matcher"]["API_BASE"] = os.path.join(vllm_endpoint, "v1/completions") + query_matcher = LogitsEstimatorJSON(**cfg["query_matcher"]) semaphore = asyncio.Semaphore(200) async def limited_compute_score(query_matcher, user_input, issue): @@ -218,10 +221,10 @@ async def 
limited_compute_score(query_matcher, user_input, issue): match_scores.sort(key=lambda x: x[1], reverse=True) # Maximum less than 0.6, we don't use query search. - if match_scores[0][1] < 0.6: - return top1_issue, sub_questionss_result + if match_scores[0][1] < 0.6: + return top1_issue, sub_questions_result top1_issue = match_scores[0][0] - for key, value in maintenance_data.items(): - if key == top1_issue: - sub_questionss_result = value - return top1_issue, sub_questionss_result + for i in range(len(maintenance_data)): + if maintenance_data[i]['question'] == top1_issue: + sub_questions_result = "\n".join(maintenance_data[i]["content"]) + return top1_issue, sub_questions_result diff --git a/EdgeCraftRAG/edgecraftrag/components/retriever.py b/EdgeCraftRAG/edgecraftrag/components/retriever.py index fa8553346a..209fd7b5a0 100644 --- a/EdgeCraftRAG/edgecraftrag/components/retriever.py +++ b/EdgeCraftRAG/edgecraftrag/components/retriever.py @@ -3,12 +3,21 @@ from typing import Any, List, cast +import requests, warnings from edgecraftrag.base import BaseComponent, CompType, RetrieverType from llama_index.core.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.core.retrievers import AutoMergingRetriever from llama_index.core.schema import BaseNode from llama_index.retrievers.bm25 import BM25Retriever from pydantic import model_serializer +from llama_index.core.schema import NodeWithScore + +from langchain_openai import OpenAIEmbeddings +from langchain_milvus import Milvus +from llama_index.core.schema import Document +from typing import List, Optional +from pymilvus import MilvusException +from pymilvus import connections, utility, Collection class VectorSimRetriever(BaseComponent, VectorIndexRetriever): @@ -39,6 +48,8 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk + self.similarity_top_k=top_k return 
self.retrieve(v) return None @@ -75,8 +86,9 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk # vector_retriever needs to be updated - self._vector_retriever = self._index.as_retriever(similarity_top_k=self.topk) + self._vector_retriever = self._index.as_retriever(similarity_top_k=top_k) return self.retrieve(v) return None @@ -108,8 +120,9 @@ def __init__(self, indexer, **kwargs): def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk nodes = cast(List[BaseNode], list(self._docstore.docs.values())) - similarity_top_k = min(len(nodes), self.topk) + similarity_top_k = min(len(nodes), top_k) bm25_retr = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=similarity_top_k) return bm25_retr.retrieve(v) @@ -123,3 +136,138 @@ def ser_model(self): "retrieve_topk": self.topk, } return set + + +class KBadminRetriever(BaseComponent): + def __init__(self, indexer, **kwargs): + BaseComponent.__init__( + self, + comp_type=CompType.RETRIEVER, + comp_subtype=RetrieverType.KBADMIN_RETRIEVER, + ) + self.vector_db = None + self.collection_name = None + self.topk = kwargs.get("similarity_top_k", 30) + self.KBADMIN_MILVUS_URL = indexer.vector_url + self.CONNECTION_ARGS = {"uri": indexer.vector_url} + self.vector_field = "q_1024_vec" + self.text_field = "content_with_weight" + self.embedding_model_name = indexer.embed_model + self.embedding_url = indexer.kbadmin_embedding_url + "/v3" + self.embedding = OpenAIEmbeddings(model=self.embedding_model_name, api_key="unused", base_url=self.embedding_url, tiktoken_enabled=False, embedding_ctx_length=510) + + def config_kbadmin_milvus(self, knowledge_name): + collection_name = knowledge_name + if not kbs_rev_maps: + get_kbs_info( self.CONNECTION_ARGS) + collection_name = kbs_rev_maps[collection_name] + self.vector_db = Milvus( + 
self.embedding, + connection_args = self.CONNECTION_ARGS, + collection_name = collection_name, + + vector_field = self.vector_field, + text_field = self.text_field, + enable_dynamic_field=True, + index_params = {"index_type": "FLAT", "metric_type": "IP", "params": {}} + ) + + + def similarity_search_with_embedding(self, query: str, k) -> list[tuple[Document, float]]: + url = self.embedding_url + "/embeddings" + embedding_info = {"model": self.embedding_model_name,"input": query} + # Get embedding result from embedding service + response = requests.post(url, headers={'Content-Type': 'application/json'}, json=embedding_info) + embedding_json = response.json() + embedding = embedding_json['data'][0]['embedding'] + docs_and_scores = self.vector_db.similarity_search_with_score_by_vector(embedding=embedding, k=k) + relevance_score_fn = self.vector_db._select_relevance_score_fn() + return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores] + + + def run(self, **kwargs) -> Any: + query = kwargs["query"] + top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk + # langchain retrieval + docs_and_similarities = self.similarity_search_with_embedding(query=query, k=top_k) + node_with_scores: List[NodeWithScore] = [] + for doc, similarity in docs_and_similarities: + score: Optional[float] = None + if similarity is not None: + score = similarity + # convert langchain store format into llamaindex + node = Document.from_langchain_format(doc) + node_with_scores.append(NodeWithScore(node=node, score=score)) + return node_with_scores + + @model_serializer + def ser_model(self): + set = { + "idx": self.idx, + "retriever_type": self.comp_subtype, + "CONNECTION_ARGS": self.CONNECTION_ARGS + } + return set + + +# global kbs maps. 
+global kbs_rev_maps +kbs_rev_maps = {} +def get_kbs_info(CONNECTION_ARGS): + alias = "default" + try: + connections.connect("default", **CONNECTION_ARGS) + collections = utility.list_collections() + all_kb_infos = {} + new_infos = {} + for kb in collections: + collection = Collection(kb) + collection.load() + try: + if any(field.name == 'kb_id' for field in collection.schema.fields): + docs = collection.query( + expr="pk != 0", + output_fields=["kb_name", "kb_id", "docnm_kwd"], + timeout=10, + ) + else: + docs = collection.query( + expr="pk != 0", + output_fields=["filename"], + timeout=10, + ) + collection.release() + except MilvusException as e: + continue + this_kbinfo = {} + for doc in docs: + try: + if 'kb_name' in doc: + if not this_kbinfo: + this_kbinfo['name'] = doc['kb_name'] + this_kbinfo['uuid'] = doc['kb_id'] + this_kbinfo['files'] = set([doc['docnm_kwd']]) + else: + this_kbinfo['files'].add(doc['docnm_kwd']) + else: + if not this_kbinfo: + this_kbinfo['name'] = kb + this_kbinfo['uuid'] = "" + this_kbinfo['files'] = set([doc['filename']]) + else: + this_kbinfo['files'].add(doc['filename']) + except KeyError: + this_kbinfo = None + break + if this_kbinfo: + unique_files = list(this_kbinfo['files']) + this_kbinfo['files'] = unique_files + new_infos[kb] = this_kbinfo + all_kb_infos.update(new_infos) + kbs_rev_maps.clear() + for kb_id in all_kb_infos: + kbs_rev_maps[all_kb_infos[kb_id]['name']] = kb_id + return kbs_rev_maps + finally: + if connections.has_connection(alias): + connections.disconnect(alias) \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py index b8dd82ab7b..0b155e498b 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py @@ -26,6 +26,29 @@ def search_parser(self, npin: NodeParserIn) -> BaseComponent: return v return None + def search_parser_change(self, pl, req): + pl_change = False + try: 
+ if pl.node_parser.comp_subtype != req.node_parser.parser_type: + return True + if pl.node_parser.comp_subtype == req.node_parser.parser_type: + if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: + if (pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.SENTENCEWINDOW: + if pl.node_parser.window_size != req.node_parser.window_size: + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.HIERARCHY: + if pl.node_parser.chunk_sizes != req.node_parser.chunk_sizes: + pl_change = True + elif pl.node_parser.comp_subtype == NodeParserType.UNSTRUCTURED: + if (pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): + pl_change = True + except: + return False + return pl_change class IndexerMgr(BaseMgr): @@ -43,6 +66,7 @@ def search_indexer(self, indin: IndexerIn) -> BaseComponent: (v.model.model_id_or_path == indin.embedding_model.model_id) or (v.model.model_id_or_path == indin.embedding_model.model_path) ) + and v.model.device == indin.embedding_model.device ): return v return None diff --git a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py index dc69943eb2..4eabfcc021 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py @@ -13,6 +13,7 @@ class KnowledgeManager(BaseMgr): def __init__(self): super().__init__() self.active_knowledge_idx: Optional[str] = None + self.active_experience_idx: Optional[str] = None def get_knowledge_base_by_name_or_id(self, name: str): for _, kb in self.components.items(): @@ -26,8 +27,17 @@ def get_active_knowledge_base(self) -> Optional[Knowledge]: else: return None + def get_active_experience(self): + if self.active_experience_idx: + return 
self.get_knowledge_base_by_name_or_id(self.active_experience_idx) + else: + return None + def active_knowledge(self, knowledge: KnowledgeBaseCreateIn): kb = self.get_knowledge_base_by_name_or_id(knowledge.name) + if kb.comp_type != "knowledge": + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Experience type cannot be active") + kb = self.get_knowledge_base_by_name_or_id(knowledge.name) self.active_knowledge_idx = kb.idx if knowledge.active else None for idx, comp in self.components.items(): @@ -35,16 +45,36 @@ def active_knowledge(self, knowledge: KnowledgeBaseCreateIn): comp.active = idx == self.active_knowledge_idx return kb + def active_experience(self, knowledge: KnowledgeBaseCreateIn): + kb = self.get_knowledge_base_by_name_or_id(knowledge.name) + if kb.comp_type != "experience": + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Knowledge type cannot be active") + self.active_experience_idx = kb.idx if knowledge.experience_active else None + if kb.experience_active != knowledge.experience_active: + for idx, comp in self.components.items(): + if isinstance(comp, Knowledge): + comp.experience_active = idx == self.active_experience_idx + return kb + + def create_knowledge_base(self, knowledge: KnowledgeBaseCreateIn) -> Knowledge: for _, kb in self.components.items(): if kb.name == knowledge.name: raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="The knowledge base already exists.") + if knowledge.comp_type == "experience": + for idx, kb in self.components.items(): + if kb.comp_type =='experience': + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created.") + if knowledge.comp_type == "experience": + knowledge.active = False if knowledge.active is None: knowledge.active = False - kb = Knowledge(name=knowledge.name, description=knowledge.description, active=knowledge.active) + kb = Knowledge(name=knowledge.name, description=knowledge.description, 
active=knowledge.active, comp_type=knowledge.comp_type, comp_subtype=knowledge.comp_subtype, experience_active=knowledge.experience_active) self.add(kb) if knowledge.active: self.active_knowledge(knowledge) + if knowledge.experience_active: + self.active_experience(knowledge) return kb def delete_knowledge_base(self, name: str): @@ -54,12 +84,16 @@ def delete_knowledge_base(self, name: str): def update_knowledge_base(self, knowledge) -> Knowledge: kb = self.get_knowledge_base_by_name_or_id(knowledge.name) - - if knowledge.description is not None: - kb.description = knowledge.description - - if knowledge.active is not None and kb.active != knowledge.active: - kb = self.active_knowledge(knowledge) + if kb.comp_type == "knowledge": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.active is not None and kb.active != knowledge.active: + kb = self.active_knowledge(knowledge) + if kb.comp_type == "experience": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: + kb = self.active_experience(knowledge) return "Knowledge base update successfully" def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: @@ -67,3 +101,9 @@ def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: for idx, kb in self.components.items(): kb_list.append(kb) return kb_list + + def get_experience_kb(self): + for idx, kb in self.components.items(): + if kb.comp_type =='experience': + return kb + diff --git a/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py index 81524a3754..b22f0c66df 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/pipelinemgr.py @@ -66,8 +66,8 @@ def activate_pipeline(self, name: str, active: bool, nm: NodeMgr, kb_name: None) return nodelist = None - if pl.node_changed: - nodelist = 
nm.get_nodes(pl.node_parser.idx) + # if pl.node_changed: + # nodelist = nm.get_nodes(pl.node_parser.idx) pl.check_active(nodelist, kb_name) prevactive = self._active_pipeline if prevactive: diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt old mode 100644 new mode 100755 index 8dc53e6c83..3dd0877a82 --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -3,11 +3,11 @@ EbookLib>=0.18 faiss-cpu>=1.8.0.post1 html2text>=2025.4.15 langchain-core==0.3.60 -llama-index==0.12.41 -llama-index-core==0.12.41 +llama-index==0.12.36 +llama-index-core==0.12.37 llama-index-embeddings-openvino==0.5.2 -llama-index-llms-openai==0.4.0 -llama-index-llms-openai-like==0.4.0 +llama-index-llms-openai==0.3.44 +llama-index-llms-openai-like==0.3.4 llama-index-llms-openvino==0.4.0 llama-index-postprocessor-openvino-rerank==0.4.1 llama-index-readers-file==0.4.7 @@ -20,6 +20,8 @@ pillow>=10.4.0 py-cpuinfo>=9.0.0 pymilvus==2.5.10 python-docx==1.1.2 -unstructured==0.16.11 +unstructured unstructured[pdf] werkzeug==3.1.3 +langchain-openai +langchain-milvus \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/utils.py b/EdgeCraftRAG/edgecraftrag/utils.py index 18a43e5879..bbc6434e98 100755 --- a/EdgeCraftRAG/edgecraftrag/utils.py +++ b/EdgeCraftRAG/edgecraftrag/utils.py @@ -44,15 +44,14 @@ def iter_elements(cls, paragraph: Paragraph, opts: DocxPartitionerOptions) -> It yield Image(text="IMAGE", metadata=element_metadata) -def get_prompt_template(model_id, prompt_content=None, template_path=None, enable_think=False): +def get_prompt_template(model_path, prompt_content=None, template_path=None, enable_think=False): if prompt_content is not None: template = prompt_content elif template_path is not None: template = Path(template_path).read_text(encoding=None) else: template = DEFAULT_TEMPLATE - tokenizer = AutoTokenizer.from_pretrained(model_id) - model_id = model_id.split("/")[-1] + tokenizer = 
AutoTokenizer.from_pretrained(model_path) messages = [{"role": "system", "content": template}, {"role": "user", "content": "\n{input}\n"}] prompt_template = tokenizer.apply_chat_template( messages, @@ -126,7 +125,7 @@ def concat_history(message: str) -> str: max_token = 6000 active_pl = ctx.get_pipeline_mgr().get_active_pipeline() if active_pl.generator.inference_type == InferenceType.VLLM: - vllm_max_len = int(os.getenv("MAX_MODEL_LEN", "5000")) + vllm_max_len = int(os.getenv("MAX_MODEL_LEN", "10240")) if vllm_max_len > 5000: max_token = vllm_max_len - 1024 diff --git a/EdgeCraftRAG/nginx/nginx-conf-generator.sh b/EdgeCraftRAG/nginx/nginx-conf-generator.sh index bd8e5b194c..f12799f583 100644 --- a/EdgeCraftRAG/nginx/nginx-conf-generator.sh +++ b/EdgeCraftRAG/nginx/nginx-conf-generator.sh @@ -25,7 +25,7 @@ EOL # Generate the server lines for ((i=0; i> $2 + echo " server ${HOST_IP}:${!PORT_VAR:-8$((i+1))00};" >> $2 done # Close the upstream block and the http block diff --git a/EdgeCraftRAG/tests/configs/test_data.json b/EdgeCraftRAG/tests/configs/test_data.json deleted file mode 100644 index 648ae9624d..0000000000 --- a/EdgeCraftRAG/tests/configs/test_data.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "A test case for the rag pipeline. The test id is 1234567890. There are several tests in this test case. The first test is for node parser. There are 3 types of node parsers. Their names are Aa, Bb and Cc. The second test is for indexer. The indexer will do the indexing for the given nodes. The last test is for retriever. Retrieving text is based on similarity search." 
-} diff --git a/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json b/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json deleted file mode 100644 index 097309d7e2..0000000000 --- a/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "rag_test_local_llm", - "node_parser": { - "chunk_size": 400, - "chunk_overlap": 48, - "parser_type": "simple" - }, - "indexer": { - "indexer_type": "faiss_vector", - "embedding_model": { - "model_id": "BAAI/bge-small-en-v1.5", - "model_path": "./models/BAAI/bge-small-en-v1.5", - "device": "auto", - "weight": "INT4" - } - }, - "retriever": { - "retriever_type": "vectorsimilarity", - "retrieve_topk": 30 - }, - "postprocessor": [ - { - "processor_type": "reranker", - "top_n": 2, - "reranker_model": { - "model_id": "BAAI/bge-reranker-large", - "model_path": "./models/BAAI/bge-reranker-large", - "device": "auto", - "weight": "INT4" - } - } - ], - "generator": { - "inference_type": "vllm", - "model": { - "model_id": "Qwen/Qwen3-8B", - "model_path": "", - "device": "", - "weight": "" - }, - "prompt_path": "./default_prompt.txt", - "vllm_endpoint": "" - }, - "active": "True" -} diff --git a/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json b/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json deleted file mode 100644 index 39ee2ef0f1..0000000000 --- a/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "name": "rag_test_local_llm", - "node_parser": { - "chunk_size": 400, - "chunk_overlap": 48, - "parser_type": "simple" - }, - "indexer": { - "indexer_type": "faiss_vector", - "embedding_model": { - "model_id": "BAAI/bge-small-en-v1.5", - "model_path": "./models/BAAI/bge-small-en-v1.5", - "device": "auto", - "weight": "INT4" - } - }, - "retriever": { - "retriever_type": "vectorsimilarity", - "retrieve_topk": 30 - }, - "postprocessor": [ - { - "processor_type": "reranker", - "top_n": 2, - "reranker_model": { - "model_id": 
"BAAI/bge-reranker-large", - "model_path": "./models/BAAI/bge-reranker-large", - "device": "auto", - "weight": "INT4" - } - } - ], - "generator": { - "model": { - "model_id": "Qwen/Qwen3-8B", - "model_path": "./models/Qwen/Qwen3-8B/INT4_compressed_weights", - "device": "auto", - "weight": "INT4" - }, - "prompt_path": "./default_prompt.txt", - "inference_type": "local" - }, - "active": "True" -} diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh new file mode 100755 index 0000000000..d0b808df05 --- /dev/null +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -e +source ./common.sh + +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" + +ip_address=$(hostname -I | awk '{print $1}') +HOST_IP=$ip_address + +COMPOSE_FILE="compose_vllm_b60.yaml" +EC_RAG_SERVICE_PORT=16010 + +MODEL_PATH="${HOME}/models" +# MODEL_PATH="$WORKPATH/models" +DOC_PATH="$WORKPATH/tests" +UI_UPLOAD_PATH="$WORKPATH/tests" + +HF_ENDPOINT=https://hf-mirror.com +VLLM_SERVICE_PORT_B60=8086 +TP=1 +vLLM_ENDPOINT="http://${HOST_IP}:${VLLM_SERVICE_PORT_B60}" +LLM_MODEL="Qwen/Qwen3-8B" +LLM_MODEL_PATH="${HOME}/qwen/" +VLLM_IMAGE_TAG="1.0" +DP=1 + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + echo "GenAIComps test commit is $(git rev-parse HEAD)" + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . 
+ popd && sleep 1s + + echo "Pull intel/llm-scaler-vllm image" + docker pull intel/llm-scaler-vllm:${VLLM_IMAGE_TAG} + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." + docker compose -f build.yaml build --no-cache > ${LOG_PATH}/docker_image_build.log + + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/gpu/arc + source set_env.sh + # Start Docker Containers + docker compose -f $COMPOSE_FILE up -d > ${LOG_PATH}/start_services_with_compose.log + echo "ipex-serving-xpu is booting, please wait." + sleep 30s + n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ipex-serving-xpu-container > ${LOG_PATH}/ipex-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ${LOG_PATH}/ipex-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done +} + +function validate_services() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + echo "[ $SERVICE_NAME ] Validating $SERVICE_NAME service..." + local RESPONSE=$(curl -s -w "%{http_code}" -o ${LOG_PATH}/${SERVICE_NAME}.log -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + while [ ! -f ${LOG_PATH}/${SERVICE_NAME}.log ]; do + sleep 1 + done + local HTTP_STATUS="${RESPONSE: -3}" + local CONTENT=$(cat ${LOG_PATH}/${SERVICE_NAME}.log) + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. 
Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 1s +} + +function validate_rag() { + cd $WORKPATH/tests + + # setup pipeline + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/settings/pipelines" \ + "active" \ + "pipeline" \ + "edgecraftrag-server" \ + '@configs/test_pipeline_ipex_vllm.json' + + # add data + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/data" \ + "Done" \ + "data" \ + "edgecraftrag-server" \ + '@configs/test_data.json' + + # query + validate_services \ + "${HOST_IP}:${EC_RAG_SERVICE_PORT}/v1/chatqna" \ + "1234567890" \ + "query" \ + "ipex-serving-xpu-container" \ + '{"messages":"What is the test id?","max_tokens":5}' +} + +function validate_megaservice() { + # Curl the Mega Service + validate_services \ + "${HOST_IP}:16011/v1/chatqna" \ + "1234567890" \ + "query" \ + "ipex-serving-xpu-container" \ + '{"messages":"What is the test id?","max_tokens":5}' +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/gpu/arc + docker compose -f $COMPOSE_FILE down +} + + +function main() { + mkdir -p $LOG_PATH + + echo "::group::stop_docker" + stop_docker + echo "::endgroup::" + + echo "::group::build_docker_images" + if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi + echo "::endgroup::" + + echo "::group::start_services" + start_services + echo "::endgroup::" + + echo "::group::validate_rag" + validate_rag + echo "::endgroup::" + + echo "::group::validate_megaservice" + validate_megaservice + echo "::endgroup::" + + echo "::group::stop_docker" + stop_docker + echo y | docker system prune + echo "::endgroup::" + +} + +main diff --git a/EdgeCraftRAG/tools/quick_start.sh b/EdgeCraftRAG/tools/quick_start.sh index fdefa797f0..0510b1637d 100755 --- a/EdgeCraftRAG/tools/quick_start.sh +++ b/EdgeCraftRAG/tools/quick_start.sh @@ -5,6 +5,8 @@ set -e WORKPATH=$(dirname "$(pwd)") +ip_address=$(hostname -I | awk '{print $1}') +HOST_IP=$ip_address 
get_user_input() { local var_name=$1 @@ -32,7 +34,7 @@ function start_vllm_services() { MILVUS_ENABLED=$(get_enable_function "MILVUS DB(Enter 1 for enable)" "0") CHAT_HISTORY_ROUND=$(get_user_input "chat history round" "0") LLM_MODEL=$(get_user_input "your LLM model" "Qwen/Qwen3-8B") - MODEL_PATH=$(get_user_input "your model path" "${HOME}/models") + MODEL_PATH=$(get_user_input "your model path" "${PWD}/models") read -p "Have you prepare models in ${MODEL_PATH}:(yes/no) [yes]" user_input user_input=${user_input:-"yes"} @@ -63,14 +65,20 @@ function start_vllm_services() { # vllm ENV export NGINX_PORT=8086 export vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" - TENSOR_PARALLEL_SIZE=$(get_user_input "your tp size" 1) - read -p "selected GPU [$(seq -s, 0 $((TENSOR_PARALLEL_SIZE - 1)))] " SELECTED_XPU_0; SELECTED_XPU_0=${SELECTED_XPU_0:-$(seq -s, 0 $((TENSOR_PARALLEL_SIZE - 1)))} - DP_NUM=$(get_user_input "DP number(how many containers to run vLLM)" 1) - for (( x=0; x ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + +function quick_start_ov_services() { + COMPOSE_FILE="compose.yaml" + echo "stop former service..." 
+ docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE down + + ip_address=$(hostname -I | awk '{print $1}') + export HOST_IP=${HOST_IP:-"${ip_address}"} + export DOC_PATH=${DOC_PATH:-"$WORKPATH/tests"} + export TMPFILE_PATH=${TMPFILE_PATH:-"$WORKPATH/tests"} + export MILVUS_ENABLED=${MILVUS_ENABLED:-1} + export CHAT_HISTORY_ROUND=${CHAT_HISTORY_ROUND:-"0"} + export LLM_MODEL=${LLM_MODEL:-"Qwen/Qwen3-8B"} + export MODEL_PATH=${MODEL_PATH:-"${HOME}/models"} + export VIDEOGROUPID=$(getent group video | cut -d: -f3) + export RENDERGROUPID=$(getent group render | cut -d: -f3) + + check_baai_folder + export HF_CACHE=${HF_CACHE:-"${HOME}/.cache"} + if [ ! -d "${HF_CACHE}" ]; then + mkdir -p "${HF_CACHE}" + echo "Created directory: ${HF_CACHE}" + fi + + sudo chown 1000:1000 "${MODEL_PATH}" "${DOC_PATH}" "${TMPFILE_PATH}" + sudo chown -R 1000:1000 "${HF_CACHE}" + export HF_ENDPOINT=${HF_ENDPOINT:-"https://hf-mirror.com"} + export no_proxy="localhost, 127.0.0.1, 192.168.1.1, ${HOST_IP}" + export CCL_DG2_USM=${CCL_DG2_USM:-0} + + echo "Starting service..." + docker compose -f "$WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE" up -d +} + +function main { + if [[ $- == *i* ]]; then + read -p "Do you want to start vLLM or local OpenVINO services? 
(vLLM/ov) [vLLM]: " user_input + user_input=${user_input:-"vLLM"} + if [ "$user_input" == "vLLM" ]; then + start_vllm_services + else + start_services + fi + else + export SERVICE_TYPE=${SERVICE_TYPE:-"vLLM"} + if [ "$SERVICE_TYPE" == "vLLM" ]; then + quick_start_vllm_services + else + quick_start_ov_services + fi fi } diff --git a/EdgeCraftRAG/ui/vue/.env.development b/EdgeCraftRAG/ui/vue/.env.development index d7ef344a8a..ea6834f8a0 100644 --- a/EdgeCraftRAG/ui/vue/.env.development +++ b/EdgeCraftRAG/ui/vue/.env.development @@ -2,5 +2,5 @@ ENV = development # Local Api -VITE_API_URL = http://10.67.106.238:16010/ -VITE_CHATBOT_URL = http://10.67.106.238:16011/ +VITE_API_URL = / +VITE_CHATBOT_URL = / diff --git a/EdgeCraftRAG/ui/vue/components.d.ts b/EdgeCraftRAG/ui/vue/components.d.ts index 7959bda79a..bf19897b66 100644 --- a/EdgeCraftRAG/ui/vue/components.d.ts +++ b/EdgeCraftRAG/ui/vue/components.d.ts @@ -1,11 +1,8 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - /* eslint-disable */ // @ts-nocheck // Generated by unplugin-vue-components // Read more: https://github.com/vuejs/core/pull/3399 -export {}; +export {} /* prettier-ignore */ declare module 'vue' { @@ -45,6 +42,7 @@ declare module 'vue' { ASelectOption: typeof import('ant-design-vue/es')['SelectOption'] ASlider: typeof import('ant-design-vue/es')['Slider'] ASpace: typeof import('ant-design-vue/es')['Space'] + ASpin: typeof import('ant-design-vue/es')['Spin'] ASteps: typeof import('ant-design-vue/es')['Steps'] ATable: typeof import('ant-design-vue/es')['Table'] ATag: typeof import('ant-design-vue/es')['Tag'] @@ -52,6 +50,7 @@ declare module 'vue' { ATooltip: typeof import('ant-design-vue/es')['Tooltip'] AUploadDragger: typeof import('ant-design-vue/es')['UploadDragger'] FormTooltip: typeof import('./src/components/FormTooltip.vue')['default'] + PartialLoading: typeof import('./src/components/PartialLoading.vue')['default'] RouterLink: typeof 
import('vue-router')['RouterLink'] RouterView: typeof import('vue-router')['RouterView'] SvgIcon: typeof import('./src/components/SvgIcon.vue')['default'] diff --git a/EdgeCraftRAG/ui/vue/index.html b/EdgeCraftRAG/ui/vue/index.html index df137679ef..c871332d3c 100644 --- a/EdgeCraftRAG/ui/vue/index.html +++ b/EdgeCraftRAG/ui/vue/index.html @@ -9,6 +9,7 @@ + Edge Craft RAG based Q&A Chatbot diff --git a/EdgeCraftRAG/ui/vue/nginx.conf b/EdgeCraftRAG/ui/vue/nginx.conf index 6d9a233bf8..8b6701e78a 100644 --- a/EdgeCraftRAG/ui/vue/nginx.conf +++ b/EdgeCraftRAG/ui/vue/nginx.conf @@ -28,7 +28,7 @@ http { proxy_pass http://edgecraftrag-server:16010; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_http_version 1.1; - proxy_read_timeout 180s; + proxy_read_timeout 600s; proxy_set_header Connection ""; } diff --git a/EdgeCraftRAG/ui/vue/package.json b/EdgeCraftRAG/ui/vue/package.json index 516e870406..d56e123754 100644 --- a/EdgeCraftRAG/ui/vue/package.json +++ b/EdgeCraftRAG/ui/vue/package.json @@ -9,7 +9,6 @@ "preview": "vite preview" }, "dependencies": { - "@vueuse/i18n": "^4.0.0-beta.12", "ant-design-vue": "^4.0.0-rc.6", "axios": "^1.7.9", "clipboard": "^2.0.11", diff --git a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts index f7946ad72d..0c4d3cdb5a 100644 --- a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts @@ -21,9 +21,9 @@ export const requestChatbotConfig = (data: Object) => { }); }; -export const getBenchmark = (name: String) => { +export const getBenchmark = () => { return request({ - url: `/v1/settings/pipelines/${name}/benchmark`, + url: `/v1/settings/pipeline/benchmark`, method: "get", }); }; diff --git a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts index bb7bc9a494..94976fbe78 100644 --- a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts +++ 
b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts @@ -10,7 +10,7 @@ export const getKnowledgeBaseList = () => { }); }; -export const getKnowledgeBaseDetialByName = (kbName: String) => { +export const getKnowledgeBaseDetailByName = (kbName: String) => { return request({ url: `/v1/knowledge/${kbName}`, method: "get", @@ -54,15 +54,14 @@ export const requestKnowledgeBaseRelation = (kbName: String, data: Object) => { url: `/v1/knowledge/${kbName}/files`, method: "post", data, - showLoading: true, showSuccessMsg: true, successMsg: "request.knowledge.uploadSucc", }); }; -export const requestFileDelete = (kbName: String, data: Object) => { +export const requestFileDelete = (name: String, data: Object) => { return request({ - url: `/v1/knowledge/${kbName}/files`, + url: `/v1/knowledge/${name}/files`, method: "delete", data, showLoading: true, @@ -71,4 +70,89 @@ export const requestFileDelete = (kbName: String, data: Object) => { }); }; +export const getExperienceList = () => { + return request({ + url: "/v1/experiences", + method: "get", + }); +}; + +export const requestExperienceCreate = (data: EmptyArrayType) => { + return request({ + url: "/v1/multiple_experiences/check", + method: "post", + data, + showLoading: true, + }); +}; +export const requestExperienceConfirm = ( + flag: Boolean, + data: EmptyArrayType +) => { + return request({ + url: `/v1/multiple_experiences/confirm?flag=${flag}`, + method: "post", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.createSucc", + }); +}; +export const getExperienceDetailByName = (data: Object) => { + return request({ + url: `/v1/experience`, + method: "post", + data, + }); +}; + +export const requestExperienceUpdate = (data: Object) => { + return request({ + url: `/v1/experiences`, + method: "patch", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.updateSucc", + }); +}; + +export const requestExperienceDelete = (data: Object) => { + return request({ 
+ url: `/v1/experiences`, + method: "delete", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "request.experience.deleteSucc", + }); +}; + +export const requestExperienceRelation = (data: Object) => { + return request({ + url: "/v1/experiences/files", + method: "post", + data, + showLoading: true, + showSuccessMsg: true, + successMsg: "experience.importSuccTip", + }); +}; + +export const getkbadminList = () => { + return request({ + url: "/v1/kbadmin/kbs_list", + method: "get", + }); +}; + +export const requestUploadFileUrl = (kbName: String, data: Object) => { + return request({ + url: `v1/data/file/${kbName}`, + method: "post", + data, + type: "files", + }); +}; + export const uploadFileUrl = `${import.meta.env.VITE_API_URL}v1/data/file/`; diff --git a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts index fd06d1d3d8..bf12c4f331 100644 --- a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts @@ -14,7 +14,6 @@ export const getPipelineList = () => { return request({ url: "/v1/settings/pipelines", method: "get", - showLoading: true, }); }; @@ -107,4 +106,6 @@ export const requestUrlVllm = (data: Object) => { }); }; -export const importUrl = `${import.meta.env.VITE_API_URL}v1/settings/pipelines/import`; +export const importUrl = `${ + import.meta.env.VITE_API_URL +}v1/settings/pipelines/import`; diff --git a/EdgeCraftRAG/ui/vue/src/api/request.ts b/EdgeCraftRAG/ui/vue/src/api/request.ts index 91805dbab5..ce1ee0ca5e 100644 --- a/EdgeCraftRAG/ui/vue/src/api/request.ts +++ b/EdgeCraftRAG/ui/vue/src/api/request.ts @@ -7,8 +7,6 @@ import axios, { AxiosInstance } from "axios"; import qs from "qs"; import i18n from "@/i18n"; -const antNotification = serviceManager.getService("antNotification"); - const service: AxiosInstance = axios.create({ baseURL: import.meta.env.VITE_API_URL, timeout: 600000, @@ -29,7 +27,7 @@ service.interceptors.request.use( }, (error) => { 
return Promise.reject(error); - }, + } ); // response interceptor @@ -39,8 +37,14 @@ service.interceptors.response.use( if (NextLoading) NextLoading.done(); const res = response.data; if (config.showSuccessMsg) { + const antNotification = serviceManager.getService("antNotification"); + if (antNotification) - antNotification("success", i18n.global.t("common.success"), i18n.global.t(config.successMsg)); + antNotification( + "success", + i18n.global.t("common.success"), + i18n.global.t(config.successMsg) + ); } return Promise.resolve(res); }, @@ -55,10 +59,12 @@ service.interceptors.response.use( } else { errorMessage = error.message; } - if (antNotification) antNotification("error", i18n.global.t("common.error"), errorMessage); + const antNotification = serviceManager.getService("antNotification"); + if (antNotification) + antNotification("error", i18n.global.t("common.error"), errorMessage); return Promise.reject(error); - }, + } ); export default service; diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css index dedd20b357..e62f3bfbba 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css @@ -1,9 +1,8 @@ @font-face { font-family: "iconfont"; /* Project id 4784207 */ - src: - url("iconfont.woff2?t=1754038546130") format("woff2"), - url("iconfont.woff?t=1754038546130") format("woff"), - url("iconfont.ttf?t=1754038546130") format("truetype"); + src: url('iconfont.woff2?t=1757469597873') format('woff2'), + url('iconfont.woff?t=1757469597873') format('woff'), + url('iconfont.ttf?t=1757469597873') format('truetype'); } .iconfont { @@ -14,6 +13,14 @@ -moz-osx-font-smoothing: grayscale; } +.icon-kb:before { + content: "\e639"; +} + +.icon-experience:before { + content: "\e68e"; +} + .icon-deep-think:before { content: "\e772"; } @@ -233,3 +240,4 @@ .icon-active:before { content: "\e795"; } + diff --git 
a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js index f6731b5c1a..e5e61851fd 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js @@ -1,68 +1 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -(window._iconfont_svg_string_4784207 = - ''), - ((h) => { - var l = (a = (a = document.getElementsByTagName("script"))[a.length - 1]).getAttribute("data-injectcss"), - a = a.getAttribute("data-disable-injectsvg"); - if (!a) { - var c, - t, - i, - o, - v, - m = function (l, a) { - a.parentNode.insertBefore(l, a); - }; - if (l && !h.__iconfont__svg__cssinject__) { - h.__iconfont__svg__cssinject__ = !0; - try { - document.write( - "", - ); - } catch (l) { - console && console.log(l); - } - } - (c = function () { - var l, - a = document.createElement("div"); - (a.innerHTML = h._iconfont_svg_string_4784207), - (a = a.getElementsByTagName("svg")[0]) && - (a.setAttribute("aria-hidden", "true"), - (a.style.position = "absolute"), - (a.style.width = 0), - (a.style.height = 0), - (a.style.overflow = "hidden"), - (a = a), - (l = document.body).firstChild ? m(a, l.firstChild) : l.appendChild(a)); - }), - document.addEventListener - ? ~["complete", "loaded", "interactive"].indexOf(document.readyState) - ? 
setTimeout(c, 0) - : ((t = function () { - document.removeEventListener("DOMContentLoaded", t, !1), c(); - }), - document.addEventListener("DOMContentLoaded", t, !1)) - : document.attachEvent && - ((i = c), - (o = h.document), - (v = !1), - s(), - (o.onreadystatechange = function () { - "complete" == o.readyState && ((o.onreadystatechange = null), e()); - })); - } - function e() { - v || ((v = !0), i()); - } - function s() { - try { - o.documentElement.doScroll("left"); - } catch (l) { - return void setTimeout(s, 50); - } - e(); - } - })(window); +window._iconfont_svg_string_4784207='',(c=>{var l=(a=(a=document.getElementsByTagName("script"))[a.length-1]).getAttribute("data-injectcss"),a=a.getAttribute("data-disable-injectsvg");if(!a){var h,t,i,o,v,m=function(l,a){a.parentNode.insertBefore(l,a)};if(l&&!c.__iconfont__svg__cssinject__){c.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(l){console&&console.log(l)}}h=function(){var l,a=document.createElement("div");a.innerHTML=c._iconfont_svg_string_4784207,(a=a.getElementsByTagName("svg")[0])&&(a.setAttribute("aria-hidden","true"),a.style.position="absolute",a.style.width=0,a.style.height=0,a.style.overflow="hidden",a=a,(l=document.body).firstChild?m(a,l.firstChild):l.appendChild(a))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(t=function(){document.removeEventListener("DOMContentLoaded",t,!1),h()},document.addEventListener("DOMContentLoaded",t,!1)):document.attachEvent&&(i=h,o=c.document,v=!1,s(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,e())})}function e(){v||(v=!0,i())}function s(){try{o.documentElement.doScroll("left")}catch(l){return void setTimeout(s,50)}e()}})(window); \ No newline at end of file diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json index a8fea13f43..db90f79659 100644 --- 
a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.json @@ -5,6 +5,20 @@ "css_prefix_text": "icon-", "description": "", "glyphs": [ + { + "icon_id": "687788", + "name": "知识库", + "font_class": "kb", + "unicode": "e639", + "unicode_decimal": 58937 + }, + { + "icon_id": "5299955", + "name": "experience", + "font_class": "experience", + "unicode": "e68e", + "unicode_decimal": 59022 + }, { "icon_id": "44419262", "name": "deep-think", diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.ttf b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.ttf index d49803ef9cbebf7c4fb04aabd4ce75212446d270..6f8585b8d57c230b0b38fec8f5941cedaf3b1cca 100644 GIT binary patch delta 1942 zcmbu8TWnNC7{~u}u6w_9Z>5)Ix9#pCETvLiDFhy8deEd@5nhr49OPq3MCwF>R{Q;*&0bqu|QvXjsH#e8p znEE2kZBRs%9&xUt5M`4Ew_+>YWz(CR=3`KzQ0GT!>K%aE1daVpySwoqbkuQhmD_n8 zZ{e-Hov-B^c_-h>cko`mn-B0Ip5ur3YlTJXLks?EwXyJlI$RiMTAtBA%QlMt+j6+K z0Uj(s2zIPME1Ho)5;4@H4iPMbh5(E$l&lR-v>*cujfkQF_hT8BkW_70j3B%)5k?dI zScx>^Bug#YQHA?ZgXLICqPS5F7ZN1bBKSyKqi{pJQ@E^}d6&>y5e5)XJHV+E(8Wkd z30)D~E}_eU`z3U3@G1!<03MJ~D&Rp0B?T_d5R@KxSVD<{NAlu_7Gl(ss|1uixOgE! 
zI)K+mND=T_326f^)(BDwyhTEKfs2i- z9g~oMPMXfZRy{pM|3mu@~*Jm+_*Zft|L{#l<9gPpof=d z606yYWGay~bq@=M6KWqkmTIXZtJqyapX-%8f+&;H@ z#W1d@G?=(pES_H+Ybxf8iFjIbdp)#)sq>r0Q}I}_xGUWaaelCipa_8NzWH$9PQ>*~4Qv)!^P2R94`!m8TZ>I&2iZg4HI x8Je%#=duO6eR}-dE!(v;>nKdLWdp2(Jpg6zQ;z@PN{yG#H8 delta 1363 zcmbu8OKeP09LB$U@62=_opw5%QuIMv?@>e1SqPdSA&rPdBOamARz2DdCPE|~B1Eh( zCBa5KB55p85~M4MNE(sG!ZXo^=6~CUjUZy7zT?lr!pgn*opaAQ_s*RE_kCw5eJOP+ zk&*5tPXYT0;Jveb_raEp7el)M|7T#<+ScaArufn8f;+sIta{lM`RL8z> zdMKTE4=lXFi9M%#E0-o=@m+H5b5F;X{Ka1o@^Atex;^jopMGp?EaS19BOLo-n^|7w zyJWV^8pwL>cdH`5#;}8Mem%w62LUGnkM)*suE#QXQ7jNg0#YIgsgXKaE%nkMn`Nss zNsH`|v~*?{I4g?&wNjbA>+H-}{58GX@jrJ8f8RhZ#-k89n2&jgV=AgphDn%+B9yZ3 z0(jsnip0kw!=I*O6NY)TTsILcB< z8K)tS8v3%9_gi+*31xDPruJ3S@~Q=doq?O8XbQIn2`Jnsq(I@8AyI{!hnO|OG9WR9 z1wo1wmIfK8ut>=GjA{5m8GEM3z{(*L6lwtZ4b^~{H9}n=C5lOgBot}|sZppJq)wrJ zkktxIA@vF})qG%-4OADhS)tC5tqK)~G%3^`(xT7>$PR^mK++1GfpjYL2-2m{EeMa5 zfxbZ=0U6WKLdaudc^7*%|@rI^t2YR3<2gLTgmdz0PgR5@MF15euX(!0j{ z&=>KgeBX06<@EcP_@4!;11AEbL4UA1crN%o)E(*%eaoGgyFcs?uMA(!LtcB{P=0Iv zyGUs?vNCd~AYRZL-4H!fSW$SP@N29yb}2SmM2C82gkHt1WY$imEc6^p4p_|EPrwd8 ACjbBd diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.woff b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.woff index e9b153a7f53cdb04c2fd5285bc717d763ccfdf5e..9e3764205e5172452a77bd26b78a2118b8a01979 100644 GIT binary patch delta 9211 zcmVpm=l}o!F#rGnHZ1I1H)v>OVE_Ok8~^|S9smFUBnRIE zyJ&56cmMz*JOBUy8vpOVAOTb=0E%v5ZDjxeBJcnJ0YCr%0%s7TJg{(Yb94XzBrpH~0jdB10)yn?!(NjJ z0gHdXx-l^^iFa4s+|B*AiMeeTTd}r^V4;Go*a>!8ih>rlegn&ev?BNc6t#ceUD3ub zV9x8Y3hBfmGyImBVRwd|bDkHt0W`@wDd=#J1|8A(P*)o+t~YV};(DRsOy|G&so#`> zXo{97MLC9|62mbPqcIlaF%gq79WyZ(3$cG3Yf+8u*opl(h|@Sr#Z*pHX*R8-^|X;T zQ#Bpc=4-39-P&HgP+vM;JlQ%whs(RSU-|Bb|GN9NZ4G)m-e<48&;7N-J^JXTg)a>8 zftS4CF%Kxw%OfS%Nwc1Ri#wG0&NrU%kf(g&BX4QsD{tuHJ~!!Mkam73*C$GLfC_&d zyr!SeyjQMwDbmVwCH{&w{h{V-d|Us^)|x*We_x(eayw0lsHdiEh!#^yM7xo%b(oSP zI!zf8U8XdNZd0yAk11iI*OWC;G^I}Tner$4jr`pKQzwX$<++Zsk)JJu?);nI!26}+D1&6dPjduni@ztIZnc7QK 
zO+6;Ijr{(0Ox-5-O)V!5jQlQ7P0c6HOfx_#nx=tNw!99eDbs|IW-YIgX~i@(q;<=E zNE@a}B5j&xic~dC7wOP6XQU(3#8I2Kybjk^O;bp1*EEmR_AEECUNFrl^(BAH>+|uV zX>K{$GTQzCJNP)+c$}4b3v?XCm1tMjO!rLBbocc1Z+@CztEV*@%}AQBEZLH5gJnxH z7>wmFwv0buFu?`_fxsez1K8#hz>tOA1$&c_1aC-42xQqu@FU=mgycAUvzt5~W(gYt zhp-83*bUQux4LI!o3Q8XlX`!uZ{4a}-Boq})s%;#jvvN{@j@z0X;hIqjiO|)R&JQX zsG76P9P1U3qM1B0y%wu`&4$GigznYqZXfnkbP~DfbI=2fz@V?NJ|AmO2YGuPKFG#^SDuu;0p5RMpFbG)_g$C~@f6E&A$c$M_yb`z$t1OK zz~{kxWRdf-hZ^-Y>n~z%xNu@cqqb)KzpP(duiMuyIbYT=u2g@5a^n0Y!Kjb%gw}ZQt54OV6dRz@%DUR_fqg<-uSVtQ$_glbY1 zwtfU5-O^#(8wT63yt?GASPsxI^%~%5L^ED=nJ4J;*|&Qcuh+}ixBK7^?eu!>S3aD2 z{=*0ESpJ}W@F0JU4%!#oHn#h$x6tHScQ;@BVDy6z@C3rI&|Ju#@{69AF@it*vPblz zhunH|`nEs*@wP4h{JW8@Pd&92nGaq$eN#Sv)3@dq9)G-W3v4sR!rb1+58@DTbtlzF z4O5fUCTcUx$;b+H2U3z~2$hHFd8k^V^-Q^5%^8`jK=6OwOqNl!ii5)~oLjR_shKbm zA|uf!Ml#UYUZpc3#*1-s4~dD+-=nMai1ly6bYxVWXGJHc!52*0@_-vO5`@_Jn36Wxg)0b1uf@`+rm6yHbs z5~6+N<*0v_9N75pahCE?672gZMVZwcnRvP0m<3dYfmO-D)apaXS+RPfQr0k2K+puz z0Zm30n7>|5A&ovJ$7Hy6DdeV%CX{AENhFk4xpHbIRpwq*5?}VKyJUWbmv^a>c+8*h z9}_q5ghuAw+qDPOL_$3PWSyOf1S%`XctzolS>k^&jync#PQ!aYiJx)w)m(VzYzfsz zX!rRFi&zF_vJQ_9)38e#d4B-`13UXk(5RdwbpfGoNr@2hFJ{q}a7F3~hwVE*E25~~ zSzTF8)W>ot7&VNj{Xs`pR|ooaA|Fa*5~1qKKS&)};ZhE54u=cjO4#0iTEPBkWn~rp zDmQ;tPj;3K)RnNaiF`hR_)sDQm;J};$_iO?L<+o@{vj2EN|w#$s`ZMg^&+@C?++pd zxCVXid+&90cf(T=d!3RWRGX^$XLT^IpgE{}Fpm&(ojTYJ*g-&d52`JGhq3R$8ptu> z_kL;~HBK#u=ae-8!6q^%7GW4UOW-Dj3>&NVE3lun{;CR{paP9hA*%&z1xpGm zr@LHH&Cvy3Mlx?7@&|){ddeR}N;F(HnhzP}vN4rDoF*3!QEg?`% zG_nmdlGi~6!tG8{PJ8$0Ht=m3m16TiD_u{F=g#r}akyAQ5z?y$kIy(S* zy0EyrMbz4l6hmco93jDe-3RfPpUgLOBu7O)V!FBnUZO)ww18et~^y66=m zkEAE4mfSF?Qn1EgN(n{ILYw&2m#z}l+&tlnr+OxendIC%?p?fS$gqkNJx0nue$&R$ z^%W1(Gtm<+Zo6b#F@gXL;8=fb!?1kz=Cfr?H^vqm%zy4#G~hX55vE8>KC1z4*O@GXBh=;+-TNNW#R z+hJ;qT1Ks=&Z91(E~l;~dfnAKVks}Z;AWWZ`SS$~0PoB{yXpTOvCjQ;5u0^k^p zu?OReu#aJU%sx-h?+^O!WY&TzG}vuX~?1C}*leT$6m!B=30JYu02`N=a7g91`8 z7#d|oRTS0B{3sj=ha41Q0KFP(oyTigLAHJJQ_Sb6g1q0Xi?sdTHc?%IrKoPyd-pP^%ox 
z-U05YrnuI)Ws85wOM{4cLBNQ)c0Ajb!vh+T2Dg^XYM3klY`!yH$M*%91L>q|^dAIy zOps+^OpYsK+)q8eXr2~@0%y9hvfej5gAw&CqGchI7;b5ZD zrP6;sg+5M3{C8_KC&%T~;AH-(%Bn$j+Q&1PxxhEDe&BxsGV}#7hCX&$P|&POg2Est zzXY990Qo3_?*`rp(RB^bg*9-qS`N8FG`|i7%K-C^08H9*miH6dvI1@O2I!h$RI`Q= zTG7{g=GoIMi{&kMEZ=m_*?s-_JjZg3i03hW5AWGkoX@bHz=!xiv9M(F;mPqru~-3~ z?qJq1vV(K?0MO~7vA4(TW9N5X1br|@`KgFwxhdIjjr9;hhGel)Msi&ja7Mul z*Il>9a^OY=%ro0J$ z@RCbV;N!~*N&3H1h2|Y7AO_&tPwt2ml(Pp<;_u-uDg}CK)ZsXt!~%xyupOHr?AE9d z6Kj#E02yor11}kl1FgfN>sq7Y8YJLRD~DI+8mN|gM!cL4OTp&zq}&?%j)O!Ge_woc zljMKjy_c6H{%<7zWix(hpI`9#1pnvm3gE6iALo8t#KwFG`tI31XVX)D*P=W^#%{1h3}>7tB%X-j)oJ zT7#@H0lcmcOax9qnG+R-aimCv$XBh@AS!=sn+D7^-?eHri&m^!q+qM@&REQhq0D!} zu~?WCeemmy$C~#DM13L|U+4gey|;f+e?mJ=OPs^{{i2_vBlVoUBUi8I&=(_x=ucg` zpF$BgqmQ7dNe2F!B76tvPc7(8Mt{E%%?IwdBarXr1kNAwQ(jn;1Ne2k(eW>8)Es~C z6Dj0)4{0JZAtGgIA~WcO_?&P+*qWglQUxYkk-SE&qQeirzF_3y9rnjNE*@DxAmkyC zoxWtly86ICecguF+Q6dG7hV{B!G3T5{+IUeM>E%-O5s4-DFhDi-qx+hsUt@a^~}vT z-`tK}9)LMax8^YU|1$$5WmH40IKO|4QG=~#oiL8fst(Qwt^UvFV_%3mUwZc0U!l%P zzb+`Lz~mxxPBgFj|HpG{-jDVyShe7y1$UIeMzjX;{RyW!Tsur+*H!@ZI`2QgaR-0=F`3)5 zhm+A=&7(wJ(|u5W*FF&Ii8*l@ns$uR0Vh5KB$|dI)^nIp!T@s2zjrT;alnoJ>=|gC zArwwQp8#hZvRIScDUe9&_{;bZ=y!?=Qas3KjLK0KRiUb&Y39Rh&!E;&YpL^LZ!7&} zn1Iorg$YO}SI}(G0`i(Uxq^T6e&XX>{UkO$rQ&7#8Cbjgewn?BOUJz2 zRa{CC5$jL$XozQE5c}B>)wUg5Ax@7$6f*=?O>H|`ZT>R|zdClqj;*&y zu*QIE^5=|>_Uf5bC6#}wCT$^EO`>_{Wg;jR$z&q-TYC0_^W{bTtLI65bBg88Z2UIt zzl1dHNL7-}{m?yGNzvPTMm>5aqt79Qz3RdX6Bp0FG8fkh*}ja~v8ffuJb|CVi=610 zLK;~|cP&NG`y|L@3A=!WV?dJtGv$apl2qX^B0t$Ne6HI?cgTO-DmE565=(n<==K1I zvz}B;lOGY%!XuGN5bBr<+^&R0?MNubt_oIG9wuT!3Ot8X{|OX$(o;-mp*w>q>1TWoX+ISU46qsP zX(NW_+g}s}feXx-Ue-jBDq0+kL=5GHMx>)-GfVgwRJ1}w{NKUT-;4!NskpEUr>7U!BFi#_<-v|RKnb`$XSDz#0*=(? z9C=8!WTGOwIaAjQqzgU~Cqz9w-zPJ37>rXsjve#)k(cu=#F$~S7_+0S`3kA9XCd5t#bQMVa`E(_htQTQ?!W(vWjJ`G`HG|?Jw$&$C+YU(_uqg2Y`nJ%`zeuCnAP-;T((oh{tMS@_}jYmOL^pjQP%gsaKXf!+n9{dUyXg)*i z)kA0|99_|TWk!ldB>lh8XEPsOX$89U~7=Rr-H#oH3p! 
z!*zAdg4V}(_4nWFuVv8&HJ8+xAj2_iP?VHdR<#crRsX$rS1K;=x_)(p>VjR(IYBV*ydg{hb%QU|*wfa2A@$I(u{ln6j{JtD>is_uWdusJF ztEaxunIoR?^&8i$xN)3b)oOo^$?mfc;A8OY0>mVat*Ap#1+k{9WMBti`AMeMacvm3 zL|emjqpW$M7fGBGOHr%i<)fpcdX@I%f1Xy=!7caQ_Y;rjC-+^xMvdr652IZ1w>SOr zI-0)jmp5(tmi=Dd!{3xn$6{A|=of)bRntGud+4e@xP=e_DLpKDn7n@$70+>K2<^mI zJ^3HX{7tb~I(_xG^KF?wfS<>c?z_vDZsi!3ShQhKob?90IJ`cIo&@)Z5ndc#KMQ(> zRTXe68ueZXfFBwe8PO`Vr|TyvMVYhdOZOb{c#hn2=_)0nbznxh?B3lkUQ5&0zPNki zAr#DGes?Mri(ToVp9X(AMM?jdyt+1L6UINf=bj&7G*9$+^IAk$?$8k0@q?bOr^5X1 zSS*#g@|#_bk9HmY7w|2^V4+WgRcB$x$veV3kwj4mWswEjU;B%6O2f$&nxB@0rguFy z6i8_Zl<8C;6*$V#dK`^BE6!aoG-u=Hw>NJb7#bSbxcRe}Y&w4`S_wP2=8VanVzFoP zjPEU->?xH1pohOV6hIJ8(P%{gaYq9wxw-FIe(wD%KHH{-Vde}C92F8>PZx_5v!qME zcg94q*xr*LgYIlVEU^r9r{PSYmnQG)9Pc*jwMy&M&rw8fRz2wklJ38Hl2NxZpxWGX zt|UZ@cuh{1JJo+eP?p$0j4#G|BFUVjX;O2kq^eRb8R>}?`6%n<6@QOzsWFS7=|`b_ z-hMJHZV@~E>qR-daHy{@Ug9N@4Fk=vKi8uk{V9)5Bnv6&{NcxE)RY)gQ`Ykm@r!TO7!3tf% zR|SFGu0g!mlft8V=uBb>Pbn@;s)7`WNRin>P?KrBd5)Td!M?2~?I-MCFrxTX7zb`J zR4CQsYIvy-3`D^-D~Ct>%=DVSQRxk;!>Swl5Lb|{1t_&+hh|_UB+I6pkIG*dmGKgZf8T2!^%Lap1FvwsKx+J zm*YEk7NX7HWxC8HorrbY-km$q+4jTe>~DPI8*M&c4ScRr5H6Z6f4w@4D`2b2NsQfu zR9VBT#>#f2GRDQa7WPaW9q(D#75Cq{>wCN4+BmX$^+?~;s^PBj@vh-jQ|Kpa2zpoB z2YG*xJ_1%V=A5;$SU{Eusw`*KOmg~)OjZSX!qXl^_lYuWa|~x(5ZnyM3+K(>||1j6Vy0h3a?~8HjY}k<&eI7GzJ# zs1*s+s6@b;KqIivtsIVF=EylbNx_kiGr)gV)k>hp8cxV#k*`x&y+J?Pvovl$hQ4?t zZJ`&M@6vD4)s^-l^q4*V>tFBNhmO}4R##PZHNLc`va(7e>v0R6Zk>L8E_XfLkoApu zMvDHA|7HI;GS-Fc|NZgIclws~((%*Qop$cRhr9aEDW~T~QbzBxKCx@;Z4=Em7hHd1 zn%68a=Ogo=<1@h;-hmNi09Q7sG4j=k**<)2xl2Zdux2|3c8?Mbpg#^t3%nDrtT;x09aD(Kyh7-V8eI5RYbh zM_WiJ=uvwbLERYNv1mkA&)waf}H=O)F8HId-;OZ~|mFQvTqZ|f5Ugh;p75HJEYUYcD z=hyP{d_X3d+D9PLOTc}p)Y27?FC*z@Ck}=WwJcC($P_wTTYfc`wgSN z(=E|nJ`xXXx4$cJhdJTf$$ZDeY(>qaBu})J6D0-@lWbUmN|Vg5lSzLf)}0(GgSSFX zl=UQSLaYTrhld*3KHsJ$Mn^hxgXhoh`Rg?kU)Z43rporALx*Oj5Vh!v)AC&l7cEc>Ko)E_YtzXTg@ogKyosO13D}P^Xf}V~Z?F4lg_BZ{+)M}T z3h~<;gag6N65Gd?P8JP#Q`U5uB-e7i$N^PMKb^WJ#w#9R9Xa 
zl0#TpKo_E!OtgRb@&XBmRr}@Wq*Fi?^_eSm)G>shbD#1=ca}Aa!hrf2Lv#YPP?5wBG zIdb^LJgdc=0uNeSXQctl)o1vVI@U2ADYZgpQSV%$;r% zIFz~U8+^<_&y712q&VvFvIC>OAmZ3LWmag#?GHeFwUIg#{Gm zZ<1my`b>WjkRLx*LVtUrdkKA}RGN1B@7dEL`vm;h-dgTAX1#9yCRf}0nA;3=e>%~_ z_LoW}XCG(q<6xsO=;HwB5mWNwBJv_^O0Y~>?b`O}9V z+HczG2k6}vFMJAz$h^_PS;`7LFX0k)MB)9Blb2M83(<8TpZAsz6^%XvWcY zdp~{c+NiQ;?PF{ADABd>cXFlq8l9!<_I@ONTB}=u5weay_yhbyypYl)JNVdet&>G`f1*7V?45mTgx9xOHfFcxdakt2g_CDu*P$-~PQ9FBpc_51^aV z_5nxfbeszM?5_lZ>^B*W`KIUp0q6Qid3c;-U}Rum0OD!f1^46mZN4&avoL_blV?4$ zF#7-h|Me`4%#A=U2LlsG6aZ8}43?9TARB)Q0HG2Fp#T7QoMT~NU|_+98F7e{jQ{_q zjyaT>4*28RD& zJ_7(8CIR2GVv7r~p3sARB}-RVa(0v* ztjhjxXE?$I=!+U%{gOB)x&-j9`_=fNJfiwKfe~wnmincv@O;%E+h6ovq+Za+U6=4@5t;;dGbg^MZ*p~n44ma7HwmPzjjfFvuEtsb;m>3G5 zMB27W#XfSQe=QYhBIje_$%-`v?jH%E^0}9 z>4aBoQpTq7Po!~!I;8U7+L+l=1TC4iiniqEqXtdqA>`|E@mK%aHOIXRkxw7t Re3A6(bIP*6{lmIk003HgyNUn+ delta 8800 zcmV-mBA?ydOT<{EwC1Wl&Sz|SW&i*K zlmGx6%m4r?P+OY|YiMO*WB>plU;qFBF#rGnHY|guA82T0VE_OilmGw#9smFUBnRIE zxoB;4cmMz(v;Y7A8UO$Q;NSxP{%mh!VE_Oi&;S4casU7Tax!rzR&8N;Z~y=yL;wH) z9{>OVAOTV;0E%v5ZDjxeA!Gmm0YCr%0%s7TJg{(Yb94XzBBTHS0iOT>0&(PYFyoU5 z0gHeCy5^!W#=C1Y_xpVlli0>qtR; zU%))C$10=~i_Gv_W@p*Co%0NE4Je1Vp)oewD8&{H&9S$1zQ0WK`ToYz@A&?mZ}Fds zP+3hHP*sB()`&(mrg2SZQq!8zoEEgInznzmtv&7QM5n2es%a|CrscGj*3(9+rNd&r zSSfaj-Fjnv>1gqI^Xv>Rj@*8!BOmWo9u2fdTGwgfJzx0DBL;ZL2j20DL0;0q?MSJM$K2s7uXzyH*~buVyr4prH<5qp zEqWr^C*0&Yt#KpQujmQ=PfJVwX!v#UZ78?V@-He|#bi;;TYj$=D;Cp6v12iJ6uXw+t@TEWS){&XF_j!GTFfWMn?~y&!(e|asQ>@~ zc$}3w33wdUdGqG&&g{oqf(SkWvLw1MRijps!H`y0~Cd{lIGS_MUolT zEh46yHj1b!Rcdu(7$MEn>Ly#)*}Cc0BzM`gfx4kp0Z${Eaihy!evikx!_BzeZpOO9 z1Al0{+ikt}@#IS%KXB*DhpYn!Xmr53;P%m-E8am9EADB$^3ljgAK`y-gkPh%fHmnA zU9VyUfB03G=tU1Z^~TigAAWfImVf&7@YZLa-HMEdu9~_zm%I5}^YTwUmA{pOKNjZp zTl^3XP%70<^-x391ht9U40F;m0^Nq>1R6l4A$l&V6lpD8s#UUjIwKIgJDp)X z6X(>-(`p8cgvdzr$&r6_G`d@EkBhNFjNAiay!|&QC%3i9*6Vzj7c^9}URT=M6qF-n z6Ul!@t?}L6FbL4g&4COVi==w1jW$BJ%0^7w($*R0)mWI{+?H#D%My7lrt7BA$k;dtwvLWA^ow{d5KqSgmDRtK+A_jr9NHWV z=7Z&+wRfSb2fXjdSu(G;L)*O)n@1~!pqEN}O 
znQWz2Hq>qecl-T8!~j>P@B83`w$4s?Dq?StbNxy~QU0Lx=VUYob@t~FVs23SI|17d z=+1tn$?q`sy;ub~Cj8z@&85bumGGRBDj?WI=Hwy_A$tkzttHZ24#CDMy)x`)wYQ=` zC#XOpRLFm7!CJwR!pdn5S5&feo)4iAZyogd{a$*~>ql}VSkfC0>!p%DnL3mr7aB>W ztV1dEYkeS-8CW!+!#$eKRt*DFp;*xGZ9GiK(OSQRLVnApr6={0byW)f?j;3XX%a3P z$cz|OLQJMf+jHHAy;O!;1T(Cuuz0|Y0&G9anAv}BR76843A3!#YjxnydPq}Y76cC+ zSm0o?(y;Ep)k6sS)HD_Qn1K6I59cp&Wpm+q>(-yQ$Song6vD-8t|{cDF2B!nhC4_Q zg=?=Vm{JEIrWuJLG!C_2NZb+&MmG$F&f0ud2-Ee^#r1#m z&PS%Y^4ff(Fj3%pN|A6O2vEl&tx(wT$d?wkZQZdr-qtly$hD`^>6CfSL{~VxWd6wJ zwqjpjvB_`rB>fubN`>qPs1+i+p^|MH9U-DAVBp2T!!g6C8OFiVXbCR5(}-CI0Tn~5 zVuX1GSgVilt=Mnt-6%+F7g*aNYLtIkPOYWRr!Jzdpspv+s5wfQRhtm2!oHpcn5`!B zk1+Mq0O0zQ7_EiTJ{v;-9HlXKVSEwxFpP)U$^rYLQ<`m{_qoVkRL!E(lp znwaQJBszsax{yYh!r8(0QhR&pR@tnXsH65MXvR*I@1NBd&wxy2sD-e$#6mbGt)zne zwzc9ZbzoTAo=6Txk?Jk$0wRA#*ckM69nWp-(EIfeKf;HyQFJ0^*dU-9&u}sBo}e(o z2k+tH-foVwKB4_$G~lPLPdJXw4CoIC!Jx2#aK#1!Jph`KzKrAcqIqBhdF$D|99M)v zfX>abZW?)@GCPki(tn`h)EZl}x1lm&M%x-UZ83Ri5HU9h7%|tCENFlAro#gok*OiAz?HWlSj$>jLMC-vLo3y3G&U{M1QjTtdc_e zB#D{f_`Q7_>Y;cD+V>jqNs$iv2!kyMPRR0gR!IF7$s3s#%Ux806$3*mU zT4OB7vFJ$;$9aEFf$?ZQ*!wO)1%67Z;*#jCS* zRLve1ui%4{zwr_&w+0@wk?4`X6<^;Zd3WySC5itt$$NkKj91#@6+9lnd-dHu+)>Ro zUd-0ea4-@?4U(bzo=4kz)z%*?LDN3%mjakl%(#ObKm6wW;Y+5iL4XLLQ*uV!{x1OMm96{9KTW-0f6}vnDbC_z*VdDQY10;iO*Ug(H4mn!0BTQTLc}I5Ng@AlU9vr4dVL~ zPPMsqh{UeV0O(ELyPxCsd!r$4*DfxE?rwh^CF+{)f$}@n{%BX!j?2)LZIt%g@fje| z6co{}Lxd6r2t~cScf%O_o!HNsfz}y9VHfmCaK|#61VCqQl zgEA*WA7K3qG}S{S+G8<8Qx?@>-hO}1hNBzKx!vqI1w{YGa}NrxP>O$$cXK|O=XkgG zO>Zj7aR2H@GZ%cizYSWive$5_sGGZnO9~=ly(u0I@C*!My%3;Uwqq;A=~0Mc2EeK* zEk~=xe>&k;+io}gC};qc0F%j`kd}-m~62vTp)ato_8W zp{bqLOCXjWM@PSb{{kmLccrLqJDXT%>!yehEGDB8e+;?VI$2t>fQqV>5nC5&4O%K> zkuhnsS6Nf(|B_|-OZ~tC6TekA2N{^$wE~4SqH}yT`Y|9&e(iZI6yHILMoQ7n=+(f+&i1u0&I9-tUH8 zqd{a@hOj)?Q5%0K0@r8FCO|~MmfEZ>56PxXltWI=)bRr8yhp@wQ47xVgqS%D#z_yy zj(WVv&3P7J%rF^@*%4Oq%ntyU?+T<0wmsCs-8;KM@WJFoOhYa9gj9FnM&tjezB?XJ#36I~UhkSyt5Gbl{^5TBa5-(@{I#nB~ zrR;*5UEj?u8A6&ZK~C1ZjLIepz7xIgh!$wv6wtH)+H&Q-eOE5W{v(aoBn@c+`umb* 
zU9oT9zS)0xZwK~LA{8QWtCA+_2F$-^CkyK)OD{*^#wS+8??+$vr+ui;=li$YPuzCF za^&)*3C?f*z?boDzwN~Bt^DI<^p}bC^*R|aJM(Be6!?5=b09Ci5lH3(e}}FL)Xz2ZNDFa0Wd1RSwWNOzhReXeJn0)p%`2ibNy;Bme#?X(q)8^aV6Q1{)5r ziq0RLj7S#Tt%oCaB4SlUkHFPD57>$S15Z;v*q1PsruwOKs43!IoO~(VN^oLWYIiFO zjvasF`o~5$WTsiY-pn=wuhZZ@GWKDKX0T}!J#s(ri|>9;6t55!rA<{k(D$uA^oo*D z;QGzhbJy+3nr3#-x^sVY61-5cb}8E~N_D8oG#?dn6~i1Z=!sNR%B3#0`on06M&Bx^ z0$kR$c3MP1XMgn=d4QtOe`b&I5*ePvTsj0P-H?^CF@#V?2 z=i82Nr@7~EmPJFq4Mpu@Dr@YVTzh!!M!&6~bueUNkUH>Xn3=(R5T6`)g;)Q@v6 zx}x=OAw)n*4~Z@&r$)rHZ5l#5{-A#=_k4)IIT}r+uKjkdCG-37OL)S0_mHWXS%xJR zZ3q-+tqv~^uTP>U0ol1*cyW0B4ConFk-@E~*SaA9et39zSS{18jvppvdCsP<-h0I5 zI&$x2Yvi!nh8g+t`*yx^JxyQ#%Fc}kkw1s|oylZ0dXYRT~7=Q2H zd%uU#T+!vusbOKIO+#qM4!Am=4e~pq(PZ+fZ+6%|+70-h!M6;8g+msAlb(@9^_ca)>G7#e;-9GpKeXXEB~H*f44 z80g!$`3skBIx3oR%fIf7iLQS_p=;udCzefg6^j7SLr)C&5QI}STIECBQC~9D*z*EE zxNp@LTGTMioPoZhLcHU-LScNCblDSUj28;6J^2CX&N{>r%RzVQ_7u8l^1e0OyR22q z%~L;H5jk1)gcC?Q|H>&w-AsdObI!RE5G~?$SuNDAJ=;?_Sc}Zmb{veZz7YD}9 zms7Bt^7&%}#dwMd`n-$~@^{9&OGBe_T868)AnV?h9Jf-?#X>6{einZn&jUM=glKrm zKINPhP?G6Of2!_wVnToC1aDTJXX~dXM`~FN5;q%pHmwEyW>nHNi4@j&jDRt)XLMFc zgrzS@+Q}lBPbC!k9VJPpF0g*W3LV0OfrR-&PXV)7DQIQG5`_fg21JNOsxz9_Q@DZ^ud%;OTz}*nM&erj)Br5~^HM zQB_y$^WUnT$r`ZXHkzbFbvp}O)k(s%Y@|Q645rTNp>n9KOJ&w%@$No|EVge~^yBXX zl$@Scbu0%YZToh>_Z@#o!zVVF~ zpRWZzS11SQl1{U`77`VpObyC1vhLyR0~JS_&8o4)490pYca-J0e?{_kA8-tB6g z*B`=P0KY=9J&QC%x^v0to;w4wC#99L1Zq?wU{#n$M9G;}$5{T?#y;=l4 zR<}bQ6ZU^eW3@W{c-OL+^(6Yrk(7yEZoE&wLswQ?i_w$T*e`yuXAe42T~Jw5(UjP- zuJY;%jm)P^w8&g^V>Wvu+>rT=xq6cR&wsFf5FYJ7);}Jfd8}u7HyvAe&cbsSJkrs7 zb}2O&PU_vud&G{>cMUYpn17vNTsPmChl~TZ&jf#Kcqc}b4qREMMyb{IDTH(Q+H{xn zG-1tF4D1|7AegOlCef12TG{=ny4mu!n!}-^)^F3@-D!G;l-6(UoQ_42@+g{W?5C%! 
z5mHItrY>vi;_IIbzA)9z4-96GoLP3vMQwZwzh*0!ESGzwSnhNnJ>_;FdDf+|- z`#gVe2IgshagjCaPE02frfI1ghBfU_8Z01~`IdfCHI`tUY`)UcShc#H-zTGxFAy9Z zCZHlcgvh=#_^Ar7wJX36a}^_3$S1}U`9jW62Gxq97-7ClGUQH|3hC1E=`NZ;x>$RY zlPk-pXT_RSTnVBNPHE>W%^{5D5acOKlQ4foyE>?rRki)IyTDU^nhK_tt{v9{$=LYB z>{+o%FnqlT$T>SB`FY4L)ynGik|k>2_o#Yj6bRO>sHdh3iZ-2%v|udK0B zbAj@xZksr_Ch0AfGRYC_5K7P}ieAbTiy5l|TaBJ{W5&-~hdGF-(I5v=d^k}`z-52^ z%D$H(x?a+Cla*9l|VXV%S6hrm^`nOJL`BcK*g&ya%>dkYiJ_cM09QACELm+fN)hxmU}gI*C= zR|$j5=JM!I)OcLAZsg}K8x&Tl@gwnk94?e|?!DQ$ymce#g8rn{#U{1hQe&jcdNPV| z6T#jjkPc{qz*npBOh-osde}EbA%n#cuf}3svDjU?4BRrg=55S*)}~V_@^RbucpJZf zzo2?v6kCAN+&?JOGbW=PdSB)OLDMmks)J8xOT zWxZ^orC@OnNUnfV3=X8r>49KT3Am&+>F9fAfREgAZZr*GR^p?#oEJ?$st51(imKYh z`~Q;XyHr)=?h4UMmo9Dh27}+0i=hCP=F|B|Ivr`eI$y$pP*L6#P1ApRCUFxCj&vRF zbSFw#Ki9f=7%3W6MK9l_AtPn|*V1NQ^9X(%cLMk2Acmw2^RxWpn&TiN=X)dP3z~YC zc$HwMpt%amlf{31ti7YBxO~O4E6(ih>FGXm#Z><#g|~P;di-}wzj*g`OO{-B_ZOF5 z&kC*t>-EsAkBMPd;8=fgdT^>mU{hu@Z}CwbeSgfRAjMIao9!F%_z}m>DX~H`Zhrvc ztBur|;16x0K1W>y$XT^eowYtK!}7o3sXz$|;ehO?927T-UJz_|1TtIGw4T+Y~Us>mb ztPSV@I$$lKt@F{vpIGuIwEX*zKepbsl#kH+O9F1Gg4*RPMryVgIsewQ3s4}Ygt8gI}Ux@PS~(r2|g z6&N97`-9)b&*KG@Mj0f(HyfAb(aC77j2+juZqzEKk)_eK+qRGoe70=67Qn3oLqh{w zw_Us0<5xH&dA-(e+<5*Fw0;EL?6!|crxTRlV|~r%XTLnjsLVHA{{z^l>F0QyV_;-p zU;yH!UpD8*^V@u7;AUX}fhU^`C&K9e|NqysFfum+xf~2kAW;BvI}Iz786q2h0sx^9 z2A==`c${NlU|?WD#*Aouvhn}_)G>!L^8vaj1l|Au000000B!*O0a^jL0pJ=ye0C=2ZU}Rum zuwbZT5MTfSCLrblLI#HaU_Ju?8}I?$v+p8L0e`|U?E5yu4hF(HJo6>4tx0_9+Bvo} zZTWhVCWj~VU|-47)s>taC6g7&|Lq({xPlZ5WMIg#M1c|&uHqQiaDwZ&ft$F6+qi?f zxQF|AfQNX5$9RILc!uXV#Tj1UC0^k*-rz0X;XOWp<0C%dGirRnSA4^F{J>9~<5yBB zqJNIZR@-$coiR>|sGVhfXPi)VN=nBxtaX=l-fg-8@2#u`EGYzTogy|nY0yoZk5$?> zb*|jjjt;Z5{oRU~%Hde9t=k$>4P{{;B5m?9x^l5*M%ae`X{VcPN;@6d%*H}5#}>>} z=uHfnA4J-w7>a%5MqA2g*Et_EPgZOwQ-5SAQXVLdd382Tvyg^_ucYhtd}d5XCky>* zEB0xKt}CP75FgltRn-A|ieAWAhM`ure2kUt@M`FQi-s)u!_t;BbUy6^`81GK`P7HUcCAfoKbM00bZfgl7kZ0~-`EMMi^-17ZX6D56r7%ll@Zl)+`nF6jdfxVf;^(BE;B4msx{S zEyee@H|-~jIiNtIJhL!^#Bb1D(1ak~&G&BpH`8`)23SkTRY9IgWz{>vy$(G0tW3`S 
zCA)u-LTv?!R0ZgDNyzsj__)`EBfc1*t`hq5QX2&mZbdZa!Dy^B;iF?YA;16AS+TW4Ey;_<=dmyMEC&!OhBB38`!3R<6ksk1iZ)%7GHsg| zzZ2iQi>hI~`!*Mr92XJxaQryB@oNH*B-7v|NL{ee9kJ}JyWnw zah>;EXf1Qqc1^h?m8exJsuy#~mkQF4(q4SQem{9&Gf7fK#=7dx6E~0fWe7anpIIMW zWdC?e-x1EID3D<}Wvy&lK1oN(&eLU?M$u2olmBt+Twyo_T!Ufjwj8*i6U!u>nMJOA z(=D(h8lxk+ht3W44c!|W9{N~m>pfms%gR-yy=*Nz$_Fp!Zh8IU%R8^e{Qdh?5BEQ6 zXfAF?*86|c8Ygb|#eF5yYu}*NfF2FDZCN#E*0>SF`i#kSsWh9HHd(i4*OXC{Hmq5a z+Oeoxo2p*ZI_z6EVf+Jc$bn7^25nkVXwk3Pj7Ia?Nh(NMGo(M59T31qD*?v<;es9l zo#4X**d{R8BCuE`aF`=JF-v%1obbj7;e%lUpHK?|Fh&rO69RM*VpND=B!mJrLWd?I zHW5Bdz#fr~UBZkh!h%sE7n4LjHi+q1BNkwZSb~&j#17GcMWP$s#5uGPeW()m&`S(s zn)rwgN^k5_3b0I>g9*yAx{Ss$)D0n4q3!@-M_nhP6?F@U4%7`I?xSuK@c>w%J{JY` zSkC}1C89T>YF#*m$0E793rJ1yoNf^`W2mbSWA-M(0uQ0Qk6?3 zjHPEM)#~s}ZaXyvW$lEe`{7)1mJ_48!xAS~1FK3aRhdc{T2Qomg06()Sctf;Y%4sb zn9|q;Ac@tM4<^xrmnGS3EUWJ;OCCv0Z$Xv(lkXG%Kq9fzh}1fLR1G*0|9R%hwF!zUtG_= z6TH0neDmm0R$aH%z~rJ%LiGVeG>WkBcpMP3Zro#pRz?t~(~Q*oot!nQilcBIiR;@b zT^?6uLoUh|P?*wLNix&v1iCh~loUTjd4n>_8R5tT_Ac!m##fOQDSwYb(Oaq_UUJ6( zVtO5;tOk=%(}cf5lM-k?3b-~RD)nN5UEyJVPC@gWko}nV5U))E#JIQoq#|0<{J^6| zz)vC&E{fQ{TM95<+^~z_WF#K32dPm#BV^*5^+Ln=}p)KO_LjH5Z z8*vScGC{(^u{hA41$8e7y_Yia0rd0`_iQ;hksKFl0-CWa6(oP#xP4xnyQSZ`pk2KC zW4$HX#+KGm9V@lS1b!d(mn5pXH#+VAZ|iDSZb$LZ*3*cq$_d#MtxLIdryKUSp1@L( zh^eQUJ&N%u9;$GMM&*i6X=-y=AZU*2UR@LF26TaV$I)>V8F8$2yLuR^|BsDme?%8- zmv<1u5b=>)VIcbtYpAeXit`hsOH8kYA=q{-k6Dkbhc3K46Z4(Mcm;_qbbm7y>fOi6 z#(i;5f1o`)W3+*yrFWpEE!A)|TQ`M8tnDjabdIT3#06-abuR-0Lw zg6`!b%1@0P|B9uia$9RMli~qN`%K>Axm#$n#u&Yyy7fmKu!QuADHX6_r(d%*rPM8e z1lO}6CEQotS4K~v@02A9hHHNk;P74u5K;7VYFYKB!X05Tj-gJg7Sj%ZGlba<#5Xz% zaI9h)xS=@kp9bp6B@qqC*1jMUGs}j;*XiAsTlS^)lMAK0>PP3qkIyH=a#U$56pQ1) z&bzUSxCJsU0QZpTB)vf@&ZgH>=Vap4)|^a_Tf$`5g-Y_Ihics4-FJNN8B}LhPD+_m zP;djR2YZ_F(Vu@4)#9Ko5!{tg0*ahkM`QA6wv9lsYR+!|9#GuJu{?y9W;V`hftF$2 zFBT2<4Dp8VmwBi2K9b=9O!xaP0Dnt+prv!uhXICfvn-KR-!iqn>i??4seh*aeWBMH z_n8`eG=ZQ{4=N1N4kL{n_W2a@fH!)>Jis7Hl&RW~^ggaD>P5AMW!)PG-`RAncR8ge zBtk9Wmh%c_`wX6Nk0p3xCM)W&mxq2Z#1*dA7EZI9enIQgAThP3VlBIcNb^PdTql)# 
zj$2`r6#m5PS@@A5ZVrCtdGN~WPrRFf?>Xk@;a8S=udS-Dr;cfqO>Lv~4rrslYD#c5 zXv);8Nai6uZTB(-r|GrPf~mHV2)RBF5_8v3z&93lGzXh&TSI3KuoJlh%^%Hco%v&K z?RPD8`!h@(VP3T5$+SMbZA2pG$aY$pI#fHtY+WL?8#}5y`DnFQ?U#3sawkyu=*?4g zOYJ<*gnw71zJUXO;FSNq!1#A9nnC}Awm62zU&9vH@|ir#^uOZnA|vZZ`D*_4v%B{0 zG9|(t<#|v!jOQ}>On-(B=L0SG7hf+BPHIQu+@g87q9R=)IvR}d@D0d*Qd*U0M01Q$ z@s8*=?7impjNUr9ewaIT`OlUm5vyV>VwNyJh@?rVH6s1VtfiSL9U`WQ+D$v3NgDV> z|B}a)yNw!%!h*3Zw9rntwd062&PwbGjLwL-UIN4q`7i#pZD9Oa{I;0;OU#3yfZi@^ z9I8!iuN-~?qB!J}*2Ch`E+Ay;<%1l9x1W>0*bR3?u4vqgKfbE3zsZz`F`QtFSU4Sx zF0nMxK1~(TQ!|79U^>@G+ghM(=C_`XVNqwILiQfh`2p`m%Mobjnt&0jy6$OodQGcs1NsGu-INJ%OXSp z&m>+zd@;s<1~;PfQFL5{dRL2$NOe5vWNR@?Bmx_0IBHi%73F;bTJtM-SMTN$K2*5p zBih~DnM%d_It<@Rs>DEDM&&z*=OxAYtQU>5PjfH4?UF%-4N*QYNLW9W26#EK2ti3x zmn0O0)TW#>%_wVgDHBin%z=v=@yeZZlx=kn-99!nz=)atjprbB{Dl@^>4ib=uzzwz zc3^539+COYqy0GSpFJZ#wEip}T>00PwSJ*cf>nN9K%#ZzmJ1WuWw` z5TJV|W&{v7J2Wug|vyne?DIddF=9U3E|Rg7yI0T(E<&6R|%r z8Z3L`mDC!di2rbhRTSZd7kehX>>VXWutD2Z&uTZfjk|m2`|j->{g!zfT{`J!Uq4aq zoBk<4eKjFn;e|d4gx~ z0{lESk>@q@?{#K5d#o~csJ60W2P!>ZI?|Zfc_Juaz)qEIb=Q16w4q;R5M>S-+*R*( zxot{w`-COuQ~l{_$NsX^uY|g>oCm%XYTyS~>7ga}#nQcvDydTa3ScCI!1gFH?iJ*ezyAJYgw&Lj$PwfEhYD=ds-$kv)4-!u4R82fb6EpV1_b`1lH+p+_Wxjhr+S1PL zl?glUm_b0r=uwW!vfIKARGZ6!^3q$%csWVJGhDvF<)&TE+4#A^`0>)m4BKE?b~oYG zwm%UqIV~r_$q~X_vvo;SMP+7u^>bgn?`Ut_!;^pVI~sW;w*RRPV9VWWu@Rk5vGs&q z7YUb__!(1=eRe!z#KOV$`dz|u5c9eTeTC9&4z8Eeiz62Hy};8$xLt_g;4aBo&qlxF z?$*n>q+r$(F6<`MX~bpGS=;vwXcboC7ClDL5mxv-UW9WT8630`Z4qMx?D>G+_Z!xrotAIOMvGy`@wr16 z6--yX_Le-9m_%Ze7U?0ehKNA@>Y{c1LLnq#lSu@c zWFy0XWDjW~JpkZXus{-G3sL|6tp@-3Q+?fq1o(}%K;vi#qf4iYiEJaqRbcgSGttrX z)v1g%)KgR{pgPWKs@$dGeSRgTZ^CQvt(sOpjo&5;-XJwfHBvPY?0;r>uPLsawaQJ@ zC^F}$)al+SUa6kxo@%8U24i?op@*}`nxMDMndi23R$Qt; zOBS2ZrsBo3SGh?x3DZ<+^`zkbVAQLWD2NXTh!-TDH7GbYpC~xX`A$xmlNXi~8k!T9 z_j$sE3H{xOg=leqHp?ZF1%j%j`mS(%*cMopBQ9;mk&nUcC;29WgiUU5Yz-C3v^n6uw}0U zG-khc>I}%)c(7WfWM7Tj(Pz0e0^|EObh`(&C^8p2fEyr9#u zt!ty^LQ|+okfrcHMlG{eP{&*%9q>KjcVZR41@VqI$RFJIn7=9!CV-3J0*t7%4kHQV 
z8zImJ2Jd6Y@p5Lw|D=h*p4d#UU1J@L3#Y6>G|Do90@G!ANQej_^H|EC=SJoc zV-!+FAh{8l6-pv7d>tR4^p}rkO=M@noO0;nJYpL*h~y<=Ox=qig{l2BuM;N)R@I} za-7a~!pbWZQvo7Tizl~HetHxc&$`(laGhB^tJu}2A>>>4pOm)H;v?o}_hzRf&JJ7Y zT4+8}`jmjQ+?Qn;D`jco&yGw-pnYKbBq- z%`kLB+2w_omVi2!z6GcSeJ-_|?2KlnoqF+r z$A)-JXzfh_`%q0xJ7cFGEPoqPj66VzMWcPqOK9w-^H%l&Ihu3?|G3KqqZqWmTV z`z+Egrh!LY2;0VuP534hYrQGf3hlxoABEI^S&_MJAfyYpA9N}4241yZz`K;<|Absfei}~xNDjY&EaY9C41cHidwMW? zf4xv%JEr94=5sZL0(o*p_qZ?mX^H^5t04=}ZU%E2la3dy{LoEW`8$+3`uWqoflBKQ zecPDmx6j;1?F*lI)T1j|v|j3l_CGO2GR(WgZqkR9MM=jys3!3#1Vo@A@^z*WAaGH) zBVuSM!o-8xzdb1F`Z4wqbhiYY(a(~^n>}Wu&ZA~~nB!|Jd$3|{>~Y=zzHb*Y=Jgvi zsa#>-6!d?NyWw;&$1YctyuLM(tZ7}yWGv9-$B7-wB-$xeGiETE+`8G> zhO#?{IonY-=lalM?k?nDAfAQE{tp0j(9QXkFCq5n6mL0H4jJnV0c>*A{8f{q$Yo2M z#w0keo2K;Qb22k%IyN=uhXq!yfWCdb=o7`h=d1(PS8URR=U)yT)=|nn8q1ATHbpZj zwpbp8UA)H`rgxf~9Rn#t(u=4nHq|)Y@G(nI-d>>f(fZzV@x@U~zR16n$f+;jJay~t z_Cx2Q=Dc@$p1GE~iuKm(TCpJ>Oqk*w@9Y3SPV3yhy_41+6xar}Jt=Ld|NFPT0R%$a zwd?+JNBa9mBMEo(o3~=-U+ypu+=?`M)Oc9Ylv+<`ad|=wfD3lEnYIGD-N@w_eF+2T z9kMWFxP3YF8V|$)yX%@=`#t+zo5u}%4wqNCt4S&u0@#V?*POpG=&mN;fQO7U=T2g! 
zFl%d>DXdAo1_fs(g*l0RS9xi^UFC>%xTtv0s2><*=0S@#Q&prO6R964%12 zVSNi0^c_jHzy^t(y+?=MQYkxw>8-B*@nhr0*|Yb_z|PYnjF(8KT#%Jtd@PpMWM!>( zSmn^&17D?(u#-&#NnsJ?hV#s%@Uny^SS9Snym>c{q*6dYUXRIbF|I9@>@JVq%F0)- zHf*S<*c}9{;fD{kwGAdBn_<9cSkGhU)vL|qEJDQevuEFSD%Klkgk;Oi=Ai75R|aAR znLBwUAf{O+Z^Dd{te~q`huNLJpH8R7@$@{dju&^B7t7V_xpDl%v3&h9=*ty*m>3MNHAZ_trsip=*>URG}Mdr4?IM2~>Qn$4g7XD~2sSVHk4f1An+2-()8zpmNb{!(oDV zZvyHsV&>!eaY*z`bR~KU+8ZJgH_RRFo05{Dy?xw1?4KzbfB&2T-+?V#`hEM?N#@RV zb9=`d;Q?9ejcKxJ=EKDvy=QN-3REKs?bT_r!dFdr5;LZqo{6LZui7d9Gf_ zlgR6YX|gq?M4x6KPufg^C)q$I2FHB-8B{6(wI-H+7i0;t!5u;H0aEMa$f#t|OL2ro z3S5jnN%NI#o-XEEObl;rYd${7*|_{ds?~a!eqTY*jZ@E8x-&J%y|=@v@-`Q=Mk4OG z6r8Q7J_vkWpB^X-PGzXT!7yGdv`6=K*?&Dj-&j#jfA62dT89o$#1l;Z9rz>aBai+K5sw7DQaiWM?pxqM-@x&&7@dS#y6D#V-OAAoTEo#N0xdwxqYQ+hb1IvyU zaCIY!HbOfE!^%XI{2Bs6P+iT3y{MSzJ+_-683%R;$AdV!^^$z2d!MC_i_eSQDlZX~H$C3bmoUhhn@D z1vlRr(Fqh$G)NB6#Y0K%e>bo?v>y1B_Cf=+WwZgP*9>?4)^V5$yg^laS_K!;+ zafLUTbUo|_k&N9DZ-iYZp?&B=NCXM#E}N(n8=|m}Fg3CuPByN8+$sBq2IcLA;VyG} zrKj`)N%p=upto4BT*KtafgA>+B4!k*6hQy|1zpM1#776u%{9mo-1s%=srrAE88a|? 
zy)jxi)ezs-q2X3w0^pqK+39J#el+gf=>?l`?}INmeJ<|kbtCV_;=GuOzHXk!xyO07 zD_G7tuY#@!3(v(zTzUuUr-E{JxfiD@l+;o%=A+(3mW zxIRD(e7GwS>c)hR;qas^Jix!1{PA?dzg6jYe`qV>9sXZ0WjvpK94ci6qx#LH4L}Sbc7kmVx!0X=FWP8JW+o)wJ5;BYZL@X|eh9 z>ehbMV)4HUXt5mvFa@pO-fsTpfob{L@>%_k5@4^_e|igIq=odEhe}JcVQa9qv~gXJ zq+JWC1rYiHDn>acFWeFuYWYcazQw}B=fLi(Kky4K$RNd^8x6P6>vQOy|)dH0-(QI;Sd0wa8{*dMy){ks-YA3P zM6()(KeMDsUzBzPr_7FNE<@fx@hu+e-hg&>mb}u~E#60{yp%=o^Al$Rd4!EKS z>$~SpyvY8tr|Ui==FcTDy3-LUx|M_9qsgA(HLFI*cq<o`|znOoK957{etLwM*FCfS>BJ!!- zt2U+)?#<`TGeeAQlJFCpR-I~kzFB)hl(=%--+ds!Qs5c2t8?7IVWhcu_5S*Pf!0xE ziK6_Ei%c3_a9JDSX5k6I&e_g;2}djJX=q3Nss_?n0g?U+66ky99P{7a8?7;=$Il~ zzbkTl&>|l~8m5Y;>ALbtZoU)4v4zisfDz&Rp|e`J!;_dC zh|dwe#V}v9q8|(T)~vfxp{2IGb?h{t0+*m8O~p|@y5|Rs1o;>>4<&(|+G#+KXc+@x zXv~vi(V>8BkD*6|Vw|BW>vuy#wmNVTR_2b=wbZ5;E@^o1`fRU7R$->0N2zJtlAphM z1uE9*EQhmQSMt`ZljI}l`f1xvCIi#ttv^d~%#b33{5aZO!q15{IFvP8@uO53qpXuP zhWO|ods1Z+VjR>q2R91jae++YZwfd;W`*sR$%^iQSWnBcTb6RxvJ^PWFsMMj2=13&`#u0>I%C{6bN zB|(n3HC*e1po6+ZJvzs9VKqukMH{GVW9MQ&_Wh2gwt^*G18~rAOJY{s)>yDV7RuJl zey-=l{KCOw4oQk84_|xVo!yt#n~F+&WX4BUp=d>c5nVWhL&(zA-46)OzRO>7O-HUP z_s1S6wopi(M;OEKyxV$6rKTVVIADL42q-cVH*5u0364pMYqZ80=IM!MQ%h`_txL1E z^E&M=ZP}RK(z9h#n?s(jzh~#~-G~B>JPCpdq=|c=A0iK-NpvY{2O%XYdtVcH1rT60 zwQ^c#f{-9VYr;!yes6LFEs0w+Lk+WbzIV$etIXT^48MPcijpX4|B zHKcw>WL1#51Y*{HU&))A73l{;?kYit7HH-Gc`2!L$(>~?4-$p_5Ci|Gvuvfi3sZNb z!yhghpt7AA$-9~v$+5gR){^Wcnq@hFP%)ILB-?jSI+TJyMVqd83b5Bi(e#97~lrbhEn`|~QdBiHnYAK}1{j-sP78l3z$ycNSATQnG5`b4n zUORziX1a{I^MS*>0cmziT-KOwunxX-0Qq?h177eipZ*bU4rIC_AJf@GZ3Y7q;KlJ} znML(og}YOrb_VK!3RnxQvFg&7atl=rt*Ml%?@C?z2{*m21#raN_*L$i&|hlM`?1Bq+OuZLBd{t@ZTplU1{6EH)As3<5pGd7vG{2v85=Z=jEu z1ZIdgKqn|3zyU=7tWdInNy(g2gplk)on#M+;W}J@xDk|Ud>@b1+*YE5VQ|SzAt|^YBa|?T8H7Ynlf}nS zBh!Au&Ja%Zii*%Dt($<XqR`TMmi}Fir!lIvT>{(o;8Ylh6+plRt6UKBN5$0X8j<$FTv< z8zXsjODzifHPU#Ui}r@4QA2d!=3)+FGm6qv7%$f2k^h zN=E1mAF`C5#2PAhz4?*=sc9Z-N&_{r2p|0xpZR-hCsp>OMk(r!SqLSyGgG|GB3X=? 
zj*}gLTC0WcTfOd{xBS1a);8DZ(IVP6`L-v%}{JB>xgVK^x;gVHF)sziIlXR@VuM6wu zEK9=?8j7yLaln`y0XygPSRwr*^6>N^m@P*vPh)b}XO0+>wqY|pVFTJeg||5HM(zN{ zUc6@oAGt}l@B7HFGj!CiIjpF&P2BEbe_65#{=~5?#+1 z1!hJTeV@6iMnUTlk6>yPpueh&b0-ZmuBw6wn~;my2(4%e>*v-m)@ra$U~?pP(i?hR z{_l}?7jAf<|2|BI*eMj<2@2EX%KPau^b%;{La+>*f)}?l!*BYtRk{LC4B7nYk}hfV zyPFWytBC5y^-H7=zrxL8<@t~}tpeH#=hFr|KIk9E;T(Dt9+RewOHgD)-&v4PHVi0K zGLl{Q2T=ME4e^;vHay=ayIk58r(ZS1)5Z!;n02c0-k)j%+OJ>zK@cb8Ph84Z$9CYv zUq@&usj}cAT-hTC&wDuU@j1>|_bDtseh()k4G4^M3^Mj!&mziQ&n@m62xGIupk)fY zf^ijL0{wQ1Ir|nkH#R1t%kgcA$3&N`q^$67pUjr5_JGe1|BCMR8o2BXn=AkpVIvFM z__-m_IyfsL4nUgC9(t}Ne~wEc#|;o7H2w=mqcA&ytr+LTnL9B03#TJ6D@L6-=SGQr zWaJzT32DrYxq%LN&Q6OGo^gv|qNjO{p|}|+(`pv&(b=YnbtEQUj)B(E7qInL<7@1p zz~rjy%mYi=D_k=+#<})yk99|u?B!~VP%*!n=ysUohgPMqSWB%Y>A`I|7Bv(e=U%PJ z5Lh;Ys?Lgt^~YsXZ53~hVsHn(`DGQY!}HsjnGYY-bspvDv-6L_ z3XiyCBkqE*)9iYV;_1|s&FVv-^uacg_!05*aU~@;B988f(j3t`S{A(?sfb^0J<#gD zTn%=JVPQPpS}z6S74^+YY|LYLxIQ&~`m0*&n_3Sn*m{9yX}Gt&hYlWcUaSH;C8#(N zJLFdqOz5wd!nWMu)oCUJJSCJ5Tk-lQ*8vNT*m2jJa&NBULq@?$g$+$m{GnwWv_CHs|(HQ53BAqh4ou&^bs zl3)~QVBRo718gHhni~uOqD*d#%?Wt<=~-qlRIkALDPCC3bt;!%I&A~IwR8xAx%5Qf z#DEKdLWuG;PRItUAh38vlvY_`hj=}~u{@Bg-lrF%z!L3Hr=zzvaW5-v6z+is(S9Te zdq6oTd9d}K#8dG9Pcg1zYVF~8d_eM)y6mep(X{!l+LBA)3@tQp2_usggtHjb=I3x) zsmB6KJ3{llwtg`U%?$j?^DYFbb<~4$@|}3UqjV%lir zLs&8}2*G93ha?nI8ePsQdXysuxT2`AA()oT_l1Cfqgn&*sZ;w=5p}Gj`p{uFkXZ(`IZbQ6?)P&vPj$r zcqT&m8R>S`ow9EkX|gB1?|WAaWmYGHz1-fXQFel2)F4%ujl6!VzTep2_ujly-iX(1 zkxoB*^hH@{=3juGI^$pvni9%TAj=3(i(%YxONmkE&f4PXC|PP%(GS|yo!Oq=8DU)r zKi2OCjC$~r{QUklmjUyi)jJh!Pv6{@>s+%2u6lNAM`736PPYvSib$)?wZU(h(;GjC ze+~;=(d+eUX#6NuY7EP1^y4q)*6Uio1lU^6L~x*E@lRjnvlAbqmFvyaWf{H!ocL@L zt;aUHKln&HA>2$ZI@Xg&_7Air7GDfD?M+V7xGZ$lyQ>V~iL5Y*ekH6ZvfXZ9C=bi1 zgL&6uQ7HB(8Ys;RNmI7#V_u?7-b9QZHA}JZj$aR|&9Qt@*=0wadKmuX#@od=G8OZuuSjz9N|0_OI70t;~q6e&nV1T5}tY@MLejyTUqi z-EZpvc9BahHni^nb{*mHB|`lS-(l8aANGb$nK{^D@5@*YV)Y3J;aVakc4Rw-VxeHRk6`;+{VH)4_Yic1B?`D#ftxioK5iviGr<%%@)~{Y z8?0WtAm5yY7QuS)p+grJE>=GC5dSBR7mDJ=|FQ@gqlUzC_M{6TPv?PU#4+M>(tpGn 
zA_DcbpVt2sg%FG4!W}L1L7s zB`P2|{?PDBT~fKc(OK9aG-WGQX&y=L$!=+GDuoILW4KbG|CW<2#c-Pg&w2myr~nmO z#6VMNS5}PddnMb>>#?jTT^o;%FIl^y(OJC5FGZUBIm?Z5twem1R#+Wjz%&DZg`dcGH=npnrri0G(Z zYf^tp%NANR#Nb&x9alY$gSnPREroY&DM+!6Ym;f$>3oyd6I*fP@UkZVCPQWpSG(RxEtRN7GuG_7lZ}O(^M$!;61Pz%%FyN;{ zwKKZWyXoGW=!yk4f~{eT9N~(TX$~R3o{eao9L}}3N1vL&e>jESxHAX5{(^*%T5UQ6 zdZ8e;6C!q_6CgiQNP5N2c+VQwyp#}foTDe($a+q{$R}ZLfEzZ${qRgXnU+c!d>OPuBJm04peuo<60!HZIAgb1M}@vLm-tQ?_J>pOKI;HWPj3a9CzBi%+Rs|dw#UjUqZ zkCneZyX1O^VPiBiZPZIAT&Je1qA|D0_ZX^)X=5CU74i=tMaUDBNY_Bn3kc!GT9Qcz z8bJ5#PTr*2l-wQ1v9}_vE;Xno5DK%bm6wJV6@`|{XE^FR4=s|H&KLy#k1I|0PmxGc z{L{w-$(^T2(*4IMNIjV&%ee_|4#|c}M*rDR({SsTYzsPr7rAl1m7%zuIH(vb{8zz< z+~iYRdL8?)RZtO0(s;)Ncvk4w(!l@y2wmSz1@@&h)tdA$1gtjwL6J7Sl$P zk|)odD>Gvb*a~+tDOtB?a?%(^Mnrx}!$-u30JU^y`YC``(DH z`zKKIE*k)|C*bhlh0Kb31o>&2WP*pJxdW_57%b`+N}UiLqc!y zme+%%E#HEe)9d&4T~yj|(2IsR?_;JB>PQBxrgm*%(T1tJ+W#aJN-;0v21&2C6esld zP~$}d2#7#K)Qe0bK;V+@*U*WH(0Cry{_)?^{?9Z2hyE%Bf9jWuV@<9rP!~`uTurgH zl|xvOjURH@1wI}0W5OFZX%d;-w#WaEY!}1X0FF&2FMW19r(T4}2e}?H zXc1(rGx)N}nx7iyYsd@N+s~Niuw#M3ozKZgr|H<#?9XP{B02Q&!zE7*`)>ywusmgx zVjjIeby`PR_{O-%NM%#hsgWfz4R-Aj2bkV|RaOL~2uv%cD%n)yV#AwEJ^5gv)>G^C zx1$%1TKZo0zEmax0q249;Gj1;2etB5&z3}6FY3}=u^=YbBi6wV-b?E{c(9Mw z?mwpuYP(-nzwXD6b@gBl#NEE*2X|_8bUM>;r@OyPm;K;Q^1$sdqic<;1udwhJXOmV zWB^=ns4adbq1%mIj&amv0IyNCP8jYyF1^MTNkB(kv(u>Cs8jRoNw>*GRW2%$DTWYs z+=XoyZjQUC$UPJhQ_VT4tR!Y_Ei;LgI_#*??N4H+vaiVLC_XQ&S$m6R%VhZlO`jG+ z893wQtikP%tP&=B;B6qgI@WmuxYSiVR$B(&bTHrjf|>Fx{;N5j*-YP{`|9% zp2*Bx`~35#*Usl8u?)3U?Gcd)7#rqQ+7BecTholXohVnOl~-RYH8!)^1~=#8#muxl z2vW&b5f3|$hp9aaq+DK8%>%xXagN#d*{I`qCu>7SV4>gvy*ckf=Y;+Pa1 zIy=Q=adgT>>7q;bM3S1!%&m5fc7sFkMni>8x&X zQjj2dG3K{LIJH!=`(1}CE1y2ywQK3p4u4IL?#Oz zM~y?DL@TLEG%J%7vGUCJYl9U|dFm^|x}L%k`A#hxt)wZ@r}DIy1xh~F^@(}jQ-(Ed z-X!G02R?8a{yy6igUUg3(`iC?2LkGEV&>yUaY*zsbR{|$?Ex8yo8(TrAJ&=^XWh7C z*zdV&AD`?oud#jmM!iNSX;!Urc7Dm5;sI&v%>~j0$Sry*_rq=w7D%_15j~qd-Dt}QZe#!yB zZw@@#;=)uTkKPHc%G+Dm8iu&*Sa@z}^-196^x#BMKr%xKP6qQLp(DBv8-E+}|H#tV 
z|F|)LwM?9#h+^V>dhng9&eZ=@|E1L_>wa`-mEr_}C{9Q%)E6*fTG^^7o6i>W_sS=Yzp zWcW_)mL8!?uE{unAPopwFeM(&6RU#NbXAB-RXr?zcSUt+nRvKbr3yBP6uT`S7HuCv z`K9wj>R`1{9imn)HEYZJPlT7E@YYKsdLBg>4ibHJu~34`k6x?}tp}cE!_XLQBW(;C zz8vU{m^yt%#ZT+951u`W9i3ePqNN^W(v9GoL^3u-ycv9hg!ZKSK|;un?l_H0vLcE` z2nAtz{WNhP%QUmJH|>iu6shL+`U(y^hI~0T~QvLYEXO5655RMoTc4ZpMpxANv*pL+epW0$ zdp@e`hEWV+aa|&0FK4%2ZZ9wO1e;hFOmKs-+l8oZf~m$E-J}S&cEfyfrlTEhRBvB> zgGW+Q5bi(|(OQneL*B&PLZXnf_oQq;j30^x2)1?S8Wk<)j{8<6~1cq zwXEB7MoJy(%Ixe9*(dH{g@7Qc$%<*iq_U4yX7#}|uMq57Him|-PQ>#&OKMYC2G%2{ zk@b*eWIk$!Q)){(`P3zDvHI|`wtsNq@INSMNg4q#g{>c7?EUD9Y5CCd&VsrKuxC5p zz5o%DA_lBRrKDJ~HCSuPy0V=qO>1%?gj#@#QLM}hF$V>iKgn)uHj9vzu;Ie&z`*Pf zq3H zuElA%7cIIz1{W-9$vAwzx?44&5_{k@4BTmlbI)mEHOvRLVL2f=(;M1+%{3O;z4a;0 z*iJYFL(dn&ajN2 zQNfZf(@E2v3-E9s?SI0Re`Tx%KJQMJX1D~ucX0a0WAE=o?A}@TqhC+@pGt^y6Zo5A z{?b8Q*Ys^SNBtkBQ>5pWx@QltuYUzMXOnCF(k?l22WlV#E%7D59~V1&{Jc)=USpEC zJM~7(tiD!pW@_D5JFT8~(;7H$T8q8Yjkl`11&4Lbz=DU7hQxcVko;?k_^8rWpERwd zXomQ&l@5(wB|~=1dKWlFFwNaW&(wNW`N966J0~Y&{r@2)qSqYieSh>D1UW`bxpYU> z*0jQd`I3;f2d^%eu!n_YZf zttqel+rxvAyW5?v{K_A-LxqGI4O(=_Sfa;(5ff&V=&mubaoxK2=oz1on3SB7nifjW z$jr*_mD4*nFTYRUf_{ZX#U!%jwk-Edw%vVClgm8WDomM`37_$@cH6e5umwQnR zQ$@{&^)M-N)Y(VRCjwQtMV&Y*PRiLIKV%{(=cHvU=TJ~LKk#Xb+7t<6+%k)f1>^>d z0}>R|0ySwoj4#O=M?S&H(sRCBXRK-|;}x5$yH`b}g~kEaQOQwQuKWTO=XFu+3cj~P zXVGgK5=7&?dr +
+
+ +

+ {{ title ? title : t("common.loading") }}... +

+

+ {{ describe ? describe : t("common.waitTip") }} +

+
+
+ + + + + diff --git a/EdgeCraftRAG/ui/vue/src/i18n/en.ts b/EdgeCraftRAG/ui/vue/src/i18n/en.ts index 39d3cf0fa8..decfaec3f3 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/en.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/en.ts @@ -27,6 +27,9 @@ export default { all: "All", reset: "Reset", uploadTip: "Click or drag file to this area to upload", + loading: "Loading", + waitTip: + "Please wait patiently and do not refresh the page during this period.", }, system: { title: "System Status", @@ -63,7 +66,8 @@ export default { activated: "Activated", inactive: "Inactive", isActive: "Activated", - pipelineFormatTip: "Supports JSON format, with file size not exceeding 10M.", + pipelineFormatTip: + "Supports JSON format, with file size not exceeding 10M.", importSuccTip: "Files upload successful!", importErrTip: "Files upload failed!", name: "Name", @@ -73,7 +77,8 @@ export default { deactivateTip: "Are you sure deactivate this pipeline?", activeTip: "Are you sure activate this pipeline?", deleteTip: "Are you sure delete this pipeline?", - notActivatedTip: "There is no available pipeline. Please create or activate it first.", + notActivatedTip: + "There is no available pipeline. 
Please create or activate it first.", validErr: "Form validation failed !", config: { basic: "Basic", @@ -85,10 +90,12 @@ export default { indexer: "Indexer", indexerType: "Indexer Type", embedding: "Embedding Model", + embeddingUrl: "Embedding URL", embeddingDevice: "Embedding run device", retriever: "Retriever", retrieverType: "Retriever Type", topk: "Search top k", + topn: "Top n", postProcessor: "PostProcessor", postProcessorType: "PostProcessor Type", rerank: "Rerank Model", @@ -99,25 +106,31 @@ export default { language: "Large Language Model", llmDevice: "LLM run device", weights: "Weights", - local: "Local", - vllm: "Vllm", - vector_uri: "Vector Uri", + local: "Local (OpenVINO)", + vllm: "Remote (vLLM)", + vector_url: "Vector Database URL", modelName: "Model Name", - vllm_url: "Vllm Url", + vllm_url: "vLLM URL", + kbadmin: "kbadmin", }, valid: { nameValid1: "Please input name", nameValid2: "Name should be between 2 and 30 characters", + nameValid3: "The name only supports letters, numbers, and underscores.", nodeParserType: "Please select Node Parser Type", chunkSizeValid1: "Please select Chunk Size", - chunkSizeValid2: "The value of Chunk Size cannot be less than Chunk Overlap", + chunkSizeValid2: + "The value of Chunk Size cannot be less than Chunk Overlap", chunkOverlapValid1: "Please select Chunk Overlap", - chunkOverlapValid2: "The value of Chunk Overlap cannot be greater than Chunk Size", + chunkOverlapValid2: + "The value of Chunk Overlap cannot be greater than Chunk Size", windowSize: "Please select Chunk Window Size", indexerType: "Please select Indexer Type", - embedding: "Please select Embedding Model", + embedding: "Please select embedding Model", + embeddingUrl: "IP : Port, (e.g. 
192.168.1.1:13020)", embeddingDevice: "Please select Embedding run device", retrieverType: "Please select Retriever Type", + retrieverTypeFormat: "Retriever type can only select kbadmin", topk: "Please select Top k", postProcessorType: "Please select PostProcessor Type", rerank: "Please select Rerank Model", @@ -126,51 +139,78 @@ export default { language: "Please select Large Language Model", llmDevice: "Please select LLM run device", weights: "Please select Weights", - vector_uri: "IP : Port, (e.g. 192.168.1.1:19530)", + kb_vector_url: "IP : Port, (e.g. 192.168.1.1:29530)", + vector_url: "IP : Port, (e.g. 192.168.1.1:19530)", vllm_url: "IP : Port, (e.g. 192.168.1.1:8086)", - urlValid1: "Please enter url", + urlValid1: "Please enter vector url", urlValid2: "Please enter the correct url", urlValid3: "URL cannot be accessed", urlValid4: "Test passed !", urlValid5: "The URL has not passed verification yet", modelName: "Please enter model name", + vllmUrlValid1: "Please enter vLLM url", + vllmUrlValid2: "Please enter the correct url", + vllmUrlValid3: "URL cannot be accessed", + vllmUrlValid4: "Test passed !", + vllmUrlValid5: "The URL has not passed verification yet", + nodeParserTypeTip: + "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", + indexerTypeTip: + "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", + retrieverTypeTip: + "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", + retrieverChangeTip: "Please go to the Indexer stage to complete the data", + indexerTypeValid1: "Indexer type can only select kbadmin", + modelRequired: "Please enter embedding model url", + modelFormat: "Please enter the correct url", + retrieverValid: + "Please return to the Indexer stage to supplement information.", }, desc: { name: "The name identifier of the pipeline", nodeParserType: "Node parsing type when you use RAG", chunkSize: "Size of each chunk for processing", chunkOverlap: "Overlap 
size between chunks", - windowSize: "The number of sentences on each side of a sentence to capture", - indexerType: "The type of index structure responsible for building based on the parsed nodes", + windowSize: + "The number of sentences on each side of a sentence to capture", + indexerType: + "The type of index structure responsible for building based on the parsed nodes", embedding: "Embed the text data to represent it and build a vector index", - embeddingDevice: "The device used by the Embedding Model", + embeddingUrl: "Connecting embedding model url", + embeddingDevice: "The device used by the embedding model", retrieverType: - "The retrieval type used when retrieving relevant nodes from the index according to the user's query", + "The retrieval type used when retrieving relevant nodes from the index according to the user's experience", topk: "The number of top k results to return", - postProcessorType: "Select postprocessors for post-processing of the context", + postProcessorType: + "Select postprocessors for post-processing of the context", rerank: "Rerank Model", rerankDevice: "Rerank run device", - generatorType: "Local inference generator or vllm generator", + generatorType: "Local inference generator or vLLM generator", language: "The large model used for generating dialogues", llmDevice: "The device used by the LLM", weights: "Model weight", reranker: "The model for reranking.", - metadataReplace: "Used to replace the node content with a field from the node metadata.", + metadataReplace: + "Used to replace the node content with a field from the node metadata.", vectorsimilarity: "retrieval according to vector similarity", - autoMerge: "This retriever will try to merge context into parent context.", + autoMerge: + "This retriever will try to merge context into parent context.", bm25: "A BM25 retriever that uses the BM25 algorithm to retrieve nodes.", faissVector: "Embeddings are stored within a Faiss index.", vector: "Vector Store Index.", simple: 
"Parse text with a preference for complete sentences.", - hierarchical: "Splits a document into a recursive hierarchy Nodes using a NodeParser.", + hierarchical: + "Splits a document into a recursive hierarchy Nodes using a NodeParser.", sentencewindow: "Sentence window node parser. Splits a document into Nodes, with each node being a sentence. Each node contains a window from the surrounding sentences in the metadata.", - unstructured: "UnstructedNodeParser is a component that processes unstructured data.", + unstructured: + "UnstructedNodeParser is a component that processes unstructured data.", milvusVector: "Embedding vectors stored in milvus", - vector_uri: "Connecting milvus uri", + vector_url: "Connecting milvus vector url", test: "Test", - modelName: "Vllm model name", - vllm_url: " Test if Vllm url is available ", + modelName: "vLLM model name", + vllm_url: " Test if vLLM url is available ", + kbadmin: "Third party knowledge base engine", }, }, generation: { @@ -218,18 +258,34 @@ export default { edit: "Edit Knowledge Base", deleteTip: "Are you sure delete this knowledge base?", activeTip: "Are you sure activate this knowledge base?", - uploadTip: "Supports PDF, Word, TXT,Doc,Html,PPT formats, with a single file size not exceeding 200M", + uploadTip: + "Supports PDF, Word, TXT,Doc,Html,PPT formats, with a single file size not exceeding 200M", notFileTip: "The knowledge base is empty. 
Go upload your files.", name: "Name", des: "Description", activated: "Activated", nameValid1: "Please input knowledge base name", nameValid2: "Name should be between 2 and 30 characters", - nameValid3: "The name cannot start with a number", + nameValid3: + "Alphanumeric and underscore only, starting with a letter or underscore.", desValid: "Please input knowledge base description", activeValid: "Please select whether to activate", - uploadValid: "Single file size not exceeding 50M.", + uploadValid: "Single file size not exceeding 200M.", deleteFileTip: "Are you sure delete this file?", + selectTitle: "Create Type Select", + selectDes: "Please select the type you want to create", + experience: "Experience", + experienceDes: + "Experience refers to the knowledge and skills acquired through practical involvement, trial, and reflection, serving as a key foundation for solving real-world problems.", + kbDes: + "A Knowledge Base is a centralized repository for storing organized information such as documents, FAQs, and guides, enabling teams or users to quickly access and share knowledge.", + type: "Type", + original: "Original", + kbadmin: "kbadmin", + typeValid: "Please select knowledge base type", + nameRequired: "Please select kbadmin name", + waitTip: "Please be patient and wait for the file upload to complete.", + done: "Finished", }, request: { pipeline: { @@ -248,9 +304,51 @@ export default { updateSucc: "Knowledge Base update successfully !", deleteSucc: "Knowledge Base deleted successfully !", }, + experience: { + createSucc: "Experience created successfully!", + updateSucc: "Experience update successful!", + deleteSucc: "Experience deleted successfully!", + }, }, error: { notFoundTip: "Uh oh! 
It seems like you're lost", back: "Go Home", }, + experience: { + create: "Create Experience", + edit: "Edit Experience", + import: "Import Experience", + fileFormatTip: "Supports JSON format, with file size not exceeding 100M.", + importSuccTip: "Files upload successful!", + importErrTip: "Files upload failed!", + uploadValid: "Single file size not exceeding 100M.", + experience: "Experience", + detail: "Detail", + operation: "Operation", + deleteTip: "Are you sure delete this experience?", + addExperience: "Add Experience", + delExperience: "Delete Experience", + addContent: "Add Content", + delContent: "Delete Content", + total: "Total experience: ", + unique: "Unique", + selectTip: "Please choose an appropriate method for data update", + cover: "Cover", + increase: "Append", + deactivateTip: "Are you sure deactivate this experience?", + activeTip: "Are you sure activate this experience?", + label: { + experience: "Experience", + contents: "Experience Content", + content: "Content", + }, + placeholder: { + experience: "Please enter Experience", + content: "Please enter content", + }, + valid: { + experience: "Experience cannot be empty", + content: "Content cannot be empty", + }, + }, }; diff --git a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts index 2a1a318851..0f0ef3ef2e 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts @@ -27,6 +27,8 @@ export default { all: "全选", reset: "重置", uploadTip: "点击或将文件拖到此区域进行上传", + loading: "加载中", + waitTip: "请耐心等待,在此期间不要刷新页面。", }, system: { title: "系统状态", @@ -49,7 +51,8 @@ export default { step1: "创建 Pipeline", step1Tip: "定制您的 RAG 流程,释放 AI 信息处理的最大能力。", step2: "前往对话", - step2Tip: "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", + step2Tip: + "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", create: "去创建", }, pipeline: { @@ -84,10 +87,12 @@ export default { indexer: "索引器", indexerType: "索引器类型", embedding: "嵌入模型", + embeddingUrl: "嵌入模型地址", embeddingDevice: "模型运行设备", retriever: "检索器", 
retrieverType: "检索器类型", topk: "检索 top k", + topn: "Top n", postProcessor: "节点后处理器", postProcessorType: "节点后处理器类型", rerank: "重排模型", @@ -98,15 +103,17 @@ export default { language: "语言大模型", llmDevice: "运行设备", weights: "权重", - local: "本地", - vllm: "Vllm", - vector_uri: "Vector Uri", + local: "本地(OpenVINO)", + vllm: "远程(vLLM)", + vector_url: "向量数据库地址", modelName: "模型名称", - vllm_url: "Vllm 地址", + vllm_url: "vLLM 地址", + kbadmin: "kbadmin", }, valid: { nameValid1: "请输入名称", nameValid2: "请输入 2 到 30 个字符的名称", + nameValid3: "名称仅支持字母、数字和下划线", nodeParserType: "请选择节点解析器类型", chunkSizeValid1: "请选择分块大小", chunkSizeValid2: "分块大小的值不能小于分块重叠值", @@ -115,8 +122,10 @@ export default { windowSize: "请选择句子上下文窗口大小", indexerType: "请选择索引器类型", embedding: "请选择嵌入模型", + embeddingUrl: "IP : 端口,(例如 192.168.1.1:13020)", embeddingDevice: "请选择嵌入模型运行设备", retrieverType: "请选择检索器类型", + retrieverTypeFormat: "检索器类型只能选择kbadmin", topk: "请选择Top k", postProcessorType: "请选择后处理器类型", rerank: "请选择重排模型", @@ -125,30 +134,46 @@ export default { language: "请选择大语言模型", llmDevice: "请选择大语言模型运行设备", weights: "请选择模型权重", - vector_uri: "IP : 端口,(例如 192.168.1.1:19530)", + kb_vector_url: "IP : 端口,(例如 192.168.1.1:29530)", + vector_url: "IP : 端口,(例如 192.168.1.1:19530)", vllm_url: "IP : 端口,(例如 192.168.1.1:8086)", - urlValid1: "URL 不能为空", - urlValid2: "请输入正确的URL", - urlValid3: "URL无法访问", + urlValid1: "向量数据库地址不能为空", + urlValid2: "请输入正确的向量数据库地址", + urlValid3: "向量数据库地址无法访问", urlValid4: "测试通过!", - urlValid5: "URL还未通过校验", + urlValid5: "向量数据库地址还未通过校验", modelName: "请输入模型名称", + vllmUrlValid1: "vLLM地址不能为空", + vllmUrlValid2: "请输入正确的vLLM地址", + vllmUrlValid3: "vLLM地址无法访问", + vllmUrlValid4: "测试通过!", + vllmUrlValid5: "vLLM地址还未通过校验", + nodeParserTypeTip: "索引器类型和检索器类型将同时设置为kbadmin", + indexerTypeTip: "节点解析器类型和检索器类型将同时设置为kbadmin", + retrieverTypeTip: "索引器类型和节点解析器类型将同时设置为kbadmin", + retrieverChangeTip: "请前往索引器阶段补全数据", + indexerTypeValid1: "索引器类型只能选择kbadmin", + modelRequired: "请输入嵌入模型地址", + modelFormat: "请输入正确的模型地址", + retrieverValid: "请回到Indexer阶段补充信息", 
}, desc: { name: "Pipeline的名称标识,用于区分不同工作流", - nodeParserType: "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", + nodeParserType: + "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", chunkSize: "文本处理时的单块数据大小", chunkOverlap: "相邻数据块的重叠部分大小,确保跨块语义连续性", windowSize: "每个节点捕获的上下文句子窗口大小,用于增强语义完整性", indexerType: "基于解析节点构建的索引结构类型", embedding: "将文本转换为向量表示的过程", + embeddingUrl: "嵌入模型地址", embeddingDevice: "执行嵌入模型推理的硬件设备(CPU/GPU)", retrieverType: "根据用户查询从索引中检索节点的算法类型", topk: "检索时返回的最相关结果数量", postProcessorType: "对检索结果进行后处理的组件类型", rerank: "对检索结果进行二次排序的模型,提升答案相关性", rerankDevice: "执行重排模型推理的硬件设备(CPU/GPU)", - generatorType: "回答生成方式的类型(本地部署模型或 vllm 高效推理框架)", + generatorType: "回答生成方式的类型(本地部署模型或 vLLM 高效推理框架)", language: "用于生成自然语言回答的大模型(如 LLaMA、ChatGLM)", llmDevice: "大语言模型推理的硬件设备(需匹配模型规模要求)", weights: "大模型的权重", @@ -157,17 +182,19 @@ export default { vectorsimilarity: "根据向量相似性进行检索", autoMerge: "该检索器会尝试将上下文合并到父级上下文中", bm25: "使用BM25算法检索节点的BM25检索器", - faissVector: "嵌入存储在Faiss索引中。", + faissVector: "矢量索引存储在Faiss中。", vector: "矢量存储索引", simple: "解析文本,优先选择完整的句子。", - hierarchical: "使用借点解析将文档分割成递归层次节点", - sentencewindow: "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", + hierarchical: "使用NodeParser将文档拆分为递归层次结构的节点。", + sentencewindow: + "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", unstructured: "一个处理非结构化数据的组件", - milvusVector: "嵌入存储在Milvus索引中", - vector_uri: "测试Milvus地址是否可用", + milvusVector: "矢量索引存储在Milvus中", + vector_url: "测试Milvus地址是否可用", test: "测 试", - modelName: "Vllm 模型名称", - vllm_url: "测试Vllm地址是否可用", + modelName: "vLLM 模型名称", + vllm_url: "测试vLLM地址是否可用", + kbadmin: "第三方知识库系统", }, }, generation: { @@ -184,7 +211,8 @@ export default { desc: { top_n: "重排后结果的数量", temperature: "数值越高,输出越多样化", - top_p: "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", + top_p: + "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", top_k: "从概率前k的 Token 中采样", penalty: "抑制重复的系数,设为1.0表示禁用", maxToken: "生成回答的最大Token数量", @@ -214,18 +242,33 @@ export default { edit: "编辑知识库", deleteTip: "您确定要删除此知识库吗?此操作不可恢复。", activeTip: "您确定要激活此知识库吗?", - uploadTip: "支持 PDF、Word、TXT、Doc、HTML、PPT 
格式,单个文件大小不超过 200M。", + uploadTip: + "支持 PDF、Word、TXT、Doc、HTML、PPT 格式,单个文件大小不超过 200M。", notFileTip: "您还没有上传任何文件,点击“上传”按钮开始添加内容吧~", name: "名称", des: "描述", activated: "激活状态", nameValid1: "请输入知识库名称", nameValid2: "请输入 2 到 30 个字符的名称", - nameValid3: "名称不能以数字开头", + nameValid3: "仅支持字母、数字和下划线,必须以字母或下划线开头。", desValid: "请输入知识库描述", activeValid: "请选择是否启用该功能。", - uploadValid: "单个文件大小不得超过 50MB", + uploadValid: "单个文件大小不得超过 200MB", deleteFileTip: "您确定要删除此文档吗?此操作不可恢复。", + selectTitle: "创建类型选择", + selectDes: "请选择要创建的数据类型", + experience: "经验注入", + experienceDes: + "Experience是指个人或团队在实践过程中积累的知识和技能,通常通过实际操作、试错和反思获得,是解决实际问题的重要依据", + kbDes: + "知识库是系统化存储信息的集合,用于集中管理文档、常见问题、操作指南等知识内容,便于团队或用户快速查找和共享信息。", + type: "类型", + original: "原始的", + kbadmin: "kbadmin", + typeValid: "请选择知识库类型", + nameRequired: "请选择kbadmin名称", + waitTip: "请耐心等待所有文件上传完成!", + done: "已完成", }, request: { pipeline: { @@ -244,9 +287,51 @@ export default { updateSucc: "知识库更新成功!", deleteSucc: " 知识库删除成功!", }, + experience: { + createSucc: "经验创建成功!", + updateSucc: "经验更新成功!", + deleteSucc: " 经验删除成功!", + }, }, error: { notFoundTip: "Oops 好像走错地方啦~", back: "首页", }, + experience: { + create: "新建经验", + edit: "编辑经验", + import: "导入经验", + fileFormatTip: "仅支持JSON格式,文件大小不超过100M", + importSuccTip: "文件上传成功!", + importErrTip: "文件上传失败!", + uploadValid: "单个文件大小不得超过 200MB", + experience: "经验", + detail: "详情", + operation: "操作", + deleteTip: "确定要删除这个经验?", + addExperience: "新增经验", + delExperience: "删除经验", + addContent: "新增内容", + delContent: "删除内容", + total: "经验总数: ", + unique: "唯一", + selectTip: "请选择合适的方式进行数据更新", + cover: "覆盖", + increase: "追加", + deactivateTip: "您确定要停用该经验库吗?", + activeTip: "您确定要启用该经验库吗?", + label: { + experience: "经验", + contents: "经验内容", + content: "内容", + }, + placeholder: { + experience: "请输入经验", + content: "请输入内容", + }, + valid: { + experience: "经验不能为空", + content: "内容不能为空", + }, + }, }; diff --git a/EdgeCraftRAG/ui/vue/src/layout/Main.vue b/EdgeCraftRAG/ui/vue/src/layout/Main.vue index 79af5a7f47..ba386bfe55 100644 --- 
a/EdgeCraftRAG/ui/vue/src/layout/Main.vue +++ b/EdgeCraftRAG/ui/vue/src/layout/Main.vue @@ -4,13 +4,16 @@
- + +
+ + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue new file mode 100644 index 0000000000..24a1f937ec --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ExperienceDetail.vue @@ -0,0 +1,225 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue new file mode 100644 index 0000000000..b558bac195 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/ImportDialog.vue @@ -0,0 +1,100 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue new file mode 100644 index 0000000000..e828d47356 --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/UpdateDialog.vue @@ -0,0 +1,387 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts new file mode 100644 index 0000000000..c9d1df62ed --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/Experience/index.ts @@ -0,0 +1,7 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +import UpdateDialog from "./UpdateDialog.vue"; +import ImportDialog from "./ImportDialog.vue"; + +export { UpdateDialog, ImportDialog }; diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue new file mode 100644 index 0000000000..e8cffb236b --- /dev/null +++ 
b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetail.vue @@ -0,0 +1,298 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue deleted file mode 100644 index 290a85cd8e..0000000000 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/KnowledgeDetial.vue +++ /dev/null @@ -1,302 +0,0 @@ - - - - - diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue new file mode 100644 index 0000000000..8b9ab7978e --- /dev/null +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/SelectTypeDialog.vue @@ -0,0 +1,134 @@ + + + + + diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue index f987ff5cb4..e95e7436f6 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/UpdateDialog.vue @@ -17,14 +17,45 @@ autocomplete="off" :label-col="{ style: { width: '100px' } }" > - + + + {{ $t("knowledge.original") }} + {{ $t("knowledge.kbadmin") }} + + + + + + {{ + item + }} + + - + {{ $t("pipeline.activated") }} {{ $t("pipeline.inactive") }} @@ -59,10 +90,11 @@ import { requestKnowledgeBaseCreate, requestKnowledgeBaseUpdate, + getkbadminList, } from "@/api/knowledgeBase"; import { isValidName } from "@/utils/validate"; import { FormInstance } from "ant-design-vue"; -import { computed, ref } from "vue"; +import { computed, ref, onMounted } from "vue"; import { useI18n } from "vue-i18n"; const props = defineProps({ @@ -74,11 +106,17 @@ const props = defineProps({ type: String, default: "create", }, + dialogFlag: { + type: String, + default: "knowledge", + 
}, }); interface FormType { - name: string; + name: string | undefined; description: string; + comp_type: string; active: boolean; + comp_subtype: string; } const validateName = async (rule: any, value: string) => { @@ -89,7 +127,6 @@ const validateName = async (rule: any, value: string) => { if (len < 2 || len > 30) { return Promise.reject(t("knowledge.nameValid2")); } - console.log(isValidName(value)); if (!isValidName(value)) { return Promise.reject(t("knowledge.nameValid3")); } @@ -98,9 +135,11 @@ const validateName = async (rule: any, value: string) => { const { t } = useI18n(); const emit = defineEmits(["close", "switch"]); +const { dialogFlag } = props; + const typeMap = { - create: t("knowledge.create"), - edit: t("knowledge.edit"), + create: t(`${dialogFlag}.create`), + edit: t(`${dialogFlag}.edit`), } as const; const dialogTitle = computed(() => { return typeMap[props.dialogType as keyof typeof typeMap]; @@ -108,20 +147,39 @@ const dialogTitle = computed(() => { const isEdit = computed(() => { return props.dialogType === "edit"; }); -const isActivated = computed(() => { - return props.dialogData?.active; +const isExperience = computed(() => { + return props.dialogFlag === "experience"; +}); + +const isOriginal = computed(() => { + return form.comp_subtype === "origin_kb"; }); const modelVisible = ref(true); const submitLoading = ref(false); const formRef = ref(); -const { name = "", description = "", active = false } = props.dialogData; +const { + comp_subtype = "origin_kb", + name = undefined, + description = "", + active = false, + experience_active = false, +} = props.dialogData; const form = reactive({ - name, + comp_subtype, + name: isExperience.value ? "Experience" : name, description, - active, + comp_type: dialogFlag, + active: isExperience.value ? 
experience_active : active, }); - -const rules = reactive({ +const kbList = ref([]); +const rules: FormRules = reactive({ + comp_subtype: [ + { + required: true, + message: t("knowledge.typeValid"), + trigger: "change", + }, + ], name: [ { required: true, @@ -129,6 +187,13 @@ const rules = reactive({ trigger: ["blur", "change"], }, ], + kbName: [ + { + required: true, + message: t("knowledge.nameRequired"), + trigger: "change", + }, + ], active: [ { required: true, @@ -137,17 +202,36 @@ const rules = reactive({ }, ], }); +const handleTypeChange = () => { + form.name = undefined; +}; +const queryKbadmin = async () => { + const data: any = await getkbadminList(); + kbList.value = [].concat(data); +}; +// Format parameter +const formatFormParam = () => { + const { name, description, comp_type, active, comp_subtype } = form; + return { + name, + description, + comp_type, + comp_subtype: !isExperience.value ? comp_subtype : undefined, + active: !isExperience.value ? active : undefined, + experience_active: isExperience.value ? active : undefined, + }; +}; // Submit const handleSubmit = () => { formRef.value?.validate().then(() => { submitLoading.value = true; const { name } = form; - const apiUrl = - props.dialogType === "edit" - ? requestKnowledgeBaseUpdate - : requestKnowledgeBaseCreate; - apiUrl(form) + const apiUrl = isEdit.value + ? 
requestKnowledgeBaseUpdate + : requestKnowledgeBaseCreate; + + apiUrl(formatFormParam()) .then(() => { emit("switch", name); handleCancel(); @@ -165,6 +249,9 @@ const handleSubmit = () => { const handleCancel = () => { emit("close"); }; +onMounted(() => { + queryKbadmin(); +}); ")}catch(l){console&&console.log(l)}}h=function(){var l,a=document.createElement("div");a.innerHTML=c._iconfont_svg_string_4784207,(a=a.getElementsByTagName("svg")[0])&&(a.setAttribute("aria-hidden","true"),a.style.position="absolute",a.style.width=0,a.style.height=0,a.style.overflow="hidden",a=a,(l=document.body).firstChild?m(a,l.firstChild):l.appendChild(a))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(t=function(){document.removeEventListener("DOMContentLoaded",t,!1),h()},document.addEventListener("DOMContentLoaded",t,!1)):document.attachEvent&&(i=h,o=c.document,v=!1,s(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,e())})}function e(){v||(v=!0,i())}function s(){try{o.documentElement.doScroll("left")}catch(l){return void setTimeout(s,50)}e()}})(window); \ No newline at end of file +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +(window._iconfont_svg_string_4784207 = + ''), + ((c) => { + var l = (a = (a = document.getElementsByTagName("script"))[a.length - 1]).getAttribute("data-injectcss"), + a = a.getAttribute("data-disable-injectsvg"); + if (!a) { + var h, + t, + i, + o, + v, + m = function (l, a) { + a.parentNode.insertBefore(l, a); + }; + if (l && !c.__iconfont__svg__cssinject__) { + c.__iconfont__svg__cssinject__ = !0; + try { + document.write( + "", + ); + } catch (l) { + console && console.log(l); + } + } + (h = function () { + var l, + a = document.createElement("div"); + (a.innerHTML = c._iconfont_svg_string_4784207), + (a = a.getElementsByTagName("svg")[0]) && + (a.setAttribute("aria-hidden", "true"), + (a.style.position = "absolute"), + 
(a.style.width = 0), + (a.style.height = 0), + (a.style.overflow = "hidden"), + (a = a), + (l = document.body).firstChild ? m(a, l.firstChild) : l.appendChild(a)); + }), + document.addEventListener + ? ~["complete", "loaded", "interactive"].indexOf(document.readyState) + ? setTimeout(h, 0) + : ((t = function () { + document.removeEventListener("DOMContentLoaded", t, !1), h(); + }), + document.addEventListener("DOMContentLoaded", t, !1)) + : document.attachEvent && + ((i = h), + (o = c.document), + (v = !1), + s(), + (o.onreadystatechange = function () { + "complete" == o.readyState && ((o.onreadystatechange = null), e()); + })); + } + function e() { + v || ((v = !0), i()); + } + function s() { + try { + o.documentElement.doScroll("left"); + } catch (l) { + return void setTimeout(s, 50); + } + e(); + } + })(window); diff --git a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts index f6e2bab3ce..d6fd8da012 100644 --- a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts +++ b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts @@ -1,3 +1,6 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + /* eslint-disable */ /* prettier-ignore */ // @ts-nocheck @@ -6,83 +9,98 @@ // biome-ignore lint: disable export {} declare global { - const EffectScope: typeof import('vue')['EffectScope'] - const acceptHMRUpdate: typeof import('pinia')['acceptHMRUpdate'] - const computed: typeof import('vue')['computed'] - const createApp: typeof import('vue')['createApp'] - const createPinia: typeof import('pinia')['createPinia'] - const customRef: typeof import('vue')['customRef'] - const defineAsyncComponent: typeof import('vue')['defineAsyncComponent'] - const defineComponent: typeof import('vue')['defineComponent'] - const defineStore: typeof import('pinia')['defineStore'] - const effectScope: typeof import('vue')['effectScope'] - const getActivePinia: typeof import('pinia')['getActivePinia'] - const getCurrentInstance: typeof 
import('vue')['getCurrentInstance'] - const getCurrentScope: typeof import('vue')['getCurrentScope'] - const h: typeof import('vue')['h'] - const inject: typeof import('vue')['inject'] - const isProxy: typeof import('vue')['isProxy'] - const isReactive: typeof import('vue')['isReactive'] - const isReadonly: typeof import('vue')['isReadonly'] - const isRef: typeof import('vue')['isRef'] - const mapActions: typeof import('pinia')['mapActions'] - const mapGetters: typeof import('pinia')['mapGetters'] - const mapState: typeof import('pinia')['mapState'] - const mapStores: typeof import('pinia')['mapStores'] - const mapWritableState: typeof import('pinia')['mapWritableState'] - const markRaw: typeof import('vue')['markRaw'] - const nextTick: typeof import('vue')['nextTick'] - const onActivated: typeof import('vue')['onActivated'] - const onBeforeMount: typeof import('vue')['onBeforeMount'] - const onBeforeRouteLeave: typeof import('vue-router')['onBeforeRouteLeave'] - const onBeforeRouteUpdate: typeof import('vue-router')['onBeforeRouteUpdate'] - const onBeforeUnmount: typeof import('vue')['onBeforeUnmount'] - const onBeforeUpdate: typeof import('vue')['onBeforeUpdate'] - const onDeactivated: typeof import('vue')['onDeactivated'] - const onErrorCaptured: typeof import('vue')['onErrorCaptured'] - const onMounted: typeof import('vue')['onMounted'] - const onRenderTracked: typeof import('vue')['onRenderTracked'] - const onRenderTriggered: typeof import('vue')['onRenderTriggered'] - const onScopeDispose: typeof import('vue')['onScopeDispose'] - const onServerPrefetch: typeof import('vue')['onServerPrefetch'] - const onUnmounted: typeof import('vue')['onUnmounted'] - const onUpdated: typeof import('vue')['onUpdated'] - const onWatcherCleanup: typeof import('vue')['onWatcherCleanup'] - const provide: typeof import('vue')['provide'] - const reactive: typeof import('vue')['reactive'] - const readonly: typeof import('vue')['readonly'] - const ref: typeof import('vue')['ref'] - 
const resolveComponent: typeof import('vue')['resolveComponent'] - const setActivePinia: typeof import('pinia')['setActivePinia'] - const setMapStoreSuffix: typeof import('pinia')['setMapStoreSuffix'] - const shallowReactive: typeof import('vue')['shallowReactive'] - const shallowReadonly: typeof import('vue')['shallowReadonly'] - const shallowRef: typeof import('vue')['shallowRef'] - const storeToRefs: typeof import('pinia')['storeToRefs'] - const toRaw: typeof import('vue')['toRaw'] - const toRef: typeof import('vue')['toRef'] - const toRefs: typeof import('vue')['toRefs'] - const toValue: typeof import('vue')['toValue'] - const triggerRef: typeof import('vue')['triggerRef'] - const unref: typeof import('vue')['unref'] - const useAttrs: typeof import('vue')['useAttrs'] - const useCssModule: typeof import('vue')['useCssModule'] - const useCssVars: typeof import('vue')['useCssVars'] - const useId: typeof import('vue')['useId'] - const useLink: typeof import('vue-router')['useLink'] - const useModel: typeof import('vue')['useModel'] - const useRoute: typeof import('vue-router')['useRoute'] - const useRouter: typeof import('vue-router')['useRouter'] - const useSlots: typeof import('vue')['useSlots'] - const useTemplateRef: typeof import('vue')['useTemplateRef'] - const watch: typeof import('vue')['watch'] - const watchEffect: typeof import('vue')['watchEffect'] - const watchPostEffect: typeof import('vue')['watchPostEffect'] - const watchSyncEffect: typeof import('vue')['watchSyncEffect'] + const EffectScope: (typeof import("vue"))["EffectScope"]; + const acceptHMRUpdate: (typeof import("pinia"))["acceptHMRUpdate"]; + const computed: (typeof import("vue"))["computed"]; + const createApp: (typeof import("vue"))["createApp"]; + const createPinia: (typeof import("pinia"))["createPinia"]; + const customRef: (typeof import("vue"))["customRef"]; + const defineAsyncComponent: (typeof import("vue"))["defineAsyncComponent"]; + const defineComponent: (typeof 
import("vue"))["defineComponent"]; + const defineStore: (typeof import("pinia"))["defineStore"]; + const effectScope: (typeof import("vue"))["effectScope"]; + const getActivePinia: (typeof import("pinia"))["getActivePinia"]; + const getCurrentInstance: (typeof import("vue"))["getCurrentInstance"]; + const getCurrentScope: (typeof import("vue"))["getCurrentScope"]; + const h: (typeof import("vue"))["h"]; + const inject: (typeof import("vue"))["inject"]; + const isProxy: (typeof import("vue"))["isProxy"]; + const isReactive: (typeof import("vue"))["isReactive"]; + const isReadonly: (typeof import("vue"))["isReadonly"]; + const isRef: (typeof import("vue"))["isRef"]; + const mapActions: (typeof import("pinia"))["mapActions"]; + const mapGetters: (typeof import("pinia"))["mapGetters"]; + const mapState: (typeof import("pinia"))["mapState"]; + const mapStores: (typeof import("pinia"))["mapStores"]; + const mapWritableState: (typeof import("pinia"))["mapWritableState"]; + const markRaw: (typeof import("vue"))["markRaw"]; + const nextTick: (typeof import("vue"))["nextTick"]; + const onActivated: (typeof import("vue"))["onActivated"]; + const onBeforeMount: (typeof import("vue"))["onBeforeMount"]; + const onBeforeRouteLeave: (typeof import("vue-router"))["onBeforeRouteLeave"]; + const onBeforeRouteUpdate: (typeof import("vue-router"))["onBeforeRouteUpdate"]; + const onBeforeUnmount: (typeof import("vue"))["onBeforeUnmount"]; + const onBeforeUpdate: (typeof import("vue"))["onBeforeUpdate"]; + const onDeactivated: (typeof import("vue"))["onDeactivated"]; + const onErrorCaptured: (typeof import("vue"))["onErrorCaptured"]; + const onMounted: (typeof import("vue"))["onMounted"]; + const onRenderTracked: (typeof import("vue"))["onRenderTracked"]; + const onRenderTriggered: (typeof import("vue"))["onRenderTriggered"]; + const onScopeDispose: (typeof import("vue"))["onScopeDispose"]; + const onServerPrefetch: (typeof import("vue"))["onServerPrefetch"]; + const onUnmounted: (typeof 
import("vue"))["onUnmounted"]; + const onUpdated: (typeof import("vue"))["onUpdated"]; + const onWatcherCleanup: (typeof import("vue"))["onWatcherCleanup"]; + const provide: (typeof import("vue"))["provide"]; + const reactive: (typeof import("vue"))["reactive"]; + const readonly: (typeof import("vue"))["readonly"]; + const ref: (typeof import("vue"))["ref"]; + const resolveComponent: (typeof import("vue"))["resolveComponent"]; + const setActivePinia: (typeof import("pinia"))["setActivePinia"]; + const setMapStoreSuffix: (typeof import("pinia"))["setMapStoreSuffix"]; + const shallowReactive: (typeof import("vue"))["shallowReactive"]; + const shallowReadonly: (typeof import("vue"))["shallowReadonly"]; + const shallowRef: (typeof import("vue"))["shallowRef"]; + const storeToRefs: (typeof import("pinia"))["storeToRefs"]; + const toRaw: (typeof import("vue"))["toRaw"]; + const toRef: (typeof import("vue"))["toRef"]; + const toRefs: (typeof import("vue"))["toRefs"]; + const toValue: (typeof import("vue"))["toValue"]; + const triggerRef: (typeof import("vue"))["triggerRef"]; + const unref: (typeof import("vue"))["unref"]; + const useAttrs: (typeof import("vue"))["useAttrs"]; + const useCssModule: (typeof import("vue"))["useCssModule"]; + const useCssVars: (typeof import("vue"))["useCssVars"]; + const useId: (typeof import("vue"))["useId"]; + const useLink: (typeof import("vue-router"))["useLink"]; + const useModel: (typeof import("vue"))["useModel"]; + const useRoute: (typeof import("vue-router"))["useRoute"]; + const useRouter: (typeof import("vue-router"))["useRouter"]; + const useSlots: (typeof import("vue"))["useSlots"]; + const useTemplateRef: (typeof import("vue"))["useTemplateRef"]; + const watch: (typeof import("vue"))["watch"]; + const watchEffect: (typeof import("vue"))["watchEffect"]; + const watchPostEffect: (typeof import("vue"))["watchPostEffect"]; + const watchSyncEffect: (typeof import("vue"))["watchSyncEffect"]; } // for type re-export declare global { // 
@ts-ignore - export type { Component, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue' - import('vue') + export type { + Component, + ComponentPublicInstance, + ComputedRef, + DirectiveBinding, + ExtractDefaultPropTypes, + ExtractPropTypes, + ExtractPublicPropTypes, + InjectionKey, + PropType, + Ref, + MaybeRef, + MaybeRefOrGetter, + VNode, + WritableComputedRef, + } from "vue"; + import("vue"); } diff --git a/EdgeCraftRAG/ui/vue/src/components.d.ts b/EdgeCraftRAG/ui/vue/src/components.d.ts index 6ec287f7f1..35e756d199 100644 --- a/EdgeCraftRAG/ui/vue/src/components.d.ts +++ b/EdgeCraftRAG/ui/vue/src/components.d.ts @@ -1,8 +1,11 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + /* eslint-disable */ // @ts-nocheck // Generated by unplugin-vue-components // Read more: https://github.com/vuejs/core/pull/3399 -export {} +export {}; /* prettier-ignore */ declare module 'vue' { diff --git a/EdgeCraftRAG/ui/vue/src/i18n/en.ts b/EdgeCraftRAG/ui/vue/src/i18n/en.ts index decfaec3f3..86ff43773b 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/en.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/en.ts @@ -28,8 +28,7 @@ export default { reset: "Reset", uploadTip: "Click or drag file to this area to upload", loading: "Loading", - waitTip: - "Please wait patiently and do not refresh the page during this period.", + waitTip: "Please wait patiently and do not refresh the page during this period.", }, system: { title: "System Status", @@ -66,8 +65,7 @@ export default { activated: "Activated", inactive: "Inactive", isActive: "Activated", - pipelineFormatTip: - "Supports JSON format, with file size not exceeding 10M.", + pipelineFormatTip: "Supports JSON format, with file size not exceeding 10M.", importSuccTip: "Files upload successful!", importErrTip: "Files upload failed!", name: "Name", @@ -77,8 
+75,7 @@ export default { deactivateTip: "Are you sure deactivate this pipeline?", activeTip: "Are you sure activate this pipeline?", deleteTip: "Are you sure delete this pipeline?", - notActivatedTip: - "There is no available pipeline. Please create or activate it first.", + notActivatedTip: "There is no available pipeline. Please create or activate it first.", validErr: "Form validation failed !", config: { basic: "Basic", @@ -119,11 +116,9 @@ export default { nameValid3: "The name only supports letters, numbers, and underscores.", nodeParserType: "Please select Node Parser Type", chunkSizeValid1: "Please select Chunk Size", - chunkSizeValid2: - "The value of Chunk Size cannot be less than Chunk Overlap", + chunkSizeValid2: "The value of Chunk Size cannot be less than Chunk Overlap", chunkOverlapValid1: "Please select Chunk Overlap", - chunkOverlapValid2: - "The value of Chunk Overlap cannot be greater than Chunk Size", + chunkOverlapValid2: "The value of Chunk Overlap cannot be greater than Chunk Size", windowSize: "Please select Chunk Window Size", indexerType: "Please select Indexer Type", embedding: "Please select embedding Model", @@ -153,36 +148,29 @@ export default { vllmUrlValid3: "URL cannot be accessed", vllmUrlValid4: "Test passed !", vllmUrlValid5: "The URL has not passed verification yet", - nodeParserTypeTip: - "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", - indexerTypeTip: - "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", - retrieverTypeTip: - "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", + nodeParserTypeTip: "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", + indexerTypeTip: "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", + retrieverTypeTip: "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", retrieverChangeTip: "Please go to the Indexer stage to 
complete the data", indexerTypeValid1: "Indexer type can only select kbadmin", modelRequired: "Please enter embedding model url", modelFormat: "Please enter the correct url", - retrieverValid: - "Please return to the Indexer stage to supplement information.", + retrieverValid: "Please return to the Indexer stage to supplement information.", }, desc: { name: "The name identifier of the pipeline", nodeParserType: "Node parsing type when you use RAG", chunkSize: "Size of each chunk for processing", chunkOverlap: "Overlap size between chunks", - windowSize: - "The number of sentences on each side of a sentence to capture", - indexerType: - "The type of index structure responsible for building based on the parsed nodes", + windowSize: "The number of sentences on each side of a sentence to capture", + indexerType: "The type of index structure responsible for building based on the parsed nodes", embedding: "Embed the text data to represent it and build a vector index", embeddingUrl: "Connecting embedding model url", embeddingDevice: "The device used by the embedding model", retrieverType: "The retrieval type used when retrieving relevant nodes from the index according to the user's experience", topk: "The number of top k results to return", - postProcessorType: - "Select postprocessors for post-processing of the context", + postProcessorType: "Select postprocessors for post-processing of the context", rerank: "Rerank Model", rerankDevice: "Rerank run device", generatorType: "Local inference generator or vLLM generator", @@ -190,21 +178,17 @@ export default { llmDevice: "The device used by the LLM", weights: "Model weight", reranker: "The model for reranking.", - metadataReplace: - "Used to replace the node content with a field from the node metadata.", + metadataReplace: "Used to replace the node content with a field from the node metadata.", vectorsimilarity: "retrieval according to vector similarity", - autoMerge: - "This retriever will try to merge context into parent 
context.", + autoMerge: "This retriever will try to merge context into parent context.", bm25: "A BM25 retriever that uses the BM25 algorithm to retrieve nodes.", faissVector: "Embeddings are stored within a Faiss index.", vector: "Vector Store Index.", simple: "Parse text with a preference for complete sentences.", - hierarchical: - "Splits a document into a recursive hierarchy Nodes using a NodeParser.", + hierarchical: "Splits a document into a recursive hierarchy Nodes using a NodeParser.", sentencewindow: "Sentence window node parser. Splits a document into Nodes, with each node being a sentence. Each node contains a window from the surrounding sentences in the metadata.", - unstructured: - "UnstructedNodeParser is a component that processes unstructured data.", + unstructured: "UnstructedNodeParser is a component that processes unstructured data.", milvusVector: "Embedding vectors stored in milvus", vector_url: "Connecting milvus vector url", test: "Test", @@ -258,16 +242,14 @@ export default { edit: "Edit Knowledge Base", deleteTip: "Are you sure delete this knowledge base?", activeTip: "Are you sure activate this knowledge base?", - uploadTip: - "Supports PDF, Word, TXT,Doc,Html,PPT formats, with a single file size not exceeding 200M", + uploadTip: "Supports PDF, Word, TXT,Doc,Html,PPT formats, with a single file size not exceeding 200M", notFileTip: "The knowledge base is empty. 
Go upload your files.", name: "Name", des: "Description", activated: "Activated", nameValid1: "Please input knowledge base name", nameValid2: "Name should be between 2 and 30 characters", - nameValid3: - "Alphanumeric and underscore only, starting with a letter or underscore.", + nameValid3: "Alphanumeric and underscore only, starting with a letter or underscore.", desValid: "Please input knowledge base description", activeValid: "Please select whether to activate", uploadValid: "Single file size not exceeding 200M.", diff --git a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts index 0f0ef3ef2e..e9ab76ef55 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts @@ -51,8 +51,7 @@ export default { step1: "创建 Pipeline", step1Tip: "定制您的 RAG 流程,释放 AI 信息处理的最大能力。", step2: "前往对话", - step2Tip: - "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", + step2Tip: "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", create: "去创建", }, pipeline: { @@ -159,8 +158,7 @@ export default { }, desc: { name: "Pipeline的名称标识,用于区分不同工作流", - nodeParserType: - "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", + nodeParserType: "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", chunkSize: "文本处理时的单块数据大小", chunkOverlap: "相邻数据块的重叠部分大小,确保跨块语义连续性", windowSize: "每个节点捕获的上下文句子窗口大小,用于增强语义完整性", @@ -186,8 +184,7 @@ export default { vector: "矢量存储索引", simple: "解析文本,优先选择完整的句子。", hierarchical: "使用NodeParser将文档拆分为递归层次结构的节点。", - sentencewindow: - "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", + sentencewindow: "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", unstructured: "一个处理非结构化数据的组件", milvusVector: "矢量索引存储在Milvus中", vector_url: "测试Milvus地址是否可用", @@ -211,8 +208,7 @@ export default { desc: { top_n: "重排后结果的数量", temperature: "数值越高,输出越多样化", - top_p: - "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", + top_p: "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", top_k: "从概率前k的 Token 中采样", penalty: "抑制重复的系数,设为1.0表示禁用", maxToken: "生成回答的最大Token数量", @@ -242,8 +238,7 @@ export default { edit: "编辑知识库", deleteTip: "您确定要删除此知识库吗?此操作不可恢复。", 
activeTip: "您确定要激活此知识库吗?", - uploadTip: - "支持 PDF、Word、TXT、Doc、HTML、PPT 格式,单个文件大小不超过 200M。", + uploadTip: "支持 PDF、Word、TXT、Doc、HTML、PPT 格式,单个文件大小不超过 200M。", notFileTip: "您还没有上传任何文件,点击“上传”按钮开始添加内容吧~", name: "名称", des: "描述", diff --git a/EdgeCraftRAG/ui/vue/src/main.ts b/EdgeCraftRAG/ui/vue/src/main.ts index 94c5debfeb..25e2b930cf 100644 --- a/EdgeCraftRAG/ui/vue/src/main.ts +++ b/EdgeCraftRAG/ui/vue/src/main.ts @@ -25,8 +25,7 @@ const setDayjsLocale = (locale: string) => { const body = document.documentElement as HTMLElement; -if (Local.get("themeInfo")?.theme === "dark") - body.setAttribute("data-theme", "dark"); +if (Local.get("themeInfo")?.theme === "dark") body.setAttribute("data-theme", "dark"); else body.setAttribute("data-theme", ""); // watch i18n update dayjs language @@ -35,7 +34,7 @@ watch( (newLocale) => { setDayjsLocale(newLocale); }, - { immediate: true } + { immediate: true }, ); const app = createApp(App); diff --git a/EdgeCraftRAG/ui/vue/src/utils/common.ts b/EdgeCraftRAG/ui/vue/src/utils/common.ts index 8e7e80810a..97e421cb66 100644 --- a/EdgeCraftRAG/ui/vue/src/utils/common.ts +++ b/EdgeCraftRAG/ui/vue/src/utils/common.ts @@ -6,8 +6,7 @@ import { customNotification } from "./notification"; import { Local } from "./storage"; export const useNotification = () => { - const customNotificationInjected = - inject("customNotification"); + const customNotificationInjected = inject("customNotification"); if (!customNotificationInjected) { throw new Error("Notification service not provided"); @@ -22,11 +21,7 @@ export const formatDecimals = (num: number, decimalPlaces: number = 2) => { return Math.round(num * factor) / factor; }; -export const formatCapitalize = ( - string: string, - start: number = 0, - length: number = 1 -) => { +export const formatCapitalize = (string: string, start: number = 0, length: number = 1) => { const end = start + length; const part1 = string.slice(0, start); const part2 = string.slice(start, end).toUpperCase(); @@ -48,11 +43,7 
@@ export const getChatSessionId = (): string => { }; const generateFallbackId = (): string => { - if ( - typeof self !== "undefined" && - self.crypto && - self.crypto.getRandomValues - ) { + if (typeof self !== "undefined" && self.crypto && self.crypto.getRandomValues) { const array = new Uint32Array(2); self.crypto.getRandomValues(array); const randomPart = Array.from(array) @@ -60,8 +51,6 @@ const generateFallbackId = (): string => { .join(""); return `${Date.now()}_${randomPart}`; } else { - throw new Error( - "No secure random number generator available for session ID generation." - ); + throw new Error("No secure random number generator available for session ID generation."); } }; diff --git a/EdgeCraftRAG/ui/vue/src/utils/notification.ts b/EdgeCraftRAG/ui/vue/src/utils/notification.ts index d4e6bef319..151141e0ea 100644 --- a/EdgeCraftRAG/ui/vue/src/utils/notification.ts +++ b/EdgeCraftRAG/ui/vue/src/utils/notification.ts @@ -3,12 +3,7 @@ import { h } from "vue"; import { notification } from "ant-design-vue"; -import { - CheckCircleFilled, - CloseCircleFilled, - ExclamationCircleFilled, - InfoCircleFilled, -} from "@ant-design/icons-vue"; +import { CheckCircleFilled, CloseCircleFilled, ExclamationCircleFilled, InfoCircleFilled } from "@ant-design/icons-vue"; const getNotificationIcon = (type: string) => { switch (type) { @@ -27,12 +22,10 @@ const getNotificationIcon = (type: string) => { export const customNotification = ( type: "success" | "warning" | "error" | "info", message: string, - description: string | undefined + description: string | undefined, ) => { const { icon, color } = getNotificationIcon(type); - const styledIcon = icon - ? h(icon, { style: { color: `var(${color})` } }) - : null; + const styledIcon = icon ? 
h(icon, { style: { color: `var(${color})` } }) : null; notification[type]({ message, diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts index 655d935fa3..de0cc61809 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts @@ -7,7 +7,7 @@ export const handleMessageSend = async ( url: string, postData: any, onDisplay: (data: any) => void, - onEnd?: () => void + onEnd?: () => void, ): Promise => { let reader: ReadableStreamDefaultReader | undefined; diff --git a/EdgeCraftRAG/ui/vue/vite.config.ts b/EdgeCraftRAG/ui/vue/vite.config.ts index ff27303aa5..23fda73b7b 100644 --- a/EdgeCraftRAG/ui/vue/vite.config.ts +++ b/EdgeCraftRAG/ui/vue/vite.config.ts @@ -66,10 +66,7 @@ const viteConfig = defineConfig((mode: ConfigEnv) => { preprocessorOptions: { less: { javascriptEnabled: true, - additionalData: `@import "${path.resolve( - __dirname, - "src/theme/index.less" - )}";`, + additionalData: `@import "${path.resolve(__dirname, "src/theme/index.less")}";`, }, }, }, From 3b0a2cbfad3ff6f978c80f323543ec403dfe9e29 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Wed, 24 Sep 2025 09:27:27 +0800 Subject: [PATCH 03/16] fix CI issue Signed-off-by: Yongbozzz --- EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py | 2 +- .../edgecraftrag/components/generator.py | 2 - .../edgecraftrag/components/pipeline.py | 113 +++++------------- 3 files changed, 31 insertions(+), 86 deletions(-) diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py index ab95cca4e5..e15b3e4f4d 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py @@ -61,7 +61,7 @@ async def get_pipeline_benchmark(): # GET Pipeline benchmark @pipeline_app.get(path="/v1/settings/pipelines/{name}/benchmarks") -async def 
get_pipeline_benchmark(name): +async def get_pipeline_benchmarks(name): pl = ctx.get_pipeline_mgr().get_pipeline_by_name_or_id(name) if pl and pl.benchmark: return pl.benchmark.benchmark_data_list diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index d72f1cf87e..e4acceec40 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -6,9 +6,7 @@ import json import os import urllib.request -import requests from urllib.parse import urlparse -from fastapi import HTTPException, status from edgecraftrag.base import BaseComponent, CompType, GeneratorType, InferenceType, NodeParserType from edgecraftrag.utils import concat_history, get_prompt_template, save_history diff --git a/EdgeCraftRAG/edgecraftrag/components/pipeline.py b/EdgeCraftRAG/edgecraftrag/components/pipeline.py index 8ec80a8bc4..a4014ac6b9 100644 --- a/EdgeCraftRAG/edgecraftrag/components/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/components/pipeline.py @@ -44,7 +44,7 @@ def __init__( if self.name == "" or self.name is None: self.name = self.idx self.enable_benchmark = os.getenv("ENABLE_BENCHMARK", "False").lower() == "true" - self.run_pipeline_cb = run_generator_ben if self.enable_benchmark else run_generator + self.run_pipeline_cb = run_generator self.run_retriever_cb = run_retrieve self.run_data_prepare_cb = run_simple_doc @@ -239,88 +239,9 @@ async def timing_wrapper(): return ret -def run_generator_ben(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: - benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() - contexts = {} - retri_res = [] - active_kb = chat_request.user if chat_request.user else None - enable_rag_retrieval = chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) if chat_request.chat_template_kwargs else True - if not active_kb: - enable_rag_retrieval = False - elif pl.retriever.comp_subtype == "kbadmin_retriever" and 
active_kb.comp_subtype == "origin_kb": - enable_rag_retrieval = False - elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": - enable_rag_retrieval = False - query = chat_request.messages - sub_questionss_result = None - experience_status = True if chat_request.tool_choice == 'auto' else False - if enable_rag_retrieval: - start = time.perf_counter() - if pl.generator.inference_type == InferenceType.VLLM and experience_status: - UI_DIRECTORY ="/home/user/ui_cache" - search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") - search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") - - def run_async_query_search(): - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - try: - return loop.run_until_complete(query_search(query, search_config_path, search_dir, pl)) - finally: - loop.close() - - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(run_async_query_search) - top1_issue, sub_questionss_result = future.result() - if sub_questionss_result: - query = query + sub_questionss_result - benchmark_data[CompType.QUERYSEARCH] = time.perf_counter() - start - start = time.perf_counter() - top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k - retri_res = pl.retriever.run(query=query, top_k=top_k) - query_bundle = QueryBundle(query) - benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start - contexts[CompType.RETRIEVER] = retri_res - - start = time.perf_counter() - if pl.postprocessor: - for processor in pl.postprocessor: - if ( - isinstance(processor, RerankProcessor) - and chat_request.top_n != ChatCompletionRequest.model_fields["top_n"].default - ): - processor.top_n = chat_request.top_n - retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) - contexts[CompType.POSTPROCESSOR] = retri_res - benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start - - if 
pl.generator is None: - raise ValueError("No Generator Specified") - - text_gen_context, prompt_str = pl.generator.query_transform(chat_request, retri_res) - input_token_size = pl.benchmark.cal_input_token_size(prompt_str) - - np_type = pl.node_parser.comp_subtype - start = time.perf_counter() - if pl.generator.inference_type == InferenceType.LOCAL: - ret = pl.generator.run(chat_request, retri_res, np_type) - elif pl.generator.inference_type == InferenceType.VLLM: - ret = pl.generator.run_vllm(chat_request, retri_res, np_type, sub_questions=sub_questionss_result) - else: - raise ValueError("LLM inference_type not supported") - end = time.perf_counter() - - if isinstance(ret, StreamingResponse): - ret = benchmark_response(ret, pl.benchmark, benchmark_index, benchmark_data, input_token_size, start) - else: - benchmark_data[CompType.GENERATOR] = end - start - pl.benchmark.insert_llm_data(benchmark_index, input_token_size) - pl.benchmark.insert_benchmark_data(benchmark_data) - return ret, contexts - - def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: - query = chat_request.messages + if pl.enable_benchmark: + benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() contexts = {} retri_res = [] active_kb = chat_request.user if chat_request.user else None @@ -335,6 +256,9 @@ def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: sub_questionss_result = None experience_status = True if chat_request.tool_choice == 'auto' else False if enable_rag_retrieval: + start = 0 + if pl.enable_benchmark: + start = time.perf_counter() if pl.generator.inference_type == InferenceType.VLLM and experience_status: UI_DIRECTORY ="/home/user/ui_cache" search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") @@ -353,11 +277,17 @@ def run_async_query_search(): top1_issue, sub_questionss_result = future.result() if sub_questionss_result: query = query + sub_questionss_result + if pl.enable_benchmark: + 
benchmark_data[CompType.QUERYSEARCH] = time.perf_counter() - start + start = time.perf_counter() top_k = None if chat_request.k == ChatCompletionRequest.model_fields["k"].default else chat_request.k retri_res = pl.retriever.run(query=query, top_k=top_k) + if pl.enable_benchmark: + benchmark_data[CompType.RETRIEVER] = time.perf_counter() - start contexts[CompType.RETRIEVER] = retri_res query_bundle = QueryBundle(query) - + if pl.enable_benchmark: + start = time.perf_counter() if pl.postprocessor: for processor in pl.postprocessor: if ( @@ -367,14 +297,31 @@ def run_async_query_search(): processor.top_n = chat_request.top_n retri_res = processor.run(retri_res=retri_res, query_bundle=query_bundle) contexts[CompType.POSTPROCESSOR] = retri_res + if pl.enable_benchmark: + benchmark_data[CompType.POSTPROCESSOR] = time.perf_counter() - start if pl.generator is None: raise ValueError("No Generator Specified") + + if pl.enable_benchmark: + _, prompt_str = pl.generator.query_transform(chat_request, retri_res) + input_token_size = pl.benchmark.cal_input_token_size(prompt_str) + np_type = pl.node_parser.comp_subtype + if pl.enable_benchmark: + start = time.perf_counter() if pl.generator.inference_type == InferenceType.LOCAL: ret = pl.generator.run(chat_request, retri_res, np_type) elif pl.generator.inference_type == InferenceType.VLLM: ret = pl.generator.run_vllm(chat_request, retri_res, np_type, sub_questions=sub_questionss_result) else: raise ValueError("LLM inference_type not supported") + if pl.enable_benchmark: + end = time.perf_counter() + if isinstance(ret, StreamingResponse): + ret = benchmark_response(ret, pl.benchmark, benchmark_index, benchmark_data, input_token_size, start) + else: + benchmark_data[CompType.GENERATOR] = end - start + pl.benchmark.insert_llm_data(benchmark_index, input_token_size) + pl.benchmark.insert_benchmark_data(benchmark_data) return ret, contexts From 34d965c31b9cbb954a4f0e9858b79c638f6c32fb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 24 Sep 2025 01:36:10 +0000 Subject: [PATCH 04/16] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../docker_compose/intel/gpu/arc/README.md | 5 +- .../intel/gpu/arc/compose_vllm_b60.yaml | 2 +- EdgeCraftRAG/docs/API_Guide.md | 2 +- EdgeCraftRAG/docs/Advanced_Setup.md | 2 +- EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md | 4 +- EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py | 4 +- .../edgecraftrag/api/v1/knowledge_base.py | 134 ++++++++------ EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py | 26 ++- EdgeCraftRAG/edgecraftrag/api_schema.py | 4 +- EdgeCraftRAG/edgecraftrag/base.py | 6 +- .../edgecraftrag/components/benchmark.py | 9 +- .../edgecraftrag/components/generator.py | 13 +- .../edgecraftrag/components/indexer.py | 20 ++- .../edgecraftrag/components/knowledge_base.py | 51 +++--- .../edgecraftrag/components/node_parser.py | 2 + .../edgecraftrag/components/pipeline.py | 16 +- .../components/query_preprocess.py | 24 +-- .../edgecraftrag/components/retriever.py | 141 +++++++-------- .../edgecraftrag/controllers/compmgr.py | 23 ++- .../controllers/knowledge_basemgr.py | 33 ++-- EdgeCraftRAG/edgecraftrag/requirements.txt | 4 +- EdgeCraftRAG/tools/quick_start.sh | 10 +- EdgeCraftRAG/ui/vue/components.d.ts | 5 +- .../ui/vue/src/api/knowledgeBase/index.ts | 5 +- EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts | 4 +- EdgeCraftRAG/ui/vue/src/api/request.ts | 13 +- .../ui/vue/src/assets/iconFont/iconfont.css | 8 +- .../ui/vue/src/assets/iconFont/iconfont.js | 69 ++++++- EdgeCraftRAG/ui/vue/src/auto-imports.d.ts | 168 ++++++++++-------- EdgeCraftRAG/ui/vue/src/components.d.ts | 5 +- EdgeCraftRAG/ui/vue/src/i18n/en.ts | 54 ++---- EdgeCraftRAG/ui/vue/src/i18n/zh.ts | 15 +- EdgeCraftRAG/ui/vue/src/main.ts | 5 +- EdgeCraftRAG/ui/vue/src/utils/common.ts | 19 +- EdgeCraftRAG/ui/vue/src/utils/notification.ts | 13 +- .../chatbot/components/Chatbot/SseService.ts | 2 +- 
EdgeCraftRAG/ui/vue/vite.config.ts | 5 +- 37 files changed, 520 insertions(+), 405 deletions(-) diff --git a/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md b/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md index b95dd62716..94d920bbc4 100755 --- a/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md +++ b/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md @@ -95,7 +95,8 @@ For more advanced env variables and configurations, please refer to [Prepare env ### 5. Deploy the Service on Intel GPU Using Docker Compose -set Milvus DB and chat history round for inference: +set Milvus DB and chat history round for inference: + ```bash # EC-RAG support Milvus as persistent database, by default milvus is disabled, you can choose to set MILVUS_ENABLED=1 to enable it export MILVUS_ENABLED=0 @@ -123,7 +124,7 @@ docker compose -f docker_compose/intel/gpu/arc/compose_vllm.yaml up -d #### option b. Deploy the Service on Arc B60 Using Docker Compose ```bash -# Besides MILVUS_ENABLED and CHAT_HISTORY_ROUND, below enviroments are exposed for vLLM config, you can change them to your preference: +# Besides MILVUS_ENABLED and CHAT_HISTORY_ROUND, below environments are exposed for vLLM config, you can change them to your preference: # export VLLM_SERVICE_PORT_B60=8086 # export DTYPE=float16 # export TP=1 # for multi GPU, you can change TP value diff --git a/EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml b/EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml index b1bb099f47..f10b476fc1 100644 --- a/EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml +++ b/EdgeCraftRAG/docker_compose/intel/gpu/arc/compose_vllm_b60.yaml @@ -183,4 +183,4 @@ services: -dp=$${DP}" networks: default: - driver: bridge \ No newline at end of file + driver: bridge diff --git a/EdgeCraftRAG/docs/API_Guide.md b/EdgeCraftRAG/docs/API_Guide.md index 753b9ebcc8..c13753596c 100644 --- a/EdgeCraftRAG/docs/API_Guide.md +++ b/EdgeCraftRAG/docs/API_Guide.md @@ -219,4 +219,4 @@ 
curl -X POST http://${HOST_IP}:16010/v1/retrieval -H "Content-Type: application/ ```bash curl -X POST http://${HOST_IP}:16011/v1/chatqna -H "Content-Type: application/json" -d '{"messages":"#REPLACE WITH YOUR QUESTION HERE#", "top_n":5, "max_tokens":512}' | jq '.' -``` \ No newline at end of file +``` diff --git a/EdgeCraftRAG/docs/Advanced_Setup.md b/EdgeCraftRAG/docs/Advanced_Setup.md index 136b34803f..ed6a080310 100644 --- a/EdgeCraftRAG/docs/Advanced_Setup.md +++ b/EdgeCraftRAG/docs/Advanced_Setup.md @@ -208,6 +208,6 @@ Model preparation is the same as vLLM inference section, please refer to [Prepar This section is the same as default vLLM inference section, please refer to [Prepare env variables and configurations](../docker_compose/intel/gpu/arc/README.md#prepare-env-variables-and-configurations) and [Start Edge Craft RAG Services with Docker Compose](../docker_compose/intel/gpu/arc/README.md#deploy-the-service-on-arc-a770-using-docker-compose) -### 2. Access Kbadmin UI +### 2. Access Kbadmin UI please refer to [ChatQnA with Kbadmin in UI](./Explore_Edge_Craft_RAG.md#chatqna-with-kbadmin-in-ui) diff --git a/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md b/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md index 624e21a010..0a5a91ba5e 100644 --- a/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md +++ b/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md @@ -34,7 +34,7 @@ After knowledge base creation, you can upload the documents for retrieval. Then, you can submit messages in the chat box in `Chat` page. ![chat_with_rag](../assets/img/chatqna.png) -## ChatQnA with Kbadmin in UI +## ChatQnA with Kbadmin in UI ### Kbadmin Pipeline @@ -52,4 +52,4 @@ Please select 'kbadmin' in `Type`and select kb name from the kbs you created in ![upload_data](../assets/img/kbadmin_kb.png) Then, you can submit messages in the chat box in `Chat` page. 
-![chat_with_rag](../assets/img/chatqna.png) \ No newline at end of file +![chat_with_rag](../assets/img/chatqna.png) diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py b/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py index 60df3b50eb..7e0f3aa831 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py @@ -38,7 +38,7 @@ async def chatqna(request: ChatCompletionRequest): active_kb = ctx.knowledgemgr.get_active_knowledge_base() request.user = active_kb if active_kb else None if experience_kb: - request.tool_choice = 'auto' if experience_kb.experience_active else 'none' + request.tool_choice = "auto" if experience_kb.experience_active else "none" generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: request.model = generator.model_id @@ -62,7 +62,7 @@ async def ragqna(request: ChatCompletionRequest): active_kb = ctx.knowledgemgr.get_active_knowledge_base() request.user = active_kb if active_kb else None if experience_kb: - request.tool_choice = 'auto' if experience_kb.experience_active else 'none' + request.tool_choice = "auto" if experience_kb.experience_active else "none" generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: request.model = generator.model_id diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py index af588dd02a..fa0516272b 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py @@ -5,15 +5,15 @@ import json import os import re +from typing import Dict, List, Union -from typing import List, Dict, Union from edgecraftrag.api.v1.data import add_data -from edgecraftrag.api_schema import DataIn, KnowledgeBaseCreateIn, ExperienceIn +from edgecraftrag.api_schema import DataIn, ExperienceIn, KnowledgeBaseCreateIn from edgecraftrag.base import IndexerType +from edgecraftrag.components.query_preprocess import query_search +from 
edgecraftrag.components.retriever import get_kbs_info from edgecraftrag.context import ctx from edgecraftrag.utils import compare_mappings -from edgecraftrag.components.retriever import get_kbs_info -from edgecraftrag.components.query_preprocess import query_search from fastapi import FastAPI, HTTPException, status from pymilvus.exceptions import MilvusException @@ -23,6 +23,7 @@ KNOWLEDGE_BASE_ROOT = "/home/user/ui_cache" CONFIG_DIR = "/home/user/ui_cache/configs" + # Get all knowledge bases @kb_app.get(path="/v1/knowledge") async def get_all_knowledge_bases(): @@ -52,10 +53,10 @@ async def create_knowledge_base(knowledge: KnowledgeBaseCreateIn): detail="Knowledge base names must begin with a letter or underscore", ) - if knowledge.active and knowledge.comp_type =="knowledge" and knowledge.comp_subtype == "origin_kb": + if knowledge.active and knowledge.comp_type == "knowledge" and knowledge.comp_subtype == "origin_kb": active_pl.indexer.reinitialize_indexer(knowledge.name) active_pl.update_indexer_to_retriever() - elif knowledge.active and knowledge.comp_subtype == "kbadmin_kb": + elif knowledge.active and knowledge.comp_subtype == "kbadmin_kb": active_pl.retriever.config_kbadmin_milvus(knowledge.name) kb = ctx.knowledgemgr.create_knowledge_base(knowledge) await save_knowledge_to_file() @@ -74,7 +75,10 @@ async def delete_knowledge_base(knowledge_name: str): if rm_kb.comp_type == "knowledge" and rm_kb.comp_subtype == "origin_kb": if active_kb: if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: - raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Cannot delete a running knowledge base.") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Cannot delete a running knowledge base.", + ) kb_file_path = rm_kb.get_file_paths() if kb_file_path: if active_pl.indexer.comp_subtype == "milvus_vector": @@ -84,7 +88,10 @@ async def delete_knowledge_base(knowledge_name: str): 
active_pl.update_indexer_to_retriever() if rm_kb.comp_type == "experience": if rm_kb.experience_active: - raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Cannot delete a running experience knowledge base.") + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="Cannot delete a running experience knowledge base.", + ) else: rm_kb.clear_experiences() result = ctx.knowledgemgr.delete_knowledge_base(knowledge_name) @@ -129,10 +136,15 @@ async def add_file_to_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) - if kb.comp_type =="experience": - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations.") + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." 
+ ) if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Please proceed to the kbadmin interface to perform the operation.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Please proceed to the kbadmin interface to perform the operation.", + ) # Validate and normalize the user-provided path user_path = file_path.local_path normalized_path = os.path.normpath(os.path.join(KNOWLEDGE_BASE_ROOT, user_path)) @@ -184,10 +196,15 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) - if kb.comp_type =="experience": - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations.") + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." 
+ ) if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": - raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Please proceed to the kbadmin interface to perform the operation.") + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail="Please proceed to the kbadmin interface to perform the operation.", + ) active_kb = ctx.knowledgemgr.get_active_knowledge_base() if file_path.local_path in kb.get_file_paths(): kb.remove_file_path(file_path.local_path) @@ -215,9 +232,10 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): except ValueError as e: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) + @kb_app.post("/v1/experience") def get_experience_by_question(req: ExperienceIn): - kb = ctx.knowledgemgr.get_experience_kb() + kb = ctx.knowledgemgr.get_experience_kb() result = kb.get_experience_by_question(req.question) if not result: raise HTTPException(404, detail="Experience not found") @@ -238,24 +256,24 @@ def update_experience(experience: ExperienceIn): kb = ctx.knowledgemgr.get_experience_kb() result = kb.update_experience(experience.question, experience.content) if not result: - raise HTTPException(404, detail=f"Question not found") + raise HTTPException(404, detail="Question not found") return result @kb_app.delete("/v1/experiences") -def delete_experience(req :ExperienceIn): +def delete_experience(req: ExperienceIn): kb = ctx.knowledgemgr.get_experience_kb() success = kb.delete_experience(req.question) if not success: raise HTTPException(404, detail=f"Question {req.question} not found") - return {"message": f"Question deleted"} + return {"message": "Question deleted"} @kb_app.post("/v1/multiple_experiences/check") def check_duplicate_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]]): kb = ctx.knowledgemgr.get_experience_kb() if not kb: - raise HTTPException(404, detail=f"No active experience type knowledge base") + 
raise HTTPException(404, detail="No active experience type knowledge base") all_existing = kb.get_all_experience() existing_questions = {item["question"] for item in all_existing} new_questions = [exp["question"] for exp in experiences if "question" in exp] @@ -263,16 +281,16 @@ def check_duplicate_multiple_experiences(experiences: List[Dict[str, Union[str, if duplicate_questions: return {"code": 2001, "detail": "Duplicate experiences are appended OR overwritten!"} else: - kb.add_multiple_experiences(experiences=experiences, flag=True) - return {"status": "success","detail": "No duplicate experiences, added successfully"} + kb.add_multiple_experiences(experiences=experiences, flag=True) + return {"status": "success", "detail": "No duplicate experiences, added successfully"} @kb_app.post("/v1/multiple_experiences/confirm") -def confirm_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]],flag: bool): +def confirm_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]], flag: bool): kb = ctx.knowledgemgr.get_experience_kb() try: if not kb: - raise HTTPException(404, detail=f"No active experience type knowledge base") + raise HTTPException(404, detail="No active experience type knowledge base") kb.add_multiple_experiences(experiences=experiences, flag=flag) return {"status": "success", "detail": "Experiences added successfully"} except Exception as e: @@ -291,17 +309,14 @@ def add_experiences_from_file(req: DataIn): @kb_app.post(path="/v1/view_sub_questions") async def view_sub_questions(que: ExperienceIn): - active_pl = ctx.get_pipeline_mgr().get_active_pipeline() - CONFIG_DIR - search_config_path = os.path.join(CONFIG_DIR,"search_config.yaml") - search_dir = os.path.join(CONFIG_DIR,"experience_dir/experience.json") - top1_issue, sub_questions_result = await query_search( - user_input=que.question, - search_config_path=search_config_path, - search_dir=search_dir, - pl=active_pl - ) - return sub_questions_result + active_pl = 
ctx.get_pipeline_mgr().get_active_pipeline() + CONFIG_DIR + search_config_path = os.path.join(CONFIG_DIR, "search_config.yaml") + search_dir = os.path.join(CONFIG_DIR, "experience_dir/experience.json") + top1_issue, sub_questions_result = await query_search( + user_input=que.question, search_config_path=search_config_path, search_dir=search_dir, pl=active_pl + ) + return sub_questions_result @kb_app.get("/v1/kbadmin/kbs_list") @@ -310,9 +325,9 @@ def get_kbs_list(): try: if not active_pl or active_pl.indexer.comp_subtype != "kbadmin_indexer": return [] - CONNECTION_ARGS = {"uri": active_pl.indexer.vector_url} + CONNECTION_ARGS = {"uri": active_pl.indexer.vector_url} kbs_list = get_kbs_info(CONNECTION_ARGS) - kb_names = [name for name in kbs_list.keys()] + kb_names = [name for name in kbs_list.keys()] return kb_names except Exception as e: raise HTTPException(status_code=400, detail=str(e)) @@ -372,7 +387,7 @@ async def load_knowledge_from_file(): for Knowledgebase_data in all_data: pipeline_req = KnowledgeBaseCreateIn(**Knowledgebase_data) kb = ctx.knowledgemgr.create_knowledge_base(pipeline_req) - if kb.comp_type =="knowledge" and kb.comp_subtype =="origin_kb": + if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": if Knowledgebase_data["file_map"]: if active_pl.indexer.comp_subtype != "milvus_vector" and Knowledgebase_data["active"]: for file_path in Knowledgebase_data["file_map"].values(): @@ -388,7 +403,7 @@ async def load_knowledge_from_file(): else: for file_path in Knowledgebase_data["file_map"].values(): kb.add_file_path(file_path) - elif kb.comp_subtype =="kbadmin_kb": + elif kb.comp_subtype == "kbadmin_kb": if Knowledgebase_data["active"]: active_pl.retriever.config_kbadmin_milvus(kb.name) except Exception as e: @@ -404,7 +419,15 @@ async def save_knowledge_to_file(): kb_base = ctx.knowledgemgr.get_all_knowledge_bases() knowledgebases_data = [] for kb in kb_base: - kb_json = {"name": kb.name, "description": kb.description, "active": 
kb.active, "file_map": kb.file_map, "comp_type": kb.comp_type, "comp_subtype":kb.comp_subtype, "experience_active": kb.experience_active} + kb_json = { + "name": kb.name, + "description": kb.description, + "active": kb.active, + "file_map": kb.file_map, + "comp_type": kb.comp_type, + "comp_subtype": kb.comp_subtype, + "experience_active": kb.experience_active, + } knowledgebases_data.append(kb_json) json_str = json.dumps(knowledgebases_data, indent=2, ensure_ascii=False) with open(KNOWLEDGEBASE_FILE, "w", encoding="utf-8") as f: @@ -415,6 +438,8 @@ async def save_knowledge_to_file(): all_pipeline_milvus_maps = {"change_pl": []} current_pipeline_kb_map = {} + + async def refresh_milvus_map(milvus_name): current_pipeline_kb_map.clear() knowledge_bases_list = await get_all_knowledge_bases() @@ -423,13 +448,13 @@ async def refresh_milvus_map(milvus_name): continue current_pipeline_kb_map[kb.name] = kb.file_map all_pipeline_milvus_maps[milvus_name] = copy.deepcopy(current_pipeline_kb_map) - milvus_maps_path = os.path.join(CONFIG_DIR,"milvus_maps.json") + milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") with open(milvus_maps_path, "w", encoding="utf-8") as f: json.dump(all_pipeline_milvus_maps, f, ensure_ascii=False, indent=2) def read_milvus_maps(): - milvus_maps_path = os.path.join(CONFIG_DIR,"milvus_maps.json") + milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") global all_pipeline_milvus_maps try: with open(milvus_maps_path, "r", encoding="utf-8") as f: @@ -438,26 +463,33 @@ def read_milvus_maps(): all_pipeline_milvus_maps = {"change_pl": []} return all_pipeline_milvus_maps + def save_change_pl(pl_name): - if pl_name not in all_pipeline_milvus_maps["change_pl"]: + if pl_name not in all_pipeline_milvus_maps["change_pl"]: return all_pipeline_milvus_maps["change_pl"].append(pl_name) + async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): try: if pl_change: save_change_pl(new_active_pl.name) active_kb = 
ctx.knowledgemgr.get_active_knowledge_base() - # Determine whether it is kbadmin type + # Determine whether it is kbadmin type if old_active_pl: - if old_active_pl.retriever.comp_subtype == "kbadmin_retriever" and new_active_pl.retriever.comp_subtype == "kbadmin_retriever": + if ( + old_active_pl.retriever.comp_subtype == "kbadmin_retriever" + and new_active_pl.retriever.comp_subtype == "kbadmin_retriever" + ): if active_kb: if active_kb.comp_subtype == "kbadmin_kb": new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) return True elif old_active_pl.retriever.comp_subtype == "kbadmin_retriever": return True - - milvus_name = (old_active_pl.name + str(old_active_pl.indexer.model_extra["d"]) if old_active_pl else "default_kb") + + milvus_name = ( + old_active_pl.name + str(old_active_pl.indexer.model_extra["d"]) if old_active_pl else "default_kb" + ) if not new_active_pl.status.active: if old_active_pl: if old_active_pl.indexer.comp_subtype == "milvus_vector": @@ -465,11 +497,11 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): return True if not active_kb: return True - if new_active_pl.retriever.comp_subtype=="kbadmin_retriever": - if active_kb: - if active_kb.comp_subtype == "kbadmin_kb": - new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) - return True + if new_active_pl.retriever.comp_subtype == "kbadmin_retriever": + if active_kb: + if active_kb.comp_subtype == "kbadmin_kb": + new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) + return True # Perform milvus data synchronization if new_active_pl.indexer.comp_subtype == "milvus_vector": # Pipeline component state changed diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py index 4af3f6b152..fe6de98d42 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py @@ -3,7 +3,8 @@ import asyncio import json -import os, re +import os +import re import 
weakref from concurrent.futures import ThreadPoolExecutor @@ -12,16 +13,21 @@ from edgecraftrag.base import IndexerType, InferenceType, ModelType, NodeParserType, PostProcessorType, RetrieverType from edgecraftrag.components.benchmark import Benchmark from edgecraftrag.components.generator import QnAGenerator -from edgecraftrag.components.indexer import VectorIndexer, KBADMINIndexer +from edgecraftrag.components.indexer import KBADMINIndexer, VectorIndexer from edgecraftrag.components.node_parser import ( HierarchyNodeParser, + KBADMINParser, SimpleNodeParser, SWindowNodeParser, UnstructedNodeParser, - KBADMINParser ) from edgecraftrag.components.postprocessor import MetadataReplaceProcessor, RerankProcessor -from edgecraftrag.components.retriever import AutoMergeRetriever, SimpleBM25Retriever, VectorSimRetriever, KBadminRetriever +from edgecraftrag.components.retriever import ( + AutoMergeRetriever, + KBadminRetriever, + SimpleBM25Retriever, + VectorSimRetriever, +) from edgecraftrag.context import ctx from fastapi import FastAPI, File, HTTPException, UploadFile, status from pymilvus import connections @@ -72,7 +78,10 @@ async def get_pipeline_benchmarks(name): async def add_pipeline(request: PipelineCreateIn): pattern = re.compile(r"^[a-zA-Z0-9_]+$") if not pattern.fullmatch(request.name): - raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Pipeline name must consist of letters, numbers, and underscores.") + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Pipeline name must consist of letters, numbers, and underscores.", + ) return load_pipeline(request) @@ -199,7 +208,9 @@ def update_pipeline_handler(pl, req): kbadmin_embedding_url = ind.embedding_url KBADMIN_VECTOR_URL = ind.vector_url embed_model = ind.embedding_model.model_id - pl.indexer = KBADMINIndexer(embed_model, ind.indexer_type, kbadmin_embedding_url, KBADMIN_VECTOR_URL) + pl.indexer = KBADMINIndexer( + embed_model, ind.indexer_type, kbadmin_embedding_url, 
KBADMIN_VECTOR_URL + ) case _: pass ctx.get_indexer_mgr().add(pl.indexer) @@ -230,7 +241,7 @@ def update_pipeline_handler(pl, req): else: return Exception("No indexer") case RetrieverType.KBADMIN_RETRIEVER: - pl.retriever = KBadminRetriever(pl.indexer, similarity_top_k=retr.retrieve_topk) + pl.retriever = KBadminRetriever(pl.indexer, similarity_top_k=retr.retrieve_topk) case _: pass # Index is updated to retriever @@ -363,4 +374,3 @@ async def check_milvus(request: MilvusConnectRequest): return {"status": "404", "message": "Milvus connection failed."} except Exception as e: return {"status": "404", "message": f"connection failed: {str(e)}"} - diff --git a/EdgeCraftRAG/edgecraftrag/api_schema.py b/EdgeCraftRAG/edgecraftrag/api_schema.py index 901e4ed0f0..2bdf8dbd4e 100644 --- a/EdgeCraftRAG/edgecraftrag/api_schema.py +++ b/EdgeCraftRAG/edgecraftrag/api_schema.py @@ -83,11 +83,13 @@ class KnowledgeBaseCreateIn(BaseModel): active: Optional[bool] = None comp_type: Optional[str] = "knowledge" comp_subtype: Optional[str] = "origin_kb" - experience_active: Optional[bool] = None + experience_active: Optional[bool] = None + class ExperienceIn(BaseModel): question: str content: list[str] = None + class MilvusConnectRequest(BaseModel): vector_url: str diff --git a/EdgeCraftRAG/edgecraftrag/base.py b/EdgeCraftRAG/edgecraftrag/base.py index ec73d37621..3306afc2ed 100644 --- a/EdgeCraftRAG/edgecraftrag/base.py +++ b/EdgeCraftRAG/edgecraftrag/base.py @@ -48,6 +48,7 @@ class NodeParserType(str, Enum): UNSTRUCTURED = "unstructured" KBADMINPARSER = "kbadmin_parser" + class IndexerType(str, Enum): FAISS_VECTOR = "faiss_vector" @@ -55,6 +56,7 @@ class IndexerType(str, Enum): MILVUS_VECTOR = "milvus_vector" KBADMIN_INDEXER = "kbadmin_indexer" + class RetrieverType(str, Enum): VECTORSIMILARITY = "vectorsimilarity" @@ -116,13 +118,13 @@ class BaseMgr: def __init__(self): self.components = {} - def add(self, comp: BaseComponent, name: str=None): + def add(self, comp: BaseComponent, name: 
str = None): if name: self.components[name] = comp return True self.components[comp.idx] = comp - def append(self, comp: BaseComponent, name: str=None): + def append(self, comp: BaseComponent, name: str = None): key = name if name else comp.idx if key not in self.components: self.components[key] = [] diff --git a/EdgeCraftRAG/edgecraftrag/components/benchmark.py b/EdgeCraftRAG/edgecraftrag/components/benchmark.py index df66ef0e6f..3bf2a7e602 100644 --- a/EdgeCraftRAG/edgecraftrag/components/benchmark.py +++ b/EdgeCraftRAG/edgecraftrag/components/benchmark.py @@ -49,7 +49,14 @@ def cal_input_token_size(self, input_text_list): return input_token_size def init_benchmark_data(self): - pipeline_comp = [CompType.NODEPARSER, CompType.CHUNK_NUM, CompType.RETRIEVER, CompType.POSTPROCESSOR, CompType.QUERYSEARCH, CompType.GENERATOR] + pipeline_comp = [ + CompType.NODEPARSER, + CompType.CHUNK_NUM, + CompType.RETRIEVER, + CompType.POSTPROCESSOR, + CompType.QUERYSEARCH, + CompType.GENERATOR, + ] if self.is_enabled(): with self._idx_lock: self.last_idx += 1 diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index d5c4615e52..70384b7122 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -109,9 +109,10 @@ async def local_stream_generator(lock, llm, prompt_str, unstructured_str): save_history(res) except Exception as e: start_idx = str(e).find("message") + len("message") - result_error = str(e)[start_idx:] + result_error = str(e)[start_idx:] yield f"code:0000{result_error}" + async def stream_generator(llm, prompt_str, unstructured_str): response = llm.stream_complete(prompt_str) collected_data = [] @@ -126,9 +127,9 @@ async def stream_generator(llm, prompt_str, unstructured_str): res = "".join(collected_data) save_history(res) except Exception as e: - start_idx = str(e).find("message") + len("message") - result_error = str(e)[start_idx:] - yield 
f"code:0000{result_error}" + start_idx = str(e).find("message") + len("message") + result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" class QnAGenerator(BaseComponent): @@ -153,7 +154,7 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin llm_instance = llm_model() if llm_instance.model_path is None or llm_instance.model_path == "": self.model_id = llm_instance.model_id - self.model_path = os.path.join("/home/user/models/",os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) + self.model_path = os.path.join("/home/user/models/", os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) else: self.model_id = llm_instance.model_id self.model_path = llm_instance.model_path @@ -237,7 +238,7 @@ def query_transform(self, chat_request, retrieved_nodes, sub_questions=None): chat_history = concat_history(chat_request.messages) # Modify model think status if chat_request.chat_template_kwargs: - if 'enable_thinking' in chat_request.chat_template_kwargs: + if "enable_thinking" in chat_request.chat_template_kwargs: if self.enable_think != chat_request.chat_template_kwargs["enable_thinking"]: self.prompt = self.init_prompt( self.model_path, diff --git a/EdgeCraftRAG/edgecraftrag/components/indexer.py b/EdgeCraftRAG/edgecraftrag/components/indexer.py index 00e0f3f4d6..bd79bb3042 100644 --- a/EdgeCraftRAG/edgecraftrag/components/indexer.py +++ b/EdgeCraftRAG/edgecraftrag/components/indexer.py @@ -80,10 +80,10 @@ class KBADMINIndexer(BaseComponent): # Handled in the kbadmin project def __init__(self, embed_model, vector_type, kbadmin_embedding_url, vector_url="http://localhost:29530"): BaseComponent.__init__( - self, - comp_type=CompType.INDEXER, - comp_subtype=IndexerType.KBADMIN_INDEXER, - ) + self, + comp_type=CompType.INDEXER, + comp_subtype=IndexerType.KBADMIN_INDEXER, + ) self.embed_model = embed_model self.kbadmin_embedding_url = kbadmin_embedding_url self.vector_url = vector_url @@ -91,7 +91,7 @@ def __init__(self, embed_model, vector_type, 
kbadmin_embedding_url, vector_url=" def insert_nodes(self, nodes): return None - def _index_struct(self, nodes): + def _index_struct(self, nodes): return None def run(self, **kwargs) -> Any: @@ -105,5 +105,11 @@ def clear_milvus_collection(self, **kwargs): @model_serializer def ser_model(self): - set = {"idx": self.idx, "indexer_type": self.comp_subtype, "model": {"model_id": self.embed_model}, "kbadmin_embedding_url": self.kbadmin_embedding_url, "vector_url":self.vector_url} - return set \ No newline at end of file + set = { + "idx": self.idx, + "indexer_type": self.comp_subtype, + "model": {"model_id": self.embed_model}, + "kbadmin_embedding_url": self.kbadmin_embedding_url, + "vector_url": self.vector_url, + } + return set diff --git a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py index d3a050ab4c..45ea309fad 100644 --- a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py @@ -1,8 +1,9 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import os, json -from typing import Any, List, Optional, Dict, Union +import json +import os +from typing import Any, Dict, List, Optional, Union from edgecraftrag.base import BaseComponent from pydantic import model_serializer @@ -43,7 +44,7 @@ def ensure_file_exists(self): dir_path = os.path.dirname(self.file_paths[0]) os.makedirs(dir_path, exist_ok=True) if not os.path.exists(self.file_paths[0]): - with open(self.file_paths[0], 'w', encoding='utf-8') as f: + with open(self.file_paths[0], "w", encoding="utf-8") as f: json.dump([], f, ensure_ascii=False, indent=4) def get_all_experience(self) -> List[Dict]: @@ -52,80 +53,82 @@ def get_all_experience(self) -> List[Dict]: self.file_paths.append(experinence_file) if not os.path.isfile(self.file_paths[0]): self.ensure_file_exists() - with open(self.file_paths[0], 'r', encoding='utf-8') as f: + with open(self.file_paths[0], 
"r", encoding="utf-8") as f: return json.load(f) def get_experience_by_question(self, question: str) -> Optional[Dict]: for item in self.get_all_experience(): - if item.get('question') == question: + if item.get("question") == question: return item return None - def add_multiple_experiences(self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True) -> List[Dict]: + def add_multiple_experiences( + self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True + ) -> List[Dict]: all_experiences = self.get_all_experience() result = [] for exp in experiences: - question = exp.get('question') + question = exp.get("question") if not question: raise ValueError("Must exist when uploading question") - content = exp.get('content', []) + content = exp.get("content", []) found = False for item in all_experiences: - if item['question'] == question: + if item["question"] == question: if flag: - item['content'].extend([c for c in content if c not in item['content']]) + item["content"].extend([c for c in content if c not in item["content"]]) else: - item['content'] = content + item["content"] = content result.append(item) found = True break if not found: - new_item = {'question': question, 'content': content} + new_item = {"question": question, "content": content} all_experiences.append(new_item) result.append(new_item) - with open(self.file_paths[0], 'w', encoding='utf-8') as f: + with open(self.file_paths[0], "w", encoding="utf-8") as f: json.dump(all_experiences, f, ensure_ascii=False, indent=4) return result def delete_experience(self, question: str) -> bool: items = self.get_all_experience() - remaining_items = [item for item in items if item.get('question') != question] + remaining_items = [item for item in items if item.get("question") != question] if len(remaining_items) == len(items): return False - with open(self.file_paths[0], 'w', encoding='utf-8') as f: + with open(self.file_paths[0], "w", encoding="utf-8") as f: 
json.dump(remaining_items, f, ensure_ascii=False, indent=4) return True def clear_experiences(self) -> bool: all_experiences = self.get_all_experience() - with open(self.file_paths[0], 'w', encoding='utf-8') as f: + with open(self.file_paths[0], "w", encoding="utf-8") as f: json.dump([], f, ensure_ascii=False, indent=4) return True def update_experience(self, question: str, content: List[str]) -> Optional[Dict]: items = self.get_all_experience() for i, item in enumerate(items): - if item.get('question') == question: - updated_item = {'question': question, 'content': content} + if item.get("question") == question: + updated_item = {"question": question, "content": content} items[i] = updated_item - with open(self.file_paths[0], 'w', encoding='utf-8') as f: + with open(self.file_paths[0], "w", encoding="utf-8") as f: json.dump(items, f, ensure_ascii=False, indent=4) return updated_item return None def add_experiences_from_file(self, file_path: str, flag: bool = False) -> List[Dict]: - if not file_path.endswith('.json'): + if not file_path.endswith(".json"): raise ValueError("File upload type error") try: - with open(file_path, 'r', encoding='utf-8') as f: + with open(file_path, "r", encoding="utf-8") as f: experiences = json.load(f) if not isinstance(experiences, list): raise ValueError("The contents of the file must be a list") return self.add_multiple_experiences(experiences=experiences, flag=flag) except json.JSONDecodeError as e: - raise ValueError(f"File parsing failure") + raise ValueError("File parsing failure") except Exception as e: - raise RuntimeError(f"File Error") + raise RuntimeError("File Error") def calculate_totals(self): if self.comp_type == "knowledge": @@ -150,6 +153,6 @@ def ser_model(self): "description": self.description, "active": self.active, "experience_active": self.experience_active, - "total": self.calculate_totals() + "total": self.calculate_totals(), } return set diff --git a/EdgeCraftRAG/edgecraftrag/components/node_parser.py 
b/EdgeCraftRAG/edgecraftrag/components/node_parser.py index 2491cbf9dd..0bd49b91b4 100644 --- a/EdgeCraftRAG/edgecraftrag/components/node_parser.py +++ b/EdgeCraftRAG/edgecraftrag/components/node_parser.py @@ -169,6 +169,7 @@ def ser_model(self): } return set + class KBADMINParser(BaseComponent): # Handled in the kbadmin project def __init__(self, **kwargs): @@ -181,6 +182,7 @@ def run(self, **kwargs) -> Any: def insert_nodes(self): return None + @model_serializer def ser_model(self): set = { diff --git a/EdgeCraftRAG/edgecraftrag/components/pipeline.py b/EdgeCraftRAG/edgecraftrag/components/pipeline.py index a4014ac6b9..29205a3819 100644 --- a/EdgeCraftRAG/edgecraftrag/components/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/components/pipeline.py @@ -216,7 +216,7 @@ def run_simple_doc(pl: Pipeline, docs: List[Document]) -> Any: if pl.indexer is not None: pl.indexer.insert_nodes(n) if pl.enable_benchmark: - benchmark_data[CompType.NODEPARSER] += (time.perf_counter() - start) + benchmark_data[CompType.NODEPARSER] += time.perf_counter() - start benchmark_data[CompType.CHUNK_NUM] += len(n) pl.benchmark.insert_benchmark_data(benchmark_data) return n @@ -244,23 +244,27 @@ def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() contexts = {} retri_res = [] - active_kb = chat_request.user if chat_request.user else None - enable_rag_retrieval = chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) if chat_request.chat_template_kwargs else True + active_kb = chat_request.user if chat_request.user else None + enable_rag_retrieval = ( + chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) + if chat_request.chat_template_kwargs + else True + ) if not active_kb: enable_rag_retrieval = False elif pl.retriever.comp_subtype == "kbadmin_retriever" and active_kb.comp_subtype == "origin_kb": enable_rag_retrieval = False - elif pl.retriever.comp_subtype != 
"kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": + elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": enable_rag_retrieval = False query = chat_request.messages sub_questionss_result = None - experience_status = True if chat_request.tool_choice == 'auto' else False + experience_status = True if chat_request.tool_choice == "auto" else False if enable_rag_retrieval: start = 0 if pl.enable_benchmark: start = time.perf_counter() if pl.generator.inference_type == InferenceType.VLLM and experience_status: - UI_DIRECTORY ="/home/user/ui_cache" + UI_DIRECTORY = "/home/user/ui_cache" search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") diff --git a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py index ed43a99911..0f0a3e792a 100644 --- a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py +++ b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py @@ -175,8 +175,8 @@ async def compute_score(self, input_pair): def read_json_files(file_path: str) -> dict: result = {} if os.path.isfile(file_path): - with open(file_path, 'r', encoding='utf-8') as f: - result = json.load(f) + with open(file_path, "r", encoding="utf-8") as f: + result = json.load(f) return result @@ -196,14 +196,14 @@ async def query_search(user_input, search_config_path, search_dir, pl): cfg = {} if not os.path.exists(search_config_path): - cfg["query_matcher"] = { - "instructions": "You're an expert in TCB Bonder, your task is to decide the semantic similarity of two queries.\n If they are expressing similar idea, mark as High.\n If they are totally different, mark as Low.\n If some parts of them are similar, some are not, mark as Medium.\n", - "input_template": " {} \n {} \n", - "output_template": "output from {json_levels}.\n", - "json_key": "similarity", - 
"json_levels": ["Low", "Medium", "High"], - "temperature": 1 - } + cfg["query_matcher"] = { + "instructions": "You're an expert in TCB Bonder, your task is to decide the semantic similarity of two queries.\n If they are expressing similar idea, mark as High.\n If they are totally different, mark as Low.\n If some parts of them are similar, some are not, mark as Medium.\n", + "input_template": " {} \n {} \n", + "output_template": "output from {json_levels}.\n", + "json_key": "similarity", + "json_levels": ["Low", "Medium", "High"], + "temperature": 1, + } else: cfg = OmegaConf.load(search_config_path) cfg["query_matcher"]["model_id"] = model_id @@ -221,10 +221,10 @@ async def limited_compute_score(query_matcher, user_input, issue): match_scores.sort(key=lambda x: x[1], reverse=True) # Maximum less than 0.6, we don't use query search. - if match_scores[0][1] < 0.6: + if match_scores[0][1] < 0.6: return top1_issue, sub_questions_result top1_issue = match_scores[0][0] for i in range(len(maintenance_data)): - if maintenance_data[i]['question'] == top1_issue: + if maintenance_data[i]["question"] == top1_issue: sub_questions_result = "\n".join(maintenance_data[i]["content"]) return top1_issue, sub_questions_result diff --git a/EdgeCraftRAG/edgecraftrag/components/retriever.py b/EdgeCraftRAG/edgecraftrag/components/retriever.py index 209fd7b5a0..cdd3fe0bc2 100644 --- a/EdgeCraftRAG/edgecraftrag/components/retriever.py +++ b/EdgeCraftRAG/edgecraftrag/components/retriever.py @@ -1,23 +1,19 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from typing import Any, List, cast +import warnings +from typing import Any, List, Optional, cast -import requests, warnings +import requests from edgecraftrag.base import BaseComponent, CompType, RetrieverType +from langchain_milvus import Milvus +from langchain_openai import OpenAIEmbeddings from llama_index.core.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.core.retrievers 
import AutoMergingRetriever -from llama_index.core.schema import BaseNode +from llama_index.core.schema import BaseNode, Document, NodeWithScore from llama_index.retrievers.bm25 import BM25Retriever from pydantic import model_serializer -from llama_index.core.schema import NodeWithScore - -from langchain_openai import OpenAIEmbeddings -from langchain_milvus import Milvus -from llama_index.core.schema import Document -from typing import List, Optional -from pymilvus import MilvusException -from pymilvus import connections, utility, Collection +from pymilvus import Collection, MilvusException, connections, utility class VectorSimRetriever(BaseComponent, VectorIndexRetriever): @@ -49,7 +45,7 @@ def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk - self.similarity_top_k=top_k + self.similarity_top_k = top_k return self.retrieve(v) return None @@ -149,42 +145,45 @@ def __init__(self, indexer, **kwargs): self.collection_name = None self.topk = kwargs.get("similarity_top_k", 30) self.KBADMIN_MILVUS_URL = indexer.vector_url - self.CONNECTION_ARGS = {"uri": indexer.vector_url} + self.CONNECTION_ARGS = {"uri": indexer.vector_url} self.vector_field = "q_1024_vec" self.text_field = "content_with_weight" self.embedding_model_name = indexer.embed_model self.embedding_url = indexer.kbadmin_embedding_url + "/v3" - self.embedding = OpenAIEmbeddings(model=self.embedding_model_name, api_key="unused", base_url=self.embedding_url, tiktoken_enabled=False, embedding_ctx_length=510) + self.embedding = OpenAIEmbeddings( + model=self.embedding_model_name, + api_key="unused", + base_url=self.embedding_url, + tiktoken_enabled=False, + embedding_ctx_length=510, + ) def config_kbadmin_milvus(self, knowledge_name): collection_name = knowledge_name if not kbs_rev_maps: - get_kbs_info( self.CONNECTION_ARGS) + get_kbs_info(self.CONNECTION_ARGS) collection_name = kbs_rev_maps[collection_name] self.vector_db = Milvus( 
self.embedding, - connection_args = self.CONNECTION_ARGS, - collection_name = collection_name, - - vector_field = self.vector_field, - text_field = self.text_field, + connection_args=self.CONNECTION_ARGS, + collection_name=collection_name, + vector_field=self.vector_field, + text_field=self.text_field, enable_dynamic_field=True, - index_params = {"index_type": "FLAT", "metric_type": "IP", "params": {}} + index_params={"index_type": "FLAT", "metric_type": "IP", "params": {}}, ) - def similarity_search_with_embedding(self, query: str, k) -> list[tuple[Document, float]]: url = self.embedding_url + "/embeddings" - embedding_info = {"model": self.embedding_model_name,"input": query} + embedding_info = {"model": self.embedding_model_name, "input": query} # Get embedding result from embedding service - response = requests.post(url, headers={'Content-Type': 'application/json'}, json=embedding_info) + response = requests.post(url, headers={"Content-Type": "application/json"}, json=embedding_info) embedding_json = response.json() - embedding = embedding_json['data'][0]['embedding'] + embedding = embedding_json["data"][0]["embedding"] docs_and_scores = self.vector_db.similarity_search_with_score_by_vector(embedding=embedding, k=k) relevance_score_fn = self.vector_db._select_relevance_score_fn() return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores] - def run(self, **kwargs) -> Any: query = kwargs["query"] top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk @@ -202,17 +201,15 @@ def run(self, **kwargs) -> Any: @model_serializer def ser_model(self): - set = { - "idx": self.idx, - "retriever_type": self.comp_subtype, - "CONNECTION_ARGS": self.CONNECTION_ARGS - } + set = {"idx": self.idx, "retriever_type": self.comp_subtype, "CONNECTION_ARGS": self.CONNECTION_ARGS} return set # global kbs maps. 
global kbs_rev_maps kbs_rev_maps = {} + + def get_kbs_info(CONNECTION_ARGS): alias = "default" try: @@ -221,53 +218,53 @@ def get_kbs_info(CONNECTION_ARGS): all_kb_infos = {} new_infos = {} for kb in collections: - collection = Collection(kb) - collection.load() + collection = Collection(kb) + collection.load() + try: + if any(field.name == "kb_id" for field in collection.schema.fields): + docs = collection.query( + expr="pk != 0", + output_fields=["kb_name", "kb_id", "docnm_kwd"], + timeout=10, + ) + else: + docs = collection.query( + expr="pk != 0", + output_fields=["filename"], + timeout=10, + ) + collection.release() + except MilvusException as e: + continue + this_kbinfo = {} + for doc in docs: try: - if any(field.name == 'kb_id' for field in collection.schema.fields): - docs = collection.query( - expr="pk != 0", - output_fields=["kb_name", "kb_id", "docnm_kwd"], - timeout=10, - ) + if "kb_name" in doc: + if not this_kbinfo: + this_kbinfo["name"] = doc["kb_name"] + this_kbinfo["uuid"] = doc["kb_id"] + this_kbinfo["files"] = set([doc["docnm_kwd"]]) + else: + this_kbinfo["files"].add(doc["docnm_kwd"]) else: - docs = collection.query( - expr="pk != 0", - output_fields=["filename"], - timeout=10, - ) - collection.release() - except MilvusException as e: - continue - this_kbinfo = {} - for doc in docs: - try: - if 'kb_name' in doc: - if not this_kbinfo: - this_kbinfo['name'] = doc['kb_name'] - this_kbinfo['uuid'] = doc['kb_id'] - this_kbinfo['files'] = set([doc['docnm_kwd']]) - else: - this_kbinfo['files'].add(doc['docnm_kwd']) + if not this_kbinfo: + this_kbinfo["name"] = kb + this_kbinfo["uuid"] = "" + this_kbinfo["files"] = set([doc["filename"]]) else: - if not this_kbinfo: - this_kbinfo['name'] = kb - this_kbinfo['uuid'] = "" - this_kbinfo['files'] = set([doc['filename']]) - else: - this_kbinfo['files'].add(doc['filename']) - except KeyError: - this_kbinfo = None - break - if this_kbinfo: - unique_files = list(this_kbinfo['files']) - this_kbinfo['files'] = 
unique_files - new_infos[kb] = this_kbinfo + this_kbinfo["files"].add(doc["filename"]) + except KeyError: + this_kbinfo = None + break + if this_kbinfo: + unique_files = list(this_kbinfo["files"]) + this_kbinfo["files"] = unique_files + new_infos[kb] = this_kbinfo all_kb_infos.update(new_infos) kbs_rev_maps.clear() for kb_id in all_kb_infos: - kbs_rev_maps[all_kb_infos[kb_id]['name']] = kb_id + kbs_rev_maps[all_kb_infos[kb_id]["name"]] = kb_id return kbs_rev_maps finally: if connections.has_connection(alias): - connections.disconnect(alias) \ No newline at end of file + connections.disconnect(alias) diff --git a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py index 0b155e498b..c956ee316d 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py @@ -32,24 +32,29 @@ def search_parser_change(self, pl, req): if pl.node_parser.comp_subtype != req.node_parser.parser_type: return True if pl.node_parser.comp_subtype == req.node_parser.parser_type: - if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: - if (pl.node_parser.chunk_size != req.node_parser.chunk_size - or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): - pl_change = True + if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: + if ( + pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap + ): + pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.SENTENCEWINDOW: - if pl.node_parser.window_size != req.node_parser.window_size: + if pl.node_parser.window_size != req.node_parser.window_size: pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.HIERARCHY: if pl.node_parser.chunk_sizes != req.node_parser.chunk_sizes: - pl_change = True + pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.UNSTRUCTURED: - if (pl.node_parser.chunk_size != req.node_parser.chunk_size - or 
pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): - pl_change = True + if ( + pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap + ): + pl_change = True except: return False return pl_change + class IndexerMgr(BaseMgr): def __init__(self): diff --git a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py index 4eabfcc021..d6dbba3ead 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py @@ -50,26 +50,34 @@ def active_experience(self, knowledge: KnowledgeBaseCreateIn): if kb.comp_type != "experience": raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Knowledge type cannot be active") self.active_experience_idx = kb.idx if knowledge.experience_active else None - if kb.experience_active != knowledge.experience_active: + if kb.experience_active != knowledge.experience_active: for idx, comp in self.components.items(): if isinstance(comp, Knowledge): comp.experience_active = idx == self.active_experience_idx return kb - def create_knowledge_base(self, knowledge: KnowledgeBaseCreateIn) -> Knowledge: for _, kb in self.components.items(): if kb.name == knowledge.name: raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="The knowledge base already exists.") - if knowledge.comp_type == "experience": + if knowledge.comp_type == "experience": for idx, kb in self.components.items(): - if kb.comp_type =='experience': - raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created.") + if kb.comp_type == "experience": + raise HTTPException( + status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created." 
+ ) if knowledge.comp_type == "experience": knowledge.active = False if knowledge.active is None: knowledge.active = False - kb = Knowledge(name=knowledge.name, description=knowledge.description, active=knowledge.active, comp_type=knowledge.comp_type, comp_subtype=knowledge.comp_subtype, experience_active=knowledge.experience_active) + kb = Knowledge( + name=knowledge.name, + description=knowledge.description, + active=knowledge.active, + comp_type=knowledge.comp_type, + comp_subtype=knowledge.comp_subtype, + experience_active=knowledge.experience_active, + ) self.add(kb) if knowledge.active: self.active_knowledge(knowledge) @@ -89,11 +97,11 @@ def update_knowledge_base(self, knowledge) -> Knowledge: kb.description = knowledge.description if knowledge.active is not None and kb.active != knowledge.active: kb = self.active_knowledge(knowledge) - if kb.comp_type == "experience": - if knowledge.description is not None: - kb.description = knowledge.description - if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: - kb = self.active_experience(knowledge) + if kb.comp_type == "experience": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: + kb = self.active_experience(knowledge) return "Knowledge base update successfully" def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: @@ -104,6 +112,5 @@ def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: def get_experience_kb(self): for idx, kb in self.components.items(): - if kb.comp_type =='experience': + if kb.comp_type == "experience": return kb - diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt index 3dd0877a82..289ba3ef4d 100755 --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -3,6 +3,8 @@ EbookLib>=0.18 faiss-cpu>=1.8.0.post1 
html2text>=2025.4.15 langchain-core==0.3.60 +langchain-milvus +langchain-openai llama-index==0.12.36 llama-index-core==0.12.37 llama-index-embeddings-openvino==0.5.2 @@ -23,5 +25,3 @@ python-docx==1.1.2 unstructured unstructured[pdf] werkzeug==3.1.3 -langchain-openai -langchain-milvus \ No newline at end of file diff --git a/EdgeCraftRAG/tools/quick_start.sh b/EdgeCraftRAG/tools/quick_start.sh index 0510b1637d..01da5fcc53 100755 --- a/EdgeCraftRAG/tools/quick_start.sh +++ b/EdgeCraftRAG/tools/quick_start.sh @@ -71,10 +71,10 @@ function start_vllm_services() { for (( x=0; x { showLoading: true, }); }; -export const requestExperienceConfirm = ( - flag: Boolean, - data: EmptyArrayType -) => { +export const requestExperienceConfirm = (flag: Boolean, data: EmptyArrayType) => { return request({ url: `/v1/multiple_experiences/confirm?flag=${flag}`, method: "post", diff --git a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts index bf12c4f331..335908b6c9 100644 --- a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts @@ -106,6 +106,4 @@ export const requestUrlVllm = (data: Object) => { }); }; -export const importUrl = `${ - import.meta.env.VITE_API_URL -}v1/settings/pipelines/import`; +export const importUrl = `${import.meta.env.VITE_API_URL}v1/settings/pipelines/import`; diff --git a/EdgeCraftRAG/ui/vue/src/api/request.ts b/EdgeCraftRAG/ui/vue/src/api/request.ts index ce1ee0ca5e..44f6cf2051 100644 --- a/EdgeCraftRAG/ui/vue/src/api/request.ts +++ b/EdgeCraftRAG/ui/vue/src/api/request.ts @@ -27,7 +27,7 @@ service.interceptors.request.use( }, (error) => { return Promise.reject(error); - } + }, ); // response interceptor @@ -40,11 +40,7 @@ service.interceptors.response.use( const antNotification = serviceManager.getService("antNotification"); if (antNotification) - antNotification( - "success", - i18n.global.t("common.success"), - i18n.global.t(config.successMsg) - ); + 
antNotification("success", i18n.global.t("common.success"), i18n.global.t(config.successMsg)); } return Promise.resolve(res); }, @@ -60,11 +56,10 @@ service.interceptors.response.use( errorMessage = error.message; } const antNotification = serviceManager.getService("antNotification"); - if (antNotification) - antNotification("error", i18n.global.t("common.error"), errorMessage); + if (antNotification) antNotification("error", i18n.global.t("common.error"), errorMessage); return Promise.reject(error); - } + }, ); export default service; diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css index e62f3bfbba..5163bc195e 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css @@ -1,8 +1,9 @@ @font-face { font-family: "iconfont"; /* Project id 4784207 */ - src: url('iconfont.woff2?t=1757469597873') format('woff2'), - url('iconfont.woff?t=1757469597873') format('woff'), - url('iconfont.ttf?t=1757469597873') format('truetype'); + src: + url("iconfont.woff2?t=1757469597873") format("woff2"), + url("iconfont.woff?t=1757469597873") format("woff"), + url("iconfont.ttf?t=1757469597873") format("truetype"); } .iconfont { @@ -240,4 +241,3 @@ .icon-active:before { content: "\e795"; } - diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js index e5e61851fd..5e96151e2e 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js @@ -1 +1,68 @@ -window._iconfont_svg_string_4784207='',(c=>{var l=(a=(a=document.getElementsByTagName("script"))[a.length-1]).getAttribute("data-injectcss"),a=a.getAttribute("data-disable-injectsvg");if(!a){var 
h,t,i,o,v,m=function(l,a){a.parentNode.insertBefore(l,a)};if(l&&!c.__iconfont__svg__cssinject__){c.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(l){console&&console.log(l)}}h=function(){var l,a=document.createElement("div");a.innerHTML=c._iconfont_svg_string_4784207,(a=a.getElementsByTagName("svg")[0])&&(a.setAttribute("aria-hidden","true"),a.style.position="absolute",a.style.width=0,a.style.height=0,a.style.overflow="hidden",a=a,(l=document.body).firstChild?m(a,l.firstChild):l.appendChild(a))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(t=function(){document.removeEventListener("DOMContentLoaded",t,!1),h()},document.addEventListener("DOMContentLoaded",t,!1)):document.attachEvent&&(i=h,o=c.document,v=!1,s(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,e())})}function e(){v||(v=!0,i())}function s(){try{o.documentElement.doScroll("left")}catch(l){return void setTimeout(s,50)}e()}})(window); \ No newline at end of file +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +(window._iconfont_svg_string_4784207 = + ''), + ((c) => { + var l = (a = (a = document.getElementsByTagName("script"))[a.length - 1]).getAttribute("data-injectcss"), + a = a.getAttribute("data-disable-injectsvg"); + if (!a) { + var h, + t, + i, + o, + v, + m = function (l, a) { + a.parentNode.insertBefore(l, a); + }; + if (l && !c.__iconfont__svg__cssinject__) { + c.__iconfont__svg__cssinject__ = !0; + try { + document.write( + "", + ); + } catch (l) { + console && console.log(l); + } + } + (h = function () { + var l, + a = document.createElement("div"); + (a.innerHTML = c._iconfont_svg_string_4784207), + (a = a.getElementsByTagName("svg")[0]) && + (a.setAttribute("aria-hidden", "true"), + (a.style.position = "absolute"), + (a.style.width = 0), + (a.style.height = 0), + (a.style.overflow = "hidden"), + (a = a), + (l = document.body).firstChild ? 
m(a, l.firstChild) : l.appendChild(a)); + }), + document.addEventListener + ? ~["complete", "loaded", "interactive"].indexOf(document.readyState) + ? setTimeout(h, 0) + : ((t = function () { + document.removeEventListener("DOMContentLoaded", t, !1), h(); + }), + document.addEventListener("DOMContentLoaded", t, !1)) + : document.attachEvent && + ((i = h), + (o = c.document), + (v = !1), + s(), + (o.onreadystatechange = function () { + "complete" == o.readyState && ((o.onreadystatechange = null), e()); + })); + } + function e() { + v || ((v = !0), i()); + } + function s() { + try { + o.documentElement.doScroll("left"); + } catch (l) { + return void setTimeout(s, 50); + } + e(); + } + })(window); diff --git a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts index f6e2bab3ce..d6fd8da012 100644 --- a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts +++ b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts @@ -1,3 +1,6 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + /* eslint-disable */ /* prettier-ignore */ // @ts-nocheck @@ -6,83 +9,98 @@ // biome-ignore lint: disable export {} declare global { - const EffectScope: typeof import('vue')['EffectScope'] - const acceptHMRUpdate: typeof import('pinia')['acceptHMRUpdate'] - const computed: typeof import('vue')['computed'] - const createApp: typeof import('vue')['createApp'] - const createPinia: typeof import('pinia')['createPinia'] - const customRef: typeof import('vue')['customRef'] - const defineAsyncComponent: typeof import('vue')['defineAsyncComponent'] - const defineComponent: typeof import('vue')['defineComponent'] - const defineStore: typeof import('pinia')['defineStore'] - const effectScope: typeof import('vue')['effectScope'] - const getActivePinia: typeof import('pinia')['getActivePinia'] - const getCurrentInstance: typeof import('vue')['getCurrentInstance'] - const getCurrentScope: typeof import('vue')['getCurrentScope'] - const h: typeof import('vue')['h'] 
- const inject: typeof import('vue')['inject'] - const isProxy: typeof import('vue')['isProxy'] - const isReactive: typeof import('vue')['isReactive'] - const isReadonly: typeof import('vue')['isReadonly'] - const isRef: typeof import('vue')['isRef'] - const mapActions: typeof import('pinia')['mapActions'] - const mapGetters: typeof import('pinia')['mapGetters'] - const mapState: typeof import('pinia')['mapState'] - const mapStores: typeof import('pinia')['mapStores'] - const mapWritableState: typeof import('pinia')['mapWritableState'] - const markRaw: typeof import('vue')['markRaw'] - const nextTick: typeof import('vue')['nextTick'] - const onActivated: typeof import('vue')['onActivated'] - const onBeforeMount: typeof import('vue')['onBeforeMount'] - const onBeforeRouteLeave: typeof import('vue-router')['onBeforeRouteLeave'] - const onBeforeRouteUpdate: typeof import('vue-router')['onBeforeRouteUpdate'] - const onBeforeUnmount: typeof import('vue')['onBeforeUnmount'] - const onBeforeUpdate: typeof import('vue')['onBeforeUpdate'] - const onDeactivated: typeof import('vue')['onDeactivated'] - const onErrorCaptured: typeof import('vue')['onErrorCaptured'] - const onMounted: typeof import('vue')['onMounted'] - const onRenderTracked: typeof import('vue')['onRenderTracked'] - const onRenderTriggered: typeof import('vue')['onRenderTriggered'] - const onScopeDispose: typeof import('vue')['onScopeDispose'] - const onServerPrefetch: typeof import('vue')['onServerPrefetch'] - const onUnmounted: typeof import('vue')['onUnmounted'] - const onUpdated: typeof import('vue')['onUpdated'] - const onWatcherCleanup: typeof import('vue')['onWatcherCleanup'] - const provide: typeof import('vue')['provide'] - const reactive: typeof import('vue')['reactive'] - const readonly: typeof import('vue')['readonly'] - const ref: typeof import('vue')['ref'] - const resolveComponent: typeof import('vue')['resolveComponent'] - const setActivePinia: typeof import('pinia')['setActivePinia'] - const 
setMapStoreSuffix: typeof import('pinia')['setMapStoreSuffix'] - const shallowReactive: typeof import('vue')['shallowReactive'] - const shallowReadonly: typeof import('vue')['shallowReadonly'] - const shallowRef: typeof import('vue')['shallowRef'] - const storeToRefs: typeof import('pinia')['storeToRefs'] - const toRaw: typeof import('vue')['toRaw'] - const toRef: typeof import('vue')['toRef'] - const toRefs: typeof import('vue')['toRefs'] - const toValue: typeof import('vue')['toValue'] - const triggerRef: typeof import('vue')['triggerRef'] - const unref: typeof import('vue')['unref'] - const useAttrs: typeof import('vue')['useAttrs'] - const useCssModule: typeof import('vue')['useCssModule'] - const useCssVars: typeof import('vue')['useCssVars'] - const useId: typeof import('vue')['useId'] - const useLink: typeof import('vue-router')['useLink'] - const useModel: typeof import('vue')['useModel'] - const useRoute: typeof import('vue-router')['useRoute'] - const useRouter: typeof import('vue-router')['useRouter'] - const useSlots: typeof import('vue')['useSlots'] - const useTemplateRef: typeof import('vue')['useTemplateRef'] - const watch: typeof import('vue')['watch'] - const watchEffect: typeof import('vue')['watchEffect'] - const watchPostEffect: typeof import('vue')['watchPostEffect'] - const watchSyncEffect: typeof import('vue')['watchSyncEffect'] + const EffectScope: (typeof import("vue"))["EffectScope"]; + const acceptHMRUpdate: (typeof import("pinia"))["acceptHMRUpdate"]; + const computed: (typeof import("vue"))["computed"]; + const createApp: (typeof import("vue"))["createApp"]; + const createPinia: (typeof import("pinia"))["createPinia"]; + const customRef: (typeof import("vue"))["customRef"]; + const defineAsyncComponent: (typeof import("vue"))["defineAsyncComponent"]; + const defineComponent: (typeof import("vue"))["defineComponent"]; + const defineStore: (typeof import("pinia"))["defineStore"]; + const effectScope: (typeof import("vue"))["effectScope"]; 
+ const getActivePinia: (typeof import("pinia"))["getActivePinia"]; + const getCurrentInstance: (typeof import("vue"))["getCurrentInstance"]; + const getCurrentScope: (typeof import("vue"))["getCurrentScope"]; + const h: (typeof import("vue"))["h"]; + const inject: (typeof import("vue"))["inject"]; + const isProxy: (typeof import("vue"))["isProxy"]; + const isReactive: (typeof import("vue"))["isReactive"]; + const isReadonly: (typeof import("vue"))["isReadonly"]; + const isRef: (typeof import("vue"))["isRef"]; + const mapActions: (typeof import("pinia"))["mapActions"]; + const mapGetters: (typeof import("pinia"))["mapGetters"]; + const mapState: (typeof import("pinia"))["mapState"]; + const mapStores: (typeof import("pinia"))["mapStores"]; + const mapWritableState: (typeof import("pinia"))["mapWritableState"]; + const markRaw: (typeof import("vue"))["markRaw"]; + const nextTick: (typeof import("vue"))["nextTick"]; + const onActivated: (typeof import("vue"))["onActivated"]; + const onBeforeMount: (typeof import("vue"))["onBeforeMount"]; + const onBeforeRouteLeave: (typeof import("vue-router"))["onBeforeRouteLeave"]; + const onBeforeRouteUpdate: (typeof import("vue-router"))["onBeforeRouteUpdate"]; + const onBeforeUnmount: (typeof import("vue"))["onBeforeUnmount"]; + const onBeforeUpdate: (typeof import("vue"))["onBeforeUpdate"]; + const onDeactivated: (typeof import("vue"))["onDeactivated"]; + const onErrorCaptured: (typeof import("vue"))["onErrorCaptured"]; + const onMounted: (typeof import("vue"))["onMounted"]; + const onRenderTracked: (typeof import("vue"))["onRenderTracked"]; + const onRenderTriggered: (typeof import("vue"))["onRenderTriggered"]; + const onScopeDispose: (typeof import("vue"))["onScopeDispose"]; + const onServerPrefetch: (typeof import("vue"))["onServerPrefetch"]; + const onUnmounted: (typeof import("vue"))["onUnmounted"]; + const onUpdated: (typeof import("vue"))["onUpdated"]; + const onWatcherCleanup: (typeof import("vue"))["onWatcherCleanup"]; 
+ const provide: (typeof import("vue"))["provide"]; + const reactive: (typeof import("vue"))["reactive"]; + const readonly: (typeof import("vue"))["readonly"]; + const ref: (typeof import("vue"))["ref"]; + const resolveComponent: (typeof import("vue"))["resolveComponent"]; + const setActivePinia: (typeof import("pinia"))["setActivePinia"]; + const setMapStoreSuffix: (typeof import("pinia"))["setMapStoreSuffix"]; + const shallowReactive: (typeof import("vue"))["shallowReactive"]; + const shallowReadonly: (typeof import("vue"))["shallowReadonly"]; + const shallowRef: (typeof import("vue"))["shallowRef"]; + const storeToRefs: (typeof import("pinia"))["storeToRefs"]; + const toRaw: (typeof import("vue"))["toRaw"]; + const toRef: (typeof import("vue"))["toRef"]; + const toRefs: (typeof import("vue"))["toRefs"]; + const toValue: (typeof import("vue"))["toValue"]; + const triggerRef: (typeof import("vue"))["triggerRef"]; + const unref: (typeof import("vue"))["unref"]; + const useAttrs: (typeof import("vue"))["useAttrs"]; + const useCssModule: (typeof import("vue"))["useCssModule"]; + const useCssVars: (typeof import("vue"))["useCssVars"]; + const useId: (typeof import("vue"))["useId"]; + const useLink: (typeof import("vue-router"))["useLink"]; + const useModel: (typeof import("vue"))["useModel"]; + const useRoute: (typeof import("vue-router"))["useRoute"]; + const useRouter: (typeof import("vue-router"))["useRouter"]; + const useSlots: (typeof import("vue"))["useSlots"]; + const useTemplateRef: (typeof import("vue"))["useTemplateRef"]; + const watch: (typeof import("vue"))["watch"]; + const watchEffect: (typeof import("vue"))["watchEffect"]; + const watchPostEffect: (typeof import("vue"))["watchPostEffect"]; + const watchSyncEffect: (typeof import("vue"))["watchSyncEffect"]; } // for type re-export declare global { // @ts-ignore - export type { Component, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, 
ExtractPublicPropTypes, InjectionKey, PropType, Ref, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue' - import('vue') + export type { + Component, + ComponentPublicInstance, + ComputedRef, + DirectiveBinding, + ExtractDefaultPropTypes, + ExtractPropTypes, + ExtractPublicPropTypes, + InjectionKey, + PropType, + Ref, + MaybeRef, + MaybeRefOrGetter, + VNode, + WritableComputedRef, + } from "vue"; + import("vue"); } diff --git a/EdgeCraftRAG/ui/vue/src/components.d.ts b/EdgeCraftRAG/ui/vue/src/components.d.ts index 6ec287f7f1..35e756d199 100644 --- a/EdgeCraftRAG/ui/vue/src/components.d.ts +++ b/EdgeCraftRAG/ui/vue/src/components.d.ts @@ -1,8 +1,11 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + /* eslint-disable */ // @ts-nocheck // Generated by unplugin-vue-components // Read more: https://github.com/vuejs/core/pull/3399 -export {} +export {}; /* prettier-ignore */ declare module 'vue' { diff --git a/EdgeCraftRAG/ui/vue/src/i18n/en.ts b/EdgeCraftRAG/ui/vue/src/i18n/en.ts index decfaec3f3..86ff43773b 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/en.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/en.ts @@ -28,8 +28,7 @@ export default { reset: "Reset", uploadTip: "Click or drag file to this area to upload", loading: "Loading", - waitTip: - "Please wait patiently and do not refresh the page during this period.", + waitTip: "Please wait patiently and do not refresh the page during this period.", }, system: { title: "System Status", @@ -66,8 +65,7 @@ export default { activated: "Activated", inactive: "Inactive", isActive: "Activated", - pipelineFormatTip: - "Supports JSON format, with file size not exceeding 10M.", + pipelineFormatTip: "Supports JSON format, with file size not exceeding 10M.", importSuccTip: "Files upload successful!", importErrTip: "Files upload failed!", name: "Name", @@ -77,8 +75,7 @@ export default { deactivateTip: "Are you sure deactivate this pipeline?", activeTip: "Are you sure activate this pipeline?", 
deleteTip: "Are you sure delete this pipeline?", - notActivatedTip: - "There is no available pipeline. Please create or activate it first.", + notActivatedTip: "There is no available pipeline. Please create or activate it first.", validErr: "Form validation failed !", config: { basic: "Basic", @@ -119,11 +116,9 @@ export default { nameValid3: "The name only supports letters, numbers, and underscores.", nodeParserType: "Please select Node Parser Type", chunkSizeValid1: "Please select Chunk Size", - chunkSizeValid2: - "The value of Chunk Size cannot be less than Chunk Overlap", + chunkSizeValid2: "The value of Chunk Size cannot be less than Chunk Overlap", chunkOverlapValid1: "Please select Chunk Overlap", - chunkOverlapValid2: - "The value of Chunk Overlap cannot be greater than Chunk Size", + chunkOverlapValid2: "The value of Chunk Overlap cannot be greater than Chunk Size", windowSize: "Please select Chunk Window Size", indexerType: "Please select Indexer Type", embedding: "Please select embedding Model", @@ -153,36 +148,29 @@ export default { vllmUrlValid3: "URL cannot be accessed", vllmUrlValid4: "Test passed !", vllmUrlValid5: "The URL has not passed verification yet", - nodeParserTypeTip: - "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", - indexerTypeTip: - "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", - retrieverTypeTip: - "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", + nodeParserTypeTip: "Both Indexer Type and Retriever Type will be set to kbadmin at the same time", + indexerTypeTip: "Both Node Parser Type and Retriever Type will be set to kbadmin at the same time", + retrieverTypeTip: "Both Node Parser Type and Indexer Type will be set to kbadmin at the same time", retrieverChangeTip: "Please go to the Indexer stage to complete the data", indexerTypeValid1: "Indexer type can only select kbadmin", modelRequired: "Please enter embedding model url", 
modelFormat: "Please enter the correct url", - retrieverValid: - "Please return to the Indexer stage to supplement information.", + retrieverValid: "Please return to the Indexer stage to supplement information.", }, desc: { name: "The name identifier of the pipeline", nodeParserType: "Node parsing type when you use RAG", chunkSize: "Size of each chunk for processing", chunkOverlap: "Overlap size between chunks", - windowSize: - "The number of sentences on each side of a sentence to capture", - indexerType: - "The type of index structure responsible for building based on the parsed nodes", + windowSize: "The number of sentences on each side of a sentence to capture", + indexerType: "The type of index structure responsible for building based on the parsed nodes", embedding: "Embed the text data to represent it and build a vector index", embeddingUrl: "Connecting embedding model url", embeddingDevice: "The device used by the embedding model", retrieverType: "The retrieval type used when retrieving relevant nodes from the index according to the user's experience", topk: "The number of top k results to return", - postProcessorType: - "Select postprocessors for post-processing of the context", + postProcessorType: "Select postprocessors for post-processing of the context", rerank: "Rerank Model", rerankDevice: "Rerank run device", generatorType: "Local inference generator or vLLM generator", @@ -190,21 +178,17 @@ export default { llmDevice: "The device used by the LLM", weights: "Model weight", reranker: "The model for reranking.", - metadataReplace: - "Used to replace the node content with a field from the node metadata.", + metadataReplace: "Used to replace the node content with a field from the node metadata.", vectorsimilarity: "retrieval according to vector similarity", - autoMerge: - "This retriever will try to merge context into parent context.", + autoMerge: "This retriever will try to merge context into parent context.", bm25: "A BM25 retriever that uses the 
+      uploadTip: "Supports PDF, Word, TXT, Doc, HTML, PPT formats, with a single file size not exceeding 200M",
Go upload your files.", name: "Name", des: "Description", activated: "Activated", nameValid1: "Please input knowledge base name", nameValid2: "Name should be between 2 and 30 characters", - nameValid3: - "Alphanumeric and underscore only, starting with a letter or underscore.", + nameValid3: "Alphanumeric and underscore only, starting with a letter or underscore.", desValid: "Please input knowledge base description", activeValid: "Please select whether to activate", uploadValid: "Single file size not exceeding 200M.", diff --git a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts index 0f0ef3ef2e..e9ab76ef55 100644 --- a/EdgeCraftRAG/ui/vue/src/i18n/zh.ts +++ b/EdgeCraftRAG/ui/vue/src/i18n/zh.ts @@ -51,8 +51,7 @@ export default { step1: "创建 Pipeline", step1Tip: "定制您的 RAG 流程,释放 AI 信息处理的最大能力。", step2: "前往对话", - step2Tip: - "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", + step2Tip: "开始与智能聊天机器人互动,它支持文件上传和信息检索,帮助您更高效地完成任务。", create: "去创建", }, pipeline: { @@ -159,8 +158,7 @@ export default { }, desc: { name: "Pipeline的名称标识,用于区分不同工作流", - nodeParserType: - "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", + nodeParserType: "RAG 处理时的文本拆分策略,支持简单句子、层次结构等解析方式", chunkSize: "文本处理时的单块数据大小", chunkOverlap: "相邻数据块的重叠部分大小,确保跨块语义连续性", windowSize: "每个节点捕获的上下文句子窗口大小,用于增强语义完整性", @@ -186,8 +184,7 @@ export default { vector: "矢量存储索引", simple: "解析文本,优先选择完整的句子。", hierarchical: "使用NodeParser将文档拆分为递归层次结构的节点。", - sentencewindow: - "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", + sentencewindow: "将文档分割成节点,每个节点代表一个句子。每个节点包含一个来自元数据中周围句子的窗口", unstructured: "一个处理非结构化数据的组件", milvusVector: "矢量索引存储在Milvus中", vector_url: "测试Milvus地址是否可用", @@ -211,8 +208,7 @@ export default { desc: { top_n: "重排后结果的数量", temperature: "数值越高,输出越多样化", - top_p: - "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", + top_p: "从累积概率超过 top_p 的最小标记集中采样,设为1则禁用并从所有标记取样。", top_k: "从概率前k的 Token 中采样", penalty: "抑制重复的系数,设为1.0表示禁用", maxToken: "生成回答的最大Token数量", @@ -242,8 +238,7 @@ export default { edit: "编辑知识库", deleteTip: "您确定要删除此知识库吗?此操作不可恢复。", 
activeTip: "您确定要激活此知识库吗?", - uploadTip: - "支持 PDF、Word、TXT、Doc、HTML、PPT 格式,单个文件大小不超过 200M。", + uploadTip: "支持 PDF、Word、TXT、Doc、HTML、PPT 格式,单个文件大小不超过 200M。", notFileTip: "您还没有上传任何文件,点击“上传”按钮开始添加内容吧~", name: "名称", des: "描述", diff --git a/EdgeCraftRAG/ui/vue/src/main.ts b/EdgeCraftRAG/ui/vue/src/main.ts index 94c5debfeb..25e2b930cf 100644 --- a/EdgeCraftRAG/ui/vue/src/main.ts +++ b/EdgeCraftRAG/ui/vue/src/main.ts @@ -25,8 +25,7 @@ const setDayjsLocale = (locale: string) => { const body = document.documentElement as HTMLElement; -if (Local.get("themeInfo")?.theme === "dark") - body.setAttribute("data-theme", "dark"); +if (Local.get("themeInfo")?.theme === "dark") body.setAttribute("data-theme", "dark"); else body.setAttribute("data-theme", ""); // watch i18n update dayjs language @@ -35,7 +34,7 @@ watch( (newLocale) => { setDayjsLocale(newLocale); }, - { immediate: true } + { immediate: true }, ); const app = createApp(App); diff --git a/EdgeCraftRAG/ui/vue/src/utils/common.ts b/EdgeCraftRAG/ui/vue/src/utils/common.ts index 8e7e80810a..97e421cb66 100644 --- a/EdgeCraftRAG/ui/vue/src/utils/common.ts +++ b/EdgeCraftRAG/ui/vue/src/utils/common.ts @@ -6,8 +6,7 @@ import { customNotification } from "./notification"; import { Local } from "./storage"; export const useNotification = () => { - const customNotificationInjected = - inject("customNotification"); + const customNotificationInjected = inject("customNotification"); if (!customNotificationInjected) { throw new Error("Notification service not provided"); @@ -22,11 +21,7 @@ export const formatDecimals = (num: number, decimalPlaces: number = 2) => { return Math.round(num * factor) / factor; }; -export const formatCapitalize = ( - string: string, - start: number = 0, - length: number = 1 -) => { +export const formatCapitalize = (string: string, start: number = 0, length: number = 1) => { const end = start + length; const part1 = string.slice(0, start); const part2 = string.slice(start, end).toUpperCase(); @@ -48,11 +43,7 
@@ export const getChatSessionId = (): string => { }; const generateFallbackId = (): string => { - if ( - typeof self !== "undefined" && - self.crypto && - self.crypto.getRandomValues - ) { + if (typeof self !== "undefined" && self.crypto && self.crypto.getRandomValues) { const array = new Uint32Array(2); self.crypto.getRandomValues(array); const randomPart = Array.from(array) @@ -60,8 +51,6 @@ const generateFallbackId = (): string => { .join(""); return `${Date.now()}_${randomPart}`; } else { - throw new Error( - "No secure random number generator available for session ID generation." - ); + throw new Error("No secure random number generator available for session ID generation."); } }; diff --git a/EdgeCraftRAG/ui/vue/src/utils/notification.ts b/EdgeCraftRAG/ui/vue/src/utils/notification.ts index d4e6bef319..151141e0ea 100644 --- a/EdgeCraftRAG/ui/vue/src/utils/notification.ts +++ b/EdgeCraftRAG/ui/vue/src/utils/notification.ts @@ -3,12 +3,7 @@ import { h } from "vue"; import { notification } from "ant-design-vue"; -import { - CheckCircleFilled, - CloseCircleFilled, - ExclamationCircleFilled, - InfoCircleFilled, -} from "@ant-design/icons-vue"; +import { CheckCircleFilled, CloseCircleFilled, ExclamationCircleFilled, InfoCircleFilled } from "@ant-design/icons-vue"; const getNotificationIcon = (type: string) => { switch (type) { @@ -27,12 +22,10 @@ const getNotificationIcon = (type: string) => { export const customNotification = ( type: "success" | "warning" | "error" | "info", message: string, - description: string | undefined + description: string | undefined, ) => { const { icon, color } = getNotificationIcon(type); - const styledIcon = icon - ? h(icon, { style: { color: `var(${color})` } }) - : null; + const styledIcon = icon ? 
h(icon, { style: { color: `var(${color})` } }) : null; notification[type]({ message, diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts index 655d935fa3..de0cc61809 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts @@ -7,7 +7,7 @@ export const handleMessageSend = async ( url: string, postData: any, onDisplay: (data: any) => void, - onEnd?: () => void + onEnd?: () => void, ): Promise => { let reader: ReadableStreamDefaultReader | undefined; diff --git a/EdgeCraftRAG/ui/vue/vite.config.ts b/EdgeCraftRAG/ui/vue/vite.config.ts index ff27303aa5..23fda73b7b 100644 --- a/EdgeCraftRAG/ui/vue/vite.config.ts +++ b/EdgeCraftRAG/ui/vue/vite.config.ts @@ -66,10 +66,7 @@ const viteConfig = defineConfig((mode: ConfigEnv) => { preprocessorOptions: { less: { javascriptEnabled: true, - additionalData: `@import "${path.resolve( - __dirname, - "src/theme/index.less" - )}";`, + additionalData: `@import "${path.resolve(__dirname, "src/theme/index.less")}";`, }, }, }, From 6c5eb7731d32843beba5fdbb061f2a566b108fe8 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Wed, 24 Sep 2025 12:54:09 +0800 Subject: [PATCH 05/16] minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh | 2 +- EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh index 192e18b67c..50a6fb319f 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh @@ -35,7 +35,7 @@ TENSOR_PARALLEL_SIZE=1 SELECTED_XPU_0=0 vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" LLM_MODEL="Qwen/Qwen3-8B" -LLM_MODEL_PATH="${HOME}/qwen/" +LLM_MODEL_PATH="${HOME}/${LLM_MODEL}" 
NGINX_CONFIG_PATH="$WORKPATH/nginx/nginx.conf" VLLM_IMAGE_TAG="0.8.3-b20" DP_NUM=1 diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh index d0b808df05..1f8b6bb774 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh @@ -31,7 +31,6 @@ VLLM_SERVICE_PORT_B60=8086 TP=1 vLLM_ENDPOINT="http://${HOST_IP}:${VLLM_SERVICE_PORT_B60}" LLM_MODEL="Qwen/Qwen3-8B" -LLM_MODEL_PATH="${HOME}/qwen/" VLLM_IMAGE_TAG="1.0" DP=1 From 052da8c467774c04d9d23da28b6b7330b6b00b86 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Wed, 24 Sep 2025 15:03:33 +0800 Subject: [PATCH 06/16] add config file for CI Signed-off-by: Yongbozzz --- EdgeCraftRAG/tests/configs/test_data.json | 3 ++ .../configs/test_pipeline_ipex_vllm.json | 45 +++++++++++++++++++ .../configs/test_pipeline_local_llm.json | 44 ++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 EdgeCraftRAG/tests/configs/test_data.json create mode 100644 EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json create mode 100644 EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json diff --git a/EdgeCraftRAG/tests/configs/test_data.json b/EdgeCraftRAG/tests/configs/test_data.json new file mode 100644 index 0000000000..648ae9624d --- /dev/null +++ b/EdgeCraftRAG/tests/configs/test_data.json @@ -0,0 +1,3 @@ +{ + "text": "A test case for the rag pipeline. The test id is 1234567890. There are several tests in this test case. The first test is for node parser. There are 3 types of node parsers. Their names are Aa, Bb and Cc. The second test is for indexer. The indexer will do the indexing for the given nodes. The last test is for retriever. Retrieving text is based on similarity search." 
+} diff --git a/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json b/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json new file mode 100644 index 0000000000..097309d7e2 --- /dev/null +++ b/EdgeCraftRAG/tests/configs/test_pipeline_ipex_vllm.json @@ -0,0 +1,45 @@ +{ + "name": "rag_test_local_llm", + "node_parser": { + "chunk_size": 400, + "chunk_overlap": 48, + "parser_type": "simple" + }, + "indexer": { + "indexer_type": "faiss_vector", + "embedding_model": { + "model_id": "BAAI/bge-small-en-v1.5", + "model_path": "./models/BAAI/bge-small-en-v1.5", + "device": "auto", + "weight": "INT4" + } + }, + "retriever": { + "retriever_type": "vectorsimilarity", + "retrieve_topk": 30 + }, + "postprocessor": [ + { + "processor_type": "reranker", + "top_n": 2, + "reranker_model": { + "model_id": "BAAI/bge-reranker-large", + "model_path": "./models/BAAI/bge-reranker-large", + "device": "auto", + "weight": "INT4" + } + } + ], + "generator": { + "inference_type": "vllm", + "model": { + "model_id": "Qwen/Qwen3-8B", + "model_path": "", + "device": "", + "weight": "" + }, + "prompt_path": "./default_prompt.txt", + "vllm_endpoint": "" + }, + "active": "True" +} diff --git a/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json b/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json new file mode 100644 index 0000000000..39ee2ef0f1 --- /dev/null +++ b/EdgeCraftRAG/tests/configs/test_pipeline_local_llm.json @@ -0,0 +1,44 @@ +{ + "name": "rag_test_local_llm", + "node_parser": { + "chunk_size": 400, + "chunk_overlap": 48, + "parser_type": "simple" + }, + "indexer": { + "indexer_type": "faiss_vector", + "embedding_model": { + "model_id": "BAAI/bge-small-en-v1.5", + "model_path": "./models/BAAI/bge-small-en-v1.5", + "device": "auto", + "weight": "INT4" + } + }, + "retriever": { + "retriever_type": "vectorsimilarity", + "retrieve_topk": 30 + }, + "postprocessor": [ + { + "processor_type": "reranker", + "top_n": 2, + "reranker_model": { + "model_id": "BAAI/bge-reranker-large", + 
"model_path": "./models/BAAI/bge-reranker-large", + "device": "auto", + "weight": "INT4" + } + } + ], + "generator": { + "model": { + "model_id": "Qwen/Qwen3-8B", + "model_path": "./models/Qwen/Qwen3-8B/INT4_compressed_weights", + "device": "auto", + "weight": "INT4" + }, + "prompt_path": "./default_prompt.txt", + "inference_type": "local" + }, + "active": "True" +} From 9d3d4dd7621479a89bc7fd8c9e0e0b2af9d7f78d Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Wed, 24 Sep 2025 16:46:55 +0800 Subject: [PATCH 07/16] minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh index 50a6fb319f..700dd92990 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh @@ -35,7 +35,7 @@ TENSOR_PARALLEL_SIZE=1 SELECTED_XPU_0=0 vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" LLM_MODEL="Qwen/Qwen3-8B" -LLM_MODEL_PATH="${HOME}/${LLM_MODEL}" +LLM_MODEL_PATH="${MODEL_PATH}/${LLM_MODEL}" NGINX_CONFIG_PATH="$WORKPATH/nginx/nginx.conf" VLLM_IMAGE_TAG="0.8.3-b20" DP_NUM=1 From abbbeea265a99e364fbb1a16726869796530990e Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Thu, 25 Sep 2025 12:12:39 +0800 Subject: [PATCH 08/16] minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh | 2 +- EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh index 700dd92990..ae0beee885 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh @@ -32,7 +32,7 @@ NGINX_PORT_0=8100 NGINX_PORT_1=8100 VLLM_SERVICE_PORT_0=8100 TENSOR_PARALLEL_SIZE=1 -SELECTED_XPU_0=0 +SELECTED_XPU_0=1 vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" 
LLM_MODEL="Qwen/Qwen3-8B" LLM_MODEL_PATH="${MODEL_PATH}/${LLM_MODEL}" diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh index 1f8b6bb774..df97274a89 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc_b60.sh @@ -135,6 +135,7 @@ function validate_megaservice() { function stop_docker() { cd $WORKPATH/docker_compose/intel/gpu/arc + export MODEL_PATH="${HOME}/models" docker compose -f $COMPOSE_FILE down } From 385b2d5f1c1c3e95748452a9f440930db35540ff Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Thu, 25 Sep 2025 13:41:27 +0800 Subject: [PATCH 09/16] minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh index ae0beee885..700dd92990 100755 --- a/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh +++ b/EdgeCraftRAG/tests/test_compose_vllm_on_arc.sh @@ -32,7 +32,7 @@ NGINX_PORT_0=8100 NGINX_PORT_1=8100 VLLM_SERVICE_PORT_0=8100 TENSOR_PARALLEL_SIZE=1 -SELECTED_XPU_0=1 +SELECTED_XPU_0=0 vLLM_ENDPOINT="http://${HOST_IP}:${NGINX_PORT}" LLM_MODEL="Qwen/Qwen3-8B" LLM_MODEL_PATH="${MODEL_PATH}/${LLM_MODEL}" From fc7c3a79ae160d9a9304bb41f931bf68365ab0fe Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Fri, 26 Sep 2025 17:05:51 +0800 Subject: [PATCH 10/16] sync with ec-rag 25.09 Signed-off-by: Yongbozzz --- EdgeCraftRAG/Dockerfile.server | 43 ++-- .../docker_compose/intel/gpu/arc/README.md | 5 +- .../intel/gpu/arc/compose_vllm_b60.yaml | 2 +- EdgeCraftRAG/docs/API_Guide.md | 2 +- EdgeCraftRAG/docs/Advanced_Setup.md | 2 +- EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md | 4 +- EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py | 4 +- EdgeCraftRAG/edgecraftrag/api/v1/data.py | 69 ++++--- .../edgecraftrag/api/v1/knowledge_base.py | 180 +++++++--------- 
EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py | 30 +-- EdgeCraftRAG/edgecraftrag/api/v1/prompt.py | 2 + EdgeCraftRAG/edgecraftrag/api_schema.py | 4 +- EdgeCraftRAG/edgecraftrag/base.py | 6 +- .../edgecraftrag/components/benchmark.py | 9 +- .../edgecraftrag/components/generator.py | 25 +-- .../edgecraftrag/components/indexer.py | 20 +- .../edgecraftrag/components/knowledge_base.py | 51 +++-- .../edgecraftrag/components/node_parser.py | 2 - .../edgecraftrag/components/pipeline.py | 16 +- .../components/query_preprocess.py | 56 +++-- .../edgecraftrag/components/retriever.py | 141 ++++++------- .../edgecraftrag/controllers/compmgr.py | 23 +-- .../edgecraftrag/controllers/filemgr.py | 58 +++--- .../controllers/knowledge_basemgr.py | 33 ++- EdgeCraftRAG/edgecraftrag/requirements.txt | 4 +- EdgeCraftRAG/edgecraftrag/utils.py | 4 - EdgeCraftRAG/tools/quick_start.sh | 170 ++++++++++++++- EdgeCraftRAG/ui/vue/components.d.ts | 5 +- EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts | 7 + .../ui/vue/src/api/knowledgeBase/index.ts | 5 +- EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts | 4 +- EdgeCraftRAG/ui/vue/src/api/request.ts | 13 +- .../ui/vue/src/assets/iconFont/iconfont.css | 8 +- .../ui/vue/src/assets/iconFont/iconfont.js | 69 +------ EdgeCraftRAG/ui/vue/src/auto-imports.d.ts | 168 +++++++-------- EdgeCraftRAG/ui/vue/src/components.d.ts | 5 +- .../ui/vue/src/components/PartialLoading.vue | 1 + EdgeCraftRAG/ui/vue/src/i18n/en.ts | 63 ++++-- EdgeCraftRAG/ui/vue/src/i18n/zh.ts | 24 ++- EdgeCraftRAG/ui/vue/src/layout/Header.vue | 2 +- EdgeCraftRAG/ui/vue/src/main.ts | 5 +- EdgeCraftRAG/ui/vue/src/utils/clipboard.ts | 99 +++++++++ EdgeCraftRAG/ui/vue/src/utils/common.ts | 19 +- EdgeCraftRAG/ui/vue/src/utils/notification.ts | 13 +- .../views/chatbot/components/Chatbot/Chat.vue | 78 ++++--- .../components/Chatbot/MessageItem.vue | 195 +++++++++++++++++- .../chatbot/components/Chatbot/SseService.ts | 2 +- .../KnowledgeBase/DetailComponent.vue | 1 + .../KnowledgeBase/KnowledgeDetail.vue 
| 127 +++++++----- EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts | 2 + EdgeCraftRAG/ui/vue/vite.config.ts | 5 +- 51 files changed, 1155 insertions(+), 730 deletions(-) create mode 100644 EdgeCraftRAG/ui/vue/src/utils/clipboard.ts diff --git a/EdgeCraftRAG/Dockerfile.server b/EdgeCraftRAG/Dockerfile.server index 4ac52700a8..24af2affb9 100755 --- a/EdgeCraftRAG/Dockerfile.server +++ b/EdgeCraftRAG/Dockerfile.server @@ -1,49 +1,38 @@ FROM python:3.11-slim SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -RUN apt-get update -y && apt-get install -y --no-install-recommends --fix-missing \ - libjemalloc-dev \ - libmagic1 \ - libglib2.0-0 \ - poppler-utils \ - tesseract-ocr - -RUN apt-get update && apt-get install -y gnupg wget git -RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \ - gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg -RUN echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | \ - tee /etc/apt/sources.list.d/intel-gpu-jammy.list -RUN apt-get update && apt-get install -y \ - intel-opencl-icd intel-level-zero-gpu \ - intel-level-zero-gpu-raytracing \ - intel-media-va-driver-non-free libmfx1 libmfxgen1 libvpl2 \ - libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ - libglapi-mesa libgles2-mesa-dev libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ - mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo +RUN apt-get update && apt-get install -y gnupg2 wget git +RUN apt-get remove -y libze-intel-gpu1 libigc1 libigdfcl1 libze-dev || true; \ +    apt-get update; \ +    apt-get install -y curl +RUN curl -sL 'https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&op=get&search=0x0C0E6AF955CE463C03FC51574D098D70AFBE5E1F' | tee /etc/apt/trusted.gpg.d/driver.asc +RUN echo -e "Types: deb\nURIs: https://ppa.launchpadcontent.net/kobuk-team/intel-graphics/ubuntu/\nSuites: plucky\nComponents: 
main\nSigned-By: /etc/apt/trusted.gpg.d/driver.asc" > /etc/apt/sources.list.d/driver.sources +RUN apt update && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-dev intel-ocloc libze-intel-gpu-raytracing RUN useradd -m -s /bin/bash user && \ - mkdir -p /home/user && \ - chown -R user /home/user/ +    mkdir -p /home/user && \ +    chown -R user /home/user/ RUN mkdir /templates && \ - chown -R user /templates +    chown -R user /templates COPY ./edgecraftrag/prompt_template/default_prompt.txt /templates/ RUN chown -R user /templates/default_prompt.txt COPY ./edgecraftrag /home/user/edgecraftrag -RUN mkdir -p /home/user/ui_cache +RUN mkdir -p /home/user/ui_cache ENV UI_UPLOAD_PATH=/home/user/ui_cache USER user WORKDIR /home/user/edgecraftrag -RUN pip install --no-cache-dir --upgrade pip setuptools==70.0.0 && \ - pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt +RUN pip3 install --no-cache-dir --upgrade setuptools==70.0.0 --break-system-packages && \ +    pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt --break-system-packages + +RUN pip3 install --no-cache-dir docarray==0.40.0 --break-system-packages WORKDIR /home/user/ RUN git clone https://github.com/openvinotoolkit/openvino.genai.git genai ENV PYTHONPATH="$PYTHONPATH:/home/user/genai/tools/llm_bench" -ENTRYPOINT ["python", "-m", "edgecraftrag.server"] \ No newline at end of file +ENTRYPOINT ["python3", "-m", "edgecraftrag.server"] diff --git a/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md b/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md index 94d920bbc4..b95dd62716 100755 --- a/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md +++ b/EdgeCraftRAG/docker_compose/intel/gpu/arc/README.md @@ -95,8 +95,7 @@ For more 
+# Besides MILVUS_ENABLED and CHAT_HISTORY_ROUND, below environments are exposed for vLLM config, you can change them to your preference:
-``` +``` \ No newline at end of file diff --git a/EdgeCraftRAG/docs/Advanced_Setup.md b/EdgeCraftRAG/docs/Advanced_Setup.md index ed6a080310..136b34803f 100644 --- a/EdgeCraftRAG/docs/Advanced_Setup.md +++ b/EdgeCraftRAG/docs/Advanced_Setup.md @@ -208,6 +208,6 @@ Model preparation is the same as vLLM inference section, please refer to [Prepar This section is the same as default vLLM inference section, please refer to [Prepare env variables and configurations](../docker_compose/intel/gpu/arc/README.md#prepare-env-variables-and-configurations) and [Start Edge Craft RAG Services with Docker Compose](../docker_compose/intel/gpu/arc/README.md#deploy-the-service-on-arc-a770-using-docker-compose) -### 2. Access Kbadmin UI +### 2. Access Kbadmin UI please refer to [ChatQnA with Kbadmin in UI](./Explore_Edge_Craft_RAG.md#chatqna-with-kbadmin-in-ui) diff --git a/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md b/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md index 0a5a91ba5e..624e21a010 100644 --- a/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md +++ b/EdgeCraftRAG/docs/Explore_Edge_Craft_RAG.md @@ -34,7 +34,7 @@ After knowledge base creation, you can upload the documents for retrieval. Then, you can submit messages in the chat box in `Chat` page. ![chat_with_rag](../assets/img/chatqna.png) -## ChatQnA with Kbadmin in UI +## ChatQnA with Kbadmin in UI ### Kbadmin Pipeline @@ -52,4 +52,4 @@ Please select 'kbadmin' in `Type`and select kb name from the kbs you created in ![upload_data](../assets/img/kbadmin_kb.png) Then, you can submit messages in the chat box in `Chat` page. 
-![chat_with_rag](../assets/img/chatqna.png) +![chat_with_rag](../assets/img/chatqna.png) \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py b/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py index 7e0f3aa831..60df3b50eb 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/chatqna.py @@ -38,7 +38,7 @@ async def chatqna(request: ChatCompletionRequest): active_kb = ctx.knowledgemgr.get_active_knowledge_base() request.user = active_kb if active_kb else None if experience_kb: - request.tool_choice = "auto" if experience_kb.experience_active else "none" + request.tool_choice = 'auto' if experience_kb.experience_active else 'none' generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: request.model = generator.model_id @@ -62,7 +62,7 @@ async def ragqna(request: ChatCompletionRequest): active_kb = ctx.knowledgemgr.get_active_knowledge_base() request.user = active_kb if active_kb else None if experience_kb: - request.tool_choice = "auto" if experience_kb.experience_active else "none" + request.tool_choice = 'auto' if experience_kb.experience_active else 'none' generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: request.model = generator.model_id diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/data.py b/EdgeCraftRAG/edgecraftrag/api/v1/data.py index fad8b5c0b8..3836454231 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/data.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/data.py @@ -13,19 +13,19 @@ # Upload a text or files @data_app.post(path="/v1/data") -async def add_data(request: DataIn): - pl = ctx.get_pipeline_mgr().get_active_pipeline() +async def add_data(request: DataIn, docs_name: str = None): + active_pl = ctx.get_pipeline_mgr().get_active_pipeline() docs = [] if request.text is not None: docs.extend(ctx.get_file_mgr().add_text(text=request.text)) if request.local_path is not None: - 
docs.extend(ctx.get_file_mgr().add_files(docs=request.local_path)) + docs.extend(ctx.get_file_mgr().add_files(docs=request.local_path, docs_name=docs_name)) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=docs) - if pl.indexer.comp_subtype != "kbadmin_indexer": + if active_pl.indexer.comp_subtype != "kbadmin_indexer": if nodelist is None or len(nodelist) == 0: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="File not found") - ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) + ctx.get_node_mgr().add_nodes(active_pl.node_parser.idx, nodelist) return "Done" @@ -33,16 +33,24 @@ async def add_data(request: DataIn): @data_app.post(path="/v1/data/reindex") async def redindex_data(): pl = ctx.get_pipeline_mgr().get_active_pipeline() - + kb = ctx.get_knowledge_mgr().get_active_knowledge_base() + if kb: + kb_name = kb.name + docs_name = kb_name + pl.name + str(pl.indexer.d) + else: + kb_name = None + docs_name = None ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) - pl.indexer.reinitialize_indexer() + pl.indexer.reinitialize_indexer(kb_name) pl.update_indexer_to_retriever() - all_docs = ctx.get_file_mgr().get_all_docs() + all_docs = [] + docs_list =ctx.get_file_mgr().get_kb_files_by_name(docs_name) + for docs_file in docs_list: + all_docs.extend(docs_file.documents) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=all_docs) if nodelist is not None and len(nodelist) > 0: ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) - return "Done" @@ -50,13 +58,15 @@ async def redindex_data(): @data_app.post(path="/v1/data/files") async def add_files(request: FilesIn): docs = [] + pl = ctx.get_pipeline_mgr().get_active_pipeline() + kb = ctx.get_knowledge_mgr().get_active_knowledge_base() + docs_name = kb.name + pl.name + str(pl.indexer.d) if request.local_paths is not None: - docs.extend(ctx.get_file_mgr().add_files(docs=request.local_paths)) + docs.extend(ctx.get_file_mgr().add_files(docs=request.local_path, 
kb_name=docs_name)) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=docs) if nodelist is None or len(nodelist) == 0: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="File not found") - pl = ctx.get_pipeline_mgr().get_active_pipeline() ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) return "Done" @@ -64,37 +74,50 @@ async def add_files(request: FilesIn): # GET files @data_app.get(path="/v1/data/files") async def get_files(): - return ctx.get_file_mgr().get_files() + return ctx.get_file_mgr().get_all_docs() # GET a file @data_app.get(path="/v1/data/files/{name}") -async def get_file_docs(name): - return ctx.get_file_mgr().get_file_by_name_or_id(name) +async def get_kb_files_by_name(name): + return ctx.get_file_mgr().get_kb_files_by_name(name) # DELETE a file @data_app.delete(path="/v1/data/files/{name}") -async def delete_file(name): - if ctx.get_file_mgr().del_file(name): - pl = ctx.get_pipeline_mgr().get_active_pipeline() - +async def delete_file(kb_name, file_path): + pl = ctx.get_pipeline_mgr().get_active_pipeline() + docs_name = kb_name + pl.name + str(pl.indexer.d) + if ctx.get_file_mgr().del_file(docs_name, file_path): # Current solution: reindexing all docs after deleting one file # TODO: delete the nodes related to the file ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) - pl.indexer.reinitialize_indexer() + pl.indexer.reinitialize_indexer(kb_name) pl.update_indexer_to_retriever() - - all_docs = ctx.get_file_mgr().get_all_docs() + all_docs = ctx.get_file_mgr().get_file_by_name(docs_name) nodelist = ctx.get_pipeline_mgr().run_data_prepare(docs=all_docs) if nodelist is not None and len(nodelist) > 0: ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) + return f"File is deleted" + else: + return f"File not found" + +# DELETE a file +@data_app.delete(path="/v1/data/all_files/{name}") +async def delete_all_file(name): + if ctx.get_file_mgr().del_kb_file(name): + pl = 
ctx.get_pipeline_mgr().get_active_pipeline() + + # Current solution: reindexing all docs after deleting one file + # TODO: delete the nodes related to the file + ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) + pl.indexer.reinitialize_indexer() + pl.update_indexer_to_retriever() return f"File {name} is deleted" else: return f"File {name} not found" - # Upload & save a file from UI @data_app.post(path="/v1/data/file/{file_name}") async def upload_file(file_name: str, file: UploadFile = File(...)): @@ -122,4 +145,4 @@ async def upload_file(file_name: str, file: UploadFile = File(...)): except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to upload file: {str(e)}" - ) + ) \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py index fa0516272b..872eb3357a 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/knowledge_base.py @@ -5,15 +5,15 @@ import json import os import re -from typing import Dict, List, Union +from typing import List, Dict, Union from edgecraftrag.api.v1.data import add_data -from edgecraftrag.api_schema import DataIn, ExperienceIn, KnowledgeBaseCreateIn +from edgecraftrag.api_schema import DataIn, KnowledgeBaseCreateIn, ExperienceIn from edgecraftrag.base import IndexerType -from edgecraftrag.components.query_preprocess import query_search -from edgecraftrag.components.retriever import get_kbs_info from edgecraftrag.context import ctx from edgecraftrag.utils import compare_mappings +from edgecraftrag.components.retriever import get_kbs_info +from edgecraftrag.components.query_preprocess import query_search from fastapi import FastAPI, HTTPException, status from pymilvus.exceptions import MilvusException @@ -23,7 +23,6 @@ KNOWLEDGE_BASE_ROOT = "/home/user/ui_cache" CONFIG_DIR = "/home/user/ui_cache/configs" - # Get all knowledge bases 
@kb_app.get(path="/v1/knowledge") async def get_all_knowledge_bases(): @@ -53,10 +52,10 @@ async def create_knowledge_base(knowledge: KnowledgeBaseCreateIn): detail="Knowledge base names must begin with a letter or underscore", ) - if knowledge.active and knowledge.comp_type == "knowledge" and knowledge.comp_subtype == "origin_kb": + if knowledge.active and knowledge.comp_type =="knowledge" and knowledge.comp_subtype == "origin_kb": active_pl.indexer.reinitialize_indexer(knowledge.name) active_pl.update_indexer_to_retriever() - elif knowledge.active and knowledge.comp_subtype == "kbadmin_kb": + elif knowledge.active and knowledge.comp_subtype == "kbadmin_kb": active_pl.retriever.config_kbadmin_milvus(knowledge.name) kb = ctx.knowledgemgr.create_knowledge_base(knowledge) await save_knowledge_to_file() @@ -75,10 +74,7 @@ async def delete_knowledge_base(knowledge_name: str): if rm_kb.comp_type == "knowledge" and rm_kb.comp_subtype == "origin_kb": if active_kb: if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Cannot delete a running knowledge base.", - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Cannot delete a running knowledge base.") kb_file_path = rm_kb.get_file_paths() if kb_file_path: if active_pl.indexer.comp_subtype == "milvus_vector": @@ -88,10 +84,7 @@ async def delete_knowledge_base(knowledge_name: str): active_pl.update_indexer_to_retriever() if rm_kb.comp_type == "experience": if rm_kb.experience_active: - raise HTTPException( - status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, - detail="Cannot delete a running experience knowledge base.", - ) + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Cannot delete a running experience knowledge base.") else: rm_kb.clear_experiences() result = ctx.knowledgemgr.delete_knowledge_base(knowledge_name) @@ -107,6 +100,10 @@ async def 
update_knowledge_base(knowledge: KnowledgeBaseCreateIn): try: kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge.name) active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + if active_pl.indexer.comp_subtype == "kbadmin_indexer" and kb.comp_subtype != "kbadmin_kb": + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="The kbadmin pipeline must correspond to the kbadmin type kb.") + if active_pl.indexer.comp_subtype != "kbadmin_indexer" and kb.comp_subtype == "kbadmin_kb": + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Not kbadmin pipeline cannot active kbadmin type kb.") if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": if active_pl.indexer.comp_subtype != "milvus_vector": if knowledge.active and knowledge.active != kb.active: @@ -136,15 +133,10 @@ async def add_file_to_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) - if kb.comp_type == "experience": - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." 
- ) + if kb.comp_type =="experience": + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations.") if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail="Please proceed to the kbadmin interface to perform the operation.", - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Please proceed to the kbadmin interface to perform the operation.") # Validate and normalize the user-provided path user_path = file_path.local_path normalized_path = os.path.normpath(os.path.join(KNOWLEDGE_BASE_ROOT, user_path)) @@ -196,15 +188,10 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): try: active_pl = ctx.get_pipeline_mgr().get_active_pipeline() kb = ctx.knowledgemgr.get_knowledge_base_by_name_or_id(knowledge_name) - if kb.comp_type == "experience": - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations." 
- ) + if kb.comp_type =="experience": + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="The experience type cannot perform file operations.") if kb.comp_subtype == "kbadmin_kb" or active_pl.indexer.comp_subtype == "kbadmin_indexer": - raise HTTPException( - status_code=status.HTTP_404_NOT_FOUND, - detail="Please proceed to the kbadmin interface to perform the operation.", - ) + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Please proceed to the kbadmin interface to perform the operation.") active_kb = ctx.knowledgemgr.get_active_knowledge_base() if file_path.local_path in kb.get_file_paths(): kb.remove_file_path(file_path.local_path) @@ -213,17 +200,9 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): kb_file_path = kb.get_file_paths() if active_pl.indexer.comp_subtype == "milvus_vector": - if active_kb: - if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: - await remove_file_handler(kb_file_path, knowledge_name) - else: - await remove_file_handler(kb_file_path, knowledge_name) - active_pl.indexer.reinitialize_indexer(active_kb.name) - active_pl.update_indexer_to_retriever() - else: - await remove_file_handler(kb_file_path, knowledge_name) - active_pl.indexer.reinitialize_indexer(active_kb.name) - active_pl.update_indexer_to_retriever() + docs_name = kb.name + active_pl.name + str(active_pl.indexer.d) + docs_list =ctx.get_file_mgr().del_file(docs_name, file_path.local_path) + active_pl.indexer.delete(docs_list) elif active_kb: if active_kb.name == knowledge_name or active_kb.idx == knowledge_name: await update_knowledge_base_handler(kb_file_path, knowledge_name) @@ -232,10 +211,9 @@ async def remove_file_from_knowledge_base(knowledge_name, file_path: DataIn): except ValueError as e: raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e)) - @kb_app.post("/v1/experience") def get_experience_by_question(req: ExperienceIn): - kb = 
ctx.knowledgemgr.get_experience_kb() + kb = ctx.knowledgemgr.get_experience_kb() result = kb.get_experience_by_question(req.question) if not result: raise HTTPException(404, detail="Experience not found") @@ -256,24 +234,24 @@ def update_experience(experience: ExperienceIn): kb = ctx.knowledgemgr.get_experience_kb() result = kb.update_experience(experience.question, experience.content) if not result: - raise HTTPException(404, detail="Question not found") + raise HTTPException(404, detail=f"Question not found") return result @kb_app.delete("/v1/experiences") -def delete_experience(req: ExperienceIn): +def delete_experience(req :ExperienceIn): kb = ctx.knowledgemgr.get_experience_kb() success = kb.delete_experience(req.question) if not success: raise HTTPException(404, detail=f"Question {req.question} not found") - return {"message": "Question deleted"} + return {"message": f"Question deleted"} @kb_app.post("/v1/multiple_experiences/check") def check_duplicate_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]]): kb = ctx.knowledgemgr.get_experience_kb() if not kb: - raise HTTPException(404, detail="No active experience type knowledge base") + raise HTTPException(404, detail=f"No active experience type knowledge base") all_existing = kb.get_all_experience() existing_questions = {item["question"] for item in all_existing} new_questions = [exp["question"] for exp in experiences if "question" in exp] @@ -281,16 +259,16 @@ def check_duplicate_multiple_experiences(experiences: List[Dict[str, Union[str, if duplicate_questions: return {"code": 2001, "detail": "Duplicate experiences are appended OR overwritten!"} else: - kb.add_multiple_experiences(experiences=experiences, flag=True) - return {"status": "success", "detail": "No duplicate experiences, added successfully"} + kb.add_multiple_experiences(experiences=experiences, flag=True) + return {"status": "success","detail": "No duplicate experiences, added successfully"} 
@kb_app.post("/v1/multiple_experiences/confirm") -def confirm_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]], flag: bool): +def confirm_multiple_experiences(experiences: List[Dict[str, Union[str, List[str]]]],flag: bool): kb = ctx.knowledgemgr.get_experience_kb() try: if not kb: - raise HTTPException(404, detail="No active experience type knowledge base") + raise HTTPException(404, detail=f"No active experience type knowledge base") kb.add_multiple_experiences(experiences=experiences, flag=flag) return {"status": "success", "detail": "Experiences added successfully"} except Exception as e: @@ -309,14 +287,17 @@ def add_experiences_from_file(req: DataIn): @kb_app.post(path="/v1/view_sub_questions") async def view_sub_questions(que: ExperienceIn): - active_pl = ctx.get_pipeline_mgr().get_active_pipeline() - CONFIG_DIR - search_config_path = os.path.join(CONFIG_DIR, "search_config.yaml") - search_dir = os.path.join(CONFIG_DIR, "experience_dir/experience.json") - top1_issue, sub_questions_result = await query_search( - user_input=que.question, search_config_path=search_config_path, search_dir=search_dir, pl=active_pl - ) - return sub_questions_result + active_pl = ctx.get_pipeline_mgr().get_active_pipeline() + CONFIG_DIR + search_config_path = os.path.join(CONFIG_DIR,"search_config.yaml") + search_dir = os.path.join(CONFIG_DIR,"experience_dir/experience.json") + top1_issue, sub_questions_result = await query_search( + user_input=que.question, + search_config_path=search_config_path, + search_dir=search_dir, + pl=active_pl + ) + return sub_questions_result @kb_app.get("/v1/kbadmin/kbs_list") @@ -325,9 +306,9 @@ def get_kbs_list(): try: if not active_pl or active_pl.indexer.comp_subtype != "kbadmin_indexer": return [] - CONNECTION_ARGS = {"uri": active_pl.indexer.vector_url} + CONNECTION_ARGS = {"uri": active_pl.indexer.vector_url} kbs_list = get_kbs_info(CONNECTION_ARGS) - kb_names = [name for name in kbs_list.keys()] + kb_names = [name for 
name in kbs_list.keys()] return kb_names except Exception as e: raise HTTPException(status_code=400, detail=str(e)) @@ -339,8 +320,9 @@ async def update_knowledge_base_handler(file_path=None, knowledge_name: str = "d raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Please activate pipeline") pl = ctx.get_pipeline_mgr().get_active_pipeline() + docs_name = knowledge_name + pl.name + str(pl.indexer.d) if add_file and file_path: - return await add_data(file_path) + return await add_data(file_path, docs_name) else: try: ctx.get_node_mgr().del_nodes_by_np_idx(pl.node_parser.idx) @@ -349,7 +331,7 @@ async def update_knowledge_base_handler(file_path=None, knowledge_name: str = "d if file_path: for file in file_path: request = DataIn(local_path=file) - await add_data(request) + await add_data(request, docs_name) except MilvusException as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) return "Done" @@ -368,10 +350,11 @@ async def remove_file_handler(file_path=None, knowledge_name: str = "default_kb" except MilvusException as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) pl.update_indexer_to_retriever() + docs_name = knowledge_name + pl.name + str(pl.indexer.d) if file_path: for file in file_path: request = DataIn(local_path=file) - await add_data(request) + await add_data(request, docs_name) return "Done" @@ -387,7 +370,7 @@ async def load_knowledge_from_file(): for Knowledgebase_data in all_data: pipeline_req = KnowledgeBaseCreateIn(**Knowledgebase_data) kb = ctx.knowledgemgr.create_knowledge_base(pipeline_req) - if kb.comp_type == "knowledge" and kb.comp_subtype == "origin_kb": + if kb.comp_type =="knowledge" and kb.comp_subtype =="origin_kb": if Knowledgebase_data["file_map"]: if active_pl.indexer.comp_subtype != "milvus_vector" and Knowledgebase_data["active"]: for file_path in Knowledgebase_data["file_map"].values(): @@ -403,7 +386,7 @@ async def 
load_knowledge_from_file(): else: for file_path in Knowledgebase_data["file_map"].values(): kb.add_file_path(file_path) - elif kb.comp_subtype == "kbadmin_kb": + elif kb.comp_subtype =="kbadmin_kb": if Knowledgebase_data["active"]: active_pl.retriever.config_kbadmin_milvus(kb.name) except Exception as e: @@ -419,15 +402,7 @@ async def save_knowledge_to_file(): kb_base = ctx.knowledgemgr.get_all_knowledge_bases() knowledgebases_data = [] for kb in kb_base: - kb_json = { - "name": kb.name, - "description": kb.description, - "active": kb.active, - "file_map": kb.file_map, - "comp_type": kb.comp_type, - "comp_subtype": kb.comp_subtype, - "experience_active": kb.experience_active, - } + kb_json = {"name": kb.name, "description": kb.description, "active": kb.active, "file_map": kb.file_map, "comp_type": kb.comp_type, "comp_subtype":kb.comp_subtype, "experience_active": kb.experience_active} knowledgebases_data.append(kb_json) json_str = json.dumps(knowledgebases_data, indent=2, ensure_ascii=False) with open(KNOWLEDGEBASE_FILE, "w", encoding="utf-8") as f: @@ -438,8 +413,6 @@ async def save_knowledge_to_file(): all_pipeline_milvus_maps = {"change_pl": []} current_pipeline_kb_map = {} - - async def refresh_milvus_map(milvus_name): current_pipeline_kb_map.clear() knowledge_bases_list = await get_all_knowledge_bases() @@ -448,13 +421,13 @@ async def refresh_milvus_map(milvus_name): continue current_pipeline_kb_map[kb.name] = kb.file_map all_pipeline_milvus_maps[milvus_name] = copy.deepcopy(current_pipeline_kb_map) - milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") + milvus_maps_path = os.path.join(CONFIG_DIR,"milvus_maps.json") with open(milvus_maps_path, "w", encoding="utf-8") as f: json.dump(all_pipeline_milvus_maps, f, ensure_ascii=False, indent=2) def read_milvus_maps(): - milvus_maps_path = os.path.join(CONFIG_DIR, "milvus_maps.json") + milvus_maps_path = os.path.join(CONFIG_DIR,"milvus_maps.json") global all_pipeline_milvus_maps try: with 
open(milvus_maps_path, "r", encoding="utf-8") as f: @@ -463,33 +436,26 @@ def read_milvus_maps(): all_pipeline_milvus_maps = {"change_pl": []} return all_pipeline_milvus_maps - def save_change_pl(pl_name): - if pl_name not in all_pipeline_milvus_maps["change_pl"]: + if pl_name not in all_pipeline_milvus_maps["change_pl"]: return all_pipeline_milvus_maps["change_pl"].append(pl_name) - async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): try: if pl_change: save_change_pl(new_active_pl.name) active_kb = ctx.knowledgemgr.get_active_knowledge_base() - # Determine whether it is kbadmin type + # Determine whether it is kbadmin type if old_active_pl: - if ( - old_active_pl.retriever.comp_subtype == "kbadmin_retriever" - and new_active_pl.retriever.comp_subtype == "kbadmin_retriever" - ): + if old_active_pl.retriever.comp_subtype == "kbadmin_retriever" and new_active_pl.retriever.comp_subtype == "kbadmin_retriever": if active_kb: if active_kb.comp_subtype == "kbadmin_kb": new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) return True elif old_active_pl.retriever.comp_subtype == "kbadmin_retriever": return True - - milvus_name = ( - old_active_pl.name + str(old_active_pl.indexer.model_extra["d"]) if old_active_pl else "default_kb" - ) + + milvus_name = (old_active_pl.name + str(old_active_pl.indexer.model_extra["d"]) if old_active_pl else "default_kb") if not new_active_pl.status.active: if old_active_pl: if old_active_pl.indexer.comp_subtype == "milvus_vector": @@ -497,11 +463,11 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): return True if not active_kb: return True - if new_active_pl.retriever.comp_subtype == "kbadmin_retriever": - if active_kb: - if active_kb.comp_subtype == "kbadmin_kb": - new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) - return True + if new_active_pl.retriever.comp_subtype=="kbadmin_retriever": + if active_kb: + if active_kb.comp_subtype == "kbadmin_kb": + 
new_active_pl.retriever.config_kbadmin_milvus(active_kb.name) + return True # Perform milvus data synchronization if new_active_pl.indexer.comp_subtype == "milvus_vector": # Pipeline component state changed @@ -513,8 +479,10 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): new_active_pl.indexer.reinitialize_indexer(kb.name) new_active_pl.update_indexer_to_retriever() add_list = kb.get_file_paths() + docs_name = kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + ctx.get_file_mgr().del_kb_file(docs_name) for file in add_list: - await add_data(DataIn(local_path=file)) + await add_data(DataIn(local_path=file), docs_name) all_pipeline_milvus_maps["change_pl"].remove(new_active_pl.name) return True # Pipeline component state not changed @@ -531,21 +499,22 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): # Synchronization of deleted files for kb_name, file_paths in deleted_files.items(): if file_paths: - new_active_pl.indexer.clear_milvus_collection(kb_name) if kb_name not in new_milvus_map.keys(): + new_active_pl.indexer.clear_milvus_collection(kb_name) continue kb = await get_knowledge_base(kb_name) new_active_pl.indexer.reinitialize_indexer(kb_name) - file_paths = kb.get_file_paths() - if file_paths: - for file in file_paths: - await add_data(DataIn(local_path=file)) + for file_path in file_paths.values(): + docs_name = kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + docs_list =ctx.get_file_mgr().del_file(docs_name, file_path) + new_active_pl.indexer.delete(docs_list) # Synchronization of added files for kb_name, file_paths in added_files.items(): if file_paths: for file_path in file_paths.values(): new_active_pl.indexer.reinitialize_indexer(kb_name) - await add_data(DataIn(local_path=file_path)) + docs_name = kb_name + new_active_pl.name + str(new_active_pl.indexer.d) + await add_data(DataIn(local_path=file_path), docs_name) 
new_active_pl.indexer.reinitialize_indexer(active_kb.name) new_active_pl.update_indexer_to_retriever() @@ -555,7 +524,8 @@ async def Synchronizing_vector_data(old_active_pl, new_active_pl, pl_change): new_active_pl.update_indexer_to_retriever() add_list = active_kb.get_file_paths() for file in add_list: - await add_data(DataIn(local_path=file)) + docs_name = active_kb.name + new_active_pl.name + str(new_active_pl.indexer.d) + await add_data(DataIn(local_path=file), docs_name) if old_active_pl: if old_active_pl.indexer.comp_subtype == "milvus_vector": await refresh_milvus_map(milvus_name) diff --git a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py index fe6de98d42..9a6d946ebc 100755 --- a/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/pipeline.py @@ -3,8 +3,7 @@ import asyncio import json -import os -import re +import os, re import weakref from concurrent.futures import ThreadPoolExecutor @@ -13,21 +12,16 @@ from edgecraftrag.base import IndexerType, InferenceType, ModelType, NodeParserType, PostProcessorType, RetrieverType from edgecraftrag.components.benchmark import Benchmark from edgecraftrag.components.generator import QnAGenerator -from edgecraftrag.components.indexer import KBADMINIndexer, VectorIndexer +from edgecraftrag.components.indexer import VectorIndexer, KBADMINIndexer from edgecraftrag.components.node_parser import ( HierarchyNodeParser, - KBADMINParser, SimpleNodeParser, SWindowNodeParser, UnstructedNodeParser, + KBADMINParser ) from edgecraftrag.components.postprocessor import MetadataReplaceProcessor, RerankProcessor -from edgecraftrag.components.retriever import ( - AutoMergeRetriever, - KBadminRetriever, - SimpleBM25Retriever, - VectorSimRetriever, -) +from edgecraftrag.components.retriever import AutoMergeRetriever, SimpleBM25Retriever, VectorSimRetriever, KBadminRetriever from edgecraftrag.context import ctx from fastapi import FastAPI, File, HTTPException, 
UploadFile, status from pymilvus import connections @@ -78,10 +72,7 @@ async def get_pipeline_benchmarks(name): async def add_pipeline(request: PipelineCreateIn): pattern = re.compile(r"^[a-zA-Z0-9_]+$") if not pattern.fullmatch(request.name): - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail="Pipeline name must consist of letters, numbers, and underscores.", - ) + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Pipeline name must consist of letters, numbers, and underscores.") return load_pipeline(request) @@ -180,10 +171,6 @@ def update_pipeline_handler(pl, req): pl.node_parser = KBADMINParser() ctx.get_node_parser_mgr().add(pl.node_parser) - all_docs = ctx.get_file_mgr().get_all_docs() - nodelist = pl.node_parser.run(docs=all_docs) - if nodelist is not None and len(nodelist) > 0: - ctx.get_node_mgr().add_nodes(pl.node_parser.idx, nodelist) pl._node_changed = True if req.indexer is not None: @@ -208,9 +195,7 @@ def update_pipeline_handler(pl, req): kbadmin_embedding_url = ind.embedding_url KBADMIN_VECTOR_URL = ind.vector_url embed_model = ind.embedding_model.model_id - pl.indexer = KBADMINIndexer( - embed_model, ind.indexer_type, kbadmin_embedding_url, KBADMIN_VECTOR_URL - ) + pl.indexer = KBADMINIndexer(embed_model, ind.indexer_type, kbadmin_embedding_url, KBADMIN_VECTOR_URL) case _: pass ctx.get_indexer_mgr().add(pl.indexer) @@ -241,7 +226,7 @@ def update_pipeline_handler(pl, req): else: return Exception("No indexer") case RetrieverType.KBADMIN_RETRIEVER: - pl.retriever = KBadminRetriever(pl.indexer, similarity_top_k=retr.retrieve_topk) + pl.retriever = KBadminRetriever(pl.indexer, similarity_top_k=retr.retrieve_topk) case _: pass # Index is updated to retriever @@ -374,3 +359,4 @@ async def check_milvus(request: MilvusConnectRequest): return {"status": "404", "message": "Milvus connection failed."} except Exception as e: return {"status": "404", "message": f"connection failed: {str(e)}"} + diff --git 
a/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py b/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py index 86639a40a7..0de6a283a2 100644 --- a/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py +++ b/EdgeCraftRAG/edgecraftrag/api/v1/prompt.py @@ -41,6 +41,8 @@ async def get_prompt(): try: generator = ctx.get_pipeline_mgr().get_active_pipeline().generator if generator: + if generator.prompt_content is not None: + return generator.prompt_content return generator.prompt except Exception as e: raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)) diff --git a/EdgeCraftRAG/edgecraftrag/api_schema.py b/EdgeCraftRAG/edgecraftrag/api_schema.py index 2bdf8dbd4e..901e4ed0f0 100644 --- a/EdgeCraftRAG/edgecraftrag/api_schema.py +++ b/EdgeCraftRAG/edgecraftrag/api_schema.py @@ -83,13 +83,11 @@ class KnowledgeBaseCreateIn(BaseModel): active: Optional[bool] = None comp_type: Optional[str] = "knowledge" comp_subtype: Optional[str] = "origin_kb" - experience_active: Optional[bool] = None - + experience_active: Optional[bool] = None class ExperienceIn(BaseModel): question: str content: list[str] = None - class MilvusConnectRequest(BaseModel): vector_url: str diff --git a/EdgeCraftRAG/edgecraftrag/base.py b/EdgeCraftRAG/edgecraftrag/base.py index 3306afc2ed..ec73d37621 100644 --- a/EdgeCraftRAG/edgecraftrag/base.py +++ b/EdgeCraftRAG/edgecraftrag/base.py @@ -48,7 +48,6 @@ class NodeParserType(str, Enum): UNSTRUCTURED = "unstructured" KBADMINPARSER = "kbadmin_parser" - class IndexerType(str, Enum): FAISS_VECTOR = "faiss_vector" @@ -56,7 +55,6 @@ class IndexerType(str, Enum): MILVUS_VECTOR = "milvus_vector" KBADMIN_INDEXER = "kbadmin_indexer" - class RetrieverType(str, Enum): VECTORSIMILARITY = "vectorsimilarity" @@ -118,13 +116,13 @@ class BaseMgr: def __init__(self): self.components = {} - def add(self, comp: BaseComponent, name: str = None): + def add(self, comp: BaseComponent, name: str=None): if name: self.components[name] = comp return True self.components[comp.idx] 
= comp - def append(self, comp: BaseComponent, name: str = None): + def append(self, comp: BaseComponent, name: str=None): key = name if name else comp.idx if key not in self.components: self.components[key] = [] diff --git a/EdgeCraftRAG/edgecraftrag/components/benchmark.py b/EdgeCraftRAG/edgecraftrag/components/benchmark.py index 3bf2a7e602..df66ef0e6f 100644 --- a/EdgeCraftRAG/edgecraftrag/components/benchmark.py +++ b/EdgeCraftRAG/edgecraftrag/components/benchmark.py @@ -49,14 +49,7 @@ def cal_input_token_size(self, input_text_list): return input_token_size def init_benchmark_data(self): - pipeline_comp = [ - CompType.NODEPARSER, - CompType.CHUNK_NUM, - CompType.RETRIEVER, - CompType.POSTPROCESSOR, - CompType.QUERYSEARCH, - CompType.GENERATOR, - ] + pipeline_comp = [CompType.NODEPARSER, CompType.CHUNK_NUM, CompType.RETRIEVER, CompType.POSTPROCESSOR, CompType.QUERYSEARCH, CompType.GENERATOR] if self.is_enabled(): with self._idx_lock: self.last_idx += 1 diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index 70384b7122..117865cf53 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -109,10 +109,9 @@ async def local_stream_generator(lock, llm, prompt_str, unstructured_str): save_history(res) except Exception as e: start_idx = str(e).find("message") + len("message") - result_error = str(e)[start_idx:] + result_error = str(e)[start_idx:] yield f"code:0000{result_error}" - async def stream_generator(llm, prompt_str, unstructured_str): response = llm.stream_complete(prompt_str) collected_data = [] @@ -127,9 +126,9 @@ async def stream_generator(llm, prompt_str, unstructured_str): res = "".join(collected_data) save_history(res) except Exception as e: - start_idx = str(e).find("message") + len("message") - result_error = str(e)[start_idx:] - yield f"code:0000{result_error}" + start_idx = str(e).find("message") + len("message") + 
result_error = str(e)[start_idx:] + yield f"code:0000{result_error}" class QnAGenerator(BaseComponent): @@ -154,7 +153,7 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin llm_instance = llm_model() if llm_instance.model_path is None or llm_instance.model_path == "": self.model_id = llm_instance.model_id - self.model_path = os.path.join("/home/user/models/", os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) + self.model_path = os.path.join("/home/user/models/",os.getenv("LLM_MODEL", "Qwen/Qwen3-8B")) else: self.model_id = llm_instance.model_id self.model_path = llm_instance.model_path @@ -181,7 +180,6 @@ def init_prompt(self, model_path, prompt_content=None, prompt_template_file=None # using the prompt template enhancement strategy(only tested on Qwen2-7B-Instruction) if template_enhance_on is true template_enhance_on = True if "Qwen2" in self.model_id else False if prompt_content: - self.set_prompt(prompt_content) return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) elif prompt_template_file is None: print("There is no template file, using the default template.") @@ -208,15 +206,12 @@ def set_prompt(self, prompt): prompt += "\n<|im_start|>{context}<|im_end|>" if "{chat_history}" not in prompt: prompt += "\n<|im_start|>{chat_history}" - self.prompt = prompt + self.prompt_content = prompt + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) def reset_prompt(self): - prompt_template = get_prompt_template(self.model_id) - self.prompt = ( - DocumentedContextRagPromptTemplate.from_template(prompt_template) - if self.template_enhance_on - else prompt_template - ) + self.prompt_content = None + self.prompt = self.init_prompt(self.model_path, self.prompt_content, self.prompt_template_file) def clean_string(self, string): ret = string @@ -238,7 +233,7 @@ def query_transform(self, chat_request, retrieved_nodes, sub_questions=None): chat_history = 
concat_history(chat_request.messages) # Modify model think status if chat_request.chat_template_kwargs: - if "enable_thinking" in chat_request.chat_template_kwargs: + if 'enable_thinking' in chat_request.chat_template_kwargs: if self.enable_think != chat_request.chat_template_kwargs["enable_thinking"]: self.prompt = self.init_prompt( self.model_path, diff --git a/EdgeCraftRAG/edgecraftrag/components/indexer.py b/EdgeCraftRAG/edgecraftrag/components/indexer.py index bd79bb3042..00e0f3f4d6 100644 --- a/EdgeCraftRAG/edgecraftrag/components/indexer.py +++ b/EdgeCraftRAG/edgecraftrag/components/indexer.py @@ -80,10 +80,10 @@ class KBADMINIndexer(BaseComponent): # Handled in the kbadmin project def __init__(self, embed_model, vector_type, kbadmin_embedding_url, vector_url="http://localhost:29530"): BaseComponent.__init__( - self, - comp_type=CompType.INDEXER, - comp_subtype=IndexerType.KBADMIN_INDEXER, - ) + self, + comp_type=CompType.INDEXER, + comp_subtype=IndexerType.KBADMIN_INDEXER, + ) self.embed_model = embed_model self.kbadmin_embedding_url = kbadmin_embedding_url self.vector_url = vector_url @@ -91,7 +91,7 @@ def __init__(self, embed_model, vector_type, kbadmin_embedding_url, vector_url=" def insert_nodes(self, nodes): return None - def _index_struct(self, nodes): + def _index_struct(self, nodes): return None def run(self, **kwargs) -> Any: @@ -105,11 +105,5 @@ def clear_milvus_collection(self, **kwargs): @model_serializer def ser_model(self): - set = { - "idx": self.idx, - "indexer_type": self.comp_subtype, - "model": {"model_id": self.embed_model}, - "kbadmin_embedding_url": self.kbadmin_embedding_url, - "vector_url": self.vector_url, - } - return set + set = {"idx": self.idx, "indexer_type": self.comp_subtype, "model": {"model_id": self.embed_model}, "kbadmin_embedding_url": self.kbadmin_embedding_url, "vector_url":self.vector_url} + return set \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py 
b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py index 45ea309fad..d3a050ab4c 100644 --- a/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py +++ b/EdgeCraftRAG/edgecraftrag/components/knowledge_base.py @@ -1,9 +1,8 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import json -import os -from typing import Any, Dict, List, Optional, Union +import os, json +from typing import Any, List, Optional, Dict, Union from edgecraftrag.base import BaseComponent from pydantic import model_serializer @@ -44,7 +43,7 @@ def ensure_file_exists(self): dir_path = os.path.dirname(self.file_paths[0]) os.makedirs(dir_path, exist_ok=True) if not os.path.exists(self.file_paths[0]): - with open(self.file_paths[0], "w", encoding="utf-8") as f: + with open(self.file_paths[0], 'w', encoding='utf-8') as f: json.dump([], f, ensure_ascii=False, indent=4) def get_all_experience(self) -> List[Dict]: @@ -53,82 +52,80 @@ def get_all_experience(self) -> List[Dict]: self.file_paths.append(experinence_file) if not os.path.isfile(self.file_paths[0]): self.ensure_file_exists() - with open(self.file_paths[0], "r", encoding="utf-8") as f: + with open(self.file_paths[0], 'r', encoding='utf-8') as f: return json.load(f) def get_experience_by_question(self, question: str) -> Optional[Dict]: for item in self.get_all_experience(): - if item.get("question") == question: + if item.get('question') == question: return item return None - def add_multiple_experiences( - self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True - ) -> List[Dict]: + def add_multiple_experiences(self, experiences: List[Dict[str, Union[str, List[str]]]], flag: bool = True) -> List[Dict]: all_experiences = self.get_all_experience() result = [] for exp in experiences: - question = exp.get("question") + question = exp.get('question') if not question: raise ValueError("Must exist when uploading question") - content = exp.get("content", []) + content = exp.get('content', []) 
found = False for item in all_experiences: - if item["question"] == question: + if item['question'] == question: if flag: - item["content"].extend([c for c in content if c not in item["content"]]) + item['content'].extend([c for c in content if c not in item['content']]) else: - item["content"] = content + item['content'] = content result.append(item) found = True break if not found: - new_item = {"question": question, "content": content} + new_item = {'question': question, 'content': content} all_experiences.append(new_item) result.append(new_item) - with open(self.file_paths[0], "w", encoding="utf-8") as f: + with open(self.file_paths[0], 'w', encoding='utf-8') as f: json.dump(all_experiences, f, ensure_ascii=False, indent=4) return result def delete_experience(self, question: str) -> bool: items = self.get_all_experience() - remaining_items = [item for item in items if item.get("question") != question] + remaining_items = [item for item in items if item.get('question') != question] if len(remaining_items) == len(items): return False - with open(self.file_paths[0], "w", encoding="utf-8") as f: + with open(self.file_paths[0], 'w', encoding='utf-8') as f: json.dump(remaining_items, f, ensure_ascii=False, indent=4) return True def clear_experiences(self) -> bool: all_experiences = self.get_all_experience() - with open(self.file_paths[0], "w", encoding="utf-8") as f: + with open(self.file_paths[0], 'w', encoding='utf-8') as f: json.dump([], f, ensure_ascii=False, indent=4) return True def update_experience(self, question: str, content: List[str]) -> Optional[Dict]: items = self.get_all_experience() for i, item in enumerate(items): - if item.get("question") == question: - updated_item = {"question": question, "content": content} + if item.get('question') == question: + updated_item = {'question': question, 'content': content} items[i] = updated_item - with open(self.file_paths[0], "w", encoding="utf-8") as f: + with open(self.file_paths[0], 'w', encoding='utf-8') as 
f: json.dump(items, f, ensure_ascii=False, indent=4) return updated_item return None def add_experiences_from_file(self, file_path: str, flag: bool = False) -> List[Dict]: - if not file_path.endswith(".json"): + if not file_path.endswith('.json'): raise ValueError("File upload type error") try: - with open(file_path, "r", encoding="utf-8") as f: + with open(file_path, 'r', encoding='utf-8') as f: experiences = json.load(f) if not isinstance(experiences, list): raise ValueError("The contents of the file must be a list") return self.add_multiple_experiences(experiences=experiences, flag=flag) except json.JSONDecodeError as e: - raise ValueError("File parsing failure") + raise ValueError(f"File parsing failure") except Exception as e: - raise RuntimeError("File Error") + raise RuntimeError(f"File Error") def calculate_totals(self): if self.comp_type == "knowledge": @@ -153,6 +150,6 @@ def ser_model(self): "description": self.description, "active": self.active, "experience_active": self.experience_active, - "total": self.calculate_totals(), + "total": self.calculate_totals() } return set diff --git a/EdgeCraftRAG/edgecraftrag/components/node_parser.py b/EdgeCraftRAG/edgecraftrag/components/node_parser.py index 0bd49b91b4..2491cbf9dd 100644 --- a/EdgeCraftRAG/edgecraftrag/components/node_parser.py +++ b/EdgeCraftRAG/edgecraftrag/components/node_parser.py @@ -169,7 +169,6 @@ def ser_model(self): } return set - class KBADMINParser(BaseComponent): # Handled in the kbadmin project def __init__(self, **kwargs): @@ -182,7 +181,6 @@ def run(self, **kwargs) -> Any: def insert_nodes(self): return None - @model_serializer def ser_model(self): set = { diff --git a/EdgeCraftRAG/edgecraftrag/components/pipeline.py b/EdgeCraftRAG/edgecraftrag/components/pipeline.py index 29205a3819..a4014ac6b9 100644 --- a/EdgeCraftRAG/edgecraftrag/components/pipeline.py +++ b/EdgeCraftRAG/edgecraftrag/components/pipeline.py @@ -216,7 +216,7 @@ def run_simple_doc(pl: Pipeline, docs: List[Document]) 
-> Any: if pl.indexer is not None: pl.indexer.insert_nodes(n) if pl.enable_benchmark: - benchmark_data[CompType.NODEPARSER] += time.perf_counter() - start + benchmark_data[CompType.NODEPARSER] += (time.perf_counter() - start) benchmark_data[CompType.CHUNK_NUM] += len(n) pl.benchmark.insert_benchmark_data(benchmark_data) return n @@ -244,27 +244,23 @@ def run_generator(pl: Pipeline, chat_request: ChatCompletionRequest) -> Any: benchmark_index, benchmark_data = pl.benchmark.init_benchmark_data() contexts = {} retri_res = [] - active_kb = chat_request.user if chat_request.user else None - enable_rag_retrieval = ( - chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) - if chat_request.chat_template_kwargs - else True - ) + active_kb = chat_request.user if chat_request.user else None + enable_rag_retrieval = chat_request.chat_template_kwargs.get("enable_rag_retrieval", True) if chat_request.chat_template_kwargs else True if not active_kb: enable_rag_retrieval = False elif pl.retriever.comp_subtype == "kbadmin_retriever" and active_kb.comp_subtype == "origin_kb": enable_rag_retrieval = False - elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": + elif pl.retriever.comp_subtype != "kbadmin_retriever" and active_kb.comp_subtype == "kbadmin_kb": enable_rag_retrieval = False query = chat_request.messages sub_questionss_result = None - experience_status = True if chat_request.tool_choice == "auto" else False + experience_status = True if chat_request.tool_choice == 'auto' else False if enable_rag_retrieval: start = 0 if pl.enable_benchmark: start = time.perf_counter() if pl.generator.inference_type == InferenceType.VLLM and experience_status: - UI_DIRECTORY = "/home/user/ui_cache" + UI_DIRECTORY ="/home/user/ui_cache" search_config_path = os.path.join(UI_DIRECTORY, "configs/search_config.yaml") search_dir = os.path.join(UI_DIRECTORY, "configs/experience_dir/experience.json") diff --git 
a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py index 0f0a3e792a..c6f995aa99 100644 --- a/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py +++ b/EdgeCraftRAG/edgecraftrag/components/query_preprocess.py @@ -85,6 +85,7 @@ def __init__( output_template="", json_key="relevance", json_levels=["Low", "High"], + scores_weight=None, temperature=1.0, API_BASE=None, **kwargs, @@ -107,6 +108,19 @@ def __init__( self.json_levels = json_levels self.API_BASE = API_BASE + # dynamically set scores_weight, use default if not provided + if scores_weight is None: + # generate default weights based on json_levels count + if len(json_levels) == 2: + self.scores_weight = [0.0, 1.0] # Low, High + elif len(json_levels) == 3: + self.scores_weight = [0.0, 0.5, 1.0] # Low, Medium, High + else: + # for other counts, generate evenly spaced weights + self.scores_weight = [i / (len(json_levels) - 1) for i in range(len(json_levels))] + else: + self.scores_weight = scores_weight + async def invoke_vllm(self, input_texts): headers = {"Content-Type": "application/json"} payload = { @@ -152,18 +166,22 @@ async def _calculate_logits_score(self, user_input, issue): def _calculate_token_score_vllm(self, outputs, output_index=1, transform="exp"): generated_scores = outputs[output_index] - three_scores = [ - generated_scores.get("Low", -9999.0), - generated_scores.get("Medium", -9999.0), - generated_scores.get("High", -9999.0), - ] - level_scores = [score / self.temperature for score in three_scores] + + # dynamically get scores for all levels + level_scores = [] + for level in self.json_levels: + level_scores.append(generated_scores.get(level, -9999.0)) + + # apply temperature scaling + level_scores = [score / self.temperature for score in level_scores] level_scores_np = numpy.array(level_scores) level_scores_np = numpy.where(level_scores_np < -1000, -1000, level_scores_np) level_scores_np_exp = numpy.exp(level_scores_np - 
numpy.max(level_scores_np)) scores_probs = level_scores_np_exp / level_scores_np_exp.sum() - scores_weight = numpy.array([0.0, 0.5, 1.0]) # Low=0, Medium=0.5, High=1 + + # using dynamic scores_weight + scores_weight = numpy.array(self.scores_weight) final_score = numpy.dot(scores_probs, scores_weight) return final_score @@ -175,8 +193,8 @@ async def compute_score(self, input_pair): def read_json_files(file_path: str) -> dict: result = {} if os.path.isfile(file_path): - with open(file_path, "r", encoding="utf-8") as f: - result = json.load(f) + with open(file_path, 'r', encoding='utf-8') as f: + result = json.load(f) return result @@ -196,14 +214,14 @@ async def query_search(user_input, search_config_path, search_dir, pl): cfg = {} if not os.path.exists(search_config_path): - cfg["query_matcher"] = { - "instructions": "You're an expert in TCB Bonder, your task is to decide the semantic similarity of two queries.\n If they are expressing similar idea, mark as High.\n If they are totally different, mark as Low.\n If some parts of them are similar, some are not, mark as Medium.\n", - "input_template": " {} \n {} \n", - "output_template": "output from {json_levels}.\n", - "json_key": "similarity", - "json_levels": ["Low", "Medium", "High"], - "temperature": 1, - } + cfg["query_matcher"] = { + "instructions": "You're a knowledgeable assistant. Your task is to judge if two queries ask for the same information about the same primary subject. Output only 'Yes' or 'No'. Yes = same subject entity AND same information need, with only wording or stylistic differences. No = different subject entity, different spec or numeric constraint, different attribute/metric, or scope changed by adding/removing a restricting condition. Entity changes MUST lead to No.", + "input_template": "Query 1: {}\nQuery 2: {}\n", + "output_template": "\nAre these queries equivalent? 
Answer 'Yes' or 'No':", + "json_key": "similarity", + "json_levels": ["No", "Yes"], + "temperature": 0.1 + } else: cfg = OmegaConf.load(search_config_path) cfg["query_matcher"]["model_id"] = model_id @@ -221,10 +239,10 @@ async def limited_compute_score(query_matcher, user_input, issue): match_scores.sort(key=lambda x: x[1], reverse=True) # Maximum less than 0.6, we don't use query search. - if match_scores[0][1] < 0.6: + if match_scores[0][1] < 0.6: return top1_issue, sub_questions_result top1_issue = match_scores[0][0] for i in range(len(maintenance_data)): - if maintenance_data[i]["question"] == top1_issue: + if maintenance_data[i]['question'] == top1_issue: sub_questions_result = "\n".join(maintenance_data[i]["content"]) return top1_issue, sub_questions_result diff --git a/EdgeCraftRAG/edgecraftrag/components/retriever.py b/EdgeCraftRAG/edgecraftrag/components/retriever.py index cdd3fe0bc2..209fd7b5a0 100644 --- a/EdgeCraftRAG/edgecraftrag/components/retriever.py +++ b/EdgeCraftRAG/edgecraftrag/components/retriever.py @@ -1,19 +1,23 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import warnings -from typing import Any, List, Optional, cast +from typing import Any, List, cast -import requests +import requests, warnings from edgecraftrag.base import BaseComponent, CompType, RetrieverType -from langchain_milvus import Milvus -from langchain_openai import OpenAIEmbeddings from llama_index.core.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.core.retrievers import AutoMergingRetriever -from llama_index.core.schema import BaseNode, Document, NodeWithScore +from llama_index.core.schema import BaseNode from llama_index.retrievers.bm25 import BM25Retriever from pydantic import model_serializer -from pymilvus import Collection, MilvusException, connections, utility +from llama_index.core.schema import NodeWithScore + +from langchain_openai import OpenAIEmbeddings +from langchain_milvus import Milvus +from 
llama_index.core.schema import Document +from typing import List, Optional +from pymilvus import MilvusException +from pymilvus import connections, utility, Collection class VectorSimRetriever(BaseComponent, VectorIndexRetriever): @@ -45,7 +49,7 @@ def run(self, **kwargs) -> Any: for k, v in kwargs.items(): if k == "query": top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk - self.similarity_top_k = top_k + self.similarity_top_k=top_k return self.retrieve(v) return None @@ -145,45 +149,42 @@ def __init__(self, indexer, **kwargs): self.collection_name = None self.topk = kwargs.get("similarity_top_k", 30) self.KBADMIN_MILVUS_URL = indexer.vector_url - self.CONNECTION_ARGS = {"uri": indexer.vector_url} + self.CONNECTION_ARGS = {"uri": indexer.vector_url} self.vector_field = "q_1024_vec" self.text_field = "content_with_weight" self.embedding_model_name = indexer.embed_model self.embedding_url = indexer.kbadmin_embedding_url + "/v3" - self.embedding = OpenAIEmbeddings( - model=self.embedding_model_name, - api_key="unused", - base_url=self.embedding_url, - tiktoken_enabled=False, - embedding_ctx_length=510, - ) + self.embedding = OpenAIEmbeddings(model=self.embedding_model_name, api_key="unused", base_url=self.embedding_url, tiktoken_enabled=False, embedding_ctx_length=510) def config_kbadmin_milvus(self, knowledge_name): collection_name = knowledge_name if not kbs_rev_maps: - get_kbs_info(self.CONNECTION_ARGS) + get_kbs_info( self.CONNECTION_ARGS) collection_name = kbs_rev_maps[collection_name] self.vector_db = Milvus( self.embedding, - connection_args=self.CONNECTION_ARGS, - collection_name=collection_name, - vector_field=self.vector_field, - text_field=self.text_field, + connection_args = self.CONNECTION_ARGS, + collection_name = collection_name, + + vector_field = self.vector_field, + text_field = self.text_field, enable_dynamic_field=True, - index_params={"index_type": "FLAT", "metric_type": "IP", "params": {}}, + index_params = {"index_type": "FLAT", 
"metric_type": "IP", "params": {}} ) + def similarity_search_with_embedding(self, query: str, k) -> list[tuple[Document, float]]: url = self.embedding_url + "/embeddings" - embedding_info = {"model": self.embedding_model_name, "input": query} + embedding_info = {"model": self.embedding_model_name,"input": query} # Get embedding result from embedding service - response = requests.post(url, headers={"Content-Type": "application/json"}, json=embedding_info) + response = requests.post(url, headers={'Content-Type': 'application/json'}, json=embedding_info) embedding_json = response.json() - embedding = embedding_json["data"][0]["embedding"] + embedding = embedding_json['data'][0]['embedding'] docs_and_scores = self.vector_db.similarity_search_with_score_by_vector(embedding=embedding, k=k) relevance_score_fn = self.vector_db._select_relevance_score_fn() return [(doc, relevance_score_fn(score)) for doc, score in docs_and_scores] + def run(self, **kwargs) -> Any: query = kwargs["query"] top_k = kwargs["top_k"] if kwargs["top_k"] else self.topk @@ -201,15 +202,17 @@ def run(self, **kwargs) -> Any: @model_serializer def ser_model(self): - set = {"idx": self.idx, "retriever_type": self.comp_subtype, "CONNECTION_ARGS": self.CONNECTION_ARGS} + set = { + "idx": self.idx, + "retriever_type": self.comp_subtype, + "CONNECTION_ARGS": self.CONNECTION_ARGS + } return set # global kbs maps. 
global kbs_rev_maps kbs_rev_maps = {} - - def get_kbs_info(CONNECTION_ARGS): alias = "default" try: @@ -218,53 +221,53 @@ def get_kbs_info(CONNECTION_ARGS): all_kb_infos = {} new_infos = {} for kb in collections: - collection = Collection(kb) - collection.load() - try: - if any(field.name == "kb_id" for field in collection.schema.fields): - docs = collection.query( - expr="pk != 0", - output_fields=["kb_name", "kb_id", "docnm_kwd"], - timeout=10, - ) - else: - docs = collection.query( - expr="pk != 0", - output_fields=["filename"], - timeout=10, - ) - collection.release() - except MilvusException as e: - continue - this_kbinfo = {} - for doc in docs: + collection = Collection(kb) + collection.load() try: - if "kb_name" in doc: - if not this_kbinfo: - this_kbinfo["name"] = doc["kb_name"] - this_kbinfo["uuid"] = doc["kb_id"] - this_kbinfo["files"] = set([doc["docnm_kwd"]]) - else: - this_kbinfo["files"].add(doc["docnm_kwd"]) + if any(field.name == 'kb_id' for field in collection.schema.fields): + docs = collection.query( + expr="pk != 0", + output_fields=["kb_name", "kb_id", "docnm_kwd"], + timeout=10, + ) else: - if not this_kbinfo: - this_kbinfo["name"] = kb - this_kbinfo["uuid"] = "" - this_kbinfo["files"] = set([doc["filename"]]) + docs = collection.query( + expr="pk != 0", + output_fields=["filename"], + timeout=10, + ) + collection.release() + except MilvusException as e: + continue + this_kbinfo = {} + for doc in docs: + try: + if 'kb_name' in doc: + if not this_kbinfo: + this_kbinfo['name'] = doc['kb_name'] + this_kbinfo['uuid'] = doc['kb_id'] + this_kbinfo['files'] = set([doc['docnm_kwd']]) + else: + this_kbinfo['files'].add(doc['docnm_kwd']) else: - this_kbinfo["files"].add(doc["filename"]) - except KeyError: - this_kbinfo = None - break - if this_kbinfo: - unique_files = list(this_kbinfo["files"]) - this_kbinfo["files"] = unique_files - new_infos[kb] = this_kbinfo + if not this_kbinfo: + this_kbinfo['name'] = kb + this_kbinfo['uuid'] = "" + 
this_kbinfo['files'] = set([doc['filename']]) + else: + this_kbinfo['files'].add(doc['filename']) + except KeyError: + this_kbinfo = None + break + if this_kbinfo: + unique_files = list(this_kbinfo['files']) + this_kbinfo['files'] = unique_files + new_infos[kb] = this_kbinfo all_kb_infos.update(new_infos) kbs_rev_maps.clear() for kb_id in all_kb_infos: - kbs_rev_maps[all_kb_infos[kb_id]["name"]] = kb_id + kbs_rev_maps[all_kb_infos[kb_id]['name']] = kb_id return kbs_rev_maps finally: if connections.has_connection(alias): - connections.disconnect(alias) + connections.disconnect(alias) \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py index c956ee316d..0b155e498b 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/compmgr.py @@ -32,29 +32,24 @@ def search_parser_change(self, pl, req): if pl.node_parser.comp_subtype != req.node_parser.parser_type: return True if pl.node_parser.comp_subtype == req.node_parser.parser_type: - if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: - if ( - pl.node_parser.chunk_size != req.node_parser.chunk_size - or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap - ): - pl_change = True + if pl.node_parser.comp_subtype == NodeParserType.SIMPLE: + if (pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): + pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.SENTENCEWINDOW: - if pl.node_parser.window_size != req.node_parser.window_size: + if pl.node_parser.window_size != req.node_parser.window_size: pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.HIERARCHY: if pl.node_parser.chunk_sizes != req.node_parser.chunk_sizes: - pl_change = True + pl_change = True elif pl.node_parser.comp_subtype == NodeParserType.UNSTRUCTURED: - if ( - pl.node_parser.chunk_size != 
req.node_parser.chunk_size - or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap - ): - pl_change = True + if (pl.node_parser.chunk_size != req.node_parser.chunk_size + or pl.node_parser.chunk_overlap != req.node_parser.chunk_overlap): + pl_change = True except: return False return pl_change - class IndexerMgr(BaseMgr): def __init__(self): diff --git a/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py index 0278f1f6ac..8ca1d07f00 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/filemgr.py @@ -20,7 +20,7 @@ def add_text(self, text: str): self.add(file) return file.documents - def add_files(self, docs: Any): + def add_files(self, docs: Any, docs_name:str = "default"): if not isinstance(docs, list): docs = [docs] @@ -41,38 +41,31 @@ def add_files(self, docs: Any): for file_path in files: file = File(file_path=file_path) - self.add(file) + self.append(file, docs_name) input_docs.extend(file.documents) - return input_docs - def get_file_by_name_or_id(self, name: str): - for _, file in self.components.items(): - if file.name == name or file.idx == name: - return file + def get_file_by_name(self, docs_name:str = "default", file_path: str = None): + for name, files in self.components.items(): + if docs_name == name: + for file in files: + if file_path == file.documents[0].metadata["file_path"]: + return file.documents return None - def get_files(self): - return [file for _, file in self.components.items()] + def get_kb_files_by_name(self, docs_name:str = "default"): + file_docs = [] + for name, files in self.components.items(): + if name == docs_name: + return files + return file_docs def get_all_docs(self) -> List[Document]: - all_docs = [] - for _, file in self.components.items(): - all_docs.extend(file.documents) + all_docs = {} + for doc_name, files in self.components.items(): + all_docs[doc_name] = files return all_docs - def get_docs_by_file(self, 
name) -> List[Document]: - file = self.get_file_by_name_or_id(name) - return file.documents if file else [] - - def del_file(self, name): - file = self.get_file_by_name_or_id(name) - if file: - self.remove(file.idx) - return True - else: - return False - def update_file(self, name): file = self.get_file_by_name_or_id(name) if file: @@ -81,3 +74,20 @@ def update_file(self, name): return True else: return False + + def del_kb_file(self, docs_name:str = "default"): + files = self.get_kb_files_by_name(docs_name) + if files: + self.remove(docs_name) + + def del_file(self, docs_name:str = "default", file_path: str= None ): + files = self.get_file_by_name(docs_name, file_path) + docs_list = [] + for docs_file in files: + docs_list.append(docs_file.id_) + files = self.get_kb_files_by_name(docs_name) + for docs_file in files: + if file_path == docs_file.documents[0].metadata["file_path"]: + self.components[docs_name].remove(docs_file) + return docs_list + return None diff --git a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py index d6dbba3ead..4eabfcc021 100644 --- a/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py +++ b/EdgeCraftRAG/edgecraftrag/controllers/knowledge_basemgr.py @@ -50,34 +50,26 @@ def active_experience(self, knowledge: KnowledgeBaseCreateIn): if kb.comp_type != "experience": raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Knowledge type cannot be active") self.active_experience_idx = kb.idx if knowledge.experience_active else None - if kb.experience_active != knowledge.experience_active: + if kb.experience_active != knowledge.experience_active: for idx, comp in self.components.items(): if isinstance(comp, Knowledge): comp.experience_active = idx == self.active_experience_idx return kb + def create_knowledge_base(self, knowledge: KnowledgeBaseCreateIn) -> Knowledge: for _, kb in self.components.items(): if kb.name == knowledge.name: raise 
HTTPException(status_code=status.HTTP_409_CONFLICT, detail="The knowledge base already exists.") - if knowledge.comp_type == "experience": + if knowledge.comp_type == "experience": for idx, kb in self.components.items(): - if kb.comp_type == "experience": - raise HTTPException( - status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created." - ) + if kb.comp_type =='experience': + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail="Only one experience class can be created.") if knowledge.comp_type == "experience": knowledge.active = False if knowledge.active is None: knowledge.active = False - kb = Knowledge( - name=knowledge.name, - description=knowledge.description, - active=knowledge.active, - comp_type=knowledge.comp_type, - comp_subtype=knowledge.comp_subtype, - experience_active=knowledge.experience_active, - ) + kb = Knowledge(name=knowledge.name, description=knowledge.description, active=knowledge.active, comp_type=knowledge.comp_type, comp_subtype=knowledge.comp_subtype, experience_active=knowledge.experience_active) self.add(kb) if knowledge.active: self.active_knowledge(knowledge) @@ -97,11 +89,11 @@ def update_knowledge_base(self, knowledge) -> Knowledge: kb.description = knowledge.description if knowledge.active is not None and kb.active != knowledge.active: kb = self.active_knowledge(knowledge) - if kb.comp_type == "experience": - if knowledge.description is not None: - kb.description = knowledge.description - if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: - kb = self.active_experience(knowledge) + if kb.comp_type == "experience": + if knowledge.description is not None: + kb.description = knowledge.description + if knowledge.experience_active is not None and kb.experience_active != knowledge.experience_active: + kb = self.active_experience(knowledge) return "Knowledge base update successfully" def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: @@ 
-112,5 +104,6 @@ def get_all_knowledge_bases(self) -> List[Dict[str, Any]]: def get_experience_kb(self): for idx, kb in self.components.items(): - if kb.comp_type == "experience": + if kb.comp_type =='experience': return kb + diff --git a/EdgeCraftRAG/edgecraftrag/requirements.txt b/EdgeCraftRAG/edgecraftrag/requirements.txt index 289ba3ef4d..3dd0877a82 100755 --- a/EdgeCraftRAG/edgecraftrag/requirements.txt +++ b/EdgeCraftRAG/edgecraftrag/requirements.txt @@ -3,8 +3,6 @@ EbookLib>=0.18 faiss-cpu>=1.8.0.post1 html2text>=2025.4.15 langchain-core==0.3.60 -langchain-milvus -langchain-openai llama-index==0.12.36 llama-index-core==0.12.37 llama-index-embeddings-openvino==0.5.2 @@ -25,3 +23,5 @@ python-docx==1.1.2 unstructured unstructured[pdf] werkzeug==3.1.3 +langchain-openai +langchain-milvus \ No newline at end of file diff --git a/EdgeCraftRAG/edgecraftrag/utils.py b/EdgeCraftRAG/edgecraftrag/utils.py index bbc6434e98..1eef20f8c2 100755 --- a/EdgeCraftRAG/edgecraftrag/utils.py +++ b/EdgeCraftRAG/edgecraftrag/utils.py @@ -89,10 +89,6 @@ def compare_mappings(new_dict, old_dict): deleted = {name: old_files[name] for name in set(old_files) - set(new_files)} if deleted: deleted_files[key] = deleted - - for key in list(added_files.keys()): - if key in deleted_files: - del added_files[key] return added_files, deleted_files diff --git a/EdgeCraftRAG/tools/quick_start.sh b/EdgeCraftRAG/tools/quick_start.sh index 01da5fcc53..0976425429 100755 --- a/EdgeCraftRAG/tools/quick_start.sh +++ b/EdgeCraftRAG/tools/quick_start.sh @@ -71,10 +71,10 @@ function start_vllm_services() { for (( x=0; x ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + +function quick_start_vllm_B60_services() { + WORKPATH=$(dirname "$PWD") + 
COMPOSE_FILE="compose_vllm_b60.yaml" + EC_RAG_SERVICE_PORT=16010 + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE down + + export HOST_IP=${HOST_IP:-"${ip_address}"} + export MODEL_PATH=${MODEL_PATH:-"${PWD}/models"} + export DOC_PATH=${DOC_PATH:-"$WORKPATH/tests"} + export TMPFILE_PATH=${TMPFILE_PATH:-"$WORKPATH/tests"} + export MILVUS_ENABLED=${MILVUS_ENABLED:-1} + export CHAT_HISTORY_ROUND=${CHAT_HISTORY_ROUND:-2} + export LLM_MODEL=${LLM_MODEL:-Qwen/Qwen3-72B} + export VIDEOGROUPID=$(getent group video | cut -d: -f3) + export RENDERGROUPID=$(getent group render | cut -d: -f3) + # export vllm ENV + export DP=${DP:-4} + export TP=${TP:-1} + export DTYPE=${DTYPE:-float16} + export ZE_AFFINITY_MASK=${ZE_AFFINITY_MASK:-0,1,2,3} + export ENFORCE_EAGER=${ENFORCE_EAGER:-1} + export TRUST_REMOTE_CODE=${TRUST_REMOTE_CODE:-1} + export DISABLE_SLIDING_WINDOW=${DISABLE_SLIDING_WINDOW:-1} + export GPU_MEMORY_UTIL=${GPU_MEMORY_UTIL:-0.8} + export NO_ENABLE_PREFIX_CACHING=${NO_ENABLE_PREFIX_CACHING:-1} + export MAX_NUM_BATCHED_TOKENS=${MAX_NUM_BATCHED_TOKENS:-8192} + export DISABLE_LOG_REQUESTS=${disable_LOG_REQUESTS:-1} + export MAX_MODEL_LEN=${MAX_MODEL_LEN:-49152} + export BLOCK_SIZE=${BLOCK_SIZE:-64} + export QUANTIZATION=${QUANTIZATION:-fp8} + + + check_baai_folder + export no_proxy="localhost, 127.0.0.1, 192.168.1.1, ${HOST_IP}" + sudo chown -R 1000:1000 ${MODEL_PATH} ${DOC_PATH} ${TMPFILE_PATH} + docker compose -f $WORKPATH/docker_compose/intel/gpu/arc/$COMPOSE_FILE up -d + echo "ipex-llm-serving-xpu is booting, please wait..." 
+ n=0 + until [[ "$n" -ge 100 ]]; do + docker logs ipex-llm-serving-xpu-container-0 > ipex-llm-serving-xpu-container.log 2>&1 + if grep -q "Starting vLLM API server on http://0.0.0.0:" ipex-llm-serving-xpu-container.log; then + break + fi + sleep 6s + n=$((n+1)) + done + rm -rf ipex-llm-serving-xpu-container.log + echo "service launched, please visit UI at ${HOST_IP}:8082" +} + + function main { if [[ $- == *i* ]]; then - read -p "Do you want to start vLLM or local OpenVINO services? (vLLM/ov) [vLLM]: " user_input - user_input=${user_input:-"vLLM"} - if [ "$user_input" == "vLLM" ]; then + read -p "Do you want to start vLLM or local OpenVINO services? (vLLM_A770/vLLM_B60/ov) [vLLM_A770]: " user_input + user_input=${user_input:-"vLLM_A770"} + if [[ "$user_input" == "vLLM_A770" ]]; then start_vllm_services + elif [[ "$user_input" == "vLLM_B60" ]]; then + start_vLLM_B60_services else start_services fi else - export SERVICE_TYPE=${SERVICE_TYPE:-"vLLM"} - if [ "$SERVICE_TYPE" == "vLLM" ]; then + export SERVICE_TYPE=${SERVICE_TYPE:-"vLLM_A770"} + if [[ "$SERVICE_TYPE" == "vLLM_A770" || "$SERVICE_TYPE" == "vLLM" ]]; then quick_start_vllm_services + elif [[ "$SERVICE_TYPE" == "vLLM_B60" || "$SERVICE_TYPE" == "vLLM_b60" ]]; then + quick_start_vllm_B60_services else quick_start_ov_services fi diff --git a/EdgeCraftRAG/ui/vue/components.d.ts b/EdgeCraftRAG/ui/vue/components.d.ts index 599bc31cb8..bf19897b66 100644 --- a/EdgeCraftRAG/ui/vue/components.d.ts +++ b/EdgeCraftRAG/ui/vue/components.d.ts @@ -1,11 +1,8 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - /* eslint-disable */ // @ts-nocheck // Generated by unplugin-vue-components // Read more: https://github.com/vuejs/core/pull/3399 -export {}; +export {} /* prettier-ignore */ declare module 'vue' { diff --git a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts index 0c4d3cdb5a..e8981e0f9a 100644 --- a/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts 
+++ b/EdgeCraftRAG/ui/vue/src/api/chatbot/index.ts @@ -27,3 +27,10 @@ export const getBenchmark = () => { method: "get", }); }; + +export const requestStopChat = () => { + return request({ + url: `/v1/chatqna/stop`, + method: "post", + }); +}; diff --git a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts index fe300d6b33..94976fbe78 100644 --- a/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/knowledgeBase/index.ts @@ -85,7 +85,10 @@ export const requestExperienceCreate = (data: EmptyArrayType) => { showLoading: true, }); }; -export const requestExperienceConfirm = (flag: Boolean, data: EmptyArrayType) => { +export const requestExperienceConfirm = ( + flag: Boolean, + data: EmptyArrayType +) => { return request({ url: `/v1/multiple_experiences/confirm?flag=${flag}`, method: "post", diff --git a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts index 335908b6c9..bf12c4f331 100644 --- a/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts +++ b/EdgeCraftRAG/ui/vue/src/api/pipeline/index.ts @@ -106,4 +106,6 @@ export const requestUrlVllm = (data: Object) => { }); }; -export const importUrl = `${import.meta.env.VITE_API_URL}v1/settings/pipelines/import`; +export const importUrl = `${ + import.meta.env.VITE_API_URL +}v1/settings/pipelines/import`; diff --git a/EdgeCraftRAG/ui/vue/src/api/request.ts b/EdgeCraftRAG/ui/vue/src/api/request.ts index 44f6cf2051..ce1ee0ca5e 100644 --- a/EdgeCraftRAG/ui/vue/src/api/request.ts +++ b/EdgeCraftRAG/ui/vue/src/api/request.ts @@ -27,7 +27,7 @@ service.interceptors.request.use( }, (error) => { return Promise.reject(error); - }, + } ); // response interceptor @@ -40,7 +40,11 @@ service.interceptors.response.use( const antNotification = serviceManager.getService("antNotification"); if (antNotification) - antNotification("success", i18n.global.t("common.success"), i18n.global.t(config.successMsg)); + 
antNotification( + "success", + i18n.global.t("common.success"), + i18n.global.t(config.successMsg) + ); } return Promise.resolve(res); }, @@ -56,10 +60,11 @@ service.interceptors.response.use( errorMessage = error.message; } const antNotification = serviceManager.getService("antNotification"); - if (antNotification) antNotification("error", i18n.global.t("common.error"), errorMessage); + if (antNotification) + antNotification("error", i18n.global.t("common.error"), errorMessage); return Promise.reject(error); - }, + } ); export default service; diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css index 5163bc195e..e62f3bfbba 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.css @@ -1,9 +1,8 @@ @font-face { font-family: "iconfont"; /* Project id 4784207 */ - src: - url("iconfont.woff2?t=1757469597873") format("woff2"), - url("iconfont.woff?t=1757469597873") format("woff"), - url("iconfont.ttf?t=1757469597873") format("truetype"); + src: url('iconfont.woff2?t=1757469597873') format('woff2'), + url('iconfont.woff?t=1757469597873') format('woff'), + url('iconfont.ttf?t=1757469597873') format('truetype'); } .iconfont { @@ -241,3 +240,4 @@ .icon-active:before { content: "\e795"; } + diff --git a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js index 5e96151e2e..e5e61851fd 100644 --- a/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js +++ b/EdgeCraftRAG/ui/vue/src/assets/iconFont/iconfont.js @@ -1,68 +1 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -(window._iconfont_svg_string_4784207 = - ''), - ((c) => { - var l = (a = (a = document.getElementsByTagName("script"))[a.length - 1]).getAttribute("data-injectcss"), - a = a.getAttribute("data-disable-injectsvg"); - if (!a) { - var h, - t, - i, - o, - v, - m = function (l, a) { - 
a.parentNode.insertBefore(l, a); - }; - if (l && !c.__iconfont__svg__cssinject__) { - c.__iconfont__svg__cssinject__ = !0; - try { - document.write( - "", - ); - } catch (l) { - console && console.log(l); - } - } - (h = function () { - var l, - a = document.createElement("div"); - (a.innerHTML = c._iconfont_svg_string_4784207), - (a = a.getElementsByTagName("svg")[0]) && - (a.setAttribute("aria-hidden", "true"), - (a.style.position = "absolute"), - (a.style.width = 0), - (a.style.height = 0), - (a.style.overflow = "hidden"), - (a = a), - (l = document.body).firstChild ? m(a, l.firstChild) : l.appendChild(a)); - }), - document.addEventListener - ? ~["complete", "loaded", "interactive"].indexOf(document.readyState) - ? setTimeout(h, 0) - : ((t = function () { - document.removeEventListener("DOMContentLoaded", t, !1), h(); - }), - document.addEventListener("DOMContentLoaded", t, !1)) - : document.attachEvent && - ((i = h), - (o = c.document), - (v = !1), - s(), - (o.onreadystatechange = function () { - "complete" == o.readyState && ((o.onreadystatechange = null), e()); - })); - } - function e() { - v || ((v = !0), i()); - } - function s() { - try { - o.documentElement.doScroll("left"); - } catch (l) { - return void setTimeout(s, 50); - } - e(); - } - })(window); +window._iconfont_svg_string_4784207='',(c=>{var l=(a=(a=document.getElementsByTagName("script"))[a.length-1]).getAttribute("data-injectcss"),a=a.getAttribute("data-disable-injectsvg");if(!a){var h,t,i,o,v,m=function(l,a){a.parentNode.insertBefore(l,a)};if(l&&!c.__iconfont__svg__cssinject__){c.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(l){console&&console.log(l)}}h=function(){var 
l,a=document.createElement("div");a.innerHTML=c._iconfont_svg_string_4784207,(a=a.getElementsByTagName("svg")[0])&&(a.setAttribute("aria-hidden","true"),a.style.position="absolute",a.style.width=0,a.style.height=0,a.style.overflow="hidden",a=a,(l=document.body).firstChild?m(a,l.firstChild):l.appendChild(a))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(t=function(){document.removeEventListener("DOMContentLoaded",t,!1),h()},document.addEventListener("DOMContentLoaded",t,!1)):document.attachEvent&&(i=h,o=c.document,v=!1,s(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,e())})}function e(){v||(v=!0,i())}function s(){try{o.documentElement.doScroll("left")}catch(l){return void setTimeout(s,50)}e()}})(window); \ No newline at end of file diff --git a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts index d6fd8da012..f6e2bab3ce 100644 --- a/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts +++ b/EdgeCraftRAG/ui/vue/src/auto-imports.d.ts @@ -1,6 +1,3 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - /* eslint-disable */ /* prettier-ignore */ // @ts-nocheck @@ -9,98 +6,83 @@ // biome-ignore lint: disable export {} declare global { - const EffectScope: (typeof import("vue"))["EffectScope"]; - const acceptHMRUpdate: (typeof import("pinia"))["acceptHMRUpdate"]; - const computed: (typeof import("vue"))["computed"]; - const createApp: (typeof import("vue"))["createApp"]; - const createPinia: (typeof import("pinia"))["createPinia"]; - const customRef: (typeof import("vue"))["customRef"]; - const defineAsyncComponent: (typeof import("vue"))["defineAsyncComponent"]; - const defineComponent: (typeof import("vue"))["defineComponent"]; - const defineStore: (typeof import("pinia"))["defineStore"]; - const effectScope: (typeof import("vue"))["effectScope"]; - const getActivePinia: (typeof 
import("pinia"))["getActivePinia"]; - const getCurrentInstance: (typeof import("vue"))["getCurrentInstance"]; - const getCurrentScope: (typeof import("vue"))["getCurrentScope"]; - const h: (typeof import("vue"))["h"]; - const inject: (typeof import("vue"))["inject"]; - const isProxy: (typeof import("vue"))["isProxy"]; - const isReactive: (typeof import("vue"))["isReactive"]; - const isReadonly: (typeof import("vue"))["isReadonly"]; - const isRef: (typeof import("vue"))["isRef"]; - const mapActions: (typeof import("pinia"))["mapActions"]; - const mapGetters: (typeof import("pinia"))["mapGetters"]; - const mapState: (typeof import("pinia"))["mapState"]; - const mapStores: (typeof import("pinia"))["mapStores"]; - const mapWritableState: (typeof import("pinia"))["mapWritableState"]; - const markRaw: (typeof import("vue"))["markRaw"]; - const nextTick: (typeof import("vue"))["nextTick"]; - const onActivated: (typeof import("vue"))["onActivated"]; - const onBeforeMount: (typeof import("vue"))["onBeforeMount"]; - const onBeforeRouteLeave: (typeof import("vue-router"))["onBeforeRouteLeave"]; - const onBeforeRouteUpdate: (typeof import("vue-router"))["onBeforeRouteUpdate"]; - const onBeforeUnmount: (typeof import("vue"))["onBeforeUnmount"]; - const onBeforeUpdate: (typeof import("vue"))["onBeforeUpdate"]; - const onDeactivated: (typeof import("vue"))["onDeactivated"]; - const onErrorCaptured: (typeof import("vue"))["onErrorCaptured"]; - const onMounted: (typeof import("vue"))["onMounted"]; - const onRenderTracked: (typeof import("vue"))["onRenderTracked"]; - const onRenderTriggered: (typeof import("vue"))["onRenderTriggered"]; - const onScopeDispose: (typeof import("vue"))["onScopeDispose"]; - const onServerPrefetch: (typeof import("vue"))["onServerPrefetch"]; - const onUnmounted: (typeof import("vue"))["onUnmounted"]; - const onUpdated: (typeof import("vue"))["onUpdated"]; - const onWatcherCleanup: (typeof import("vue"))["onWatcherCleanup"]; - const provide: (typeof 
import("vue"))["provide"]; - const reactive: (typeof import("vue"))["reactive"]; - const readonly: (typeof import("vue"))["readonly"]; - const ref: (typeof import("vue"))["ref"]; - const resolveComponent: (typeof import("vue"))["resolveComponent"]; - const setActivePinia: (typeof import("pinia"))["setActivePinia"]; - const setMapStoreSuffix: (typeof import("pinia"))["setMapStoreSuffix"]; - const shallowReactive: (typeof import("vue"))["shallowReactive"]; - const shallowReadonly: (typeof import("vue"))["shallowReadonly"]; - const shallowRef: (typeof import("vue"))["shallowRef"]; - const storeToRefs: (typeof import("pinia"))["storeToRefs"]; - const toRaw: (typeof import("vue"))["toRaw"]; - const toRef: (typeof import("vue"))["toRef"]; - const toRefs: (typeof import("vue"))["toRefs"]; - const toValue: (typeof import("vue"))["toValue"]; - const triggerRef: (typeof import("vue"))["triggerRef"]; - const unref: (typeof import("vue"))["unref"]; - const useAttrs: (typeof import("vue"))["useAttrs"]; - const useCssModule: (typeof import("vue"))["useCssModule"]; - const useCssVars: (typeof import("vue"))["useCssVars"]; - const useId: (typeof import("vue"))["useId"]; - const useLink: (typeof import("vue-router"))["useLink"]; - const useModel: (typeof import("vue"))["useModel"]; - const useRoute: (typeof import("vue-router"))["useRoute"]; - const useRouter: (typeof import("vue-router"))["useRouter"]; - const useSlots: (typeof import("vue"))["useSlots"]; - const useTemplateRef: (typeof import("vue"))["useTemplateRef"]; - const watch: (typeof import("vue"))["watch"]; - const watchEffect: (typeof import("vue"))["watchEffect"]; - const watchPostEffect: (typeof import("vue"))["watchPostEffect"]; - const watchSyncEffect: (typeof import("vue"))["watchSyncEffect"]; + const EffectScope: typeof import('vue')['EffectScope'] + const acceptHMRUpdate: typeof import('pinia')['acceptHMRUpdate'] + const computed: typeof import('vue')['computed'] + const createApp: typeof 
import('vue')['createApp'] + const createPinia: typeof import('pinia')['createPinia'] + const customRef: typeof import('vue')['customRef'] + const defineAsyncComponent: typeof import('vue')['defineAsyncComponent'] + const defineComponent: typeof import('vue')['defineComponent'] + const defineStore: typeof import('pinia')['defineStore'] + const effectScope: typeof import('vue')['effectScope'] + const getActivePinia: typeof import('pinia')['getActivePinia'] + const getCurrentInstance: typeof import('vue')['getCurrentInstance'] + const getCurrentScope: typeof import('vue')['getCurrentScope'] + const h: typeof import('vue')['h'] + const inject: typeof import('vue')['inject'] + const isProxy: typeof import('vue')['isProxy'] + const isReactive: typeof import('vue')['isReactive'] + const isReadonly: typeof import('vue')['isReadonly'] + const isRef: typeof import('vue')['isRef'] + const mapActions: typeof import('pinia')['mapActions'] + const mapGetters: typeof import('pinia')['mapGetters'] + const mapState: typeof import('pinia')['mapState'] + const mapStores: typeof import('pinia')['mapStores'] + const mapWritableState: typeof import('pinia')['mapWritableState'] + const markRaw: typeof import('vue')['markRaw'] + const nextTick: typeof import('vue')['nextTick'] + const onActivated: typeof import('vue')['onActivated'] + const onBeforeMount: typeof import('vue')['onBeforeMount'] + const onBeforeRouteLeave: typeof import('vue-router')['onBeforeRouteLeave'] + const onBeforeRouteUpdate: typeof import('vue-router')['onBeforeRouteUpdate'] + const onBeforeUnmount: typeof import('vue')['onBeforeUnmount'] + const onBeforeUpdate: typeof import('vue')['onBeforeUpdate'] + const onDeactivated: typeof import('vue')['onDeactivated'] + const onErrorCaptured: typeof import('vue')['onErrorCaptured'] + const onMounted: typeof import('vue')['onMounted'] + const onRenderTracked: typeof import('vue')['onRenderTracked'] + const onRenderTriggered: typeof import('vue')['onRenderTriggered'] + const 
onScopeDispose: typeof import('vue')['onScopeDispose'] + const onServerPrefetch: typeof import('vue')['onServerPrefetch'] + const onUnmounted: typeof import('vue')['onUnmounted'] + const onUpdated: typeof import('vue')['onUpdated'] + const onWatcherCleanup: typeof import('vue')['onWatcherCleanup'] + const provide: typeof import('vue')['provide'] + const reactive: typeof import('vue')['reactive'] + const readonly: typeof import('vue')['readonly'] + const ref: typeof import('vue')['ref'] + const resolveComponent: typeof import('vue')['resolveComponent'] + const setActivePinia: typeof import('pinia')['setActivePinia'] + const setMapStoreSuffix: typeof import('pinia')['setMapStoreSuffix'] + const shallowReactive: typeof import('vue')['shallowReactive'] + const shallowReadonly: typeof import('vue')['shallowReadonly'] + const shallowRef: typeof import('vue')['shallowRef'] + const storeToRefs: typeof import('pinia')['storeToRefs'] + const toRaw: typeof import('vue')['toRaw'] + const toRef: typeof import('vue')['toRef'] + const toRefs: typeof import('vue')['toRefs'] + const toValue: typeof import('vue')['toValue'] + const triggerRef: typeof import('vue')['triggerRef'] + const unref: typeof import('vue')['unref'] + const useAttrs: typeof import('vue')['useAttrs'] + const useCssModule: typeof import('vue')['useCssModule'] + const useCssVars: typeof import('vue')['useCssVars'] + const useId: typeof import('vue')['useId'] + const useLink: typeof import('vue-router')['useLink'] + const useModel: typeof import('vue')['useModel'] + const useRoute: typeof import('vue-router')['useRoute'] + const useRouter: typeof import('vue-router')['useRouter'] + const useSlots: typeof import('vue')['useSlots'] + const useTemplateRef: typeof import('vue')['useTemplateRef'] + const watch: typeof import('vue')['watch'] + const watchEffect: typeof import('vue')['watchEffect'] + const watchPostEffect: typeof import('vue')['watchPostEffect'] + const watchSyncEffect: typeof 
import('vue')['watchSyncEffect'] } // for type re-export declare global { // @ts-ignore - export type { - Component, - ComponentPublicInstance, - ComputedRef, - DirectiveBinding, - ExtractDefaultPropTypes, - ExtractPropTypes, - ExtractPublicPropTypes, - InjectionKey, - PropType, - Ref, - MaybeRef, - MaybeRefOrGetter, - VNode, - WritableComputedRef, - } from "vue"; - import("vue"); + export type { Component, ComponentPublicInstance, ComputedRef, DirectiveBinding, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, MaybeRef, MaybeRefOrGetter, VNode, WritableComputedRef } from 'vue' + import('vue') } diff --git a/EdgeCraftRAG/ui/vue/src/components.d.ts b/EdgeCraftRAG/ui/vue/src/components.d.ts index 35e756d199..6ec287f7f1 100644 --- a/EdgeCraftRAG/ui/vue/src/components.d.ts +++ b/EdgeCraftRAG/ui/vue/src/components.d.ts @@ -1,11 +1,8 @@ -// Copyright (C) 2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - /* eslint-disable */ // @ts-nocheck // Generated by unplugin-vue-components // Read more: https://github.com/vuejs/core/pull/3399 -export {}; +export {} /* prettier-ignore */ declare module 'vue' { diff --git a/EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue b/EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue index 6208787793..b744f43e11 100644 --- a/EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue +++ b/EdgeCraftRAG/ui/vue/src/components/PartialLoading.vue @@ -48,6 +48,7 @@ const props = defineProps({ diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts index de0cc61809..655d935fa3 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/SseService.ts @@ -7,7 +7,7 @@ export const handleMessageSend = async ( url: string, postData: any, onDisplay: (data: any) => void, - onEnd?: () => void, + onEnd?: () => 
void ): Promise => { let reader: ReadableStreamDefaultReader | undefined; diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue index d1c82e06cb..5ba1d94094 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/KnowledgeBase/DetailComponent.vue @@ -56,6 +56,7 @@ const handleBack = () => { diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts b/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts index be52b40b62..6f853480c3 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/type.ts @@ -9,6 +9,8 @@ export interface Benchmark { export interface IMessage { role: string; content: string; + query?: string; + errorMessage?: string; benchmark?: Benchmark | undefined; } export interface ThinkType { diff --git a/EdgeCraftRAG/ui/vue/vite.config.ts b/EdgeCraftRAG/ui/vue/vite.config.ts index 23fda73b7b..ff27303aa5 100644 --- a/EdgeCraftRAG/ui/vue/vite.config.ts +++ b/EdgeCraftRAG/ui/vue/vite.config.ts @@ -66,7 +66,10 @@ const viteConfig = defineConfig((mode: ConfigEnv) => { preprocessorOptions: { less: { javascriptEnabled: true, - additionalData: `@import "${path.resolve(__dirname, "src/theme/index.less")}";`, + additionalData: `@import "${path.resolve( + __dirname, + "src/theme/index.less" + )}";`, }, }, }, From c6b1a219ab8a125e838fc91566dec8f507790b1d Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Fri, 26 Sep 2025 17:37:17 +0800 Subject: [PATCH 11/16] minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/Dockerfile.server | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/EdgeCraftRAG/Dockerfile.server b/EdgeCraftRAG/Dockerfile.server index 24af2affb9..7468cb97fe 100755 --- a/EdgeCraftRAG/Dockerfile.server +++ b/EdgeCraftRAG/Dockerfile.server @@ -3,18 +3,18 @@ FROM 
python:3.11-slim SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN apt-get update && apt-get install -y gnupg2 wget git RUN apt-get remove -y libze-intel-gpu1 libigc1 libigdfcl1 libze-dev || true; \ -    apt-get update; \ -    apt-get install -y curl + apt-get update; \ + apt-get install -y curl RUN curl -sL 'https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&op=get&search=0x0C0E6AF955CE463C03FC51574D098D70AFBE5E1F' | tee /etc/apt/trusted.gpg.d/driver.asc RUN echo -e "Types: deb\nURIs: https://ppa.launchpadcontent.net/kobuk-team/intel-graphics/ubuntu/\nSuites: plucky\nComponents: main\nSigned-By: /etc/apt/trusted.gpg.d/driver.asc" > /etc/apt/sources.list.d/driver.sources RUN apt update && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-dev intel-ocloc libze-intel-gpu-raytracing RUN useradd -m -s /bin/bash user && \ -    mkdir -p /home/user && \ -    chown -R user /home/user/ + mkdir -p /home/user && \ + chown -R user /home/user/ RUN mkdir /templates && \ -    chown -R user /templates + chown -R user /templates COPY ./edgecraftrag/prompt_template/default_prompt.txt /templates/ RUN chown -R user /templates/default_prompt.txt @@ -27,7 +27,7 @@ USER user WORKDIR /home/user/edgecraftrag RUN pip3 install --no-cache-dir --upgrade setuptools==70.0.0 --break-system-packages && \ -    pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt --break-system-packages + pip3 install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt --break-system-packages RUN pip3 install --no-cache-dir docarray==0.40.0 --break-system-packages From f8eaee4b509cf101265ae85d72ee68b3689de7f6 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Fri, 26 Sep 2025 17:42:17 +0800 Subject: [PATCH 12/16] minor fix Signed-off-by: Yongbozzz --- 
EdgeCraftRAG/Dockerfile.server | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/EdgeCraftRAG/Dockerfile.server b/EdgeCraftRAG/Dockerfile.server index 7468cb97fe..119b0ec3f9 100755 --- a/EdgeCraftRAG/Dockerfile.server +++ b/EdgeCraftRAG/Dockerfile.server @@ -7,7 +7,7 @@ RUN apt-get remove -y libze-intel-gpu1 libigc1 libigdfcl1 libze-dev || true; \ apt-get install -y curl RUN curl -sL 'https://keyserver.ubuntu.com/pks/lookup?fingerprint=on&op=get&search=0x0C0E6AF955CE463C03FC51574D098D70AFBE5E1F' | tee /etc/apt/trusted.gpg.d/driver.asc RUN echo -e "Types: deb\nURIs: https://ppa.launchpadcontent.net/kobuk-team/intel-graphics/ubuntu/\nSuites: plucky\nComponents: main\nSigned-By: /etc/apt/trusted.gpg.d/driver.asc" > /etc/apt/sources.list.d/driver.sources -RUN apt update && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-dev intel-ocloc libze-intel-gpu-raytracing +RUN apt-get update && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-intel-gpu1 libze1 intel-metrics-discovery intel-opencl-icd clinfo intel-gsc && apt-get install -y libze-dev intel-ocloc libze-intel-gpu-raytracing RUN useradd -m -s /bin/bash user && \ mkdir -p /home/user && \ From f8657506ea515983b033b919e689c448022d9c49 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Fri, 26 Sep 2025 18:25:51 +0800 Subject: [PATCH 13/16] generator minor fix Signed-off-by: Yongbozzz --- EdgeCraftRAG/edgecraftrag/components/generator.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/EdgeCraftRAG/edgecraftrag/components/generator.py b/EdgeCraftRAG/edgecraftrag/components/generator.py index 117865cf53..c14c9b02ed 100755 --- a/EdgeCraftRAG/edgecraftrag/components/generator.py +++ 
b/EdgeCraftRAG/edgecraftrag/components/generator.py @@ -177,18 +177,12 @@ def __init__(self, llm_model, prompt_template_file, inference_type, vllm_endpoin self.vllm_endpoint = vllm_endpoint def init_prompt(self, model_path, prompt_content=None, prompt_template_file=None, enable_think=False): - # using the prompt template enhancement strategy(only tested on Qwen2-7B-Instruction) if template_enhance_on is true - template_enhance_on = True if "Qwen2" in self.model_id else False if prompt_content: return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) elif prompt_template_file is None: print("There is no template file, using the default template.") prompt_template = get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) - return ( - DocumentedContextRagPromptTemplate.from_template(prompt_template) - if template_enhance_on - else prompt_template - ) + return prompt_template else: safe_root = "/templates" prompt_template_file = os.path.normpath(os.path.join(safe_root, prompt_template_file)) @@ -196,10 +190,7 @@ def init_prompt(self, model_path, prompt_content=None, prompt_template_file=None raise ValueError("Invalid template path") if not os.path.exists(prompt_template_file): raise ValueError("Template file not exists") - if template_enhance_on: - return DocumentedContextRagPromptTemplate.from_file(prompt_template_file) - else: - return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) + return get_prompt_template(model_path, prompt_content, prompt_template_file, enable_think) def set_prompt(self, prompt): if "{context}" not in prompt: From 209b6d2b62e8e90a1e7a44a4a19915e57330db45 Mon Sep 17 00:00:00 2001 From: Yongbozzz Date: Sun, 28 Sep 2025 10:41:13 +0800 Subject: [PATCH 14/16] minor fix Signed-off-by: Yongbozzz --- .../components/Chatbot/MessageItem.vue | 20 ++++++++++++++----- .../KnowledgeBase/KnowledgeDetail.vue | 5 ++++- 2 files changed, 19 insertions(+), 6 
deletions(-) diff --git a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/MessageItem.vue b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/MessageItem.vue index 15ebddfb40..fffa99e963 100644 --- a/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/MessageItem.vue +++ b/EdgeCraftRAG/ui/vue/src/views/chatbot/components/Chatbot/MessageItem.vue @@ -57,7 +57,7 @@
@@ -142,7 +147,7 @@ {{ message.content }}