From 69e4a42ee82bb293147a32aa07fc6c4cf64a7cd3 Mon Sep 17 00:00:00 2001 From: DC* Date: Fri, 14 Aug 2020 22:14:51 -0300 Subject: [PATCH 01/10] Added gradle wrapper + gradle-witness --- .gitignore | 1 + gradle/gradle-witness.jar | Bin 0 -> 23967 bytes gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 56177 bytes gradle/wrapper/gradle-wrapper.properties | 5 + gradlew | 172 +++++++++++++++++++++++ gradlew.bat | 84 +++++++++++ 6 files changed, 262 insertions(+) create mode 100644 gradle/gradle-witness.jar create mode 100644 gradle/wrapper/gradle-wrapper.jar create mode 100644 gradle/wrapper/gradle-wrapper.properties create mode 100755 gradlew create mode 100644 gradlew.bat diff --git a/.gitignore b/.gitignore index 80fe98dc..2e5012c9 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ javadoc lib tmp run-test +/.gradle diff --git a/gradle/gradle-witness.jar b/gradle/gradle-witness.jar new file mode 100644 index 0000000000000000000000000000000000000000..1006badec20c807813445f66c246554bd131963e GIT binary patch literal 23967 zcmb@tWpEu~lBO%mVjVFaF*7q;%*@Qp%wW+GGguZgGcz;GVkV23$&#)+dU|%J_r^x- z&Q?TKsTKEeu?|n0r>Yu|DPS@gyf~f#Z*)oeYQzyHiXzV#o+ z{#%HLm8-p(%l{hM&*M9T2?YkW000A1{NIOa{PPG!TQ>_UdolwfM@L&P0~1>Z7dK}! zwtpF1|1!84Ol*x@T&mPhJkZq8KfBn>ew`@xH>#opf(I5`5h3TEOX^6|%gK;y(y8sc zvPePy`pLzbr#b|QHHfb<=+}3k^94zjJ2K}tUe7a<)54qiuUT@(t0j!BeoK_btVPvCYH_kyysnS&Y_j|A&;wk3*K3YvZcJxmQY>VE`lu=Zv$`^Ryxh ziexBhfTzV{21octmhUq-@Gwl1G8q@r3?sD^nOx;~8hkNG0Q^N&3yJx2dRUTjU=Z=T z5}uyj40ZFSnW-#DGQZMFn?mBqbv``B(t`=64BhG2%EC}WG9s%zkGBJeGeM;!UgZx8 z&8s|goJwjhXwpsdX|ESDyE512AOY zM&p8UR2-^xm;XRUHqz{E;bkwGVHvZxkQGK>T(lh4zKuSmm{{tFQfa%7FycRI3MA>_ z{~n_&mk?}?NLHUCW67wQYR(1l>5nK&SvaXaqHv^bb_!ev`o_$GxwEJiHu0q#)31pP zq;i)74n{wqVfs{PPl`R_4yo+dSm+QrtZ-e}VM{&xUAwZ)HH?IlZl>7#D%<+mgVDc z^&)q#<6vGR2eVn_Eq`$jwE(WVKFJ9QqlSs%4Gs|OCzhik7Q~Pe>@EuVnW&C=B5?$nC&n3(etycr|tMrV-4ekvW2yhOZsjFWFo*Oj_Pkk>#2i!Ap7RWDQ8ELP_-G!~)-{N$85xK$N z68vNRk5$kPux}>yZI;-isg}JLUBi1-iT+60~SmvM$K;{ye;|rr=CpG;j z(r&%m2Z=ta6C9%q)ehJ42iritGvw|#-++}t0XB6u*7A&>b7vrDh2%|(rGME*FY>^t zBM{euJBWneM5ebRSqwOTTBI9)EifJFiA2am*CDoI(_ZM)G%3(vSjbB^QOIzOA~FCl z?<(F`%2aIPV`TY4$Qo`hwc4DxXx?61S{kBoT^4@#Yzx{ihCG8uIl3l4Fyn2DoMB4x zov+d4xylLSBDzCu&wIcW2eDPOvgW*&c~?q|Vf73ZYSpURESmhqJ z&1U6Bo5UOD1pDrTH=-kRM>8;x`GO%~e(C&PoRq-dwfc_sC%xN!ZsGnek0tPROVZ_@ z=CW&agsPi(WhYh@CCZ5~r>0Rm=nZy6L&oZ$fL+fyvhsr@F!KXYcN#H1v6oQV$w!ca zd#oiiBkip|hZZ?ulkZGg)ql@6lo2q@Ls%T-3He*@l9tASVYlS6+3`?EnyOq0sb|8k z4R^owatpSV_@R)-vE6jk>f9aSCDj0Md{J@*?^_=9E+ScP(3_o0SY=6zvn-Cs6*oMJ z!AJGRK!p5L@amcTo;=;b@XL0V;HTgo%WHa#q2b2EF68oNcX~!Ch9i^akd8&dHi@^3 zgnB|l`>I(XLwh9TwP*wx%+*m&Z+uh{vT_HtG4vSQ^p2pYK7~ahidP$-Y;9jCt#0eD z&<5#Y^Z&1HsJJefMx|9%YgTv>llx{@L~1!f7^FI*#4}5J4J6a?|f`Os^Wst=G3kLlYJw@v89`mZ${`NYOKSGsQ z1W^L>g55TAnNY$2uE_`^?SZaAUz^NN?65`?(+hD26b2dpEPeKELwh#w#_lON#|EHD z_&Fq|y3yjrKb#*PZaoQH-T80*d7p6z0Q3CvjrD$Ao|>Q4s@3eWwV`^XWaB=1S8Ky< z{wV0kszI5h^08B$A)~=-vvD(W07;dy3l&?$W~}bwWVNwOtH_mNB>~y&81uPZZzoB6 zNG?oSe&{xuI$NNR{?d|waY4S)XhpNc-N7*=@97gY1=&JVr&C012)<@M8GplFmD4(?pNevwrI(Dro!#PSpw(X^VRmlcM#cV_v*VpMOe097Heb z<`ufsOhp!vLPOp;h>Ychg=b&Yz{*v;gRGxA2M@mTSzCMHIzr_$V$RP%_6o24Gf}i3 z#kqtEWQNy)qi$}p1gGCu!4jKq(8trrmT;kJGA2}CTzxq%s@{Rn zPht+w18Wsm>tjNUjH#6LKZRn|_8LWXilz_?Y-v1uSnRR`A}FU!uZmp(7@fmZ)Dp7i zJ@wLjK*9?p8U7+lOpz)i9}Y~m_RIM4^i960GRh}_>ohn;aBj9N0lFAt_TdxD0DHvYH=3?)*1aI-bQv!WZ@$^?}S4bj7J{#!~%nu1Xko z7OEM0Z~}uEK*AkT(8Uv^gq^`X7bD^tMg`Lj7YD`-CBTlhSKN*221Gle?Nk+i#*h-Ma z;Dtm(m1pYXpe^nc30nzc9CA6y!Y@`iMbgdF$838y==YWf*UjN-m_J6a`1@rGeC?RC zezX#@D5`WUAiXhg^0W##L^@7?WJmffk@%=f!K>9mHQzbNXGj~f&fj{(&8C_%l7 
z;ldn4hfYMjanH}vlKj0j#AC$DBp@s&CQ%+t5;x>k0rp$?y)rSpjE;!?)#?sSMs+MF zXFebJk&Z)VmZLXWLFEpcwlH`r;x%9+$$s5<9oz<-rn_lOWuoch8RIk6{d z_YSowW(w2`-^hk%bw+F$tO)M&oqs#&z1Ay@@^tWoJ>zKxz5`QO(G;4@J8BR5n=-0Dx;&93U#U?*I4^82|~iMj-wy2aRM z2}Z%qC8Gm0;xbQZjg4cjwSG)!BuTgM4blmOb3!^08qvt5;TWTvP!gS(vUbK3)_P)R z16;^8h7DG})xc(uoQdAU%syghk?W4ht8P07W}CTV#5lK&O!ET9TrExc49N6SO-hQ* zPC65(5<%SzQ0}|~`4_2nURi*whEB4d)T_2~`=0&T6b7z7qmr9r#`c$YU@dALy%sHjv=wuI3*+$)(F zRCV&HBL|^ww^Zi2aZv~B0+%e5?C`UV{Qgz*Sg6{V*Z0m*_f0BRElj3pcS4F>q`MBm1DLWMkvZ?m_$WZ%=XpFLd;F^ zx6>pG)RY_1riWrdyiI>*td#vggyh9eO{Dxf|1$4b#Y8aRM#EE=K0cZ}cCM*O4-%UX zJqsP_fTR+oVFz#d>a;<07-I3O3+r&i>?R? zDGlPlBZfjpd0xm@<9+)+1<{uR@v;$2c1$^#_G&v03zvshPG41-8Bjq=aZ| zLCAx9162nI#KM;_>Qj22F84#_Gjs7veTF4zT>$7X`bgVdhy!5lo_c|C!9|d(GmPNE z6&ohGRX=4+E)la*cc-4WCyhflaszV_>SBr)hmsP)-RtPKM`xKG!+XFjH9lTxe8;bG z-3WSf>vUWVG(eAr*;mOFkS4VGKvE++~J8;&u6PEb$6ftHOU#=Q^QQG!G|M*`c*M4lJb)r<*=fR$5iA zJ_Y}|L^es31*>pmW zpmT`U)yQp2CeapCWUQ^R`1P^%Q|tFmwuMwp;}+^vw`FEzmsdWl1q6 zpT2UZx^mXJWegx*N21wA+S$hJT$YE?l5k2&C2Za=w41M#sS%4(Tp}k)wb~=Qv11!> zDWYpl0R{SfM%EgG%Yoe(_vlMlNfuq_WU(JaM$rX4^vckmlpGO02MicEN(QwZVoTt& z=?lvzNal&J;@tMX2#f3V0+NTPUiaDDCmwLEGL?n#FsRy8mpHr%zCFNapA^Ms^TwEn zc$0Al{ul;#dJQ8&zJjx*kl#5b)Rf=n(tpW9y#IjpL#=tp9=;*wd8HT<0s4Ai!XX3{ zsQ1;ib8QX01@^luQz1N(&I;LNeKAG(ydnIr&}5!@nCtrsO++LxFs=W4XtMsRQOEu-gX3QY=RbMcr2e9@ zp^Dw*U`{m82AOV5%aB4tKOmST+(fgPhs!d1sz{@XFLL;$j2M^%2!HEd_zP3N5(e}O zr|9UHXVI4M>J;;8NEreuKzQo}_zFMM*^{l2 z%*jN7>4OpNS^ERpeR81{nL!6YQog1fosBuZ?2J{VVMRUB-?xVF15Ka!Zm>?E+sw`C zpA-Wi(bqJNa>xZFM7;g>mL{X!cyQnZ&^VQ&AGlO_gyYLk_#3gswL2v*ngY%XRL-+J z&nM!*$-aD13uOl~Z7*bf&Xt13Zca(plHpS3kb%*=0959z*t60NtoJ##Lad{b7Om2= zY|Kv7*4u~s^b-RI_B-$;NPo1D;l!pE;|(j6JTO!#$T^TPRt@6-Q8H%~d&tTWy5P#4 z0;91RRBOR8CM=(Z;An!k$bUP><;$m*e1`odm(Vc12pY*kjM@|1GE~G$G}X+;pbJ{2 zN{6k9Ir$`NYa=D5(Nh05B4I)crl=zARoTdd5!{P$-(TU1qMaMQ|jgq1# z=47{iy;pX7TTL^DHUTwYI5b!qv!=Kl_!Rclr`as;2n0$oWNX5Pz44};q3tY0(YWb% zX`sw{o8_vt!m7T!xwF)(4n5GeLRGO;53BF4gJjSkMUo5AG!?aPytrs%GN zg8>z2?=BaxSP4QGE2ISoK0p$vHplrkil&gSg^mrQ`~$K4#RzHWKWrkvhKkm)Libio z`NH@_%I4cm8O=d)H6w`EA6hm2E*N8>TIyA;D3k=6a6Rqmy>VB! 
zCn5q5O-dbfmsX0#PCwcqCY9*A>wJ$Q`UaH5V9vO+Th^sxdrXvlplt(HWdZ!13=`E+ z3kKVK%zibO`k)du939q(A9@}NuLhGBBQ?7g?#NeF{GuITGa2U1qVd7;)g61jZXbTD zN^L+gLxl@;!u4~f4h@XHl+6(RLrC<#XX0*u!?RZZJWIAvLr~at?at!{Nzf5#H_=kJ zqya)YQQ?jmo~F1(r*QWo<*;afAr(~vNcqkXOCw$F>Cz=CmWtQHG z-J6V5*V8H_ZOgKgZF#K+(E2FQZC$^1&pqqtt6o5k(Bu4~TN5$Qj)?2nGZ^6%Ys+Y0 z&Zs9+&M)#s`9M_8D!GJ{MLdyGt2oQi*hZt9%l=U?kNyM zto>y6<18bIUig>f^A99!BaGqJ^#|DHPTkYDg+6&t0yJ9foAdCS#CQu=C|x_`jkY2D zA9^eyP8J2K&H*QMk8g!N=#UaK*rzub4;3i{f=?Vq3WIbWo;1%hk zB_i!AqkljHeO@S^d_x}LIN48XOt)AFlSGdUk(zIeC}y^BC@pgYfu+ykwx|-ABx838 zzmdymK)AD5Bu$}`4`WiMi=z_ZlqJ2C-9w7)H*7RR$LQj(zqXA^olsw3zsW$5NjVPz zD-#V>>Cvq5_9D*FN*GDav(9;=v#$qGMph;hX&`!5(1+i(qzzFA9h-mVlYlKzE+U+6 z{w&n4*%G;M@97cg`mvUE`o3_fOgJg)Ksg21Qgf%0;8_~?Cwd^ubkd~6u~Q+2OkuAq z!jaqWJZxfpU>eN=LmZb!K_sjHF015Pme9^Gf-Y~gOWQQy0jMg;rH?UU z z2FgpMr{~{_&5^IDEa1>!U`ojUEv4)KhnxR_ON|#FG%;*{N~u*>E$I~k97e&&LMxj_ z7|SSQYDMY!$U;S0Yh8{cSLV%_4qWT5E1gb^HT&W?3n!yC(- z>B(u|JO3+>NzVE0kLO)Vu(KQd0fq^NLh&$!l0iF*NvD_aBpi0#Bwl&z*((pM3CIE4 zN$VnIC%OtO`V!l_2bEw1mEf4aEl0b)=1LKhvV3z)WxbEc5GIi*#X=o!fy$IDRU)$= za?9AeW%l;YIxb%J-Vzuhn|SX(UUjf}KG{uRl|ba&6*9D#KnAHoB7Yg47OVR$OcbD?e@yn^zMRxGA~7iCes@KcHAC*wrsL)G;lJ+Do|H8B0R5Kl8{C$Qgizyrw zK`}V%t*)Oq+h40I>*3MMRbu z@mC|?tn}7C1&j)R+siO*+?9?jyUbdk#ri>)veH~2@@&T{l60-px8y9A{YzS0xF=4Zp%O{D>zQ2%OM6`uv1z>KMf7ZMtzC>YbY#e{ zqcFeq%C#kFBZI~Ys;7ZQgT4n6+V;9IX-vN-A2COJa5LR?Ciup@ciC*WF{gp_pR_o} z6SZa?{VBYEze_a+eS1h?Yx~EIU`-G%6PXFJs)Wg%2KmMyGWp(SzHJn0QT&ML;)Lfx zM_flb7Cndy$j(bPFN5#m=tqKz_}toexW-Lz(?UHOI5C@0q8a`wAfx3XwoX%_!2}Q? zn-YSg&5bOVl%T9znGg?>s?N$kHBi%_q%l3qcVL7h-(VOBy7;P~qP8Q925+XseG-Jb zD9v9i`O8sQmyVJT`0zswNbBV2H6Vs^H4 zJ(B?FVqZ(A)(IN?2#p}Y7QF@ijM@`we?9jCUWwi?;kTEaFicG1hO%@SjXqNm6jjy~ z#u%CGu~qYKitSvSA$v$__Pu)wV&9=}c32>h?Edt^swSL>9FDqutBfA-jNSdi+N#$N z&!Rs_02#Lr+jn7B*B~+3C4EIW)$&^35J1`)%ULe~vJezL{#WBUNrY`$h1&>Z8u3 zUZX7fFxKHq(8dQT)M^ZmJiF>EW$&-gnBfg!B^&GgM!U(Z9ff^kTStcU5@a)#SYz@E z@cIqb68zIlHSB)JA@7*}aL5#eI@X%3*tO-1?AqU;Sw50$oAHgA)I( zjtmpEhU)w4fe_)Hx7GsSG)IvBw9|n{HQ8sv(tsbQ^gXhhATKON_8laZx!g^*y5S6t z)!Z!OME&y}k#_%=A>S{87>S|fQ%_eW{Y8EJ8cYM5y~W_|a88u&;zyi(aNR)hN{l#i z9K9g7MhbCQoY2k$BAiF@AnQR~*m^ZGDTUkbzg)t-Z92l04WiZ-mhvf1YTBw^d~`8` zmNP}y6#>cgC*XrCLuR_$r zC;WbqbpDPWwWDtN<<}CJ75-37E-02HJ#)}*e}pN~{sUuT=$tGw@7^hzO<`xY^@k~V zqJ2pEwi0b$q*~MjQEx?wzGV%CdDl7D<;O6*9r}}O)ZosiD1}o_i*!jKoMUZ!VxkDx z0>vW5L!PX%TzQ=0dZj?>Pt@W_Y?zuyL;@?doIfa>PpA(i3VYY_IMUg@cl&`{?;7ag z9*`>$CarH+mAUTVj9K{zT|L5CgS1_gH+#r-eleaZnOo^5cAHnF365NtL+f0qV9hAG z*V)qP->?;O2Iq)xe_2poAIxOu)4aZOeb0^FVwlsw9_f^n;$p}YoQ3OtAR#2-`=tEs z5%6!kb$UNaNdG%p!}oV;=D(rp{SV&$hpMLm>xsUI|Jh}lq?HQU?|3X-mj?x#5t5>Y zLaHp;3O+#$TWH9R*wHXHYHFU91sP;VuWfTt#9tY@HlS^D1*Hn39IC&zmPGJa&0p1p zCr{wc)1r3a0s0SjoUEjmnB2JAiO~0<@IHPN5 zL4Lwx(wKP>_{Y9hW;0b~*$f@d-9Tf>J&Js$)78>~NBxKvgzWQ-AEHN}sjqm#gf|_0$HkSW52#it!O7894l>ff&0eWVwrdh0!`V z*0O~?n>%)3{#nbWrTKfsVRk!)1YjSsG3aRABJhrdQm1upG9&xMbgBZv`ZTh8S7%jy zoeFb3vy(P{GP!AOa{aeio4hs5k9%zmEbdA>3}l_MW0HxwKSSVA6I9yfC7tRnoba;M zXB>W3t{n#%GW;{SX!8#)HS{$)*a>tuQG*HysK3zWnfO6)nUqWxvOH6pcq8a!#^F0b zToi_TT*^J7M0>2>;8j0e_-|r}#+^I{XM&fDXj^qg} zZnzBsMU@4po6Yvuap&pEFPdC>P6{QSR?H_ltd>Qq-Ltd7O@2#~zR;SXd8YU4re;n( zj0Ue-W>O$ZWsmd^Tc6X&#j56NaHhE$KXZpY9Elg!ZjXTE_rflkx|y=4K6j=#LmXiE zd3Farf3_}vh(?kyhoOl*0Io|t>q`;rd%AAI#l24A<`Z}J@+(MfHx^2qkwA0l(r4pk zy9jqjDY&AJ3nb>rZR2uZ1}XxdAIFS=O5JI6W>FPjj57kNOJ1o;_AaT9y&89vGoDQ^ zYLddTfV&i7weyo&WKb6v76E2jZpD`vm0jnWJkr{Vc4I+qiEg4nG_d866k~-tRj5g0Hn4M)-hgLy!w&wHl5gO8j zmZ+lS*px!iusy5ymA-@pTDU_Tm~E~~He;mx`*nr>5c?_Ce(q-J!PNYjKY+)Nn1C5w zf0eP?k&Vyh;kmCe>hm$mW?Q5#;s=5m13dN48Ii>;lh-i^9rfly`T!hnaxvW?SX5cl z678dcWABMcHpLR1Ojg=@kK#4S4|^hs+E)m(QumYd`T 
zR)5>Ncw`m#9@j4Kq+;vGEK8rXL#SwKSMi&>`_wJyWN^~0Pg1WJcxLss5_dqBV-w{( zNz0j=DwusO%U>%3=WQ{$ujdx`jy9ed%^*1k%9a~p#aZG8M&fMIIF7CAXW?8@?Axn# z_Mtm{$rvhCg_$YszrUr#*%~Vyo9NV=0@}!JEBfgY3As~JzRjYvi5*-J=E?!*^1(y6 zwqm-NH$wd?eYl-72b=S33+NfCV&AUyf1X}NnV?$#R{Q)C_*ysp4M$)U9jMBlmR()V z=wcG%l^eM);>iSpzTW+Q6JPG+3-L4_95>$fopo8hU{-u!b@c3@ldX<{*I6^&D-j1h z#oFWU!!ajvKj}G{?P>o0A-}R}xS2~JR$#H=i>q(8AsI5 zqvJ%gV~PqFDZDfynED`QWB9>T>kiPp;9yVDTOHRq2_;>5nOO&#QnHr2K0ug(&&B*}f=7fvpfn;c<`VdFRr;o3ak`cJxW z64K#bsCxq{POG{&6yeQKtwR+@sYKc?4B1Vut~*~=w5_FEuQ zbG_6c*?!i7P+WxkzW9r-cQmJ`iba9HR(qK<*wm>Zm`rTm0Zgjh1y4xIRDx&-OvNv` zgn1%GJ(9+vjB;@jHVw*h-XY}l)LBI13+777TaaskKSB@g{Fnq&(t22388{rC&{3-C zEm1Q?qo^8*Gt4q|Dp|WgmhD1^q}$gXaqSrjl1L*NLwQ5B{L{j&fn$n;TwUhx7d~~3 zN-gv4^Q9fXm3N+s8};$Nmf)vwf%GJBAG%%h+m*KIQ`)%sM|l@|bc|`B&sU-8Z;?k1 zy3!U)x68@4QkNVYu*VQ>{oc7~EZi{;)Ml|O{DPGZPakN;Ve6nhb`S>Jm9blslwu7l zG=n&u)k5>rf;o4mWQ+Jo0$Usv2DT*y;V%YSF|sV6eA#5}z_%0+V-%M1z$trbpxoO}(bK(|$K z4F160Wq-AR;XGA9)~_KXJ^%Gh!ZSDs6x93yDXdWi54TllnDarlYyU12!g;2EwpGsL zuz!*(-AO2%VqPX7wtip3ADhbVI?EZm^NiU=;svvPf;3lR%W=hLY5m=Rr(%!LgGfGG}i!-Y6Yl7VRqip%!omlfB-r z`DYjT!&eoyeDCNbUW|RvIYuGnxnmp};WNIP;*9Hq+RL$q6(A^feP{4rWs3MpFBy@) zGDSVYe@okE{SR$l{Y?W+0{w#m)>c@ux%vX~ELHaxJCr_UaP45t1r9w<5lw`S z6LZqys-`A|w^{|8ZHG5j>nKh6if@Z9ubf_MTdR7cJEuZpM(*y1(|*2B(;iPb0iVzC zz2bkyYo{7nZsv^mbN1D{(^_661q901p=OTup*N4ZZHWtTAE?=XOkk>LFHK_6bt{3J zl#Xi_d*x%#l#5er?Osa4u&}{4rT;t}uqGyaDy$4LIwR%l4#VA0w##P7qB1GXHn+?2 z2_fdu`6LSgj^hymQ^nEtbcdDawFop^ynS=b2=>!A*O+nzhjpQ-kE)sIq-A4_zY*?X za8)LRQ|+W~$8>Ove$eW_&H|4tPDFg@u^mQDa|Aws!_=XJ_)y82KK zOk3`yc?-K;T=1VhA2jS;sSSV)1ib=Uco%Fr%GNtK>s8^}xYrz4wmFg9Vt(I|5%&1i zs`UvkjKUbA45Vwlkj+*IS4AVIQl*7Oa=X5wfa2wV#+Fiiw(KaH7}O1hE*0xUlhlf! z(alHTVQ~ocQwot>1_=&mQQ(quqYJ&TJ`6hz8yo=dCzOh827@}RnyN&|4rgqcJolXf z)Pxk$7UBlD8@eU1SCK=NcUS83UYHTX!^K`B#Ozy7nnLAZnIt`WlM$p|q5Fei6eVfU z6fL(RE;WyL+&sl_=TFs`tJbaaAG%r((TZfgDAJb+n(z3JhjkLSnowb|@NEg7!DQj~;ZT@tMzj(*${6`&< zlyv1zJoPY+AN=-qXV6z{WFb6NCc>1xOGIw~FRpNsW13LgMY&8EZ@$3sR5~#RU(}H) zp3u0OuCrQTq(GgM>V_xDoD%!8T{sTBIOG@g1)KTR8v96GQ<`kHuyfwbGD8?3$|aN0 zX0aJQfqFa?Rs^5CnMRFPCeJT6dAT{a$_Rq&d~Pe9_5>#?u0b_BE~KrS`Feg<2X2?3{&#PXnohLJVq90=uqn391^eLc?MzEgf(h!M#K@B{YEXleUlgg~z4MN==mZD7O5P+O zd4K#a{hWQ~@50ZO*Oi}HgQc^F9m2_|Ha=NPtzWCxJoQ*uOvUdc!(sQ&WfrISKIDl$ zAqrix>Rg%2&+HRz{>6eK8rmuaW{uSeCvpC=B`IR7c(nYmc+~pUU>@%c!EImD2Vv?9 z*8mv@!m#oF3XOZU@f-U&j4@z2>k4vLIL5@(W-PR7<)rnRbZX)RD2(r)-v<6jXqR|| znw|ULetw&ILV_SHfxw+~jTe5N-K)Y`yWWKENJlMixR_m(IDN{?qme&1zj}D0ikD@J z7tz5!#>zgHT|AELJ$Gcac3`C~LUbmZu`?gEikO!P;DF;d&dFEV)qaqzkdPq#_5k{UgQ z&VhOSF2K73U9J;e&35ireomC1y&3#TT&$83+r&N8Dpv|o&99g=WSQlF{jZsK=p0GH zf%+kI0X(T`gD*nDCubGUm+dB{(f(L`7^?JHs9lgZ4N26nJut|UO+it@&{cM~xv;Hq z|2}&5w!@>z{&(is9v%$rAIm`ho0XY=+j}uFvNdtDHF7mmu{2^~|4-4JTfD& z8t5PPsH?S=*l2l8>QNk)Fp_NQZ(voZeI#Eh^R%Vy+HLLoPwO)-?L}YfuSN)eZ7cB# zo;5y}@XYs&S@~REqDqBhmM}H=JZ-sj-(?+583cU3@_t8q4m299r~O$3xmo7CuH~-V zeQcWQy8JsfsqM6f5!e)IPZ5=FZOJa;=4B)_mCdKN0GKFS<)aN`CZlOK+ti)LF4r3S z{Ra)!2=v8ButC~asEYfi;CS`V>h4iitky9*VY)?jccyczTHfOdro!e4RpDeK4Ky9l zK-nL+hvHM#%~w+5C%L2+mix|7!O3L9<_w@jx7Tjs($dc9q<;9S>< z$py18>_X!NWC|C+IX|e`Xmeqc6(FGv)U|8JX-vEsGS6} zCnb-IIA+0tK9&XXI0};|(`P?uCVGu0GFZgLk~#DFd7f00C023h?6XlA{}aa|+`%m( zuYfB0H3vN*k(}$pLp!L734@M`e(ELBF@EEOKeY>EXaXRSL(PNgEv0qmsk@Jni0ua;&dQYI!~CgLTSe}+MYR4^^|UPY6#6<*QVYZ;hJoHGo%y@^R1JGTUwM7Q z;h`W81oPsFD$gV;#{4oK0hQ669+bpIzK_v4?)b-wn9j0U-%TP_kAZADWr2oVzyA!t zD;Daox3bMrQ>8_V-CrBc>8RZ&@7kPnn)ohc0WYYLH>Fq}6%MKNY`0OwpjV1F4iz zhU`iC?VWhhkD{Y9k&e!(P6+UZhdgC~a&t0Lr^6#20;6rHSk8T7WYa z`vm{uEMjnmL@Z~qRkXtQq~kL4%0Ke*nk@fWq_o~#`fks1&lV`BRn6!mv#+MKiE7V0 
zs!z`wtGzolP~!jY7p-7Qw?ALp%Wit$0+eZ$YhabgL%rL|U(r>v)01rm-VimwQ_{Fudz=vk@TP^75%Javl#{Id_ipok9 z*5H(Ig`a1_^q}>(D&fXrW<0?ul=%27lG3q2PX!Kts_gSm$P*a#>+gvD1eTm-feaf6 zvkXaDRa zbJc(OCHdWztSfSPFE0mKu4aF7TrYF#>DwSfT}N!)>L32h3xlEONG#wNsr}MB6jW4k z;|pU{NKd_c%%$~+RfvwT6W;77CEfCW^peGvoK|^@24@>#XRHJ4$syj6drQjC^4fv958cOMNvBWR2$ltH_h~-QQu||BY`K#efHBC zOlTD^<(M|wq3c}oS)NP&Z}S+5hWWR|{=ZcY6YO#7T>ltw`a2c;|0Fdt|0^}0X#7ph zpIu~3PT0uc49UuM`5-XyQXNP;vxemFOGM#o#RJzc1x6HEKWkUk{2D5n-nRUYJr*$Q zveo}pLU026G5IxjGsiNXF1vaOm9AHDN?86cETzu+7v}Rd;-NBS6fN5%EeiAo=FhHirI~_F2+3R$}y&m{5~Xo zGqXfsY%U>jWkB9jDJ|f@+=$mb6KAU!ri3#NBmhIk(Zf^?yT9T=RXE=0g2e&Y9BN{* z0Ly;1o3P8O>!N*%X*Yx~!6DF^DPQ79Hmtg^S~0L{-&JiK9^k@v4Tg~A@UPTH{2 zP$_G@ycS6 z=?pc0a9*G`Ge1+GEwt|Pjl=bqCe`7ilu!3twd`J#wUDtXvdWg00lu+}*9vTOt%OjD za<~)G6rk_|ajq&7QPazy(s4ge7TK3SClZ46R`f(Og*mGXJ=b>A;QgM6U~&$ce&PaPHBuEY7hU) zIG8zH{W%NyiCV`(WrzIGMI6*^c z@U(^D1WOtq8FrH#&ETryJ`@wzVY)DZzqn{rS_pKrQYSYFK+XWC3!qYmqCHKoz zQ*nflFuUIcf{gprqk2#8SZw?~A)W5wm*Z$Pqm=tlg9%z{Ks@9CW~#et^N1%wE`@3J zsUbM|-Nxu7PYxTdqw*jK^gzap*Nb)~#Vk9q@F-4^PjJR7VsSCg|WED{{+35qg|F<&^n=(iUD z(>d%mIj+Y(Kx2lSe>>59j6^1PGE+?2Gb?Y``Lmc@mLY>?HUA(+SvJKAsKUh=KzVnh z(J#X?O`1;>gB9XXgKTtkCbPU}BmC&L_mw@PAnWNH(e|K@&1h9i86e%UC)LP?O96tm zztADcOb2zVfSAcz+wYW1>i2(bHOH8 zq0{$0q1_?Z;3u=;u(pE>bhK|)7CwuOPYfed(f=qSFs|4JLns05-X;`v4ZrMWE%I;? zT9ibF0JG2ZqPYu{-E`lTwH%?}^#y^#m%Mrm)+;25v=Wf~J$Wtr*GZU)++i_qUlU$I zHl@}Z28vq9Qhz|F<3W8Bm%5*+*(gC^9D69Xr~}#R>?^Vs#t&3gzL(P9->f{~t@*>9 z&|A|NA?B8`*Vgb}Fcuq1Yo2EE%654kF%6+OrFyUW7X)=H%JF0|9=({J}OPatR8SxWyP_-=sG!VVxO(9&pFx#7cf_KJJekI6tKrz%ptfhn2Jp6JfIF2cZ zm^bzHgaj7l%^N(zt1gu1@cN$Km2Ox`Hv@w}e%d_LqJix@!609TUG~&Pf)*b9i@y*P zOIYAfS}v4|o|qt2MN`+EGf@Dib; z_vlFkZW<2U3(4~JUNoir_EffKhXuPY5MkZxp(>Jt?G#Z3GJ>6>nN0LxN3o>y48(K$Tlt^+8WA*M?27}_4cY|A5MR+&q`@<0= z`xvr6RQ;6maQ^uzvd-PM@*iLSYhR1Sg*w~r@1i0bJ{TCo|3TsKPX<+K!Rq2I0zU(4 znI_C7N9~0fT%;4t0&R%`<5ggP6VZ{GlZKZKja{CzjMYc+HbktfsaI*M_*gpl5I(9# zMS$e4C2eqZF8o&B>;t-V*4M0cD&Dsy&%e-xtQY@PdTKXrY2bYID3d+tt>f-TJn z1#u=szs5b-yym7agt>~~&%Teakyf9!_l)2?4Jj+pGX%nDKqAsgUvv@_N_1Gq3#UMfC=f}F*E{T>k5pvV?}UIhvhSlOOGZ6Z zppXqpVzP*avZ2%7Dw5_z7lCgYM%QzQlR$;YiQr6zsd$%`ep**fvL+j@Zxr3gDKbk| zHXxKawk4lU390|Gs5oiX-^o1gW{#O=A#RY9hfpdc0>n1$<_|gT{Mp?Na>ISwJ5^$D z(*feC?mwJmq^D=#W-M{CIoMJp+t2N#*TpEFS6!wuL%Zlv36UGQtEsYUr)biZ04 z!*r_2H|s1x13PKsD{eeDLmcTty~1T_#-)A!MR|1*wd1_ZJLPx9LE_3$5}a5rY{9jo zy0eRlG-W^$fYNeG=~}UvTGqZ1J1;|PM^P*!uaaD82N9Z7sZr4&TV5Mjd70Ci3d?D& zIh6G-<$^+!A5-!IRLq44h=xkLe>^vjf_wo@mspGwkqi-lP#%Jy^0n@d3i1d~rAOYE zm`vRl{mmNjg+d&px^`U8viOIo>-A${Ru0$(7p1f;QtLQeYgMAn@nM1TLFLmen7m)> z*+~5q#^^tGl(CxgLTmlnjg%KT86;nY>q4@n%O6m=h+E_)*aL%j*4r-}^ALDpeTpfq z+x@rc|7C2%<|ArqM9N3f!ELNnRi?o9e5LgAEtY0&Q!yFGyT%7h$F4(47c*`$!Z?(gsI`s zIie|{Wn_4Da5}a>tgrES2e1>F4cn-veT`Di{L*#-ZWU7W`Jw1eJ6z0`%l}_1XB`z) z*S2v$0Rfee20^+9X^;|dXi!R|yJHwax+I1MLAtwPK!gF2PKl8YX+*kfkow?zLO+(@ zv)=bz@1C<}_8;fIuf5N#b@ti&T>BTa3bX6{5J)SvvM zJeMQ(j~>`Xo~fiDsVZ}3i-n<77u0l1H^stz(=fG& zJlcx&+B?Hk1t+>J@0`}@$QN8;o~E^_ zAA@`v?DM=t5urs?_cXnhM~y3C^XKjJuLw*X)Gc zEh&k^WL&tw+H;CqBABSH%`IgKel>M==rbLzBrgxe?iWG{l(gUsv|J;)I{Gg=1a}nY z7o8Ku=r@{lfZo*gd^1V1_HRga{GrZZ0n^Sxw4N%!#E$8#f?|G zD9A|G7AD3zPE}z>^wg4N zu~EAU9&&BHH3IrI{)XQJ6gv3Y)JpHnnz#yVJr(B$%o5GlGw5+~fU4U?9p}*zxU+0V zLPIum0KN00E(C@b^Qaen>#dscEh4%pe?2d)RvqWh2{sDnnv(eEhF@nx9!Xc^7OxdY zFp0B3h>g#h>%=bJ3KsR&8PsSkejKnqbReAw++cUngkHujRiI6a&u|$oY-^jjl(?8eTw1EA3{!_5kTk5otmP|I0BtzN)w&{92*&OrZBmwXp@%xBcY z5}~yqLUqGQC;83Z`p@60hj(^-)lEK_8f|TP9xha>?%-Ds<_VXPVM-;(e6P?}!#E5T z%!(m|(%fgLg2fPmvG8Quo^gN&4PP=^>w2eFy_+p%ORY)~l;9cSTvMzYHzpe)$ zKHc>rd%Oie&0bpNR_1(5fOM7D6q8MCL-!L3OM4|6n=TI(#*}?L8^;0qjhOD|R;bl% 
z3(NFiXDGg|d-JTp3SM1Rn6oYJ4BnmMXR{(s!o41bNj$c)Sr0eHq^T#`ABM|+Ea(FY zTJRajL_k>#qN@wvrUt7D)zc)()6(raSx)d9=wP&F!FY4-hZICHHYpjW$$mE@Oss=3 zzF^pV7_Vd0>y{Y$#9Xv9BvyLs>6`E`rL;WY?I({YN~E5K7V8xHN>~o2F=3O+u1M!| znqLC2EC*nQcFTP@Z-P}hbGEoaKKAZHW%LS7MG;Hd$a0A1;v}*x+6ynxnQbKmO!r)} z5t^vG%^^K`0&cwnbeIB=DLeqL!(*an=!h}5I2x3SLB2Px7Oa>EMFR`7((-LF*7D8T z*)u%*TA0q`ct5(bruKy&{>&v+bRKDBAgcL?1@Gq^v5hVz&=ERq?S|27wgsq!T~)Ahurt^!8KoO zlp`|6-dQQ;?e4vYxg`jqWsLj3>MlZJx_TwChs#Ks@RaigdPWS$KuT38LAPM`NW#TRm0Fs=$G=x}sf?M(Or3kL|ShwhJ-cr* zKMQ3e^IoaP;h72%R|`rGdVDWiScY8sP^6xxo(A@wG1{&jo{T>(=Q&);P1aapOEfuN z?_E_QuPM+4DPfoqB*wimeezBBtA@Y3-AAbZiI?C^lO|>|Y=WArBC32n6sHvkBOqhG z18PgQW+6A@6Lgtka&#s-kpxSX=Nc|hz{24L()TgZcazk_xa>U3R9~)KR(o3Ej#knv+IFaM8=GdwTw37hh8g4 zYC#YO*4rObYj4k5D)v6u%KA8ekPEg%+#d8z>g15?&D)g*&Cw3>6rwokIlR>VsBk&V-_%_Exq~NOs`16JMhBN^L!R7toghp?b zA){c>ye5=`WoChGf5dJrwPl-A^HPMxt2#Wx-)beDv&Xw*2sQ4KH)Y9--fGBDrB8Ww z5ciCR;atOgy44iNg7G8{K+!D9b^DSTcJ9sD!;Y^>Um0H*v?=`d)IU5Tq@o7N*}_zV zdb!PqIFsafe4R6vNWZ*`6%Gipi3sSUBc*m1_#BAagYLPdMv+8$P+Q|-PKEuDqAm#H z1Bu)DvYKfkWKN;k+-iWWwyW=APJqpX?dl5a_q-$K9NOYhy3yPRVHD#*6 zhPU>5Z6CUCgVN66M$eJV+Z#!e)Yu5p$YTz}a#qcpBuK|vV=6PH>nJFy)1-zb-j$FG z8A*o#ibIIPGS}YmQw0uXMPp*82+_6QW0xi2&E-~0<+nfslvkdmOFT+c%gzoI z0s!xUg(U2S?;fHuWM{LLev~2zt{0#-<%}MzFaC5DThohSZ3JyS6hS`+9Pc%$x$`QtNY?FZ#~eoiE3qccm<}~Qt0>WZTE=i>jl>tv z<$O&8BlKzY_>?4KVpYwI!E{ce?81fxS^!cAo6huNVil1fp=v&Gm%7Ub)1T3-`EBkG zsknv8=;0NKhq^6LGlBh%iMy#R!BxHOovMArIWaUx!9)gVRDiXP49iv1aa`>g8!|s1 z(<%h?LFwykLJ6F3EBg-EVF?vD;v9p+NIHqg#+aXKl|%vWL47vyk#PEPzmoH(oI<-_$GV-BenO$Sey*H9AlOCIE(al&jwk83B&lde?F@=SS3Ws^o z4i7luQyMd*cQJ*-XoN7Q@0vU?e8R$plwR4iZHK%)X+);d4xK$}v0qDS^15rcB1OWC zK&moTb%J^Y(;;`_EVbH48SqeMp>mZre|Q_BP)l{!A*LJ$iiwhL&wc90GEJ&mA2QkP zYq^~ZiM)7{m*-Lj8k+2Xa}npx???79ZR}`ie*um)n$5K$Lbc?A76$hlS=yaJCS~W zg1r)Bn3v2PVxaJ-!44G!h|1px$hI8hr>CP!vIjTVFn|NqalkJ(!t?v40s3!1hj{f7 z2TQp$%z7xG3^~X|)-4&`@4^8T4)>K$B119~Bux?NpxWaCAE(g)L76B+QRCVCGe!0#kSfY%lWhMFUBi+6l9A^2lzLN38I78C zRCyox;J(R(A}li0 z`^|w8F$C6ddeWS4eh1CHt~3T6(e3vRz4J{LXRL zTO&n}xhXmQRdH%I|B}3Bm?)oCk9?&MmP^8V-76pYI$@SPnIX3t4ao;ZQ-(N?{VV{2 zT0Tbd!<}{JqX{PBS{W?@I6M5``Hz=SwsJ~>EKbP5>ip#>Wtuadu^H$Kjw+9Hfbj!I z`fn3oZU8JlJH-t$psg^wpJ(Z6YgO5o3cNgsJ7S~!zH1Xm7H^@!-XI~CRgZxHz?)SN zX+GtPWc7O9FK7{Ydz9cAN)9mj9& z<>pbeYNhD_VH7XFd`|?AR}Yj9K3<+wf}Zn@il>a|HNTz{X>-xLZa}Wtqe)QL-HJJu{;C!RME! 
zu~8TDLz!BOHCze4*zn3?wTbnpcOi~Rn?rb!uqF|9{}o|hP5k_laZKZ8XKCBvbF4A% zRUdsfu#ertHY&0);&uox(xgCHhj5xE{Bu)ZR{p5UTx~!$8|3=9_lX=`BRm6M?G}>N zeBvBE(hy5pU;1VH-ofo8`=calcL7i`IY{^{d$_u1v3Y5Y(Yy1U4m*pQJ9K;)HS8Jb z_lmgme(^9Mz%a*vG?-~fxGehhaMJRN&325vMPo(1geov4Um!*E(KwGwUZ4aA^F3xP zujqJo^z|cZnHDC@TDpyz=64X%pma!dd{_!gq9!vjMb60Wb=88*UAzwAgvyC`+N^xI z57cR1S;yPCTSUn9EV&j|mY~P=$$CvgClE1$sxrZ=CPd(V<4j#Qn~2=*K+hpo-RIVz zExK?}W6*x)Jn0m$k>=j_ZD*dlT9tRVJKU`&oT`6thV)OX@Sb?OG-^HDdfg^E$LtkJ zb3x_2>z?A{#Sal;7x9aH@-1>+glLZ;-+u6Txhg&hmlfM4wK;!(k`lT)f^RT0Kl5d@ zj|&mjx}%}KuV>e#=Sb&0LFJ9AM6?G+pT^I%V>-cwoH6Fb2b!YAQ&g@-8uSCtFd!c7 z1YgHktwkCRtoKYEHt>=!lZWd+IV40XsleyDnUb9zjHI_u0y(~wG-L5Q-&TbE3qLLZ9^Yj-bvNTFYb6;R#B zE<45o?_vc=voV*k0%ZEwMp@vj-B(cAQ7p(8WP#k(L|5%Ank+I3{;mI?Vtdt&tAc~H zDR%w)cUOK5`yJEv`sC}9FauYl7cFZht1|`+b7HkheDz-9+C0kq8ON$M+8MZ(H(Lq8mn_e+__p zQ~f6F@TYp%)#l{iT!(*8{f~$Ao1!;CTR%kuuI#!0sp#*uZt_WfYSmrMJpMzipS!^R zf|UGiy!w+?auTX1=T-|A~#iUzViF2D)97QSN;8Uyg4iXnR`$E zU(bDGj{a*0emBPbA##21#d!J4{r<*?cQeb)5$|VH#r`47pSQ1H#=jSGs8{YDBqY46 OPy1D^?J61x>3;w)FvMd3 literal 0 HcmV?d00001 diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..94336fcae912db8a11d55634156fa011f4686124 GIT binary patch literal 56177 zcmagFV{~WVwk?_pE4FRhwr$(CRk3Z`c2coz+fFL^#m=jD_df5v|GoR1_hGCxKaAPt z?5)i;2YO!$(jcHHKtMl#0s#RD{xu*V;Q#dm0)qVemK9YIq?MEtqXz*}_=h7rUxk;@ zUkCNS_ILXK>nJNICn+YXtU@O%b}u_MDI-lwHxDaKOEoh!+oZ&>#JqQWH$^)pIW0R) zElKkO>LS!6^{7~jvK^hY^r+ZqY@j9c3=``N^WF*I^y7b9^Y1eM&*nh?j_sYy|BrqB ze|@0;?PKm_XkugfKe{6S)79O{(80mf>HnBQ#34(~1_lH~4+R87`=6%>+1tA~yZoIm zYiMbw>|*HTV(LU^Y-8x`9HXY~z9@$9g*K^XB=U0vl0(2qg20WAtt2@$xbznx$sQ<{ za5-cN#nT4jm=e{bj#uy8d$;dF3%#$cK8}{$`MLEw^&9;gXiiG?9(MN0QMDR#6Z5?< zGxwc7yuUZl9+2NpqF`phD>1E+?C4hlFGsd;XAjPBFq0uCzMuGXpbg8|rqN&xm~|8FNJG}`RKnZg45_9^T=D3C+BKkzDBTQ5f5NVs=-m9GYb_yg>yI~N z0*$o@HIrw2F#?E!Q<|P|4xTid-M&g$W@w)-o92)dG-oJ3iY_kQl!<648r8pJ~dk@K5;JAztVD-R2@5QsN81< zBR&WBUmt~pxa3IT&?&COh8s%j+K7_~L4V@3sZa3;>*oXvLvzipOR9^fcE=2D>phM^ zvv=|`F^N89g;#Aoa=I=v7GWvM=Fk-s)+y~JwK@4LugDb99J*Gj2r}PUwiq3$wI3T? z$Fa_@$waHnWgk?evWmc^YCUkVOZ1yzvRMc-$tf&FYc@FfY;a;&s&5246dJ&Tqv8xR zhT6&#qzP86Qq&7b*npvK#XBnZ({8EVhH57jay$X6=mEmQ2$GzInz#n+#o<`hHp zoBDSv&BD7%zxj(!Kl)1|P^V{%w`UBw7#%WoYIGfnPmF!JJf65-IYz76!R4?CM+OtM z7oSzSn@U-1gXfaoz9PEz(mf`xuMJ@(W-dpaB4+b(bn!YP*7ba#ST?r z;mOda0fr40t1SX&d4+6<-qeCdm+8(}u!9~db63LUBj@fmO%XHcaw)VRp7#d8BjOjD zOjLB{uU5hu*ty3s+Z_6ZFmHC>{^2}$nJFHvurpdoc`^C#F|0NE=Jj9Q&EPouZdXOB zj<5{T7`zqQj6!NI>DPqZ873hK4Xiflz3}>KZ@5Y;?0O-+kpd@pM^s!ZbDV_R!VE;J z4U9w~$y98zFT`I8=$iI3Z>@#g%EPG<0wjGBNE2^j=f0Q2;Sb~k?!z7W^MeG9N!eFV z1xYJ>kv&1bu7)T+**L=evIl@ZZ^I9u0*;Fj*Js-?R~pef6{9)Bp)kY)<3Sx#EF=&Z zgCq?3a|;w@JN@3%m#VHR>Li~JGjm!{Q*mS2;wa?XpA0Y`fV!1@twpJJLZw_ zpe(lnL$65kHnC*!oz)06cR%I(U?wiSxl-R9IkvSHM7c{?A-?fQ3_jvj3=&vE^(Mq! 
zx#o!;5dMA2jr4v#&;Q&&jeYUl{yQvyRpi^jiu&xlWC>JK5tvu5{(12Wp?~MJ7@5G6 zJr>!3|F=Ze0Hl;HbPi91KJ-P0TQw6M;X0H-rOBW*D0QdQZc2SFFj@;9go1Z&^4sQL=|s#bi6*{2+D&M&na)7^jE!`QRF@>ND$+2NWl7z4%u@^YA|4h zO-wt1UfK~oczniW<87e4sJf2L90Sp8g|aq#tmP;MS(Oy``;%4;6d^H)aly9vR?kal zW1$^Q46s;|tSOuR6;OQt>uisEn;;mi0G&yQ|AoN@$FAJ=d=KQG7+0N4df@*CVS&Ff zj^+Ocqk@yYho_*ci-oD3i>0xli~YZ2O^ULvJ(3^_FG%vRsimW8{fd;WwQgnOQk?|@ z8K|+5kW7*l@?sgKjKQ>97)(&IzR5vS&zcyr|1bUt4~TLkDXs0W4);Ht&odp)=Kf!A zPau81Jgo_0{h>jDAt@+!8ydq}P?wZ6SkI|3uv@K&VdjR51Gu3_O$1O6&Y|tot7k z`tSLXH1lVvG&rRFfT`NaFt=BgIcykY65hul3hE~It|Zh0Fa4Z?RAExWF=3EroklV`JFe?bjw|%I;N3u#_3at$%`y9ZzUl1Y=Q}W#@6S{@3s@!*%fy-2Xe;nq3ztpVEm_%q&E32wfDO-f3 z>p(AtkpD2eI}`I}0n^qfVpB#PLqR3gqSz>QDSOE7(tN9YQglhMRd7A^?iF+t5- zx(-L+r)T9>S%lN8A}26&I~(0|vW-o3 z$n;7gHsXj@bX)M{VDmBIH#l9A>$r4LxOBZ^3Qc3h?mrLMCFF@s3mgzo94-(L;s1QV z{`CpvXhIsGta^U=S++21#RO|O(qd@9tO=F%W7s%ikkAE?1fvOpjyw^>6o)L=@^DAR z=WviEvx#GSk;n-tbIWaU*=D1Z8HULEkXSlqw*J{}mh~#O_4<9j-5i5^>}?N!Erq=d zna_Unvip8>^C|Ch+)3XBYLKJ@WAL*Md@hDwz47_7@-@=RPnfm0Ld}12$oj_zo8M^P z4LCyI4cP7bOAyc(f`4&l9aSd3+H@YM1H{)--ztm`?=P+oO(4M!Payw*UX{sRg=zha zmrI~8@LiSZ-O7_2;1}-?VW97Df2HZm6qCnUvL4jF-aUQTkE{rPcmvw6BH#;oT7v_A zkQe$7chsJkZ^%7=fIpeo(vqH1F<;z~+o*$yio6bULB0EB}G zjIxX}6)YrZJ%~PANu+)Qie$^h@|;*B!7mUc>xqG1pd~ZOqMI1lzxQ^Ea>5E+Z8;6Inn;RwQZICdr-dBuaL@qfEv+FgC+1v{EYJhQ#LSaDw5VAqfL;jHS39n9FV zkUqE(gi<~E)L8CbO2%cl&*i>crLK}N8x6*-*s6zD#k1Hk3rp0e$QeXrCn;ADiqAEb zj*|vNd^ot09Wz%Hb7u5)>LSaCvv@q4wsGbyjA4y7U{#mQrz5y^ExmQjlcbpz+vqWz znL&o|u$1!{%EQGlIfUfrqKBG#ti#@zK;ERH7`b!B(0$xEjL;vEX#jHrfK5h+H)IeZe- zb7wQR_Q_G*WH(JjZ8EVfOqD{VUw0xC$TZ_s&K$=vWjt8h4WsQkXva^(ugfzpQ-u@C zU6x~J!he`dq6oENJG9Nec~N*Q;kiHURO+o#=h>&&XlRjHi(`c5UasAkxHvW&u%+H? zYuP4(0{TDFd(>C1qv6TJiOa5wn@sO_Uh?HaHZP=uH7bT`aUHv+$l5jmV#q8Pcfee$ zn6U}k)@CsesYMaa&0=O}XoDmBi{|Z;9s1MTu4~)YoekxMS~>zLapgGsE5Jg%Zj9X0 z&~6s#R}0WC@ZU9PG$w)YrADo%52rDX)|PoF*0nL{tMTTs_gfLc(jkGOqvvC&G?nz8 zLITsc&IiI!#Z^o}G$M4_niI3H$m1{rYGjEaNuAq*;64P25*dX zTS*dkTrzjoXR19%^$;@G3P~-rMnUS1d<* z(r)8+V!fo-3x?x(>(=|c?H2pU9vg|ijd>m^(phdfi!%y_PK?yhgvAb$4IKHIa%RcH zU3@0{m_7>wQ63SY3J2`glg!sN=ZSXGUPtw$-A=)p7Ls`)Fq~GBy*N!r?MPRSp4hwy zssj6^BfREg@js;H#v}!G`P$%5LF5o7GzoYN$p^u(wUc$W$Y?{i%*QD^cH<#vJQZvP zevy`$&Lt9ZT1FH_+o6VLkPdo`Cn7FKPasMcR=SI^ny=q(rH7mX0`rAlsVv9S6_TY# z-Jc&_p041Z$uZUTLB!*pLRn>kqa2B{IZoRRx#cXAW(epbZedV@yG1y{#trSDZdSkG z-~muhMP4nSTi<=cR0>%8b3*9HH3hr|l{x z{m3qgh?db*3#m6AD<*}XBxZ5`p7))Gsc)O)jy!YHzLYXZAgDH*ZOg`wYRQfr3DbI7 z%e|J3nH%m^bpOJa z2{VeU$B}`BFRu_DdKm*6|sA>)-a!sa0ZPcXTIhpA$N#C65szy2(vxkgFub(8i_HoQMWkxbns9@~I zh&g;kS`96_a%M8>S)I>j7XsgF>jmXmOUq}FrRiyNPh-k6$$rq6rz?2{Zwn#mT2%$V z0Yc(5d9G%Py6DAfzB9s`2m47eQ7L1yR$8KS0F#B)VPDPPQ>r_U~@ zSc`s+yRlZ&LPgjpW;vy>Iv*Zz5iv`{Ezg^rPQj{Z#63}Ek4r158)bg5VmPW-B+9RU zy!RNL$+AW#9pi>%af{iq7usOsyF^-*ZD(o?bCp5v(TJGTS0P;v&obm1<=AN9Gj1P4;}RO!ivCDYdF`xN)NNq)ny8{Kimq!0Xjo z;k-goG{a@^D$`S&>>$d3oF$D$TWhgrLV5jg<(psV7=t43C>N|#>WY)oTz;R@84qi+ zXBX=lBPLHeyX5kQ(r`41R7U&4vJhs4@4Q0)Hw|S;fmbfu6h5)%(QMbwCHKjFN@Pz4 zdZa(ce(d@V4XTtzWiXT`RdqkYZ$gK?QK#&F%_n1^35F5JE`w|V1zwyr_{z4RFRyia zeS{Bi3GRS<8*JnyThZ)8D67nkw>=$A>h#@|qQJ)|3IFg7;ih z_Jt?lz#vQ^m6!F&G{;)0Slzu5Y!+g;TCDceP4tuRfu$*2ay`)K<3z^GPTh`z%2>;m zOE~rxHkku~n7GWRb_X5qjlG(A*fTccm(4)@fzp|)z#kNT(cHV!J#oywSH0w;)jp&_ zLZ4Fgnet_=kt3Jovc`s4-{65D>JW?2XDMJByVLRRFliXJpq;lxhsBd}Sm6x=-h1!XFo-fF{Rs7%xS|J#feu1pb^oY;! 
z%jnRPw2M0+Ux$ugC4Qm2P!Wwi1u$Q!DkrG}e)uSqRH>W}M0DG5G^9b6F;xs4z93A9 zhParChorwS@Ci+p_k9sjm3ca}1W<$ft@Me*eq;xb!|+({8H49C&4B?DW?7t_`Kabq zb_L&ANFQfONqA(HvkFnmJsEESmSo!3*(qE2Nc9<|e5A9q5?IQgLd01GVHTn(TGn=Z zu>qkhY*1OUA00{jS+CCM{;e{Gm&-mgZ;zqOU>Nn_{PIaN^)Fybd_nSNnm%06HQd-( zWe)E0_f@yN=v`$AT?-bSz|s)6Y~T*c4)3s680iBud)<~-Rs=9NC+sn9W+yOcrVfm9 zoJcIo9I)p`l)@xa4qJj#S^Z}@o-pefqwzT}qFm`>MrYrNBg4>Gb(1>+sJ_h9L< zKb5x9ha%2oMzu^ma(dIFQ%Jt@e(`iZ*^U0;5f6reTPcAW>*;BJMX_dRG|4ZaJ+rhz z3)95}5zEpv&Z!bY* z*0R?IX20l}_72O4nEE&(U|xi;FbVxl`fQ?Mmfo_~Fs2hOF|x-8W$<_eIrEBx@r@1d zQLKaFnBn>QsrD^vHUpvsG`BxEV$)j8X-1}~wb}>>_n@`f5S|duRD2Q4@O&e>p>mtR zdM9%8l6y-zcZbU93MUw*tbtm{mi!~c5MS{AS@U`Z$P^a*t#v2<8sq<5^ZxCrm^+y| zJIh!)yO`SjSNGmErXMO$07dkMdeI71Wb#RLPGB=tH2$Zk(z_&nX*e;n@t1ZKUw&L9 z%Z3|zSSM%p>N^0mexNVtv_L+6sFKc!^l(l}J7ZcF4RSOXKr?ov8yQ%`k@sZ1o2UPC zP(hXJKsS@w@b_nhcn#9@2xvuvPQ6|$nPGto5fbfTwrGv1W+U1+%D`FHWL6i44s&d^ zG=a-pERGPm-20sMTEP2{f8wR|Djw_t2Lg(K0Rm$F&v->WjBQ+xG&c`VnJC>DU4M3<^B4N-w3P_`7^%^A*~2fB<_ zq7ew1(K~p^A*Bu-FC_x5BQ(l2J}XYAF0IVeonTH|Y13KS^rzx;%?llJu}{q?EvBMc z_M{BJR3R<%eXb^*G`;hKQ-7^mwY1Y(j0d)%FBBOb+xcH%&00M?gh@*y`7~nCi ztkQlxBk&TXGM5~epV?%iwQ(&^5AiYLJgRYz+Vsw8{SFP|;HPfm_CR*uQ~Z3v&Or4! z$3iVAIL2_cRI<)FE^^ZbG-`%sL8k8aD1LyMDZNT#M}zOy-C0JJ&c&@v*;(qqi*W0E znr)7jv$(6)_NM9LB@qS`{L!_RZeoa25smlFpU1u-k#EA3;4XW#laVPWf)Vhadr!0j z>Vv4Tvz9Nd0)ei{rn^M-;bmQ{hv|OHMF|Z75m#?kIByz{Fuan^CG5-#c?3G6G@EMq zR#GLJGt;EbhFWmzcA|WWEyecCWx8#)py-55KX+1v4k;XF!FjGIz?0pp^a}Kzb=}1* z^AcC*!>YKR40~hsuF&Vy#mWx3Uuyfht+@db%Z*VBivV69{ZaT^9>9`0`iaYj0^-{( zF)sfIG?!mtDmnmI&{2D|qOxeijq?T=B6O=#mj!2)9V(Z_*D_f)MZ9PYDATe35eAI^ z5creHr3(e?ts+)=40_9*d<;^g%M+J>aI(51R^35%6jaXoJW&&`r?Ors5lsG27)<7LNvfz*K;lgRyezJy^ax6*kF zu^91WyXL`hs)|>UC7wDVwQT2(GIY*{hud(pr-tf31>;{b32G5T(uUvcLc< zRUbUtwhL+cWSQi)mTE^-!mlBb^wKib#$2^lKjBJU z4@3Mw?;*B*midR!J&_Y72w?;8a)~7Jm1U9sa4$3LGf#B#nY82WSw`~6UV!AEa*52g z!XuoofBneZfe*%q8!FW4?D!)F{bYdrbSDkYAjHTMDIctl5P*qzm0a-iId7u03r}rUwk}_lceAd* z8xdF8b$w}s@q?h!N-NBz}B!nuncB`+|J@uB=5RD&7;suL0fEO@Ybl2dKSWIpPMqR9(&F=Bh;TL%-<07d&H5(P({Q+$bv(XJ~o2xXoxL3Jcons>6UJ~6NCfP z;D`oMc|=yr0|u*R#e!TK%WQ>A-sKEHYbm?29k1KP#%0qo$*V~KNdk$ z^aEAcBOAX-oU)c)8cz8RgVNLDd)N>*@6dh}sWo3zn2sYhSOj*IHCl`{`p0*F0-yBY z3sR@pW;{HM3l8~(?>!KRatr|U`!%-ed5*Xrcg_c7Tf4sV;g8e(5Xjp(0jAfOGCWVg zj)&{3vyWIH-UsrAmz_~vA9r|ckGxZIv@OdfO8KP_jm0{}OuSz#yZL&Ye4WB>tfWt_ zdSQtUq&VLFQf9`(Dvg0OCzA_Z0aOoZ)+-JZ*T4D z@Ne2)c~fpv0D%{p&@H-SiA4YkMM_&@0SVngnjR%0@JED$B5=YTN`?t4%t$OwSfrmS zJyJf=V*~tWY2`&VGDQH7fi!bd(V_E9wY&fKCjhw*1`XxmAR@X9ij0Ahu$CY=IJ#Ja zKPn$$mQ;o^{HKDHiS7t=LK*3lM7k-44x1X9`yzM9^3;LT2E~nu} z#b&AUO4Hx)bo>lM%zF#bu~LHd?YZp-P@))u7Hu-cz2B`%zeTSz;9|ag8i8K#f|*IGV4QhI-2m+S{Q_wPPeV z%xeJy!tOsjnrWKWK8ny$s1AT*39K%=7@#@<1Q_1Ma*M!yMcG{A-WKjIRbH~S$yM_4 z8=cWO`)@i&tn(YDhwt)nM5vilZa_(p6Uw-3ah3|TyGp?*yBFGAMXZ7Bb~k(T?+9VX zo!LDs;97~x*f6LvJ}8p$EZaVeAau9FAty%cN;$@JahZyB5PO0@vHlvO2n{krfv2c+ z1qx-5;S5CNvGMufBmgOGX?1QsUG*327NC$+Wg9wA4mt!5bMP;O4W%nKLbwqz(lD@y2=(>{!Nix_|9#@ zh}Fra#Xk%%*c$!*-_$Q;`=e;De|0Ba7(hT&|2d=k*CAH_mw4s>)}Q>FzR`g2L0-lD z=BIf-x?lfg!(apj>|sc42xcR6u?7y)2)mY!kr*$`XA@A(ybv*8UCUybMYm8Y``bLT zHoiG!n*;J(ChO03srOCyX7tx?4v96+p1!}v%^%;J%}d`=YZvY(FjS8c-(ey~?(SE1uR@5^^ zyS!)&h+kc#tw-L`t6ztY03E)HBmWGQhd_Ujo{vNzU$qe=Um-z>5hs}n%}8-zT%`tO z$5vbzii{_qK9Y;4@IWy;$v$rU*x2c{9X;>%Ac?B$C3(wVtN)OSFKD*X12|6^;OQec zj1C|L(^tDiMa{ZZMb#f%?S2U@el11cRl2o(eZ%#9Ddzd8HF+pT-%X0{xfzB>`B2z! 
zO4IQ>8os`JHKz9~JScm~2+Z>aKudl|qxKHe9p7Q2_72~ueBk*j+=`=uyd()+KXqT{ z6x0g8zjZ$0ZOpGOx|Z8N3%Kjo{i1hK;V*zF^0FaWvmYjINMH+?fMZUre@JI77f%Wm z$Pe#ovd-`3URusLR?ZPyZ>sCGCVhM*;)+C+*Ft*!wkeS{4H&V_SMUoZi~;PZpkxg{!zF zXrl-{5uTfs5$cvjJ1j6o^e({q`}3u`c&}E}Coq<2;p5Rg1oSn&eOMgbm>8&vM;8GW zfFD8!G-hP2lccpLWs; zH)ywsZ6ZS&M@L|#c~t69fnMmu*BKp3Yiy0ZFpSz7hmcWacy^o%I^#~Hp6^hut5F)Y zlAVNiWZp6s7G_pPU~P@)Il~U(>QgEtNE4kzye8JB@|u#N2N0oI4A7%d86}XRMUh5o zR7RK*<%b_u-1ISfTZEL?zlbc4nYO*aUnv+o=78iHP^kzQ!sEi~WUDiYgR z7V5D`M8srTBp!SScGhPd%9)bQJy{DJ11fqe*!TSGtHWuzkCJSv`OEH?E! z-Ac2^>4XCbQ*y-eu(B{#*Cx74N&33NtaPP47MIh+t@o&e%}Ar8?N8v;wmMHZ#W|V0kLC!Ck(-g8&7Urzb%cNnrrzdIU&uC5qlhT-98O2?=U zG5@ZulhTE8bH&=`WtRTYSY*BMeY4NDXE*x}3YT%xaKyo@=bvwgFxh~n{ljB#l;BBt z&+3m^LH2t=cK5_*K(;UGGlcV#YB9oHQ|P5@Fz73aPb!<70FOZt&ViO0NZNr{ZDtS< zZrCf0IL6=*Q3HptBWf@&TZCposbunl1K>ffz{LXCv<9!29L%(LSNZK{moRD1-4|h; z{Iz@m5tuEO4rRY8QkOqelO$(Z%aT5o<>?!54CRZ~B$?uNm5k^RaKXJD=jT?ch-Eg7>z)(>QSsK0qCbWOZ7vhH#1xqA$db$yMD5*NVTm1 zT8{Lj?+I+~Nz09+bAc{OgHFZlPW|eUc-G$+Y76VK*P8(qWu3dQC6YMdW1) z>`P}=c>;qZXFD4#<&+RC*YQ+T;4Xz&x-R2vo8_-?)LR0i2EDi~F-phJj#_)6E_$l* zx=Hu$tpuIFog1qLo}kALN@=2=SoCUY9H6XUte;w50x5O40w$r>ACKy*rW+62yfe2^ zbjcrgG-FyQtECNnp|F+K+AsA~LQCr{%PoPkW);P%>S#k~pA7;)-)e7p0&9dxV?LAG zoq%UK)6`0Rfz@+bOs5O%>B`dJ*1?J#uE}lU=YA|1;47Q+C!JZT-TcrV1adsRb%)L! z)rAdu_UZbSotn=H>rLpNLUFEsTUe%0ySD;lJPmI-iqH@ape3CkfCab~&vjG*991?Z z+&Ho9jP>l{Srw;oWqbahxII;m8(bw~SbKS*Sn+LAO;R5{XK$M3JvKr-{^nocdIOg)lu@r@zam`OD=mbo)!xicn} zfM8J;L`b@D;}Ti z5~T20ZhC+}+N{C^fJXI4yu|DNjFu{@;|bYzFB*~bwRncTnrW75*y=e4T0iz;o_-l)r(hB$;YVkf4$4%AJ4Y;nMLGPXapH<-7 z0mez?-^6+IuMz#{1X}XH#Do7zoJIfkdE(r-CCHkobql7S4EPf8g zbstfgZYt9qBr?3kWy<3M_Y2}4A!#|#w$U!P7%w(;gM7pO6Djv5IgdXC5D+`Ue~;A8 z*~QSt=D$ReIqI+O*y^ZXxvUEmckPZ_WTLVQSQliCO4^#4!5q+%*U6a^a#o{^k{~WL zvc(aj%tkB|N~w*>sVxYt2aR=xlq|Fj2P|{IA;2X9(57Mfujm{QT6^Bii8PaulDC{a z_B-Cs+mD^kyu9x>>cv#U(xDFrgpg5obgO4ud7yv2BS8-54!G}8Rf&woNILG)6!0Z5M zQeHbVa@~5O>MH<5QT355_-nOwQ=_7MVb6rSKQyE-4o!$6wt7)W(xoqjr9s zL+R+|bexEcGvj(swOEDO3`)nuz}(F-ji)+Z6`9o@T_noqb6>Z2sLU)kr6zFgUxWny z)r!RS-M@`YYl}%M1LFoTNw+yyC^D^a;)Q#7Hm$Yj8K^ST2D!~I(n{Z5 zGuSR}k~-)cF^;?nTCi2Ud9BOQHvfLl|Fv*qg85itxyTkOt&AM%Esz)Qc_uO0jI*Sx zJVPB7`Je;@ypeCK98`iH1+HGJKa^1m`=DLGKvu~+zn#9D&aPT+%AcGfX~)>yDJpb3T(*gi4vGhJUq#(4x&Tr4zaP^_F1vmjH5zp z61%WASsn~KLvhzC4B2}mH6JTke4y))+glL>+EQhxt=qBi`rBB2AmWgKx@U?*o1A*E z<19UJc9$LG5-~f}Mm$lQu;}(6103uH-FacrkDs1zeXVLrvj(_JhR9WUO7XRW`)Nuubqs>pFc_)(l7vIVAeZfB6n|Dd^!}2P zenGoTo>+QAH!OdvMgo6i9wdoRx$z0Njo4Mq#v4ZH98jgQQwM}@;CV!0dM-D7uy4iR zPvjq(gZjmgK};G|Xw(!Fc2nJb7oth}vXUkC_2x5SG}L~E-KxCzk4v6z+a)o?rA)O2 z-hLU7Hr5*_nQY}?IfTjaxRtc#9`CN_(!Z2a?hSn>EUFVa)M!jMt6y?Ol5*P&Du9LX zqP^tmNgRv|HD_&Ya%;>S^CRJRbz0NIHDRuFq`04DP;je`FyCG2XZy}Fq7{#58*-mT z-Xh=qk=aj-S{ftjJ9f$@de~1gZI&WlSH;~Ar!mK+&ajIY-wS7?!FP%>G&VjT*h^!zJd@9eQ&P~ zF1FoS^K0ch=_Ki}gCul$g42%YVg@HVnu1F);pGZ)V8%@mB=W#NGCH;9=dldj_j$p@ zTYWuaT@7Ey+wH*Bc6lJq3y(WnP#TYm4#DM!TQe+9SX{P87DtzyzBV3M zl}DQ{YIN5|$68kJ1;$79k1RK}pV&Aw9vYTUU{Vz1WK%b3@O4>XB}H9mDlRUT4W%&E z;-)Q_10tcU#j{~}O?AXenbg3us)}FQoqkjahf@bMUyfFpO&^5v`KP71>2u)q{8ERK zF)sV?O4%DE+CaBda3W3_B7PvPFD<0N%Me|C$@u0`O~9c$EM;mE^8GkH*_aTM&S!H3 zcYhAS79po(s#k!z(Lk3GPC1{xM_IwWOh8jKw2vXgtKC36IKdL*okNA6B@%7896j7` zLMYUa4rlxdR`!uu(>VVYkVVMa44-B}^bEF`LW=M-0x&OK)My;JLIWxP#-uS>;dYYD8CoZ5rG(uRHv!f_hSRMQ1-hI z73S~=`tT7o8^SxR{E|W4PUwNOSaoZ;Rl5sDzMSKZDYeQYD3bjP`EyjI>s%kE zf7?XWL&JV|@F4wXBnV~g*Z?H6E%pqZlIDKoGAm;-W*$HEAbuRt>CLg>LCZ&Ef;I6+ z?>F#2!}q=EqYd5PpXyAgfq)49n?&Vb;rrkHJxvG$m1ErRZ|6hZSO_74K1O*H6C^ey z6j(wD7Elrx5LF*Zy~H4Fz#m)^tEv`_YTXspd9I5AK~)tb2H=$d>`kk*7A^Cd&X(H9 z(%$dqKXhqF2=VbZ?>p>Y-oE;|Z*Kv-A}lezw@TD;$!5tcMJ1TT(`z;?ewMMRvyOTb 
zr^YOJHw1qBg!G=Cfz`6fW{GL{9Qv8S^yp3rX|+d2mSomC2PK3&qEGV69+_cf-k#vI zOCG6dVz)N*_>;~ir7D>nSoo(U4L;Fnai^YoRENk%_ac@P#TmPClb!)1sCati0Lez< zgfue8lBv9_edXdhBq#Jqt(LS<01`ZX%GZ*O-UzFn-VAjYM$M8(N}3r6`ifjqsaobT zuwjhAOKg~YS_U(VUKJn%kBvu%9Qjd?D*?Nhv3qMw7K_~)Cw`xcUiHq4p7tPrgpi&V z?JSDpYCqhkS%O*ru&GOBP%*|>Pm8eoxJ1<_I_z-4KHjV+joqm#Y?H^Q6~SAMEpKuc zHMQq-|Gt=CpW?M=1l?mi7-Rk;AK(4}y5zNBB&)kQR$baT!R8}j1l{_>m|oPxKHZ-P z!jDSlYig4JRQl*13G-73#VKMWjR`SH4-+nH{w^OeDua=1H!w29l)5stPFF#*$w%|} z19g%*O{Gp(tJMclS#FujI7ktRWk8mcRgDF~E^~6Jmj@|UQ*2Gk67;Y%jNaG@f>>78 zEZNdTm1IL@0fiMS&}@99e15@5OuBN3NX`q32z#(Ue7=u`Y;j})EW)*a!AN7;lz>qM z9cAp030EVt2O>-?z2>psgQmV;2jgd^>EojrP3ziE?8w$c83ZagFQC1xQLup@)_9A5 zFUG!Ac4sGx#(Q-p&PifevPDJJfO<___~nfGV{kN4kOVK{_JwfpBW}j?=1h>et@7w} zQTBd<^5+$C*+C|BP$RU(>}Z_oMsJE{#yONYEHwh8+$?))UIa?SjBu)p#np^Ecx)67 zE1)-vd^);a>O#TNA8ar6mMPU5Y7w*@=h{}8F_z5c%R|C4L4gBrfz6^Z^rJ4SHfegaAndFblMlRsp3 z4lUTUGdO6(noT7p#S}hlp~Ox&NN)k_ zEdDf1Aq02V?P^ez;kBOj@zB=AZnoC|S7wXfKw*Hr5nlFjl|s=q#(ca)$EKZ_L7+$2 zWbIKp)VFehDC7VptF9eyo*00op0>zupw-QvBtpd4NY)cNqYmPGVx`#zLQ8M>3x0T| zs)-N*Y!>7iSpz;*1uU5%^ywk0HMQ9O#rvAKmb}$-OiX?M1w88`I4zYu>+#aKa4^Hu z7m|-e*uj9-#2UJh?V_d~Q3WjlH)^Qpv9$5s&&)bX(>?>%Y8bg$7JloMIZKwSO^z4~ z7v5ZJQQKuEA9F-V&7eyx4n$uzpVCGHP`<8?*xmnx2qQymriEHl&o6D#u@oH&+>pM; z(^bpfoD#^I%0xc3X=cJk!yE(7?K4sxDzPQCUM_L05FwHGj%Nrryap;bVTr-*==d*bm7vi=Sl@^}l~38vo+;?I zRz7?{wf+ml$MYhq-)bp%99}Pp(W(!T#Vc+c6+RF57t4s5OOwlW`&2!utu&H(lOnF_unxBMNC55}SC0{9%n8;tD3`tjW=%@)=Aa6;#IH zGNqHma9Wx*%EcK})6I4&%3!J|CRrjWjJ~B-#U%Nbz-R5m5XpMNq=vHmEY-rH`6Sht zz*R321~q^9c$DGtyfDJzSU${JkuR?Exnxqs!Zv1_)T zKhRvSo(sQ8l<_vJm-#Pja`8&Voj>^g7AU(v^U2w$5H6ecp+&$~?57H=T|5_hE0E*Q zm&MYryNCU-&apqrV(HQ3vzvca+o`;_?Lv+C*prFLqw2F;eTC~mrYUy*d0MNfq86PA zkrFVo`NHmS_W*0z14Yn`zZ^8<4%p_}9o%&7NxKm)9@h!9@adi5Zr449+o`yx^ApIF z%fUy1t6lJ9?~ag}_w~@^u>lh@qbg+1@k}%t%hOYOA(su8y<-=dO6SLE_$W7{B}RC{ z-eUhocJi#B=4WlGvt_DGu=|j{STWQ(XBVSBlU)91)f*qyo%VES$jF2Ighsdg zU7H9ohegXP;W=BsskWBmzycZhN`I@qm4QD2_`XPpI7O*o>`M%VgtQ3rTDVXe#~=G> zF(JP}d(lJ2gfv}qS+tRlbJhy{67>pyAsZnMOteoWj)_FxoJ0@bLQopjNMH>AjLO3| znzN5~jYDKE{&9KBkLH=#@PoYLPl=sv!zLOm)(sN3iw~Uciu;?FXRdESu~}jBhfs~i zHaY}3kNosmXo(dF>Oik_-Nt11W%e*43Kg6t^O>dBIG-ee*Q6Q$liqx_`PVw5Xkq46 z^Y$0>vD&B18Tz|j&=u*0k8TM4iZ|KQv{y0{pM*k>KI(B>-b;p@Z^F$HA7{$cXhL2g zp+G?3odnNXz7F~$r4Es1{+sr1Y88KD60M6g2SDXW-T4O>e=tuMiv<=VBT?^G`tW|f zV!Lv_BIcSHu}wtPaD#X>^*$Um)&8*-2^(j$lH4i#i)_s9!fW0~>&*9odwuJC?VF2V z+V0}3?-!7$#R!*pnf#0J5*L?0N#!^DH+e-o-(&g=zHq>YK4Y|Ew`*&$cmW#^?@lRw z#BV;tYv0PEdXptJF8`6$iw{nF@jV`oK5;-+Hln{+3H$Y!{gNbzf|QK%-%a})AM6u?*rijx|PRW6H@2oxF?I?P-Q1+hXI4|+^fl7l!HgYoKE-Si-WKKt?y2z21#%FH})#`uS- zVvt)`37%Ta{QOAEquN+7QdJbw>t$!Q<8MLD^?JHCVJsxt9 zu@Sp-W=156D{AOlKPaCQ#otlRbjmU(Y#sFylq^iD>hL9Q!)>dkLxUWlRn{pmx3U%H z{c+<$AX?H(Lj%UTjegLNSxOlDm(iZ+Oj*ZLfNDXFrbkt7I-VD|QRFQ@diIxA^rZmh-_IO92K{{#cCT|6=Sbfa7SBEQJF{~j{&jA>XvQG{`-)wWT0&d)|_-tW@EDel$i>}7&wh4f?U z=lY*rw2z_IMYxjB+0k5V$;9R-i335+3PoNz07%wKvS|FHIg=%2a^kpJZakdj{ zXFsyEF7hF9PKcYxbBQ==dmPEXP>$6rVV+26YdUtK)!?rlI)pO0FmHuEi@O8}5OGb% zF&^fg1}a?t*}ugVQ*@309rTQec1~24YYEi?7wJ9~a0c7kZz&m%d&ZS{JB!5gg)O>- znGLic;?|@RZIS7S@>Z3E9VJ66Cb*oA9ip1Ym z3gkfRBGpTTE0963;Y?DHz>Z17_8 zZJ3;AYaEv&k`}h%t4lcqeHixJwOW`g9u=8Lh#w@mzhVoEs6LKsR4UD4b>&e z{Q{c2F&TSf0E2})<%G$-A;_eHUv3@Ba|$Lh-Fu76U$4`wW3{vO;wC!|Br;gSTYb*; zCT}m!3JYW#e3#DHCOpCKZmhsd8fTd+d@|%>44Z~~b=&S=8r?F8jGd_J=n91`6`__a zrj#2oik&FbET^=}3#8Q$h1sX-<{+FP4#{*RM=kl?Ag<8!8>mF=(s|?ZWrAbADJg7# z5Sz^ovnBb-b0$irD@5Fhw8Dr4+HB5^yTS##pxNc>TG1X3=V7gdqAGMj&z!kJ_3LuoSVg*lj7X4BlHLrygY%(&sh#)&UJ<< zESHfQnJ9v%Ygqt5)waqR*2Ph=kMY)}ldN5?Gux;;|0t_9ByA#vc-QF!J39Lsw=_T0 zn_$XME&$mE#M)~v^JBil;EvngrmfqX7B>(IqIvd 
zhM;6cG?wU#m)C}}Y?o*oy#3~ccqU)_2w_SkriOM=a2=Tcm4+IC5w#)Ll2P1SSX@2w zqnKI&*2X$3J>5X{gr>R-@RHf1U3OxSL5#sY+md8%r}$%>tLP70fFtT%kV+U)_9K#P zY)DNew1c*gCe7Ca(5JfG7h=bqo(b+-T^>y*{e&7-Uy&XnS zrmRlMqdExx4`Iew-9OR|TUdiKh3O3;#Rarg4C}0;N9lVbAvSAL@7sC{jViw;*A!fS z#T)FpT;%W6Th3Epu5PE~+gHUXgZv8Ut;lP#p+YPz0Xf5qRt%7)ED$HqJD}LR5-p9t zpWexJ=gQoNG3z1CJELTFhH;`c7)8Ok2gx{Or!CU--WMK&o+KTf4xunxZ)5k0B+j4C z0pFaZDdi8^u(0aHZ*RaOBE`LV`4&CsKzwkofTN+C&RP?spfxt1+ zX39xzn7aqdDJjlU&<~*^-!jv_)4;I~(vLL~^lq-lp-7L@sshZ=bn(!a0JAir`txi` z*w1e9wa2*egU&YTG0g$U^QG@BItfhe^K58m^hh67NK1B7M!!r3v)J(K^3bM@1p0nO zo=e~@$4UVh^T*z}K0t_?c6^`$pTPrws9WBcb4wAIuS9-sz1jCP{lG3M&2H(Of(_w( z3zCGl>~|2`akh-?Flny)U*mD_`oSi-Jz- zCPaw|Wvp{+72i)1Wv(EeylcM?b^&ZElx` zaXPB^z)x{+%}IW8?#S|4iA`YhTAg*cn)70-hj0VV)N%l;5T+p@HV_Q!e_M8%iH zGAMCqvw7h}*9T=L?!I%0$vHhjp84?QPB7Thw;eCb{$jP@MZPct% z2prUbYI2>@rqcCM_!0TMijRi+s~)K0ztT;Y19Z1p*b8K1NFrdr_Pn=;N-81UlMvQV zrknRR+Wk50@a62MH~Bqg-7^Y8VH$Fl;de)akV}Jtog;wQ(JzoAyDl#%t51e9x*ArrnVi4Tcpz}B4BbNV}+JffKWORxZ>#1IYnuIy2R7)D#N zfaU-LAh}}_PVzPI9g0B=@{5(>v{20Nxx+3{n(4y|h71{<4Bt`MV)o~Z__em*xu=y3 zmMbaCfpOs0WpFqycRVm?!LpTe@3S+K4M3gc$$34c$dQA%eml6-$SO<$( zB(pq~rV`z;RaYszrV8+GG3;@Yof>6G>)Ra51$YM`;DiCrbGB+61=6!m;bCL|auCFMmlND1S zVrl#-)32%*0|Fe*|(&k|XM* ziFH|{$C4BB@MJ8a8wa&+uqo#8^BmlIq@*RR&d}g)l3|t03pF07nxq$#6Yr>|d z!|1AKXp$D7l98*Wu#1bCow2Q%Gnt%&iIJ_?=NOl>l`+88%HbdVuqi6Kvbe%%?-S;0^Ud?k zcN%BpI)vLAYb3s^5Xun5iy~2o0%#P&NR;~Sy`}|^HE8f6gs-6QR7XFUlLuhC!?L)4 zU9g08_&@qWeM2Q2WC{!+;iJnqtm0mOdfY6KyTmO|$|>bA%3nq~AkonF$wg_IcQ~V! zzr0qR*M5@Isy1)M=4`SgWBEOmzn04LPH{cErXZO;k5YzxU{|5G#~Zvha(N{@-EDi9 zzIkqjAe~-Wu0{Zuv{v~*f+q`}uVhFx$x9i25nsR}ms?sFSXn6lGp?SB64=X@;>Cze zH%@98s-yc97rcSNVfOAYTwS83?c3T$GI^yTKQR1IS#fgB31hZ9@uh=M_K7TCU?=+G>Ni9Zb;RcL8FfbM4v}G@mE<#qM_gjauEyl?dL8 zC-PgUf8VoIa)FSTpY07spBy$6{~vbn_bN$>hLtGp0y;lv z?l1NTUErb&QnM|!8wyKq9hPo%^7K&Xxz$PGOCp2Sa-;l%E2SMtOI}Rp11Esj-8?=Z zoZ^Y;V(nr7xA%npde+l{|GEcim-cFmqn1NAb~>`&U<`CoJ3KCn77c8@escdT%_%gA zR$5k~lmeF74+n|d?NnQbk=mkdRAjtfO47&VcHSVxu&W=?0#TFVm+%6NGni^V%KIzG znSBi`d?nkmG{5l%G)cm@DvW&OlRFuDIs2wK#h*2>Hd3FSn0})UxRX8-{AS!_4896t zGDuEhEPc$2B&6oz(bt;2NirX<8=tQ?!JvcGS+0loCaFo2k&y0=h;lJWnpLHZx>0qZ zO*3azrM-c3Ir{-4?(L%8PX0FvSRlzwW07}G&Jyj)TJR#PM&T~ zq3OVu|0gGgY^ZNpEiq0uc0;_^;utO)ve#6j+(BUA{^Mq1V3!!NY!m5hvDsKMrv`$z zu;DmvAmeVD>q>G{C${4s`TFx5hQ*d-sFYT-lm2|85{8qBXRMCp++z9Mf~&WwKsPcA zu9uxU6bI82W{2Wm3uAgqf5hEgFYT0})=?ZImX-}@VR167pi7C`%hRH<^}(yq;s2qnM=o&P-U7UZj+fY zY;sBAoDwybKO?{++aeZkLsh}%);%czhd#b$?$ls4zeWkiLUcZ1j?!=lQBQk8&DzkR z_%9`ogmjygMXFV{Vh;RXnwA7aE&DFCFH+L1(SFPxMyC&1b?}r;TxkMiuqa#NyoMDg z`gS;s^(boXg+wB4J7Yh8CcXEXsCA-(O0yzPV2<2p5dWrSYA#^2h~r1WBRI&2m7E-EIAV>~ zIdf@~;1`sJp6UAlVB|1RzS2ctP2ba>loQC^cE|CH6J(OWc@Gz~dSnHnySDamSTeBN z@6V)~>;}(QaQz|rfb}|Vb1@rb=8WcN^rnQ}^WiW@&s^jgWjEL9uSdOs zH5aq(l!&8lkBtnaIk$ZL>7j?-92;b(+>5(t^#0~Ic%o$c^xi{-oX!u`#k;NB?-Q$CQ;F^|i(`DT?>#$Ae`+l*E~pmu!sdLEWD>RA_3>?`L+dTut0G9gxhT~(`hVDkVs^?`u&RMt;O7TQ#=4WRY*>TGo$ zitpz~l-R4B;PpC#VF(HxU}eCBUL%JRN%7iwB&&pHymCEtQ#qq=^2HPN?!&g0a|x(E z^pOglCTs}Acd^Q?YNzS;G$`+IY+ftrS&hi&hkD05wXhF!4oUil9PI8&-S*+HCJ}#o z7(<%&a&vU%7Lw>tzXianIbOJ#L)GmaQk$25RNFkEslF2|R}9)m?{MiHxj-eYDelhp zVfYc|eh}Yovj|AMY7AI>z2WoDxCX<}caX3?m8{*Z_m6gl9x0EEQ#ENBc;-=*IRa1= zl+a>%ls=F{B&`hZufwjlovmYRp#k{4leK?R$b?Sk09yLm8`v8a^qi*Eto8bL#IBt_ zLO9-Ch8aWRUf>lY#|Z|Gevic$ns15_c83AOp1~B=9sTj&xcI;L!p{iC5V%d1P`#B} zRFn+lLeY9eVhOtnyVFYV?4dA>Go)cqeMqSFmrre7L@6G4W+ZgUQxsgmelZl|y28l- zCQS#o9mlsJ%ddl~a!dl&#qO~^K&fT?sG`~ zlOWgC%FIQ|$o`XE_n#cMs;Zi3?;O%x#CT#tb6RSV8a?!Nm=)wwy6Dza5HeKZ9gCt| z6q3E%N5c_94)=aFidhqjVZQ;VawV+yA}Shk2Sd1R{uGrg?r;er|Rf2Hs~5 
zRUL_)A8$K~Ac|W$AZzJLm(Cyv>CoR$RAIM49}As%KpvUfC>W%!Qu$1$5$OZS$%?d6Mbf6C#-)g>x|AHHbNTDi z({X>cGO_aVi!yT%@JjCOlAlFl3|pGhBs$vm%85hjDCn9`Ov_mqjP3%y4u^-8B=mVrOlz9kM!^kExmd6#ng1kqEp#pUL*vM#2ER~CvLhi8caNUtIXEO%+(`HE zgpjl_)r9{28#;%%`HjM~So*hbS!Uk0UbggQ7Wlm^RyTTo7LKGERG-k-T+6vL3|b2* z@$+$_d%@ahCgQkTtGH9){Um{S4SX4q$F-0dvf%&;`p-KoL8R++vWC7-&yhc))c@dh zFK{qejvs5Qc+ze-6pm)fXMZhUx!&+>E&#&b6a z9ER3`^6s;afk+iqyIQ`@l#OJ$!gElWDtkj0THXV8w5lG*@SPv=lbQ6&4xPi92Jfh? zKtUh+bOqLj!+~cY(!gj{)w@E~leD371uSg9cBQ^ebGCIUtFF;(x%F4#if=+)rdq-v zI<&-D^vMHe@l`GgVCFWRAdxwPP&%ZC9=$kk9@&wLP#gbe=ec@A)<|D5BmNX@j}LIkJ0J9jM8MOJ23N{fskhFpFPaK*w2`)x>-~ zUpKs>VBhUHV;gqoVVZ%%+WI3A#GHO$A!n3vPv(VJw5~PSLxts$^h4B@n+1`T&N2V% zYXaV;6W*=^QCI6$d)N+fH4f6Q=8&7PXK)6zWcT!fKisxE=8WvpAx#jpa=AFj^VDP= z3^*29R(QrqrP8BlFxI5oJWc!&r6tT*eY!|B)+6oUJ}@x{JJRKN?_eA5UIFh~?@f;HYA z+wOyhpZu~l2-=u9$iad|=Fe|hm6iiKgR<|D*~`5B^&>9Z93F?F`39@1Fm-tc@9hzr@)A!K zx$l9GeFQB!IZ?GSYu9$}EpD$fiUV?TV~5xPlF_kzQyj8{2rctB_y;wlMeBLKboZhl zR;Q@qj{UY_eptgf-96#ICnD#vxKIh7;K|b`(Z>H}uJ|9rn4%8$=2jK}XQO{+p)pBz zim1X!gC8pv$HF-vpyE}LjbV-|kU7#GrIBUEr9#`d&LItW)SAxj^L>g%5it>ruONO@ zJEv=4XRY!+tgO7OA4?k(O`RXFuaLQcl2&>>KCp12QoT}J1P@WGYRxT^(rqj*t^16`pHKhtP4Ymyr^sH4J*#07likw~UG#d1KmL(%rscp(i7@Kxz@gK< zb_U+iWYfwa7-c#pSkE8oTy@3~Q*1*3q}yq*$mK? zPNt4rudrsXCez+MIQ|J_qw!fjTxx!2N9R+&(K^~Nm_KyXypCq#CBD0-^Xb9Wl1V!5 zT{@8R?g*hPr`+09R z^c)0F!WlxpGGQH1@+y?@kFZ|PJ|i;m6CRP2ADHO(1#uzw4Lf{)Wm$6S8;&KBP|je{ zmQ!I1ff=#hA{voPuxJjf*hUHBtLeYHkn-gxOhpQWb9&X|i?I=D7g zEsoLPP;IyzQd$kES+#%%-;IYW%G-uBPcq_B38wp?jT6uH3m3tf z*VWD(Ka4JnSJ^%r@pgt_NiwyqJCb!G;_z7%i1q}D?Fz9$6&g1s$$pQ|-KzJa+0V!nwRRG(`CgAUH%hpSgV0s*8RC{Mq{VZ!bC zFwsZoNy5D?J!rz6ryV{Ykv>Y%M>N_?EAx-&VBSl#3a;LYoAzg0=p2(fMy6hIJ})d~W~@(mZ#!PiLYrqN(KUT?vptfBpv=ucc*a5W4Q=u{nFQC zRnr?V=NwdcniRnFNy^G*NzEzRrE5+P6|c|v8jXqszGmc-O^odUJ#oyVNC^DhJITCn zsI{q>&?T2>WV4K?cuN(od5s1YlFhIIwHbN6eugY9tSM;}($saQY((YdpXvZh$j%Ns z7a*?en&JS_Z-xA~$SkXkO(UrRmq&`btHg2e{>(D@GW#+ZDJ~vynauXQ;QKT$M3us9j6lcF8AR_HEy=VI;a0!-VX8B?7=7?Yil)>sC#*V2sC z2Hdas6O*pgY{FEOK3i7=SUriKl+mVLxl^*4~H{qEl#Y{-(gUgDpK%6n(bVZt5RrnVa#r-cAnYE@yfZ^+aK+g78Nw=v?X8nL+sfeX+^Icc-W)0!J8APDB$~} z^`u)1RNH31ol>AK_FuW=(BU0?<5dbWoF&zcf=zK4PqcjU9@M)-XGF0eLU*0hRP*hQ zYe5Ngx$`o3aTSNG(M1)bS&b)~u0p1Fh)RN8kCCtI#*gfXSZhaZO8~Yj$ugDQ7LLSq zi}j7{)0;D=I({5?fQvp@KH!#sdjoIJawS+zrtf#{}nt!@6 z=IWz!O#9_nbY|Y;XTQlTyL;XLn)d6o*bsSPnDnFXSp{0*?@!o`&y89cNY#5!$!7XC zo`@k-1q^sX_uiD^#D-KHAf-z>dVFPfL9(E0_QSCo07%VHt)yL|z_nt4Gi*YLMWu$1 zliYG?j1{(>702;9!We`V0Uvw9=YYON;_?Q_pU`% zT?`4U`+0sr9?Z`b)pm*2FKE@mB=lm&72KODYjHTh^sQz(PNg5 z!!QI5&LN{WwfCmkWKqXHs~0#jc1(``tfUB=%wp425SXNWNALs1|B{O(hloVC-kM+~ zY#7}AegL&$QMfbffavaORRXjs-?~&3oS7p&0-^eqqMT4+Ne5OMUm8AX>`TT^X5%B2 zx?9~nQ|=lrt~qaN$WOQlK@~hK;*<7%hY7#RNnJof@Y&1J+6ivl)@Vp!P(P)~Cub0j zcn}V(NPVJZ<9rqI`fX$sHG5R}p+2^Kr-lw2ZTFGV_NdJra(O!@8Q*)NP0CFvHX)}$ zOC%86sls=3e1Yk_WDK=Z9ke)w-3ZMo^IWFz9>!U#3m}wyc-yguRXaGms6@vAQEEwR zH{{L2yek901zM5BG86Q522`XRn1JFZRZJPaKzen&*H~W9MCiZ^xPB~&slRe%B z7W199)Czu#tePl2T^oSWRL4br7p)|-i_rs?CuO=v(u0V4&C;XyT~mdnBl56>&(9VB zu=?A}b!(pX5aXpT!hT(z!#Pp9)Q`Xj84=1R;w1TGoD87-d)}74p)F8>75A&-o1x7a zx}Rs?&X&1mnzR|=R4Cx0PL@f4O@5++$#E()ip5AMGnQ<`Rmd}agGSm5cHh$AMGO3UHu4$Sruzst z<5<@59%{1gy5c1=28f@frlFRVk!(H zx6d}oYAn#tuYglGlgGUp#Cc~0oDMxq*b&<)8!a}E-8FsW)cBz0TUV%;A^)_GK@RP; z-HFb*QAzVwIKmHss7%2=E%Y_ltxtp#EewGRYpkTt&$UUsT~6)hryGiSXu(oliYKMS41y^gB`tKNY}=wzkz$WXwp3IiXS(cmrKj5l@U|w9CCD;wH_KoLyL zT@zvC4Wqop!m13|g7*eemdNLYPC@%Q(`NHQ}ud4j7Y+!b>Q`_l}js+Bj72lWkIy560U zn7Tfi=a+;h=o)7|&eFJHxKF##Etesl@F*r6Y2Up>xPOj@7BSq2?6<6Y+;SDaOx`jy zkCWR_>I(sW0`|_DZ~tp3B4KP^AwDQpX=2X}Y< z#_b(uEOiCO1~@A+oa~5IkhsEXK_6dAX{*MK$ 
zXO`Bys^kZk41nPEt{^#sDZXyG<&w+Enb1ubQ&4_Bin1bspxL+)66q{ZxhZu|>F$ z#`yQO>woaX8Ld4-r#UQu)<=MtwQ?)llaPAx_=38mZ$ERZs8i*eJ%|Fy-N%`(oc*>r zPKp(Fs)1?x)2QsiX7WK|RI8+!poT7Ob$ z$YmSsFjboM*?gbL#9O7+Gf?umDBL9~xlMju4MfEX)3Dc%F-}Ok2327m)Vlh3Rs-uN zJdM1lZwfE<{wUA!CpzARKPHX@E77T|RfX#InT&X9Fk(gS?7y~Y#yW?6+qQ7svL6i4 z8=haSF6L=)VvHdEFl<_=-rk=GP9sgNH(yd|;^mpt%Wrtj-fuN+k2MN?Px3Nrk6^~$ z!9o?5b0DP@Nl6H!FbT}DEg&)u%Q+-*Gds$-^2(B^J+T{EwhKDlyGQ`!j zz(T{d+so;ysq>nGJcy>>&I+J)enBUZH#?}JuZg6XhOAIpUw|)hio+f-_~Ti6H$dQ} zig8g0la>G4jQUBK?+YKb&4+y=<-{o6)VT3u@dIL7l?>h`>+pVvolfsGI%yfEgUQ~a zh%4A+9FQ|@XAss=g%--tk#N_I@qJ%GHcw}oCidl7AopR;k+X{NTfv<8+K^4kyj`di zZ_Vs0IaSi*UAks#ula1}<-Y_UjF%Fo%7$#l*TChT_X5a%>9f)YNybKi~0 z#yxI`80_D;wGn69Q#Rcy4y#3YL=byNib#jxH%uZh4zRMj-9@o5dOmAC;}9g@36W%G zfFIDrf*jf3g5BPwaw9Kmkzk9G#X$Hb1v5m_Hj8hE<4iFR_CQ6qW!oUjzj&Q5eI z`+6LrV5olr^*EJ<`40K-fQoO`gs0?Z_loSNNBs}p^j|hCVP^|~-KU__Cqb{7<39nz zl!S2^aAvd+#b?%nCZLWT?Qzd}qdL^81}q6|&t^~R`K(pCggMIaSZU2(`DPE)WnLc{ zy?P_Gxl@w2^M$+O(97TnZU8HrEY-KsU^`3zCIZ+&CS3MC^l{ibzi**|nE2tHYQOj* zKMo2S!(KYFnlHnm9Y$O_&XjUtN(Li14no;BMNU+RYY%E5s$uyQ96G+_7#zvD{s>pG zu`LlM&6qL8OvOO}f1zF^!*|>Uvb?;acW2=#gYC1QEa_BFru(|R{Q>3?6!U2sNXgGE zs-SKA0}dyQCMBPa9XS>TJ#a$MK)m*a{euCOI&Ntjg?{&rF+ByG8P(Ml@MqRj;XP;T0+B7*)PAM{{r#vtJ1Ks{fzy&Di)usLjAuT%fGD3Ut*gWWqH|NAtc|~KLc|$ z<&={oY_Jl197ROp%Ft9~9vj6c_2g?qZmQ2Ke2?I-%G(?vC~~m+T5kK}zaK(>m907&Gf3Z&ZteKa88rcaovVPXT;;5ispEVuySTsP9&$#rt0; zpzX;*j42i}9W^QWsEiV(RU*D&^*L=W$$FfJ{J{7$hhC`@=W@o4#PA-#|2Y!(?h1>U5epTxxqnvsYEI2%OY?!<&aYF9s+h&Z+ z@Qc^sH%jXVJv8S^1ftF^YxS79svTI~_jxNIw0xs2(4rx=f5p*uuFFr^$%Y1Bm%Gad zxh8=W5A$O9FAzC+1;QKrCp@0{zk7B57DN8a{Z;%IQ_s?ncAwQid*9_sHHjj_LZKWJ zrHYkzTw#-w?nNqY#11HwhEYa45?I3>6D=rqeSqyUFGVGL}DPSheSAGBSeCQVhdnWJSl#6ID~o zELekjZ&rB?klEEPW2BMW`Bq~>JM z)SO5(o?tjIhJMq~+C-GsnPE6FM#fs4!O>_sGL=Ny(l5^blVG-Cxe&i^A6Lf4Q&qMs zH8m9pYo?)1A2epV~Ow7s2fVHHbQ=hmxyOVoTR{A73C9Uz4)gC!)->Q@-(}|4Fa_3(4La zOJRaAIXORoj1QBH#B~%kN>sJ0C+w_9e>@V2X4D#nK?wMK zr|gPCrAUxgkiDdF=#|g64BnKeJ?$uItbUBTw}|>es0FMqaTaGS!e8kB2KbY?Os|A~ z+M_$?%iSa0RNF-b%VE?I{R_Q4=nNJZAz8E7QnabxJ}9huDKJ6x_(}d_Sz{j>9f#%< zt+?3Aa+_|D>z9wPoBItaTbU_V5uFUlM0qmhq7@F-U?4p(s|az=JB84GCpd8OvgPtk zq&w|Vrh9?pHnjx3Jn(V%)r?-;FJXDq#Is?WqS1`CAv4$4kD^2s_x-4$Bvu;w_`G`p zmfxdV z#NfO&%wH|gu3^nbGWdG+!s(s-^v&)3OoVWut>qb9{_^HcclFT>^1UI?3MEIB{lbv$@^hA=OJQWGI7!l`nn~ef@*mx zM4^)MVjPRCWT#QWb6Yz*{HBkn$0PRj=a3Wahs80aV0{l97Kp74>V5o^!7}VdQI>Dx z{p@+b1q}XAQ@r?YTmbZAl(0-$=a6VG*CAQvu1qs0+#kV3s6;p4{{62%6=6D;BJ{zy z`#O5LwgWQvbuW{4V3f%~XH9#9Pd`;W2JK2GW|%nX3*AgkX;{gZ@P)6xghP>;?vBli7N`^e32p@(tMTn_%vj(?=aPBwRzZY$L-rv5ATRL0qgM zb^>Mq4j`5RpkU*adsKM?+xheTNMVetL7_py!rAao>ehO zuDKP*k!Y{^1C)fFdUE<86H4Aqy{SP!OcJ3_Ttu%Nj`@sYAOB#equfbh0owwmW)5&( z>Sj>7LkFvNL6T6xh*Gd6&SJBHSi?h{#uqAL25EB{`Av_pT}RyQh)I$pHg3+Y|j5pa1|0Q z{5KU)@ej);9XPkW)^M93gFGte$Uw^QGbP;_h{WS9Jr58>^5SOKEuVdVfwA`g(r=K! zBY{Uo&TnX0%KVjL+(XAIPYS53Vaq85*rqkL%l5byxR~h`je`HuR1Ho?+8;>GZ>(3M zb5@VYIp~iB5ow>zuq!TfIfa%ELz6jH!DD3q1pVJ6WmG1Qws?IRA2GgdvUW|qEIRBu zl-dj*{zVA1p3e71`Loyg0hZY>^-WNFq*AWpQ-l*0hmG>aw5tgL^~I&HVoL_2v#Y0D6Xm2g$yGoFpIB2w8a*@D1$&A{qwk zAn}C+q7On2HXUWFixin;8>|?T3`-|^L1r4&7)#39OCWurNKg2yIh+hro}ImnHA7kH zb$ubG8NbAGQe-)nDtv?J-TcQq(^3m;$KoYT5P#mDX{f@47LA>`>03)OHBt%hXJXk? 
zUP$|@XTIFh2G4(`8Cp3>3dv`5Sbv{Nje-+==SU$hE|t8X|Y>0|2|M(+!akK zJn-BuzdRhZDi+{YN7gAH<2_o@<>3>mPh8VV297Bj{aJtq$KseM!Z?=1<2dQR=jcmg zG9-b|mN;h)x2h_%*uxINOlXs_2(}oDu-9|!31I+jP#7~Z=u)M`h&Mf~Nh1o4XpL=G z;#9NKtx`t!9gN8QtQ@b_p{2O!gToDWwZ)-A;Lx#FM3;8c#I07D{jOw+&Muq9i5RZ` zYyftBvXmQyAt`adKMr_ScQr=Vl2Nlz;h@Eg%DzHUw`%-8fCbEGGNlS3y2H3=AceO+ zZntHE*O-V=GuNNMd2y%J2Fsqlw7xw*(c0?)ELENTiG zU8Kuc!o#yA_!NOyqA z5Z1a$D4ZX4n+7&OImMiub=U3RppIfMVgfJHzq)9)auex_Vd{!7%69i^$ho(t=7GC! zH%EXv2VK}tPe=%dZFbxBV3XO?E;@KXtU5W#IV^3VNpr`3iqYVk=Z1*Z{eV^N`A!Wg z0A{g2;jkZY0fxowg2%=z(k$khG3GXvR2j#$5V2kxg+&6ZNxK$q4E9Qo(GQ-;8!iCh z-!Fc(Xx~dRP2Tp1`R`f8{hpy&;omZd&#v^psIC0xUFpA`)W1i(E`NVQt5WO~XO%uD zYkuLL9Dc#23ZH}v6oO06%MWKp_JJN2Lp4P;T&l|G}z@|3Rkrq}|^|d-+n?O4H}!2hb0r@CD=x6+hVHH1S6(xqwf}-Ut<~&W8gH0_&FX;%g+_M2 ze%pCYJ_1EkyAyS{6n=OE=R{3rHtKNUm%JH$N4>8He(4j>s}s{X^l!z4ikB}DaHFtF z_25QTmsH*W-u+f|9$F4KW8g)TiZoy8Iq?~+_ggQP@_}qk{qdUy@)Qfq!&3*5&?5cp zq2G&Fqh*o==4?JdknwF>KJ3%|2heS*A64b|Yv5Dc<}nBvaiseJUzjQhcG7o- z`*YEgJGh@{SfcSQV1j_>=U(V1dGxv_&Ak>H7(c|nXg{?kh%>UG!@)<@-6CA+G+&6N z&Ej%f%M3J^ZEIjeHIFm7}|iCDDWfqlseHXcSwL#me49rO4V}g@DwD{ z-bdItM-B4r_FOVhLqHO7C3pZBPrBkbi|?5U1}1Hc&0oTdCW2|1Y#_635|t9z9?VDr zU(~NOD6toJ zrFN3q4z0>Fv3e4#EtHkHq{_UGX_fTEXpf}my6<(um1?UK2yi2HOMyS-)~^Q8XQ=XNZ8v21%AxSfO0f`-$8}zW>YDv)k(3fCvPZA7i(1ZV%^c z-jmt<-cA1RFDGyy*jOx~3B1BN`K6rhw8swE%-IOTR&c9ArOjqL_ zT|jbVw9*m=>9Ku$DkJu{=G{a?MSJzs_a$t&YN9db=rDh z#f@3)q0_Iv;a@$lV$_^vwzevVZ5P2~Qu3@g{@UB(mY%I*P-Vw?MmppSf!aZo8+9KL z`2p(Ye>gCrOT~Yd(x#~(T0@%GsxVVoAtnoioA8!oZPM%|)&FztB5D+iXln8ZeW0WK(F5{aI`2-LiXsgR`W^E)iIklu_=J}j zu)$nQ6&vaQZGtuD5qV30s0acf$mv=$``ow|O@R76RJBN`{1HA6AHHK%ytz-aP@-Qm z`+^U^*}s+jUCglo0)T8n7v=;ECexLO)$gXz1#C@vcinHEr1zn9?{`=o!$2FuIgwHC zV@)UZz;_tUo=b%IKNh%Y^sG8Ui*5VZv_W2@m!;^vFADg-@iC1yN9<&e8W_W19`dEH zv>mbxd8gHGW-I-PsS8Ie(!+@n>gU{_y~Sr7 z>}d4achGQj!fQDzQPD-o*Ft547CcZRN4Qb>@A@3 zO0q6c2yVgM-Q7L7yA#~qU4y&3ySqbhcL>4Vf(0kIzOVnDdEL$Q^qW^}-Nj`sYS*Ri zsk*1C&e_{zlVr7au&JU+=~C?;zRivj31T44H;@9qp;<*)5fTaFd}6B0o!PeI>ES6P z28ivF00!B$A$3Ly`tG{kCcm)X7+D3G75NVH`{(aTy=+4H${U8_%^iMvsi)#=k|8mEcjpkx9`eV@dB* zXij9G3}Z4> zJ*CaXP^H?UatFWB+s3L!o;H}9p(H)Xk$=Iqe+h9)CdjBz<|kAsI0rqt)D`}b@8JFo z)Mk(*W(4aJbZHQoLi9_6j*|KibQZZC_dv~#tl6R+>B(lUy;|uQkxjga&p!EIeZd$o zZh8!WANYs}1jPHlSgn+et*g!NzTod4N+l07;AOotvF^>nYEVcj&snX2YWhSP1la0x*P;?W81vkhwXOT<{t0 zOMOD|A;A0WB&hRE(Ek4KLR}1JSg~} zS`heOQ^bTk;lrtymju~*V+loW&~m>nA_Gm`pEx&sx=`r1B%tW)52cWFk}tx)SbgOB zYJSa?Y(qlQA(_~eKykfnjgdZ|1Xu_)fN2sJCz;8pTkw=M4aIv{rf@RkVqJ#Xn6Z~8 zS81>&?9roB+|od1`hqLS1-D8WA`jpYRfpY^2q00`W`vccO2nFr8Qn8~v%GDQYF!RGAK7(f z<@~`hl(D%;4EI`&J;g9jQ&xHPXDsyx>zjsVPWC*`3Kh>ClAs&7mbMV$(cZ!#3e+}A z8u{EsNSf5dlJ#hlvgpw?RST|{^ri)RDfe%1&X3I05A{sF(-=@S5=*rDF+iZN&-^6T zK4(QX2IyASyZV&yr#v*f`ke6Sm!}LMtSHSo%*KO_md>&H=lAG0DqYEc@JR&UMg z_&p#4pElAsV{h_xG|3GWsS_3;Rxz#ADi?P(N)I_`5fwlv_zlfIB~F#7d^Swa0Udun z-6uJv-TjfC%1u?xEQvgnaM0o$U`fF+BG8?i96~D4a#=R4aRm{Jt8zxD0IvXLILU=S}PO% z3U9rcvZ7-mkNBxYQbd;P$t$%{bnfC1DCg~ zus~_hq;Yku*2J87!5211@pSY)lJOpgSgH1IOl*jvpD%b9X$UOQYmj6YCKI9c2ft4J zhg0UtGfKf<4&TyEon;_dCX0u_=rWgIL;;C1dlFSVzSb~vd)=@v8G$x-SP_(KAXM6i z)DDfsaB)Y*BI{IQ!(}7$3+nEQ%t*4`mK7Q4BXcD%ar16o=}s%KtSJsZIkQF!IWx_< z=L$&Ibp}^^ERL(mtq{4;iFeFVbjlh`Kr~Mp_#``g|lQ!Kb1YI%E~k zE&BCi3a97bTw7!P&B;4iN3_|8ezj2k`T>6K>M{6)+`^em_2|i1al+q&EQGoQQqBWI z{H1&n9)-!gb=Dv77ma$~b}z%!LZwY=8YbqpxUy!gHc(DGv0x_B1PKtOuo*&_l2kp5 zYl|*_1_<(p^<5`aVC=0OnyE~6PGyy?w=p~OxE9-p*Tj#TX@40XA8QTz8V|OnV17XL zxDq6o4ha8C|{g?;XWEhwT?I#=2~920N}@+;7>cBCv-UyMd0y zXZ#Ba>%Q@duo4q&1e1J>yF1?zw8y~Rf&4o7bOuGmdz^+WT!*#(WA&!-W3Jw)fo6@s zz?}>6%pqr}W<5HN$RM6_-JZQN^hs|fvU+Q_KHt-!GWk9e!VdBd7qp1iPpo8Kk*@7y 
zZJj)XxNPRGCYSUy%EQl349FP<#R+*(A_BT`Tf+h5^ooJByRX=W?GVlhS~p)R$DoX$ zeDTGaOq~@5khw!P)C)KkwXI-rB!y}@a1%+}0+?hWMCE2VrVJZU8##2hu(c4Zt?)!9 zw|!qP=H{Z6jL7b%WPin=b zshKDw`iz(TmpAw2Xv@%D)pP~40m1Zhh_|)|TyBuO_rwtKUzVqT+kUwN95nt zs^&7d6jK#UNlBA-Q=@j#0`{#ulZkgy4KX~n$LZUgWHf%YnlfR?1u^WEPiikZVeXel zTP0$}FIqP=8hH#kU(|I0I%kkx#d5?{cWopni@ z`Iws5Y;nSNdBfnTGaYSFNC@M3mB>*vPm9(fQWTK8E?ZwYTD$4YOoHSn%fqlt0?QHD zIfZ2PWAyn|{G>>M@-LD$+5>isd@VL*A95Y0LR@>$x*6aZ;1%6FrD%1>0sYdsxCg$& zM9(`0F%To18IvpVxw2a=AKvIySUtDd#c%CT%FlzLUKACdgY>Uh=wLl2m*YO~8%oiR z9YSSb&clNQjFhf+0OOj%(&$a}5S?MP29AR#GvGng?LVy&2OsHZPB5%`f?$$;Z3)o- ziP8^+l~udekNf?_&vvyKT50O0gW>CDcvdkbPp}ocsnHQga-e3BJ}X>2i|}0Fp;2ff zd7;Q*8dWWbF!W$f=vf>Vp<}FjB2Nor&xVjGlIf8Z3&SvH{FW5-_#szJ9l}=>!6rd_ z{5o6OZ1ASJc59rf!5KSXbnlPW5+m-Smy{rdF#HJX!=LOu@K^2(TjluZurZqLju1*n zvI-$b)fn*n&x4`JP*WWu@k4xU#u=CW$v$(M*wYHr-g|`RO<&x4#%4}t1NBQ9{cPjIe{qoh;VK)%dvtWhtAkhF&O+LSM7zI zqp$R@D3tq#oHoG!SBJB+s_wEDVEtnN>;In|&VQM`tGj{~D*v|)>2s#KP(^J+ zG=c8b%V=cPqbC`QuKOjFP?jZ4!+-OvnTz_flnwVx&JO)W1U?HQYy59P4nvMoy>XK$ zVY(h?oCj^wjvmu(r_;KdzCaWPtic>ZEQhUxYP(px0P?Ze+1TO2a7s8TXetwy0eNM6 zr9s+Yw@I6(Ru%fRnPKXGhttAyEFD(>X<01{jpti3>(6#RD8sE<5H@~EwyOIBh@>6YI%{Qsc zxEfH@2Ax$@7W*K9Ysy$tfN$!wHdGr9h8v--SXa6Gv2@bWZ?Lk%4zA7ydYHDQ!Y5t7 zR!zNp-7u94^Po3Q0scl-&0)BD3fE2MqDAno(Z0zcT};-N%UIj`D}Bp-p=rZRk&8#Q6N4;f zUQDrU&MX4>UMR?DA&y6QVBR+zIC<0QI5i^SR4b;GO_1@r8pu7eJA~IC=U}HrJW@i2 z1>&`^!4%2)IH!c3hyctcrh=;k-9OL3*l%tqSi?2MAO!A z#2iy}Z@lugc51ox0RzB$^XQCJl`@0bBTgU?+R-q#zd78db-GK6Er+)fc< zUqy89xT;hFhw#e8k&Wi4xdLE}9F;{gU-=J`5OA&V7EvD1#|+aE80#BIn8eUV4{iTC z6qwC-o_Ya8p$ae**#DQc*Y88&{T4yezX!p>i~<`*&6t;f{TOs4(^Ur62O528r@rf*RS-B{Dw*qK&}(#;!=)9zD_Q-B@$+vA#PT_BpR zAb%DUlNrGi=$hJ=eSqPc#ZK%Q;y4S6H=_PK1hnbTjh?PfX?6a=DC}<6u>9bJGcx zTdl6qY6KtH3(~0Kv{cV)8*c7sPBO9fvB7%k2D)3f;<-Aea8j_hEvzWysy$FcevsqE z%1aKLH6IlT9yJSrx&M&Wqz_$_H|A$=WR|SI*i?R=?xGEE1)4V2g6Vqu(QR^(o7F;N zhzmsXexx47c_w-3$vt?@`5SDfN`noykJ4P#RZU=em$|ubcqg8A1YEvqx$JD!WlFKx ztGd`dr$Ck;&od3ujAX80TLi!UzCAx^(|%fbwSSPWQG_0$Uir1o%c#|j&` z%Gt46HmROIhINdsMxxRu^peYx`UC3qlXVDLHE!}>-@%}5)k;KZ4YM~4UYr8J4{<37 z$wZ@Fgc@hfipGNmt|<-hB|`O6vv~zayYvHpC#Y6f%Vvzn1f6^(i8=IKD2=xRv|HrKyHSx1 zbG2Uzh;b|aPu{G*Kb`t7n-NKh+Q0E;@iu5Q9FYx?%!_wh&7l;8R_sI+LbAzgLTZX% z=Gi6~Ey*rTjGYwTqd#+cQ(gB0;`x!ztv(144V>^~a=T9Rrg)yM@jrKi*hR|mF)dwe z8}tiJ_LB+SHYk73WHiERSA(^oK7$EP0_0m6u$(}@B)AffDX-Yah^c8wdFGI4|N2Y@ zyEkr0YhL|<86zsm>HU$u}G3)&c?i)97mH3R}tP5&FCW_fK}tpOv- zKDJzOxzT=2Bch6qSRW)jz_(d4pIGFxSdrmi4}rZ&sV!3=$2-ctr#e+EXU+uS)(4gv z@hD}+q3?nY{ytYUe)j3wY~)2m%U~&;A6m#7Z?tL#*+svb28SED?dJ?F0ZBw%;~o5z zE;P;$#rT^Sv>FP!NT`cC*w#k2M5W3t=kN-3sXB{aq~l)9i2S5ZWIHGBmp@Y((BukQ z+)|P|wpG(C+l$M8mZMR}Kwr^iOp%cX)B)_01 z`4C3N_vO6M{%qY}F9V3*}Ww9A;u5XF_n9KAJJA zBbIVvU@Pr_7nZB=i8kt;@|vmmMeb1S=jCnuwj+lclWH-)-FZAFr~9apOI}4Z-03hp zW@$9dT}|FWxL~8fniW`H>S)uNvxSzEEx1hwYlYF4*7jZyu_YN(rWF@KaBms3Nc|D7 zZFd)Wdv}Z#C%{Rfz+@#@$Iq4GJuZ{Mn#DFXR8pN^1dRdDM_v{LN(}|3vP*Uk2P!%x zT;4$j?V|0A#5Ue;gV^!W;SjJ#BQZ59@<13mI;A(iD3kZx66G2M6N6F>M|4SI@*+Mb z;|4!mJ<}AaL8st|uWmFs`?A-b97Heme}d_Y6rZsN1LUq;L)VoSKxi1~P|cJ&@qFlv z?0w5iam8)1fZ)p3lNg2!##EOWc80BR8#8eK3ng-_gh@4xf~ zO_V3J&sDZ@^4q3K+u+^xg?oX%r%L`RUGCugNm?1YCXmMJOTfnZvdH!mR0As_ z8>h|*69zf0h&D)5SnJK)2OH5jhep$5yaGG_f;886iO-p_hdiYYj;8-QrFEjefi?NG5!jr>we-mB?6dM;$70PNorVE_L=+~dDLJjhbs{Oy$f^~}0O@JNqHS_Hx$ z^2sj|Sa1Z=kA_f#Y0xNGc$2OGbMX6bt^xJMj|_UxOE4sv$gW3r%-yzAVf({K`1XV0 zmnqIoPVN@nuFf||J;VyG$GF+NaUmfcA%&1|v8&WYy)nyp7%WLFG|c$pX3G$4SV_9> z@m$po?+E=;llFz#g_-OL&elGJSYZuDWQRWY0ZUB{kE^Cf~5)L_|y- zn}qC%q{Uigm_?J@c^{|--4vSRjW)qrJCcPUKl1RC;CMdt6WEsHg%4Gb@3hXICiQW9 zhNu$LxO!fxz)8V|UhqEAChg5V9D@ZP`3f*!FP;`t_a);DKIT9+39d5wPT6+0zraZr 
zEp{ev);3!&YZq6nb-*&|5g6-X#;{g0Sl#|mNAy#11{sGt`NmiGHN_wwLQpl6g&`bP z=+Sipw&JZ#NG*P_-vFb{MiW-4^9^bRdDtOiTj1KkZ29aiy!QhyZ`Q5B7rb(4ItZx+ z0u3?=O-vGK^sRI8ZH#0cjdm?j$`5LhdDI7``3)`|91`XfMHChw%hPi3d z1@x$L-aXU`&db!y;_JAyB4bcvBRRLkg80?cr{x=v$$>9YuTaw4!0XflDm(ZFWbqBH z5)P5iFBE#IjZpF8cM9xa6Z$9If1UB$AV_K<02bd4I5%VZU%cS|SOq32ZQ6bZn7J$^ z3XCIIOPQm>n!KKs@|_7ox;P6X;VRMu-mQyYurp=LelznU|HDoM8Q(p`y%^@S^|Da_ zsQLG7{JYF^uY=6hO<$ka4|YI{qG;S~4ojm27Q0Z{nt*d61P6NWqv0CJG>_dtJ(s>b zG4<2O@7x_2cf2cBPI>@JNWov^E7a`E>=jJaI!+Ss0C_D-RsEHs_g#I@FXO@R_8oBLaq-k5T~tE z{lQ_*CKKt(#|bkY(V|deY5-AHkTb|cKSf^h#tSq+0!7NV#C{I-v_NJq%#oEh9wDeVurS~id-D0cr*Ub*QiGk+VJR+JOP^vG^ zb4#|Yv?r)_G4VlY`nGAet?j-bTt9O>15)j3pMOBDMr5?B(yW8uF`!*;N$YNn5rH=J z`Ko<bDt0N7fUj2cLS%4ClszF*{CDYjK z(1i0B?*1Y+gC*32C{}zQ$qH_zABG+79n#j*QeYPjeDxA5a>i!HM00Vf0`!sDNJzo} zI!%E ztZV>>Tm1ivS*h4q{=?B$r;3acfd9t3VU$e2;S(gnB@CiMJShTXE>S2^QIQIYW{|@c z8_DP6pC&0QR*BtPzLx|lUdrwl5N=mHi@g!(^pEH?o@}291xrcrI-I7juRUjfeQj`m zdphL?a$i$L=x_D^DDCu(ihQDwL1~AeMh}ZwK`UwpD?sbEwM2|@7{Pa7z5c8^3@G5S zr`g$cd1tR)$0SwVUW?eYwZrVF&EI%GIZH8Ybr5xSp`ta8>z+p_v>jZ?VGq-{*AcBH zYAyXBy;(r)vX3xX|DK{@TB&lET->O)QN}h-Kn~y3O7@%1WtwyFMZHqt&R3B!i=xJ| z_Lzs_q6l0tYo8@NTzl$%)$~^eK|6=lpUl!ypx`JovX`)x)eq2JVZ9p5n)H7@`zQ= z%as~r054FNw?~dpSTjg{IyllBVIO1zx?u@5UPVmvX`Ku*z>sNKiOe$*>iISrG1$JE zJ-*nclIQJPU~m1&`9uZWv5jH9cZg_WnoSNo9np1A7Oe)O?S zDi=8JMm|-Ny=6^Y$#i*H`2iKsAR>)Q0uc(Tg9w9300ro&4-h_xg9oQ^FeC0nOKDr=Efj%S zTAH)YTO5l56)aIzPcL*Wb}jCycy|r9G@d)VdsitEoV%X0Gp9*_BR`3qbvmAN9%MV7 zadvy2rL;_U*x~fhxYMF@+exyPs5lM{7$35NlJOj}ijWKse6+{hVH-#w*I|@S-C>TS zZVOH&3zpK!R%fD-3m%7@2Pn8EhJ7a8BrlMOOlAy5NyQ*H^k$NM!K=aQ&gU2wF3CJj zfU+>jw;(G^8|9-cq;trYE5=}&7iRRBpArd1$)FIZk()B5pH)`M=a5uUDh5rYZbL0E zE6o15dCgN6k6DgsG9ryU&omwjBR!F{96Z5TxH90?_DwiyLPhu&Y#C#ny1RZ?m}ZkA zEex!NnL!&;tGLO%QQg%TQj_Abknm}}GV8ds2A#8oQyd}sfqs+LP6BFhrE%7_OS{5eI$ zr3oV6&yB=l#HII#v0rK@5l%yYogR-{)OwCM!}o33154D%Zk`TioMl`Wv_;T-M(!01 z_yKF7mDb%NQw+6C%B4G#g8G zQ68tzfuAY#$~t+Gnw}=Hkt8{DU0ew)Oi$XSVpA9q_k)i%kRo+DP1eKb;XY$q93MAV zmua_DpVfo=`OZi8u=+yCepV+>C;LWku(ZbX&%qK4QrG+2*uqw!wb*PO13$YskS{?uW=EGgRctq9p zfh-(ud-L*)bGUqLH`R9>$SQc@fS;}g-*IhW6t5EH6c+8-l5QF+;SggNPcJ)aCfAt3Zp;*%YAEe{;JG!E%2-h4Po{W`3l+1+(seGQ5I)8Z#mgc zP?6$;Nb}S91VqVDN>MJEu;@lpG#Jnbmx@dmv4mb5p6_=Z4&qzA7kRhGzlwxqB#pchs zO6W%hR)~13T8VJ&QA;&gjf$^KmWzP-lm`#8_0GLkPhjnf zyufn7EI(VB7`1cMJ4|Cf_l@?MLfXEjuU`*!9eD%DrGjJ(azqC1C>e9~oeh-XIJ5O!Vep)U( z($W6}N=KnoTx|?RuAaG0C&DB=%jY;&;xG@(!oFIkK9h;b3_3^}P#{cM^O(uY{K#=Y zH3bvg$C=9`5uREie2*48Sq42ZBrevN#+od6UI#)Vqvk+!GRz0#x@`laD_`JwNot_F ziIxItV7)dJ`%$VoZXK=5zXl2#B47`gDODs=RO(iooITD`#W5?_w=Oh9!|vU`kRnu0-0@5WPp^pMLll6ziysTcGL=@GS_3 zwT;ovj;Df{nQ@_2)HI87EFCdOLH@VC?ww7V zhiHebgsVi-%_MTzhwLETk=bOP*%)51on)R0qA6`0>W`+N*&w0GJmf8!R~LjmvdR;C`g)a8z-yRWV>t z!v^NNE{*|F~kpH6WDTa&YpZ5*zq&# zuybYDQ01s{SaE`J-I5j3ssGX1VKs86B6@;qg_S?hC(bdav4jIP4ARShYHbS>XfDgL zq_wm*gluUNI*5^DLBDRD#rC2EvcTyjp-9=d)i7SJxM&pMZ0YWs7-OCOG?kW|%RO;%h%NDQa7S z{Yq5RMCvfCN+-Rz)A>DC&f%2A>?)dHIYku8H?OTH=XTX6ID(x__b@gW=s%@9KfivW zRX+z+;=|9-*I5BsHG>(zI^nf{$qNih;jZ+Jq@Qt4FFQQv3 zdyx|_U zO5sxG5$yrOB@~9OVVqO+u>eDtC*A`k#Yn~5tpeAScebSKXikvu^L8S;QOM_AYcA=d zFCF5ogh;Y@TjDZlECsSh2No*d9DJIW#?hAOHYQ-R7t9I^yoKaX6LPX|eiHkKH<$;I zI};H-`H5aF%v$Q$sA5BVL)SC#N@K-(_{EHg>mDQoUoARtFW|tDbr&~Pl)SCckipMD zZDhHWi2m62j<^BdgN+Gi|GHk%Eog>?-=cf&m2u&4C>-+3Iqw`d%cm~@$l(z^6lxi% zg+7^QRS37P`N!bQw0j3|2u6CC+I7ctp{2=$2^fENZP|EVDzb#RisumeEsB-M&2h8b zH>PBds6aXHH7nEm5&at1)P2)9t(-)5BAN8Zb11@s!Dz4o7pb4XMMxb1Frv%_O5Fkc zq$Lf{zCZ{15Og40y`1Gg_b9}8lL_xT@HYGTyE1Ovx_^pAtHp4?;)!DM6)$fL>q>3! 
zgpM1FZP6Y3l^j8Kgv9-d-0#RawNnIg+#1q~9I@X9eyzvB;|Zm2*c@-U16HJVhgm+T zou;Mchc3YGDpB(9NH3Fx!8k@B1udNs;2F57aX2w~V|csIJy<~b`N%mrQGnqJ?~vi4 z$Ckt!lW91DjN|7F+W*s&p`)zQ|2!EHZf}?&z6P>o(;Kz`6ygUi>lnHhet{)Vl8+qw z5Ke5#bM~{pO(gG^I9`m!LiJ&Gr_uh*Ti4x85RQ;UANa88)1g4Dn$6XyFp}16&;*uV zr*6|9eKyk7w_J%}g%rw-!J8MqQl6+LJ@L}$$YxO{owAFaJ&_7gj_=%*oDy;d=K?4Q zoDs|5iE1DQd7^*mlEH*obc|Vb-(eK*ecLolqOmm)tHSk3kJUCblOz^sYpI7IMNv-I zU5IiJ(b|ZDo|h}VeDGc`<@w^(O>a)8(z|Zq;So^6)k2`wR{0ZQ|2x&Iq6_LmY8ugG zpg1$BgGax0+xL0Te3*!`h{B2t^>e{XJr7DECH&>c;A&=Os&>YP9dlels_bkLu+=7v zY2nmx(K!QL)g6cCW5gctlL6F2VPu;=(c*rxp>-3Ua9TG!wH=71aQt1W=kP>)J?z&= zlk0qu;NE2WB|798svxrj#gkZ=IwdT`c$pSv@bT)~)yJQc%Hc9+DE)OtgvCOU1|G)AM3Wy%?W-`sb8>~AGu#c0+g^}l8zjpn!Cz{7#iZRkFzuf2 z=tc-E>&Q{S&`;rrA6!uhFDVU&|714w%EH5hWCCg05FQImbXE}h)DXH9f!A>u8Y{VC zV`tMKm`$9jqPrpQ-m!98ev9G;y%v%>2bQhDx)E;Vq7y5GY;vI2Z;fZt^MpFgAoflE zs0VRKh3s3YroOTWJKf38m(oi5@{)^=Pu=&22@=9Rm?stP;g*=B*ls_uF~KA^CwVR< zB1sOkWcK@{gyqq1!%u; zQHoMDfUehALvh3bx{Np!BRWyb*G6#6gH>`3ytuD|>W(;d=gv5w!LT*7?<+%_ZJXYf z!?~f4?(3kKJ(O!6G@wDz1okQ;2<`Iu>|+V~M&dH9by0)?_t1e+!Xs)f1`K!Vg85DE}dw$^wC3 zRPnc3vP#gQHOIf$IYix=Ml#l*!af?F^F}UGXG;wJY>NDZK<*HR;*&2-X>WjLXbLw& z*b@r1%Xvb!!57*uoNqI$p!s{0mkG5xEA*TW&UF)ET*0iN+1MU=0{^)Lf9PG6hzK#HV zrf7aaL?7X=T4!8{=N8edb43vwSNY%{u{>H^itHC+CAfUE37}i9hVB_(qa7_N6{gE_ zW%uF5_KKSyG@b=1%M?2xJ!P7jqlOUua(|Am(MtiTM5Xyo12UuBFTsjiFuE zH0fPMkgE8;p{7XX2(jYB=avk8Q&T!DX}hQ8z2jcc@a=JVrmsF&p}j|bxiii08y+Z^ zOFbf2x|_#nJbD@vl3TAlufU16{dSiWQDRrsRkQX3x7hL9B>N|YpIuzpUu&Yt&nmom zypy^|S4TNOa=PMW^TG*vA4rOQV5iMd4)0A7fh!8^c$d$!n8>TB zF1Ft0ri@;ZX|YE#XW!xyvL1FTxyKP)if#EMc$Y11pzWs2P7a4;HyF?8TD7P3Eqo3s zTzDbc&oB3tIUQ4J=U2q8pKD3`MibJ1(3>qX@cGMk3LUGDzgl!r7MvKK95loFIS_Br?707I zd-nD&YrTQy4CV!}MQjMz>>~TmZQ}nsYcTp(a{6zaf&V&URy)?kQN#2xp`WOihLorC zBReA7tEZ9rMzR7#ne=TS5D1{&L^6LEm_?I7$8F?_CS)n|xk~fgRis%o?sNA|j=b*!SdOEK%aU;jc=trd!Ne2afp^ZGgUg%y`Dr&0M<~C@j6WD^P9)Kn zAPW+El|cg(ebdWKH=dduB?V<}Zu+^c*;ds6^vig+j>;WoDn4uxT(tb9Fg1${PA#R& z2P`k(8qo_8RNe6JC*uk%JJeKNSR&YHMEB`#zP$dnp?B;-LoI=OEtVI!TFB$)&|l8W z?tMTP3l4iMS?_^$(7E_gV(`O;kEwhr^-5T6GgR4pt?a)~r7g3#4$&RMc!rZpZ;K2tXR57pXn2k-|xMbXfX1-rEmhysisVdLH zgK}BPiVTM-mDU0gfudFwOYl*bHr+VpYS78nu%=1{$&^=Hy4XI+D(>hS&Ve1`GQHXK zOVFCsu+gX!(qjl|YLm}U%qbvF@JyIUDTlHG5%Bu^@kRe^j}&M)U>OgNhV!`Y6r64h+EVdg1@8GyPGd zkN*B}qZ{fq#*WqW3T^th6hoZv@S2s&9Myq&2uexXZy)*|q|Y2q?1CBTtH5^&UjFgu z#cvTHsQ7N&W^Vi+EjS_rpz)UOxiZI(BK-B>@OvOQ$yqx5avaso?!kP@^r5;H5!!P$ zCzfv2XD%$CMF(w{5i;7;?1lQzFFe6Q*3vi;jz`E1_gaz~)O?D4770{s?`_j4Jmh#3gmDRFvrW?r246BEZwjv;VfIVC2YVPPvXXol-Fq5 zK~O<=9fUJBL>)EAleChlN~S^ElGvj^+1}2j=yP?8xFlL9R%s;h z2v1!QUrJt#;p)Pd(`mGEW?{VWSwBs923W1pKR$QF$ymd7T?sVbfFY;V)i>LOA7*$N zAb_$x$|!xe{M!w`KUP;vZq5}@t~4QJ5_b)mYA(qFLaL6y#YaJuew2!{PwNQ8C>4~V z=efnEsOkQfKd4+NTBB!CEKr}}xXBmf#j+m#2y``KA8%|}2-joXpi2}Zl- zkHp_Ru+l4DBa@Hx{9#L}msmM*kqn|x`UN8)FKHV$5*hqI4mSz~A9Bp^a^WBZOi!A| zo>QG=X$xUDTx_|Sjf~EH62G8vv{M(i`Pk>FBgC>?>xt=E91rKYSHY@P5B-t0>W#Q9 zGQ`FsjFZ5!6dREQp$Of6!6aVAJyZZ7uh3sPl0f2_$h})Bx?LwOg7ah_t(eNnNns8T zCC9rmZ6Ns_FKD7C zKHXgjK=EBG=TJk`N)kcN;18xnTfM5Q(q0XhN=b2M~Pf`62I=6X>JzQ_Q{OIjj6j9C|`$ireF+CzXMWwLo z?8`0CdKI?ZD{lM3H^%jEnDIrM#O0n~+P*U3ebADN*hUkSx77j*bhW0!4hS&x)lb*n_m)$ctff97nz~@}8M!AQMDV z;`Pi`$v|bBs%cS5)b6)c^v0h-XHnA`EXZ7JFeQ@-Ymn_No$MoaV!tj(LJz1@+g;PT zEtB}WPU&!7p-@JN=U6I`Lm@SD{#b9=w3|LVr~GJE)3rl-BckS^76)n9t~$qx&I`;~ z{N_A9o~mRuZI8q+=c==%;uw`O9+BEphM1l6X`@o^wsj;vzpQb91f;Ol( zd<*8i1L3|2=ClGhXBGhj?9luV4#e;AYQMV?QA*l!bDvOn*K5wi{EQ#uLG@7sjTOpE z?}3Rz&BRq1H3E8D^j#If+fR#6k+w@Ntac*cQ%gZ5=1hGPFJ(XLX^>pz&8Dq-P6Oh0 z0TQ)<*!9%D1eSV=@>FqRe*w$1ezO1n^QL~0?SeYk0&X_lY;aaYqssch-q_70~$tYgy=n^Ya`P*sU#+# 
zrQ95$^Mfu`!0JTWB?oay^)FMRR=8Ys8k`e|+TykK_o*BMc|v+qTL?oX@{G8HZ8$0| z96Al4Ur-&jbhH~SSxr<(=OovWn?+9J!S7UyfWX#+E*lb28k2Zc-S7P8`|-*Ope+)) zsm#%MJ;>am=U^*T(QyhCc9TnTOYGRBxMGclDcgK6rED13l|LnSs>IT*!j<&pK#jU= z;T$C(NeIDvpgLvMYTMy7(^6U<3d;gCR#0HGoV3|wY#0(~F7LlTLEqI;5CcuBS)c9G zu8!N*(q@}3xNLOeB-GE;hKFF8FjVC7OOx+EX!c(Vum2DzmMV++G&|i)HGhHe3k!`T zZ{`jAoH8-#Mn;DaepN0e_$-pz<->WhdC~Tm0u8%vP;O#n^!FZ3a8#d!u8KbG^7&3{ ztvp`}DSiw%>96AFbX+3eqBu@R9W?3XjXo-@059+GCGHRsSw4mOh@3R!c*m(e==xI` zD9?&<(~b<2UO(M~wBi_?2CB~v+J>IzpCW`cWqytMF};I6@G+Js55LdukphSJ6Pds6 zx7$*tpROmQ(YZQQH-{w80zc(@ z@ed1O@MBe@a7pTdFvwOEhF&BY830}(a+|dn!(bAwoGv*z2zGN|_qXJO``Ssk^D9=B z&aObamu_xJtbS{@?)uBFF!Hcg!W;+DvOARGMOft9J2Fu%mmxtfKu9kPAf%V;Z^np& zt%b3n)Bi$;oE0x6*Y^n}Xc`Pu*o$AjKmVi$G#$fvmslZ^I-dmNPKZ01(K-Yc1nNyv zjg0O$8Qfiza>ga$U7E9_OwP?~z#`I)ixT7>{FUjToc`flES~1CJwVP5TZ2|-J45Nj~!PpgVt5A z{J2-dbEs+Wb14J91lcrNDg_f8Iyg(K-`ty;dCe{g1_wr2RNeH5PTXo7F5^}SAEq5n z#T=3@O5d-MCL%9@M$p1l)u(5p2|qGPK=y7v-1&|}fi73t-VeA4k|<4BOnW(7AS)%;=bdqR-N z%@N831~f96e@(wlX0~or!c4G89sA90C*Vxy((-K(IG%@D%T~2>=|ufd=Hj~@YauvqwiL!cgiYn| z)MKSlAtyOL(SOQTF@=((+BdBGXpBnj7%)c7*abZgdPZVb+;!dfg{?a;joyhCY?3CQ zyUYymlP+Hqx}4AQMDy((yDa=$zZyV42?($h{y%l~fARSP0zUqk%YW}ZgFhrBBmhDH zaQ#s*0JjFt=2k|u4#tMY=5|hhRt1ovrJ9XHJjTsyekpcnvGTya= z2B`VlW64Vae?a-|?oa3dEBm_=PUCN1pKiY;Q9^rk3tE! z{eP>;2*^r^iYO`5$%wv3_^rmj8wLa|{;6aE?thah_@^2G{-HmW-hb8jm$1P;Ww3A6od` zUwaSd?kAm}2Y?v^T)&ZI|526!=Kc?Gfaf)JFm`m52B^Io+x%OA;ypa2M`3>lpew^* zf6s;Z1AY|qZ{YzH+*Zzx04^C(b1P#3Lqk9dGWs_9rvI&htlLpg4?u?p13LUSMZiDG z0>R%lAm*SCP)}6>Fjb1%S{qB-+FCl>{e9PvZ4aY80Bo)U&=G(bvOkp!fUW#Z*ZdBx z1~5E;QtNNF_xHGuI~e=r0JK%WMf4|BAfPq6zr~gKx7GbU9``Cak1xQw*b(024blHS zo{giEzLnK~v*BOHH&%3jX~l>d2#DY>&ldzp@%x+q8^8ec8{XeP-9eLe z{$J28rT!L8+Sc^HzU@GBexQ25pjQQWVH|$}%aZ+DFnNG>i-4n}v9$p}F_%Qz)==L{ z7+|mt<_6Ax@Vvh_+V^tze>7Ai|Nq^}-*>}%o!>t&fzO6ZBt23g4r?*WLL8)z|!gQsH?I_!|Jg%KoqXrnK`% z*#H3k$!LFz{d`~fz3$E*mEkP@qw>F{PyV|*_#XbfmdYRSsaF3L{(o6Yyl?2e;=vyc zeYXFPhW_;Y|3&}cJ^Xv>{y*R^9sUXaowxiR_B~_$AFv8e{{;KzZHV`n?^%ogz|8ab zC(PdyGydDm_?{p5|Ec8cRTBuJD7=ktkw-{nV;#0k5o;S?!9D>&LLkM0AP6Feg`f{0 zDQpB`k<`JrvB<<-J;OKd%+1!z`DQP}{M_XnsTQvW)#kKd4xjO+0(FK~P*t8f?34gT zNeb{dG5{jMk|Z%xPNd?)Kr$uFk;z0bG4oFYGnNlV6q8Vd`WhQhkz5p#m^vZSc48n^ z)8XlE1_e=c^$WG1no(|j8Tc`PgwP}{$Z2MV1V$=SXvP)gXKtqW)?5PUcJu&?e*#h! zqs>gH(jDQk$9cz8;-w$cc*dE1}qLepfsBCXA@(bAJ66ft0aCq$Wrcq)WXX{0nm+#w=uBj1o9rLyA i;x|p)^~-yfPOPa3(|vBayXKz \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! 
-x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000..e95643d6 --- /dev/null +++ b/gradlew.bat @@ -0,0 
+1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega From df161f299a38cac4fb69b33bb27ba094cc9d07e0 Mon Sep 17 00:00:00 2001 From: DC* Date: Fri, 14 Aug 2020 23:30:42 -0300 Subject: [PATCH 02/10] Gradle java source structure --- build.xml | 73 - src/plugins/Library/Index.java | 30 - src/plugins/Library/Library.java | 780 ------- src/plugins/Library/Main.java | 199 -- src/plugins/Library/SpiderIndexURIs.java | 121 - src/plugins/Library/SpiderIndexUploader.java | 969 -------- src/plugins/Library/Version.java | 17 - src/plugins/Library/VirtualIndex.java | 20 - src/plugins/Library/WriteableIndex.java | 32 - .../Library/client/FreenetArchiver.java | 608 ----- src/plugins/Library/index/ProtoIndex.java | 356 --- .../index/ProtoIndexComponentSerialiser.java | 586 ----- .../Library/index/ProtoIndexSerialiser.java | 230 -- src/plugins/Library/index/TermEntry.java | 137 -- .../Library/index/TermEntryReaderWriter.java | 124 -- src/plugins/Library/index/TermIndexEntry.java | 58 - src/plugins/Library/index/TermPageEntry.java | 240 -- src/plugins/Library/index/TermTermEntry.java | 54 - src/plugins/Library/index/URIEntry.java | 75 - src/plugins/Library/index/URIKey.java | 48 - src/plugins/Library/index/package-info.java | 10 - .../Library/index/xml/FindRequest.java | 224 -- .../Library/index/xml/LibrarianHandler.java | 237 -- .../Library/index/xml/MainIndexParser.java | 245 --- .../Library/index/xml/URLUpdateHook.java | 7 - src/plugins/Library/index/xml/Util.java | 75 - src/plugins/Library/index/xml/XMLIndex.java | 861 -------- .../Library/index/xml/package-info.java | 9 - .../Library/io/DataFormatException.java | 44 - src/plugins/Library/io/ObjectBlueprint.java | 520 ----- .../Library/io/ObjectStreamReader.java | 21 - .../Library/io/ObjectStreamWriter.java | 21 - src/plugins/Library/io/YamlReaderWriter.java | 226 -- src/plugins/Library/io/serial/Archiver.java | 54 - .../Library/io/serial/FileArchiver.java | 198 -- .../Library/io/serial/IterableSerialiser.java | 53 - .../Library/io/serial/LiveArchiver.java | 46 - .../Library/io/serial/MapSerialiser.java | 50 - src/plugins/Library/io/serial/Packer.java | 995 --------- .../Library/io/serial/ParallelSerialiser.java | 271 --- .../Library/io/serial/ProgressTracker.java | 255 --- .../io/serial/ScheduledSerialiser.java | 53 - src/plugins/Library/io/serial/Serialiser.java | 161 -- src/plugins/Library/io/serial/Translator.java | 44 - .../Library/io/serial/package-info.java | 11 - src/plugins/Library/package-info.java | 13 - .../search/InvalidSearchException.java | 19 - src/plugins/Library/search/ResultSet.java | 511 ----- src/plugins/Library/search/Search.java | 654 ------ .../Library/search/SearchTokenizer.java | 205 -- src/plugins/Library/search/SearchUtil.java | 55 - .../Library/search/inter/IndexQuery.java | 65 - .../Library/search/inter/Interdex.java | 198 -- .../Library/search/inter/TermResults.java | 56 - src/plugins/Library/search/package-info.java | 15 - src/plugins/Library/ui/L10nString.java | 72 - src/plugins/Library/ui/MainPage.java | 523 ----- src/plugins/Library/ui/MainPageToadlet.java | 139 -- .../Library/ui/RelevanceComparator.java | 43 - .../Library/ui/ResultNodeGenerator.java | 262 --- src/plugins/Library/ui/StaticToadlet.java | 78 - .../Library/ui/TermPageGroupEntry.java | 50 - src/plugins/Library/ui/TestInterface.java | 64 - src/plugins/Library/ui/WebInterface.java | 67 - src/plugins/Library/ui/package-info.java | 14 - src/plugins/Library/ui/staticfiles/detect.js | 1 - 
src/plugins/Library/ui/staticfiles/script.js | 32 - src/plugins/Library/ui/staticfiles/style.css | 48 - src/plugins/Library/util/BTreeMap.java | 1959 ----------------- src/plugins/Library/util/BTreeSet.java | 76 - src/plugins/Library/util/BytePrefixKey.java | 147 -- .../Library/util/CompositeIterable.java | 68 - .../Library/util/DataNotLoadedException.java | 64 - .../Library/util/IdentityComparator.java | 84 - src/plugins/Library/util/Integers.java | 82 - src/plugins/Library/util/Maps.java | 132 -- src/plugins/Library/util/PrefixTree.java | 615 ------ src/plugins/Library/util/Skeleton.java | 73 - .../Library/util/SkeletonBTreeMap.java | 1774 --------------- .../Library/util/SkeletonBTreeSet.java | 177 -- src/plugins/Library/util/SkeletonMap.java | 58 - src/plugins/Library/util/SkeletonTreeMap.java | 949 -------- src/plugins/Library/util/Sorted.java | 268 --- src/plugins/Library/util/SortedArraySet.java | 219 -- src/plugins/Library/util/SortedMapSet.java | 164 -- src/plugins/Library/util/SortedSetMap.java | 160 -- .../util/TaskAbortExceptionConvertor.java | 13 - .../BoundedPriorityBlockingQueue.java | 196 -- .../util/concurrent/ExceptionConvertor.java | 7 - .../Library/util/concurrent/Executors.java | 73 - .../Library/util/concurrent/Notifier.java | 31 - .../util/concurrent/ObjectProcessor.java | 400 ---- .../Library/util/concurrent/Scheduler.java | 21 - .../Library/util/event/AbstractSweeper.java | 118 - .../Library/util/event/CountingSweeper.java | 56 - src/plugins/Library/util/event/Sweeper.java | 70 - .../Library/util/event/TrackingSweeper.java | 112 - .../Library/util/exec/AbstractExecution.java | 176 -- .../util/exec/BaseCompositeProgress.java | 150 -- .../Library/util/exec/ChainedProgress.java | 42 - .../Library/util/exec/CompositeProgress.java | 54 - src/plugins/Library/util/exec/Execution.java | 76 - .../Library/util/exec/ExecutionAcceptor.java | 20 - src/plugins/Library/util/exec/Progress.java | 54 - .../Library/util/exec/ProgressParts.java | 303 --- .../Library/util/exec/SimpleProgress.java | 239 -- .../Library/util/exec/TaskAbortException.java | 85 - .../util/exec/TaskCompleteException.java | 20 - .../util/exec/TaskInProgressException.java | 37 - .../Library/util/exec/package-info.java | 9 - src/plugins/Library/util/func/Closure.java | 40 - .../Library/util/func/SafeClosure.java | 17 - src/plugins/Library/util/func/Tuples.java | 160 -- .../Library/util/func/package-info.java | 12 - src/plugins/Library/util/package-info.java | 10 - test/plugins/Library/Tester.java | 381 ---- test/plugins/Library/index/BIndexTest.java | 381 ---- test/plugins/Library/index/TermEntryTest.java | 121 - .../plugins/Library/io/serial/PackerTest.java | 84 - .../Library/io/serial/YamlMapTest.java | 123 -- test/plugins/Library/util/BTreeMapTest.java | 97 - .../Library/util/BytePrefixKeyTest.java | 25 - test/plugins/Library/util/Generators.java | 43 - test/plugins/Library/util/IntegersTest.java | 71 - .../Library/util/SkeletonTreeMapTest.java | 126 -- .../Library/util/SortedArraySetTest.java | 57 - .../Library/util/SortedMapTestSkeleton.java | 141 -- test/plugins/Library/util/SortedTest.java | 190 -- .../Library/util/event/SweepersTest.java | 177 -- 129 files changed, 24319 deletions(-) delete mode 100644 src/plugins/Library/Index.java delete mode 100644 src/plugins/Library/Library.java delete mode 100644 src/plugins/Library/Main.java delete mode 100644 src/plugins/Library/SpiderIndexURIs.java delete mode 100644 src/plugins/Library/SpiderIndexUploader.java delete mode 100644 src/plugins/Library/Version.java 
delete mode 100644 src/plugins/Library/VirtualIndex.java delete mode 100644 src/plugins/Library/WriteableIndex.java delete mode 100644 src/plugins/Library/client/FreenetArchiver.java delete mode 100644 src/plugins/Library/index/ProtoIndex.java delete mode 100644 src/plugins/Library/index/ProtoIndexComponentSerialiser.java delete mode 100644 src/plugins/Library/index/ProtoIndexSerialiser.java delete mode 100644 src/plugins/Library/index/TermEntry.java delete mode 100644 src/plugins/Library/index/TermEntryReaderWriter.java delete mode 100644 src/plugins/Library/index/TermIndexEntry.java delete mode 100644 src/plugins/Library/index/TermPageEntry.java delete mode 100644 src/plugins/Library/index/TermTermEntry.java delete mode 100644 src/plugins/Library/index/URIEntry.java delete mode 100644 src/plugins/Library/index/URIKey.java delete mode 100644 src/plugins/Library/index/package-info.java delete mode 100644 src/plugins/Library/index/xml/FindRequest.java delete mode 100644 src/plugins/Library/index/xml/LibrarianHandler.java delete mode 100644 src/plugins/Library/index/xml/MainIndexParser.java delete mode 100644 src/plugins/Library/index/xml/URLUpdateHook.java delete mode 100644 src/plugins/Library/index/xml/Util.java delete mode 100644 src/plugins/Library/index/xml/XMLIndex.java delete mode 100644 src/plugins/Library/index/xml/package-info.java delete mode 100644 src/plugins/Library/io/DataFormatException.java delete mode 100644 src/plugins/Library/io/ObjectBlueprint.java delete mode 100644 src/plugins/Library/io/ObjectStreamReader.java delete mode 100644 src/plugins/Library/io/ObjectStreamWriter.java delete mode 100644 src/plugins/Library/io/YamlReaderWriter.java delete mode 100644 src/plugins/Library/io/serial/Archiver.java delete mode 100644 src/plugins/Library/io/serial/FileArchiver.java delete mode 100644 src/plugins/Library/io/serial/IterableSerialiser.java delete mode 100644 src/plugins/Library/io/serial/LiveArchiver.java delete mode 100644 src/plugins/Library/io/serial/MapSerialiser.java delete mode 100644 src/plugins/Library/io/serial/Packer.java delete mode 100644 src/plugins/Library/io/serial/ParallelSerialiser.java delete mode 100644 src/plugins/Library/io/serial/ProgressTracker.java delete mode 100644 src/plugins/Library/io/serial/ScheduledSerialiser.java delete mode 100644 src/plugins/Library/io/serial/Serialiser.java delete mode 100644 src/plugins/Library/io/serial/Translator.java delete mode 100644 src/plugins/Library/io/serial/package-info.java delete mode 100644 src/plugins/Library/package-info.java delete mode 100644 src/plugins/Library/search/InvalidSearchException.java delete mode 100644 src/plugins/Library/search/ResultSet.java delete mode 100644 src/plugins/Library/search/Search.java delete mode 100755 src/plugins/Library/search/SearchTokenizer.java delete mode 100644 src/plugins/Library/search/SearchUtil.java delete mode 100644 src/plugins/Library/search/inter/IndexQuery.java delete mode 100644 src/plugins/Library/search/inter/Interdex.java delete mode 100644 src/plugins/Library/search/inter/TermResults.java delete mode 100644 src/plugins/Library/search/package-info.java delete mode 100644 src/plugins/Library/ui/L10nString.java delete mode 100644 src/plugins/Library/ui/MainPage.java delete mode 100644 src/plugins/Library/ui/MainPageToadlet.java delete mode 100644 src/plugins/Library/ui/RelevanceComparator.java delete mode 100644 src/plugins/Library/ui/ResultNodeGenerator.java delete mode 100644 src/plugins/Library/ui/StaticToadlet.java delete mode 100644 
src/plugins/Library/ui/TermPageGroupEntry.java delete mode 100644 src/plugins/Library/ui/TestInterface.java delete mode 100644 src/plugins/Library/ui/WebInterface.java delete mode 100644 src/plugins/Library/ui/package-info.java delete mode 100644 src/plugins/Library/ui/staticfiles/detect.js delete mode 100644 src/plugins/Library/ui/staticfiles/script.js delete mode 100644 src/plugins/Library/ui/staticfiles/style.css delete mode 100644 src/plugins/Library/util/BTreeMap.java delete mode 100644 src/plugins/Library/util/BTreeSet.java delete mode 100644 src/plugins/Library/util/BytePrefixKey.java delete mode 100644 src/plugins/Library/util/CompositeIterable.java delete mode 100644 src/plugins/Library/util/DataNotLoadedException.java delete mode 100644 src/plugins/Library/util/IdentityComparator.java delete mode 100644 src/plugins/Library/util/Integers.java delete mode 100644 src/plugins/Library/util/Maps.java delete mode 100644 src/plugins/Library/util/PrefixTree.java delete mode 100644 src/plugins/Library/util/Skeleton.java delete mode 100644 src/plugins/Library/util/SkeletonBTreeMap.java delete mode 100644 src/plugins/Library/util/SkeletonBTreeSet.java delete mode 100644 src/plugins/Library/util/SkeletonMap.java delete mode 100644 src/plugins/Library/util/SkeletonTreeMap.java delete mode 100644 src/plugins/Library/util/Sorted.java delete mode 100644 src/plugins/Library/util/SortedArraySet.java delete mode 100644 src/plugins/Library/util/SortedMapSet.java delete mode 100644 src/plugins/Library/util/SortedSetMap.java delete mode 100644 src/plugins/Library/util/TaskAbortExceptionConvertor.java delete mode 100644 src/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java delete mode 100644 src/plugins/Library/util/concurrent/ExceptionConvertor.java delete mode 100644 src/plugins/Library/util/concurrent/Executors.java delete mode 100644 src/plugins/Library/util/concurrent/Notifier.java delete mode 100644 src/plugins/Library/util/concurrent/ObjectProcessor.java delete mode 100644 src/plugins/Library/util/concurrent/Scheduler.java delete mode 100644 src/plugins/Library/util/event/AbstractSweeper.java delete mode 100644 src/plugins/Library/util/event/CountingSweeper.java delete mode 100644 src/plugins/Library/util/event/Sweeper.java delete mode 100644 src/plugins/Library/util/event/TrackingSweeper.java delete mode 100644 src/plugins/Library/util/exec/AbstractExecution.java delete mode 100644 src/plugins/Library/util/exec/BaseCompositeProgress.java delete mode 100644 src/plugins/Library/util/exec/ChainedProgress.java delete mode 100644 src/plugins/Library/util/exec/CompositeProgress.java delete mode 100644 src/plugins/Library/util/exec/Execution.java delete mode 100644 src/plugins/Library/util/exec/ExecutionAcceptor.java delete mode 100644 src/plugins/Library/util/exec/Progress.java delete mode 100644 src/plugins/Library/util/exec/ProgressParts.java delete mode 100644 src/plugins/Library/util/exec/SimpleProgress.java delete mode 100644 src/plugins/Library/util/exec/TaskAbortException.java delete mode 100644 src/plugins/Library/util/exec/TaskCompleteException.java delete mode 100644 src/plugins/Library/util/exec/TaskInProgressException.java delete mode 100644 src/plugins/Library/util/exec/package-info.java delete mode 100644 src/plugins/Library/util/func/Closure.java delete mode 100644 src/plugins/Library/util/func/SafeClosure.java delete mode 100644 src/plugins/Library/util/func/Tuples.java delete mode 100644 src/plugins/Library/util/func/package-info.java delete mode 100644 
src/plugins/Library/util/package-info.java
 delete mode 100644 test/plugins/Library/Tester.java
 delete mode 100644 test/plugins/Library/index/BIndexTest.java
 delete mode 100644 test/plugins/Library/index/TermEntryTest.java
 delete mode 100644 test/plugins/Library/io/serial/PackerTest.java
 delete mode 100644 test/plugins/Library/io/serial/YamlMapTest.java
 delete mode 100644 test/plugins/Library/util/BTreeMapTest.java
 delete mode 100644 test/plugins/Library/util/BytePrefixKeyTest.java
 delete mode 100644 test/plugins/Library/util/Generators.java
 delete mode 100644 test/plugins/Library/util/IntegersTest.java
 delete mode 100644 test/plugins/Library/util/SkeletonTreeMapTest.java
 delete mode 100644 test/plugins/Library/util/SortedArraySetTest.java
 delete mode 100644 test/plugins/Library/util/SortedMapTestSkeleton.java
 delete mode 100644 test/plugins/Library/util/SortedTest.java
 delete mode 100644 test/plugins/Library/util/event/SweepersTest.java

diff --git a/build.xml b/build.xml
index fa7a6670..e2827426 100644
--- a/build.xml
+++ b/build.xml
@@ -29,79 +29,6 @@
[73 deleted lines of Ant XML elided: the markup was stripped in this copy of the patch, leaving only the leading "-" markers]
diff --git a/src/plugins/Library/Index.java b/src/plugins/Library/Index.java
deleted file mode 100644
index 196ffb1a..00000000
--- a/src/plugins/Library/Index.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library;
-
-import plugins.Library.index.TermEntry;
-import plugins.Library.index.URIEntry;
-import plugins.Library.util.exec.Execution;
-
-import freenet.keys.FreenetURI;
-
-import java.util.Set;
-
-/**
-** Represents the data for an index.
-**
-** @author infinity0
-*/
-public interface Index {
-
-    /**
-    ** Non-blocking fetch of the entries associated with a given term.
-    **
-    ** DOCUMENT
-    */
-    public Execution<Set<TermEntry>> getTermEntries(String term);
-
-    public Execution<URIEntry> getURIEntry(FreenetURI uri);
-
-}
diff --git a/src/plugins/Library/Library.java b/src/plugins/Library/Library.java
deleted file mode 100644
index 9a35b099..00000000
--- a/src/plugins/Library/Library.java
+++ /dev/null
@@ -1,780 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL.
*/ -package plugins.Library; - -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.net.MalformedURLException; -import java.security.MessageDigest; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import plugins.Library.client.FreenetArchiver; -import plugins.Library.index.ProtoIndex; -import plugins.Library.index.ProtoIndexSerialiser; -import plugins.Library.index.xml.URLUpdateHook; -import plugins.Library.index.xml.XMLIndex; -import plugins.Library.io.ObjectStreamReader; -import plugins.Library.io.ObjectStreamWriter; -import plugins.Library.io.serial.Serialiser.PullTask; -import plugins.Library.search.InvalidSearchException; -import plugins.Library.util.exec.TaskAbortException; - -import freenet.client.FetchContext; -import freenet.client.FetchException; -import freenet.client.FetchException.FetchExceptionMode; -import freenet.client.FetchResult; -import freenet.client.FetchWaiter; -import freenet.client.HighLevelSimpleClient; -import freenet.client.async.ClientContext; -import freenet.client.async.ClientGetter; -import freenet.client.async.PersistenceDisabledException; -import freenet.client.async.USKCallback; -import freenet.client.async.USKManager; -import freenet.client.async.USKRetriever; -import freenet.client.async.USKRetrieverCallback; -import freenet.client.events.ClientEvent; -import freenet.client.events.ClientEventListener; -import freenet.client.events.ExpectedMIMEEvent; -import freenet.keys.FreenetURI; -import freenet.keys.USK; -import freenet.node.NodeClientCore; -import freenet.node.RequestClient; -import freenet.node.RequestStarter; -import freenet.pluginmanager.PluginRespirator; -import freenet.pluginmanager.PluginStore; -import freenet.support.Executor; -import freenet.support.Logger; -import freenet.support.io.FileUtil; - - -/** - * Library class is the api for others to use search facilities, it is used by the interfaces - * @author MikeB - */ -final public class Library implements URLUpdateHook { - - public static final String BOOKMARK_PREFIX = "bookmark:"; - public static final String DEFAULT_INDEX_SITE = BOOKMARK_PREFIX + "liberty-of-information" + " " + BOOKMARK_PREFIX + "free-market-free-people" + " " + - BOOKMARK_PREFIX + "gotcha" + " " + BOOKMARK_PREFIX + "wanna" + " " + BOOKMARK_PREFIX + "wanna.old" + " " + BOOKMARK_PREFIX + "gogo"; - private static int version = 36; - public static final String plugName = "Library " + getVersion(); - - public static String getPlugName() { - return plugName; - } - - public static long getVersion() { - return version; - } - - /** - ** Library singleton. 
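- ** Created once via {@link #init(PluginRespirator)}; a second call to init throws {@link IllegalStateException}.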
- */ - private static Library lib; - - public synchronized static Library init(PluginRespirator pr) { - if (lib != null) { - throw new IllegalStateException("Library already initialised"); - } - lib = new Library(pr); - return lib; - } - - final private PluginRespirator pr; - final private Executor exec; - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - public static final RequestClient REQUEST_CLIENT = new RequestClient() { - - @Override - public boolean persistent() { - return false; - } - - @Override - public boolean realTimeFlag() { - return false; - } - - }; - - static { - Logger.registerClass(Library.class); - } - - private final PluginStore store; - - static final String STOREKEY = "indexuris"; - - /** - * Method to setup Library class so it has access to PluginRespirator, and load bookmarks - * TODO pull bookmarks from disk - */ - private Library(PluginRespirator pr){ - this.pr = pr; - PluginStore ps; - if(pr!=null) { - this.exec = pr.getNode().executor; - try { - ps = pr.getStore(); - } catch (PersistenceDisabledException e) { - ps = null; - } - } else { - this.exec = null; - ps = null; - } - USKManager uskManager = pr.getNode().clientCore.clientContext.uskManager; - store = ps; - if(store != null && store.subStores.containsKey(STOREKEY)) { - for(Map.Entry entry : store.subStores.get(STOREKEY).strings.entrySet()) { - String name = entry.getKey(); - String target = entry.getValue(); - bookmarks.put(name, target); - } - } - - File persistentFile = new File("LibraryPersistent"); - boolean migrated = false; - if(persistentFile.canRead()){ - try { - ObjectInputStream is = new ObjectInputStream(new FileInputStream(persistentFile)); // These are annoying but it's better than nothing - bookmarks = (Map)is.readObject(); - is.close(); - FileUtil.secureDelete(persistentFile); - Logger.error(this, "Moved LibraryPersistent contents into database and securely deleted old file."); - migrated = true; - } catch (ClassNotFoundException ex) { - Logger.error(this, "Error trying to read bookmarks Map from file.", ex); - } catch (IOException ex) { - Logger.normal(this, "Error trying to read Library persistent data.", ex); - } - } - boolean needNewWanna = false; - for(Map.Entry entry : bookmarks.entrySet()) { - String name = entry.getKey(); - String target = entry.getValue(); - if(name.equals("wanna") && target.startsWith("USK@5hH~39FtjA7A9")) { - name = "wanna.old"; - needNewWanna = true; - } - FreenetURI uri; - long edition = -1; - try { - uri = new FreenetURI(target); - if(uri.isUSK()) - edition = uri.getEdition(); - } catch (MalformedURLException e) { - Logger.error(this, "Invalid bookmark URI: "+target+" for "+name, e); - continue; - } - if(uri.isUSK()) { - BookmarkCallback callback = new BookmarkCallback(name, uri.getAllMetaStrings(), edition); - bookmarkCallbacks.put(name, callback); - USK u; - try { - u = USK.create(uri); - } catch (MalformedURLException e) { - Logger.error(this, "Invalid bookmark USK: "+target+" for "+name, e); - continue; - } - uskManager.subscribe(u, callback, false, rcBulk); - callback.ret = uskManager.subscribeContent(u, callback, false, pr.getHLSimpleClient().getFetchContext(), RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, rcBulk); - } - } - if(bookmarks.isEmpty() || needNewWanna || !bookmarks.containsKey("gotcha") || - !bookmarks.containsKey("liberty-of-information") || - !bookmarks.containsKey("free-market-free-people")) { - if(!bookmarks.containsKey("liberty-of-information")) - addBookmark("liberty-of-information", 
"USK@5YCPzcs60uab6VSdiKcyvo-G-r-ga2UWCyOzWHaPoYE,a1zEPCf0qkUFQxftFZK5xmxYdxt0JErDb2aJgwG8s~4,AQACAAE/index.yml/25"); - if(!bookmarks.containsKey("free-market-free-people")) - addBookmark("free-market-free-people", "USK@X4lMQ51bXPSicAgbR~XdFzDyizYHYrvzStdeUrIFhes,0ze4TAqd~RdAMZMsshybHZFema3ZP3id4sgN3H8969g,AQACAAE/index.yml/4"); - if(!bookmarks.containsKey("gotcha")) - addBookmark("gotcha", "USK@zcAnAgT-xp5LnnK28-Lc7Qt-GU7pNKnVdkmU4-HCCBc,s2jiTh8~O9MtnGdVqJqgnKGrXrosK8rArcZ8A49hprY,AQACAAE/index.yml/6"); - if(!bookmarks.containsKey("wanna.old")) - addBookmark("wanna.old", "USK@5hH~39FtjA7A9~VXWtBKI~prUDTuJZURudDG0xFn3KA,GDgRGt5f6xqbmo-WraQtU54x4H~871Sho9Hz6hC-0RA,AQACAAE/Search/25/index.xml"); - if(!bookmarks.containsKey("freenetindex")) - addBookmark("freenetindex", "USK@US6gHsNApDvyShI~sBHGEOplJ3pwZUDhLqTAas6rO4c,3jeU5OwV0-K4B6HRBznDYGvpu2PRUuwL0V110rn-~8g,AQACAAE/freenet-index/5/index.xml"); - if(!bookmarks.containsKey("gogo")) - addBookmark("gogo", "USK@shmVvDhwivG1z1onSA5efRl3492Xyoov52hrC0tF6uI,wpMhseMpFHPLpXYbV8why1nfBXf2XdSQkzVaemFOEsA,AQACAAE/index.yml/51"); - if(!bookmarks.containsKey("wanna")) - addBookmark("wanna", "USK@gxuHPaicqxlpPPagBKPVPraZ4bwLdMYBc5vipkWGh3E,08ExdmvZzB8Hfi6H6crbiuCd2~ikWDIpJ8dvr~tLp7k,AQACAAE/index.yml/82"); - migrated = true; - Logger.normal(this, "Added default indexes"); - } - if(migrated) - saveState(); - } - - public synchronized void saveState(){ - if(store == null) return; - PluginStore inner; - inner = store.subStores.get(STOREKEY); - if(inner == null) - store.subStores.put(STOREKEY, inner = new PluginStore()); - inner.strings.clear(); - inner.strings.putAll(bookmarks); - try { - pr.putStore(store); - if(logMINOR) Logger.minor(this, "Stored state to database"); - } catch (PersistenceDisabledException e) { - // Not much we can do... - } - } - - /* FIXME - * Parallel fetch from the same index is not currently supported. This means that the only reliable way to - * do e.g. an intersection search is to run two completely separate fetches. Otherwise we get assertion - * errors in e.g. BTreePacker.preprocessPullBins from TaskInProgressException's we can't handle from - * ProgressTracker.addPullProgress, when the terms are in the same bucket. We should make it possible to - * search for multiple terms in the same btree, but for now, turning off caching is the only viable option. - */ - -// /** -// ** Holds all the read-indexes. -// */ -// private Map rtab = new HashMap(); -// -// /** -// ** Holds all the writeable indexes. -// */ -// private Map wtab = new HashMap(); -// - /** - ** Holds all the bookmarks (aliases into the rtab). - */ - private Map bookmarks = new HashMap(); - - private Map bookmarkCallbacks = new HashMap(); - - /** - ** Get the index type giving a {@code FreenetURI}. This must not contain - ** a metastring (end with "/") or be a USK. 
- */ - public Class getIndexType(FreenetURI indexuri) throws FetchException { - if(indexuri.lastMetaString()!=null && indexuri.lastMetaString().equals(XMLIndex.DEFAULT_FILE)) - return XMLIndex.class; - if(indexuri.lastMetaString()!=null && indexuri.lastMetaString().equals(ProtoIndex.DEFAULT_FILE)) - return ProtoIndex.class; - if(indexuri.isUSK() && indexuri.getDocName().equals(ProtoIndex.DEFAULT_FILE)) - return ProtoIndex.class; - - NodeClientCore core = pr.getNode().clientCore; - HighLevelSimpleClient hlsc = core.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); - - List uris = Arrays.asList(indexuri, - indexuri.pushMetaString(""), - indexuri.pushMetaString(ProtoIndex.DEFAULT_FILE), - indexuri.pushMetaString(XMLIndex.DEFAULT_FILE)); - - for (FreenetURI uri: uris) { - - for(int i=0;i<5;i++) { - ClientContext cctx = core.clientContext; - FetchContext fctx = hlsc.getFetchContext(); - FetchWaiter fw = new FetchWaiter(REQUEST_CLIENT); - final ClientGetter gu = hlsc.fetch(uri, 0x10000, fw, fctx); - gu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx); - - final Class[] c = new Class[1]; - hlsc.addEventHook(new ClientEventListener() { - /*@Override**/ public void receive(ClientEvent ce, ClientContext context) { - if (!(ce instanceof ExpectedMIMEEvent)) { return; } - synchronized(c) { - String type = ((ExpectedMIMEEvent)ce).expectedMIMEType; - Logger.normal(this, "Expected type in index: "+type); - try { - c[0] = getIndexTypeFromMIME(type); - gu.cancel(context); - } catch (UnsupportedOperationException e) { - // Ignore - } - } - } - }); - - try { - FetchResult res = fw.waitForCompletion(); - return getIndexTypeFromMIME(res.getMimeType()); - - } catch (FetchException e) { - if (e.getMode() == FetchExceptionMode.CANCELLED) { - synchronized(c) { - if(c[0] != null) - return c[0]; - else - throw new UnsupportedOperationException("Unable to get mime type or got an invalid mime type for index"); - } - } else if(e.newURI != null) { - uri = e.newURI; - continue; - } - } - } - - } - throw new UnsupportedOperationException("Could not find appropriate data for index"); - } - - public Class getIndexTypeFromMIME(String mime) { - if (mime.equals(ProtoIndex.MIME_TYPE)) { - //return "YAML index"; - return ProtoIndex.class; - } else if (mime.equals(XMLIndex.MIME_TYPE)) { - //return "XML index"; - return XMLIndex.class; - } else { - throw new UnsupportedOperationException("Unknown mime-type for index: "+mime); - } - } - - -/* - KEYEXPLORER slightly more efficient version that depends on KeyExplorer - - /** - ** Get the index type giving a {@code FreenetURI}. This should have been - ** passed through {@link KeyExplorerUtils#sanitizeURI(List, String)} at - ** some point - ie. it must not contain a metastring (end with "/") or be - ** a USK. 
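- ** (This is the KeyExplorer-dependent variant flagged above; the live getIndexType probes the MIME type of the fetched data instead.)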
- * / - public Class getIndexType(FreenetURI uri) - throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException { - GetResult getresult = KeyExplorerUtils.simpleGet(pr, uri); - byte[] data = BucketTools.toByteArray(getresult.getData()); - - if (getresult.isMetaData()) { - try { - Metadata md = Metadata.construct(data); - - if (md.isArchiveManifest()) { - if (md.getArchiveType() == ARCHIVE_TYPE.TAR) { - return getIndexTypeFromManifest(uri, false, true); - - } else if (md.getArchiveType() == ARCHIVE_TYPE.ZIP) { - return getIndexTypeFromManifest(uri, true, false); - - } else { - throw new UnsupportedOperationException("not implemented - unknown archive manifest"); - } - - } else if (md.isSimpleManifest()) { - return getIndexTypeFromManifest(uri, false, false); - } - - return getIndexTypeFromSimpleMetadata(md); - - } catch (MetadataParseException e) { - throw new RuntimeException(e); - } - } else { - throw new UnsupportedOperationException("Found data instead of metadata; I do not have enough intelligence to decode this."); - } - } - - public Class getIndexTypeFromSimpleMetadata(Metadata md) { - String mime = md.getMIMEType(); - if (mime.equals(ProtoIndex.MIME_TYPE)) { - //return "YAML index"; - return ProtoIndex.class; - } else if (mime.equals(XMLIndex.MIME_TYPE)) { - //return "XML index"; - return XMLIndex.class; - } else { - throw new UnsupportedOperationException("Unknown mime-type for index"); - } - } - - public Class getIndexTypeFromManifest(FreenetURI furi, boolean zip, boolean tar) - throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException { - - boolean automf = true, deep = true, ml = true; - Metadata md = null; - - if (zip) - md = KeyExplorerUtils.zipManifestGet(pr, furi); - else if (tar) - md = KeyExplorerUtils.tarManifestGet(pr, furi, ".metadata"); - else { - md = KeyExplorerUtils.simpleManifestGet(pr, furi); - if (ml) { - md = KeyExplorerUtils.splitManifestGet(pr, md); - } - } - - if (md.isSimpleManifest()) { - // a subdir - HashMap docs = md.getDocuments(); - Metadata defaultDoc = md.getDefaultDocument(); - - if (defaultDoc != null) { - //return "(default doc method) " + getIndexTypeFromSimpleMetadata(defaultDoc); - return getIndexTypeFromSimpleMetadata(defaultDoc); - } - - if (docs.containsKey(ProtoIndex.DEFAULT_FILE)) { - //return "(doclist method) YAML index"; - return ProtoIndex.class; - } else if (docs.containsKey(XMLIndex.DEFAULT_FILE)) { - //return "(doclist method) XML index"; - return XMLIndex.class; - } else { - throw new UnsupportedOperationException("Could not find a supported index in the document-listings for " + furi.toString()); - } - } - - throw new UnsupportedOperationException("Parsed metadata but did not reach a simple manifest: " + furi.toString()); - } -*/ - public Class getIndexType(File f) { - if (f.getName().endsWith(ProtoIndexSerialiser.FILE_EXTENSION)) - return ProtoIndex.class; - if (f.isDirectory() && new File(f, XMLIndex.DEFAULT_FILE).canRead()) - return XMLIndex.class; - - throw new UnsupportedOperationException("Could not determine default index file under the path given: " + f); - } - - public Object getAddressTypeFromString(String indexuri) { - try { - // return KeyExplorerUtils.sanitizeURI(new ArrayList(), indexuri); KEYEXPLORER - // OPT HIGH if it already ends with eg. 
*Index.DEFAULT_FILE, don't strip - // the MetaString, and have getIndexType behave accordingly - FreenetURI tempURI = new FreenetURI(indexuri); -// if (tempURI.hasMetaStrings()) { tempURI = tempURI.setMetaString(null); } -// if (tempURI.isUSK()) { tempURI = tempURI.sskForUSK(); } - return tempURI; - } catch (MalformedURLException e) { - File file = new File(indexuri); - if (file.canRead()) { - return file; - } else { - throw new UnsupportedOperationException("Could not recognise index type from string: " + indexuri); - } - } - } - - /** - * Add a new bookmark, - * TODO there should be a separate version for untrusted adds from freesites which throws some Security Exception - * @param name of new bookmark - * @param uri of new bookmark - * @return reference of new bookmark - */ - public String addBookmark(String name, String uri) { - FreenetURI u; - USK uskNew = null; - BookmarkCallback callback = null; - long edition = -1; - try { - u = new FreenetURI(uri); - if(u.isUSK()) { - uskNew = USK.create(u); - edition = uskNew.suggestedEdition; - } - } catch (MalformedURLException e) { - Logger.error(this, "Invalid new uri "+uri); - return null; - } - String old; - synchronized(this) { - old = bookmarks.put(name, uri); - callback = bookmarkCallbacks.get(name); - if(callback == null) { - bookmarkCallbacks.put(name, callback = new BookmarkCallback(name, u.getAllMetaStrings(), edition)); - old = null; - } - saveState(); - } - boolean isSame = false; - USKManager uskManager = pr.getNode().clientCore.clientContext.uskManager; - if(old != null) { - try { - FreenetURI uold = new FreenetURI(old); - if(uold.isUSK()) { - USK usk = USK.create(uold); - if(!(uskNew != null && usk.equals(uskNew, false))) { - uskManager.unsubscribe(usk, callback); - uskManager.unsubscribeContent(usk, callback.ret, true); - } else - isSame = true; - } - - } catch (MalformedURLException e) { - // Ignore - } - } - if(!isSame) { - uskManager.subscribe(uskNew, callback, false, rcBulk); - callback.ret = uskManager.subscribeContent(uskNew, callback, false, pr.getHLSimpleClient().getFetchContext(), RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, rcBulk); - } - return name; - } - - final RequestClient rcBulk = new RequestClient() { - - public boolean persistent() { - return false; - } - - public boolean realTimeFlag() { - return false; - } - - }; - - private class BookmarkCallback implements USKRetrieverCallback, USKCallback { - - private final String bookmarkName; - private String[] metaStrings; - USKRetriever ret; - private long origEdition; - - public BookmarkCallback(String name, String[] allMetaStrings, long origEdition) { - this.bookmarkName = name; - this.metaStrings = allMetaStrings; - } - - public short getPollingPriorityNormal() { - return RequestStarter.UPDATE_PRIORITY_CLASS; - } - - public short getPollingPriorityProgress() { - return RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS; - } - - public void onFound(USK origUSK, long edition, FetchResult data) { - data.asBucket().free(); - if(logMINOR) Logger.minor(this, "Bookmark "+bookmarkName+" : fetching edition "+edition); - } - - public void onFoundEdition(long l, USK key, ClientContext context, boolean metadata, short codec, byte[] data, boolean newKnownGood, boolean newSlotToo) { - if(l < origEdition) { - Logger.error(this, "Wrong edition: "+l+" less than "+origEdition); - return; - } - if(newKnownGood) { - String uri = key.copy(l).getURI().setMetaString(metaStrings).toString(); - if(logMINOR) Logger.minor(this, "Bookmark "+bookmarkName+" new last known good edition 
"+l+" uri is now "+uri); - addBookmark(bookmarkName, uri); - } - } - - } - - public synchronized void removeBookmark(String name) { - bookmarks.remove(name); - saveState(); - } - - public synchronized String getBookmark(String bm) { - return bookmarks.get(bm); - } - /** - * Returns the Set of bookmark names, unmodifiable - * @return The set of bookmarks - */ - public Set bookmarkKeys() { - return Collections.unmodifiableSet(bookmarks.keySet()); - } - - /** - * Returns a set of Index objects one for each of the uri's specified - * gets an existing one if its there else makes a new one - * - * @param indexuris list of index specifiers separated by spaces - * @return Set of Index objects - */ - public final ArrayList getIndices(String indexuris) throws InvalidSearchException, TaskAbortException { - String[] uris = indexuris.split("[ ;]"); - ArrayList indices = new ArrayList(uris.length); - - for ( String uri : uris){ - indices.add(getIndex(uri, null)); - } - return indices; - } - - // See comments near rtab. Can't use in parallel so not acceptable. -// /** -// * Method to get all of the instatiated Indexes -// */ -// public final Iterable getAllIndices() { -// return rtab.values(); -// } -// - public final Index getIndex(String indexuri) throws InvalidSearchException, TaskAbortException { - return getIndex(indexuri, null); - } - - /** - * Gets an index using its id in the form {type}:{uri}
- * known types are xml, bookmark - * @param indexid - * @return Index object - * @throws plugins.Library.util.InvalidSearchException - */ - /** - * Returns an Index object for the uri specified, - * gets an existing one if its there else makes a new one - * - * TODO : identify all index types so index doesn't need to refer to them directly - * @param indexuri index specifier - * @return Index object - */ - public final Index getIndex(String indexuri, String origIndexName) throws InvalidSearchException, TaskAbortException { - Logger.normal(this, "Getting index "+indexuri); - indexuri = indexuri.trim(); - if (indexuri.startsWith(BOOKMARK_PREFIX)){ - indexuri = indexuri.substring(BOOKMARK_PREFIX.length()); - if (bookmarks.containsKey(indexuri)) - return getIndex(bookmarks.get(indexuri), indexuri); - else - throw new InvalidSearchException("Index bookmark '"+indexuri+" does not exist"); - } - - // See comments near rtab. Can't use in parallel so caching is dangerous. -// if (rtab.containsKey(indexuri)) -// return rtab.get(indexuri); -// - Class indextype; - Index index; - Object indexkey; - - try{ - indexkey = getAddressTypeFromString(indexuri); - }catch(UnsupportedOperationException e){ - throw new TaskAbortException("Did not recognise index type in : \""+indexuri+"\"", e); - } - - long edition = -1; - - try { - if (indexkey instanceof File) { - indextype = getIndexType((File)indexkey); - } else if (indexkey instanceof FreenetURI) { - // TODO HIGH make this non-blocking - FreenetURI uri = (FreenetURI)indexkey; - if(uri.isUSK()) - edition = uri.getEdition(); - indextype = getIndexType(uri); - } else { - throw new AssertionError(); - } - - if (indextype == ProtoIndex.class) { - // TODO HIGH this *must* be non-blocking as it fetches the whole index root - PullTask task = new PullTask(indexkey); - ProtoIndexSerialiser.forIndex(indexkey, RequestStarter.INTERACTIVE_PRIORITY_CLASS).pull(task); - index = task.data; - - } else if (indextype == XMLIndex.class) { - index = new XMLIndex(indexuri, edition, pr, this, origIndexName); - - } else { - throw new AssertionError(); - } - - // See comments near rtab. Can't use in parallel so caching is dangerous. - //rtab.put(indexuri, index); - Logger.normal(this, "Loaded index type " + indextype.getName() + " at " + indexuri); - - return index; - - } catch (FetchException e) { - throw new TaskAbortException("Failed to fetch index " + indexuri+" : "+e, e, true); // can retry -/* KEYEXPLORER - } catch (IOException e) { - throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry - - } catch (LowLevelGetException e) { - throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry - - } catch (KeyListenerConstructionException e) { - throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry - - } catch (MetadataParseException e) { - throw new TaskAbortException("Failed to parse index " + indexuri, e); -*/ - } catch (UnsupportedOperationException e) { - throw new TaskAbortException("Failed to parse index " + indexuri+" : "+e, e); - - } catch (RuntimeException e) { - throw new TaskAbortException("Failed to load index " + indexuri+" : "+e, e); - - } - } - - - /** - ** Create a {@link FreenetArchiver} connected to the core of the - ** singleton's {@link PluginRespirator}. - ** - ** @throws IllegalStateException if the singleton has not been initialised - ** or if it does not have a respirator. 
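- ** Illustrative use, since the plugin's {@code YamlReaderWriter} implements both stream interfaces: {@code Library.makeArchiver(new YamlReaderWriter(), ProtoIndex.MIME_TYPE, 0x10000, RequestStarter.INTERACTIVE_PRIORITY_CLASS)}.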
- */ - public static FreenetArchiver - makeArchiver(ObjectStreamReader r, ObjectStreamWriter w, String mime, int size, short priorityClass) { - if (lib == null || lib.pr == null) { - throw new IllegalStateException("Cannot archive to freenet without a fully live Library plugin connected to a freenet node."); - } else { - return new FreenetArchiver(lib.pr.getNode().clientCore, r, w, mime, size, priorityClass); - } - } - - /** - ** Create a {@link FreenetArchiver} connected to the core of the - ** singleton's {@link PluginRespirator}. - ** - ** @throws IllegalStateException if the singleton has not been initialised - ** or if it does not have a respirator. - */ - public static FreenetArchiver - makeArchiver(S rw, String mime, int size, short priorityClass) { - return Library.makeArchiver(rw, rw, mime, size, priorityClass); - } - - public static String convertToHex(byte[] data) { - StringBuilder buf = new StringBuilder(); - for (int i = 0; i < data.length; i++) { - int halfbyte = (data[i] >>> 4) & 0x0F; - int two_halfs = 0; - do { - if ((0 <= halfbyte) && (halfbyte <= 9)) - buf.append((char) ('0' + halfbyte)); - else - buf.append((char) ('a' + (halfbyte - 10))); - halfbyte = data[i] & 0x0F; - } while (two_halfs++ < 1); - } - return buf.toString(); - } - - //this function will return the String representation of the MD5 hash for the input string - public static String MD5(String text) { - try { - MessageDigest md = MessageDigest.getInstance("MD5"); - byte[] b = text.getBytes("UTF-8"); - md.update(b, 0, b.length); - byte[] md5hash = md.digest(); - return convertToHex(md5hash); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public void update(String updateContext, String indexuri) { - addBookmark(updateContext, indexuri); - } -} diff --git a/src/plugins/Library/Main.java b/src/plugins/Library/Main.java deleted file mode 100644 index 600cb330..00000000 --- a/src/plugins/Library/Main.java +++ /dev/null @@ -1,199 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library; - -import freenet.node.RequestStarter; -import freenet.pluginmanager.PluginReplySender; -import freenet.support.MutableBoolean; -import freenet.support.SimpleFieldSet; -import freenet.support.api.Bucket; -import freenet.support.io.BucketTools; -import freenet.support.io.Closer; -import freenet.support.io.FileBucket; -import freenet.support.io.FileUtil; -import freenet.support.io.LineReadingInputStream; -import freenet.support.io.NativeThread; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; -import java.util.logging.Level; - -import plugins.Library.client.FreenetArchiver; -import plugins.Library.index.ProtoIndex; -import plugins.Library.index.ProtoIndexComponentSerialiser; -import plugins.Library.index.ProtoIndexSerialiser; -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermPageEntry; -import plugins.Library.search.Search; -import plugins.Library.ui.WebInterface; -import plugins.Library.util.SkeletonBTreeMap; -import plugins.Library.util.SkeletonBTreeSet; -import plugins.Library.util.TaskAbortExceptionConvertor; -import plugins.Library.util.concurrent.Executors; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.func.Closure; - -import freenet.pluginmanager.FredPlugin; -import freenet.pluginmanager.FredPluginL10n; -import freenet.pluginmanager.FredPluginRealVersioned; -import freenet.pluginmanager.FredPluginThreadless; -import freenet.pluginmanager.FredPluginVersioned; -import freenet.pluginmanager.PluginNotFoundException; -import freenet.pluginmanager.PluginRespirator; -import freenet.support.Executor; -import freenet.client.InsertException; -import freenet.keys.FreenetURI; -import freenet.keys.InsertableClientSSK; -import freenet.l10n.BaseL10n.LANGUAGE; - -import freenet.pluginmanager.FredPluginFCP; -import freenet.support.Logger; -import java.io.BufferedReader; -import java.io.BufferedWriter; -import java.io.EOFException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.FilenameFilter; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.security.MessageDigest; -import plugins.Library.index.TermEntryReaderWriter; -import plugins.Library.index.xml.LibrarianHandler; -import plugins.Library.io.serial.LiveArchiver; -import plugins.Library.io.serial.Serialiser.PullTask; -import plugins.Library.io.serial.Serialiser.PushTask; - -/** - * Library class is the api for others to use search facilities, it is used by the interfaces - * @author MikeB - */ -public class Main implements FredPlugin, FredPluginVersioned, freenet.pluginmanager.FredPluginHTTP, // TODO remove this later - FredPluginRealVersioned, FredPluginThreadless, FredPluginL10n, FredPluginFCP { - - private static PluginRespirator pr; - private Library library; - private WebInterface webinterface; - private SpiderIndexUploader uploader; - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(Main.class); - } - - public static PluginRespirator getPluginRespirator() { - return pr; - } - - // FredPluginL10n - public void setLanguage(freenet.l10n.BaseL10n.LANGUAGE lang) { - // TODO implement - } - - // FredPluginVersioned - public String 
getVersion() { - return library.getVersion() + " " + Version.vcsRevision(); - } - - // FredPluginRealVersioned - public long getRealVersion() { - return library.getVersion(); - } - - // FredPluginHTTP - // TODO remove this later - public String handleHTTPGet(freenet.support.api.HTTPRequest request) { - Throwable th; - try { - Class tester = Class.forName("plugins.Library.Tester"); - java.lang.reflect.Method method = tester.getMethod("runTest", Library.class, String.class); - try { - return (String)method.invoke(null, library, request.getParam("plugins.Library.Tester")); - } catch (java.lang.reflect.InvocationTargetException e) { - throw e.getCause(); - } - } catch (ClassNotFoundException e) { - return "

To use Library, go to Browsing -> Search Freenet in the main menu in FProxy. This page is only where the test suite would be, if it had been compiled in (give -Dtester= to ant).
"; - } catch (Throwable t) { - th = t; - } - java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream(); - th.printStackTrace(new java.io.PrintStream(bytes)); - return "
" + bytes + "
"; - } - // TODO remove this later - public String handleHTTPPost(freenet.support.api.HTTPRequest request) { return null; } - - // FredPlugin - public void runPlugin(PluginRespirator pr) { - Main.pr = pr; - Executor exec = pr.getNode().executor; - library = Library.init(pr); - Search.setup(library, exec); - Executors.setDefaultExecutor(exec); - webinterface = new WebInterface(library, pr); - webinterface.load(); - uploader = new SpiderIndexUploader(pr); - uploader.start(); - } - - public void terminate() { - webinterface.unload(); - } - - public String getString(String key) { - return key; - } - - private static String convertToHex(byte[] data) { - StringBuilder buf = new StringBuilder(); - for (int i = 0; i < data.length; i++) { - int halfbyte = (data[i] >>> 4) & 0x0F; - int two_halfs = 0; - do { - if ((0 <= halfbyte) && (halfbyte <= 9)) - buf.append((char) ('0' + halfbyte)); - else - buf.append((char) ('a' + (halfbyte - 10))); - halfbyte = data[i] & 0x0F; - } while (two_halfs++ < 1); - } - return buf.toString(); - } - - //this function will return the String representation of the MD5 hash for the input string - public static String MD5(String text) { - try { - MessageDigest md = MessageDigest.getInstance("MD5"); - byte[] b = text.getBytes("UTF-8"); - md.update(b, 0, b.length); - byte[] md5hash = md.digest(); - return convertToHex(md5hash); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public void handle(PluginReplySender replysender, SimpleFieldSet params, final Bucket data, int accesstype) { - if("pushBuffer".equals(params.get("command"))){ - uploader.handlePushBuffer(params, data); - } else if("getSpiderURI".equals(params.get("command"))) { - uploader.handleGetSpiderURI(replysender); - } else { - Logger.error(this, "Unknown command : \""+params.get("command")); - } - } - -} diff --git a/src/plugins/Library/SpiderIndexURIs.java b/src/plugins/Library/SpiderIndexURIs.java deleted file mode 100644 index 46ce8edd..00000000 --- a/src/plugins/Library/SpiderIndexURIs.java +++ /dev/null @@ -1,121 +0,0 @@ -package plugins.Library; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; - -import freenet.keys.FreenetURI; -import freenet.keys.InsertableClientSSK; -import freenet.pluginmanager.PluginRespirator; -import freenet.support.Logger; -import freenet.support.io.Closer; - -class SpiderIndexURIs { - private FreenetURI privURI; - private FreenetURI pubURI; - private long edition; - private final PluginRespirator pr; - - SpiderIndexURIs(PluginRespirator pr) { - this.pr = pr; - } - - synchronized long setEdition(long newEdition) { - if(newEdition < edition) return edition; - else return edition = newEdition; - } - - synchronized FreenetURI loadSSKURIs() { - if(privURI == null) { - File f = new File(SpiderIndexUploader.PRIV_URI_FILENAME); - FileInputStream fis = null; - InsertableClientSSK privkey = null; - boolean newPrivKey = false; - try { - fis = new FileInputStream(f); - BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); - privURI = new FreenetURI(br.readLine()).setDocName("index.yml"); // Else InsertableClientSSK doesn't like it. 
- privkey = InsertableClientSSK.create(privURI); - Logger.debug(this, "Read old privkey"); - this.pubURI = privkey.getURI(); - Logger.debug(this, "Recovered URI from disk, pubkey is "+pubURI); - fis.close(); - fis = null; - } catch (IOException e) { - // Ignore - } finally { - Closer.close(fis); - } - if(privURI == null) { - InsertableClientSSK key = InsertableClientSSK.createRandom(pr.getNode().random, "index.yml"); - privURI = key.getInsertURI(); - pubURI = key.getURI(); - newPrivKey = true; - Logger.debug(this, "Created new keypair, pubkey is "+pubURI); - } - FileOutputStream fos = null; - if(newPrivKey) { - try { - fos = new FileOutputStream(new File(SpiderIndexUploader.PRIV_URI_FILENAME)); - OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); - osw.write(privURI.toASCIIString()); - osw.close(); - fos = null; - } catch (IOException e) { - Logger.error(this, "Failed to write new private key : ", e); - } finally { - Closer.close(fos); - } - } - try { - fos = new FileOutputStream(new File(SpiderIndexUploader.PUB_URI_FILENAME)); - OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); - osw.write(pubURI.toASCIIString()); - osw.close(); - fos = null; - } catch (IOException e) { - Logger.error(this, "Failed to write new pubkey", e); - } finally { - Closer.close(fos); - } - try { - fis = new FileInputStream(new File(SpiderIndexUploader.EDITION_FILENAME)); - BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); - try { - edition = Long.parseLong(br.readLine()); - } catch (NumberFormatException e) { - edition = 0; - } - Logger.debug(this, "Edition: "+edition); - fis.close(); - fis = null; - } catch (IOException e) { - // Ignore - edition = 0; - } finally { - Closer.close(fis); - } - } - return privURI; - } - - synchronized FreenetURI getPrivateUSK() { - return loadSSKURIs().setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(edition); - } - - /** Will return edition -1 if no successful uploads so far, otherwise the correct edition. */ - synchronized FreenetURI getPublicUSK() { - loadSSKURIs(); - return pubURI.setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(getLastUploadedEdition()); - } - - private synchronized long getLastUploadedEdition() { - /** If none uploaded, return -1, otherwise return the last uploaded version. 
*/ - return edition-1; - } - -} \ No newline at end of file diff --git a/src/plugins/Library/SpiderIndexUploader.java b/src/plugins/Library/SpiderIndexUploader.java deleted file mode 100644 index 9b853337..00000000 --- a/src/plugins/Library/SpiderIndexUploader.java +++ /dev/null @@ -1,969 +0,0 @@ -package plugins.Library; - -import java.io.BufferedReader; -import java.io.EOFException; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.FileWriter; -import java.io.FilenameFilter; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.OutputStreamWriter; -import java.net.MalformedURLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.SortedSet; -import java.util.TreeSet; -import java.util.logging.Level; - -import plugins.Library.client.FreenetArchiver; -import plugins.Library.index.ProtoIndex; -import plugins.Library.index.ProtoIndexComponentSerialiser; -import plugins.Library.index.ProtoIndexSerialiser; -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermEntryReaderWriter; -import plugins.Library.io.serial.LiveArchiver; -import plugins.Library.io.serial.Serialiser.PullTask; -import plugins.Library.io.serial.Serialiser.PushTask; -import plugins.Library.util.SkeletonBTreeMap; -import plugins.Library.util.SkeletonBTreeSet; -import plugins.Library.util.TaskAbortExceptionConvertor; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.func.Closure; -import freenet.client.InsertException; -import freenet.keys.FreenetURI; -import freenet.node.RequestStarter; -import freenet.pluginmanager.PluginNotFoundException; -import freenet.pluginmanager.PluginReplySender; -import freenet.pluginmanager.PluginRespirator; -import freenet.support.Logger; -import freenet.support.MutableBoolean; -import freenet.support.SimpleFieldSet; -import freenet.support.TimeUtil; -import freenet.support.api.Bucket; -import freenet.support.io.BucketTools; -import freenet.support.io.Closer; -import freenet.support.io.FileBucket; -import freenet.support.io.FileUtil; -import freenet.support.io.LineReadingInputStream; - -public class SpiderIndexUploader { - - SpiderIndexUploader(PluginRespirator pr) { - this.pr = pr; - spiderIndexURIs = new SpiderIndexURIs(pr); - } - - static boolean logMINOR; - static { - Logger.registerClass(SpiderIndexUploader.class); - } - - private final PluginRespirator pr; - private Object freenetMergeSync = new Object(); - private boolean freenetMergeRunning = false; - private boolean diskMergeRunning = false; - - private final ArrayList toMergeToDisk = new ArrayList(); - static final int MAX_HANDLING_COUNT = 5; - // When pushing is broken, allow max handling to reach this level before stalling forever to prevent running out of disk space. - private int PUSH_BROKEN_MAX_HANDLING_COUNT = 10; - // Don't use too much disk space, take into account fact that Spider slows down over time. - - private boolean pushBroken; - - /** The temporary on-disk index. We merge stuff into this until it exceeds a threshold size, then - * we create a new diskIdx and merge the old one into the idxFreenet. */ - ProtoIndex idxDisk; - /** idxDisk gets merged into idxFreenet this long after the last merge completed. 
*/ - static final long MAX_TIME = 24*60*60*1000L; - /** idxDisk gets merged into idxFreenet after this many incoming updates from Spider. */ - static final int MAX_UPDATES = 16; - /** idxDisk gets merged into idxFreenet after it has grown to this many terms. - * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must - * fit into memory during the merge process. */ - static final int MAX_TERMS = 100*1000; - /** idxDisk gets merged into idxFreenet after it has grown to this many terms. - * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must - * fit into memory during the merge process. */ - static final int MAX_TERMS_NOT_UPLOADED = 10*1000; - /** Maximum size of a single entry, in TermPageEntry count, on disk. If we exceed this we force an - * insert-to-freenet and move on to a new disk index. The problem is that the merge to Freenet has - * to keep the whole of each entry in RAM. This is only true for the data being merged in - the - * on-disk index - and not for the data on Freenet, which is pulled on demand. SCALABILITY */ - static final int MAX_DISK_ENTRY_SIZE = 10000; - /** Like pushNumber, the number of the current disk dir, used to create idxDiskDir. */ - private int dirNumber; - static final String DISK_DIR_PREFIX = "library-temp-index-"; - /** Directory the current idxDisk is saved in. */ - File idxDiskDir; - private int mergedToDisk; - - ProtoIndexSerialiser srl = null; - FreenetURI lastUploadURI = null; - String lastDiskIndexName; - /** The uploaded index on Freenet. This never changes, it just gets updated. */ - ProtoIndex idxFreenet; - - private final SpiderIndexURIs spiderIndexURIs; - - long pushNumber; - static final String LAST_URL_FILENAME = "library.index.lastpushed.chk"; - static final String PRIV_URI_FILENAME = "library.index.privkey"; - static final String PUB_URI_FILENAME = "library.index.pubkey"; - static final String EDITION_FILENAME = "library.index.next-edition"; - - static final String LAST_DISK_FILENAME = "library.index.lastpushed.disk"; - - static final String BASE_FILENAME_PUSH_DATA = "library.index.data."; - - /** Merge from the Bucket chain to the on-disk idxDisk. */ - protected void wrapMergeToDisk() { - spiderIndexURIs.loadSSKURIs(); - boolean first = true; - while(true) { - final Bucket data; - synchronized(freenetMergeSync) { - if(pushBroken) { - Logger.error(this, "Pushing broken"); - return; - } - if(first && diskMergeRunning) { - Logger.error(this, "Already running a handler!"); - return; - } else if((!first) && (!diskMergeRunning)) { - Logger.error(this, "Already running yet runningHandler is false?!"); - return; - } - first = false; - if(toMergeToDisk.size() == 0) { - if(logMINOR) Logger.minor(this, "Nothing to handle"); - diskMergeRunning = false; - freenetMergeSync.notifyAll(); - return; - } - data = toMergeToDisk.remove(0); - freenetMergeSync.notifyAll(); - diskMergeRunning = true; - } - try { - mergeToDisk(data); - } catch (Throwable t) { - // Failed. - synchronized(freenetMergeSync) { - diskMergeRunning = false; - pushBroken = true; - freenetMergeSync.notifyAll(); - } - if(t instanceof RuntimeException) - throw (RuntimeException)t; - if(t instanceof Error) - throw (Error)t; - } - } - } - - // This is a member variable because it is huge, and having huge stuff in local variables seems to upset the default garbage collector. - // It doesn't need to be synchronized because it's always used from mergeToDisk, which never runs in parallel. 
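Taken together, the four thresholds above drive maybeMergeToFreenet() further down in this file. As a reading aid, the trigger condition amounts to the following predicate (a paraphrase with illustrative parameter names, not the literal deleted code; the constants are copied from above):

    static final long MAX_TIME = 24 * 60 * 60 * 1000L; // 24 hours
    static final int MAX_UPDATES = 16;
    static final int MAX_TERMS = 100 * 1000;
    static final int MAX_TERMS_NOT_UPLOADED = 10 * 1000;

    // True when the on-disk index should be pushed to Freenet.
    // Mirrors the if-condition in maybeMergeToFreenet() below.
    static boolean shouldMergeToFreenet(long termsOnDisk, int mergesSinceUpload,
                                        boolean termTooBig, long lastUpload) {
        boolean uploadedBefore = lastUpload > 0;
        return (uploadedBefore && termsOnDisk > MAX_TERMS)
            || termsOnDisk > MAX_TERMS_NOT_UPLOADED
            || mergesSinceUpload > MAX_UPDATES
            || termTooBig // a single term exceeded MAX_DISK_ENTRY_SIZE entries
            || (uploadedBefore
                && System.currentTimeMillis() - lastUpload > MAX_TIME);
    }

Note that as the condition is written in the deleted code, the MAX_TERMS_NOT_UPLOADED clause is not guarded by uploadedBefore, so it subsumes the MAX_TERMS clause entirely (10*1000 < 100*1000); the intent was presumably to apply the lower limit only before the first successful upload.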
- private Map> newtrees; - // Ditto - private SortedSet terms; - - ProtoIndexSerialiser srlDisk = null; - private ProtoIndexComponentSerialiser leafsrlDisk; - - private long lastMergedToFreenet = -1; - - /** Merge a bucket of TermEntry's into an on-disk index. */ - private void mergeToDisk(Bucket data) { - - boolean newIndex = false; - - if(idxDiskDir == null) { - newIndex = true; - if(!createDiskDir()) return; - } - - if(!makeDiskDirSerialiser()) return; - - // Read data into newtrees and trees. - long entriesAdded = readTermsFrom(data); - - if(terms.size() == 0) { - Logger.debug(this, "Nothing to merge"); - synchronized(this) { - newtrees = null; - terms = null; - } - return; - } - - // Merge the new data to the disk index. - - try { - final MutableBoolean maxDiskEntrySizeExceeded = new MutableBoolean(); - maxDiskEntrySizeExceeded.value = false; - long mergeStartTime = System.currentTimeMillis(); - if(newIndex) { - if(createDiskIndex()) - maxDiskEntrySizeExceeded.value = true; - } else { - // async merge - Closure>, TaskAbortException> clo = - createMergeFromNewtreesClosure(maxDiskEntrySizeExceeded); - assert(idxDisk.ttab.isBare()); - Logger.debug(this, "Merging "+terms.size()+" terms, tree.size = "+idxDisk.ttab.size()+" from "+data+"..."); - idxDisk.ttab.update(terms, null, clo, new TaskAbortExceptionConvertor()); - - } - // Synchronize anyway so garbage collector knows about it. - synchronized(this) { - newtrees = null; - terms = null; - } - assert(idxDisk.ttab.isBare()); - PushTask task4 = new PushTask(idxDisk); - srlDisk.push(task4); - - long mergeEndTime = System.currentTimeMillis(); - Logger.debug(this, entriesAdded + " entries merged to disk in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", "); - // FileArchiver produces a String, which is a filename not including the prefix or suffix. - String uri = (String)task4.meta; - lastDiskIndexName = uri; - Logger.normal(this, "Pushed new index to file "+uri); - if(writeStringTo(new File(LAST_DISK_FILENAME), uri) && - writeStringTo(new File(idxDiskDir, LAST_DISK_FILENAME), uri)) { - // Successfully uploaded and written new status. Can delete the incoming data. - data.free(); - } - - maybeMergeToFreenet(maxDiskEntrySizeExceeded); - } catch (TaskAbortException e) { - Logger.error(this, "Failed to upload index for spider: ", e); - e.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - } - } - - /** We have just written a Bucket of new data to an on-disk index. We may or may not want to - * upload to an on-Freenet index, depending on how big the data is etc. If we do, we will need - * to create a new on-disk index. - * @param maxDiskEntrySizeExceeded A flag object which is set (off-thread) if any single term - * in the index is very large. - */ - private void maybeMergeToFreenet(MutableBoolean maxDiskEntrySizeExceeded) { - // Maybe chain to mergeToFreenet ??? 
- - boolean termTooBig = false; - synchronized(maxDiskEntrySizeExceeded) { - termTooBig = maxDiskEntrySizeExceeded.value; - } - - mergedToDisk++; - if((lastMergedToFreenet > 0 && idxDisk.ttab.size() > MAX_TERMS) || - (idxDisk.ttab.size() > MAX_TERMS_NOT_UPLOADED) - || (mergedToDisk > MAX_UPDATES) || termTooBig || - (lastMergedToFreenet > 0 && (System.currentTimeMillis() - lastMergedToFreenet) > MAX_TIME)) { - - final ProtoIndex diskToMerge = idxDisk; - final File dir = idxDiskDir; - Logger.debug(this, "Exceeded threshold, starting new disk index and starting merge from disk to Freenet..."); - mergedToDisk = 0; - lastMergedToFreenet = -1; - idxDisk = null; - srlDisk = null; - leafsrlDisk = null; - idxDiskDir = null; - lastDiskIndexName = null; - - synchronized(freenetMergeSync) { - while(freenetMergeRunning) { - if(pushBroken) return; - Logger.normal(this, "Need to merge to Freenet, but last merge not finished yet. Waiting..."); - try { - freenetMergeSync.wait(); - } catch (InterruptedException e) { - // Ignore - } - } - if(pushBroken) return; - freenetMergeRunning = true; - } - - Runnable r = new Runnable() { - - public void run() { - try { - mergeToFreenet(diskToMerge, dir); - } catch (Throwable t) { - Logger.error(this, "Merge to Freenet failed: ", t); - t.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - } finally { - synchronized(freenetMergeSync) { - freenetMergeRunning = false; - if(!pushBroken) - lastMergedToFreenet = System.currentTimeMillis(); - freenetMergeSync.notifyAll(); - } - } - } - - }; - pr.getNode().executor.execute(r, "Library: Merge data from disk to Freenet"); - } else { - Logger.debug(this, "Not merging to Freenet yet: "+idxDisk.ttab.size()+" terms in index, "+mergedToDisk+" merges, "+(lastMergedToFreenet <= 0 ? "never merged to Freenet" : ("last merged to Freenet "+TimeUtil.formatTime(System.currentTimeMillis() - lastMergedToFreenet))+"ago")); - } - } - - private boolean writeURITo(File filename, FreenetURI uri) { - return writeStringTo(filename, uri.toString()); - } - - private boolean writeStringTo(File filename, String uri) { - FileOutputStream fos = null; - try { - fos = new FileOutputStream(filename); - OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); - osw.write(uri.toString()); - osw.close(); - fos = null; - return true; - } catch (IOException e) { - Logger.error(this, "Failed to write to "+filename+" : "+uri, e); - return false; - } finally { - Closer.close(fos); - } - } - - private String readStringFrom(File file) { - String ret; - FileInputStream fis = null; - try { - fis = new FileInputStream(file); - BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); - ret = br.readLine(); - fis.close(); - fis = null; - return ret; - } catch (IOException e) { - // Ignore - return null; - } finally { - Closer.close(fis); - } - } - - private FreenetURI readURIFrom(File file) { - String s = readStringFrom(file); - if(s != null) { - try { - return new FreenetURI(s); - } catch (MalformedURLException e) { - // Ignore. - } - } - return null; - } - - /** Create a callback object which will do the merging of individual terms. This will be called - * for each term as it is unpacked from the existing on-disk index. It then merges in new data - * from newtrees and writes the subtree for the term back to disk. Most of the work is done in - * update() below. - * @param maxDiskEntrySizeExceeded Will be set if any single term is so large that we need to - * upload to Freenet immediately. 
*/ - private Closure>, TaskAbortException> createMergeFromNewtreesClosure(final MutableBoolean maxDiskEntrySizeExceeded) { - return new - Closure>, TaskAbortException>() { - /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { - String key = entry.getKey(); - SkeletonBTreeSet tree = entry.getValue(); - if(logMINOR) Logger.minor(this, "Processing: "+key+" : "+tree); - if(tree != null) - Logger.debug(this, "Merging data (on disk) in term "+key); - else - Logger.debug(this, "Adding new term to disk index: "+key); - //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)")); - if (tree == null) { - entry.setValue(tree = makeEntryTree(leafsrlDisk)); - } - assert(tree.isBare()); - SortedSet toMerge = newtrees.get(key); - tree.update(toMerge, null); - if(toMerge.size() > MAX_DISK_ENTRY_SIZE) - synchronized(maxDiskEntrySizeExceeded) { - maxDiskEntrySizeExceeded.value = true; - } - toMerge = null; - newtrees.remove(key); - assert(tree.isBare()); - if(logMINOR) Logger.minor(this, "Updated: "+key+" : "+tree); - //System.out.println("handled " + key); - } - }; - } - - /** Create a new on-disk index from terms and newtrees. - * @return True if the size of any one item in the index is so large that we must upload - * immediately to Freenet. - * @throws TaskAbortException If something broke catastrophically. */ - private boolean createDiskIndex() throws TaskAbortException { - boolean tooBig = false; - // created a new index, fill it with data. - // DON'T MERGE, merge with a lot of data will deadlock. - // FIXME throw in update() if it will deadlock. - for(String key : terms) { - SkeletonBTreeSet tree = makeEntryTree(leafsrlDisk); - SortedSet toMerge = newtrees.get(key); - tree.addAll(toMerge); - if(toMerge.size() > MAX_DISK_ENTRY_SIZE) - tooBig = true; - toMerge = null; - tree.deflate(); - assert(tree.isBare()); - idxDisk.ttab.put(key, tree); - } - idxDisk.ttab.deflate(); - return tooBig; - } - - /** Read the TermEntry's from the Bucket into newtrees and terms, and set up the index - * properties. - * @param data The Bucket containing TermPageEntry's etc serialised with TermEntryReaderWriter. - */ - private long readTermsFrom(Bucket data) { - FileWriter w = null; - newtrees = new HashMap>(); - terms = new TreeSet(); - int entriesAdded = 0; - InputStream is = null; - try { - Logger.normal(this, "Bucket of buffer received, "+data.size()+" bytes"); - is = data.getInputStream(); - SimpleFieldSet fs = new SimpleFieldSet(new LineReadingInputStream(is), 1024, 512, true, true, true); - idxDisk.setName(fs.get("index.title")); - idxDisk.setOwnerEmail(fs.get("index.owner.email")); - idxDisk.setOwner(fs.get("index.owner.name")); - idxDisk.setTotalPages(fs.getLong("totalPages", -1)); - try{ - while(true){ // Keep going til an EOFExcepiton is thrown - TermEntry readObject = TermEntryReaderWriter.getInstance().readObject(is); - SortedSet set = newtrees.get(readObject.subj); - if(set == null) - newtrees.put(readObject.subj, set = new TreeSet()); - set.add(readObject); - terms.add(readObject.subj); - entriesAdded++; - } - }catch(EOFException e){ - // EOF, do nothing - } - } catch (IOException ex) { - java.util.logging.Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex); - } finally { - Closer.close(is); - } - return entriesAdded; - } - - /** Create a directory for an on-disk index. - * @return False if something broke and we can't continue. 
*/ - private boolean createDiskDir() { - dirNumber++; - idxDiskDir = new File(DISK_DIR_PREFIX + Integer.toString(dirNumber)); - Logger.normal(this, "Created new disk dir for merging: "+idxDiskDir); - if(!(idxDiskDir.mkdir() || idxDiskDir.isDirectory())) { - Logger.error(this, "Unable to create new disk dir: "+idxDiskDir); - synchronized(this) { - pushBroken = true; - return false; - } - } - return true; - } - - /** Set up the serialisers for an on-disk index. - * @return False if something broke and we can't continue. */ - private boolean makeDiskDirSerialiser() { - if(srlDisk == null) { - srlDisk = ProtoIndexSerialiser.forIndex(idxDiskDir); - LiveArchiver,SimpleProgress> archiver = - (LiveArchiver,SimpleProgress>)(srlDisk.getChildSerialiser()); - leafsrlDisk = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, archiver); - if(lastDiskIndexName == null) { - try { - idxDisk = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0L); - } catch (java.net.MalformedURLException e) { - throw new AssertionError(e); - } - // FIXME more hacks: It's essential that we use the same FileArchiver instance here. - leafsrlDisk.setSerialiserFor(idxDisk); - } else { - try { - PullTask pull = new PullTask(lastDiskIndexName); - Logger.debug(this, "Pulling previous index "+lastDiskIndexName+" from disk so can update it."); - srlDisk.pull(pull); - Logger.debug(this, "Pulled previous index "+lastDiskIndexName+" from disk - updating..."); - idxDisk = pull.data; - if(idxDisk.getSerialiser().getLeafSerialiser() != archiver) - throw new IllegalStateException("Different serialiser: "+idxFreenet.getSerialiser()+" should be "+leafsrl); - } catch (TaskAbortException e) { - Logger.error(this, "Failed to download previous index for spider update: "+e, e); - e.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - return false; - } - } - } - return true; - } - - static final String INDEX_DOCNAME = "index.yml"; - - private ProtoIndexComponentSerialiser leafsrl; - - /** Merge a disk dir to an on-Freenet index. Usually called on startup, i.e. we haven't just - * created the on-disk index so we need to setup the ProtoIndex etc. */ - protected void mergeToFreenet(File diskDir) { - ProtoIndexSerialiser s = ProtoIndexSerialiser.forIndex(diskDir); - LiveArchiver,SimpleProgress> archiver = - (LiveArchiver,SimpleProgress>)(s.getChildSerialiser()); - ProtoIndexComponentSerialiser leaf = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, archiver); - String f = this.readStringFrom(new File(diskDir, LAST_DISK_FILENAME)); - if(f == null) { - if(diskDir.list().length == 0) { - Logger.debug(this, "Directory "+diskDir+" is empty. 
Nothing to merge."); - diskDir.delete(); - return; - } - // Ignore - Logger.error(this, "Unable to merge old data "+diskDir); - return; - } else { - Logger.debug(this, "Continuing old bucket: "+f); - } - - ProtoIndex idxDisk = null; - try { - PullTask pull = new PullTask(f); - Logger.debug(this, "Pulling previous index "+f+" from disk so can update it."); - s.pull(pull); - Logger.debug(this, "Pulled previous index "+f+" from disk - updating..."); - idxDisk = pull.data; - if(idxDisk.getSerialiser().getLeafSerialiser() != archiver) - throw new IllegalStateException("Different serialiser: "+idxDisk.getSerialiser()+" should be "+archiver); - } catch (TaskAbortException e) { - Logger.error(this, "Failed to download previous index for spider update: "+e, e); - e.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - return; - } - mergeToFreenet(idxDisk, diskDir); - } - - private final Object inflateSync = new Object(); - - /** Merge from an on-disk index to an on-Freenet index. - * @param diskToMerge The on-disk index. - * @param diskDir The folder the on-disk index is stored in. - */ - protected void mergeToFreenet(ProtoIndex diskToMerge, File diskDir) { - Logger.debug(this, "Merging on-disk index to Freenet: "+diskDir); - if(lastUploadURI == null) { - lastUploadURI = readURIFrom(new File(LAST_URL_FILENAME)); - } - setupFreenetCacheDir(); - - makeFreenetSerialisers(); - - updateOverallMetadata(diskToMerge); - - final SkeletonBTreeMap> newtrees = diskToMerge.ttab; - - // Do the upload - - // async merge - Closure>, TaskAbortException> clo = - createMergeFromTreeClosure(newtrees); - try { - long mergeStartTime = System.currentTimeMillis(); - assert(idxFreenet.ttab.isBare()); - Iterator it = - diskToMerge.ttab.keySetAutoDeflate().iterator(); - TreeSet terms = new TreeSet(); - while(it.hasNext()) terms.add(it.next()); - Logger.debug(this, "Merging "+terms.size()+" terms from disk to Freenet..."); - assert(terms.size() == diskToMerge.ttab.size()); - assert(idxFreenet.ttab.isBare()); - assert(diskToMerge.ttab.isBare()); - long entriesAdded = terms.size(); - // Run the actual merge. - idxFreenet.ttab.update(terms, null, clo, new TaskAbortExceptionConvertor()); - assert(idxFreenet.ttab.isBare()); - // Deflate the main tree. - newtrees.deflate(); - assert(diskToMerge.ttab.isBare()); - - // Push the top node to a CHK. - PushTask task4 = new PushTask(idxFreenet); - task4.meta = FreenetURI.EMPTY_CHK_URI; - srl.push(task4); - - // Now wait for the inserts to finish. They are started asynchronously in the above merge. - FreenetArchiver> arch = - (FreenetArchiver>) srl.getChildSerialiser(); - arch.waitForAsyncInserts(); - - long mergeEndTime = System.currentTimeMillis(); - Logger.debug(this, entriesAdded + " entries merged in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", "); - FreenetURI uri = (FreenetURI)task4.meta; - lastUploadURI = uri; - Logger.debug(this, "Uploaded new index to "+uri); - if(writeURITo(new File(LAST_URL_FILENAME), uri)) { - newtrees.deflate(); - diskToMerge = null; - terms = null; - Logger.debug(this, "Finished with disk index "+diskDir); - FileUtil.removeAll(diskDir); - } - - // Create the USK to redirect to the CHK at the top of the index. 
- uploadUSKForFreenetIndex(uri); - - } catch (TaskAbortException e) { - Logger.error(this, "Failed to upload index for spider: "+e, e); - e.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - } - } - - private void uploadUSKForFreenetIndex(FreenetURI uri) { - FreenetURI privUSK = spiderIndexURIs.getPrivateUSK(); - try { - FreenetURI tmp = pr.getHLSimpleClient().insertRedirect(privUSK, uri); - long ed; - synchronized(freenetMergeSync) { - ed = spiderIndexURIs.setEdition(tmp.getEdition()+1); - } - Logger.debug(this, "Uploaded index as USK to "+tmp); - - writeStringTo(new File(EDITION_FILENAME), Long.toString(ed)); - - } catch (InsertException e) { - e.printStackTrace(); - Logger.error(this, "Failed to upload USK for index update", e); - } - } - - /** Create a Closure which will merge the subtrees from one index (on disk) into the subtrees - * of another index (on Freenet). It will be called with each subtree from the on-Freenet - * index, and will merge data from the relevant on-disk subtree. Both subtrees are initially - * deflated, and should be deflated when we leave the method, to avoid running out of memory. - * @param newtrees The on-disk tree of trees to get data from. - * @return - */ - private Closure>, TaskAbortException> createMergeFromTreeClosure(final SkeletonBTreeMap> newtrees) { - return new - Closure>, TaskAbortException>() { - /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { - String key = entry.getKey(); - SkeletonBTreeSet tree = entry.getValue(); - if(logMINOR) Logger.minor(this, "Processing: "+key+" : "+tree); - //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)")); - boolean newTree = false; - if (tree == null) { - entry.setValue(tree = makeEntryTree(leafsrl)); - newTree = true; - } - assert(tree.isBare()); - SortedSet data; - // Can't be run in parallel. - synchronized(inflateSync) { - newtrees.inflate(key, true); - SkeletonBTreeSet entries; - entries = newtrees.get(key); - // CONCURRENCY: Because the lower-level trees are packed by the top tree, the bottom - // trees (SkeletonBTreeSet's) are not independant of each other. When the newtrees - // inflate above runs, it can deflate a tree that is still in use by another instance - // of this callback. Therefore we must COPY IT AND DEFLATE IT INSIDE THE LOCK. - entries.inflate(); - data = new TreeSet(entries); - entries.deflate(); - assert(entries.isBare()); - } - if(tree != null) - - if(newTree) { - tree.addAll(data); - assert(tree.size() == data.size()); - Logger.debug(this, "Added data to Freenet for term "+key+" : "+data.size()); - } else { - int oldSize = tree.size(); - tree.update(data, null); - // Note that it is possible for data.size() + oldSize != tree.size(), because we might be merging data we've already merged. - // But most of the time it will add up. - Logger.debug(this, "Merged data to Freenet in term "+key+" : "+data.size()+" + "+oldSize+" -> "+tree.size()); - } - tree.deflate(); - assert(tree.isBare()); - if(logMINOR) Logger.minor(this, "Updated: "+key+" : "+tree); - } - }; - } - - /** Update the overall metadata for the on-Freenet index from the on-disk index. */ - private void updateOverallMetadata(ProtoIndex diskToMerge) { - idxFreenet.setName(diskToMerge.getName()); - idxFreenet.setOwnerEmail(diskToMerge.getOwnerEmail()); - idxFreenet.setOwner(diskToMerge.getOwner()); - // This is roughly accurate, it might not be exactly so if we process a bit out of order. 
- idxFreenet.setTotalPages(diskToMerge.getTotalPages() + Math.max(0,idxFreenet.getTotalPages())); - } - - /** Setup the serialisers for uploading to Freenet. These convert tree nodes to and from blocks - * on Freenet, essentially. */ - private void makeFreenetSerialisers() { - if(srl == null) { - srl = ProtoIndexSerialiser.forIndex(lastUploadURI, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS); - LiveArchiver,SimpleProgress> archiver = - (LiveArchiver,SimpleProgress>)(srl.getChildSerialiser()); - leafsrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_DEFAULT, archiver); - if(lastUploadURI == null) { - try { - idxFreenet = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0L); - } catch (java.net.MalformedURLException e) { - throw new AssertionError(e); - } - // FIXME more hacks: It's essential that we use the same FreenetArchiver instance here. - leafsrl.setSerialiserFor(idxFreenet); - } else { - try { - PullTask pull = new PullTask(lastUploadURI); - Logger.debug(this, "Pulling previous index "+lastUploadURI+" so can update it."); - srl.pull(pull); - Logger.debug(this, "Pulled previous index "+lastUploadURI+" - updating..."); - idxFreenet = pull.data; - if(idxFreenet.getSerialiser().getLeafSerialiser() != archiver) - throw new IllegalStateException("Different serialiser: "+idxFreenet.getSerialiser()+" should be "+leafsrl); - } catch (TaskAbortException e) { - Logger.error(this, "Failed to download previous index for spider update: "+e, e); - e.printStackTrace(); - synchronized(freenetMergeSync) { - pushBroken = true; - } - return; - } - } - } - } - - /** Set up the on-disk cache, which keeps a copy of everything we upload to Freenet, so we - * won't need to re-download it, which can be very slow and doesn't always succeed. 
*/ - private void setupFreenetCacheDir() { - if(FreenetArchiver.getCacheDir() == null) { - File dir = new File("library-spider-pushed-data-cache"); - dir.mkdir(); - FreenetArchiver.setCacheDir(dir); - } - } - - protected static SkeletonBTreeSet makeEntryTree(ProtoIndexComponentSerialiser leafsrl) { - SkeletonBTreeSet tree = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - leafsrl.setSerialiserFor(tree); - return tree; - } - - public void start() { - final String[] oldToMerge; - synchronized(freenetMergeSync) { - oldToMerge = new File(".").list(new FilenameFilter() { - - public boolean accept(File arg0, String arg1) { - if(!(arg1.toLowerCase().startsWith(BASE_FILENAME_PUSH_DATA))) return false; - File f = new File(arg0, arg1); - if(!f.isFile()) return false; - if(f.length() == 0) { f.delete(); return false; } - String s = f.getName().substring(BASE_FILENAME_PUSH_DATA.length()); - pushNumber = Math.max(pushNumber, Long.parseLong(s)+1); - return true; - } - - }); - } - final String[] dirsToMerge; - synchronized(freenetMergeSync) { - dirsToMerge = new File(".").list(new FilenameFilter() { - - public boolean accept(File arg0, String arg1) { - if(!(arg1.toLowerCase().startsWith(DISK_DIR_PREFIX))) return false; - File f = new File(arg0, arg1); - String s = f.getName().substring(DISK_DIR_PREFIX.length()); - dirNumber = Math.max(dirNumber, Integer.parseInt(s)+1); - return true; - } - - }); - } - if(oldToMerge != null && oldToMerge.length > 0) { - Logger.debug(this, "Found "+oldToMerge.length+" buckets of old index data to merge..."); - Runnable r = new Runnable() { - - public void run() { - synchronized(freenetMergeSync) { - for(String filename : oldToMerge) { - File f = new File(filename); - toMergeToDisk.add(new FileBucket(f, true, false, false, true)); - } - } - wrapMergeToDisk(); - } - - }; - pr.getNode().executor.execute(r, "Library: handle index data from previous run"); - } - if(dirsToMerge != null && dirsToMerge.length > 0) { - Logger.debug(this, "Found "+dirsToMerge.length+" disk trees of old index data to merge..."); - Runnable r = new Runnable() { - - public void run() { - synchronized(freenetMergeSync) { - while(freenetMergeRunning) { - if(pushBroken) return; - Logger.normal(this, "Need to merge to Freenet, but last merge not finished yet. Waiting..."); - try { - freenetMergeSync.wait(); - } catch (InterruptedException e) { - // Ignore - } - } - if(pushBroken) return; - freenetMergeRunning = true; - } - try { - for(String filename : dirsToMerge) { - File f = new File(filename); - mergeToFreenet(f); - } - } finally { - synchronized(freenetMergeSync) { - freenetMergeRunning = false; - if(!pushBroken) - lastMergedToFreenet = System.currentTimeMillis(); - freenetMergeSync.notifyAll(); - } - } - - } - - }; - pr.getNode().executor.execute(r, "Library: handle trees from previous run"); - } - } - - public void handlePushBuffer(SimpleFieldSet params, Bucket data) { - - if(data.size() == 0) { - Logger.error(this, "Bucket of data ("+data+") to push is empty", new Exception("error")); - data.free(); - return; - } - - // Process data off-thread, but only one load at a time. - // Hence it won't stall Spider unless we get behind. 
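The body of handlePushBuffer() below spools each incoming bucket to disk before queueing it, and deliberately blocks Spider when the queue backs up. Stripped of error handling and logging, the hand-off reduces to roughly this pattern (an illustrative condensation using the names and constructor arguments of the deleted code, not the literal method):

    import freenet.support.api.Bucket;
    import freenet.support.io.BucketTools;
    import freenet.support.io.FileBucket;

    void spoolAndQueue(Bucket data) throws IOException {
        long pn;
        synchronized (this) { pn = pushNumber++; }
        // 1. Spool to a numbered file so the data survives a restart;
        //    start() rescans these files on the next run.
        File pushFile = new File(BASE_FILENAME_PUSH_DATA + pn);
        Bucket output = new FileBucket(pushFile, false, false, false, true);
        BucketTools.copy(data, output);
        data.free();
        synchronized (freenetMergeSync) {
            // 2. Throttle the producer: Spider blocks here while more than
            //    MAX_HANDLING_COUNT buckets are waiting for the disk merger.
            while (toMergeToDisk.size() > MAX_HANDLING_COUNT && !pushBroken) {
                try { freenetMergeSync.wait(); } catch (InterruptedException e) { }
            }
            // 3. Queue for the single-threaded merge-to-disk worker.
            toMergeToDisk.add(output);
        }
    }

Blocking the FCP caller rather than buffering without bound is what keeps a fast Spider from filling the disk faster than merges can drain it.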
- - long pn; - synchronized(this) { - pn = pushNumber++; - } - - final File pushFile = new File(BASE_FILENAME_PUSH_DATA+pn); - Bucket output = new FileBucket(pushFile, false, false, false, true); - try { - BucketTools.copy(data, output); - data.free(); - Logger.debug(this, "Written data to "+pushFile); - } catch (IOException e1) { - e1.printStackTrace(); - Logger.error(this, "Unable to back up push data #"+pn, e1); - output = data; - } - - synchronized(freenetMergeSync) { - boolean waited = false; - while(toMergeToDisk.size() > MAX_HANDLING_COUNT && !pushBroken) { - Logger.error(this, "Spider feeding us data too fast, waiting for background process to finish. Ahead of us in the queue: "+toMergeToDisk.size()); - try { - waited = true; - freenetMergeSync.wait(); - } catch (InterruptedException e) { - // Ignore - } - } - toMergeToDisk.add(output); - if(pushBroken) { - if(toMergeToDisk.size() < PUSH_BROKEN_MAX_HANDLING_COUNT) - // We have written the data, it will be recovered after restart. - Logger.error(this, "Pushing is broken, failing"); - else { - // Wait forever to prevent running out of disk space. - // Spider is single threaded. - // FIXME: Use an error return or a throwable to shut down Spider. - while(true) { - try { - freenetMergeSync.wait(); - } catch (InterruptedException e) { - // Ignore - } - } - } - return; - } - if(waited) - Logger.error(this, "Waited for previous handler to go away, moving on..."); - //if(freenetMergeRunning) return; // Already running, no need to restart it. - if(diskMergeRunning) return; // Already running, no need to restart it. - } - Runnable r = new Runnable() { - - public void run() { -// wrapMergeToFreenet(); - wrapMergeToDisk(); - } - - }; - pr.getNode().executor.execute(r, "Library: Handle data from Spider"); - } - - public FreenetURI getPublicUSKURI() { - return spiderIndexURIs.getPublicUSK(); - } - - public void handleGetSpiderURI(PluginReplySender replysender) { - FreenetURI uri = getPublicUSKURI(); - SimpleFieldSet sfs = new SimpleFieldSet(true); - sfs.putSingle("reply", "getSpiderURI"); - sfs.putSingle("publicUSK", uri.toString(true, false)); - try { - replysender.send(sfs); - } catch (PluginNotFoundException e) { - // Race condition, ignore. - } - } -} \ No newline at end of file diff --git a/src/plugins/Library/Version.java b/src/plugins/Library/Version.java deleted file mode 100644 index 042ee3cf..00000000 --- a/src/plugins/Library/Version.java +++ /dev/null @@ -1,17 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library; - -/** - * Necessary to be able to use pluginmanager's versions - */ -public class Version { - - private static final String vcsRevision = "@custom@"; - - public static String vcsRevision() { - return vcsRevision; - } - -} diff --git a/src/plugins/Library/VirtualIndex.java b/src/plugins/Library/VirtualIndex.java deleted file mode 100644 index 9fae7c91..00000000 --- a/src/plugins/Library/VirtualIndex.java +++ /dev/null @@ -1,20 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.URIEntry; -import plugins.Library.util.exec.Execution; - -/** -** Represents a virtual index that gets its data from another plugin. -** -** @author infinity0 -*/ -public interface VirtualIndex extends Index { - - - - -} diff --git a/src/plugins/Library/WriteableIndex.java b/src/plugins/Library/WriteableIndex.java deleted file mode 100644 index 5debe962..00000000 --- a/src/plugins/Library/WriteableIndex.java +++ /dev/null @@ -1,32 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.URIEntry; -import plugins.Library.util.exec.Execution; - -import freenet.keys.FreenetURI; - -import java.util.Collection; -import java.util.Set; - -/** -** Represents a writable Index. -** -** TODO -** -** @author infinity0 -*/ -public interface WriteableIndex extends Index { - - public Execution putTermEntries(Collection entries); - - public Execution remTermEntries(Collection entries); - - public Execution putURIEntries(Collection entries); - - public Execution remURIEntries(Collection entries); - -} diff --git a/src/plugins/Library/client/FreenetArchiver.java b/src/plugins/Library/client/FreenetArchiver.java deleted file mode 100644 index 4401bbd6..00000000 --- a/src/plugins/Library/client/FreenetArchiver.java +++ /dev/null @@ -1,608 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.client; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.ArrayList; -import java.util.HashSet; - -import plugins.Library.Library; -import plugins.Library.io.ObjectStreamReader; -import plugins.Library.io.ObjectStreamWriter; -import plugins.Library.io.serial.LiveArchiver; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.TaskAbortException; - -import freenet.client.ClientMetadata; -import freenet.client.FetchException; -import freenet.client.FetchException.FetchExceptionMode; -import freenet.client.FetchResult; -import freenet.client.HighLevelSimpleClient; -import freenet.client.InsertBlock; -import freenet.client.InsertContext; -import freenet.client.InsertException; -import freenet.client.async.BaseClientPutter; -import freenet.client.async.ClientContext; -import freenet.client.async.ClientPutCallback; -import freenet.client.async.ClientPutter; -import freenet.client.async.PersistenceDisabledException; -import freenet.client.events.ClientEvent; -import freenet.client.events.ClientEventListener; -import freenet.client.events.SplitfileProgressEvent; -import freenet.crypt.SHA256; -import freenet.keys.CHKBlock; -import freenet.keys.FreenetURI; -import freenet.node.NodeClientCore; -import freenet.node.RequestClient; -import freenet.node.RequestStarter; -import freenet.support.Base64; -import freenet.support.Logger; -import freenet.support.SimpleReadOnlyArrayBucket; -import freenet.support.SizeUtil; -import freenet.support.api.Bucket; -import freenet.support.api.RandomAccessBucket; -import freenet.support.io.BucketTools; -import freenet.support.io.Closer; -import freenet.support.io.FileBucket; -import freenet.support.io.ResumeFailedException; - -/** -** Converts between a map of {@link String} to {@link Object}, and a freenet -** key. An {@link ObjectStreamReader} and an {@link ObjectStreamWriter} is -** used to do the hard work once the relevant streams have been established, -** from temporary {@link Bucket}s. -** -** Supports a local cache. -** FIXME: NOT GARBAGE COLLECTED! Eventually will fill up all available disk space! -** -** @author infinity0 -*/ -public class FreenetArchiver -implements LiveArchiver { - - final protected NodeClientCore core; - final protected ObjectStreamReader reader; - final protected ObjectStreamWriter writer; - - final protected String default_mime; - final protected int expected_bytes; - public final short priorityClass; - public final boolean realTimeFlag; - private static File cacheDir; - /** If true, we will insert data semi-asynchronously. That is, we will start the - * insert, with ForceEncode enabled, and return the URI as soon as possible. The - * inserts will continue in the background, and before inserting the final USK, - * the caller should call waitForAsyncInserts(). - * - * FIXME SECURITY: This should be relatively safe, because it's all CHKs, as - * long as the content isn't easily predictable. - * - * FIXME PERFORMANCE: This dirty hack is unnecessary if we split up - * IterableSerialiser.push into a start phase and a stop phase. - * - * The main purpose of this mechanism is to minimise memory usage: Normally the - * data being pushed remains in RAM while we do the insert, which can take a very - * long time. 
*/ - static final boolean SEMI_ASYNC_PUSH = true; - - private final HashSet semiAsyncPushes = new HashSet(); - private final ArrayList pushesFailed = new ArrayList(); - private long totalBytesPushing; - - public static void setCacheDir(File dir) { - cacheDir = dir; - } - - public static File getCacheDir() { - return cacheDir; - } - - public FreenetArchiver(NodeClientCore c, ObjectStreamReader r, ObjectStreamWriter w, String mime, int size, short priority) { - if (c == null) { - throw new IllegalArgumentException("Can't create a FreenetArchiver with a null NodeClientCore!"); - } - this.priorityClass = priority; - // FIXME pass it in somehow - if(priorityClass <= RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS) - realTimeFlag = true; - else - realTimeFlag = false; - core = c; - reader = r; - writer = w; - default_mime = mime; - expected_bytes = size; - } - - public FreenetArchiver(NodeClientCore c, S rw, String mime, int size, short priority) { - this(c, rw, rw, mime, size, priority); - } - - /** - ** {@inheritDoc} - ** - ** This implementation expects metdata of type {@link FreenetURI}. - * - * FIXME OPT NORM: Two-phase pull: First pull to a bucket, then construct the data. - * The reason for this is memory usage: We can limit the number in the second - * phase according to downstream demand (e.g. blocking queues in ObjectProcessor's), - * so that the amount of stuff kept in memory is limited, while still allowing an - * (almost?) unlimited number in the fetching data phase. See comments in ParallelSerialiser.createPullJob. - */ - /*@Override**/ public void pullLive(PullTask task, final SimpleProgress progress) throws TaskAbortException { - // FIXME make retry count configgable by client metadata somehow - // clearly a web UI fetch wants it limited; a merge might want it unlimited - HighLevelSimpleClient hlsc = core.makeClient(priorityClass, false, false); - Bucket tempB = null; InputStream is = null; - - long startTime = System.currentTimeMillis(); - - FreenetURI u; - byte[] initialMetadata; - String cacheKey; - - if(task.meta instanceof FreenetURI) { - u = (FreenetURI) task.meta; - initialMetadata = null; - cacheKey = u.toString(false, true); - } else { - initialMetadata = (byte[]) task.meta; - u = FreenetURI.EMPTY_CHK_URI; - cacheKey = Base64.encode(SHA256.digest(initialMetadata)); - } - - for(int i=0;i<10;i++) { - // USK redirects should not happen really but can occasionally due to race conditions. - - try { - try { - - if(cacheDir != null && cacheDir.exists() && cacheDir.canRead()) { - File cached = new File(cacheDir, cacheKey); - if(cached.exists() && cached.length() != 0) { - tempB = new FileBucket(cached, true, false, false, false); - Logger.debug(this, "Fetching block for FreenetArchiver from disk cache: "+cacheKey); - } - } - - if(tempB == null) { - - if(initialMetadata != null) - Logger.debug(this, "Fetching block for FreenetArchiver from metadata ("+cacheKey+")"); - else - Logger.debug(this, "Fetching block for FreenetArchiver from network: "+u); - - if (progress != null) { - hlsc.addEventHook(new SimpleProgressUpdater(progress)); - } - - // code for async fetch - maybe be useful elsewhere - //ClientContext cctx = core.clientContext; - //FetchContext fctx = hlsc.getFetchContext(); - //FetchWaiter fw = new FetchWaiter(); - //ClientGetter gu = hlsc.fetch(furi, false, null, false, fctx, fw); - //gu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx, null); - //FetchResult res = fw.waitForCompletion(); - - FetchResult res; - - // bookkeeping. 
detects bugs in the SplitfileProgressEvent handler - if (progress != null) { - ProgressParts prog_old = progress.getParts(); - if(initialMetadata != null) - res = hlsc.fetchFromMetadata(new SimpleReadOnlyArrayBucket(initialMetadata)); - else - res = hlsc.fetch(u); - ProgressParts prog_new = progress.getParts(); - if (prog_old.known - prog_old.done != prog_new.known - prog_new.done) { - Logger.error(this, "Inconsistency when tracking split file progress (pulling): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); - System.err.println("Inconsistency when tracking split file progress (pulling): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); - } - progress.addPartKnown(0, true); - } else { - if(initialMetadata != null) - res = hlsc.fetchFromMetadata(new SimpleReadOnlyArrayBucket(initialMetadata)); - else - res = hlsc.fetch(u); - } - - tempB = res.asBucket(); - } else { - // Make sure SimpleProgress.join() doesn't stall. - if(progress != null) { - progress.addPartKnown(1, true); - progress.addPartDone(); - } - } - long endTime = System.currentTimeMillis(); - Logger.debug(this, "Fetched block for FreenetArchiver in "+(endTime-startTime)+"ms."); - is = tempB.getInputStream(); - task.data = (T)reader.readObject(is); - is.close(); - - } catch (FetchException e) { - if(e.mode == FetchExceptionMode.PERMANENT_REDIRECT && e.newURI != null) { - u = e.newURI; - continue; - } - throw new TaskAbortException("Failed to fetch content", e, true); - - } catch (IOException e) { - throw new TaskAbortException("Failed to read content from local tempbucket", e, true); - - } catch (RuntimeException e) { - throw new TaskAbortException("Failed to complete task: ", e); - - } - } catch (TaskAbortException e) { - if (progress != null) { progress.abort(e); } - throw e; - - } finally { - Closer.close(is); - Closer.close(tempB); - } - break; - } - } - - /** - ** {@inheritDoc} - ** - ** This implementation produces metdata of type {@link FreenetURI}. - ** - ** If the input metadata is an insert URI (SSK or USK), it will be replaced - ** by its corresponding request URI. Otherwise, the data will be inserted - ** as a CHK. Note that since {@link FreenetURI} is immutable, the {@link - ** FreenetURI#suggestedEdition} of a USK is '''not''' automatically - ** incremented. 
- */ - /*@Override**/ public void pushLive(PushTask task, final SimpleProgress progress) throws TaskAbortException { - HighLevelSimpleClient hlsc = core.makeClient(priorityClass, false, false); - RandomAccessBucket tempB = null; OutputStream os = null; - - - try { - ClientPutter putter = null; - PushCallback cb = null; - try { - tempB = core.tempBucketFactory.makeBucket(expected_bytes, 2); - os = tempB.getOutputStream(); - writer.writeObject(task.data, os); - os.close(); os = null; - tempB.setReadOnly(); - - boolean insertAsMetadata; - FreenetURI target; - - if(task.meta instanceof FreenetURI) { - insertAsMetadata = false; - target = (FreenetURI) task.meta; - } else { - insertAsMetadata = true; - target = FreenetURI.EMPTY_CHK_URI; - } - InsertBlock ib = new InsertBlock(tempB, new ClientMetadata(default_mime), target); - - Logger.debug(this, "Inserting block for FreenetArchiver..."); - long startTime = System.currentTimeMillis(); - - // code for async insert - maybe be useful elsewhere - //ClientContext cctx = core.clientContext; - //InsertContext ictx = hlsc.getInsertContext(true); - //PutWaiter pw = new PutWaiter(); - //ClientPutter pu = hlsc.insert(ib, false, null, false, ictx, pw); - //pu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx, null); - //FreenetURI uri = pw.waitForCompletion(); - - // bookkeeping. detects bugs in the SplitfileProgressEvent handler - ProgressParts prog_old = null; - if(progress != null) - prog_old = progress.getParts(); - - // FIXME make retry count configgable by client metadata somehow - // unlimited for push/merge - InsertContext ctx = hlsc.getInsertContext(false); - ctx.maxInsertRetries = -1; - // Early encode is normally a security risk. - // Hopefully it isn't here. - ctx.earlyEncode = true; - - String cacheKey = null; - -// if(!SEMI_ASYNC_PUSH) { -// // Actually report progress. -// if (progress != null) { -// hlsc.addEventHook(new SimpleProgressUpdater(progress)); -// } -// uri = hlsc.insert(ib, false, null, priorityClass, ctx); -// if (progress != null) -// progress.addPartKnown(0, true); -// } else { - // Do NOT report progress. Pretend we are done as soon as - // we have the URI. This allows us to minimise memory usage - // without yet splitting up IterableSerialiser.push() and - // doing it properly. FIXME - if(progress != null) - progress.addPartKnown(1, true); - cb = new PushCallback(progress, ib); - putter = new ClientPutter(cb, ib.getData(), FreenetURI.EMPTY_CHK_URI, ib.clientMetadata, - ctx, priorityClass, - false, null, false, core.clientContext, null, insertAsMetadata ? 
CHKBlock.DATA_LENGTH : -1); - cb.setPutter(putter); - long tStart = System.currentTimeMillis(); - try { - core.clientContext.start(putter); - } catch (PersistenceDisabledException e) { - // Impossible - } - WAIT_STATUS status = cb.waitFor(); - if(status == WAIT_STATUS.FAILED) { - cb.throwError(); - } else if(status == WAIT_STATUS.GENERATED_URI) { - FreenetURI uri = cb.getURI(); - task.meta = uri; - cacheKey = uri.toString(false, true); - Logger.debug(this, "Got URI for asynchronous insert: "+uri+" size "+tempB.size()+" in "+(System.currentTimeMillis() - cb.startTime)); - } else { - Bucket data = cb.getGeneratedMetadata(); - byte[] buf = BucketTools.toByteArray(data); - data.free(); - task.meta = buf; - cacheKey = Base64.encode(SHA256.digest(buf)); - Logger.debug(this, "Got generated metadata ("+buf.length+" bytes) for asynchronous insert size "+tempB.size()+" in "+(System.currentTimeMillis() - cb.startTime)); - } - if(progress != null) - progress.addPartDone(); -// } - - if(progress != null) { - ProgressParts prog_new = progress.getParts(); - if (prog_old.known - prog_old.done != prog_new.known - prog_new.done) { - Logger.error(this, "Inconsistency when tracking split file progress (pushing): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); - System.err.println("Inconsistency when tracking split file progress (pushing): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); - } - } - - task.data = null; - - if(cacheKey != null && cacheDir != null && cacheDir.exists() && cacheDir.canRead()) { - File cached = new File(cacheDir, cacheKey); - Bucket cachedBucket = new FileBucket(cached, false, false, false, false); - BucketTools.copy(tempB, cachedBucket); - } - - if(SEMI_ASYNC_PUSH) - tempB = null; // Don't free it here. - - } catch (InsertException e) { - if(cb != null) { - synchronized(this) { - if(semiAsyncPushes.remove(cb)) - totalBytesPushing -= cb.size(); - } - } - throw new TaskAbortException("Failed to insert content", e, true); - - } catch (IOException e) { - throw new TaskAbortException("Failed to write content to local tempbucket", e, true); - - } catch (RuntimeException e) { - throw new TaskAbortException("Failed to complete task: ", e); - - } - } catch (TaskAbortException e) { - if (progress != null) { progress.abort(e); } - throw e; - - } finally { - Closer.close(os); - Closer.close(tempB); - } - } - - enum WAIT_STATUS { - FAILED, - GENERATED_URI, - GENERATED_METADATA; - } - - public class PushCallback implements ClientPutCallback { - - public final long startTime = System.currentTimeMillis(); - private ClientPutter putter; - private FreenetURI generatedURI; - private Bucket generatedMetadata; - private InsertException failed; - // See FIXME's in push(), IterableSerialiser. - // We don't do real progress, we pretend we're done when push() returns. 
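PushCallback below is the other half of the SEMI_ASYNC_PUSH contract described earlier: pushLive() returns as soon as a URI (or generated metadata) is known, while the actual CHK inserts continue in the background. A caller is therefore expected to drain the pending inserts before publishing anything that points at the new data, along these lines (hypothetical driver code, not part of the patch; the real caller is SpiderIndexUploader.mergeToFreenet() earlier in this series):

    // Obtain the archiver the same way the deleted mergeToFreenet() does.
    FreenetArchiver<Map<String, Object>> arch =
            (FreenetArchiver<Map<String, Object>>) srl.getChildSerialiser();
    arch.push(task);            // returns once the top CHK URI is generated,
                                // with the insert still running in background
    arch.waitForAsyncInserts(); // block until every pending insert succeeds
    // Only now is it safe to insert the USK redirect pointing at the new
    // top key; publishing earlier could advertise data that never lands.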
-// private final SimpleProgress progress; - private final long size; - private final InsertBlock ib; - - public PushCallback(SimpleProgress progress, InsertBlock ib) { -// this.progress = progress; - this.ib = ib; - size = ib.getData().size(); - } - - public long size() { - return size; - } - - public synchronized void setPutter(ClientPutter put) { - putter = put; - synchronized(FreenetArchiver.this) { - if(semiAsyncPushes.add(this)) - totalBytesPushing += size; - Logger.debug(this, "Pushing "+totalBytesPushing+" bytes on "+semiAsyncPushes.size()+" inserters"); - } - } - - public synchronized WAIT_STATUS waitFor() { - while(generatedURI == null && generatedMetadata == null && failed == null) { - try { - wait(); - } catch (InterruptedException e) { - // Ignore - } - } - if(failed != null) return WAIT_STATUS.FAILED; - if(generatedURI != null) return WAIT_STATUS.GENERATED_URI; - return WAIT_STATUS.GENERATED_METADATA; - } - - public synchronized void throwError() throws InsertException { - if(failed != null) throw failed; - } - - public synchronized FreenetURI getURI() { - return generatedURI; - } - - public synchronized Bucket getGeneratedMetadata() { - return generatedMetadata; - } - - @Override - public void onFailure(InsertException e, BaseClientPutter state) { - Logger.error(this, "Failed background insert ("+generatedURI+"), now running: "+semiAsyncPushes.size()+" ("+SizeUtil.formatSize(totalBytesPushing)+")."); - synchronized(this) { - failed = e; - notifyAll(); - } - synchronized(FreenetArchiver.this) { - if(semiAsyncPushes.remove(this)) - totalBytesPushing -= size; - pushesFailed.add(e); - FreenetArchiver.this.notifyAll(); - } - if(ib != null) - ib.free(); - } - - @Override - public void onFetchable(BaseClientPutter state) { - // Ignore - } - - @Override - public synchronized void onGeneratedURI(FreenetURI uri, BaseClientPutter state) { - generatedURI = uri; - notifyAll(); - } - - @Override - public void onSuccess(BaseClientPutter state) { - synchronized(FreenetArchiver.this) { - if(semiAsyncPushes.remove(this)) - totalBytesPushing -= size; - Logger.debug(this, "Completed background insert ("+generatedURI+") in "+(System.currentTimeMillis()-startTime)+"ms, now running: "+semiAsyncPushes.size()+" ("+SizeUtil.formatSize(totalBytesPushing)+")."); - FreenetArchiver.this.notifyAll(); - } - if(ib != null) - ib.free(); -// if(progress != null) progress.addPartKnown(0, true); - - } - - @Override - public synchronized void onGeneratedMetadata(Bucket metadata, - BaseClientPutter state) { - generatedMetadata = metadata; - notifyAll(); - } - - @Override - public void onResume(ClientContext context) throws ResumeFailedException { - // Ignore. 
- } - - @Override - public RequestClient getRequestClient() { - return Library.REQUEST_CLIENT; - } - - } - - - - /*@Override**/ public void pull(PullTask task) throws TaskAbortException { - pullLive(task, null); - } - - /*@Override**/ public void push(PushTask task) throws TaskAbortException { - pushLive(task, null); - } - - public static class SimpleProgressUpdater implements ClientEventListener { - - final int[] splitfile_blocks = new int[2]; - final SimpleProgress progress; - - public SimpleProgressUpdater(SimpleProgress prog) { - progress = prog; - } - - @Override public void receive(ClientEvent ce, ClientContext context) { - progress.setStatus(ce.getDescription()); - if (!(ce instanceof SplitfileProgressEvent)) { return; } - - // update the progress "parts" counters - SplitfileProgressEvent evt = (SplitfileProgressEvent)ce; - int new_succeeded = evt.succeedBlocks; - int new_total = evt.minSuccessfulBlocks; - // fetch can go over 100% - if (new_succeeded > new_total) { - Logger.normal(this, "Received SplitfileProgressEvent greater than 100%: " + evt.getDescription()); - new_succeeded = new_total; - } - synchronized (splitfile_blocks) { - int old_succeeded = splitfile_blocks[0]; - int old_total = splitfile_blocks[1]; - try { - progress.addPartKnown(new_total - old_total, evt.finalizedTotal); // throws IllegalArgumentException - int n = new_succeeded - old_succeeded; - if (n == 1) { - progress.addPartDone(); - } else if (n != 0) { - Logger.normal(this, "Received SplitfileProgressEvent out-of-order: " + evt.getDescription()); - for (int i=0; i extra; - - - - final public /* DEBUG protected*/ SkeletonBTreeMap> ttab; - final protected SkeletonBTreeMap> utab; - - - public ProtoIndex(FreenetURI id, String n, String owner, String ownerEmail, long pages) { - this(id, n, owner, ownerEmail, pages, new Date(), new HashMap(), - new SkeletonBTreeMap>(BTREE_NODE_MIN), - new SkeletonBTreeMap>(BTREE_NODE_MIN)/*, - //filtab = new SkeletonPrefixTreeMap(new Token(), TKTAB_MAX)*/ - ); - } - - protected ProtoIndex(FreenetURI id, String n, String owner, String ownerEmail, long pages, Date m, Map x, - SkeletonBTreeMap> u, - SkeletonBTreeMap> t/*, - SkeletonMap f*/ - ) { - reqID = id; - name = (n == null)? "": n; - modified = m; - extra = x; - indexOwnerName = owner; - indexOwnerEmail = ownerEmail; - totalPages = pages; - - //filtab = f; - ttab = t; - utab = u; - } - - // TODO NORM maybe have more general class than ProtoIndexSerialiser - - protected int serialFormatUID; - protected ProtoIndexComponentSerialiser serialiser; - public ProtoIndexComponentSerialiser getSerialiser() { - return serialiser; - } - - public void setSerialiser(ProtoIndexComponentSerialiser srl) { - // FIXME LOW test for isLive() here... 
or something - serialFormatUID = srl.serialFormatUID; - serialiser = srl; - } - - - - - private Map>> getTermEntriesProgress = new - HashMap>>(); - - public Execution> getTermEntries(String term) { - Execution> request = getTermEntriesProgress.get(term); - if (request == null) { - request = new getTermEntriesHandler(term); - getTermEntriesProgress.put(term, request); - exec.execute((getTermEntriesHandler)request); - } - return request; - } - - - - - public Execution getURIEntry(FreenetURI uri) { - throw new UnsupportedOperationException("not implemented"); - } - - - public class getTermEntriesHandler extends AbstractExecution> implements Runnable, ChainedProgress { - // TODO NORM have a Runnable field instead of extending Runnable - // basically, redesign this entire class and series of classes - - final Map trackers = new LinkedHashMap(); - Object current_meta; - ProgressTracker current_tracker; - - protected getTermEntriesHandler(String t) { - super(t); - } - - @Override public ProgressParts getParts() throws TaskAbortException { - // TODO NORM tidy this up - Set result = getResult(); - int started = trackers.size(); - int known = started; - int done = started - 1; - // FIXME is ++known valid here? I get an exception with ProgressParts(0/1/0/2) without it, - // which presumably means last != null while trackers.size() == 0. - if (last != null) { ++started; ++done; ++known; } - if (done < 0) { done = 0; } - int estimate = (result != null)? ProgressParts.TOTAL_FINALIZED: Math.max(ttab.heightEstimate()+1, known); - return new ProgressParts(done, started, known, estimate); - } - - @Override public String getStatus() { - Progress cur = getCurrentProgress(); - return (cur == null)? "Starting next stage...": cur.getSubject() + ": " + cur.getStatus(); - } - - /*@Override**/ public Progress getCurrentProgress() { - return last != null? last: current_tracker == null? null: current_tracker.getPullProgressFor(current_meta); - } - - // TODO HIGH tidy this - see SkeletonBTreeMap.inflate() for details - Progress last = null; - /*@Override**/ public void run() { - try { - // get the root container - SkeletonBTreeSet root; - for (;;) { - try { - root = ttab.get(subject); - break; - } catch (DataNotLoadedException d) { - Skeleton p = d.getParent(); - trackers.put(current_meta = d.getValue(), current_tracker = ((Serialiser.Trackable)p.getSerialiser()).getTracker()); - p.inflate(d.getKey()); - } - } - - if (root == null) { - // TODO HIGH better way to handle this - throw new TaskAbortException("Index does not contain term " + subject, new Exception("Index does not contain term " + subject)); - } - last = root.getProgressInflate(); // REMOVE ME - root.inflate(); - - // Post-process relevance. 
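- // This multiplier is an inverse-document-frequency (IDF) style correction:
- // the rarer a term is across the whole index, the bigger its boost.
- // Illustrative numbers: totalPages = 1000000 and root.size() = 1000 give
- // multiplier = ln(1000000/1000) = ln(1000), about 6.9; a term present on
- // nearly every page gets a multiplier near 0, and a negative value is
- // reset to 1.0 below.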
- double multiplier = 1.0; - if(totalPages > 0) { - long total = totalPages; // Number of pages total - long specific = root.size(); // Number of pages in this entry - multiplier = Math.log(((double)total) / ((double)specific)); - if(multiplier < 0) { - Logger.error(this, "Negative multiplier!: "+multiplier+" total = "+total+" specific = "+root.size()); - multiplier = 1.0; - } else - Logger.normal(this, "Correcting results: "+multiplier); - } - Set entries = wrapper(root, multiplier); - - setResult(entries); - - } catch (TaskAbortException e) { - setError(e); - return; - } - } - - private Set wrapper(final SkeletonBTreeSet root, final double relAdjustment) { - return new AbstractSet() { - - public boolean add(TermEntry arg0) { - throw new UnsupportedOperationException(); - } - - public boolean addAll(Collection arg0) { - throw new UnsupportedOperationException(); - } - - public void clear() { - throw new UnsupportedOperationException(); - } - - public boolean contains(Object arg0) { - return root.contains(arg0); - } - - public boolean containsAll(Collection arg0) { - return root.containsAll(arg0); - } - - public boolean isEmpty() { - return root.isEmpty(); - } - - public Iterator iterator() { - final Iterator entries = root.iterator(); - return new Iterator() { - public boolean hasNext() { - return entries.hasNext(); - } - - public TermEntry next() { - TermEntry t = entries.next(); - if(t instanceof TermPageEntry && relAdjustment != 1.0) { - // Adjust relevance - return new TermPageEntry((TermPageEntry)t, (float)(relAdjustment*t.rel)); - } else - return t; - } - - public void remove() { - throw new UnsupportedOperationException(); - } - - }; - } - - public boolean remove(Object arg0) { - throw new UnsupportedOperationException(); - } - - public boolean removeAll(Collection arg0) { - throw new UnsupportedOperationException(); - } - - public boolean retainAll(Collection arg0) { - throw new UnsupportedOperationException(); - } - - public int size() { - return root.size(); - } - - }; - } - - } - - - public void setName(String indexName) { - this.name = indexName; - } - - public void setTotalPages(long total) { - totalPages = total; - } - - public void setOwner(String owner) { - this.indexOwnerName = owner; - } - - public void setOwnerEmail(String address) { - this.indexOwnerEmail = address; - } - - public String getName() { - return name; - } - - public String getOwnerEmail() { - return indexOwnerEmail; - } - - public String getOwner() { - return indexOwnerName; - } - - public long getTotalPages() { - return totalPages; - } - - - -} diff --git a/src/plugins/Library/index/ProtoIndexComponentSerialiser.java b/src/plugins/Library/index/ProtoIndexComponentSerialiser.java deleted file mode 100644 index 73d6f4f4..00000000 --- a/src/plugins/Library/index/ProtoIndexComponentSerialiser.java +++ /dev/null @@ -1,586 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.index; - -import plugins.Library.Library; -import plugins.Library.client.FreenetArchiver; -import plugins.Library.util.SkeletonTreeMap; -import plugins.Library.util.SkeletonBTreeMap; -import plugins.Library.util.SkeletonBTreeSet; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.BaseCompositeProgress; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.exec.TaskInProgressException; -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.Serialiser; -import plugins.Library.io.serial.Translator; -import plugins.Library.io.serial.ProgressTracker; -import plugins.Library.io.serial.Archiver; -import plugins.Library.io.serial.IterableSerialiser; -import plugins.Library.io.serial.MapSerialiser; -import plugins.Library.io.serial.LiveArchiver; -import plugins.Library.io.serial.ParallelSerialiser; -import plugins.Library.io.serial.Packer; -import plugins.Library.io.serial.Packer.Scale; // WORKAROUND javadoc bug #4464323 -import plugins.Library.io.serial.FileArchiver; -import plugins.Library.io.DataFormatException; -import plugins.Library.io.YamlReaderWriter; - -import freenet.keys.FreenetURI; -import freenet.node.RequestStarter; - -import java.io.File; -import java.util.Collection; -import java.util.Set; -import java.util.Map; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.LinkedHashMap; -import java.util.HashMap; -import java.util.TreeSet; -import java.util.TreeMap; -import java.util.Date; - -/** -** Serialiser for the components of a ProtoIndex. -** -** DOCUMENT -** -** @author infinity0 -*/ -public class ProtoIndexComponentSerialiser { - - final protected static HashMap - srl_fmt = new HashMap(); - - final protected static int FMT_FREENET_SIMPLE = 0x2db3c940; - public final static int FMT_FILE_LOCAL = 0xd439e29a; - - public final static int FMT_DEFAULT = FMT_FREENET_SIMPLE; - - /** - ** Converts between a low-level object and a byte stream. - */ - final protected static YamlReaderWriter yamlrw = new YamlReaderWriter(); - - /** - ** Translator for the local entries of a node of the ''term table''. - */ - final protected static Translator>, Map> - ttab_keys_mtr = new TreeMapTranslator>(null); - - /** - ** Translator for the local entries of a node of the ''B-tree'' for a - ** ''term''. - */ - final protected static Translator, Collection> - term_data_mtr = new SkeletonBTreeSet.TreeSetTranslator(); - - /** - ** Translator for {@link URIKey}. - */ - final protected static Translator - utab_keys_ktr = new Translator() { - public String app(URIKey k) { return k.toString(); } - public URIKey rev(String s) { return new URIKey(URIKey.hexToBytes(s)); } - }; - - /** - ** Translator for the local entries of a node of the ''uri table''. - */ - final protected static Translator>, Map> - utab_keys_mtr = new TreeMapTranslator>(utab_keys_ktr); - - /** - ** Serialiser for the ''targets'' of the values stored in a node of the - ** ''B-tree'' for a ''term''. In this case, the values are the actual - ** targets and are stored inside the node, so we use a dummy. - */ - final protected static MapSerialiser term_dummy = new DummySerialiser(); - - /** - ** Serialiser for the ''targets'' of the values stored in a node of the - ** ''B-tree'' for a ''urikey''. In this case, the values are the actual - ** targets and are stored inside the node, so we use a dummy. 
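- ** ({@link DummySerialiser}, defined at the bottom of this file, simply
- ** copies {@code task.meta} to {@code task.data} on pull, and back again
- ** on push.)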
- */ - final protected static MapSerialiser uri_dummy = new DummySerialiser(); - - /** - ** {@link Scale} for the root node of the ''B-tree'' that holds - ** ''term entries'' for a ''term''. - */ - final protected static Packer.Scale> - term_data_scale = new Packer.Scale>() { - @Override public int weigh(SkeletonBTreeSet element) { - return element.sizeRoot(); - } - }; - - /** - ** {@link Scale} for the root node of the ''B-tree'' that holds - ** ''uri-entry mappings'' for a ''urikey''. - */ - final protected static Packer.Scale> - uri_data_scale = new Packer.Scale>() { - @Override public int weigh(SkeletonBTreeMap element) { - return element.sizeRoot(); - } - }; - - /** - ** Get the instance responsible for the given format ID. - ** - ** @param archiver The LiveArchiver to use, can be null. - ** @throws UnsupportedOperationException if the format ID is unrecognised - ** @throws IllegalStateException if the requirements for creating the - ** instance (eg. existence of a freenet node) are not met. - */ - public synchronized static ProtoIndexComponentSerialiser get(int fmtid, LiveArchiver, SimpleProgress> archiver) { - ProtoIndexComponentSerialiser srl = srl_fmt.get(fmtid); - if (srl == null || srl.leaf_arx != null && srl.leaf_arx != archiver) { - srl = new ProtoIndexComponentSerialiser(fmtid, archiver); - if(archiver == null) - srl_fmt.put(fmtid, srl); - } - return srl; - } - - /** - ** Get the instance responsible for the default format ID. - ** - ** @throws IllegalStateException if the requirements for creating the - ** instance (eg. existence of a freenet node) are not met. - */ - public static ProtoIndexComponentSerialiser get() { - return get(FMT_DEFAULT, null); - } - - /** - ** Unique code for each instance of this class. - */ - final protected int serialFormatUID; - - /** - ** Archiver for the lowest level (leaf). - */ - final protected LiveArchiver, SimpleProgress> - leaf_arx; - - public LiveArchiver, SimpleProgress> getLeafSerialiser() { - return leaf_arx; - } - - /** - ** Serialiser for the ''targets'' of the values stored in a node of the - ** ''term table''. In this case, each target is the root node of the - ** ''B-tree'' that holds ''term entries'' for the ''term'' mapping to the - ** value. - */ - final protected BTreePacker, EntryGroupSerialiser>> - ttab_data; - - /** - ** Serialiser for the ''targets'' of the values stored in a node of the - ** ''uri table''. In this case, each target is the root node of the - ** ''B-tree'' that holds ''uri-entry mappings'' for the ''urikey'' mapping - ** to the value. - */ - final protected BTreePacker, EntryGroupSerialiser>> - utab_data; - - /** - ** Constructs a new instance using the given format. - * @param archiver - ** - ** @throws UnsupportedOperationException if the format ID is unrecognised - ** @throws IllegalStateException if the requirements for creating the - ** instance (eg. existence of a freenet node) are not met. 
- */ - protected ProtoIndexComponentSerialiser(int fmtid, LiveArchiver, SimpleProgress> archiver) { - if(archiver != null) { - leaf_arx = archiver; - } else { - switch (fmtid) { - case FMT_FREENET_SIMPLE: - short priorityClass = RequestStarter.INTERACTIVE_PRIORITY_CLASS; - if(archiver instanceof FreenetArchiver) - priorityClass = ((FreenetArchiver)archiver).priorityClass; - leaf_arx = Library.makeArchiver(yamlrw, ProtoIndex.MIME_TYPE, 0x180 * ProtoIndex.BTREE_NODE_MIN, priorityClass); - break; - case FMT_FILE_LOCAL: - leaf_arx = new FileArchiver>(yamlrw, true, YamlReaderWriter.FILE_EXTENSION, "", "", null); - break; - default: - throw new UnsupportedOperationException("Unknown serial format id"); - } - } - - serialFormatUID = fmtid; - - ttab_data = new BTreePacker, EntryGroupSerialiser>>( - new EntryGroupSerialiser>( - leaf_arx, - null, - new SkeletonBTreeSet.TreeTranslator(null, term_data_mtr) { - @Override public SkeletonBTreeSet rev(Map tree) throws DataFormatException { - return setSerialiserFor(super.rev(tree)); - } - } - ), - term_data_scale, - ProtoIndex.BTREE_ENT_MAX - ); - - utab_data = new BTreePacker, EntryGroupSerialiser>>( - new EntryGroupSerialiser>( - leaf_arx, - null, - new SkeletonBTreeMap.TreeTranslator(null, null) { - @Override public SkeletonBTreeMap rev(Map tree) throws DataFormatException { - return setSerialiserFor(super.rev(tree)); - } - } - ), - uri_data_scale, - ProtoIndex.BTREE_ENT_MAX - ); - } - - /** - ** Set the serialisers for the ''uri table'' and the ''term table'' on an - ** index. - */ - public ProtoIndex setSerialiserFor(ProtoIndex index) { - // set serialisers on the ttab - BTreeNodeSerialiser> ttab_keys = new BTreeNodeSerialiser>( - "term listings", - leaf_arx, - index.ttab.makeNodeTranslator(null, ttab_keys_mtr) - ); - index.ttab.setSerialiser(ttab_keys, ttab_data); - - // set serialisers on the utab - BTreeNodeSerialiser> utab_keys = new BTreeNodeSerialiser>( - "uri listings", - leaf_arx, - index.utab.makeNodeTranslator(utab_keys_ktr, utab_keys_mtr) - ); - index.utab.setSerialiser(utab_keys, utab_data); - - // set serialiser on the index - index.setSerialiser(this); - return index; - } - - /** - ** Set the serialiser for the ''B-tree'' that holds the ''uri-entry - ** mappings'' for a ''urikey''. - */ - public SkeletonBTreeMap setSerialiserFor(SkeletonBTreeMap entries) { - BTreeNodeSerialiser uri_keys = new BTreeNodeSerialiser( - "uri entries", - leaf_arx, - entries.makeNodeTranslator(null, null) // no translator needed as FreenetURI and URIEntry are both directly serialisable by YamlReaderWriter - ); - entries.setSerialiser(uri_keys, uri_dummy); - return entries; - } - - /** - ** Set the serialiser for the ''B-tree'' that holds the ''term entries'' - ** for a ''term''. - */ - public SkeletonBTreeSet setSerialiserFor(SkeletonBTreeSet entries) { - BTreeNodeSerialiser term_keys = new BTreeNodeSerialiser( - "term entries", - leaf_arx, - entries.makeNodeTranslator(null, term_data_mtr) - ); - entries.setSerialiser(term_keys, term_dummy); - return entries; - } - - - /************************************************************************ - ** Generic {@link SkeletonTreeMap} translator. 
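- **
- ** {@code app()} copies the skeleton's entries into a plain {@link TreeMap};
- ** {@code rev()} rebuilds a {@link SkeletonTreeMap}. The optional key
- ** translator, when non-null, is applied to keys in both directions.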
- ** - ** @author infinity0 - */ - public static class TreeMapTranslator - extends SkeletonTreeMap.TreeMapTranslator { - - final protected Translator ktr; - - public TreeMapTranslator(Translator k) { - ktr = k; - } - - /*@Override**/ public Map app(SkeletonTreeMap map) { - return app(map, new TreeMap(), ktr); - } - - /*@Override**/ public SkeletonTreeMap rev(Map map) throws DataFormatException { - return rev(map, new SkeletonTreeMap(), ktr); - } - } - - - /************************************************************************ - ** Serialiser for a general B-tree node. This can be used for both a - ** {@link SkeletonBTreeMap} and a {@link SkeletonBTreeSet}; they use the - ** same node class. - ** - ** @author infinity0 - */ - public static class BTreeNodeSerialiser - extends ParallelSerialiser.SkeletonNode, SimpleProgress> - implements Archiver.SkeletonNode>, - Serialiser.Translate.SkeletonNode, Map>, - Serialiser.Composite, SimpleProgress>> { - - final protected Translator.SkeletonNode, Map> trans; - final protected LiveArchiver, SimpleProgress> subsrl; - - protected String name; - - /** - ** Constructs a new serialiser. A new one must be constructed for each - ** BTree being serialised. - ** - ** @param n Description of what the map stores. This is used in the - ** progress report. - ** @param t Translator for the node - create this using {@link - ** SkeletonBTreeMap#makeNodeTranslator(Translator, Translator)}. - */ - public BTreeNodeSerialiser(String n, LiveArchiver, SimpleProgress> s, SkeletonBTreeMap.NodeTranslator t) { - super(new ProgressTracker.SkeletonNode, SimpleProgress>(SimpleProgress.class)); - subsrl = s; - trans = t; - name = n; - } - - /*@Override**/ public Translator.SkeletonNode, Map> getTranslator() { - return trans; - } - - /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { - return subsrl; - } - - /*@Override**/ public void pullLive(PullTask.SkeletonNode> task, SimpleProgress p) throws TaskAbortException { - p.enteredSerialiser(); - try { - SkeletonBTreeMap.GhostNode ghost = (SkeletonBTreeMap.GhostNode)task.meta; - p.setSubject("Pulling " + name + ": " + ghost.getRange()); - PullTask> serialisable = new PullTask>(ghost.getMeta()); - subsrl.pullLive(serialisable, p); - ghost.setMeta(serialisable.meta); task.data = trans.rev(serialisable.data); - p.exitingSerialiser(); - } catch (RuntimeException e) { - p.abort(new TaskAbortException("Could not pull B-tree node", e)); - } catch (DataFormatException e) { - p.abort(new TaskAbortException("Could not pull B-tree node", e)); - } - } - - /*@Override**/ public void pushLive(PushTask.SkeletonNode> task, SimpleProgress p) throws TaskAbortException { - p.enteredSerialiser(); - try { - p.setSubject("Pushing " + name + ": " + task.data.getRange()); - Map intermediate = trans.app(task.data); - PushTask> serialisable = new PushTask>(intermediate, task.meta); - subsrl.pushLive(serialisable, p); - task.meta = task.data.makeGhost(serialisable.meta); - p.exitingSerialiser(); - } catch (RuntimeException e) { - p.abort(new TaskAbortException("Could not push B-tree node", e)); - } - } - - } - - - /************************************************************************ - ** Serialiser for the entries contained within a B-tree node. 
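- **
- ** Values are weighed by the supplied {@link Packer.Scale} and packed into
- ** bins of at most the given capacity, so that many small root containers
- ** can be grouped into a single insert.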
- ** - ** @author infinity0 - */ - public static class BTreePacker> & Serialiser.Trackable>> - extends Packer - implements MapSerialiser, - Serialiser.Trackable { - - final protected ProgressTracker tracker; - final protected S subsrl; - - public BTreePacker(S s, Packer.Scale sc, int cap) { - super(s, sc, cap); - subsrl = s; - tracker = new ProgressTracker(BaseCompositeProgress.class); - } - - /*@Override**/ public ProgressTracker getTracker() { - return tracker; - } - - @Override protected void preprocessPullBins(Map> tasks, Collection>> bintasks) { - for (Map.Entry> en: tasks.entrySet()) { - try { - BaseCompositeProgress p = tracker.addPullProgress(en.getValue()); - p.setSubProgress(ProgressTracker.makePullProgressIterable(subsrl.getTracker(), bintasks)); - p.setEstimate(ProgressParts.TOTAL_FINALIZED); - p.setSubject("Pulling root container for " + en.getKey()); - } catch (TaskInProgressException e) { - throw new AssertionError(e); - } - } - } - - @Override protected void preprocessPushBins(Map> tasks, Collection>> bintasks) { - for (Map.Entry> en: tasks.entrySet()) { - try { - BaseCompositeProgress p = tracker.addPushProgress(en.getValue()); - p.setSubProgress(ProgressTracker.makePushProgressIterable(subsrl.getTracker(), bintasks)); - p.setEstimate(ProgressParts.TOTAL_FINALIZED); - p.setSubject("Pushing root container for " + en.getKey()); - } catch (TaskInProgressException e) { - throw new AssertionError(e); - } - } - } - - } - - - /************************************************************************ - ** Serialiser for the bins containing (B-tree root nodes) that was packed - ** by the {@link BTreePacker}. - ** - ** @author infinity0 - */ - public static class EntryGroupSerialiser - extends ParallelSerialiser, SimpleProgress> - implements IterableSerialiser>, - Serialiser.Composite, SimpleProgress>> { - - final protected LiveArchiver, SimpleProgress> subsrl; - final protected Translator ktr; - final protected Translator> btr; - - public EntryGroupSerialiser(LiveArchiver, SimpleProgress> s, Translator k, Translator> b) { - super(new ProgressTracker, SimpleProgress>(SimpleProgress.class)); - subsrl = s; - ktr = k; - btr = b; - } - - /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { - return subsrl; - } - - /*@Override**/ public void pullLive(PullTask> task, SimpleProgress p) throws TaskAbortException { - p.enteredSerialiser(); - try { - p.setSubject("Pulling root container " + task.meta); - PullTask> t = new PullTask>(task.meta); - subsrl.pullLive(t, p); - - Map map = new HashMap(t.data.size()<<1); - try { - for (Map.Entry en: t.data.entrySet()) { - map.put((ktr == null)? (K)en.getKey(): ktr.rev(en.getKey()), btr.rev((Map)en.getValue())); - } - } catch (ClassCastException e) { - // FIXME NORM more meaningful error message - throw new DataFormatException("Exception in converting data", e, t.data, null, null); - } - - task.data = map; - p.exitingSerialiser(); - } catch (RuntimeException e) { - p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); - } catch (DataFormatException e) { - p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); - } - } - - /*@Override**/ public void pushLive(PushTask> task, SimpleProgress p) throws TaskAbortException { - p.enteredSerialiser(); - try { - p.setSubject("Pushing root container for keys " + task.data); - Map conv = new HashMap(); - for (Map.Entry mp: task.data.entrySet()) { - conv.put((ktr == null)? 
(String)mp.getKey(): ktr.app(mp.getKey()), btr.app(mp.getValue())); - } - PushTask> t = new PushTask>(conv, task.meta); - subsrl.pushLive(t, p); - task.meta = t.meta; - p.exitingSerialiser(); - } catch (RuntimeException e) { - p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); - } - } - - } - - - /************************************************************************ - ** Dummy serialiser. - ** - ** A SkeletonBTreeMap node was designed to hold values externally, whereas - ** (a B-tree node in the (BTreeSet container for a term's results)) and - ** (a B-tree node in the (BTreeMap container for a uri's results)) hold - ** them internally. This class simplifies the logic required; see the - ** source code of {@link SkeletonBTreeMap} for more details. - ** - ** @deprecated Avoid using this class. It will be removed after option 1 - ** (see the source code of {@link SkeletonBTreeMap}) is coded. - ** @author infinity0 - */ - public static class DummySerialiser - implements IterableSerialiser, - MapSerialiser { - - - public void pull(PullTask task) { - task.data = (T)task.meta; - } - - public void push(PushTask task) { - task.meta = task.data; - } - - public void pull(Iterable> tasks) { - for (PullTask task: tasks) { - task.data = (T)task.meta; - } - } - - public void push(Iterable> tasks) { - for (PushTask task: tasks) { - task.meta = task.data; - } - } - - public void pull(Map> tasks, Object mapmeta) { - for (PullTask task: tasks.values()) { - if(task.meta != null) - task.data = (T)task.meta; - } - } - - public void push(Map> tasks, Object mapmeta) { - for (PushTask task: tasks.values()) { - if(task.data != null) - task.meta = task.data; - } - } - - } - - - -} diff --git a/src/plugins/Library/index/ProtoIndexSerialiser.java b/src/plugins/Library/index/ProtoIndexSerialiser.java deleted file mode 100644 index 09c1a10a..00000000 --- a/src/plugins/Library/index/ProtoIndexSerialiser.java +++ /dev/null @@ -1,230 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.index; - -import plugins.Library.Library; -import plugins.Library.client.FreenetArchiver; -import plugins.Library.util.SkeletonBTreeMap; -import plugins.Library.util.SkeletonBTreeSet; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.LiveArchiver; -import plugins.Library.io.serial.Serialiser; -import plugins.Library.io.serial.Translator; -import plugins.Library.io.serial.Archiver; -import plugins.Library.io.serial.FileArchiver; -import plugins.Library.io.YamlReaderWriter; -import plugins.Library.io.DataFormatException; - -import freenet.keys.FreenetURI; - -import java.util.Collection; -import java.util.Set; -import java.util.Map; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.LinkedHashMap; -import java.util.HashMap; -import java.util.TreeSet; -import java.util.TreeMap; -import java.util.Date; -import java.io.File; - -/** -** Serialiser for ProtoIndex -** -** DOCUMENT -** -** @author infinity0 -*/ -public class ProtoIndexSerialiser -implements Archiver, - Serialiser.Composite>>, // TODO NORM make this a LiveArchiver - Serialiser.Translate>/*, - Serialiser.Trackable*/ { - - final public static String MIME_TYPE = YamlReaderWriter.MIME_TYPE; - final public static String FILE_EXTENSION = YamlReaderWriter.FILE_EXTENSION; - - final protected Translator> - trans; - - final protected LiveArchiver, SimpleProgress> - subsrl; - - public ProtoIndexSerialiser(LiveArchiver, SimpleProgress> s) { - subsrl = s; - trans = new IndexTranslator(subsrl); - } - - /* FIXME HIGH: Parallelism in fetching multiple words for the same query. - * A single serialiser means when we fetch two words for the same query, and they both end up in the same - * bucket, we get an AssertionError when we fetch the bucket twice in ProgressTracker.addPullProgress. - * So the solution, for the time being, is simply to use two separate serialisers. */ - -// final protected static HashMap, ProtoIndexSerialiser> -// srl_cls = new HashMap, ProtoIndexSerialiser>(); - - public static ProtoIndexSerialiser forIndex(Object o, short priorityClass) { - if (o instanceof FreenetURI) { - return forIndex((FreenetURI)o, priorityClass); - } else if (o instanceof File) { - return forIndex((File)o); - } else { - throw new UnsupportedOperationException("Don't know how to retrieve index for object " + o); - } - } - - public static ProtoIndexSerialiser forIndex(FreenetURI uri, short priorityClass) { -// ProtoIndexSerialiser srl = srl_cls.get(FreenetURI.class); -// if (srl == null) { -// // java's type-inference isn't that smart, see -// FreenetArchiver> arx = Library.makeArchiver(ProtoIndexComponentSerialiser.yamlrw, MIME_TYPE, 0x80 * ProtoIndex.BTREE_NODE_MIN); -// srl_cls.put(FreenetURI.class, srl = new ProtoIndexSerialiser(arx)); -// } -// return srl; - - // One serialiser per application. See comments above re srl_cls. 
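- // Deliberately not cached: each query needs its own serialiser, otherwise
- // two words of one query that land in the same bucket trip the
- // AssertionError in ProgressTracker.addPullProgress (see the FIXME above).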
- // java's type-inference isn't that smart, see - FreenetArchiver> arx = Library.makeArchiver(ProtoIndexComponentSerialiser.yamlrw, MIME_TYPE, 0x80 * ProtoIndex.BTREE_NODE_MIN, priorityClass); - return new ProtoIndexSerialiser(arx); - } - - public static ProtoIndexSerialiser forIndex(File prefix) { -// ProtoIndexSerialiser srl = srl_cls.get(File.class); -// if (srl == null) { -// srl_cls.put(File.class, srl = new ProtoIndexSerialiser(new FileArchiver>(ProtoIndexComponentSerialiser.yamlrw, true, FILE_EXTENSION))); -// } -// return srl; - - // One serialiser per application. See comments above re srl_cls. - return new ProtoIndexSerialiser(new FileArchiver>(ProtoIndexComponentSerialiser.yamlrw, true, FILE_EXTENSION, "", "", prefix)); - } - - /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { - return subsrl; - } - - /*@Override**/ public Translator> getTranslator() { - return trans; - } - - /*@Override**/ public void pull(PullTask task) throws TaskAbortException { - PullTask> serialisable = new PullTask>(task.meta); - subsrl.pull(serialisable); - task.meta = serialisable.meta; - if (task.meta instanceof FreenetURI) { // if not FreenetURI, skip this silently so we can test on local files - serialisable.data.put("reqID", task.meta); - } - try { - task.data = trans.rev(serialisable.data); - } catch (DataFormatException e) { - throw new TaskAbortException("Could not construct index from data", e); - } - } - - /*@Override**/ public void push(PushTask task) throws TaskAbortException { - PushTask> serialisable = new PushTask>(trans.app(task.data)); - serialisable.meta = serialisable.data.remove("insID"); - subsrl.push(serialisable); - task.meta = serialisable.meta; - } - - public static class IndexTranslator - implements Translator> { - - /** - ** Term-table translator - */ - Translator>, Map> ttrans = new - SkeletonBTreeMap.TreeTranslator>(null, new - ProtoIndexComponentSerialiser.TreeMapTranslator>(null)); - - /** - ** URI-table translator - */ - Translator>, Map> utrans = new - SkeletonBTreeMap.TreeTranslator>(null, new - ProtoIndexComponentSerialiser.TreeMapTranslator>(null)); - - private LiveArchiver, SimpleProgress> subsrl; - - public IndexTranslator(LiveArchiver, SimpleProgress> subsrl) { - this.subsrl = subsrl; - } - - /** - ** {@inheritDoc} - ** - ** Note: the resulting map will contain the insert SSK URI under the - ** key {@code insID}. The intention is for push methods to remove this - ** and add it to the task metadata. '''Failure to do this could result - ** in the insert URI being published'''. - ** - ** FIXME NORM maybe make this more secure, eg. wrap it in a - ** UnserialisableWrapper or something that makes YAML throw an - ** exception if it is accidentally passed to it. - */ - /*@Override**/ public Map app(ProtoIndex idx) { - if (!idx.ttab.isBare() || !idx.utab.isBare()) { - throw new IllegalArgumentException("Data structure is not bare. 
Try calling deflate() first."); - } - Map map = new LinkedHashMap(); - map.put("serialVersionUID", idx.serialVersionUID); - map.put("serialFormatUID", idx.serialFormatUID); - map.put("insID", idx.insID); - map.put("name", idx.name); - map.put("ownerName", idx.indexOwnerName); - map.put("ownerEmail", idx.indexOwnerEmail); - map.put("totalPages", new Long(idx.totalPages)); - map.put("modified", idx.modified); - map.put("extra", idx.extra); - map.put("utab", utrans.app(idx.utab)); - map.put("ttab", ttrans.app(idx.ttab)); - return map; - } - - /*@Override**/ public ProtoIndex rev(Map map) throws DataFormatException { - long magic = (Long)map.get("serialVersionUID"); - - if (magic == ProtoIndex.serialVersionUID) { - try { - // FIXME yet more hacks related to the lack of proper asynchronous FreenetArchiver... - ProtoIndexComponentSerialiser cmpsrl = ProtoIndexComponentSerialiser.get((Integer)map.get("serialFormatUID"), subsrl); - FreenetURI reqID = (FreenetURI)map.get("reqID"); - String name = (String)map.get("name"); - String ownerName = (String)map.get("ownerName"); - String ownerEmail = (String)map.get("ownerEmail"); - // FIXME yaml idiocy??? It seems to give a Long if the number is big enough to need one, and an Integer otherwise. - long totalPages; - Object o = map.get("totalPages"); - if(o instanceof Long) - totalPages = (Long)o; - else // Integer - totalPages = (Integer)o; - Date modified = (Date)map.get("modified"); - Map extra = (Map)map.get("extra"); - SkeletonBTreeMap> utab = utrans.rev((Map)map.get("utab")); - SkeletonBTreeMap> ttab = ttrans.rev((Map)map.get("ttab")); - - return cmpsrl.setSerialiserFor(new ProtoIndex(reqID, name, ownerName, ownerEmail, totalPages, modified, extra, utab, ttab)); - - } catch (ClassCastException e) { - // TODO LOW maybe find a way to pass the actual bad data to the exception - throw new DataFormatException("Badly formatted data", e, null); - - } catch (UnsupportedOperationException e) { - throw new DataFormatException("Unrecognised format ID", e, map.get("serialFormatUID"), map, "serialFormatUID"); - - } - - } else { - throw new DataFormatException("Unrecognised serial ID", null, magic, map, "serialVersionUID"); - } - } - - } - -} diff --git a/src/plugins/Library/index/TermEntry.java b/src/plugins/Library/index/TermEntry.java deleted file mode 100644 index e5832b5a..00000000 --- a/src/plugins/Library/index/TermEntry.java +++ /dev/null @@ -1,137 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -/** -** Represents data associated with a given subject {@link String} term. -** -** Subclasses '''must''' override the following methods to use information -** specific to that subclass: -** -** * {@link #entryType()} -** * {@link #compareTo(TermEntry)} -** * {@link #equals(Object)} -** * {@link #hashCode()} -** -** @author infinity0 -*/ -abstract public class TermEntry implements Comparable { - - final static long serialVersionUID = 0xF23194B7F015560CL; - - public enum EntryType { - INDEX, TERM, PAGE - }; - - /** - ** Subject term of this entry. - */ - final public String subj; - - /** - ** Relevance rating. Must be in the half-closed interval (0,1]. 
- ** A relevance of 0 indicates that the relevance is unset.
- */
- final public float rel;
-
- public TermEntry(String s, float r) {
- if (s == null) {
- throw new IllegalArgumentException("can't have a null subject!");
- }
- if (r < 0/* || r > 1*/) { // FIXME: I don't see how our relevance algorithm can be guaranteed to produce relevance <1.
- throw new IllegalArgumentException("Relevance must be in the half-closed interval (0,1]. Supplied: " + r);
- }
- subj = s.intern();
- rel = r;
- }
-
- public TermEntry(TermEntry t, float newRel) {
- this.subj = t.subj;
- this.rel = newRel;
- }
-
- /**
- ** Returns the type of TermEntry. This '''must''' be constant for
- ** instances of the same class, and different between classes.
- */
- abstract public EntryType entryType();
-
- /**
- ** {@inheritDoc}
- **
- ** Compares two entries, based on how useful they might be to the end user.
- **
- ** This implementation sorts by order of descending relevance, then by
- ** order of ascending {@link #entryType()}.
- **
- ** Subclasses '''must override this method''' to also use information
- ** specific to the subclass.
- **
- ** @throws IllegalArgumentException if the entries have different subjects
- */
- /*@Override**/ public int compareTo(TermEntry o) {
- if (this == o) { return 0; }
- int c = subj.compareTo(o.subj);
- if (c != 0) { return c; }
- if (rel != o.rel) { return (rel > o.rel)? -1: 1; }
-
- EntryType a = entryType(), b = o.entryType();
- return a.compareTo(b);
- }
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation tests whether the run-time class of the argument
- ** is identical to the run-time class of this object. If it is, it then
- ** tests the relevance and subject fields.
- **
- ** Subclasses '''must override this method''' to also use information
- ** specific to the subclass.
- */
- @Override public boolean equals(Object o) {
- if (o == null || getClass() != o.getClass()) { return false; }
- TermEntry en = (TermEntry)o;
- return rel == en.rel && subj.equals(en.subj);
- }
-
- /**
- ** Returns whether the parts of this TermEntry other than the subject are equal.
- **
- ** Subclasses '''must override this method''' to use information
- ** specific to the subclass.
- */
- public abstract boolean equalsTarget(TermEntry entry);
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation XORs the hashcode of {@link #subj} and {@link #rel}.
- **
- ** Subclasses '''must override this method''' to also use information
- ** specific to the subclass.
- */
- @Override public int hashCode() {
- return subj.hashCode() ^ Float.floatToIntBits(rel);
- }
-
- @Override public String toString() {
- return subj + ":" + rel;
- }
-
-
- /*
- ** Calculates an accumulated score to sort the entry by, using the formula
- ** relevance^3 * quality; in other words, relevance is (for sorting
- ** results) 3 times as important as quality. For example, for (rel, qual)
- ** we have (0.85, 0.95) < (1.0, 0.6) < (0.85, 1.00)
- * /
- public float score() {
- if (score_ == null) {
- score_ = new Float(rel * rel * rel * qual);
- }
- return score_;
- }*/
-
-}
diff --git a/src/plugins/Library/index/TermEntryReaderWriter.java b/src/plugins/Library/index/TermEntryReaderWriter.java
deleted file mode 100644
index 1b73fd36..00000000
--- a/src/plugins/Library/index/TermEntryReaderWriter.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL.
*/ -package plugins.Library.index; - -import plugins.Library.io.DataFormatException; -import plugins.Library.io.ObjectStreamReader; -import plugins.Library.io.ObjectStreamWriter; - -import freenet.keys.FreenetURI; - -import java.util.Map; -import java.util.HashMap; - -import java.io.InputStream; -import java.io.OutputStream; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; - -/** -** Reads and writes {@link TermEntry}s in binary form, for performance. -** -** @author infinity0 -*/ -public class TermEntryReaderWriter implements ObjectStreamReader, ObjectStreamWriter { - - final private static TermEntryReaderWriter instance = new TermEntryReaderWriter(); - - protected TermEntryReaderWriter() {} - - public static TermEntryReaderWriter getInstance() { - return instance; - } - - /*@Override**/ public TermEntry readObject(InputStream is) throws IOException { - return readObject(new DataInputStream(is)); - } - - public TermEntry readObject(DataInputStream dis) throws IOException { - long svuid = dis.readLong(); - if (svuid != TermEntry.serialVersionUID) { - throw new DataFormatException("Incorrect serialVersionUID", null, svuid); - } - int type = dis.readInt(); - String subj = dis.readUTF(); - float rel = dis.readFloat(); - TermEntry.EntryType[] types = TermEntry.EntryType.values(); - if (type < 0 || type >= types.length) { - throw new DataFormatException("Unrecognised entry type", null, type); - } - switch (types[type]) { - case TERM: - return new TermTermEntry(subj, rel, dis.readUTF()); - case INDEX: - return new TermIndexEntry(subj, rel, FreenetURI.readFullBinaryKeyWithLength(dis)); - case PAGE: - FreenetURI page = FreenetURI.readFullBinaryKeyWithLength(dis); - int size = dis.readInt(); - String title = null; - if (size < 0) { - title = dis.readUTF(); - size = ~size; - } - Map pos = new HashMap(size<<1); - for (int i=0; i p : enn.positionsMap().entrySet()) { - dos.writeInt(p.getKey()); - if(p.getValue() == null) - dos.writeUTF(""); - else - dos.writeUTF(p.getValue()); - } - } else { - for(int x : enn.positionsRaw()) { - dos.writeInt(x); - dos.writeUTF(""); - } - } - } - return; - } - } - -} diff --git a/src/plugins/Library/index/TermIndexEntry.java b/src/plugins/Library/index/TermIndexEntry.java deleted file mode 100644 index a239c9d9..00000000 --- a/src/plugins/Library/index/TermIndexEntry.java +++ /dev/null @@ -1,58 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -import plugins.Library.index.TermEntry.EntryType; - -import freenet.keys.FreenetURI; - -/** -** A {@link TermEntry} that associates a subject term with another index. -** -** @author infinity0 -*/ -public class TermIndexEntry extends TermEntry { - - /** - ** Index target of this entry. 
- */ - final public FreenetURI index; - - public TermIndexEntry(String s, float r, FreenetURI i) { - super(s, r); - if (i == null) { - throw new IllegalArgumentException("can't have a null index"); - } - index = i.intern(); - } - - /*======================================================================== - abstract public class TermEntry - ========================================================================*/ - - @Override public EntryType entryType() { - assert(getClass() == TermIndexEntry.class); - return EntryType.INDEX; - } - - @Override public int compareTo(TermEntry o) { - int a = super.compareTo(o); - if (a != 0) { return a; } - // OPT NORM make a more efficient way of comparing these - return index.toString().compareTo(((TermIndexEntry)o).index.toString()); - } - - @Override public boolean equals(Object o) { - return o == this || super.equals(o) && index.equals(((TermIndexEntry)o).index); - } - - @Override public boolean equalsTarget(TermEntry entry) { - return entry == this || (entry instanceof TermIndexEntry) && index.equals(((TermIndexEntry)entry).index); - } - - @Override public int hashCode() { - return super.hashCode() ^ index.hashCode(); - } - -} diff --git a/src/plugins/Library/index/TermPageEntry.java b/src/plugins/Library/index/TermPageEntry.java deleted file mode 100644 index 31604ffe..00000000 --- a/src/plugins/Library/index/TermPageEntry.java +++ /dev/null @@ -1,240 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -import plugins.Library.index.TermEntry.EntryType; - -import freenet.keys.FreenetURI; -import freenet.support.SortedIntSet; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; - -/** -** A {@link TermEntry} that associates a subject term with a final target -** {@link FreenetURI} that satisfies the term. -** -** @author infinity0 -*/ -public class TermPageEntry extends TermEntry { - - /** - ** URI of the target - */ - final public FreenetURI page; - - /** Positions where the term occurs. May be null if we don't have that data. - * Specified as SortedSet for ObjectBlueprint but will really always be a SortedIntSet. */ - final public Set positions; - - /** - ** Map from positions in the text to a fragment of text around where it occurs. - ** Only non-null if we have the fragments of text (we may have positions but not details), - ** to save memory. - */ - final public Map posFragments; - - /** - ** Here for backwards-compatibility with the old URIWrapper class. - */ - final public String title; - - /** - ** Standard constructor. - ** - ** @param s Subject of the entry - ** @param r Relevance of the entry - ** @param u {@link FreenetURI} of the page - ** @param p Map of positions (where the term appears) to context (fragment - ** surrounding it). - */ - public TermPageEntry(String s, float r, FreenetURI u, Map p) { - this(s, r, u, (String)null, p); - } - - /** - ** Extended constructor with additional {@code title} field for old-style - ** indexes. - ** - ** @param s Subject of the entry - ** @param r Relevance of the entry - ** @param u {@link FreenetURI} of the page - ** @param t Title or description of the page - ** @param p Map of positions (where the term appears) to context (fragment - ** surrounding it). 
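- **
- ** For illustration (hypothetical values): {@code new TermPageEntry("freenet",
- ** 0.8f, uri, "Freenet homepage", positions)}; {@code p} may be null when no
- ** position data is available.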
- */ - public TermPageEntry(String s, float r, FreenetURI u, String t, Map p) { - super(s, r); - if (u == null) { - throw new IllegalArgumentException("can't have a null page"); - } - page = u.intern(); // OPT LOW make the translator use the same URI object as from the URI table? - title = t == null ? null : t.intern(); - if(p == null) { - posFragments = null; - positions = null; - } else { - posFragments = Collections.unmodifiableMap(p); - int[] pos = new int[p.size()]; - int x = 0; - for(Integer i : p.keySet()) - pos[x++] = i; - Arrays.sort(pos); - positions = new SortedIntSet(pos); - } - } - - /** - ** For serialisation. - */ - public TermPageEntry(String s, float r, FreenetURI u, String t, Set pos, Map frags) { - super(s, r); - if (u == null) { - throw new IllegalArgumentException("can't have a null page"); - } - page = u.intern(); // OPT LOW make the translator use the same URI object as from the URI table? - title = t; - if(pos != null) { - if(pos instanceof SortedIntSet) - this.positions = (SortedIntSet) pos; - else { - Integer[] p = pos.toArray(new Integer[pos.size()]); - int[] pp = new int[p.length]; - for(int i=0;i positionsMap() { - if(positions == null) return null; - if(posFragments != null) return posFragments; - HashMap ret = new HashMap(positions.size()); - if(positions instanceof SortedIntSet) { - int[] array = ((SortedIntSet)positions).toArrayRaw(); - for(int x : array) - ret.put(x, null); - return ret; - } else { - Integer[] array = positions.toArray(new Integer[positions.size()]); - if(!(positions instanceof SortedSet)) - Arrays.sort(array); - for(int x : array) - ret.put(x, null); - return ret; - } - } - - public boolean hasPosition(int i) { - return positions.contains(i); - } - - public ArrayList positions() { - if(positions instanceof SortedIntSet) { - int[] array = ((SortedIntSet)positions).toArrayRaw(); - ArrayList pos = new ArrayList(array.length); - for(int x : array) - pos.add(x); - return pos; - } else { - Integer[] array = positions.toArray(new Integer[positions.size()]); - if(!(positions instanceof SortedSet)) - Arrays.sort(array); - ArrayList ret = new ArrayList(positions.size()); - for(int i=0;i terms; - - public URIEntry(FreenetURI u) { - subject = u; - date_checked = new Date(); - terms = new HashSet(); - } - - public FreenetURI getSubject() { - return subject; - } - - public void setSubject(FreenetURI u) { - subject = u; - } - - public float getQuality() { - return qual; - } - - public void setQuality(float q) { - if (q < 0 || q > 1) { - throw new IllegalArgumentException("Relevance must be in the closed interval [0,1]."); - } - qual = q; - } - - public Set getTerms() { - return terms; - } - - public void setTerms(Set t) { - terms = (t == null)? new HashSet(): t; - } - -} diff --git a/src/plugins/Library/index/URIKey.java b/src/plugins/Library/index/URIKey.java deleted file mode 100644 index f4c217a1..00000000 --- a/src/plugins/Library/index/URIKey.java +++ /dev/null @@ -1,48 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -import plugins.Library.util.BytePrefixKey; - -import freenet.keys.FreenetURI; -import freenet.keys.BaseClientKey; -import freenet.keys.ClientKey; - -/** -** A {@link BytePrefixKey} backed by the 32-byte routing key for the {@link -** freenet.keys.Key NodeKey} constructed from a {@link FreenetURI}. 
-** -** @author infinity0 -*/ -public class URIKey extends BytePrefixKey { - - public URIKey() { - super(0x20); - } - - public URIKey(byte[] h) { - super(0x20, h); - } - - public URIKey(FreenetURI u) throws java.net.MalformedURLException { - super(0x20, getNodeRoutingKey(u)); - } - - public static byte[] getNodeRoutingKey(FreenetURI u) throws java.net.MalformedURLException { - try { - return ((ClientKey)BaseClientKey.getBaseKey(u.isUSK()? u.sskForUSK(): u)).getNodeKey().getRoutingKey(); - } catch (ClassCastException e) { - throw new UnsupportedOperationException("Could not get the node routing key for FreenetURI " + u + ". Only CHK/SSK/USK/KSKs are supported."); - } - } - - /*======================================================================== - public interface BytePrefixKey - ========================================================================*/ - - @Override public URIKey clone() { - return new URIKey(hash); - } - -} diff --git a/src/plugins/Library/index/package-info.java b/src/plugins/Library/index/package-info.java deleted file mode 100644 index 4afd4344..00000000 --- a/src/plugins/Library/index/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains implementations of the index interfaces from the library package, -** as well as classes representing data held within an index. -** -** @author infinity0 -*/ -package plugins.Library.index; diff --git a/src/plugins/Library/index/xml/FindRequest.java b/src/plugins/Library/index/xml/FindRequest.java deleted file mode 100644 index 74550fcb..00000000 --- a/src/plugins/Library/index/xml/FindRequest.java +++ /dev/null @@ -1,224 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index.xml; - -import freenet.client.events.ClientEvent; -import freenet.client.events.ExpectedFileSizeEvent; -import freenet.client.events.ExpectedHashesEvent; -import freenet.client.events.ExpectedMIMEEvent; -import freenet.client.events.SendingToNetworkEvent; -import freenet.client.events.SplitfileCompatibilityModeEvent; -import freenet.client.events.SplitfileProgressEvent; -import freenet.support.Logger; - -import java.util.Collections; -import java.util.ArrayList; -import java.util.Set; - -import plugins.Library.util.exec.AbstractExecution; -import plugins.Library.util.exec.ChainedProgress; -import plugins.Library.util.exec.Execution; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.index.TermPageEntry; -import plugins.Library.index.TermEntry; - - -/** - * An {@link Execution} implementation for xmlindex operations which have distinct parts working in serial - * eg a search for 1 term on 1 index. 
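- * A request moves through the stages UNSTARTED, FETCHROOT, FETCHSUBINDEX,
- * PARSE and DONE (see {@link Stages}); overall progress is reported from the
- * ordinal of the stage currently running.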
Only used for XMLIndex.
- * @author MikeB
- */
-public class FindRequest extends AbstractExecution> implements Comparable, ChainedProgress, Execution> {
- private Set resultnotfinished;
- private SubProgress currentProgress;
-
- public enum Stages {
- UNSTARTED("Not Started"),
- FETCHROOT("Fetching Index Root"),
- FETCHSUBINDEX("Fetching Subindex"),
- PARSE("Parsing Subindex"),
- DONE("Done");
-
- public final String description;
- Stages(String desc){
- this.description = desc;
- }
- };
-
- /**
- * Create a request for the stated subject.
- * @param subject
- */
- public FindRequest(String subject) {
- super(subject);
- currentProgress = new SubProgress(Stages.UNSTARTED);
- }
-
- /**
- * Log the exception for this request.
- */
- @Override
- public void setError(TaskAbortException e) {
- super.setError(e);
- }
-
- /**
- * Stores an unfinished result; call setFinished() to mark it as complete.
- * @param result
- */
- //@Override
- public void setResult(Set result){
- resultnotfinished = result;
- }
-
- /**
- * Moves the unfinished result so that the Progress is marked as finished.
- * TODO get rid of this way of doing it
- */
- public void setFinished(){
- super.setResult(Collections.unmodifiableSet(resultnotfinished));
- resultnotfinished = null;
- setStage(Stages.DONE);
- }
-
- public int compareTo(Execution right){
- return subject.compareTo(right.getSubject());
- }
-
- @Override
- public ProgressParts getParts() throws TaskAbortException {
- int stage = currentProgress.stage.ordinal();
- int end = Stages.DONE.ordinal();
- return ProgressParts.normalise(stage, (stage getUnfinishedResult() {
- return resultnotfinished;
- }
-
- /**
- * Set the stage number, between 0 and 4 inclusive. TODO: now that stage is not used in Progress, make this an enum.
- * @param stage The stage number to set
- */
- void setStage(Stages stage) {
- currentProgress = new SubProgress(stage);
- }
-
- /**
- * Update the ProgressParts of the currentProgress of all the FindRequests in the List with the contents of the ClientEvent.
- * @param receivingEvent
- * @param ce
- */
- static void updateWithEvent(ArrayList receivingEvent, ClientEvent ce) {
- for (FindRequest findRequest : receivingEvent) {
- findRequest.partsFromEvent(ce);
- }
- }
-
- /**
- * Update the ProgressParts of the currentProgress with the ClientEvent.
- * @param ce
- */
- private void partsFromEvent(ClientEvent ce) {
- currentProgress.partsFromEvent(ce);
- }
-
- @Override
- public String toString() {
- try {
- return "FindRequest: " + getStatus() + " " + getParts() + " - "+currentProgress;
- } catch (TaskAbortException ex) {
- return "Error forming FindRequest string";
- }
- }
-
- /**
- * The FindRequest is a chained progress: it exposes one SubProgress for the stage currently running.
- */
- class SubProgress implements Progress { // TODO finish this
- private ProgressParts parts = ProgressParts.normalise(0, 1, 2, -1);
- boolean done = false;
- public final Stages stage;
-
- /**
- * Constructs a SubProgress for the specified stage with a default unstarted ProgressParts.
- * @param stage
- */
- public SubProgress (Stages stage){
- this.stage = stage;
- parts = ProgressParts.normalise(0, 0, 1, ProgressParts.ESTIMATE_UNKNOWN);
- }
-
- public String getSubject() {
- return stage.description;
- }
-
- public String getStatus() {
- return stage.description;
- }
-
- public ProgressParts getParts() throws TaskAbortException {
- return parts;
- }
-
- public boolean isStarted() {
- return true;
- }
-
- public boolean isDone() throws TaskAbortException {
- return parts.isDone();
- }
-
- /**
- * Not used.
- * @throws
java.lang.InterruptedException - */ - public void join() throws InterruptedException, TaskAbortException { - throw new UnsupportedOperationException("Not supported."); - } - - /** - * Sets the Progress of this to the contents of the CLientEvent - * @param ce - */ - public void partsFromEvent(ClientEvent ce) { -// Logger.normal(this, "Setting parts in "+this+" from event "+ce.getClass()); - if(ce instanceof SplitfileProgressEvent){ - SplitfileProgressEvent spe = (SplitfileProgressEvent)ce; - parts = ProgressParts.normalise(spe.succeedBlocks, spe.minSuccessfulBlocks, spe.minSuccessfulBlocks, spe.finalizedTotal?ProgressParts.TOTAL_FINALIZED:ProgressParts.ESTIMATE_UNKNOWN); - }else if(ce instanceof SendingToNetworkEvent || ce instanceof ExpectedMIMEEvent || ce instanceof ExpectedFileSizeEvent || ce instanceof SplitfileCompatibilityModeEvent || ce instanceof ExpectedHashesEvent) { - // Ignore. - return; - }else{ - parts = ProgressParts.normalise(0, 0); - Logger.error(this, "Fetch progress will not update due to unrecognised ClientEvent : "+ce.getClass().getName()); - } - } - - @Override - public String toString(){ - return parts.toString(); - } - } -} diff --git a/src/plugins/Library/index/xml/LibrarianHandler.java b/src/plugins/Library/index/xml/LibrarianHandler.java deleted file mode 100644 index 4f0b6327..00000000 --- a/src/plugins/Library/index/xml/LibrarianHandler.java +++ /dev/null @@ -1,237 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index.xml; - -import plugins.Library.index.TermPageEntry; - -import freenet.support.Logger; -import freenet.keys.FreenetURI; - -import org.xml.sax.Attributes; -import org.xml.sax.Locator; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.DefaultHandler; - -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.HashMap; - -/** - * Required for using SAX parser on XML indices - * - * @author swati - * @author MikeB - * - */ -public class LibrarianHandler extends DefaultHandler { - private boolean processingWord; - - // The data about the pages referenced in this subindex - /** file id -> uri */ - private HashMap uris; - /** file id -> title */ - private HashMap titles; - /** file id -> wordCount */ - private HashMap wordCounts; - - // About the whole index - private int totalFileCount; - - // Requests and matches being made - private List requests; - private List wordMatches; - - private int inWordFileCount; - - // About the file tag being processed - private StringBuilder characters; - private String inFileTitle; - private FreenetURI inFileURI; - private int inFileWordCount; - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(LibrarianHandler.class); - } - - /** - * Construct a LibrarianHandler to look for many terms - * @param requests the requests wanting to be resolved by this LibrarianHandler, results are written back to them - * @throws java.lang.Exception - */ - public LibrarianHandler(List requests) { - this.requests = new ArrayList(requests); - for (FindRequest r : requests){ - r.setResult(new HashSet()); - } - } - - @Override public void setDocumentLocator(Locator value) { - - } - - @Override public void endDocument() throws SAXException { - } - - @Override public void startDocument() throws 
SAXException { - if(uris==null || titles ==null){ - uris = new HashMap(); - titles = new HashMap(); - wordCounts = new HashMap(); - } - } - - @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) - throws SAXException { - if(requests.size()==0&&wordMatches.size()==0) - return; - if (rawName == null) { - rawName = localName; - } - String elt_name = rawName; - - if (elt_name.equals("files")){ - processingWord = false; - String fileCount = attrs.getValue("", "totalFileCount"); - if(fileCount != null) - this.totalFileCount = Integer.parseInt(fileCount); - if(logMINOR) Logger.minor(this, "totalfilecount = "+this.totalFileCount); - } - if (elt_name.equals("keywords")) - processingWord = true; - /* - * looks for the word in the given subindex file if the word is found then the parser - * fetches the corresponding fileElements - */ - if (elt_name.equals("word")) { - try { - wordMatches = null; - String match = attrs.getValue("v"); - if (requests!=null){ - wordMatches = new ArrayList(); - for (Iterator it = requests.iterator(); it.hasNext();) { - FindRequest r = it.next(); - if (match.equals(r.getSubject())){ - wordMatches.add(r); - it.remove(); - if(logMINOR) Logger.minor(this, "found word match "+wordMatches); - } - } - if (attrs.getValue("fileCount")!=null) - inWordFileCount = Integer.parseInt(attrs.getValue("fileCount")); - } - } catch (Exception e) { - throw new SAXException(e); - } - } - - if (elt_name.equals("file")) { - if (processingWord == true && wordMatches!=null) { - try{ - String suri = uris.get(attrs.getValue("id")); - inFileURI = new FreenetURI(suri); - synchronized(this){ - if(titles.containsKey(attrs.getValue("id"))) - { - inFileTitle = titles.get(attrs.getValue("id")); - if ((suri).equals(inFileTitle)) - inFileTitle = null; - } - else - inFileTitle = null; - if(wordCounts.containsKey(attrs.getValue("id"))) - inFileWordCount = wordCounts.get(attrs.getValue("id")).intValue(); - else - inFileWordCount = -1; - - characters = new StringBuilder(); - } - } - catch (Exception e) { - Logger.error(this, "Index format may be outdated " + e.toString(), e); - } - - } else if (processingWord == false) { - try { - String id = attrs.getValue("id"); - String key = attrs.getValue("key"); - int l = attrs.getLength(); - String title; - synchronized (this) { - if (l >= 3) { - try { - title = attrs.getValue("title"); - titles.put(id, title); - } catch (Exception e) { - Logger.error(this, "Index Format not compatible " + e.toString(), e); - } - try { - String wordCountString = attrs.getValue("wordCount"); - if(wordCountString != null) { - int wordCount = Integer.parseInt(attrs.getValue("wordCount")); - wordCounts.put(id, wordCount); - } - } catch (Exception e) { - //if(logMINOR) Logger.minor(this, "No wordcount found " + e.toString(), e); - } - } - uris.put(id, key); - } - } - catch (Exception e) { - Logger.error(this, "File id and key could not be retrieved. 
May be due to format clash", e); - } - } - } - } - - @Override public void characters(char[] ch, int start, int length) { - if(processingWord && wordMatches != null && characters!=null){ - characters.append(ch, start, length); - } - } - - @Override - public void endElement(String namespaceURI, String localName, String qName) { - if(processingWord && wordMatches != null){ - HashMap<Integer, String> termpositions = null; - if(characters!=null){ - String[] termposs = characters.toString().split(","); - termpositions = new HashMap<Integer, String>(); - for (String pos : termposs) { - try{ - termpositions.put(Integer.valueOf(pos), null); - }catch(NumberFormatException e){ - Logger.error(this, "Position in index not an integer :"+pos, e); - } - } - characters = null; - } - - - - for(FindRequest match : wordMatches){ - Set<TermPageEntry> result = match.getUnfinishedResult(); - float relevance = 0; - -// if(logMINOR) Logger.minor(this, "termcount "+termpositions.size()+" filewordcount = "+inFileWordCount); - if(termpositions!=null && termpositions.size()>0 && inFileWordCount>0 ){ - relevance = (float)(termpositions.size()/(float)inFileWordCount); - if( totalFileCount > 0 && inWordFileCount > 0) - relevance *= Math.log( (float)totalFileCount/(float)inWordFileCount); - //if(logMINOR) Logger.minor(this, "Set relevance of "+pageEntry.getTitle()+" to "+pageEntry.rel+" - "+pageEntry.toString()); - } - - TermPageEntry pageEntry = new TermPageEntry(match.getSubject(), relevance, inFileURI, inFileTitle, termpositions); - result.add(pageEntry); - //if(logMINOR) Logger.minor(this, "added "+inFileURI+ " to "+ match); - } - } - } -} diff --git a/src/plugins/Library/index/xml/MainIndexParser.java b/src/plugins/Library/index/xml/MainIndexParser.java deleted file mode 100644 index 9e14c99a..00000000 --- a/src/plugins/Library/index/xml/MainIndexParser.java +++ /dev/null @@ -1,245 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index.xml; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Locale; -import java.util.Map; -import java.util.Set; - -import org.xml.sax.Attributes; -import org.xml.sax.Locator; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.DefaultHandler; - -/** - * Main Index Parser - * - * Parser for master index index.xml. This does _NOT_ support index files generated by - * XMLSpider earlier than version 8 (Sep-2007). 
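The relevance computed in endElement() above is a TF-IDF style weight: the term's frequency within a page, scaled by the log of how rare the term is across the whole index. A minimal self-contained sketch of the same formula (the method and parameter names are illustrative, not part of the original source):

    // TF-IDF style relevance, mirroring LibrarianHandler.endElement() above.
    // occurrences: hits of the term in this page; fileWordCount: words in the page;
    // totalFileCount: pages in the index; wordFileCount: pages containing the term.
    static float relevance(int occurrences, int fileWordCount,
                           int totalFileCount, int wordFileCount) {
        if (occurrences <= 0 || fileWordCount <= 0)
            return 0f;
        float rel = (float) occurrences / (float) fileWordCount;
        if (totalFileCount > 0 && wordFileCount > 0)
            rel *= Math.log((float) totalFileCount / (float) wordFileCount);
        return rel;
    }

For example, a term occurring 5 times in a 1000-word page, and present in 10 of 1000 pages, scores (5/1000) * ln(1000/10) ≈ 0.023.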
- * - * @author j16sdiz (1024D/75494252) - */ -public class MainIndexParser extends DefaultHandler { - protected enum V1State { - /** Anything else */ - MAIN, - /** Inside <header> */ - HEADER, - /** One-level inside <header>, such as <title> */ - HEADER_INNER, - /** Inside <keywords> */ - KEYWORDS - } - - protected class V1Handler extends DefaultHandler { - V1State state; - String headerKey; - StringBuilder headerValue; - - @Override - public void startDocument() { - state = V1State.MAIN; - headerKey = null; - headerValue = null; - } - - @Override - public void characters(char ch[], int start, int length) throws SAXException { - if (state != V1State.HEADER_INNER || headerKey == null) - return; - headerValue.append(ch, start, length); - } - - @Override - public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { - switch (state) { - case MAIN: - mainStartElement(uri, localName, qName, attributes); - break; - case HEADER: - headerStartElement(uri, localName, qName, attributes); - break; - case HEADER_INNER: - headerInnerStartElement(uri, localName, qName, attributes); - break; - case KEYWORDS: - keywordsStartElement(uri, localName, qName, attributes); - break; - } - } - - private void mainStartElement(String uri, String localName, String qName, Attributes attributes) - throws SAXException { - if ("main_index".equals(localName)) { - state = V1State.MAIN; - } else if ("header".equals(localName)) { - state = V1State.HEADER; - } else if ("keywords".equals(localName)) { - state = V1State.KEYWORDS; - } else if ("prefix".equals(localName)) { - // ignore - } else { - throw new SAXException("Bad tag <" + localName + "> in main @" + location()); - } - } - - private void headerStartElement(String uri, String localName, String qName, Attributes attributes) { - headerKey = localName; - headerValue = new StringBuilder(); - state = V1State.HEADER_INNER; - } - - private void headerInnerStartElement(String uri, String localName, String qName, Attributes attributes) - throws SAXException { - throw new SAXException("Bad tag <" + localName + "> in header @" + location()); - } - - private void keywordsStartElement(String uri, String localName, String qName, Attributes attributes) - throws SAXException { - if (!"subIndex".equals(localName)) - throw new SAXException("Bad tag <" + localName + "> in keywords @" + location()); - - String key = attributes.getValue("", "key"); - if (key == null || key.length() < 1 || key.length() > 32 || !key.matches("^[0-9a-fA-F]*$")) - throw new SAXException("Bad tag @" + location()); - key = key.toLowerCase(Locale.US); - subIndice.add(key); - } - - @Override - public void endElement(String uri, String localName, String qName) throws SAXException { - switch (state) { - case MAIN: - mainEndElement(uri, localName, qName); - break; - case HEADER: - headerEndElement(uri, localName, qName); - break; - case HEADER_INNER: - headerInnerEndElement(uri, localName, qName); - break; - case KEYWORDS: - keywordsEndElement(uri, localName, qName); - break; - } - } - - private void mainEndElement(String uri, String localName, String qName) throws SAXException { - if (!"main_index".equals(localName) && !"prefix".equals(localName)) - throw new SAXException("Bad close tag in main @" + location()); - } - - private void headerEndElement(String uri, String localName, String qName) throws SAXException { - if (!"header".equals(localName)) - throw new SAXException("Bad close tag in header @" + location()); - - state = V1State.MAIN; - } - - private void 
headerInnerEndElement(String uri, String localName, String qName) throws SAXException { - if (!localName.equals(headerKey)) - throw new SAXException("Bad close tag in header @" + location()); - header.put(headerKey, headerValue.toString()); - headerKey = null; - headerValue = null; - - state = V1State.HEADER; - } - - private void keywordsEndElement(String uri, String localName, String qName) throws SAXException { - if ("subIndex".equals(localName)) { - // ignore - } else if ("keywords".equals(localName)) { - state = V1State.MAIN; - } else - throw new SAXException("Bad close tag in keywords @" + location()); - } - } - - protected int version; - protected Map header; - protected Set subIndice; - protected Set siteIndice; - protected DefaultHandler handler; - protected Locator locator; - - @Override - public void characters(char ch[], int start, int length) throws SAXException { - handler.characters(ch, start, length); - } - - @Override - public void setDocumentLocator(Locator locator) { - this.locator = locator; - } - - @Override - public void startDocument() { - header = new HashMap(); - subIndice = new HashSet(); - siteIndice = new HashSet(); - handler = null; - } - - @Override - public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { - if (handler == null) { - version = detectVersion(uri, localName, qName, attributes); - - switch (version) { - case 1: - handler = new V1Handler(); - break; - default: - throw new SAXException("Unsupported version @(" + location() + " : " + version); - } - - handler.startDocument(); - } - handler.startElement(uri, localName, qName, attributes); - } - - @Override - public void endElement(String uri, String localName, String qName) throws SAXException { - handler.endElement(uri, localName, qName); - } - - @Override - public void endDocument() throws SAXException { - handler.endDocument(); - } - - private int detectVersion(String uri, String localName, String qName, Attributes attributes) throws SAXException { - if ("main_index".equals(localName)) { - return 1; - } else { - throw new SAXException("Unknown tag @" + location() + " : " + localName); - } - } - - private String location() { - if (locator != null) - return "(" + locator.getLineNumber() + "," + locator.getColumnNumber() + ")"; - else - return "()"; - } - - public int getVersion() { - return version; - } - - public String getHeader(String key) { - return header.get(key); - } - - public Set getSubIndice() { - return subIndice; - } - - public Set getSiteIndice() { - return siteIndice; - } -} diff --git a/src/plugins/Library/index/xml/URLUpdateHook.java b/src/plugins/Library/index/xml/URLUpdateHook.java deleted file mode 100644 index a59e3ada..00000000 --- a/src/plugins/Library/index/xml/URLUpdateHook.java +++ /dev/null @@ -1,7 +0,0 @@ -package plugins.Library.index.xml; - -public interface URLUpdateHook { - - void update(String updateContext, String indexuri); - -} diff --git a/src/plugins/Library/index/xml/Util.java b/src/plugins/Library/index/xml/Util.java deleted file mode 100644 index 83004461..00000000 --- a/src/plugins/Library/index/xml/Util.java +++ /dev/null @@ -1,75 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.index.xml; - -import java.io.File; -import java.net.MalformedURLException; - -import freenet.client.FetchException; -import freenet.client.FetchResult; -import freenet.client.HighLevelSimpleClient; -import freenet.keys.FreenetURI; -import freenet.support.Logger; -import freenet.support.api.Bucket; -import freenet.support.io.FileBucket; - -/** - * Utility class. - * TODO get rid of this - * @deprecated move these classes out to somewhere else - * @author j16sdiz (1024D/75494252) - */ -public class Util { - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(Util.class); - } - - public static Bucket fetchBucket(String uri, HighLevelSimpleClient hlsc) throws FetchException, MalformedURLException { - // try local file first - File file = new File(uri); - if (file.exists() && file.canRead()) - return new FileBucket(file, true, false, false, false); - else if (hlsc==null) - throw new NullPointerException("No client or file "+uri+" found"); - - // FreenetURI, try to fetch from freenet - if(logMINOR) Logger.minor(Util.class, "Fetching "+uri); - FreenetURI u = new FreenetURI(uri); - FetchResult res; - while (true) { - try { - res = hlsc.fetch(u); - break; - } catch (FetchException e) { - if (e.newURI != null) { - u = e.newURI; - continue; - } else - throw e; - } - } - - return res.asBucket(); - } - - public static boolean isValid(String uri) { - // try local file first - File file = new File(uri); - if (!file.exists() || !file.canRead()) { - // FreenetURI, try to fetch from freenet - try{ - FreenetURI u = new FreenetURI(uri); - }catch(MalformedURLException e){ - return false; - } - } - return true; - } -} - - diff --git a/src/plugins/Library/index/xml/XMLIndex.java b/src/plugins/Library/index/xml/XMLIndex.java deleted file mode 100644 index 2e03796e..00000000 --- a/src/plugins/Library/index/xml/XMLIndex.java +++ /dev/null @@ -1,861 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.index.xml; - -import plugins.Library.Library; -import plugins.Library.Index; -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermPageEntry; -import plugins.Library.index.URIEntry; -import plugins.Library.search.InvalidSearchException; -import plugins.Library.util.exec.Execution; -import plugins.Library.util.exec.TaskAbortException; - -import freenet.support.Fields; -import freenet.support.Logger; -import freenet.support.api.Bucket; -import freenet.support.io.FileBucket; -import freenet.support.io.ResumeFailedException; -import freenet.client.async.ClientGetCallback; -import freenet.client.async.ClientGetter; -import freenet.client.async.ClientContext; -import freenet.client.events.ClientEventListener; -import freenet.client.events.ClientEvent; -import freenet.client.FetchException; -import freenet.client.HighLevelSimpleClient; -import freenet.client.FetchResult; -import freenet.node.RequestStarter; -import freenet.node.RequestClient; -import freenet.keys.FreenetURI; - -import freenet.pluginmanager.PluginRespirator; -import freenet.support.Executor; - -import javax.xml.parsers.ParserConfigurationException; -import javax.xml.parsers.SAXParser; -import javax.xml.parsers.SAXParserFactory; - -import org.xml.sax.Attributes; -import org.xml.sax.Locator; -import org.xml.sax.SAXException; -import org.xml.sax.helpers.DefaultHandler; - -import java.io.BufferedInputStream; -import java.io.BufferedOutputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.File; -import java.io.OutputStream; -import java.net.MalformedURLException; -import java.util.logging.Level; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.List; -import java.util.TreeMap; -import java.util.SortedMap; -import java.util.HashMap; - - -/** - * The xml index format - * @author MikeB - */ -public class XMLIndex implements Index, ClientGetCallback { - - final public static String MIME_TYPE = "application/xml"; - final public static String DEFAULT_FILE = "index.xml"; - - private PluginRespirator pr; - private Executor executor; - private ClientGetter rootGetter; - private int fetchFailures = 0; - public enum FetchStatus{UNFETCHED, FETCHING, FETCHED, FAILED} - protected FetchStatus fetchStatus = FetchStatus.UNFETCHED; - protected HashMap indexMeta = new HashMap(); - // TODO it could be tidier if this was converted to a FreenetURI - protected String indexuri; - protected long origEdition; - private URLUpdateHook updateHook; - private String updateContext; - - private HighLevelSimpleClient hlsc; - - /** - * Index format version: - *
<ul> - * <li>1 = XMLSpider 8 to 33</li> - * </ul>
- * Earlier formats are no longer supported. - */ - private int version; - private List<String> subIndiceList; - private SortedMap<String, SubIndex> subIndice; - private ArrayList<FindRequest> waitingOnMainIndex = new ArrayList<FindRequest>(); - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(XMLIndex.class); - } - - /** - * Create an XMLIndex from a URI - * @param baseURI - * Base URI of the index (exclude the index.xml part) - */ - public XMLIndex(String baseURI, long edition, PluginRespirator pr, URLUpdateHook hook, String context) throws InvalidSearchException { - this.pr = pr; - this.origEdition = edition; - this.updateHook = hook; - this.updateContext = context; - if (pr!=null) - executor = pr.getNode().executor; - - if (baseURI.endsWith(DEFAULT_FILE)) - baseURI = baseURI.replace(DEFAULT_FILE, ""); - if (!baseURI.endsWith("/")) - baseURI += "/"; - indexuri = baseURI; - - // check if index is valid file or URI - if(!Util.isValid(baseURI)) - throw new InvalidSearchException(baseURI + " is neither a valid file nor a valid Freenet URI"); - - - if(pr!=null){ - hlsc = pr.getNode().clientCore.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); - hlsc.addEventHook(mainIndexListener); - } - } - - /** - * Process the bucket containing the main index file - * @param bucket - */ - private void processRequests(Bucket bucket){ - try { - InputStream is = bucket.getInputStream(); - parse(is); - is.close(); - fetchStatus = FetchStatus.FETCHED; - for(FindRequest req : waitingOnMainIndex) - setdependencies(req); - waitingOnMainIndex.clear(); - }catch(Exception e){ - fetchStatus = FetchStatus.FAILED; - for (FindRequest findRequest : waitingOnMainIndex) { - findRequest.setError(new TaskAbortException("Failure parsing "+toString(), e)); - } - Logger.error(this, indexuri, e); - } finally { - bucket.free(); - } - } - - /** - * Listener to receive events when fetching the main index - */ - ClientEventListener mainIndexListener = new ClientEventListener(){ - /** - * Hears an event. 
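Each event heard here is fanned out to every FindRequest still parked on the main index fetch, so a single network event advances the progress of all concurrent searches at once. A minimal sketch of that pattern, with hypothetical stand-in types rather than the plugin's own classes:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    // One event source, many waiting observers (stand-ins for ClientEvent/FindRequest).
    interface ProgressObserver { void onProgress(int done, int total); }

    class FanOutListener {
        private final List<ProgressObserver> waiting = new CopyOnWriteArrayList<ProgressObserver>();
        void add(ProgressObserver o) { waiting.add(o); }
        // Mirrors FindRequest.updateWithEvent(waitingOnMainIndex, ce):
        void receive(int done, int total) {
            for (ProgressObserver o : waiting)
                o.onProgress(done, total);
        }
    }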
- **/ - public void receive(ClientEvent ce, ClientContext context){ - FindRequest.updateWithEvent(waitingOnMainIndex, ce); -// Logger.normal(this, "Updated with event : "+ce.getDescription()); - } - }; - - /** - * Callback for when index fetching completes - */ - /** Called on successful fetch */ - public void onSuccess(FetchResult result, ClientGetter state){ -// Logger.normal(this, "Fetch successful " + toString()); - processRequests(result.asBucket()); - } - - /** Called on failed/canceled fetch */ - public void onFailure(FetchException e, ClientGetter state){ - fetchFailures++; - if(fetchFailures < 20 && e.newURI!=null){ - try { - if(logMINOR) Logger.minor(this, "Trying new URI: "+e.newURI); - indexuri = e.newURI.setMetaString(new String[]{""}).toString(); - if(origEdition != -1 && e.newURI.getEdition() < origEdition) { - Logger.error(this, "Redirect to earlier edition?!?!?!?: "+e.newURI.getEdition()+" from "+origEdition); - } else { - if(logMINOR) Logger.minor(this, "Trying new URI: "+e.newURI+" : "+indexuri); - startFetch(true); - if(updateHook != null && updateContext != null) - updateHook.update(updateContext, indexuri); - return; - } - } catch (FetchException ex) { - e = ex; - } catch (MalformedURLException ex) { - Logger.error(this, "what?", ex); - } - } - fetchStatus = FetchStatus.FAILED; - for (FindRequest findRequest : waitingOnMainIndex) { - findRequest.setError(new TaskAbortException("Failure fetching rootindex of "+toString(), e)); - } - Logger.error(this, "Fetch failed on "+toString() + " -- state = "+state, e); - } - - /** - * Fetch main index & process if local or fetch in background with callback if Freenet URI - * @throws freenet.client.FetchException - * @throws java.net.MalformedURLException - */ - private synchronized void startFetch(boolean retry) throws FetchException, MalformedURLException { - if ((!retry) && (fetchStatus != FetchStatus.UNFETCHED && fetchStatus != FetchStatus.FAILED)) - return; - fetchStatus = FetchStatus.FETCHING; - String uri = indexuri + DEFAULT_FILE; - - - // try local file first - File file = new File(uri); - if (file.exists() && file.canRead()) { - processRequests(new FileBucket(file, true, false, false, false)); - return; - } - - if(logMINOR) Logger.minor(this, "Fetching "+uri); - // FreenetURI, try to fetch from freenet - FreenetURI u = new FreenetURI(uri); - while (true) { - try { - rootGetter = hlsc.fetch(u, -1, this, hlsc.getFetchContext().clone()); - Logger.normal(this, "Fetch started : "+toString()); - break; - } catch (FetchException e) { - if (e.newURI != null) { - u = e.newURI; - if(logMINOR) Logger.minor(this, "New URI: "+uri); - continue; - } else - throw e; - } - } - } - - /** - * Parse the xml in the main index to read fields for this object - * @param is InputStream for main index file - * @throws org.xml.sax.SAXException - * @throws java.io.IOException - */ - private void parse(InputStream is) throws SAXException, IOException { - SAXParserFactory factory = SAXParserFactory.newInstance(); -// Logger.normal(this, "Parsing main index"); - - try { - factory.setNamespaceAware(true); - factory.setFeature("http://xml.org/sax/features/namespaces", true); - factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); - - SAXParser saxParser = factory.newSAXParser(); - MainIndexParser parser = new MainIndexParser(); - saxParser.parse(is, parser); - - version = parser.getVersion(); - if (version == 1) { - indexMeta.put("title", parser.getHeader("title")); - indexMeta.put("ownerName", parser.getHeader("owner")); - 
indexMeta.put("ownerEmail", parser.getHeader("email")); - - subIndiceList = new ArrayList(); - subIndice = new TreeMap(); - - for (String key : parser.getSubIndice()) { - subIndiceList.add(key); - String stillbase; - try{ - FreenetURI furi = new FreenetURI(indexuri); - stillbase = ( furi.isUSK() ? furi.sskForUSK() : furi ).toString(); - }catch(MalformedURLException e){ - stillbase = indexuri; - } - subIndice.put(key, new SubIndex(stillbase, "index_" + key + ".xml")); - } - Collections.sort(subIndiceList); - } - } catch (ParserConfigurationException e) { - Logger.error(this, "SAX ParserConfigurationException", e); - throw new SAXException(e); - } - } - - @Override - public String toString(){ - String output = "Index:[ "+indexuri+" "+fetchStatus+" "+waitingOnMainIndex+"\n\t"+subIndice + (rootGetter==null?"]":(" GETTING("+rootGetter.toString()+")]")); - //for (SubIndex s : subIndice) - // output = output+"\n -"+s; - return output; - } - - /** - * Gets the SubIndex object which should hold the keyword - */ - private SubIndex getSubIndex(String keyword) { - String md5 = Library.MD5(keyword); - int idx = Collections.binarySearch(subIndiceList, md5); - if (idx < 0) - idx = -idx - 2; - return subIndice.get(subIndiceList.get(idx)); - } - - /** - * Find the term in this Index - */ - public synchronized Execution> getTermEntries(String term){ - try { - FindRequest request = new FindRequest(term); - setdependencies(request); - notifyAll(); - return request; - } catch (FetchException ex) { - Logger.error(this, "Trying to find " + term, ex); - return null; - } catch (MalformedURLException ex) { - Logger.error(this, "Trying to find " + term, ex); - return null; - } - } - - public Execution getURIEntry(FreenetURI uri){ - throw new UnsupportedOperationException("getURIEntry not Implemented in XMLIndex"); - } - - /** - * Puts request into the dependency List of either the main index or the - * subindex depending on whether the main index is availiable - * @param request - * @throws freenet.client.FetchException - * @throws java.net.MalformedURLException - */ - private synchronized void setdependencies(FindRequest request) throws FetchException, MalformedURLException{ -// Logger.normal(this, "setting dependencies for "+request+" on "+this.toString()); - if (fetchStatus!=FetchStatus.FETCHED){ - waitingOnMainIndex.add(request); - request.setStage(FindRequest.Stages.FETCHROOT); - startFetch(false); - }else{ - request.setStage(FindRequest.Stages.FETCHSUBINDEX); - SubIndex subindex = getSubIndex(request.getSubject()); - subindex.addRequest(request); -// Logger.normal(this, "STarting "+getSubIndex(request.getSubject())+" to look for "+request.getSubject()); - if(executor!=null) - executor.execute(subindex, "Subindex:"+subindex.getFileName()); - else - (new Thread(subindex, "Subindex:"+subindex.getFileName())).start(); - } - } - - - /** - * @return the uri of this index prefixed with "xml:" to show what type it is - */ - public String getIndexURI(){ - return indexuri; - } - - private class SubIndex implements Runnable { - String indexuri, filename; - private final ArrayList waitingOnSubindex=new ArrayList(); - private final ArrayList parsingSubindex = new ArrayList(); - FetchStatus fetchStatus = FetchStatus.UNFETCHED; - HighLevelSimpleClient hlsc; - Bucket bucket; - Exception error; - - /** - * Listens for progress on a subIndex fetch - */ - ClientEventListener subIndexListener = new ClientEventListener(){ - /** - * Hears an event and updates those Requests waiting on this subindex fetch - **/ - public void 
receive(ClientEvent ce, ClientContext context){ - FindRequest.updateWithEvent(waitingOnSubindex, ce); -// Logger.normal(this, "Updated with event : "+ce.getDescription()); - } - }; - - SubIndex(String indexuri, String filename){ - this.indexuri = indexuri; - this.filename = filename; - - if(pr!=null){ - hlsc = pr.getNode().clientCore.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); - hlsc.addEventHook(subIndexListener); - } - } - - String getFileName(){ - return filename; - } - - FetchStatus getFetchStatus(){ - return fetchStatus; - } - - /** - * Add a request to the List of requests looking in this subindex - * @param request - */ - void addRequest(FindRequest request){ - if(fetchStatus==FetchStatus.FETCHED) - request.setStage(FindRequest.Stages.PARSE); - else - request.setStage(FindRequest.Stages.FETCHSUBINDEX); - synchronized(waitingOnSubindex){ - waitingOnSubindex.add(request); - } - } - - @Override - public String toString(){ - return filename+" "+fetchStatus+" "+waitingOnSubindex; - } - - - public synchronized void run(){ - try{ - while(waitingOnSubindex.size()>0){ - if(fetchStatus==FetchStatus.UNFETCHED || fetchStatus == FetchStatus.FAILED){ - try { - fetchStatus = FetchStatus.FETCHING; - // TODO tidy the fetch stuff - bucket = Util.fetchBucket(indexuri + filename, hlsc); - fetchStatus = FetchStatus.FETCHED; - - } catch (Exception e) { // TODO tidy the exceptions - //java.net.MalformedURLException - //freenet.client.FetchException - String msg = indexuri + filename + " could not be opened: " + e.toString(); - Logger.error(this, msg, e); - throw new TaskAbortException(msg, e); - } - }else if(fetchStatus==FetchStatus.FETCHED){ - parseSubIndex(); - } else { - break; - } - } - } catch (TaskAbortException e) { - fetchStatus = FetchStatus.FAILED; - this.error = e; - Logger.error(this, "Dropping from subindex run loop", e); - for (FindRequest r : parsingSubindex) - r.setError(e); - for (FindRequest r : waitingOnSubindex) - r.setError(e); - } - } - - public void parseSubIndex() throws TaskAbortException { - synchronized(parsingSubindex){ - // Transfer all requests waiting on this subindex to the parsing list - synchronized (waitingOnSubindex){ - parsingSubindex.addAll(waitingOnSubindex); - waitingOnSubindex.removeAll(parsingSubindex); - } - // Set status of all those about to be parsed to PARSE - for(FindRequest r : parsingSubindex) - r.setStage(FindRequest.Stages.PARSE); - - // Multi-stage parse to minimise memory usage. - - // Stage 1: Extract the declaration (first tag), copy everything before "". - // Copy the declaration, plus everything between the two (inclusive) to another bucket. - - Bucket mainBucket, filesBucket; - - try { - InputStream is = bucket.getInputStream(); - mainBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1); - filesBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1); - OutputStream mainOS = new BufferedOutputStream(mainBucket.getOutputStream()); - OutputStream filesOS = new BufferedOutputStream(filesBucket.getOutputStream()); - //OutputStream mainOS = new BufferedOutputStream(new FileOutputStream("main.tmp")); - //OutputStream filesOS = new BufferedOutputStream(new FileOutputStream("files.tmp")); - - BufferedInputStream bis = new BufferedInputStream(is); - - byte greaterThan = ">".getBytes("UTF-8")[0]; - byte[] filesPrefix = "'s for the specific ID's. 
- - is = filesBucket.getInputStream(); - StageThreeHandler stageThreeHandler = new StageThreeHandler(); - saxParser.parse(is, stageThreeHandler); - if(logMINOR) Logger.minor(this, "Finished stage three XML parse"); - is.close(); - - Logger.minor(this, "parsing finished "+ parsingSubindex.toString()); - for (FindRequest findRequest : parsingSubindex) { - findRequest.setFinished(); - } - parsingSubindex.clear(); - } catch (Exception err) { - Logger.error(this, "Error parsing "+filename, err); - throw new TaskAbortException("Could not parse XML: ", err); - } - } - } - - class WordMatch { - public WordMatch(ArrayList searches, int inWordFileCount) { - this.searches = searches; - this.inWordFileCount = inWordFileCount; - } - final List searches; - int inWordFileCount; - } - - class FileMatch { - public FileMatch(String id2, HashMap termpositions2, WordMatch thisWordMatch) { - id = id2; - termpositions = termpositions2; - word = thisWordMatch; - } - final String id; - final HashMap termpositions; - final WordMatch word; - } - - Map> idToFileMatches = new HashMap>(); - - int totalFileCount = -1; - - // Parse the main XML file, including the keywords list. - // We do not see the files list. - class StageTwoHandler extends DefaultHandler { - - private boolean processingWord; - - // Requests and matches being made - private List requests; - private List wordMatches; - - private int inWordFileCount; - - // About the file tag being processed - private StringBuilder characters; - - private String match; - - private ArrayList fileMatches = new ArrayList(); - - private String id; - - private WordMatch thisWordMatch; - - StageTwoHandler() { - this.requests = new ArrayList(parsingSubindex); - for (FindRequest r : parsingSubindex){ - r.setResult(new HashSet()); - } - } - - @Override public void setDocumentLocator(Locator value) { - - } - - @Override public void endDocument() throws SAXException { - } - - @Override public void startDocument() throws SAXException { - // Do nothing - } - - @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) - throws SAXException { - if(requests.size()==0&&(wordMatches == null || wordMatches.size()==0)) - return; - if (rawName == null) { - rawName = localName; - } - String elt_name = rawName; - - if (elt_name.equals("keywords")) - processingWord = true; - - /* - * looks for the word in the given subindex file if the word is found then the parser - * fetches the corresponding fileElements - */ - if (elt_name.equals("word")) { - try { - fileMatches.clear(); - wordMatches = null; - match = attrs.getValue("v"); - if (requests!=null){ - - for (Iterator it = requests.iterator(); it.hasNext();) { - FindRequest r = it.next(); - if (match.equals(r.getSubject())){ - if(wordMatches == null) - wordMatches = new ArrayList(); - wordMatches.add(r); - it.remove(); - Logger.minor(this, "found word match "+wordMatches); - } - } - if(wordMatches != null) { - if (attrs.getValue("fileCount")!=null) - inWordFileCount = Integer.parseInt(attrs.getValue("fileCount")); - thisWordMatch = new WordMatch(new ArrayList(wordMatches), inWordFileCount); - } - } - } catch (Exception e) { - throw new SAXException(e); - } - } - - if (elt_name.equals("file")) { - if (processingWord == true && wordMatches!=null) { - try{ - id = attrs.getValue("id"); - characters = new StringBuilder(); - } - catch (Exception e) { - Logger.error(this, "Index format may be outdated " + e.toString(), e); - } - - } - } - } - - @Override public void characters(char[] ch, int start, 
int length) { - if(processingWord && wordMatches!= null && characters!=null){ - characters.append(ch, start, length); - } - } - - @Override - public void endElement(String namespaceURI, String localName, String qName) { - if(processingWord && wordMatches != null && qName.equals("file")){ - HashMap termpositions = null; - if(characters!=null){ - String[] termposs = characters.toString().split(","); - termpositions = new HashMap(); - for (String pos : termposs) { - try{ - termpositions.put(Integer.valueOf(pos), null); - }catch(NumberFormatException e){ - Logger.error(this, "Position in index not an integer :"+pos, e); - } - } - characters = null; - } - - FileMatch thisFile = new FileMatch(id, termpositions, thisWordMatch); - - ArrayList matchList = idToFileMatches.get(id); - if(matchList == null) { - matchList = new ArrayList(); - idToFileMatches.put(id, matchList); - } - if(logMINOR) Logger.minor(this, "Match: id="+id+" for word "+match); - matchList.add(thisFile); - } - - } - - } - - class StageThreeHandler extends DefaultHandler { - - @Override public void setDocumentLocator(Locator value) { - - } - - @Override public void endDocument() throws SAXException { - } - - @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) - throws SAXException { - - if(idToFileMatches.isEmpty()) - return; - if (rawName == null) { - rawName = localName; - } - String elt_name = rawName; - - if (elt_name.equals("files")){ - String fileCount = attrs.getValue("", "totalFileCount"); - if(fileCount != null) - totalFileCount = Integer.parseInt(fileCount); - Logger.minor(this, "totalfilecount = "+totalFileCount); - } - - if (elt_name.equals("file")) { - try { - String id = attrs.getValue("id"); - - ArrayList matches = idToFileMatches.get(id); - - if(matches != null) { - - for(FileMatch match : matches) { - - String key = attrs.getValue("key"); - int l = attrs.getLength(); - String title = null; - int wordCount = -1; - if (l >= 3) { - try { - title = attrs.getValue("title"); - } catch (Exception e) { - Logger.error(this, "Index Format not compatible " + e.toString(), e); - } - try { - String wordCountString = attrs.getValue("wordCount"); - if(wordCountString != null) { - wordCount = Integer.parseInt(attrs.getValue("wordCount")); - } - } catch (Exception e) { - //Logger.minor(this, "No wordcount found " + e.toString(), e); - } - } - - for(FindRequest req : match.word.searches) { - - Set result = req.getUnfinishedResult(); - float relevance = 0; - - if(logDEBUG) Logger.debug(this, "termcount "+(match.termpositions == null ? 0 : match.termpositions.size())+" filewordcount = "+wordCount); - if(match.termpositions!=null && match.termpositions.size()>0 && wordCount>0 ){ - relevance = (float)(match.termpositions.size()/(float)wordCount); - if( totalFileCount > 0 && match.word.inWordFileCount > 0) - relevance *= Math.log( (float)totalFileCount/(float)match.word.inWordFileCount); - if(logDEBUG) Logger.debug(this, "Set relevance of "+title+" to "+relevance+" - "+key); - } - - TermPageEntry pageEntry = new TermPageEntry(req.getSubject(), relevance, new FreenetURI(key), title, match.termpositions); - result.add(pageEntry); - //Logger.minor(this, "added "+inFileURI+ " to "+ match); - } - - } - } - - } - catch (Exception e) { - Logger.error(this, "File id and key could not be retrieved. May be due to format clash", e); - } - } - } - - } - - } - - @Override - public void onResume(ClientContext context) throws ResumeFailedException { - // Ignore. Requests not persistent. 
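The subindex for a term (getSubIndex earlier in this file) is chosen by hashing the term and finding the greatest hex prefix that does not exceed the hash: Collections.binarySearch returns -(insertionPoint)-1 on a miss, so -idx-2 lands on the preceding key. A self-contained sketch of that lookup, with illustrative prefix values:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    // Example prefix buckets, one per index_<prefix>.xml file (illustrative values).
    List<String> prefixes = Arrays.asList("0", "4", "8", "c");
    String md5 = "9f8b2c";                   // hex MD5 of the term, shortened for the example
    int idx = Collections.binarySearch(prefixes, md5);
    if (idx < 0)
        idx = -idx - 2;                      // greatest prefix <= the hash
    // prefixes.get(idx) is "8", i.e. the term lives in index_8.xml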
- } - - @Override - public RequestClient getRequestClient() { - return Library.REQUEST_CLIENT; - } - -} diff --git a/src/plugins/Library/index/xml/package-info.java b/src/plugins/Library/index/xml/package-info.java deleted file mode 100644 index 6ef67706..00000000 --- a/src/plugins/Library/index/xml/package-info.java +++ /dev/null @@ -1,9 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ - -/** - * Where classes which only apply to xml indexes stay - */ -package plugins.Library.index.xml; - diff --git a/src/plugins/Library/io/DataFormatException.java b/src/plugins/Library/io/DataFormatException.java deleted file mode 100644 index 6f7aeb4b..00000000 --- a/src/plugins/Library/io/DataFormatException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io; - -/** -** Thrown when data is not in a recognised format. -** -** @author infinity0 -*/ -public class DataFormatException extends java.io.IOException { - - /** - ** The parent, if any, of the unrecognised data. - */ - final Object parent; - - /** - ** The key, if any, that the data is associated with. - */ - final Object key; - - /** - ** The unrecognised data. - */ - final Object data; - - public DataFormatException(String s, Throwable t, Object v, Object p, Object k) { - super(s); - initCause(t); - data = v; - parent = p; - key = k; - } - - public DataFormatException(String s, Throwable t, Object v) { - this(s, t, v, null, null); - } - - public Object getParent() { return parent; } - public Object getKey() { return key; } - public Object getValue() { return data; } - -} diff --git a/src/plugins/Library/io/ObjectBlueprint.java b/src/plugins/Library/io/ObjectBlueprint.java deleted file mode 100644 index 36969847..00000000 --- a/src/plugins/Library/io/ObjectBlueprint.java +++ /dev/null @@ -1,520 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io; - -import java.util.Iterator; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.Map; -import java.util.AbstractMap; -import java.util.AbstractSet; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.IdentityHashMap; - -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - -/** -** An (immutable) blueprint for an object. This class provides methods to -** construct new objects from a list or map of property parameters, and to -** present (currently immutable) map-views of existing objects. -** -** @author infinity0 -*/ -public class ObjectBlueprint { - - // TODO LOW maybe make the map-view mutable - // TODO LOW move this to another package, maybe util.reflect - - /** - ** The class that this blueprint represents. - */ - final protected Class cls; - - /** - ** Map of properties to their method names. The keys are the property - ** names; the values are the names of the nullary methods to invoke to - ** retrieve the property-values. 
If any map-value is null, then the field - ** with the same name as its map-key will be used instead. - */ - final protected Map properties; - - /** - ** Map of properties to their corresponding fields. This is automatically - ** generated from the {@link #properties} map. - */ - final protected Map prop_fields = new HashMap(); - - /** - ** Map of properties to their corresponding getter methods. This is - ** automatically generated from the {@link #properties} map. - */ - final protected Map prop_methods = new HashMap(); - - /** - ** Map of properties to their corresponding types. This is automatically - ** generated from the {@link #properties} map. - */ - final protected Map> param_type = new LinkedHashMap>(); - - /** - ** A constructor for the class, whose parameter types are the values of - ** {@link #param_type} in the same iteration order. This is automatically - ** from the class object, and must exist (otherwise construction of the - ** blueprint will throw {@link NoSuchMethodException}). - */ - final protected Constructor constructor; - - /** - ** Constructs a blueprint from the given {@link #properties} map. If you - ** don't have the map ready at hand, and it's a mixture of methods and - ** fields, consider using {@link #init(Class)} instead. - ** - ** The constructor's parameter types are given by {@code ctor_params}. This - ** '''must be a subset''' of {@code blueprint}; otherwise the blueprint - ** constructor will not be able to determine the types of those parameters. - ** For example, if your class has two fields {@code int field_a} and - ** {@code float field_b}, and {@code blueprint} maps {"field_b":null, - ** "field_a":null}, and {@code ctor_params} lists ["field_b", - ** "field_a"], then this method will search for the constructor with - ** parameters {@code (float, int)}. If it doesn't exist, {@link - ** NoSuchMethodException} will be thrown. - ** - ** If {@code ctor_params} is {@code null}, the parameter types will instead - ** be inferred from the keys of the properties map; the end result will be - ** the same as if it was equal to {@code blueprint.keySet()}. - ** - ** @param c The class to represent - ** @param blueprint The map of properties - ** @param ctor_params Properties corresponding to the input parametrs of - ** the object constructor. - ** @throws NoSuchFieldException if Java reflection can't find an inferred - ** field - ** @throws NoSuchMethodException if Java reflection can't find a given - ** method or the inferred constructor - ** @throws NullPointerException if {@code c} or {@code blueprint} is {@code - ** null} - */ - public ObjectBlueprint(Class c, Map blueprint, Iterable ctor_params) - throws NoSuchFieldException, NoSuchMethodException { - if (c == null || blueprint == null) { throw new NullPointerException(); } - cls = c; - properties = blueprint; - setProperties(); - if (ctor_params == null) { - constructor = cls.getConstructor(param_type.values().toArray(new Class[param_type.size()])); - } else { - List> types = new ArrayList>(); - for (String property: ctor_params) { types.add(param_type.get(property)); } - constructor = cls.getConstructor(types.toArray(new Class[types.size()])); - } - } - - /** - ** Constructs a blueprint from the given {@link #properties} map. If you - ** don't have the map ready at hand, and it's a mixture of methods and - ** fields, consider using {@link #init(Class)} instead. - ** - ** This constructor just delegates to {@link #ObjectBlueprint(Class, Map, - ** Iterable) ObjectBlueprint(c, blueprint, null)}. 
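The constructor lookup described above reduces to Class.getConstructor over the property types in iteration order. A runnable sketch of exactly the field_a/field_b example from the comment (the class name Demo is hypothetical):

    import java.lang.reflect.Constructor;

    public class Demo {
        public final int field_a;
        public final float field_b;
        public Demo(float field_b, int field_a) {
            this.field_b = field_b;
            this.field_a = field_a;
        }
        public static void main(String[] args) throws Exception {
            // ctor_params ["field_b", "field_a"] resolves to the types (float, int):
            Constructor<Demo> ctor = Demo.class.getConstructor(float.class, int.class);
            Demo d = ctor.newInstance(2.5f, 7);
            System.out.println(d.field_a + " " + d.field_b);   // prints: 7 2.5
        }
    }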
- ** - ** @see #ObjectBlueprint(Class, Map, Iterable) - */ - public ObjectBlueprint(Class c, Map blueprint) - throws NoSuchFieldException, NoSuchMethodException { - this(c, blueprint, (Iterable)null); - } - - /** - ** Constructs a blueprint from the given collection of field names. This - ** constructor just creates the appropriate blueprint map to delegate to - ** {@link #ObjectBlueprint(Class, Map)}. - ** - ** @see #ObjectBlueprint(Class, Map) - */ - public ObjectBlueprint(Class c, Collection fields) - throws NoSuchFieldException, NoSuchMethodException { - this(c, makePropertiesFromFields(fields)); - } - - /** - ** Helper method for {@link #ObjectBlueprint(Class, Collection)}. - */ - private static Map makePropertiesFromFields(Collection fields) { - Map blueprint = new LinkedHashMap(); - for (String s: fields) { - blueprint.put(s, null); - } - return blueprint; - } - - /** - ** Constructs a blueprint from the given properties map and the given - ** object constructor. If you don't have the map ready at hand, and it's a - ** mixture of methods and fields, consider using {@link #init(Class)} - ** instead. - ** - ** If {@code ctor} is {@code null}, then any calls to the blueprint's - ** {@link #objectFromMap(Map)} and {@link #newInstance(Object[])} - ** will throw {@link NullPointerException}. - ** - ** @param c The class to represent - ** @param blueprint The map of properties - ** @param ctor The object constructor - ** @throws NoSuchFieldException if Java reflection can't find an inferred - ** field - ** @throws NoSuchMethodException if Java reflection can't find a given - ** method or the inferred constructor - ** @throws NullPointerException if {@code c} or {@code blueprint} is {@code - ** null} - */ - public ObjectBlueprint(Class c, Map blueprint, Constructor ctor) - throws NoSuchFieldException, NoSuchMethodException { - if (c == null || blueprint == null) { throw new NullPointerException(); } - cls = c; - properties = blueprint; - setProperties(); - constructor = ctor; - } - - /** - ** Helper method for the constructors. Sets {@link #prop_fields}, - ** {@link #prop_methods} and {@link #param_type} from {@link #properties}. - */ - private void setProperties() throws NoSuchFieldException, NoSuchMethodException { - assert(properties != null); - prop_fields.clear(); - prop_methods.clear(); - param_type.clear(); - - for (Map.Entry en: properties.entrySet()) { - String property = en.getKey(); - String method_name = en.getValue(); - - if (method_name == null || method_name.length() == 0) { - Field f = cls.getField(property); - prop_fields.put(property, f); - param_type.put(property, f.getType()); - - } else { - Method m = cls.getMethod(method_name); - if (m.getParameterTypes().length > 0) { - throw new IllegalArgumentException("Bad blueprint: getter method " +method_name+ " for property " +property+ " takes more than 0 arguments"); - } - prop_methods.put(property, m); - param_type.put(property, m.getReturnType()); - } - } - } - - /** - ** Returns a builder for an {@link ObjectBlueprint} for the given class. - ** This should make construction of blueprints neater in source code, eg: - ** - ** ObjectBlueprint bprint = - ** ObjectBlueprint.init(MyClass.class) - ** .addFields("name", "origin", "date") - ** .addMethod("height", "getHeight") - ** .addMethod("width", "getWidth") - ** .build(); - ** - ** - ** @throws NullPointerException if {@code cls} is {@code null} - ** @see
Builder - ** Pattern - */ - public static Builder init(Class cls) { - return new Builder(cls); - } - - public Class getObjectClass() { - return cls; - } - - public Constructor getObjectConstructor() { - return constructor; - } - - /** - ** Constructs a new object by invoking the inferred constructor with the - ** given list of arguments. - ** - ** @throws NullPointerException if no constructor was supplied to this - ** class' own constructor. - ** @see Constructor#newInstance(Object[]) - */ - public T newInstance(Object... initargs) throws InstantiationException, IllegalAccessException, InvocationTargetException { - return constructor.newInstance(initargs); - } - - /** - ** Map of primitive classes to their object classes. - */ - final private static Map, Class> boxes = new IdentityHashMap, Class>(); - static { - boxes.put(boolean.class, Boolean.class); - boxes.put(byte.class, Byte.class); - boxes.put(char.class, Character.class); - boxes.put(short.class, Short.class); - boxes.put(int.class, Integer.class); - boxes.put(long.class, Long.class); - boxes.put(float.class, Float.class); - boxes.put(double.class, Double.class); - } - - /** - ** Casts a value to the object type for the given primitive type. - ** - ** @param cls The primitive class - ** @param val The value to cast - ** @throws ClassCastException if the cast cannot be made - ** @throws IllegalArgumentException if the input class is not a primitive - */ - protected static Object boxCast(Class cls, Object val) throws ClassCastException { - Class tcls = boxes.get(cls); - if (tcls == null) { - throw new IllegalArgumentException("Input class must be a primitive type."); - } - return tcls.cast(val); - } - - /** - ** Constructs a new object from the given map of properties and their - ** desired values. - ** - ** @param map Map of properties to their desired values. - ** @throws NullPointerException if no constructor has been set - ** @see Constructor#newInstance(Object[]) - */ - public T objectFromMap(Map map) throws InstantiationException, IllegalAccessException, InvocationTargetException { - Object[] initargs = new Object[param_type.size()]; - int i=0; - for (Map.Entry> en: param_type.entrySet()) { - String property = en.getKey(); - Class type = en.getValue(); - Object value = map.get(property); - try { - if (type.isPrimitive()) { - value = boxCast(type, value); - } else { - value = type.cast(value); - } - } catch (ClassCastException e) { - throw new IllegalArgumentException("Parameter for property " +property+ " is not of the correct type should be "+type+" but is "+value.getClass(), e); - } - initargs[i++] = value; - } - return constructor.newInstance(initargs); - } - - /** - ** Returns a map-view of an object. The keys are its properties as defined - ** by the blueprint, and the values are the values of those properties. - ** The map is backed by the object, so changes to the object (if any) are - ** reflected in the map. The map itself is immutable, however (for now; - ** this might be changed later). - ** - ** NOTE: the below implementation is technically not optimal since {@link - ** AbstractMap}'s remove methods (and that of its entry/key/value views) - ** actually iterate over the map looking for the key. However, this - ** shouldn't be an issue since the map doesn't support remove (calling it - ** will only throw {@link UnsupportedOperationException} when the key is - ** found), and most objects only have a small (<20) number of properties. 
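The boxCast helper above exists because Class#cast on a primitive class can never succeed: int.class.isInstance(x) is false for every object, so a value destined for a primitive constructor parameter has to be checked against its wrapper class instead. A two-line illustration:

    Object v = Integer.valueOf(42);
    // int.class.cast(v);               // would always throw ClassCastException
    Object ok = Integer.class.cast(v);  // the lookup boxCast(int.class, v) performs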
- ** If you find that you need such functionality, feel free to implement a - ** more optimal solution. - ** - ** @param object The object to view as a map. - */ - public Map objectAsMap(final T object) { - - return new AbstractMap() { - - final private Set> entrySet = new AbstractSet>() { - - @Override public int size() { - return properties.size(); - } - - @Override public Iterator> iterator() { - return new Iterator>() { - final Iterator> props = properties.entrySet().iterator(); - - /*@Override**/ public boolean hasNext() { - return props.hasNext(); - } - - /*@Override**/ public Map.Entry next() { - Map.Entry en = props.next(); - final String property = en.getKey(); - final String method_name = en.getValue(); - return new Map.Entry() { - /*@Override**/ public String getKey() { return property; } - /*@Override**/ public Object getValue() { return get(property, method_name); } - /*@Override**/ public Object setValue(Object o) { throw new UnsupportedOperationException("Cannot modify an object in this way."); } - }; - } - - /*@Override**/ public void remove() { - throw new UnsupportedOperationException("Cannot modify an object in this way."); - } - - }; - } - - }; - - @Override public int size() { - return properties.size(); - } - - @Override public boolean containsKey(Object property) { - return properties.containsKey(property); - } - - @Override public Object get(Object property) { - return get((String)property, properties.get(property)); - } - - protected Object get(String property, String method_name) { - try { - if (method_name == null || method_name.length() == 0) { - return prop_fields.get(property).get(object); - } else { - return prop_methods.get(method_name).invoke(object); - } - } catch (IllegalAccessException e) { - throw new IllegalStateException(e); - } catch (InvocationTargetException e) { - throw new IllegalStateException(e); - } - } - - @Override public Set> entrySet() { - return entrySet; - } - - }; - } - - /** - ** Builder for a {@link ObjectBlueprint}. This class is protected; use - ** {@link ObjectBlueprint#init(Class)} to create one of these. - */ - protected static class Builder { - - final public Class cls; - - protected Iterable ctor_params; - - protected Constructor constructor; - - protected boolean use_ctor; - - /** - ** Properties map. This uses {@link LinkedHashMap}, which iterates through - ** the properties in the same order in which they were added to the builder. - */ - final protected Map props = new LinkedHashMap(); - - /** - ** Construct a builder for an {@link ObjectBlueprint} for the given class. - ** - ** @throws NullPointerException if {@code c} is {@code null} - */ - public Builder(Class c) { - if (c == null) { throw new NullPointerException(); } - cls = c; - } - - /** - ** @return {@code this} - */ - public Builder addMethod(String property, String method_name) { - props.put(property, method_name); - return this; - } - - /** - ** @return {@code this} - */ - public Builder addField(String field) { - props.put(field, null); - return this; - } - - /** - ** @return {@code this} - */ - public Builder addMethods(Map properties) { - props.putAll(properties); - return this; - } - - /** - ** @return {@code this} - */ - public Builder addFields(Collection fields) { - for (String field: fields) { - props.put(field, null); - } - return this; - } - - /** - ** @return {@code this} - */ - public Builder addFields(String... 
fields) { - for (String field: fields) { - props.put(field, null); - } - return this; - } - - /** - ** Sets an object constructor, and tell {@link #build()} to use {@link - ** ObjectBlueprint#ObjectBlueprint(Class, Map, Constructor)}. - ** - ** @return {@code this} - */ - public Builder setConstructor(Constructor ctor) { - use_ctor = true; - constructor = ctor; - return this; - } - - /** - ** Sets the constructor parameters, and tell {@link #build()} to use {@link - ** ObjectBlueprint#ObjectBlueprint(Class, Map, Iterable)}. (This is the - ** default). - ** - ** @return {@code this} - */ - public Builder setCtorParams(Iterable params) { - use_ctor = false; - ctor_params = params; - return this; - } - - /** - ** Build a blueprint from the properties given to the builder so far. - ** - ** @return The built blueprint - ** @see ObjectBlueprint#ObjectBlueprint(Class, Map) - */ - public ObjectBlueprint build() throws NoSuchFieldException, NoSuchMethodException { - if (use_ctor) { - return new ObjectBlueprint(cls, props, constructor); - } else { - return new ObjectBlueprint(cls, props, ctor_params); - } - } - - } - -} diff --git a/src/plugins/Library/io/ObjectStreamReader.java b/src/plugins/Library/io/ObjectStreamReader.java deleted file mode 100644 index 05b46786..00000000 --- a/src/plugins/Library/io/ObjectStreamReader.java +++ /dev/null @@ -1,21 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io; - -import java.io.InputStream; -import java.io.IOException; - -/** -** Simpler version of {@link java.io.ObjectInput} that has only one method. -** -** @author infinity0 -*/ -public interface ObjectStreamReader { - - /** - ** Read and return the object from the given stream. - */ - public T readObject(InputStream is) throws IOException; - -} diff --git a/src/plugins/Library/io/ObjectStreamWriter.java b/src/plugins/Library/io/ObjectStreamWriter.java deleted file mode 100644 index 1843c740..00000000 --- a/src/plugins/Library/io/ObjectStreamWriter.java +++ /dev/null @@ -1,21 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io; - -import java.io.OutputStream; -import java.io.IOException; - -/** -** Simpler version of {@link java.io.ObjectOutput} that has only one method. -** -** @author infinity0 -*/ -public interface ObjectStreamWriter { - - /** - ** Write the given object to the given stream. - */ - public void writeObject(T o, OutputStream os) throws IOException; - -} diff --git a/src/plugins/Library/io/YamlReaderWriter.java b/src/plugins/Library/io/YamlReaderWriter.java deleted file mode 100644 index 4c3118e6..00000000 --- a/src/plugins/Library/io/YamlReaderWriter.java +++ /dev/null @@ -1,226 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.io; - -import plugins.Library.io.DataFormatException; - -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.error.YAMLException; -import org.yaml.snakeyaml.error.Mark; -import org.yaml.snakeyaml.Loader; -import org.yaml.snakeyaml.Dumper; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.representer.Representer; -import org.yaml.snakeyaml.representer.Represent; -import org.yaml.snakeyaml.nodes.Node; -import org.yaml.snakeyaml.nodes.ScalarNode; -import org.yaml.snakeyaml.nodes.MappingNode; -import org.yaml.snakeyaml.constructor.Constructor; -import org.yaml.snakeyaml.constructor.AbstractConstruct; - -import java.util.Collections; -import java.util.Arrays; -import java.util.Map; -import java.util.LinkedHashMap; -import java.util.concurrent.Semaphore; -import java.io.File; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.IOException; - -/* class definitions added to the extended Yaml processor */ -import plugins.Library.io.serial.Packer; -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermPageEntry; -import plugins.Library.index.TermIndexEntry; -import plugins.Library.index.TermTermEntry; -import freenet.keys.FreenetURI; - - -/** -** Converts between an object and a stream containing a YAML document. By -** default, this uses a {@link Yaml} processor with additional object and tag -** definitions relevant to the Library plugin. -** -** (Ideally this would implement {@link java.io.ObjectInput} and {@link -** java.io.ObjectOutput} but they have too many methods to bother with...) -** -** @see Yaml -** @see Loader -** @see Dumper -** @author infinity0 -*/ -public class YamlReaderWriter -implements ObjectStreamReader, ObjectStreamWriter { - - final public static String MIME_TYPE = "text/yaml"; - final public static String FILE_EXTENSION = ".yml"; - - final static int MAX_PARALLEL = 1; // Limited by memory mainly. If memory is no object it could be limited by threads. - // Each Yaml instance uses a *significant* amount of memory... - static final Semaphore parallelLimiter = new Semaphore(MAX_PARALLEL); - - public YamlReaderWriter() { - } - - /*@Override**/ public Object readObject(InputStream is) throws IOException { - parallelLimiter.acquireUninterruptibly(); - try { - return makeYAML().load(new InputStreamReader(is, "UTF-8")); - } catch (YAMLException e) { - throw new DataFormatException("Yaml could not process the stream: " + is, e, is, null, null); - } finally { - parallelLimiter.release(); - } - } - - /*@Override**/ public void writeObject(Object o, OutputStream os) throws IOException { - parallelLimiter.acquireUninterruptibly(); - try { - makeYAML().dump(o, new OutputStreamWriter(os, "UTF-8")); - } catch (YAMLException e) { - throw new DataFormatException("Yaml could not process the object", e, o, null, null); - } finally { - parallelLimiter.release(); - } - } - - /** We do NOT keep this thread-local, because the Composer is only cleared after - * the next call to load(), so it can persist with a lot of useless data if we - * then use a different thread. So lets just construct them as needed. 
*/ - private Yaml makeYAML() { - DumperOptions opt = new DumperOptions(); - opt.setWidth(Integer.MAX_VALUE); - opt.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); - return new Yaml(new Loader(new ExtendedConstructor()), - new Dumper(new ExtendedRepresenter(), opt)); - } - - final public static ObjectBlueprint tebp_term; - final public static ObjectBlueprint tebp_index; - final public static ObjectBlueprint tebp_page; - static { - try { - tebp_term = new ObjectBlueprint(TermTermEntry.class, Arrays.asList("subj", "rel", "term")); - tebp_index = new ObjectBlueprint(TermIndexEntry.class, Arrays.asList("subj", "rel", "index")); - tebp_page = new ObjectBlueprint(TermPageEntry.class, Arrays.asList("subj", "rel", "page", "title", "positions", "posFragments")); - } catch (NoSuchFieldException e) { - throw new AssertionError(e); - } catch (NoSuchMethodException e) { - throw new AssertionError(e); - } - } - - /************************************************************************ - ** DOCUMENT - */ - public static class ExtendedRepresenter extends Representer { - - public ExtendedRepresenter() { - this.representers.put(FreenetURI.class, new Represent() { - /*@Override**/ public Node representData(Object data) { - return representScalar("!FreenetURI", ((FreenetURI) data).toString()); - } - }); - this.representers.put(Packer.BinInfo.class, new Represent() { - /*@Override**/ public Node representData(Object data) { - Packer.BinInfo inf = (Packer.BinInfo)data; - Map map = Collections.singletonMap(inf.getID(), inf.getWeight()); - return representMapping("!BinInfo", map, true); - } - }); - this.representers.put(TermTermEntry.class, new RepresentTermEntry(tebp_term)); - this.representers.put(TermIndexEntry.class, new RepresentTermEntry(tebp_index)); - this.representers.put(TermPageEntry.class, new RepresentTermEntry(tebp_page)); - } - - public class RepresentTermEntry implements Represent { - - final ObjectBlueprint blueprint; - final String tag; - - public RepresentTermEntry(ObjectBlueprint bp) { - blueprint = bp; - tag = "!" 
+ bp.getObjectClass().getSimpleName(); - } - - /*@Override**/ public Node representData(Object data) { - return representMapping(tag, blueprint.objectAsMap((T)data), true); - } - - } - - } - - - /************************************************************************ - ** DOCUMENT - */ - public static class ExtendedConstructor extends Constructor { - public ExtendedConstructor() { - this.yamlConstructors.put("!FreenetURI", new AbstractConstruct() { - /*@Override**/ public Object construct(Node node) { - String uri = (String) constructScalar((ScalarNode)node); - try { - return new FreenetURI(uri); - } catch (java.net.MalformedURLException e) { - throw new ConstructorException("while constructing a FreenetURI", node.getStartMark(), "found malformed URI " + uri, null); - } - } - }); - this.yamlConstructors.put("!BinInfo", new AbstractConstruct() { - /*@Override**/ public Object construct(Node node) { - Map map = (Map) constructMapping((MappingNode)node); - if (map.size() != 1) { - throw new ConstructorException("while constructing a Packer.BinInfo", node.getStartMark(), "found incorrectly sized map data " + map, null); - } - for (Map.Entry en: map.entrySet()) { - return new Packer.BinInfo(en.getKey(), (Integer)en.getValue()); - } - throw new AssertionError(); - } - }); - this.yamlConstructors.put("!TermTermEntry", new ConstructTermEntry(tebp_term)); - this.yamlConstructors.put("!TermIndexEntry", new ConstructTermEntry(tebp_index)); - this.yamlConstructors.put("!TermPageEntry", new ConstructTermEntry(tebp_page)); - } - - public class ConstructTermEntry extends AbstractConstruct { - - final ObjectBlueprint blueprint; - - public ConstructTermEntry(ObjectBlueprint bp) { - blueprint = bp; - } - - /*@Override**/ public Object construct(Node node) { - Map map = (Map)constructMapping((MappingNode)node); - map.put("rel", new Float(((Double)map.get("rel")).floatValue())); - try { - return blueprint.objectFromMap(map); - } catch (Exception e) { - //java.lang.InstantiationException - //java.lang.IllegalAccessException - //java.lang.reflect.InvocationTargetException - throw new ConstructorException("while constructing a " + blueprint.getObjectClass().getSimpleName(), node.getStartMark(), "could not instantiate map " + map, null, e); - } - } - - } - } - - // for some reason SnakeYAML doesn't make this class publicly constructible - public static class ConstructorException extends org.yaml.snakeyaml.constructor.ConstructorException { - public ConstructorException(String context, Mark contextMark, String problem, Mark problemMark) { - super(context, contextMark, problem, problemMark); - } - public ConstructorException(String context, Mark contextMark, String problem, Mark problemMark, Throwable cause) { - super(context, contextMark, problem, problemMark, cause); - } - } - - -} diff --git a/src/plugins/Library/io/serial/Archiver.java b/src/plugins/Library/io/serial/Archiver.java deleted file mode 100644 index 62ddd136..00000000 --- a/src/plugins/Library/io/serial/Archiver.java +++ /dev/null @@ -1,54 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.exec.TaskAbortException; - -import java.util.Collection; -import java.util.Map; - -/** -** An interface that handles a single {@link Serialiser.Task}. 
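To make the Archiver contract concrete, a hypothetical in-memory implementation (MemoryArchiver is invented and imports are elided; the PullTask/PushTask fields meta and data, and the TaskAbortException(String, Throwable) constructor, match their use elsewhere in the removed code):

    // Sketch only: stores pushed data in a map keyed by task metadata.
    public class MemoryArchiver<T> implements Archiver<T> {
        private final Map<Object, T> store = new HashMap<Object, T>();
        public void pull(PullTask<T> task) throws TaskAbortException {
            T data = store.get(task.meta);
            if (data == null) { throw new TaskAbortException("no data under " + task.meta, null); }
            task.data = data;
        }
        public void push(PushTask<T> task) throws TaskAbortException {
            if (task.meta == null) { task.meta = java.util.UUID.randomUUID().toString(); }
            store.put(task.meta, task.data);
        }
    }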
-** -** @author infinity0 -*/ -public interface Archiver extends Serialiser { - - /** - ** Execute a {@link PullTask}, returning only when the task is done. - ** - ** Implementations of this method which act on a recursive data structure - ** that is designed to be partially loaded (such as {@link - ** plugins.Library.util.SkeletonMap}), should only pull the minimal data - ** necessary to give a consistent instance of that data structure. This is - ** to provide a finer degree of control over the pulling process, which is - ** likely to be needed by such a data structure. - ** - ** In other words, the data structure should not be automatically populated - ** after it is formed. - ** - ** @param task The task to execute - */ - public void pull(PullTask task) throws TaskAbortException; - - /** - ** Execute a {@link PushTask}, returning only when the task is done. - ** - ** Implementations of this method which act on a recursive data structure - ** that is designed to be partially loaded (such as {@link - ** plugins.Library.util.SkeletonMap}), should ensure that the data passed - ** into this method is minimal in terms of that data structure. This is to - ** provide a finer degree of control over the pulling process, which is - ** likely to be needed by such a data structure. - ** - ** If the data is not minimal, implementations should throw {@link - ** IllegalArgumentException} rather than automatically depopulating - ** the data structure. - ** - ** @param task The task to execute - */ - public void push(PushTask task) throws TaskAbortException; - -} diff --git a/src/plugins/Library/io/serial/FileArchiver.java b/src/plugins/Library/io/serial/FileArchiver.java deleted file mode 100644 index 34290471..00000000 --- a/src/plugins/Library/io/serial/FileArchiver.java +++ /dev/null @@ -1,198 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.channels.FileLock; - -import plugins.Library.io.ObjectStreamReader; -import plugins.Library.io.ObjectStreamWriter; -import plugins.Library.io.serial.Serialiser.Task; -import plugins.Library.util.exec.SimpleProgress; -import plugins.Library.util.exec.TaskAbortException; - -/** -** Converts between a map of {@link String} to {@link Object}, and a file on -** disk. An {@link ObjectStreamReader} and an {@link ObjectStreamWriter} is -** used to do the hard work once the relevant streams have been established. -** -** This class expects {@link Task#meta} to be of type {@link String}, or an -** array whose first element is of type {@link String}. 
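For the FileArchiver that follows, a hypothetical push (parentDir and someMap are invented; the constructor argument order follows the (reader/writer, prefix, suffix, extension, parent) form in the removed code, and the exact file naming is only partially visible in this patch text):

    // Sketch only: serialise one object to YAML under parentDir.
    FileArchiver<Object> ar = new FileArchiver<Object>(
            new YamlReaderWriter(), "idx_", null, ".yml", parentDir);
    PushTask<Object> task = new PushTask<Object>(someMap, "main");
    ar.push(task); // writes a file named from prefix + meta (+ suffix + extension),
                   // holding an exclusive FileLock while writing, per getFile()/push() below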
-** -** @author infinity0 -*/ -public class FileArchiver -implements Archiver, LiveArchiver { - - // DEBUG - private static boolean testmode = false; - public static void setTestMode() { System.out.println("FileArchiver will now randomly pause 5-10s for each task, to simulate network speeds"); testmode = true; } - public static void randomWait(SimpleProgress p) { - int t = (int)(Math.random()*5+5); - p.addPartKnown(t, true); - for (int i=0; i FileArchiver(S rw, String pre, String suf, String ext, File parent) { - this(rw, rw, pre, suf, ext, parent); - } - - public FileArchiver(S rw, boolean rnd, String ext, String prefix, String suffix, File parent) { - this(rw, rw, rnd, ext, prefix, suffix, parent); - } - - public FileArchiver(ObjectStreamReader r, ObjectStreamWriter w, String pre, String suf, String ext, File parent) { - reader = r; - writer = w; - prefix = (pre == null)? "": pre; - suffix = (suf == null)? "": suf; - extension = (ext == null)? "": ext; - random = false; - parentDir = parent; - } - - public FileArchiver(ObjectStreamReader r, ObjectStreamWriter w, boolean rnd, String ext, String prefix, String suffix, File parent) { - reader = r; - writer = w; - this.suffix = suffix; - this.prefix = prefix; - extension = (ext == null)? "": ext; - random = rnd; - parentDir = parent; - } - - protected File getFile(Object meta) { - if (meta instanceof File) { return (File)meta; } - - String main = "", part = ""; - if (meta instanceof String) { - main = (String)(meta); - } else if (meta instanceof Object[]) { - Object[] arr = (Object[])meta; - if (arr.length > 0 && arr[0] instanceof String) { - main = (String)arr[0]; - if (arr.length > 1) { - StringBuilder str = new StringBuilder(arr[1].toString()); - for (int i=2; i t) throws TaskAbortException { - File file = getFile(t.meta); - try { - FileInputStream is = new FileInputStream(file); - try { - FileLock lock = is.getChannel().lock(0L, Long.MAX_VALUE, true); // shared lock for reading - try { - t.data = (T)reader.readObject(is); - } finally { - lock.release(); - } - } finally { - try { is.close(); } catch (IOException f) { } - } - } catch (IOException e) { - throw new TaskAbortException("FileArchiver could not complete pull on " + file, e, true); - } catch (RuntimeException e) { - throw new TaskAbortException("FileArchiver could not complete pull on " + file, e); - } - } - - /*@Override**/ public void push(PushTask t) throws TaskAbortException { - if (random) { t.meta = java.util.UUID.randomUUID().toString(); } - File file = getFile(t.meta); - try { - FileOutputStream os = new FileOutputStream(file); - try { - FileLock lock = os.getChannel().lock(); - try { - writer.writeObject(t.data, os); - } finally { - lock.release(); - } - } finally { - try { os.close(); } catch (IOException f) { } - } - } catch (IOException e) { - throw new TaskAbortException("FileArchiver could not complete push on " + file, e, true); - } catch (RuntimeException e) { - throw new TaskAbortException("FileArchiver could not complete push on " + file, e); - } - } - - /*@Override**/ public void pullLive(PullTask t, SimpleProgress p) throws TaskAbortException { - try { - pull(t); - if (testmode) { randomWait(p); } - else { p.addPartKnown(0, true); } - } catch (TaskAbortException e) { - p.abort(e); - } - } - - /*@Override**/ public void pushLive(PushTask t, SimpleProgress p) throws TaskAbortException { - try { - push(t); - if (testmode) { randomWait(p); } - else { p.addPartKnown(0, true); } - } catch (TaskAbortException e) { - p.abort(e); - } - } - -} diff --git 
a/src/plugins/Library/io/serial/IterableSerialiser.java b/src/plugins/Library/io/serial/IterableSerialiser.java deleted file mode 100644 index ee457833..00000000 --- a/src/plugins/Library/io/serial/IterableSerialiser.java +++ /dev/null @@ -1,53 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.exec.TaskAbortException; - -/** -** An interface that handles an iterable group of {@link Serialiser.Task}s. -** -** @author infinity0 -*/ -public interface IterableSerialiser extends Archiver { - - /** - ** Execute everything in a group of {@link PullTask}s, returning only when - ** they are all done. - ** - ** @param tasks The group of tasks to execute - */ - public void pull(Iterable> tasks) throws TaskAbortException; - - // FIXME OPT NORM - // Split up pull() into startPull and endPull. startPull would do the actual - // fetch, probably to a Bucket, and endPull would do the YAML parsing. - // This will allow us to run many slow startPull's at once, while limiting the - // number of memory-consuming endPull's so that we don't end up with a backlog - // of lots of parsed data that we don't yet need. - // See comments in FreenetArchiver.pull, ParallelSerialiser.createPullJob. - - /** - ** Execute everything in a group of {@link PushTask}s, returning only when - ** they are all done. - ** - ** @param tasks The group of tasks to execute - */ - public void push(Iterable> tasks) throws TaskAbortException; - - // FIXME OPT HIGH - // Split up push() into startPush and endPush. startPush will return as soon - // as there are valid .meta values for each task. endPush will wait for the - // actual insert (or whatever) to finish, and then clear the .data. - // Currently we have a dirty hack (for FreenetArchiver), which achieves a - // similar outcome: - // FreenetArchiver returns as soon as it has a URI and pretends it's finished. - // The high level caller using FreenetArchiver must call an explicit - // waitForAsyncInserts() method on it before finishing. - // Packer.push() is more or less unchanged as a result. - // Ultimately this is all necessary because inserts can take a long time and - // we need to limit memory usage. - -} diff --git a/src/plugins/Library/io/serial/LiveArchiver.java b/src/plugins/Library/io/serial/LiveArchiver.java deleted file mode 100644 index 85d03422..00000000 --- a/src/plugins/Library/io/serial/LiveArchiver.java +++ /dev/null @@ -1,46 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.TaskAbortException; - -/** -** An interface that handles a single {@link Serialiser.Task} and sends live -** updates of its {@link Progress}. -** -** @author infinity0 -*/ -public interface LiveArchiver extends Archiver { - - /** - ** Executes a {@link PullTask} and update the progress associated with it. - ** {@link TaskAbortException}s are stored in the progress object, rather - ** than thrown. 
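A hypothetical caller, to make this contract concrete (archiver, task and progress are assumed to exist; the join() behaviour matches its use in ParallelSerialiser later in this patch):

    // Sketch only: pullLive() reports task failure through the progress object.
    archiver.pullLive(task, progress);
    try {
        progress.join(); // returns normally on success...
    } catch (TaskAbortException e) {
        // ...or rethrows the exact exception that aborted the task; if an
        // implementation fails to record the outcome, join() blocks forever.
    } catch (InterruptedException e) {
        // interrupted while waiting
    }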
- ** - ** Implementations must also modify the state of the progress object such - ** that after the operation completes, all threads blocked on {@link - ** Progress#join()} will either return normally or throw the exact {@link - ** TaskAbortException} that caused it to abort. - ** - ** '''If this does not occur, deadlock will result'''. - */ - public void pullLive(PullTask task, P p) throws TaskAbortException; - - /** - ** Executes a {@link PushTask} and update the progress associated with it. - ** {@link TaskAbortException}s are stored in the progress object, rather - ** than thrown. - ** - ** Implementations must also modify the state of the progress object such - ** that after the operation completes, all threads blocked on {@link - ** Progress#join()} will either return normally or throw the exact {@link - ** TaskAbortException} that caused it to abort. - ** - ** '''If this does not occur, deadlock will result'''. - */ - public void pushLive(PushTask task, P p) throws TaskAbortException; - -} diff --git a/src/plugins/Library/io/serial/MapSerialiser.java b/src/plugins/Library/io/serial/MapSerialiser.java deleted file mode 100644 index 89a65888..00000000 --- a/src/plugins/Library/io/serial/MapSerialiser.java +++ /dev/null @@ -1,50 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.exec.TaskAbortException; - -import java.util.Map; - -/** -** An interface that handles a map of {@link Serialiser.Task}s. As well as the -** metadata associated with each individual task, each map also has an -** associated metadata for the entire structure. -** -** Data structures which have a map that can be grouped into submaps that each -** have an associated metadata for the entire submap, can just use the methods -** provided here, for each submap-metadata pair. -** -** @author infinity0 -*/ -public interface MapSerialiser extends Serialiser { - - /** - ** Execute everything in a map of {@link PullTask}s, returning only when - ** they are all done. - ** - ** Implementations should only execute the tasks for which the metadata - ** is not null. - ** - ** @param tasks The map of tasks to execute - ** @param mapmeta The map-wide metadata - */ - public void pull(Map> tasks, Object mapmeta) throws TaskAbortException; - - /** - ** Execute everything in a map of {@link PushTask}s, returning only when - ** they are all done. - ** - ** Implementations should only execute the tasks for which the data is not - ** null, but may require that an entire submap (which might include tasks - ** with null data and non-null metadata) be passed into the method in order - ** to perform better optimisation. - ** - ** @param tasks The map of tasks to execute - ** @param mapmeta The map-wide metadata - */ - public void push(Map> tasks, Object mapmeta) throws TaskAbortException; - -} diff --git a/src/plugins/Library/io/serial/Packer.java b/src/plugins/Library/io/serial/Packer.java deleted file mode 100644 index 363155ec..00000000 --- a/src/plugins/Library/io/serial/Packer.java +++ /dev/null @@ -1,995 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
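Before Packer.java below, a hypothetical call against the MapSerialiser contract just described (msrl, the Data type and the metadata values are invented; BinInfo is the bean Packer defines later in this patch):

    // Sketch only: pull two entries whose metadata names their bin and weight.
    Map<String, PullTask<Data>> tasks = new HashMap<String, PullTask<Data>>();
    tasks.put("foo", new PullTask<Data>(new Packer.BinInfo(0L, 120)));
    tasks.put("bar", new PullTask<Data>(new Packer.BinInfo(1L, 80)));
    msrl.pull(tasks, mapmeta); // implementations skip entries whose metadata is null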
*/
-package plugins.Library.io.serial;
-
-import plugins.Library.io.serial.Serialiser.*;
-import plugins.Library.util.IdentityComparator;
-import plugins.Library.util.concurrent.ObjectProcessor;
-import plugins.Library.util.exec.TaskAbortException;
-import plugins.Library.util.exec.TaskCompleteException;
-
-import java.util.Collections;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Set;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.TreeSet;
-
-import freenet.support.Logger;
-
-/**
-** {@link MapSerialiser} that packs a map of weighable elements (eg. objects
-** with a {@code size()} method) into a group of fixed-capacity bins. There are
-** two main modes of operation, depending on the value of {@link #NO_TINY}; in
-** addition to this, there is an {@link #aggression} attribute that affects
-** the speed vs. optimality of the packing.
-**
-** The class requires that the weight of each element is never greater than
-** BIN_CAP, so that an element can always fit entirely into one additional bin;
-** {@link IllegalArgumentException} will be thrown when elements that violate
-** this are encountered.
-**
-** This class is useful for packing together a collection of root B-tree nodes.
-**
-** To use this class, the programmer needs to extend {@link Scale} to override
-** {@link Scale#weigh(Object)}, and pass it into the constructor. The other
-** methods can also be optionally overridden if an alternate metadata format is
-** desired.
-**
-** The programmer might also wish to override the following:
-**
-** * {@link #generator()}
-** * {@link #preprocessPullBins(Map, Collection)}
-** * {@link #preprocessPushBins(Map, Collection)}
-**
-** Note that this class effectively '''disables''' the "duplicate" detection of
-** {@link ProgressTracker}; pull/push tasks are deemed to be equal if their
-** meta/data point to the same object, but this class dynamically generates the
-** meta/data for each bin on each pull/push request. This should not interfere
-** with correctness (as long as the relevant locks are in place); it is just
-** slightly inefficient when requests for the same bin are given to a child
-** serialiser that uses {@link ProgressTracker} to detect duplicates.
-**
-** If you plan on using a child serialiser that uses some other method to
-** detect duplicates, bear in mind that {@link #pullUnloaded(Map, Object)}
-** actually '''depends''' on said behaviour for correctness; see its source
-** code for more details.
-**
-** @author infinity0
-*/
-public class Packer<K, T>
-implements MapSerialiser<K, T>,
-           Serialiser.Composite<IterableSerialiser<Map<K, T>>> {
-
-	private static volatile boolean logMINOR;
-	private static volatile boolean logDEBUG;
-
-	static {
-		Logger.registerClass(Packer.class);
-	}
-
-	/**
-	** Maximum weight of a bin (except one; see {@link #push(Map, Object)} for
-	** details).
-	*/
-	final public int BIN_CAP;
-
-	/**
-	** {@code BIN_CAP}/2 (or 1/2 less than this, if {@code BIN_CAP} is odd).
-	*/
-	final public int BIN_CAPHF;
-
-	/**
-	** Whether the "tiny" (weight less than {@code BIN_CAP/2}) bin will be kept
-	** as a separate bin, or merged into the next-smallest.
-	*/
-	final public boolean NO_TINY;
-
-	/**
-	** The {@link Scale} used to weigh bins and read/make their metadata.
-	*/
-	final public Scale<T> scale;
-
-	/**
-	** How aggressive the bin-packing algorithm is; eg. whether it will load
-	** bins that are not already loaded, in order to optimise the packing.
-	**
-	** ;0 : discount all unloaded bins (unrecommended)
-	** ;1 (default) : discount all unchanged bins after the BFD step
-	** ;2 : discount all unchanged bins after the redistribution step
-	** ;3 : load all bins before doing any packing (ie. re-pack from scratch)
-	*/
-	private Integer aggression = 1;
-
-	final protected IterableSerialiser<Map<K, T>> subsrl;
-	public IterableSerialiser<Map<K, T>> getChildSerialiser() { return subsrl; }
-
-	/**
-	** Constructs a Packer with the given parameters.
-	**
-	** If you are using a child {@link Serialiser} that detects and discards
-	** duplicate pull/push requests using a mechanism '''other''' than the
-	** default {@link ProgressTracker}, please first read the section relating
-	** to this in the class description.
-	**
-	** @param s The child serialiser
-	** @param sc The scale used to weigh elements and read/make their metadata
-	** @param c The maximum weight of a bin
-	** @param n Whether to merge or overlook the "tiny" element
-	*/
-	public Packer(IterableSerialiser<Map<K, T>> s, Scale<T> sc, int c, boolean n) {
-		if (s == null) {
-			throw new IllegalArgumentException("Can't have a null child serialiser");
-		}
-		if (c <= 0) {
-			throw new IllegalArgumentException("Capacity must be greater than zero.");
-		}
-		subsrl = s;
-		scale = sc;
-		BIN_CAP = c;
-		BIN_CAPHF = c>>1;
-		NO_TINY = n;
-	}
-
-	/**
-	** Constructs a Packer with the given parameters and {@link #NO_TINY}
-	** set to {@code true}.
-	**
-	** If you are using a child {@link Serialiser} that detects and discards
-	** duplicate pull/push requests using a mechanism '''other''' than the
-	** default {@link ProgressTracker}, please first read the section relating
-	** to this in the class description.
-	**
-	** @param s The child serialiser
-	** @param sc The scale used to weigh elements and read/make their metadata
-	** @param c The maximum weight of a bin
-	*/
-	public Packer(IterableSerialiser<Map<K, T>> s, Scale<T> sc, int c) {
-		this(s, sc, c, true);
-	}
-
-	/**
-	** Atomically set the {@link #aggression}.
-	*/
-	public void setAggression(int i) {
-		if (i < 0 || i > 3) {
-			throw new IllegalArgumentException("Invalid aggression value: only 0, 1, 2, 3 are allowed.");
-		}
-		synchronized (this) {
-			aggression = i;
-		}
-	}
-
-	/**
-	** Atomically get the {@link #aggression}.
-	*/
-	public int getAggression() {
-		synchronized (this) {
-			return aggression;
-		}
-	}
-
-	/**
-	** Creates an {@link IDGenerator} for use by the rest of the algorithms of
-	** this class.
-	**
-	** The default generator '''requires''' all keys of the backing map to be
-	** present in the input task map, since the default {@link IDGenerator}
-	** does not have any other way of knowing what bin IDs were handed out in
-	** previous push operations, and it must avoid giving out duplicate IDs.
-	**
-	** You can bypass this requirement by extending {@link IDGenerator} so that
-	** it can work out that information (eg. using an algorithm that generates
-	** a UUID), and overriding {@link #generator()}. (ID generation based on
-	** bin ''content'' is ''not'' supported, and is not a priority at present.)
-	*/
-	protected IDGenerator generator() {
-		return new IDGenerator();
-	}
-
-	/**
-	** Any tasks that need to be done after the bin tasks have been formed,
-	** but before they have been passed to the child serialiser. The default
-	** implementation does nothing.
-	*/
-	protected void preprocessPullBins(Map<K, PullTask<T>> tasks, Collection<PullTask<Map<K, T>>> bintasks) { }
-
-	/**
-	** Any tasks that need to be done after the bin tasks have been formed,
-	** but before they have been passed to the child serialiser. The default
-	** implementation does nothing.
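To tie the above together, a hypothetical construction of a Packer (the key/element types, binSerialiser and the capacity 1024 are invented; the generic parameters are reconstructed guesses, as this patch text has lost its angle brackets):

    // Sketch only: weigh each element by its collection size.
    Packer.Scale<Collection<String>> scale = new Packer.Scale<Collection<String>>() {
        @Override public int weigh(Collection<String> element) { return element.size(); }
    };
    Packer<String, Collection<String>> packer =
            new Packer<String, Collection<String>>(binSerialiser, scale, 1024);
    packer.setAggression(2); // slower but tighter packing, per the 0-3 scale above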
- */ - protected void preprocessPushBins(Map> tasks, Collection>> bintasks) { } - - /** - ** Looks through {@code elems} for {@link PushTask}s with null data and - ** reads its metadata to determine which bin to add it to. - * @throws TaskAbortException - */ - protected SortedSet> initialiseBinSet(SortedSet> bins, Map> elems, Inventory inv, IDGenerator gen, Object mapmeta) throws TaskAbortException { - - Map> binincubator = new HashMap>(); - - for (Map.Entry> en: elems.entrySet()) { - PushTask task = en.getValue(); - if (task.data == null) { - if (task.meta == null) { - throw new IllegalArgumentException("Packer error: null data and null meta for key" + en.getKey()); - } - Object binID = scale.readMetaID(task.meta); - - Set binegg = binincubator.get(binID); - if (binegg == null) { - binegg = new HashSet(); - binincubator.put(binID, binegg); - } - binegg.add(en.getKey()); - } - } - - HashMap> overflow = null; - for (Map.Entry> en: binincubator.entrySet()) { - Set keys = en.getValue(); - Bin bin = new Bin(BIN_CAP, inv, gen.registerID(en.getKey()), keys); - // Put the data into the bin so we don't repack it unless we need to. - // And so that the assertions in the next method work, which assume that the data is packed already (e.g. if we have 2 bins, they contain something). - for(K k : keys) { - if(bin.add(k)) { - if(bin.weight > BIN_CAP) { - // Overflow. Let packBestFitDecreasing fit it in somewhere else. - bin.remove(k); - if(overflow == null) overflow = new HashMap>(); - overflow.put(k, elems.get(k)); - } - } - } - bins.add(bin); - } - - if(overflow != null) { - // Pull the data so that it can be re-packed into a different bin. - // Otherwise, data will be null, so it won't be packed by packBestFitDecreasing. - System.err.println("Repacking: data has grown, "+overflow.size()+" items don't fit, pulling data to repack"); - pullUnloaded(overflow, mapmeta); - } - - // If the data can shrink, we can have multiple bins smaller than BIN_CAPHF - // FIXME: TEST THIS: In current usage, the data can't shrink, so we're okay. - - Bin second; - Bin lightest; - while(bins.size() > 1 && (second = bins.headSet(lightest = bins.last()).last()).filled() < BIN_CAPHF) { - System.err.println("Merging bins: Data has shrunk!"); - bins.remove(lightest); - bins.remove(second); - for (K k: lightest) { - second.add(k); - } - bins.add(second); - } - - return bins; - } - - /** - ** Looks through {@code elems} for {@link PushTask}s with non-null data and - ** packs it into the given collection of bins. - ** - ** NOTE: this method assumes the conditions described in the description - ** for this class. It is up to the calling code to ensure that they hold. 
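As a concrete illustration of the best-fit decreasing step (weights invented): with BIN_CAP = 10 and elements weighing 7, 5, 4, 3 and 1, the elements are visited in descending order. 7 opens bin A; 5 opens bin B (A has only 3 spare); 4 goes to B, the fullest bin that can fit it; 3 goes to A; 1 goes to B. The result is A = {7, 3} and B = {5, 4, 1}, both exactly at capacity.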
- */ - protected void packBestFitDecreasing(SortedSet> bins, Map> elems, Inventory inv, IDGenerator gen) { - - if (NO_TINY && !bins.isEmpty()) { - // locate the single bin heavier than BIN_CAP (if it exists), and keep - // moving the heaviest element from that bin into another bin, until that - // bin weighs more than BIN_CAP / 2 (and both bins will weigh between - // BIN_CAP / 2 and BIN_CAP - - // proof that this always succeeds: - // - // - since we are visiting elements in descending order of weights, we will - // never reach an element that takes the total weight of the second bin - // from strictly less than BIN_CAP / 2 to strictly more than BIN_CAP - // - in other words, we will always "hit" our target weight range of - // BIN_CAP / 2 and BIN_CAP, and the other bin will be in this zone too - // (since the single bin would weigh between BIN_CAP and BIN_CAP * 3/2 - // - Bin heaviest = bins.first(); - if (heaviest.filled() > BIN_CAP) { - Bin second = new Bin(BIN_CAP, inv, gen.nextID(), null); - bins.remove(heaviest); - while (second.filled() < BIN_CAPHF) { - K key = heaviest.last(); - heaviest.remove(key); - second.add(key); - } - bins.add(heaviest); - bins.add(second); - } - } - - // heaviest bin is <= BIN_CAP // FIXME convert back to assertion when happy with this - if(!(bins.isEmpty() || bins.first().filled() <= BIN_CAP)) { - System.err.println("Bins:"); - for(Bin bin : bins) - System.err.println("Bin: "+bin.filled()); - assert(false); - } - // 2nd-lightest bin is >= BIN_CAP / 2 // FIXME convert back to assertion when happy with this - if(bins.size() >= 2) { - if(!(bins.headSet(bins.last()).last().filled() >= BIN_CAPHF)) { - System.err.println("Last but one bin should be at least "+BIN_CAPHF); - for(Bin bin : bins) - System.err.println("Bin: "+bin.filled()); - assert(false); - } - } - - // sort keys in descending weight order of their elements - SortedSet sorted = new TreeSet(Collections.reverseOrder(inv)); - for (Map.Entry> en: elems.entrySet()) { - if (en.getValue().data != null) { - sorted.add(en.getKey()); - } - } - - // go through the sorted set and try to fit the keys into the bins - for (K key: sorted) { - int weight = inv.getWeight(key); - - // get all bins that can fit the key; tailSet() should do this efficiently - SortedSet> subbins = bins.tailSet(new DummyBin(BIN_CAP, BIN_CAP - weight)); - - if (subbins.isEmpty()) { - // if no bin can fit it, then start a new bin - Bin bin = new Bin(BIN_CAP, inv, gen.nextID(), null); - bin.add(key); - bins.add(bin); - - } else { - // get the fullest bin that can fit the key - Bin lowest = subbins.first(); - // TreeSet assumes its elements are immutable. - bins.remove(lowest); - lowest.add(key); - bins.add(lowest); - } - } - - // heaviest bin is <= BIN_CAP - assert(bins.isEmpty() || bins.first().filled() <= BIN_CAP); - // 2nd-lightest bin is >= BIN_CAP / 2 - assert(bins.size() < 2 || bins.headSet(bins.last()).last().filled() >= BIN_CAPHF); - - if (NO_TINY && bins.size() > 1) { - // locate the single bin lighter than BIN_CAP / 2 (if it exists), and - // combine it with the next smallest bin - Bin lightest = bins.last(); - if (lightest.filled() < BIN_CAPHF) { - bins.remove(lightest); - Bin second = bins.last(); - bins.remove(second); - for (K k: lightest) { - second.add(k); - } - bins.add(second); - } - } - } - - /** - ** Given a group of bins, attempts to redistribute the items already - ** contained within them to make the weights more even. 
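For illustration (weights invented): given bins weighing 9 = {4, 3, 2} and 2 = {2}, the difference is 7 and the heaviest bin's lightest element weighs 2 < 7, so it is moved across, giving 7 = {4, 3} and 4 = {2, 2}. The new difference is 3, but the lightest remaining element also weighs 3, which is not strictly smaller, so the heavier bin is finalised and the loop ends with weights 7 and 4 instead of 9 and 2.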
-	*/
-	protected void redistributeWeights(SortedSet<Bin<K>> bins, Inventory<K, T> inv) {
-
-		// keep track of bins we have finalised
-		List<Bin<K>> binsFinal = new ArrayList<Bin<K>>(bins.size());
-
-		// special cases
-		if (bins.size() < 2) { return; }
-
-		// proof that the algorithm halts:
-		//
-		// - let D be the weight difference between the heaviest and lightest bins
-		// - let N be the set containing all maximum and minimum bins
-		// - at each stage of the algorithm, |N| strictly decreases, since the
-		//   selected bin pair is either strictly "evened out", or its heavier
-		//   member is discarded
-		// - when |N| decreases to 0, D strictly decreases, and we get a new N
-		// - D is bounded below by 0
-		// - hence the algorithm terminates
-		//
-		Bin<K> lightest = bins.last();
-		while (!bins.isEmpty()) {
-			// we don't need to find the lightest bin every time
-			Bin<K> heaviest = bins.first();
-			bins.remove(heaviest);
-			int weightdiff = heaviest.filled() - lightest.filled();
-
-			// get the lightest element
-			K feather = heaviest.first();
-			if (inv.getWeight(feather) < weightdiff) {
-				// can be strictly "evened out"
-
-				bins.remove(lightest);
-
-				heaviest.remove(feather);
-				lightest.add(feather);
-				bins.add(heaviest);
-				bins.add(lightest);
-
-				lightest = bins.last();
-
-			} else {
-				// if there does not exist such an element, then it is impossible for the
-				// heaviest bin to be evened out any further, since the loop does not
-				// decrease the load of the lightest bin. So, remove it from the bins set.
-
-				binsFinal.add(heaviest);
-			}
-
-			// TODO NORM some tighter assertions than this
-			assert(bins.isEmpty() || bins.first().filled() - bins.last().filled() <= weightdiff);
-		}
-
-		for (Bin<K> bin: binsFinal) {
-			bins.add(bin);
-		}
-
-	}
-
-	/**
-	** Discards all the bins which remain unchanged from what they were
-	** initialised to contain. This means that we don't re-push something that
-	** is already pushed.
-	*/
-	protected void discardUnchangedBins(SortedSet<Bin<K>> bins, Map<K, PushTask<T>> elems) {
-		Iterator<Bin<K>> it = bins.iterator();
-		while (it.hasNext()) {
-			Bin<K> bin = it.next();
-			if (bin.unchanged()) {
-				it.remove();
-				for (K key: bin) {
-					elems.remove(key);
-				}
-			}
-		}
-	}
-
-	/**
-	** Pulls all {@link PushTask}s with null data. This is used when we need
-	** to push a bin that has been assigned to hold the (unloaded) data of
-	** these tasks.
-	*/
-	protected void pullUnloaded(Map<K, PushTask<T>> tasks, Object meta) throws TaskAbortException {
-		Map<K, PullTask<T>> newtasks = new HashMap<K, PullTask<T>>(tasks.size()<<1);
-		for (Map.Entry<K, PushTask<T>> en: tasks.entrySet()) {
-			PushTask<T> task = en.getValue();
-			if (task.data == null) {
-				newtasks.put(en.getKey(), new PullTask<T>(task.meta));
-			}
-		}
-
-		if (newtasks.isEmpty()) { return; }
-		pull(newtasks, meta);
-
-		for (Map.Entry<K, PushTask<T>> en: tasks.entrySet()) {
-			PushTask<T> task = en.getValue();
-			PullTask<T> newtask = newtasks.get(en.getKey());
-			if (task.data != null) { continue; }
-			if (newtask == null) {
-				// this part of the code should never be reached, because pull/push tasks
-				// are deemed equal if their meta/data point to the same object, and these
-				// objects are re-created from scratch upon each push/pull operation. So a
-				// child serialiser that extends Serialiser.Trackable should never discard
-				// a pull/push task for a bin as a "duplicate" of another task already in
-				// progress, and hence newtask should never be null.
-
-				// this is an inefficiency that doesn't have an easy workaround due to the
-				// design of the serialiser/progress system, but such cases should be rare.
- // external locks should prevent any concurrent modification of the bins; - // it is not the job of the Serialiser to prevent this (and Serialisers in - // general have this behaviour, not just this class). - - // if a future programmer uses a child serialiser that behaves differently, - // we throw this exception to warn them: - throw new IllegalStateException("The child serialiser should *not* discard (what it perceives to be) duplicate requests for bins. See the class documentation for details."); - } else if (newtask.data == null) { - throw new IllegalStateException("Packer did not get the expected data while pulling unloaded bins; the pull method (or the child serialiser) is buggy."); - } - task.data = newtask.data; - task.meta = newtask.meta; - } - } - - /** - ** {@inheritDoc} - ** - ** This implementation loads all the data for each bin referred to in the - ** task map, and will add extra tasks to the map if those pulling those - ** bins result in extra data being loaded. - */ - /*@Override**/ public void pull(Map> tasks, Object mapmeta) throws TaskAbortException { - - try { - Inventory inv = new Inventory(this, tasks); - Map>> bintasks = new HashMap>>(); - - // form the list of bin-tasks from the bins referred to by the map-tasks - for (Map.Entry> en: tasks.entrySet()) { - Object binid = scale.readMetaID(en.getValue().meta); - if (!bintasks.containsKey(binid)) { - bintasks.put(binid, new PullTask>(scale.makeBinMeta(mapmeta, binid))); - } - } - - // pull all the bins - preprocessPullBins(tasks, bintasks.values()); - subsrl.pull(bintasks.values()); - - // for each task, grab and remove its element from its bin - for (Map.Entry> en: tasks.entrySet()) { - PullTask task = en.getValue(); - PullTask> bintask = bintasks.get(scale.readMetaID(task.meta)); - if (bintask == null) { - // task was marked as redundant by child serialiser - tasks.remove(en.getKey()); - } else if (bintask.data == null || (task.data = bintask.data.remove(en.getKey())) == null) { - throw new TaskAbortException("Packer did not find the element (" + en.getKey() + ") in the expected bin (" + scale.readMetaID(task.meta) + "). Either the data is corrupt, or the child serialiser is buggy.", new Exception("internal error")); - } - } - - // if there is any leftover data in the bins, load them anyway - Map> leftovers = new HashMap>(); - for (Map.Entry>> en: bintasks.entrySet()) { - PullTask> bintask = en.getValue(); - for (Map.Entry el: bintask.data.entrySet()) { - if (tasks.containsKey(el.getKey())) { - throw new TaskAbortException("Packer found an extra unexpected element (" + el.getKey() + ") inside a bin (" + en.getKey() + "). Either the data is corrupt, or the child serialiser is buggy.", new Exception("internal error")); - } - PullTask task = new PullTask(scale.makeMeta(en.getKey(), scale.weigh(el.getValue()))); - task.data = el.getValue(); - leftovers.put(el.getKey(), task); - } - } - tasks.putAll(leftovers); - if (tasks.isEmpty()) { throw new TaskCompleteException("All tasks appear to be complete"); } - - } catch (RuntimeException e) { - throw new TaskAbortException("Could not complete the pull operation", e); - } - - } - - /** - ** {@inheritDoc} - ** - ** This implementation will pack all tasks with non-null data into bins, - ** taking into account bins already allocated (as defined by the tasks with - ** null data and non-null metadata). 
- ** - ** There are two main modes of operation: - ** - ** ; {@link #NO_TINY} is {@code false} : - ** Uses the Best-Fit Decreasing bin-packing algorithm, plus an additional - ** redistribution algorithm that "evens out" the bins. Each bin's weight - ** will be between [BIN_CAP/2, BIN_CAP], except for at most one bin. - ** ; {@link #NO_TINY} is {@code true} : - ** As above, but if the exceptional bin exists, it will be merged with - ** the next-smallest bin, if that exists. As before, each bin will weigh - ** between [BIN_CAP/2, BIN_CAP], except for the merged element (if it - ** exists), which will weigh between (BIN_CAP, BIN_CAP*3/2). - ** - ** In addition to this, the {@link #aggression} attribute will affect when - ** fully-unloaded unchanged bins are discarded. - ** - ** The default generator '''requires''' all keys of the backing map to be - ** present in the input task map, since the default {@link IDGenerator} - ** does not have any other way of knowing what bin IDs were handed out in - ** previous push operations, and it must avoid giving out duplicate IDs. - ** - ** You can bypass this requirement by extending {@link IDGenerator} so that - ** it can work out that information (eg. using an algorithm that generates - ** a UUID), and overriding {@link #generator()}. (ID generation based on - ** bin ''content'' is ''not'' supported, and is not a priority at present.) - */ - /*@Override**/ public void push(Map> tasks, Object mapmeta) throws TaskAbortException { - - try { - // read local copy of aggression - int agg = getAggression(); - if(logDEBUG) Logger.debug(this, "Aggression = "+agg+" tasks size = "+tasks.size()); - - IDGenerator gen = generator(); - Inventory inv = new Inventory(this, tasks); - SortedSet> bins = new TreeSet>(); - - // initialise the binset based on the aggression setting - if (agg <= 0) { - // discard all tasks with null data - Iterator> it = tasks.values().iterator(); - while (it.hasNext()) { - PushTask task = it.next(); - if (task.data == null) { - it.remove(); - } - } - } else if (agg <= 2) { - // initialise already-allocated bins from tasks with null data - initialiseBinSet(bins, tasks, inv, gen, mapmeta); - } else { - // pull all tasks with null data - pullUnloaded(tasks, mapmeta); - } - - // pack elements into bins - packBestFitDecreasing(bins, tasks, inv, gen); - if (agg <= 1) { - // discard all bins not affected by the pack operation - discardUnchangedBins(bins, tasks); - } - - // redistribute weights between bins - redistributeWeights(bins, inv); - if (agg <= 2) { - // discard all bins not affected by the redistribution operation - discardUnchangedBins(bins, tasks); - // pull all data that is as yet unloaded - pullUnloaded(tasks, mapmeta); - } - - // form the list of bin-tasks for each bin, using data from the map-tasks - List>> bintasks = new ArrayList>>(bins.size()); - for (Bin bin: bins) { - Map data = new HashMap(bin.size()<<1); - for (K k: bin) { - data.put(k, tasks.get(k).data); - } - bintasks.add(new PushTask>(data, scale.makeBinMeta(mapmeta, bin.id))); - } - - // push all the bins - preprocessPushBins(tasks, bintasks); - subsrl.push(bintasks); - - // set the metadata for all the pushed bins - for (PushTask> bintask: bintasks) { - for (K k: bintask.data.keySet()) { - tasks.get(k).meta = scale.makeMeta(scale.readBinMetaID(bintask.meta), inv.getWeight(k)); - tasks.get(k).data = null; - } - } - - // FIXME PERFORMANCE: - // Split up IterableSerialiser, call the start phase, which generates - // metadata. 
Fill in the final metadata based on this and clear the
-			// tasks.get(k).data. Then wait for the finish.
-
-		} catch (RuntimeException e) {
-			throw new TaskAbortException("Could not complete the push operation", e);
-		}
-
-	}
-
-
-	/************************************************************************
-	** A class that represents a bin with a certain capacity.
-	**
-	** NOTE: this implementation is incomplete, since we don't override the
-	** remove() methods of the sets and iterators returned by the subset and
-	** iterator methods to also recalculate the weight; but these are never
-	** used, so it's OK.
-	**
-	** @author infinity0
-	*/
-	protected static class Bin<K> extends TreeSet<K> implements Comparable<Bin<K>> {
-
-		final protected Object id;
-		final protected Set<K> orig;
-		final protected int capacity;
-		final protected Inventory<K, ?> inv;
-
-		int weight = 0;
-
-		public Bin(int c, Inventory<K, ?> v, Object i, Set<K> o) {
-			super(v);
-			inv = v;
-			capacity = c;
-			id = i;
-			orig = o;
-		}
-
-		public boolean unchanged() {
-			// proper implementations of equals() should not throw NullPointerException,
-			// but check null anyway, just in case...
-			return orig != null && equals(orig);
-		}
-
-		public int filled() {
-			return weight;
-		}
-
-		public int remainder() {
-			return capacity - weight;
-		}
-
-		@Override public boolean add(K c) {
-			if (super.add(c)) { weight += inv.getWeight(c); return true; }
-			return false;
-		}
-
-		@Override public boolean remove(Object c) {
-			if (super.remove(c)) { weight -= inv.getWeight((K)c); return true; }
-			return false;
-		}
-
-		/**
-		** Descending comparator for total bin load, ie. ascending comparator
-		** for remaining weight capacity. {@link DummyBin}s are treated as the
-		** "heaviest", or "least empty", bin for their weight.
-		*/
-		/*@Override**/ public int compareTo(Bin<K> bin) {
-			if (this == bin) { return 0; }
-			int f1 = filled(), f2 = bin.filled();
-			if (f1 != f2) { return (f2 > f1)? 1: -1; }
-			return (bin instanceof DummyBin)? -1: IdentityComparator.comparator.compare(this, bin);
-		}
-
-	}
-
-
-	/************************************************************************
-	** A class that pretends to be a bin with a certain capacity and weight.
-	** This is used when we want such a bin for some purpose (eg. as an
-	** argument to a comparator) but we don't want to have to populate a real
-	** bin to get the desired weight (which might be massive).
-	**
-	** @author infinity0
-	*/
-	protected static class DummyBin<K> extends Bin<K> {
-
-		public DummyBin(int c, int w) {
-			super(c, null, null, null);
-			weight = w;
-		}
-
-		@Override public boolean add(K k) {
-			throw new UnsupportedOperationException("Dummy bins cannot be modified");
-		}
-
-		@Override public boolean remove(Object c) {
-			throw new UnsupportedOperationException("Dummy bins cannot be modified");
-		}
-
-		/**
-		** {@inheritDoc}
-		*/
-		@Override public int compareTo(Bin<K> bin) {
-			if (this == bin) { return 0; }
-			int f1 = filled(), f2 = bin.filled();
-			if (f1 != f2) { return (f2 > f1)? 1: -1; }
-			return (bin instanceof DummyBin)? IdentityComparator.comparator.compare(this, bin): 1;
-		}
-	}
-
-
-	/************************************************************************
-	** A class that provides a "weight" assignment for each key in a given map,
-	** by reading either the data or the metadata of its associated task.
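A hypothetical look-up through the Inventory just summarised (key, inv and BIN_CAP are assumed in scope):

    // Sketch only: weights come from Scale.weigh(task.data) when the data is
    // loaded, and from the recorded metadata weight when it is not.
    int w = inv.getWeight(key);
    // Pseudo-key for headSet()/tailSet() cuts at a chosen weight, per
    // makeDummyObject() below:
    K cut = inv.makeDummyObject(BIN_CAP / 2);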
- ** - ** @author infinity0 - */ - protected static class Inventory extends IdentityComparator { - - final protected Map weights = new HashMap(); - final protected Map> elements; - final protected Packer packer; - final protected Scale scale; - - /** - ** Keeps track of "the exceptional element" as defined in the - ** description for {@link #push(Map, Object)}. - */ - protected K giant; - - // TODO LOW maybe , or something - protected Inventory(Packer pk, Map> elem) { - packer = pk; - scale = pk.scale; - elements = elem; - } - - /** - ** Get the weight assignment for a given key. - */ - public int getWeight(K key) { - Integer i = weights.get(key); - if (i == null) { - Task task = elements.get(key); - if (task == null) { - throw new IllegalArgumentException("This scale does not have a weight for " + key); - } else if (task.data == null) { - i = scale.readMetaWeight(task.meta); - } else { - i = scale.weigh(task.data); - } - if (i > packer.BIN_CAP) { - if (packer.NO_TINY && giant == null) { - giant = key; - } else { - throw new IllegalArgumentException("Element " + key + " greater than the capacity allowed: " + i + "/" + packer.BIN_CAP); - } - } - weights.put(key, i); - } - return i; - } - - /** - ** Compare keys by the weights of the element it maps to. The {@code - ** null} key is treated as the "lightest" key for its weight. - */ - @Override public int compare(K k1, K k2) { - if (k1 == k2) { return 0; } - int a = getWeight(k1), b = getWeight(k2); - if (a != b) { return (a < b)? -1: 1; } - // treat the null key as the "least" element for its weight - return (k1 == null)? -1: (k2 == null)? 1: super.compare(k1, k2); - } - - /** - ** Set the weight for the {@code null} key. This is useful for when - ** you want to obtain a subset using tailSet() or headSet(), but don't - ** have a key with the desired weight at hand. - */ - public K makeDummyObject(int weight) { - weights.put(null, weight); - return null; - } - - } - - - /************************************************************************ - ** Generates unique IDs for the bins. This generator assigns {@link Long}s - ** in sequence; registering any type of integer object reference will cause - ** future automatically-assigned IDs to be greater than that integer. - ** - ** This implementation cannot ensure uniqueness of IDs generated (with - ** respect to ones generated in a previous session), unless they are - ** explicitly registered using {@link #registerID(Object)}. - ** - ** @author infinity0 - */ - public static class IDGenerator { - - protected long nextID; - - /** - ** Register an ID that was assigned in a previous session, so the - ** generator doesn't output that ID again. - */ - public Object registerID(Object o) { - long id; - if (o instanceof Integer) { - id = (Integer)o; - } else if (o instanceof Long) { - id = (Long)o; - } else if (o instanceof Short) { - id = (Short)o; - } else if (o instanceof Byte) { - id = (Byte)o; - } else { - return o; - } - if (id > nextID) { - nextID = id+1; - } - return o; - } - - /** - ** Generate another ID. - */ - public Long nextID() { - return nextID++; - } - - } - - - /************************************************************************ - ** JavaBean representing bin metadata. 
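For illustration, the behaviour of the default IDGenerator just defined (values invented):

    // Sketch only: re-registering IDs from an earlier session keeps new IDs unique.
    Packer.IDGenerator gen = new Packer.IDGenerator();
    gen.registerID(7L);       // an ID handed out by a previous push
    Long next = gen.nextID(); // yields 8, greater than any registered integer ID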
- ** - ** @author infinity0 - */ - public static class BinInfo implements Cloneable { - - protected Object id; - protected int weight; - - public BinInfo() { } - public BinInfo(Object i, int w) { id = i; weight = w; } - public Object getID() { return id; } - public int getWeight() { return weight; } - public void setID(Object i) { id = i; } - public void setWeight(int w) { weight = w; } - - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof BinInfo)) { return false; } - BinInfo bi = (BinInfo)o; - return id.equals(bi.id) && weight == bi.weight; - } - - @Override public int hashCode() { - return id.hashCode() + weight*31; - } - - @Override public String toString() { - return "Bin \"" + id + "\" - " + weight + "oz."; - } - - } - - - /************************************************************************ - ** Performs measurements and readings on bins and their elements. - ** - ** To implement this class, the programmer needs to implement the {@link - ** #weigh(Object)} method. - ** - ** @author infinity0 - */ - abstract public static class Scale { - - /** - ** Constructs the metadata for a bin, from the bin ID and the map-wide - ** metadata. - */ - public Object makeBinMeta(Object mapmeta, Object binid) { - return (mapmeta == null)? binid: new Object[]{mapmeta, binid}; - } - - /** - ** Read the bin ID from the given bin's metadata. - */ - public Object readBinMetaID(Object binmeta) { - return (binmeta instanceof Object[])? ((Object[])binmeta)[1]: binmeta; - } - - /** - ** Return the weight of the given element. - */ - abstract public int weigh(T element); - - /** - ** Read the bin ID from the given metadata. - */ - public Object readMetaID(Object meta) { - return ((BinInfo)meta).id; - } - - /** - ** Read the bin weight from the given metadata. - */ - public int readMetaWeight(Object meta) { - return ((BinInfo)meta).weight; - } - - /** - ** Construct the metadata for the given bin ID and weight. - */ - public Object makeMeta(Object id, int weight) { - return new BinInfo(id, weight); - } - - } - -} diff --git a/src/plugins/Library/io/serial/ParallelSerialiser.java b/src/plugins/Library/io/serial/ParallelSerialiser.java deleted file mode 100644 index f59cd10a..00000000 --- a/src/plugins/Library/io/serial/ParallelSerialiser.java +++ /dev/null @@ -1,271 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.io.serial; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.TaskAbortExceptionConvertor; -import plugins.Library.util.concurrent.Scheduler; -import plugins.Library.util.concurrent.ObjectProcessor; -import plugins.Library.util.concurrent.Executors; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.exec.TaskInProgressException; -import plugins.Library.util.exec.TaskCompleteException; -import plugins.Library.util.func.SafeClosure; -import static plugins.Library.util.func.Tuples.X2; // also imports the class - -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.ArrayList; - -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.ConcurrentMap; - -/** -** An {@link IterableSerialiser} that uses threads to handle tasks given to it -** in parallel, and keeps track of task progress. -** -** To implement this class, the programmer must implement the {@link -** LiveArchiver#pullLive(Serialiser.PullTask, Progress)} and {@link -** LiveArchiver#pushLive(Serialiser.PushTask, Progress)} methods. -** -** DOCUMENT (rewritten) -** -** @author infinity0 -*/ -public abstract class ParallelSerialiser -implements IterableSerialiser, - ScheduledSerialiser, - LiveArchiver, - Serialiser.Trackable { - - final static protected ThreadPoolExecutor exec = new ThreadPoolExecutor( - 0, 0x40, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(), - new ThreadPoolExecutor.CallerRunsPolicy() // easier than catching RejectedExecutionException, if it ever occurs - ); - - final protected ProgressTracker tracker; - - public ParallelSerialiser(ProgressTracker k) { - if (k == null) { - throw new IllegalArgumentException("ParallelSerialiser must have a progress tracker."); - } - tracker = k; - } - - // return ? extends Progress so as to hide the implementation details of P - /*@Override**/ public ProgressTracker getTracker() { - return tracker; - } - - protected Runnable createJoinRunnable(final K task, final TaskInProgressException e, final SafeClosure> post) { - return new Runnable() { - public void run() { - TaskAbortException ex; - try { - Progress p = e.getProgress(); - p.join(); - ex = new TaskCompleteException("Task complete: " + p.getSubject()); - } catch (InterruptedException e) { - ex = new TaskAbortException("Progress join was interrupted", e, true); - } catch (RuntimeException e) { - ex = new TaskAbortException("failed", e); - } catch (TaskAbortException a) { - ex = a; - } - if (post != null) { post.invoke(X2(task, ex)); } - } - }; - } - - /** - ** DOCUMENT. - ** - ** If {@code post} is {@code null}, it is assumed that {@link Progress} - ** objects are created by the thread that calls this method. Failure to do - ** this '''will result in deadlock'''. - ** - ** Otherwise, this method will create {@link Progress} objects for each - ** task. If they are created before this, '''deadlock will result''' as the - ** handler waits for that progress to complete. - */ - protected Runnable createPullJob(final PullTask task, final SafeClosure, TaskAbortException>> post) { - try { - final P prog = (post != null)? 
tracker.addPullProgress(task): tracker.getPullProgress(task); - return new Runnable() { - public void run() { - TaskAbortException ex = null; - try { pullLive(task, prog); } - catch (RuntimeException e) { ex = new TaskAbortException("failed", e); } - catch (TaskAbortException e) { ex = e; } - if (post != null) { post.invoke(X2(task, ex)); } - // FIXME OPT NORM Two-phase pull (see FreenetArchiver.pull comments): - // All we'd need to do is: - // Bucket = pullFirst(task, prog) - // Take semaphore (purpose is to limit the number of decoded stuff in memory which is not yet wanted) - // pullSecond(task, prog, bucket) - // Call post-process - // Release semaphore - } - }; - } catch (final TaskInProgressException e) { - return createJoinRunnable(task, e, post); - } - } - - /** - ** DOCUMENT. - ** - ** If {@code post} is {@code null}, it is assumed that {@link Progress} - ** objects are created by the thread that calls this method. Failure to do - ** this '''will result in deadlock'''. - ** - ** Otherwise, this method will create {@link Progress} objects for each - ** task. If they are created before this, '''deadlock will result''' as the - ** handler waits for that progress to complete. - */ - protected Runnable createPushJob(final PushTask task, final SafeClosure, TaskAbortException>> post) { - try { - final P prog = (post != null)? tracker.addPushProgress(task): tracker.getPushProgress(task); - return new Runnable() { - public void run() { - TaskAbortException ex = null; - try { pushLive(task, prog); } - catch (RuntimeException e) { ex = new TaskAbortException("failed", e); } - catch (TaskAbortException e) { ex = e; } - if (post != null) { post.invoke(X2(task, ex)); } - } - }; - } catch (final TaskInProgressException e) { - return createJoinRunnable(task, e, post); - } - } - - /*======================================================================== - public interface IterableSerialiser - ========================================================================*/ - - /*@Override**/ public void pull(PullTask task) throws TaskAbortException { - try { - try { - P p = tracker.addPullProgress(task); - exec.execute(createPullJob(task, null)); - p.join(); - } catch (TaskInProgressException e) { - throw e.join(); - } - } catch (InterruptedException e) { - throw new TaskAbortException("Task pull was interrupted", e, true); - } - } - - /*@Override**/ public void push(PushTask task) throws TaskAbortException { - try { - try { - P p = tracker.addPushProgress(task); - exec.execute(createPushJob(task, null)); - p.join(); - } catch (TaskInProgressException e) { - throw e.join(); - } - } catch (InterruptedException e) { - throw new TaskAbortException("Task push was interrupted", e, true); - } - } - - /** - ** {@inheritDoc} - ** - ** This implementation DOCUMENT - */ - /*@Override**/ public void pull(Iterable> tasks) throws TaskAbortException { - List progs = new ArrayList(); - try { - for (Iterator> it = tasks.iterator(); it.hasNext();) { - PullTask task = it.next(); - try { - progs.add(tracker.addPullProgress(task)); - exec.execute(createPullJob(task, null)); - } catch (TaskInProgressException e) { - it.remove(); - progs.add(e.getProgress()); - } - } - for (Progress p: progs) { p.join(); } - // TODO NORM: toad - if it fails, we won't necessarily know until all of the - // other tasks have completed ... is this acceptable? 
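The TODO above is a real limitation: the loop joins every Progress in order, so a failure in an early task may only surface after every later join returns. As an editor's illustration (not part of the original class; all names here are hypothetical), a fail-fast variant could drain an ExecutorCompletionService instead, so the first failure is observed as soon as the failing task completes:

    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class FailFastJoin {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(4);
            CompletionService<Void> done = new ExecutorCompletionService<Void>(pool);
            for (int i = 0; i < 4; i++) {
                final int n = i;
                done.submit(new Callable<Void>() {
                    public Void call() throws Exception {
                        if (n == 2) throw new Exception("task " + n + " failed");
                        return null; // simulates a successful pullLive()
                    }
                });
            }
            try {
                // take() hands back futures in completion order, so the first
                // failure surfaces as soon as the failing task finishes
                for (int i = 0; i < 4; i++) done.take().get();
            } catch (ExecutionException e) {
                System.out.println("aborted early: " + e.getCause().getMessage());
            } finally {
                pool.shutdownNow();
            }
        }
    }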
- 
-	} catch (InterruptedException e) {
-		throw new TaskAbortException("ParallelSerialiser pull was interrupted", e, true);
-	}
-	}
- 
-	/**
-	** {@inheritDoc}
-	**
-	** This implementation DOCUMENT
-	*/
-	/*@Override**/ public void push(Iterable<PushTask<T>> tasks) throws TaskAbortException {
-	List<Progress> progs = new ArrayList<Progress>();
-	try {
-		for (Iterator<PushTask<T>> it = tasks.iterator(); it.hasNext();) {
-			PushTask<T> task = it.next();
-			try {
-				progs.add(tracker.addPushProgress(task));
-				exec.execute(createPushJob(task, null));
-			} catch (TaskInProgressException e) {
-				it.remove();
-				progs.add(e.getProgress());
-			}
-		}
-		for (Progress p: progs) { p.join(); }
-		// TODO NORM: toad - if it fails, we won't necessarily know until all of the
-		// other tasks have completed ... is this acceptable?
- 
-	} catch (InterruptedException e) {
-		throw new TaskAbortException("ParallelSerialiser push was interrupted", e, true);
-	}
-	}
- 
-	/**
-	** {@inheritDoc}
-	**
-	** This implementation DOCUMENT
-	*/
-	/*@Override**/ public <E> ObjectProcessor<PullTask<T>, E, TaskAbortException> pullSchedule(
-		BlockingQueue<PullTask<T>> input,
-		BlockingQueue<X2<PullTask<T>, TaskAbortException>> output,
-		Map<PullTask<T>, E> deposit
-	) {
-		return new ObjectProcessor<PullTask<T>, E, TaskAbortException>(input, output, deposit, null, Executors.DEFAULT_EXECUTOR, new TaskAbortExceptionConvertor()) {
-			@Override protected Runnable createJobFor(PullTask<T> task) {
-				return createPullJob(task, postProcess);
-			}
-		}.autostart();
-	}
- 
-	/**
-	** {@inheritDoc}
-	**
-	** This implementation DOCUMENT
-	*/
-	/*@Override**/ public <E> ObjectProcessor<PushTask<T>, E, TaskAbortException> pushSchedule(
-		BlockingQueue<PushTask<T>> input,
-		BlockingQueue<X2<PushTask<T>, TaskAbortException>> output,
-		Map<PushTask<T>, E> deposit
-	) {
-		return new ObjectProcessor<PushTask<T>, E, TaskAbortException>(input, output, deposit, null, Executors.DEFAULT_EXECUTOR, new TaskAbortExceptionConvertor()) {
-			@Override protected Runnable createJobFor(PushTask<T> task) {
-				return createPushJob(task, postProcess);
-			}
-		}.autostart();
-	}
- 
-}
diff --git a/src/plugins/Library/io/serial/ProgressTracker.java b/src/plugins/Library/io/serial/ProgressTracker.java
deleted file mode 100644
index 8769193c..00000000
--- a/src/plugins/Library/io/serial/ProgressTracker.java
+++ /dev/null
@@ -1,255 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.io.serial;
-
-import plugins.Library.io.serial.Serialiser.*;
-import plugins.Library.util.exec.Progress;
-import plugins.Library.util.exec.TaskInProgressException;
-import plugins.Library.util.CompositeIterable;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.WeakHashMap;
-
-/**
-** Keeps track of a task's progress and provides methods to retrieve this data.
-** For this to function properly, the data/meta for push/pull tasks MUST NOT
-** be modified for as long as the task is in operation. This is because their
-** {@link #equals(Object)} method is defined in terms of those fields, so any
-** change to them would interfere with {@link WeakHashMap}'s ability to locate
-** the tasks within the map.
-**
-** TODO HIGH make those fields (PullTask.meta / PushTask.data) final? would
-** involve removing Task class.
-**
-** The progress objects are stored in a {@link WeakHashMap} and are freed
-** automatically by the garbage collector if the corresponding task goes out of
-** scope '''and''' no other strong references to the progress object exist.
-**
-** @author infinity0
-*/
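Because the map is weak-keyed, an entry lives exactly as long as its task stays strongly reachable. A minimal sketch of that behaviour, using plain JDK types (editor's illustration; garbage-collection timing is only hinted at, never guaranteed):

    import java.util.WeakHashMap;

    class WeakTrackerDemo {
        public static void main(String[] args) throws InterruptedException {
            WeakHashMap<Object, String> progress = new WeakHashMap<Object, String>();
            Object meta = new Object();           // stands in for a task's meta field
            progress.put(meta, "in flight");
            System.out.println(progress.size());  // 1: 'meta' is still strongly reachable
            meta = null;                          // drop the only strong reference
            System.gc();                          // a hint only; collection is not guaranteed
            Thread.sleep(100);
            System.out.println(progress.size());  // usually 0: the entry has been reclaimed
        }
    }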
-public class ProgressTracker<T, P extends Progress> {
- 
-	/**
-	** Keeps track of the progress of each {@link PullTask}. The key is the
-	** metadata of the task.
-	*/
-	final protected WeakHashMap<PullTask<T>, P> pullProgress = new WeakHashMap<PullTask<T>, P>();
- 
-	/**
-	** Keeps track of the progress of each {@link PushTask}. The key is the
-	** data of the task.
-	*/
-	final protected WeakHashMap<PushTask<T>, P> pushProgress = new WeakHashMap<PushTask<T>, P>();
- 
-	/**
-	** The progress class, used to instantiate new progress objects.
-	*/
-	final protected Class<? extends P> progressClass;
- 
-	/**
-	** DOCUMENT
-	**
-	** @param cc A class with a nullary constructor that is used to instantiate
-	**           new progress objects. This argument may be null if a subclass
-	**           overrides {@link #newProgress()}.
-	*/
-	public ProgressTracker(Class<? extends P> cc) {
-		try {
-			if (cc != null) { cc.newInstance(); }
-		} catch (InstantiationException e) {
-			throw new IllegalArgumentException("Cannot instantiate class. Make sure it has a nullary constructor.", e);
-		} catch (IllegalAccessException e) {
-			throw new IllegalArgumentException("Cannot instantiate class. Make sure you have access to it.", e);
-		}
-		progressClass = cc;
-	}
- 
-	protected P newProgress() {
-		if (progressClass == null) {
-			throw new IllegalStateException("ProgressTracker cannot create progress: No class was given to the constructor, but newProgress() was not overridden.");
-		}
-		try {
-			return progressClass.newInstance();
-		} catch (InstantiationException e) {
-			return null; // constructor should prevent this from ever happening, but the compiler complains.
-		} catch (IllegalAccessException e) {
-			return null; // constructor should prevent this from ever happening, but the compiler complains.
-		}
-	}
- 
-	// TODO NORM there is probably a better way of doing this...
-	//final protected HashSet<PullTask<T>> pullWaiters = new HashSet<PullTask<T>>();
-	//final protected HashSet<PushTask<T>> pushWaiters = new HashSet<PushTask<T>>();
- 
-	/**
-	** Get the progress of a {@link PullTask} by its metadata.
-	**
-	** NOTE: since progress is stored in a {@link WeakHashMap} this means that
-	** if the task has finished, then this will return null. (FIXME NORM)
-	**
-	** This doesn't have to be a major problem - when a task completes, detect
-	** this, and deny further attempts to retrieve the progress.
-	*/
-	public P getPullProgressFor(Object meta/*, boolean block*/) {
-		return getPullProgress(new PullTask<T>(meta)/*, block*/);
-	}
- 
-	/**
-	** Get the progress of a {@link PushTask} by its data.
-	**
-	** NOTE: since progress is stored in a {@link WeakHashMap} this means that
-	** if the task has finished, then this will return null. (FIXME NORM)
-	**
-	** This doesn't have to be a major problem - when a task completes, detect
-	** this, and deny further attempts to retrieve the progress.
-	*/
-	public P getPushProgressFor(T data/*, boolean block*/) {
-		return getPushProgress(new PushTask<T>(data)/*, block*/);
-	}
- 
-	/**
-	** Get the progress of a {@link PullTask}.
-	*/
-	public P getPullProgress(PullTask<T> task/*, boolean block*/) {
-		synchronized (pullProgress) {
-			return pullProgress.get(task);
-			/*P prog = pullProgress.get(task);
-			if (prog != null || !block) { return prog; }
- 
-			pullWaiters.add(task);
-			while ((prog = pullProgress.get(task)) == null) {
-				pullProgress.wait();
-			}
-			return prog;*/
-		}
-	}
- 
-	/*public P getPullProgress(PullTask<T> task) {
-		return getPullProgress(task, false);
-	}*/
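The add-or-fail contract implemented by the addPullProgress/addPushProgress methods below reduces to a small toy (editor's sketch; ToyTracker and InProgress are made-up stand-ins for ProgressTracker and TaskInProgressException):

    import java.util.HashMap;
    import java.util.Map;

    class ToyTracker {
        static class InProgress extends Exception {
            final String progress;
            InProgress(String p) { progress = p; }
        }

        private final Map<Object, String> pulls = new HashMap<Object, String>();

        /** Returns a fresh progress, or throws if one is already registered. */
        synchronized String addPullProgress(Object meta) throws InProgress {
            String p = pulls.get(meta);
            if (p != null) throw new InProgress(p);
            p = "progress for " + meta;
            pulls.put(meta, p);
            return p;
        }

        public static void main(String[] args) throws Exception {
            ToyTracker t = new ToyTracker();
            Object meta = "some-metadata";
            System.out.println(t.addPullProgress(meta)); // first caller wins
            try {
                t.addPullProgress(meta);
            } catch (InProgress e) {
                System.out.println("already running: " + e.progress); // later callers join
            }
        }
    }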
- 
-	/**
-	** Get the progress of a {@link PushTask}.
-	*/
-	public P getPushProgress(PushTask<T> task/*, boolean block*/) {
-		synchronized (pushProgress) {
-			return pushProgress.get(task);
-			/*P prog = pushProgress.get(task);
-			if (prog != null || !block) { return prog; }
- 
-			pushWaiters.add(task);
-			while ((prog = pushProgress.get(task)) == null) {
-				pushProgress.wait();
-			}
-			return prog;*/
-		}
-	}
- 
-	/*public P getPushProgress(PushTask<T> task) {
-		return getPushProgress(task, false);
-	}*/
- 
-	/**
-	** Creates a new pull progress and keeps track of it. If there is already a
-	** progress for the metadata, throws {@link TaskInProgressException}. This
-	** ensures that the object returned from this method has not been seen by
-	** any other threads.
-	**
-	** @throws TaskInProgressException
-	*/
-	public P addPullProgress(PullTask<T> task) throws TaskInProgressException {
-		synchronized (pullProgress) {
-			P p = pullProgress.get(task);
-			if (p != null) { throw new TaskInProgressException(p); }
-			pullProgress.put(task, p = newProgress());
- 
-			/*if (pullWaiters.contains(task)) {
-				pullProgress.notifyAll();
-			}*/
-			return p;
-		}
-	}
- 
-	/**
-	** Creates a new push progress and keeps track of it. If there is already a
-	** progress for the data, throws {@link TaskInProgressException}. This
-	** ensures that the object returned from this method has not been seen by
-	** any other threads.
-	**
-	** @throws TaskInProgressException
-	*/
-	public P addPushProgress(PushTask<T> task) throws TaskInProgressException {
-		synchronized (pushProgress) {
-			P p = pushProgress.get(task);
-			if (p != null) { throw new TaskInProgressException(p); }
-			pushProgress.put(task, p = newProgress());
- 
-			/*if (pushWaiters.remove(task)) {
-				pushProgress.notifyAll();
-			}*/
-			return p;
-		}
-	}
- 
- 
-	/**
-	** Creates an iterable over the {@link Progress} objects corresponding to
-	** the given pull tasks, all tracked by the given tracker.
-	**
-	** @param tracker The single tracker for all the ids
-	** @param tasks The tasks to track
-	*/
-	public static <T, P extends Progress> Iterable<P>
-	makePullProgressIterable(final ProgressTracker<T, P> tracker, final Iterable<PullTask<T>> tasks) {
-		return new CompositeIterable<PullTask<T>, P>(tasks, true) {
-			@Override public P nextFor(PullTask<T> next) {
-				return tracker.getPullProgress(next);
-			}
-		};
-	}
- 
-	/**
-	** Creates an iterable over the {@link Progress} objects corresponding to
-	** the given push tasks, all tracked by the given tracker.
-	**
-	** @param tracker The single tracker for all the ids
-	** @param tasks The tasks to track
-	*/
-	public static <T, P extends Progress> Iterable<P>
-	makePushProgressIterable(final ProgressTracker<T, P> tracker, final Iterable<PushTask<T>> tasks) {
-		return new CompositeIterable<PushTask<T>, P>(tasks, true) {
-			@Override public P nextFor(PushTask<T> next) {
-				return tracker.getPushProgress(next);
-			}
-		};
-	}
- 
-	/**
-	** Creates an iterable over the {@link Progress} objects corresponding to
-	** the given pull tracking ids, each tracked by its own tracker.
-	**
-	** @param ids Map of tracking ids to their respective trackers
-	*/
-	public static <T, P extends Progress> Iterable<P>
-	makePullProgressIterable(final Map<PullTask<T>, ProgressTracker<T, P>> ids) {
-		return new CompositeIterable<Map.Entry<PullTask<T>, ProgressTracker<T, P>>, P>(ids.entrySet(), true) {
-			@Override public P nextFor(Map.Entry<PullTask<T>, ProgressTracker<T, P>> next) {
-				return next.getValue().getPullProgress(next.getKey());
-			}
-		};
-	}
- 
-	/**
-	** Creates an iterable over the {@link Progress} objects corresponding to
-	** the given push tracking ids, each tracked by its own tracker.
-	**
-	** @param ids Map of tracking ids to their respective trackers
-	*/
-	public static <T, P extends Progress> Iterable<P>
-	makePushProgressIterable(final Map<PushTask<T>, ProgressTracker<T, P>> ids) {
-		return new CompositeIterable<Map.Entry<PushTask<T>, ProgressTracker<T, P>>, P>(ids.entrySet(), true) {
-			@Override public P nextFor(Map.Entry<PushTask<T>, ProgressTracker<T, P>> next) {
-				return next.getValue().getPushProgress(next.getKey());
-			}
-		};
-	}
- 
-}
diff --git a/src/plugins/Library/io/serial/ScheduledSerialiser.java b/src/plugins/Library/io/serial/ScheduledSerialiser.java
deleted file mode 100644
index 09020c8d..00000000
--- a/src/plugins/Library/io/serial/ScheduledSerialiser.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.io.serial;
-
-import plugins.Library.io.serial.Serialiser.*;
-import plugins.Library.util.concurrent.Scheduler;
-import plugins.Library.util.concurrent.ObjectProcessor;
-import plugins.Library.util.exec.TaskAbortException;
-import plugins.Library.util.func.Tuples.X2;
-
-import java.util.Map;
-import java.util.concurrent.BlockingQueue;
-
-/**
-** An interface for asynchronous task execution. The methods return objects
-** for managing and scheduling tasks.
-**
-** @author infinity0
-*/
-public interface ScheduledSerialiser<T> extends IterableSerialiser<T> {
-
-	/**
-	** Creates an {@link ObjectProcessor} for executing {@link PullTask}s, which
-	** should be {@linkplain ObjectProcessor#auto()} automatically started.
-	**
-	** @param input Queue to add task requests to
-	** @param output Queue to pop completed tasks from
-	** @param deposit Map of tasks to deposits
-	*/
-	// TODO LOW public Scheduler pullSchedule(
-	public <E> ObjectProcessor<PullTask<T>, E, TaskAbortException> pullSchedule(
-		BlockingQueue<PullTask<T>> input,
-		BlockingQueue<X2<PullTask<T>, TaskAbortException>> output,
-		Map<PullTask<T>, E> deposit
-	);
-
-	/**
-	** Creates an {@link ObjectProcessor} for executing {@link PushTask}s, which
-	** should be {@linkplain ObjectProcessor#auto()} automatically started.
-	**
-	** @param input Queue to add task requests to
-	** @param output Queue to pop completed tasks from
-	** @param deposit Map of tasks to deposits
-	*/
-	// TODO LOW public Scheduler pushSchedule(
-	public <E> ObjectProcessor<PushTask<T>, E, TaskAbortException> pushSchedule(
-		BlockingQueue<PushTask<T>> input,
-		BlockingQueue<X2<PushTask<T>, TaskAbortException>> output,
-		Map<PushTask<T>, E> deposit
-	);
-
-}
diff --git a/src/plugins/Library/io/serial/Serialiser.java b/src/plugins/Library/io/serial/Serialiser.java
deleted file mode 100644
index 736d41cf..00000000
--- a/src/plugins/Library/io/serial/Serialiser.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.io.serial;
-
-import plugins.Library.util.exec.Progress;
-
-import java.util.Collection;
-import java.util.Map;
-
-/**
-** An empty marker interface for serialisation classes. It defines some nested
-** subclasses that act as a unified interface between these classes.
-**
-** Nested data structures such as {@link plugins.Library.util.SkeletonBTreeMap}
-** will generally use the metadata of their children directly (ie. without
-** using a {@link Translator}) during a serialisation operation. Therefore, it
-** is recommended that all metadata passing through this class (and related
-** classes) be directly serialisable by an {@link Archiver}.
This will make the -** task of writing {@link Translator}s much easier, maybe even unnecessary. -** -** The recommended way to ensure this is for it to be either: a primitive or -** Object form of a primitive; an Array, {@link Collection}, or {@link Map}, -** where the elements are also serialisable as defined here; or a Java Bean. -** -** TODO NORM restructure this class, maybe split off into a Serialisers class, -** or a serial.serialiser package... -** -** @author infinity0 -*/ -public interface Serialiser { - - /************************************************************************ - ** Defines a serialisation task for an object. Contains two fields - data - ** and metadata. - */ - abstract public static class Task { - - /** - ** Field for the metadata. This should be serialisable as defined in the - ** description for {@link Serialiser}. - */ - public Object meta = null; - - /** - ** Field for the data. - */ - public T data = null; - - } - - /************************************************************************ - ** Defines a pull task: given some metadata, the task is to retrieve the - ** data for this metadata, possibly updating the metadata in the process. - ** - ** For any single {@code PullTask}, the metadata should uniquely determine - ** what data will be generated during a pull operation, independent of any - ** information from other {@code PullTasks}, even if they are in the same - ** group of tasks given to a compound {@link Serialiser} such as {@link - ** MapSerialiser}. (The converse is not required for {@link PushTask}s.) - ** - ** Consequently, {@code Serialiser}s should be implemented so that its - ** push operations generate metadata for which this property holds. - */ - final public static class PullTask extends Task { - - public PullTask(Object m) { - if (m == null) { throw new IllegalArgumentException("Cowardly refusing to make a PullTask with null metadata."); } - meta = m; - } - - @Override public boolean equals(Object o) { - if (!(o instanceof PullTask)) { return false; } - return meta == ((PullTask)o).meta; - } - - @Override public int hashCode() { - return System.identityHashCode(meta); - } - - } - - /************************************************************************ - ** Defines a push task: given some data and optional metadata, the task is - ** to archive this data and generate the metadata for it in the process. - ** - ** For any single {@code PushTask}, the data may or may not uniquely - ** determine the metadata that will be generated for it during the push - ** operation. For example, compound {@link Serialiser}s that work on groups - ** of tasks may use information from the whole group in order to optimise - ** the operation. (The converse is required for {@link PullTask}s.) - ** - ** Consequently, {@code Serialiser}s may require that extra data is passed - ** to its push operations such that it can... - */ - final public static class PushTask extends Task { - - public PushTask(T d) { - if (d == null) { throw new IllegalArgumentException("Cowardly refusing to make a PushTask with null data."); } - data = d; - } - - public PushTask(T d, Object m) { - // d == null && m != null is allowed in some cases, e.g. MapSerialiser allows us to have some stuff already serialised. 
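As the equals/hashCode overrides in these task classes show, task identity is reference identity of the data/meta field, which is what lets ProgressTracker key its weak maps on tasks. A stand-alone sketch of the pattern (editor's illustration; Key is a made-up stand-in for the task classes):

    class TaskIdentityDemo {
        static final class Key {
            final Object meta;
            Key(Object m) { meta = m; }
            @Override public boolean equals(Object o) {
                return o instanceof Key && meta == ((Key) o).meta; // identity, not equals()
            }
            @Override public int hashCode() { return System.identityHashCode(meta); }
        }
        public static void main(String[] args) {
            String a = new String("CHK@x"), b = new String("CHK@x");
            System.out.println(new Key(a).equals(new Key(a))); // true: same instance
            System.out.println(new Key(a).equals(new Key(b))); // false: equal strings, distinct instances
        }
    }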
- if (d == null && m == null) { throw new IllegalArgumentException("Cowardly refusing to make a PushTask with null data."); } - data = d; - meta = m; - } - - @Override public boolean equals(Object o) { - if (!(o instanceof PushTask)) { return false; } - return data == ((PushTask)o).data; - } - - @Override public int hashCode() { - return System.identityHashCode(data); - } - - } - - /** - ** Represents a serialiser that exposes the progress status of its - ** running operations via a {@link ProgressTracker}. - */ - public interface Trackable extends Serialiser { - - public ProgressTracker getTracker(); - - } - - /** - ** Represents a serialiser which uses a {@link Translator} to do much of - ** its work. This can be used alongside a {@link Serialiser.Composite}. - ** - ** TODO LOW rename this to something better... - */ - public interface Translate extends Serialiser { - - public Translator getTranslator(); - - } - - /** - ** Represents a serialisation process which is divided up into several - ** parts. The basic premise is to convert the target object into another - ** type of object, which can be handled by an already existing {@link - ** Serialiser} (which may also be composite). - ** - ** The conversion can be handled by a {@link Translator}, in which case the - ** class might also implement {@link Serialiser.Translate}. - ** - ** TODO HIGH find a tidy way to make this extend Serialiser - ** "Composite> extends Serialiser" will work... - */ - public interface Composite> /*extends Serialiser*/ { - - public S getChildSerialiser(); - - } - -} diff --git a/src/plugins/Library/io/serial/Translator.java b/src/plugins/Library/io/serial/Translator.java deleted file mode 100644 index a6ffd35a..00000000 --- a/src/plugins/Library/io/serial/Translator.java +++ /dev/null @@ -1,44 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import plugins.Library.io.DataFormatException; - -/** -** A class that translates an object into one of another type. Used mostly in -** conjunction with an {@link Serialiser} that can take objects of the latter -** type but not the former type. -** -** @author infinity0 -*/ -public interface Translator { - - /** - ** Apply the translation. - ** - ** Implementations of this method which act on a recursive data structure - ** that is designed to be partially loaded (such as - ** {@link plugins.Library.util.SkeletonMap}), should follow the same - ** guidelines as in {@link Archiver#push(Serialiser.PushTask)}, - ** particularly with regards to throwing {@link IllegalArgumentException}. - ** - ** TODO LOW maybe this could throw {@link DataFormatException} like {@link - ** #rev(Object)}? (probably no point...) - */ - I app(T translatee);// throws DataFormatException; - - /** - ** Reverse the translation. - ** - ** Implementations of this method which act on a recursive data structure - ** that is designed to be partially loaded (such as - ** {@link plugins.Library.util.SkeletonMap}), should follow the same - ** guidelines as in {@link Archiver#pull(Serialiser.PullTask)}. - ** - ** @throws DataFormatException if some aspect of the input prevents the - ** output from being constructed. 
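A Translator is in effect a two-way mapping: rev(app(x)) should reproduce x, with rev free to reject a malformed intermediate. A self-contained sketch of that contract (editor's illustration; Xlat and DateXlat are made-up stand-ins, and plain Exception stands in for DataFormatException):

    import java.util.Date;

    interface Xlat<T, I> {                       // same shape as the Translator interface above
        I app(T translatee);                     // forward translation
        T rev(I intermediate) throws Exception;  // reverse translation; may reject bad input
    }

    class DateXlat implements Xlat<Date, Long> {
        public Long app(Date d) { return Long.valueOf(d.getTime()); }
        public Date rev(Long ms) throws Exception {
            if (ms == null || ms.longValue() < 0) throw new Exception("bad intermediate: " + ms);
            return new Date(ms.longValue());
        }
        public static void main(String[] args) throws Exception {
            DateXlat x = new DateXlat();
            Date d = new Date(1234567890L);
            System.out.println(x.rev(x.app(d)).equals(d)); // true: rev(app(d)) reproduces d
        }
    }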
- */ - T rev(I intermediate) throws DataFormatException; - -} diff --git a/src/plugins/Library/io/serial/package-info.java b/src/plugins/Library/io/serial/package-info.java deleted file mode 100644 index 5bafbe64..00000000 --- a/src/plugins/Library/io/serial/package-info.java +++ /dev/null @@ -1,11 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains the serialisation engine and related classes and interfaces, such -** as {@link plugins.Library.io.serial.ProgressTracker} and {@link -** plugins.Library.io.serial.Translator} -** -** @author infinity0 -*/ -package plugins.Library.io.serial; diff --git a/src/plugins/Library/package-info.java b/src/plugins/Library/package-info.java deleted file mode 100644 index fc42a0c2..00000000 --- a/src/plugins/Library/package-info.java +++ /dev/null @@ -1,13 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains interfaces and the main Library class that holds them together. -** The index package and its subpackages contain implementations of these -** interfaces. -** -** @author MikeB -** @author infinity0 -*/ -package plugins.Library; - diff --git a/src/plugins/Library/search/InvalidSearchException.java b/src/plugins/Library/search/InvalidSearchException.java deleted file mode 100644 index 191e811a..00000000 --- a/src/plugins/Library/search/InvalidSearchException.java +++ /dev/null @@ -1,19 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ - -package plugins.Library.search; - -/** - * Exception when something is wrong with search parameters but nothing else - * @author MikeB - */ -public class InvalidSearchException extends Exception{ - public InvalidSearchException(String s){ - super (s); - } - - public InvalidSearchException(String string, Exception ex) { - super(string, ex); - } -} diff --git a/src/plugins/Library/search/ResultSet.java b/src/plugins/Library/search/ResultSet.java deleted file mode 100644 index b9df2990..00000000 --- a/src/plugins/Library/search/ResultSet.java +++ /dev/null @@ -1,511 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.search; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermIndexEntry; -import plugins.Library.index.TermTermEntry; -import plugins.Library.index.TermPageEntry; -import plugins.Library.util.exec.Execution; -import plugins.Library.util.exec.TaskAbortException; - -import freenet.support.Logger; - -import java.util.Iterator; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.HashMap; - -/** - * Unmodifiable Set which makes sure all data in results being combined is - * retained and is created as the result of Set operations on other Sets. Does - * not contain references to the original Sets. - *

- * The set isn't finalised until a run is finished and isDone() is true.
- *
- * @author MikeB
- */
-public class ResultSet implements Set<TermEntry>, Runnable {
-	private ResultOperation resultOperation;
-	private boolean done = false;
-	private Set<TermEntry>[] subresults;
-	private RuntimeException exception;
-
-	/**
-	 * What should be done with the results of the subsearches:
-	 * INTERSECTION : ( >2 subRequests) result is the results common to all subsearches
-	 * UNION : ( >2 subRequests) result is all results of the subsearches
-	 * REMOVE : ( 2 subRequests) result is the results of the first subsearch with the second removed
-	 * PHRASE : ( >2 subRequests) for a phrase; the subsearches are in the order of the words in the phrase
-	 * SINGLE : ( 1 subRequest) encapsulates a single request
-	 * DIFFERENTINDEXES : ( >2 subRequests) for a search made up of requests on multiple indexes
-	 */
-	public enum ResultOperation{INTERSECTION, UNION, REMOVE, PHRASE, SINGLE, DIFFERENTINDEXES};
-
-	private HashMap<TermEntry, TermEntry> internal;
-	private final String subject;
-	private final boolean ignoreTAEs;
-
-	/**
-	 * @param subject The subject for each of the entries in the Set
-	 * @param resultOperation {@link ResultOperation} to be performed on each of the given Sets
-	 * @param subRequests List of subrequests whose getResult() output is combined
-	 * @throws TaskAbortException if thrown from a subrequest's getResult() method
-	 *
-	 * TODO reevaluate relevance for all combinations, and find a way to calculate the relevance of phrases
-	 */
-	ResultSet(String subject, ResultOperation resultOperation, List<Execution<Set<TermEntry>>> subRequests, boolean ignoreTAEs) throws TaskAbortException {
-		if(resultOperation==ResultOperation.SINGLE && subRequests.size()!=1)
-			throw new IllegalArgumentException(subRequests.size() + " requests supplied with SINGLE operation");
-		if(resultOperation==ResultOperation.REMOVE && subRequests.size()!=2)
-			throw new IllegalArgumentException("Negative operations can only have 2 parameters");
-		if( ( resultOperation==ResultOperation.PHRASE
-				|| resultOperation == ResultOperation.INTERSECTION
-				|| resultOperation == ResultOperation.UNION
-				|| resultOperation == ResultOperation.DIFFERENTINDEXES )
-				&& subRequests.size()<2)
-			throw new IllegalArgumentException(resultOperation.toString() + " operations need more than one term");
-
-
-		this.subject = subject;
-		internal = new HashMap<TermEntry, TermEntry>();
-		this.resultOperation = resultOperation;
-
-		// Make sure any TaskAbortExceptions are found here and not when it's run
-		this.ignoreTAEs = ignoreTAEs;
-		subresults = getResultSets(subRequests, ignoreTAEs);
-	}
-
-	public synchronized void run() {
-		if(done)
-			throw new IllegalStateException("This ResultSet has already run and is finalised.");
-
-		try{
-			// Decide what to do
-			switch(resultOperation){
-				case SINGLE:	// Just add everything
-					addAllToEmptyInternal(subresults[0]);
-					break;
-				case DIFFERENTINDEXES:	// Same as UNION currently
-				case UNION:	// Add every one without overwriting
-					unite(subresults);
-					break;
-				case INTERSECTION:	// Add one then retain the others
-					intersect(subresults);
-					break;
-				case REMOVE:	// Add one then remove the other
-					exclude(subresults[0], subresults[1]);
-					break;
-				case PHRASE:
-					phrase(subresults);
-					break;
-			}
-		}catch(RuntimeException e){
-			exception = e;	// Exceptions thrown here are stored in case this is being run in a thread; they are rethrown in isDone() or iterator()
			throw e;
-		}
-		subresults = null;	// forget the subresults
-		done = true;
-	}
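The INTERSECTION case keeps an entry only when every subresult contains it (compared ignoring subject) and averages the relevance scores, as intersect() further below implements. A toy rendition of that rule, with plain maps in place of TermEntry sets (editor's sketch):

    import java.util.HashMap;
    import java.util.Map;

    class IntersectDemo {
        public static void main(String[] args) {
            Map<String, Float> a = new HashMap<String, Float>();
            a.put("page1", 0.9f); a.put("page2", 0.4f);
            Map<String, Float> b = new HashMap<String, Float>();
            b.put("page1", 0.5f);
            for (Map.Entry<String, Float> e : a.entrySet()) {
                Float rel2 = b.get(e.getKey());   // null unless both subresults contain the page
                if (rel2 != null) {
                    float avg = (e.getValue().floatValue() + rel2.floatValue()) / 2;
                    System.out.println(e.getKey() + " rel=" + avg); // page1 rel=0.7; page2 is dropped
                }
            }
        }
    }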
-
-	/**
-	 * Copy a collection into a ResultSet
-	 * @param subject to change the subject of each entry
-	 * @param copy Collection to copy
-	 */
-	private ResultSet(String subject, Collection<TermEntry> copy, boolean ignoreTAEs) {
-		this.subject = subject;
-		internal = new HashMap<TermEntry, TermEntry>();
-		this.ignoreTAEs = ignoreTAEs;
-		addAllToEmptyInternal(copy);
-	}
-
-
-	// Set interface methods, follow the standard contract
-
-	public int size() {
-		return internal.size();
-	}
-
-	public boolean isEmpty() {
-		return internal.isEmpty();
-	}
-
-	public boolean contains(Object o) {
-		return internal.containsKey(o);
-	}
-
-	/**
-	 * @return Iterator over the Set; remove is unsupported
-	 * @throws RuntimeException if a RuntimeException was caught while generating the Set
-	 */
-	public Iterator<TermEntry> iterator() {
-		if(exception != null)
-			throw new RuntimeException("RuntimeException thrown in ResultSet thread", exception);
-		return new ResultIterator(internal.keySet().iterator());
-	}
-
-	public Object[] toArray() {
-		return internal.keySet().toArray();
-	}
-
-	public <T> T[] toArray(T[] a) {
-		return internal.keySet().toArray(a);
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public boolean add(TermEntry e) {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public boolean remove(Object o) {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-	public boolean containsAll(Collection<?> c) {
-		return internal.keySet().containsAll(c);
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public boolean addAll(Collection<? extends TermEntry> c) {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public boolean retainAll(Collection<?> c) {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public boolean removeAll(Collection<?> c) {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-	/**
-	 * Not supported : Set is unmodifiable
-	 * @throws UnsupportedOperationException always
-	 */
-	public void clear() {
-		throw new UnsupportedOperationException("Set is unmodifiable.");
-	}
-
-
-
-
-
-	// Private internal functions for changing the set
-	// TODO relevances, metadata extra, check copy constructors
-
-	private void addInternal(TermEntry entry) {
-		internal.put(entry, entry);
-	}
-
-	/**
-	 * Adds all entries in {@code result} to the (empty) internal Set, converted
-	 * to this ResultSet's subject. If the Set already has anything inside it,
-	 * add the entries with addInternal instead.
-	 * @param result
-	 */
-	private void addAllToEmptyInternal(Collection<TermEntry> result) {
-		for (TermEntry termEntry : result) {
-			TermEntry entry = convertEntry(termEntry);
-			addInternal(entry);
-		}
-	}
-
-	/**
-	 * Add all entries in the first collection but not in the second
-	 */
-	private void exclude(Collection<TermEntry> add, Collection<TermEntry> subtract) {
-		for (TermEntry termEntry : add){
-			if(getIgnoreSubject(termEntry, subtract)==null)
-				addInternal(termEntry);
-		}
-	}
-
-	/**
-	 * Iterate over all the collections, adding and merging all their entries
-	 * @param collections to be merged into this collection
-	 * TODO proper relevance calculation here; currently I think the relevance of the first one added has less impact than the others. The other 3 types are more important, I believe.
-	 */
-	private void unite(Collection<TermEntry>... collections) {
-		for(Collection<TermEntry> c : collections)
-			if(c==null)
-				Logger.error(this, "the result was null");
-			else
-				for (TermEntry termEntry : c) {
-					TermEntry entry = convertEntry(termEntry);
-					if(contains(entry))
-						addInternal(mergeEntries(internal.get(entry), entry));
-					else
-						addInternal(entry);
-				}
-	}
-
-	/**
-	 * Iterate over the first collection, adding those elements which exist in all the other collections
-	 * @param collections a bunch of collections to intersect
-	 */
-	private void intersect(Collection<TermEntry>... collections) {
-		Collection<TermEntry> firstCollection = collections[0];
-		// Iterate over it
-		for (Iterator<TermEntry> it = firstCollection.iterator(); it.hasNext();) {
-			TermEntry termEntry = it.next();
-			// if the term entry is contained in all the other collections, add it
-			float combinedrelevance = termEntry.rel;
-
-			int i;
-			for (i = 1; i < collections.length; i++) {
-				Collection<TermEntry> collection = collections[i];
-				// See if collection contains termEntry
-
-				TermEntry termEntry2 = getIgnoreSubject(termEntry, collection);
-				if ( termEntry2 == null )
-					break;
-				else	// add to combined relevance
-					combinedrelevance += termEntry2.rel;
-			}
-			if (i==collections.length){
-				TermEntry newEntry = convertEntry(termEntry, combinedrelevance/collections.length);
-				addInternal(newEntry);
-			}
-		}
-	}
-
-	/**
-	 * Iterate over the first collection, and the term positions of each entry,
-	 * keeping those positions which are followed in the other collections. Keeps
-	 * those entries which have positions remaining after this process.
-	 * @param collections
-	 */
-	private void phrase(Collection<TermEntry>... 
collections) { - Collection firstCollection = collections[0]; - // Iterate over it - for (TermEntry termEntry : firstCollection) { - if(!(termEntry instanceof TermPageEntry)) - continue; - // if term entry is followed in all the others, add it to this - TermPageEntry termPageEntry = (TermPageEntry)termEntry; - if(!termPageEntry.hasPositions()) - continue; - Map positions = new HashMap(termPageEntry.positionsMap()); - - int i; // Iterate over the other collections, checking for following - for (i = 1; positions != null && i < collections.length && positions.size() > 0; i++) { - Collection collection = collections[i]; - if(collection == null) - continue; // Treat stop words as blanks, dont check - // See if collection follows termEntry - TermPageEntry termPageEntry1 = (TermPageEntry)getIgnoreSubject(termPageEntry, collection); - if(termPageEntry1==null || !termPageEntry1.hasPositions()) // If collection doesnt contain this termpageentry or has not positions, it does not follow - positions = null; - else{ - for (Iterator it = positions.keySet().iterator(); it.hasNext();) { - int posi = it.next(); - if ( !termPageEntry1.hasPosition(posi+i)) - it.remove(); - else - Logger.minor(this, termPageEntry.page + "["+positions.keySet()+"] is followed by "+termPageEntry1.page+"["+termPageEntry1.positions()+"] +"+i); - } - } - } - // if this termentry has any positions remaining, add it - if(positions != null && positions.size() > 0) - addInternal(new TermPageEntry(subject, termPageEntry.rel, termPageEntry.page, termPageEntry.title, positions)); - } - } - - private TermEntry convertEntry(TermEntry termEntry) { - return convertEntry(termEntry, termEntry.rel); - } - - private TermEntry convertEntry(TermEntry termEntry, float rel) { - TermEntry entry; - if (termEntry instanceof TermTermEntry) - entry = new TermTermEntry(subject, rel, ((TermTermEntry)termEntry).term ); - else if (termEntry instanceof TermPageEntry) - entry = new TermPageEntry(subject, rel, ((TermPageEntry)termEntry).page, ((TermPageEntry)termEntry).title, ((TermPageEntry)termEntry).positionsMap() ); - else if (termEntry instanceof TermIndexEntry) - entry = new TermIndexEntry(subject, rel, ((TermIndexEntry)termEntry).index ); - else - throw new UnsupportedOperationException("The TermEntry type " + termEntry.getClass().getName() + " is not currently supported in ResultSet"); - return entry; - } - - - // TODO merge and combine can be cut down - /** - * Merge a group of TermEntries each pair of which(a, b) must be a.equalsTarget - * The new TermEntry created will have the subject of this ResultSet, it - * will try to combine all the optional fields from the TermEntrys being merged, - * with priority being put on those earliest in the arguments - * - * @param entries - * @return The merged entry - */ - private TermEntry mergeEntries(TermEntry... 
entries) { - for (int i = 1; i < entries.length; i++) - if(!entries[0].equalsTarget(entries[i])) - throw new IllegalArgumentException("entries were not equal : "+entries[0].toString()+" & "+entries[i].toString()); - - TermEntry combination = entries[0]; - - // This mess could be replaced by a method in the TermEntrys which returns a new TermEntry with a changed subject - if(combination instanceof TermIndexEntry){ - combination = new TermIndexEntry(subject, entries[0].rel, ((TermIndexEntry)combination).index); - } else if(combination instanceof TermPageEntry){ - combination = new TermPageEntry(subject, entries[0].rel, ((TermPageEntry)combination).page, ((TermPageEntry)combination).title, ((TermPageEntry)combination).positionsMap()); - } else if(combination instanceof TermTermEntry){ - combination = new TermTermEntry(subject, entries[0].rel, ((TermTermEntry)combination).term); - } else - throw new IllegalArgumentException("Unknown type : "+combination.getClass().toString()); - - for (int i = 1; i < entries.length; i++) { - TermEntry termEntry = entries[i]; - combination = combine(combination, termEntry); - } - - return combination; - } - - private TermEntry combine(TermEntry entry1, TermEntry entry2) { - if(!entry1.equalsTarget(entry2)) - throw new IllegalArgumentException("Combine can only be performed on equal TermEntrys"); - - float newRel = entry1.rel / ((entry2.rel == 0)? 1 : 2 ) + entry2.rel / ((entry1.rel == 0)? 1 : 2 ); - - if(entry1 instanceof TermPageEntry){ - TermPageEntry pageentry1 = (TermPageEntry)entry1; - TermPageEntry pageentry2 = (TermPageEntry)entry2; - // Merge positions - Map newPos = null; - if((!pageentry1.hasPositions()) && pageentry2.hasPositions()) - newPos = new HashMap(pageentry2.positionsMap()); - else if(pageentry1.hasPositions()){ - newPos = new HashMap(pageentry1.positionsMap()); - if(pageentry2.hasPositions()) - newPos.putAll(pageentry2.positionsMap()); - } - return new TermPageEntry(pageentry1.subj, newRel, pageentry1.page, (pageentry1.title!=null)?pageentry1.title:pageentry2.title, newPos); - - } else if(entry1 instanceof TermIndexEntry){ - TermIndexEntry castEntry = (TermIndexEntry) entry1; - // nothing to merge, no optional fields except relevance - return new TermIndexEntry(castEntry.subj, newRel, castEntry.index); - - } else if(entry1 instanceof TermTermEntry){ - // nothing to merge, no optional fields except relevance - TermTermEntry castEntry = (TermTermEntry) entry1; - - return new TermTermEntry(castEntry.subj, newRel, castEntry.term); - - } else - throw new UnsupportedOperationException("This type of TermEntry is not yet supported in the combine code : "+entry1.getClass().getName()); - - } - - /** - * Strip all results out of List of requests - * @param subRequests - * @return An array containing the stripped sets - * @throws {@link TaskAbortException} from subRequests' {@link Execution#getResult()} - */ - private Set[] getResultSets(List>> subRequests, boolean ignoreTAEs) throws TaskAbortException{ - Set[] sets = new Set[subRequests.size()]; - int x = 0; - for (int i = 0; i < subRequests.size(); i++) { - if(subRequests.get(i) == null){ - if(resultOperation == ResultOperation.PHRASE) - sets[x++] = null; - else - throw new NullPointerException("Nulls not allowed in subRequests for operations other than phrase."); - } else { - try { - sets[x++] = subRequests.get(i).getResult(); - } catch (TaskAbortException e) { - if(!ignoreTAEs) throw e; - } - } - } - if(x != subRequests.size()) { - Set[] newSets = new Set[x]; - System.arraycopy(sets, 0, newSets, 0, 
newSets.length); - sets = newSets; - } - return sets; - } - - /** - * Gets a TermEntry from the collection which is equal to entry ignoring subject - * @param entry - * @param collection - */ - private TermEntry getIgnoreSubject(TermEntry entry, Collection collection){ - TermEntry result = null; - for (TermEntry termEntry : collection) { - if (entry.equalsTarget(termEntry)){ - result = termEntry; - break; - } - } - return result; - } - - @Override public String toString(){ - return internal.keySet().toString(); - } - - /** - * Returns true if the ResultSet has completed its operations - * @throws RuntimeException if a RuntimeException was caught while generating the Set - */ - public boolean isDone(){ - if(exception != null) - throw new RuntimeException("RuntimeException thrown in ResultSet thread", exception); - return done; - } - - - /** - * Iterator which doesn't allow remove() - */ - class ResultIterator implements Iterator { - Iterator internal; - - private ResultIterator(Iterator iterator) { - internal = iterator; - } - - public boolean hasNext() { - return internal.hasNext(); - } - - public TermEntry next() { - return internal.next(); - } - - public void remove() { - throw new UnsupportedOperationException("Removal not allowed."); - } - - } -} diff --git a/src/plugins/Library/search/Search.java b/src/plugins/Library/search/Search.java deleted file mode 100644 index f01a79a9..00000000 --- a/src/plugins/Library/search/Search.java +++ /dev/null @@ -1,654 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.search; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Set; -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -import plugins.Library.Library; -import plugins.Library.index.TermEntry; -import plugins.Library.search.ResultSet.ResultOperation; -import plugins.Library.ui.ResultNodeGenerator; -import plugins.Library.util.exec.AbstractExecution; -import plugins.Library.util.exec.CompositeProgress; -import plugins.Library.util.exec.Execution; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.TaskAbortException; -import freenet.support.Executor; -import freenet.support.HTMLNode; -import freenet.support.Logger; - -/** - * Performs asynchronous searches over many index or with many terms and search logic - * TODO review documentation - * @author MikeB - */ -public class Search extends AbstractExecution> - implements CompositeProgress, Execution> { - - private static Library library; - private static Executor executor; - - private ResultOperation resultOperation; - - private List>> subsearches; - - private String query; - private String indexURI; - - /** Map of Searches by subject */ - private static HashMap allsearches = new HashMap(); - /** Map of Searches by hashCode */ - private static HashMap searchhashes = new HashMap(); - private ResultSet resultset; - - /** - * Settings for producing result nodes, if true a HTMLNode of the results will be generated after the results are complete which can be accessed via getResultNode() - */ - private boolean formatResult = false; - private boolean htmlgroupusk; - private boolean htmlshowold; - private boolean htmljs; 
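Searches are deduplicated through the static maps above: each search is keyed by makeString(query, indexuri), so repeating the same query on the same index returns the existing Search object (see storeSearch/getSearch/hasSearch below). A reduced sketch of that registry pattern (editor's illustration; SearchRegistryDemo and its members are made-up stand-ins):

    import java.util.HashMap;
    import java.util.Locale;
    import java.util.Map;

    class SearchRegistryDemo {
        private static final Map<String, Object> all = new HashMap<String, Object>();

        static String makeString(String search, String indexuri) {
            return search + "@" + indexuri;             // same key scheme as Search.makeString
        }

        static synchronized Object getOrStart(String search, String indexuri) {
            String key = makeString(search.toLowerCase(Locale.US).trim(), indexuri);
            Object s = all.get(key);
            if (s == null) { s = new Object(); all.put(key, s); } // stand-in for a new Search
            return s;
        }

        public static void main(String[] args) {
            Object a = getOrStart("Freenet", "USK@index");
            Object b = getOrStart("  freenet ", "USK@index");
            System.out.println(a == b);                 // true: the same search object is reused
        }
    }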
- private ResultNodeGenerator resultNodeGenerator; - private HTMLNode pageEntryNode; - - private enum SearchStatus { Unstarted, Busy, Combining_First, Combining_Last, Formatting, Done }; - private SearchStatus status = SearchStatus.Unstarted; - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(Search.class); - } - - private synchronized static void storeSearch(Search search){ - allsearches.put(search.getSubject(), search); - searchhashes.put(search.hashCode(), search); - } - - private static synchronized void removeSearch(Search search) { - allsearches.remove(search.subject); - searchhashes.remove(search.hashCode()); - } - - /** - * Creates a search for any number of indices, starts and returns the associated Request object - * TODO startSearch with array of indexes - * - * @param search string to be searched - * @param indexuri URI of index(s) to be used - * @return existing Search for this if it exists, new one otherwise or null if query is for a stopword or stop query - * @throws InvalidSearchException if any part of the search is invalid - */ - public static Search startSearch(String search, String indexuri) throws InvalidSearchException, TaskAbortException{ - search = search.toLowerCase(Locale.US).trim(); - if(search.length()==0) - throw new InvalidSearchException("Blank search"); - search = fixCJK(search); - - // See if the same search exists - if (hasSearch(search, indexuri)) - return getSearch(search, indexuri); - - if(logMINOR) Logger.minor(Search.class, "Starting new search for "+search+" in "+indexuri); - - String[] indices = indexuri.split("[ ;]"); - if(indices.length<1 || search.trim().length()<1) - throw new InvalidSearchException("Attempt to start search with no index or terms"); - else if(indices.length==1){ - Search newSearch = splitQuery(search, indexuri); - return newSearch; - }else{ - // create search for multiple terms over multiple indices - ArrayList>> indexrequests = new ArrayList>>(indices.length); - for (String index : indices){ - Search indexsearch = startSearch(search, index); - if(indexsearch==null) - return null; - indexrequests.add(indexsearch); - } - Search newSearch = new Search(search, indexuri, indexrequests, ResultOperation.DIFFERENTINDEXES); - return newSearch; - } - } - - - /** Transform into characters separated by spaces. - * FIXME: I'm sure we could handle this a lot better! */ - private static String fixCJK(String search) { - StringBuffer sb = null; - int offset = 0; - boolean wasCJK = false; - for(;offset < search.length();) { - int character = search.codePointAt(offset); - if(SearchUtil.isCJK(character)) { - if(wasCJK) { - sb.append(' '); // Delimit characters by whitespace so we do an && search. 
- } - if(sb == null) { - sb = new StringBuffer(); - sb.append(search.substring(0, offset)); - } - wasCJK = true; - } else { - wasCJK = false; - } - if(sb != null) - sb.append(Character.toChars(character)); - offset += Character.charCount(character); - } - if(sb != null) - return sb.toString(); - else - return search; - } - - /** - * Creates Search instance depending on the given requests - * - * @param query the query this instance is being used for, only for reference - * @param indexURI the index uri this search is made on, only for reference - * @param requests subRequests of this search - * @param resultOperation Which set operation to do on the results of the subrequests - * @throws InvalidSearchException if the search is invalid - **/ - private Search(String query, String indexURI, List>> requests, ResultOperation resultOperation) - throws InvalidSearchException{ - super(makeString(query, indexURI)); - if(resultOperation==ResultOperation.SINGLE && requests.size()!=1) - throw new InvalidSearchException(requests.size() + " requests supplied with SINGLE operation"); - if(resultOperation==ResultOperation.REMOVE && requests.size()!=2) - throw new InvalidSearchException("Negative operations can only have 2 parameters"); - if( ( resultOperation==ResultOperation.PHRASE - || resultOperation == ResultOperation.INTERSECTION - || resultOperation == ResultOperation.UNION - || resultOperation == ResultOperation.DIFFERENTINDEXES ) - && requests.size()<2) - throw new InvalidSearchException(resultOperation.toString() + " operations need more than one term"); - - query = query.toLowerCase(Locale.US).trim(); - - // Create a temporary list of sub searches then make it unmodifiable - List>> tempsubsearches = new ArrayList>>(); - for (Execution> request : requests) { - if(request != null || resultOperation == ResultOperation.PHRASE) - tempsubsearches.add(request); - else - throw new NullPointerException("Search cannot encapsulate nulls except in the case of a ResultOperation.PHRASE where they are treated as blanks"); - } - subsearches = Collections.unmodifiableList(tempsubsearches); - - this.query = query; - this.indexURI = indexURI; - this.resultOperation = resultOperation; - try { - setStatus(); - } catch (TaskAbortException ex) { - setError(ex); - } - - storeSearch(this); - if(logMINOR) Logger.minor(this, "Created Search object for with subRequests :"+subsearches); - } - - /** - * Encapsulate a request as a Search, only so original query and uri can be stored - * - * @param query the query this instance is being used for, only for reference - * @param indexURI the index uri this search is made on, only for reference - * @param request Request to encapsulate - */ - private Search(String query, String indexURI, Execution> request){ - super(makeString(query, indexURI)); - if(request == null) - throw new NullPointerException("Search cannot encapsulate null (query=\""+query+"\" indexURI=\""+indexURI+"\")"); - query = query.toLowerCase(Locale.US).trim(); - subsearches = Collections.singletonList(request); - - this.query = query; - this.indexURI = indexURI; - this.resultOperation = ResultOperation.SINGLE; - try { - setStatus(); - } catch (TaskAbortException ex) { - setError(ex); - } - storeSearch(this); - } - - - /** - * Splits query into multiple searches, will be used for advanced queries - * @param query search query, can use various different search conventions - * @param indexuri uri for one index - * @return single Search encompassing everything in the query or null if query is a stop word - * @throws 
InvalidSearchException if search query is invalid - */ - private static Search splitQuery(String query, String indexuri) throws InvalidSearchException, TaskAbortException{ - query = query.trim(); - if(query.matches("\\A[\\S&&[^-\"]]*\\Z")){ - // single search term - // return null if stopword - if(SearchUtil.isStopWord(query)) - return null; - Execution> request = library.getIndex(indexuri).getTermEntries(query); - if (request == null) - throw new InvalidSearchException( "Something wrong with query=\""+query+"\" or indexURI=\""+indexuri+"\", maybe something is wrong with the index or it's uri is wrong." ); - return new Search(query, indexuri, request ); - } - - // Make phrase search (hyphen-separated words are also treated as phrases) - if(query.matches("\\A\"[^\"]*\"\\Z") || query.matches("\\A((?:[\\S&&[^-]]+-)+[\\S&&[^-]]+)\\Z")){ - ArrayList>> phrasesearches = new ArrayList>>(); - String[] phrase = query.replaceAll("\"(.*)\"", "$1").split("[\\s-]+"); - if(logMINOR) Logger.minor(Search.class, "Phrase split: "+query); - for (String subquery : phrase){ - Search term = startSearch(subquery, indexuri); - phrasesearches.add(term); - } - // Not really sure how stopwords should be handled in phrases - // currently i'm thinking that they should be treated as blanks - // between other words and ignored in other cases "jesus of nazareth" - // is treated as "jesus nazareth". Whereas "the who" will be - // treated as a stop query as just searching for "who" and purporting - // that the results are representative of "the who" is misleading. - - // this makes sure there are no trailing nulls at the start - while(phrasesearches.size() > 0 && phrasesearches.get(0)==null) - phrasesearches.remove(0); - // this makes sure there are no trailing nulls at the end - while(phrasesearches.size() > 0 && phrasesearches.get(phrasesearches.size()-1)==null) - phrasesearches.remove(phrasesearches.size()-1); - - if(phrasesearches.size()>1) - return new Search(query, indexuri, phrasesearches, ResultOperation.PHRASE); - else - return null; - } - - - - if(logMINOR) Logger.minor(Search.class, "Splitting " + query); - // Remove phrases, place them in arraylist and replace them with references to the arraylist - ArrayList phrases = new ArrayList(); - Matcher nextPhrase = Pattern.compile("\"([^\"]*?)\"").matcher(query); - StringBuffer sb = new StringBuffer(); - while (nextPhrase.find()) - { - String phrase = nextPhrase.group(1); - nextPhrase.appendReplacement(sb, "£"+phrases.size()+"€"); - phrases.add(phrase); - } - nextPhrase.appendTail(sb); - - if(logMINOR) Logger.minor(Search.class, "Phrases removed query: "+sb); - - // Remove the unmatched \" (if any) - String formattedquery = sb.toString().replaceFirst("\"", ""); - if(logMINOR) Logger.minor(Search.class, "Removing the unmatched bracket: "+formattedquery); - - // Treat hyphens as phrases, as they are treated equivalently in spider so this is the most effective way now - nextPhrase = Pattern.compile("((?:[\\S&&[^-]]+-)+[\\S&&[^-]]+)").matcher(formattedquery); - sb.setLength(0); - while (nextPhrase.find()) - { - String phrase = nextPhrase.group(1); - nextPhrase.appendReplacement(sb, "£"+phrases.size()+"€"); - phrases.add(phrase); - } - nextPhrase.appendTail(sb); - - formattedquery = sb.toString(); - if(logMINOR) Logger.minor(Search.class, "Treat hyphenated words as phrases: " + formattedquery); - - // Substitute service symbols. Those which are inside phrases should not be seen as "service" ones. 
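The three replaceAll calls that follow rewrite the remaining free-form query into the internal operator syntax: || for OR, ^^(...) for NOT, and && for AND. Their combined effect can be checked end-to-end with a small sketch using the same regexes (editor's illustration):

    class QueryRewriteDemo {
        public static void main(String[] args) {
            String q = "freenet or hyphanet -spam library";
            q = q.replaceAll("\\s+or\\s+", "||");               // OR  -> ||
            q = q.replaceAll("\\s+(?:not\\s*|-)(\\S+)", "^^($1)"); // NOT -> ^^(...)
            q = q.replaceAll("\\s+", "&&");                     // AND -> &&
            System.out.println(q); // freenet||hyphanet^^(spam)&&library
        }
    }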
- formattedquery = formattedquery.replaceAll("\\s+or\\s+", "||"); - if(logMINOR) Logger.minor(Search.class, "OR-subst query : "+formattedquery); - formattedquery = formattedquery.replaceAll("\\s+(?:not\\s*|-)(\\S+)", "^^($1)"); - if(logMINOR) Logger.minor(Search.class, "NOT-subst query : "+formattedquery); - formattedquery = formattedquery.replaceAll("\\s+", "&&"); - if(logMINOR) Logger.minor(Search.class, "AND-subst query : "+formattedquery); - - // Put phrases back in - String[] phraseparts=formattedquery.split("£"); - formattedquery=phraseparts[0]; - for (int i = 1; i < phraseparts.length; i++) { - String string = phraseparts[i]; - if(logMINOR) Logger.minor(Search.class, "replacing phrase "+string.replaceFirst("(\\d+).*", "$1")); - formattedquery += "\""+ phrases.get(Integer.parseInt(string.replaceFirst("(\\d+).*", "$1"))) +"\"" + string.replaceFirst("\\d+€(.*)", "$1"); - } - if(logMINOR) Logger.minor(Search.class, "Phrase back query: "+formattedquery); - - // Make complement search - if (formattedquery.contains("^^(")){ - ArrayList>> complementsearches = new ArrayList>>(); - String[] splitup = formattedquery.split("(\\^\\^\\(|\\))", 3); - Search add = startSearch(splitup[0]+splitup[2], indexuri); - Search subtract = startSearch(splitup[1], indexuri); - if(add==null || subtract == null) - return null; // If 'and' is not to be searched for 'the -john' is not to be searched for, also 'john -the' wouldnt have shown many results anyway - complementsearches.add(add); - complementsearches.add(subtract); - return new Search(query, indexuri, complementsearches, ResultOperation.REMOVE); - } - // Split intersections - if (formattedquery.contains("&&")){ - ArrayList intersectsearches = new ArrayList(); - String[] intersects = formattedquery.split("&&"); - for (String subquery : intersects){ - Search subsearch = startSearch(subquery, indexuri); - if (subsearch != null) // We will assume that searching for 'the big apple' will near enough show the same results as 'big apple', so just ignore 'the' in interseaction - intersectsearches.add(subsearch); - } - switch(intersectsearches.size()){ - case 0: // eg. 'the that' - return null; - case 1 : // eg. 
'cake that' will return a search for 'cake' - return intersectsearches.get(0); - default : - return new Search(query, indexuri, intersectsearches, ResultOperation.INTERSECTION); - } - } - // Split unions - if (formattedquery.contains("||")){ - ArrayList<Execution<Set<TermEntry>>> unionsearches = new ArrayList<Execution<Set<TermEntry>>>(); - String[] unions = formattedquery.split("\\|\\|"); - for (String subquery : unions){ - Search add = startSearch(subquery, indexuri); - if (add == null) // e.g. a search for 'the or cake' would be almost the same as a search for 'the' and so should be treated as such - return null; - unionsearches.add(add); - } - return new Search(query, indexuri, unionsearches, ResultOperation.UNION); - } - - Logger.error(Search.class, "No split made, "+formattedquery+query); - return null; - } - - - /** - * Sets the parent plugin to be used for logging & plugin api - */ - public static void setup(Library library, Executor executor){ - Search.library = library; - Search.executor = executor; - Search.allsearches = new HashMap<String, Search>(); - } - - /** - * Gets a Search from the Map - * @param search - * @param indexuri - * @return Search or null if not found - */ - public synchronized static Search getSearch(String search, String indexuri){ - if(search==null || indexuri==null) - return null; - search = search.toLowerCase(Locale.US).trim(); - - return allsearches.get(makeString(search, indexuri)); - } - public synchronized static Search getSearch(int searchHash){ - return searchhashes.get(searchHash); - } - - /** - * Looks for a given search in the map of searches - * @param search - * @param indexuri - * @return true if it's found - */ - public static boolean hasSearch(String search, String indexuri){ - if(search==null || indexuri==null) - return false; - search = search.toLowerCase(Locale.US).trim(); - return allsearches.containsKey(makeString(search, indexuri)); - } - - public static boolean hasSearch(int searchHash){ - return searchhashes.containsKey(searchHash); - } - - public static synchronized Map<String, Search> getAllSearches(){ - return Collections.unmodifiableMap(allsearches); - } - - public String getQuery(){ - return query; - } - - public String getIndexURI(){ - return indexURI; - } - - /** - * Creates a string which uniquely identifies this Search object for comparison - * and lookup; it won't produce false positives, but can produce false negatives, as search and indexuri aren't standardised - * - * @param search - * @param indexuri - */ - public static String makeString(String search, String indexuri){ - return search + "@" + indexuri; - } - - /** - * A descriptive string for logging - */ - @Override - public String toString(){ - return "Search: "+resultOperation+" - " + status + " : "+subject+" : "+subsearches; - } - - /** - * @return List of Progresses this search depends on; it will not return CompositeProgresses - */ - public List<? extends Progress> getSubProgress(){ - if(logMINOR) Logger.minor(this, toString()); - - if (subsearches == null) - return null; - // Only index splits will be allowed as composites - if (resultOperation == ResultOperation.DIFFERENTINDEXES) - return subsearches; - // Everything else is split into leaves - List<Progress> subprogresses = new ArrayList<Progress>(); - for (Execution<Set<TermEntry>> request : subsearches) { - if(request == null) - continue; - if( request instanceof CompositeProgress && ((CompositeProgress)request).getSubProgress()!=null && ((CompositeProgress) request).getSubProgress().iterator().hasNext()){ - for (Iterator<? extends Progress> it = ((CompositeProgress)request).getSubProgress().iterator(); it.hasNext();) { - Progress progress1 = it.next(); -
subprogresses.add(progress1); - } - }else - subprogresses.add(request); - } - return subprogresses; - } - - - /** - * @return true if all are Finished and the Result is ready; also stimulates the creation of the result if all subrequests are complete and the result isn't made - */ - @Override public boolean isDone() throws TaskAbortException{ - setStatus(); - return status == SearchStatus.Done; - } - - /** - * Returns whether the generator has formatted the results - */ - public boolean hasGeneratedResultNode(){ - return pageEntryNode != null; - } - - /** - * After this finishes running, the status of this Search object will be correct; stimulates the creation of the result if all subrequests are complete and the result isn't made - * @throws plugins.Library.util.exec.TaskAbortException - */ - private synchronized void setStatus() throws TaskAbortException{ - switch (status){ - case Unstarted : // If Unstarted, status -> Busy - status = SearchStatus.Busy; - case Busy : - if(!isSubRequestsComplete()) - for (Execution<Set<TermEntry>> request : subsearches) - if(request != null && (!(request instanceof Search) || ((Search)request).status==SearchStatus.Busy)) - return; // If Busy & still waiting for subrequests to complete, status remains Busy - status = SearchStatus.Combining_First; // If Busy and waiting for subrequests to combine, status -> Combining_First - case Combining_First : // for when subrequests are combining - if(!isSubRequestsComplete()) // If combining first and subsearches still haven't completed, remain - return; - // If subrequests have completed, start the process to combine results - resultset = new ResultSet(subject, resultOperation, subsearches, innerCanFailAndStillComplete()); - if(executor!=null) - executor.execute(resultset, "Library.Search : combining results"); - else - (new Thread(resultset, "Library.Search : combining results")).start(); - status = SearchStatus.Combining_Last; - case Combining_Last : // for when this is combining - if(!resultset.isDone()) - return; // If Combining & combine not finished, status remains as Combining - subsearches = null; // clear the subrequests after they have been combined - // If finished Combining and asked to generate the resultnode, start that process - if(formatResult){ - // the subrequests are complete, so we can start up the result node generator - resultNodeGenerator = new ResultNodeGenerator(resultset, htmlgroupusk, htmlshowold, htmljs); - if(executor!=null) - executor.execute(resultNodeGenerator, "Library.Search : formatting results"); - else - (new Thread(resultNodeGenerator, "Library.Search : formatting results")).start(); - status = SearchStatus.Formatting; // status -> Formatting - }else // If not asked to format output, status -> Done - status = SearchStatus.Done; - case Formatting : - if(formatResult){ - // If asked to generate the resultnode and still doing that, status remains as Formatting - if(!resultNodeGenerator.isDone()) - return; - // If finished Formatting or not asked to do so, status -> Done - pageEntryNode = resultNodeGenerator.getPageEntryNode(); - resultNodeGenerator = null; - } - status = SearchStatus.Done; - case Done : - // Done, do nothing - } - } - - /** - * @return true if all are Finished, false otherwise - */ - private boolean isSubRequestsComplete() throws TaskAbortException{ - for(Execution<Set<TermEntry>> r : subsearches) { - try { - if(r != null && !r.isDone()) - return false; - } catch (TaskAbortException e) { - if(innerCanFailAndStillComplete()) continue; - throw e; - } - } - return true; - } - - - /** - * Return the set
of results or null if it is not ready
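As a usage sketch of the polling contract described here, assuming only the calling conventions visible in this class (startSearch, isDone, getResult); the method name, query and poll interval are illustrative:

package plugins.Library.search;

import java.util.Set;

import plugins.Library.index.TermEntry;
import plugins.Library.util.exec.TaskAbortException;

class SearchUsageSketch {
    static Set<TermEntry> runQuery(String query, String indexuri)
            throws InvalidSearchException, TaskAbortException, InterruptedException {
        Search search = Search.startSearch(query, indexuri);
        if (search == null)
            return null;               // the query consisted only of stopwords
        while (!search.isDone())       // isDone() also drives setStatus() forward
            Thread.sleep(500);         // arbitrary poll interval
        return search.getResult();     // ready now; this also deregisters the search
    }
}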
- * @return Set of TermEntry - */ - @Override public Set getResult() throws TaskAbortException { - try { - if(!isDone()) - return null; - } catch (TaskAbortException e) { - removeSearch(this); - throw e; - } - - removeSearch(this); - Set rs = resultset; - return rs; - } - - public HTMLNode getHTMLNode(){ - try { - if (!isDone() || !formatResult) { - return null; - } - } catch (TaskAbortException ex) { - Logger.error(this, "Error finding out whether this is done", ex); - return null; - } - - removeSearch(this); - HTMLNode pen = pageEntryNode; - pageEntryNode = null; - - return pen; - } - - public synchronized void setMakeResultNode(boolean groupusk, boolean showold, boolean js){ - formatResult = true; - htmlgroupusk = groupusk; - htmlshowold = showold; - htmljs = js; - } - - @Override - public ProgressParts getParts() throws TaskAbortException { - if(subsearches==null) - return ProgressParts.normalise(0, 0); - return ProgressParts.getParts(this.getSubProgress(), ProgressParts.ESTIMATE_UNKNOWN); - } - - @Override - public String getStatus() { - try { - setStatus(); - return status.name(); - } catch (TaskAbortException ex) { - return "Error finding Status"; - } - } - - public boolean isPartiallyDone() { - throw new UnsupportedOperationException("Not supported yet."); - } - - public void remove() { - // FIXME abort the subsearches. - // FIXME in fact this shouldn't be necessary, the TaskAbortException should be propagated upwards??? - // FIXME really we'd want to convert it into a failed status so we could show that one failed, and still show partial results - if(subsearches != null) { - for(Execution> sub : subsearches) { - if(sub instanceof Search) - ((Search)sub).remove(); - } - } - removeSearch(this); - } - - public boolean innerCanFailAndStillComplete() { - switch(resultOperation) { - case DIFFERENTINDEXES: - case UNION: - return true; - } - return false; - } - -} diff --git a/src/plugins/Library/search/SearchTokenizer.java b/src/plugins/Library/search/SearchTokenizer.java deleted file mode 100755 index 76ef8908..00000000 --- a/src/plugins/Library/search/SearchTokenizer.java +++ /dev/null @@ -1,205 +0,0 @@ -package plugins.Library.search; - -import java.lang.Character.UnicodeBlock; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Locale; -import static plugins.Library.search.SearchUtil.*; - -/** - * Search Tokenizer - * - * Normalize and tokenize text blocks into words (for latin scripts), - * or single-double-tokens (for CJK). 
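A brief usage sketch before the implementation, assuming the stripped generic types here were Iterable<String> and Iterator<String>, as the iterator methods suggest; the sample input is arbitrary:

import plugins.Library.search.SearchTokenizer;

class TokenizerDemo {
    public static void main(String[] args) {
        // normalize() lowercases, so "Mike's" comes out as "mike's"; with
        // returnPairs=true the CJK run also yields the pair token, giving
        // roughly: mike's, 東, 東京, 京, trip
        for (String token : new SearchTokenizer("Mike's 東京 trip", true))
            System.out.println(token);
    }
}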
- * - * @author SDiZ - */ -public class SearchTokenizer implements Iterable<String>, Iterator<String> { - private ArrayList<Mode> mode; - private ArrayList<String> segments; - private int nextPos; - private final boolean returnPairs; - static final int KEEP_NON_LETTER_MIN_CHARS = 3; - static final String allowedMidWord = "'"; - static final String discardIfEndWord = "'"; - - private Iterator<String> cjkTokenizer; - - enum Mode { - UNDEF, LATIN, CJK - }; - - public SearchTokenizer(String text, boolean returnPairs) { - this.returnPairs = returnPairs; - // normalize - text = normalize(text); - - // split code points, filter for letter or digit - final int length = text.length(); - segments = new ArrayList<String>(); - mode = new ArrayList<Mode>(); - - Mode curMode = Mode.UNDEF; - - StringBuilder sb = new StringBuilder(); - for (int offset = 0; offset < length;) { - final int codepoint = text.codePointAt(offset); - int charCount = Character.charCount(codepoint); - offset += charCount; - - if (Character.isLetterOrDigit(codepoint)) { - boolean isCJK = isCJK(codepoint); - boolean isNum = Character.isDigit(codepoint); - - // add a separator across the CJK/Latin margin - if (isCJK) { - if (curMode == Mode.LATIN && sb.length() != 0) { - segments.add(sb.toString()); - mode.add(curMode); - sb = new StringBuilder(); - } - curMode = Mode.CJK; - } else if (!isNum) { - if (curMode == Mode.CJK && sb.length() != 0) { - segments.add(sb.toString()); - mode.add(curMode); - sb = new StringBuilder(); - } - curMode = Mode.LATIN; - } - - sb.append(Character.toChars(codepoint)); - } else if (sb.length() != 0) { - boolean passed = false; - if(charCount == 1) { - // Allow apostrophes mid-word. - char c = text.charAt(offset-1); - if(allowedMidWord.indexOf(c) != -1) { - sb.append(c); - passed = true; - } - } - if(!passed) { - // last code point is not 0, add a separator - if(curMode != Mode.UNDEF || sb.length() >= KEEP_NON_LETTER_MIN_CHARS) { - // Words can't end in an apostrophe. - while(sb.length() > 0 && discardIfEndWord.indexOf(sb.charAt(sb.length()-1)) != -1) { - sb.setLength(sb.length()-1); - } - if(sb.length() > 0) { - segments.add(sb.toString()); - mode.add(curMode); - } - } - curMode = Mode.UNDEF; - sb = new StringBuilder(); - } - } - } - - if (sb.length() != 0) { - // Words can't end in an apostrophe. - while(sb.length() > 0 && discardIfEndWord.indexOf(sb.charAt(sb.length()-1)) != -1) { - sb.setLength(sb.length()-1); - } - if(sb.length() > 0) { - segments.add(sb.toString()); - mode.add(curMode); - } - } - } - - public String toString() { - StringBuilder str = new StringBuilder(); - str.append("->["); - - Iterator<String> it = segments.iterator(); - if (it.hasNext()) - str.append(it.next()); - while (it.hasNext()) { - str.append(','); - str.append(it.next()); - } - str.append("]"); - - for (String s : this) { - str.append(';'); - str.append(s); - } - - return str.toString(); - } - - public Iterator<String> iterator() { - return this; - } - - public boolean hasNext() { - return (cjkTokenizer != null && cjkTokenizer.hasNext()) || - (nextPos < segments.size()); - } - - public String next() { - if (cjkTokenizer != null) { - if (cjkTokenizer.hasNext()) - return cjkTokenizer.next(); - cjkTokenizer = null; - } - - Mode curMode = mode.get(nextPos); - String curSeg = segments.get(nextPos); - nextPos++; - - switch (curMode) { - case LATIN: - return curSeg; - - case CJK: - cjkTokenizer = cjkIterator(curSeg, returnPairs); - assert cjkTokenizer.hasNext(); - return cjkTokenizer.next(); - - case UNDEF: - // E.g. a number. We do index these. FIXME should we? Probably yes...
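The constructor above walks the text by Unicode code point rather than by char; a minimal isolated sketch of that idiom (the sample string is arbitrary, and 𝒜 is a supplementary character encoded as a surrogate pair):

class CodePointWalk {
    public static void main(String[] args) {
        String text = "a-\uD835\uDC9C"; // "a", "-", MATHEMATICAL SCRIPT CAPITAL A
        for (int offset = 0; offset < text.length(); ) {
            int codepoint = text.codePointAt(offset);
            offset += Character.charCount(codepoint); // 2 chars for a surrogate pair
            System.out.println(Integer.toHexString(codepoint)
                    + " letterOrDigit=" + Character.isLetterOrDigit(codepoint));
        }
    }
}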
- return curSeg; - - default: - assert false; // DOH! - } - return null; - } - - /** Iterate a CJK string. Return characters or characters and pairs of characters. - * @param returnPairs If true, return pairs of characters in between characters: - * C1C2C3C4 -> C1, C1C2, C2, C2C3, C3, C3C4, C4 */ - private Iterator cjkIterator(String cjkText, final boolean returnPairs) { - ArrayList cjkToken = new ArrayList(); - - String lastChar = null; - int length = cjkText.length(); - for (int offset = 0; offset < length;) { - final int codepoint = cjkText.codePointAt(offset); - offset += Character.charCount(codepoint); - - String curChar = new String(Character.toChars(codepoint)); - - if (lastChar != null && returnPairs) - cjkToken.add(lastChar + curChar); - if (isCJK(codepoint)) // skip number embedded in cjk - cjkToken.add(curChar); - lastChar = curChar; - } - - return cjkToken.iterator(); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - - protected String normalize(String text) { - // TODO: JAVA6: normalize to NFKC - // Do upper case first for Turkish and friends - return text.toUpperCase(Locale.US).toLowerCase(Locale.US); - } -} diff --git a/src/plugins/Library/search/SearchUtil.java b/src/plugins/Library/search/SearchUtil.java deleted file mode 100644 index 65236427..00000000 --- a/src/plugins/Library/search/SearchUtil.java +++ /dev/null @@ -1,55 +0,0 @@ -package plugins.Library.search; - -import java.lang.Character.UnicodeBlock; -import java.util.Arrays; -import java.util.List; - -public class SearchUtil { - public static boolean isCJK(int codePoint) { - UnicodeBlock block = Character.UnicodeBlock.of(codePoint); - return block == UnicodeBlock.CJK_COMPATIBILITY // CJK - || block == UnicodeBlock.CJK_COMPATIBILITY_FORMS // - || block == UnicodeBlock.CJK_COMPATIBILITY_IDEOGRAPHS // - || block == UnicodeBlock.CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT // - || block == UnicodeBlock.CJK_RADICALS_SUPPLEMENT // - || block == UnicodeBlock.CJK_SYMBOLS_AND_PUNCTUATION // - || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS // - || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A // - || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B // - || block == UnicodeBlock.BOPOMOFO // Chinese - || block == UnicodeBlock.BOPOMOFO_EXTENDED // - || block == UnicodeBlock.HANGUL_COMPATIBILITY_JAMO // Korean - || block == UnicodeBlock.HANGUL_JAMO // - || block == UnicodeBlock.HANGUL_SYLLABLES // - || block == UnicodeBlock.KANBUN // Japanese - || block == UnicodeBlock.HIRAGANA // - || block == UnicodeBlock.KANGXI_RADICALS // - || block == UnicodeBlock.KANNADA // - || block == UnicodeBlock.KATAKANA // - || block == UnicodeBlock.KATAKANA_PHONETIC_EXTENSIONS; - } - - private static List stopWords = Arrays.asList(new String[]{ - "the", "and", "that", "have", "for" // English stop words - }); - - public static boolean isStopWord(String word) { - if (stopWords.contains(word)) - return true; - - int len = word.codePointCount(0, word.length()); - if (len < 3 ) { - // too short, is this CJK? 
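Worked examples of the rules in this method, derived directly from the code above and below (the CJK sample characters are arbitrary):

import plugins.Library.search.SearchUtil;

class StopWordExamples {
    public static void main(String[] args) {
        System.out.println(SearchUtil.isStopWord("the")); // true: in the stopword list
        System.out.println(SearchUtil.isStopWord("of"));  // true: under 3 code points, not CJK
        System.out.println(SearchUtil.isStopWord("東"));  // false: a single CJK character is kept
        System.out.println(SearchUtil.isStopWord("3東")); // false: digit followed by CJK is kept
    }
}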
- int cp1 = word.codePointAt(0); - if (isCJK(cp1)) - return false; - if (len == 2) { - // maybe digit+CJK, check the second char - int cp2 = word.codePointAt(Character.charCount(cp1)); - return !isCJK(cp2); - } - return true; - } - return false; - } -} diff --git a/src/plugins/Library/search/inter/IndexQuery.java b/src/plugins/Library/search/inter/IndexQuery.java deleted file mode 100644 index 08c819ec..00000000 --- a/src/plugins/Library/search/inter/IndexQuery.java +++ /dev/null @@ -1,65 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.search.inter; - -import plugins.Library.Index; - -import java.util.Collections; -import java.util.Set; -import java.util.HashSet; - -/** -** DOCUMENT -** -** @author infinity0 -*/ -public class IndexQuery /* implements Index */ { - - /** - ** Reference to the index. - */ - final public Index index; - - /** - ** Score for this index in the WoT. - */ - final public float WoT_score; - - /** - ** Minimum number of hops this index is from the inner-group of indexes. - */ - protected int WoT_hops; - - /** - ** References to this index that we've seen, from other indexes. - ** - ** TODO decide if this is needed. - */ - protected int WoT_refs; - - /** - ** Mutex for manipulating the below sets. - */ - final protected Object terms_lock = new Object(); - - final protected Set terms_done = new HashSet(); - final protected Set terms_started = new HashSet(); - final protected Set terms_pending; - - public IndexQuery(Index i, float s, Set terms, int hops) { - index = i; - WoT_score = s; - WoT_hops = hops; - terms_pending = new HashSet(terms); - } - - public IndexQuery(Index i, float s, String root_term) { - this(i, s, Collections.singleton(root_term), 0); - } - - public void updateMinimumHops(int h) { - if (h < WoT_hops) { WoT_hops = h; } - } - -} diff --git a/src/plugins/Library/search/inter/Interdex.java b/src/plugins/Library/search/inter/Interdex.java deleted file mode 100644 index 86226621..00000000 --- a/src/plugins/Library/search/inter/Interdex.java +++ /dev/null @@ -1,198 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.search.inter; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermIndexEntry; -import plugins.Library.index.TermTermEntry; -import plugins.Library.Index; - -import freenet.keys.FreenetURI; - -import java.util.Set; -import java.util.Map; -import java.util.HashMap; - -/** -** DOCUMENT -** -** @author infinity0 -*/ -public class Interdex /* implements Index */ { - - - - /** - ** Constructs the root set of {@link IndexQuery}s. - */ - protected static Map getRootIndices() { - throw new UnsupportedOperationException("not implemented"); - } - - - - - /** - ** Class to handle a request for a given subject term. - */ - public class InterdexQuery implements Runnable { - - final public String subject; - - /** - ** Table of {@link IndexQuery} objects relevant to this query. - */ - final protected Map queries = new HashMap(); - - /** - ** Table of terms relevant to this query and results retrieved for - ** that particular term. - ** - */ - /*final*/ protected Map results; - - /** - ** Constructs a new InterdexQuery and TODO starts executing it. 
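A sketch of how the constructors and updateMinimumHops() above are meant to be used, based only on the signatures shown; the Index value, trust score and root term are placeholders:

package plugins.Library.search.inter;

import plugins.Library.Index;

class IndexQueryUsageSketch {
    static IndexQuery rootQuery(Index index) {
        // A root index starts at hop 0 with the single root term.
        IndexQuery q = new IndexQuery(index, 1.0f, "freenet");
        // A shorter referencing path later shrinks the hop count, never grows it.
        q.updateMinimumHops(0);
        return q;
    }
}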
- */ - public InterdexQuery(String subj) { - subject = subj; - Map roots = getRootIndices(); - for (Map.Entry en: roots.entrySet()) { - Index index = en.getKey(); - // FreenetURI id = index.reqID; - // queries.put(id, new IndexQuery(index, en.getValue(), subj)); - } - } - - /** - ** TODO implement. - ** - ** Returns the average perceived relevance of a given term relative to - ** a parent subject term. "Perceived" because it uses data from indexes - ** in the WoT's nearer rings; "average" because it generates an average - ** rating from the individual values encountered. - ** - ** The value is calculated from the mean of all the relevance values - ** encountered for this subject-term pair (with the weights / - ** probabilities being the normalised trust score of the index which - ** defined the pairing), multiplied by 3. (Values greater than 1 are - ** normalised back down to 1). - ** - ** For example, if we have encountered 9 indexes all with equal weight, - ** and 2 of them defined rel(S, T) = 0.5, then this method will return - ** 0.5 * min(1, 3*2/9) = 1/3. - ** - ** In other words, if less than 1/3 of the total index trust-mass gives - ** a relevance rating for a given subject-term pair, the value returned - ** by this method will be lower (than it would be if more indexes had - ** had supplied a rating). - ** - ** We choose 1/3 as an arbitrary compromise between majority-ignorance - ** (most people might not know obscure term X is related to obscure - ** term Y) and minority-attack (one person might inject information to - ** distort a search query; we shouldn't expect WoT to deal with minor - ** corner cases). '''This value can be discussed further'''. - ** - ** Formally, let - ** - ** : S := subject - ** : T := term - ** : A := { all indexes encountered } - ** : R := { i ∊ A: i.rel(S, T) is defined } - ** : Wi := weight of index i (eg. WoT trust score) - ** - ** Then this method calculates: - ** - ** : (Σi∊RWi·i.rel(S, T)) - ** · min(1, 3·Σi∊RWi - ** / Σi∊AWi) - ** - */ - public float getAvPercvRelevance(String subj, String term) { - throw new UnsupportedOperationException("not implemented"); - } - - /** - ** TODO implement. maybe use JGraphT. DefaultDirectedWeightedGraph - ** and DijkstraShortestPath. - ** - ** Returns the effective relevance of all terms relative to the subject - ** term for this query. - ** - ** The effective relevance of a term (relative to the subject term) is - ** defined as the highest-relevance path from the subject to that term, - ** where the relevance of a path is defined as the product of all the - ** edges on it, where each edge from S to T has weight equal to their - ** {@linkplain #getAvPercvRelevance(String, String) average perceived - ** relevance}. - ** - ** This is itself equivalent to the shortest-path problem from the - ** subject term to the target term, with each edge from S to T having - ** weight {@code -log(R)}, with R being the average perceived relevance - ** of S, T. - ** - ** This definition was chosen because there may be an infinite number - ** of paths between two terms in the semantic web; we want the "most - ** direct" one that exists (by consensus; so we use average perceived - ** relevance). - */ - public Map getEffectiveRelevances() { - throw new UnsupportedOperationException("not implemented"); - } - - - /** - ** Returns true when we have enough data to display something - ** acceptable to the user. The query may still be going on in the - ** background. - ** - ** TODO decide when this is. 
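The relevance formula earlier in this class lost its sub- and superscripts in this copy; read against the worked 9-index example, it is the weighted mean of the ratings in R, damped by min(1, 3·(weight of R)/(weight of A)). A sketch of that reading (method and parameter names are illustrative):

class AvPercvRelevanceSketch {
    /** relR/weightR: ratings and weights of indexes defining rel(S, T); weightAll: total weight of all indexes encountered. */
    static float avPercvRelevance(float[] relR, float[] weightR, float weightAll) {
        float sum = 0, weightSum = 0;
        for (int i = 0; i < relR.length; i++) {
            sum += weightR[i] * relR[i];
            weightSum += weightR[i];
        }
        float mean = sum / weightSum;                          // weighted mean over R
        float damping = Math.min(1f, 3f * weightSum / weightAll); // < 1/3 of trust mass => damped
        return mean * damping;
    }

    public static void main(String[] args) {
        // The javadoc example: 9 equally weighted indexes, 2 of them rate rel = 0.5
        System.out.println(avPercvRelevance(
                new float[]{0.5f, 0.5f}, new float[]{1f, 1f}, 9f)); // 0.5 * min(1, 3*2/9) = 1/3
    }
}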
- */ - /*@Override*/ public boolean isPartiallyDone() { - throw new UnsupportedOperationException("not implemented"); - } - - - public void run() { - - for (;;) { - - while (Math.log(1)!=0 /* completed tasks is not empty */ ) { - // get completed task - - Set entries = null; - - for (TermEntry en: entries) { - - if (en instanceof TermIndexEntry) { - - } else if (en instanceof TermTermEntry) { - - - } else { - - - } - - - } - - } - - while (Math.log(1)!=0 /* pending tasks is not empty */) { - - - - - - - } - - } - } - - } - - -} diff --git a/src/plugins/Library/search/inter/TermResults.java b/src/plugins/Library/search/inter/TermResults.java deleted file mode 100644 index 2595e7f5..00000000 --- a/src/plugins/Library/search/inter/TermResults.java +++ /dev/null @@ -1,56 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.search.inter; - -import java.util.Collection; -import java.util.Map; - -/** -** DOCUMENT -** -** @author infinity0 -*/ -public class TermResults { - - /** - ** Subject term for the original query. - */ - /*final*/ public String query_subject; - - /** - ** Subject term for these results. - */ - /*final*/ public String subject; - - /** - ** Map of parent terms to the relevances that were seen for this term in - ** the results for the parent. - */ - /*final*/ public Map> parents; - - /** - ** Incremental calculation of effective relevance. May not be exact if - ** if additional indexes were encountered referencing this term, after the - ** last update to this field. - */ - protected float av_pcv_rel_; - - /** - ** Get the cached effective relevance. Use this in preference to {@link - ** Interdex#getEffectiveRelevance()} when the search is still ongoing. - ** - ** TODO this should be a good estimate that is always greater than the - ** actual value... - */ - public float getCachedAveragePerceivedRelevance() { - return av_pcv_rel_; - } - - public TermResults(String q, String s) { - query_subject = q; - subject = s; - } - - -} diff --git a/src/plugins/Library/search/package-info.java b/src/plugins/Library/search/package-info.java deleted file mode 100644 index 229a0d44..00000000 --- a/src/plugins/Library/search/package-info.java +++ /dev/null @@ -1,15 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ - -/** - * Handles higher-level search functions such as set operations on the results, - * splitting search queries, searching on multiple indexes. - * - * TODO support for stopwords, maybe it should be an exception passed back from - * getResult() so that that one can be ignored and need to send a message to the user to say it was excluded ( Could be a TermEntry ) - * - * @author MikeB - */ -package plugins.Library.search; - diff --git a/src/plugins/Library/ui/L10nString.java b/src/plugins/Library/ui/L10nString.java deleted file mode 100644 index d04c79be..00000000 --- a/src/plugins/Library/ui/L10nString.java +++ /dev/null @@ -1,72 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.ui; - -import freenet.l10n.BaseL10n; -import freenet.l10n.NodeL10n; -import freenet.l10n.BaseL10n.LANGUAGE; - -/** - * Feeble attempt at a translation interface, this is best left until there is a - * general Plugin method for doing this - * - * @author MikeB - */ -public class L10nString{ - - - static LANGUAGE lang; - - public static String getString(String key){ - lang = NodeL10n.getBase().getSelectedLanguage(); - if("Index".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "Index "; - } - else if("Searching-for".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "Searching for "; - } - else if("in-index".equals(key)) - switch(lang){ - case ENGLISH: - default: - return " in index "; - } - else if("Search-status".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "Search status : "; - } - else if("failed".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "Failed"; - } - else if("title".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "Search Freenet"; - } - else if("page-warning".equals(key)) - switch(lang){ - case ENGLISH: - default: - return "WARNING: This search, like the rest of Freenet, is not filtered, and could find offensive or illegal content. Be careful!"; - } - else - return key; - } - - public static void setLanguage(LANGUAGE newLanguage){ - lang = newLanguage; - } -} diff --git a/src/plugins/Library/ui/MainPage.java b/src/plugins/Library/ui/MainPage.java deleted file mode 100644 index ddbb17c1..00000000 --- a/src/plugins/Library/ui/MainPage.java +++ /dev/null @@ -1,523 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.ui; - -import plugins.Library.Library; -import plugins.Library.search.InvalidSearchException; -import plugins.Library.search.Search; -import plugins.Library.util.exec.ChainedProgress; -import plugins.Library.util.exec.CompositeProgress; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.TaskAbortException; - -import freenet.keys.FreenetURI; -import freenet.pluginmanager.PluginRespirator; -import freenet.support.HTMLNode; -import freenet.support.Logger; -import freenet.support.MultiValueTable; -import freenet.support.api.HTTPRequest; - -import java.io.UnsupportedEncodingException; -import java.net.MalformedURLException; -import java.net.URLEncoder; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; - -/** - * Generates the main search page - * - * @author MikeB - */ -class MainPage { - /** map of search hashes to pages on them */ - private static final HashMap searchPages = new HashMap(); - - private synchronized static void addpage(int hashCode, MainPage page) { - searchPages.put(hashCode, page); - } - - private synchronized static void cleanUpPages(){ - for (Iterator it = searchPages.keySet().iterator(); it.hasNext();) { - if (!Search.hasSearch(it.next())) - it.remove(); - } - } - - private synchronized static MainPage getPage(int intParam) { - return searchPages.get(intParam); - } - - - - private final Library library; - private final PluginRespirator pr; - - private Search search = null; - private ArrayList exceptions = new ArrayList(); - private boolean showold = false; - private boolean js = false; - private String query = ""; - /** All of the indexes used in the request together seperated by spaces */ - private String indexstring = Library.DEFAULT_INDEX_SITE; - /** Array of all the bookmark indexes used in this request */ - private ArrayList selectedBMIndexes = new ArrayList(); - /** Any other indexes which are not bookmarks */ - private ArrayList selectedOtherIndexes = new ArrayList(); - /** Any other indexes which are not bookmarks seperated by spaces */ - private boolean groupusk = false; - private StringBuilder messages = new StringBuilder(); - - private String addindexname = ""; - private String addindexuri = ""; - - static volatile boolean logMINOR; - static volatile boolean logDEBUG; - - static { - Logger.registerClass(MainPage.class); - } - - - MainPage(Exception e, Library library, PluginRespirator pr) { - exceptions.add(e); - this.library = library; - this.pr = pr; - } - - - /** - * FIXME ajaxy stuff isnt working, it doesnt seem to renew the timeout in the js, it stopped working when the response type of the xml was changed to text/xml - * @param library - * @param pr - */ - MainPage(Library library, PluginRespirator pr) { - this.library = library; - this.pr = pr; - } - - /** - * Process a get request, the only parameter allowed for a get request is - * request id (for an ongoing request) - */ - public static MainPage processGetRequest(HTTPRequest request){ - if (request.isParameterSet("request") && searchPages.containsKey(request.getIntParam("request"))){ - return getPage(request.getIntParam("request")); - } - return null; - } - - /** post commands */ - private static enum Commands { - /** performs a search */ - find, - /** adds a new index to the bookmarks in Library */ - addbookmark, - /** puts an index in the add bookmark box to be named, requires an integer parameter between 0 and "extraindexcount" to specify which index is being 
added */ - addindex, - /** deletes a bookmark from the Library, requires an integer parameter between 0 and the number of bookmarks */ - removebookmark - } - - /** - * Process a post request - * @param userAccess - * - * @see plugins.XMLSpider.WebPage#processPostRequest(freenet.support.api.HTTPRequest, - * freenet.support.HTMLNode) - */ - public static MainPage processPostRequest(HTTPRequest request, HTMLNode contentNode, boolean hasFormPassword, boolean userAccess, Library library, PluginRespirator pr ) { - cleanUpPages(); - MainPage page = new MainPage(library, pr); - - page.js = request.isPartSet("js"); - page.showold = request.isPartSet("showold"); - page.groupusk = request.isPartSet("groupusk"); - String[] etcIndexes = request.getPartAsStringFailsafe("indexuris", 256).trim().split("[ ;]"); - page.query = request.getPartAsStringFailsafe("search", 256); - - if(userAccess) { - page.addindexname = request.getPartAsStringFailsafe("addindexname", 32); - page.addindexuri = request.getPartAsStringFailsafe("addindexuri", 256); - } - - // Get bookmarked index list - page.indexstring = ""; - for (String bm : library.bookmarkKeys()){ - String bmid = (Library.BOOKMARK_PREFIX + bm).trim(); - //Logger.normal(this, "checking for ~" + bm + " - "+request.isPartSet("~"+bm)); - if(request.isPartSet("~"+bm)){ - page.indexstring += bmid + " "; - page.selectedBMIndexes.add(bmid); - } - } - // Get other index list - //Logger.normal(page, "extra indexes : "+request.getIntPart("extraindexcount", 0)); - for (int i = 0; i < request.getIntPart("extraindexcount", 0); i++) { - if (request.isPartSet("index"+i)){ - String otherindexuri = request.getPartAsStringFailsafe("index"+i, 256); - //Logger.normal(page, "Added index : "+otherindexuri); - page.indexstring += otherindexuri + " "; - page.selectedOtherIndexes.add(otherindexuri); - }//else - //Logger.normal(page, "other index #"+i+" unchecked"); - } - for (String string : etcIndexes) { - if(string.length()>0){ - page.indexstring += string + " "; - page.selectedOtherIndexes.add(string); - } - } - page.indexstring = page.indexstring.trim(); - - if("".equals(page.indexstring)) - page.indexstring = Library.DEFAULT_INDEX_SITE; - - if(page.selectedBMIndexes.size() == 0 && page.selectedOtherIndexes.size() == 0) - page.selectedBMIndexes.add(Library.DEFAULT_INDEX_SITE); - - - - - - // get search query - if (request.isPartSet(Commands.find.toString())){ - if(hasFormPassword) { - // Start or continue a search - try { - if(logMINOR) - Logger.minor(MainPage.class, "starting search for "+page.query+" on "+page.indexstring); - page.search = Search.startSearch(page.query, page.indexstring); - if(page.search == null) - page.messages.append("Stopwords too prominent in search term, try removing words like 'the', 'and' and 'that' and any words less than 3 characters"); - else{ - page.search.setMakeResultNode(page.groupusk, page.showold, true); // for the moment js will always be on for results, js detecting isnt being used - - // at this point pages is in a state ready to be saved - addpage(page.search.hashCode(), page); - } - } catch (InvalidSearchException ex) { - page.messages.append("Problem with search : "+ex.getLocalizedMessage()+"\n"); - } catch (TaskAbortException ex) { - page.exceptions.add(ex); // TODO handle these exceptions separately - } catch (RuntimeException ex){ - page.exceptions.add(ex); - } - } - }else if (request.isPartSet(Commands.addbookmark.toString())) { - try { - // adding an index - // TODO make sure name is valid - if (userAccess && hasFormPassword && 
page.addindexname.length() > 0 && URLEncoder.encode(page.addindexname, "UTF-8").equals(page.addindexname)) { - // Use URLEncoder to have a simple constraint on names - library.addBookmark(page.addindexname, page.addindexuri); - if (page.selectedOtherIndexes.contains(page.addindexuri)) { - page.selectedOtherIndexes.remove(page.addindexuri); - page.selectedBMIndexes.add(Library.BOOKMARK_PREFIX + page.addindexname); - } - page.addindexname = ""; - page.addindexuri = ""; - } - } catch (UnsupportedEncodingException ex) { - Logger.error(page, "Encoding error", ex); - } - }else{ // check if one of the other indexes is being added as a bookmark, this doesnt actually add it but moves it into the add box - for (int i = 0; i < request.getIntPart("extraindexcount", 0); i++) { // TODO make sure name is valid - if (request.isPartSet(Commands.addindex.toString()+i)) - page.addindexuri = request.getPartAsStringFailsafe("index"+i, 256); - } - if(userAccess && hasFormPassword) - // check if one of the bookmarks is being removed - for (String bm : library.bookmarkKeys()) { - if (request.isPartSet(Commands.removebookmark+bm)){ - library.removeBookmark(bm); - break; - } - } - } - - return page; - } - - - - /** - * Write the search page out - * - * - * @see plugins.XMLSpider.WebPage#writeContent(freenet.support.api.HTTPRequest, - * freenet.support.HTMLNode) - */ - public void writeContent(HTMLNode contentNode, MultiValueTable headers) { - HTMLNode errorDiv = contentNode.addChild("div", "id", "errors"); - - for (Exception exception : exceptions) { - addError(errorDiv, exception, messages); - } - - try{ - //Logger.normal(this, "Writing page for "+ query + " " + search + " " + indexuri); - contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", "/library/static/script.js"}).addChild("%", " "); - - // Generate the url to refresh to to update the progress - String refreshURL = null; - if ( search != null) - refreshURL = path()+"?request="+search.hashCode(); - - - - - if(search == null) - contentNode.addChild("#",L10nString.getString("page-warning")); - - contentNode.addChild(searchBox()); - - - - - // If showing a search - if(search != null){ - // show progress - contentNode.addChild(progressBox()); - // If search is complete show results - if (search.isDone()) { - if(search.hasGeneratedResultNode()){ - contentNode.addChild(search.getHTMLNode()); - //Logger.normal(this, "Got pre generated result node."); - }else - try { - //Logger.normal(this, "Blocking to generate resultnode."); - ResultNodeGenerator nodegenerator = new ResultNodeGenerator(search.getResult(), groupusk, showold, true); // js is being switch on always currently due to detection being off - nodegenerator.run(); - contentNode.addChild(nodegenerator.getPageEntryNode()); - } catch (TaskAbortException ex) { - exceptions.add(ex); - } catch (RuntimeException ex) { - exceptions.add(ex); - } - } else { - contentNode.addChild("div", "id", "results").addChild("#"); - } - } - - - - - // Don't refresh if there is no request option, if it is finished or there is an error to show or js is enabled - if (search != null && !"".equals(search.getQuery()) && !search.isDone() && exceptions.size() <= 0) { - // refresh will GET so use a request id - if (!js) { - headers.put("Refresh", "2;url=" + refreshURL); - //contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", path() + "static/" + (js ? 
"script.js" : "detect.js") + "?request="+search.hashCode()+(showold?"&showold=on":"")}).addChild("%", " "); - //contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", path() + "static/" + (js ? "script.js" : "detect.js") + "?request="+search.hashCode()+(showold?"&showold=on":"")}).addChild("%", " "); - } - } - }catch(TaskAbortException e) { - if(search != null) - search.remove(); - exceptions.add(e); - search = null; - addError(errorDiv, e, messages); - } - - // Show any errors - errorDiv.addChild("p", messages.toString()); - } - - - /** - * Create search form - * - * // @param search already started - * // @param indexuri - * // @param js whether js has been detected - * // @param showold - * - * @return an {@link HTMLNode} representing the search form - */ - private HTMLNode searchBox(){ - HTMLNode searchDiv = new HTMLNode("div", new String[]{"id", "style"}, new String[]{"searchbar", "text-align: center;"}); - if(pr!=null){ - HTMLNode searchForm = pr.addFormChild(searchDiv, path(), "searchform"); - HTMLNode searchBox = searchForm.addChild("div", "style", "display: inline-table; text-align: left; margin: 20px 20px 20px 0px;"); - searchBox.addChild("#", "Search query:"); - searchBox.addChild("br"); - searchBox.addChild("input", new String[]{"name", "size", "type", "value", "title"}, new String[]{"search", "40", "text", query, "Enter a search query. You can use standard search syntax such as 'and', 'or', 'not' and \"\" double quotes around phrases"}); - searchBox.addChild("input", new String[]{"name", "type", "value", "tabindex"}, new String[]{"find", "submit", "Find!", "1"}); - if(js) - searchBox.addChild("input", new String[]{"type","name"}, new String[]{"hidden","js"}); - // Shows the list of bookmarked indexes TODO show descriptions on mouseover ?? - HTMLNode indexeslist = searchBox.addChild("ul", "class", "index-bookmark-list", "Select indexes"); - for (String bm : library.bookmarkKeys()){ - //Logger.normal(this, "Checking for bm="+Library.BOOKMARK_PREFIX+bm+" in \""+indexuri + " = " + selectedBMIndexes.contains(Library.BOOKMARK_PREFIX+bm)+" "+indexuri.contains(Library.BOOKMARK_PREFIX+bm)); - HTMLNode bmItem = indexeslist.addChild("li"); - bmItem.addChild("input", new String[]{"name", "type", "value", "title", (selectedBMIndexes.contains(Library.BOOKMARK_PREFIX+bm) ? 
"checked" : "size" )}, new String[]{"~"+bm, "checkbox", Library.BOOKMARK_PREFIX+bm, "Index uri : "+library.getBookmark(bm), "1" } , bm); - bmItem.addChild("input", new String[]{"name", "type", "value", "title", "class"}, new String[]{Commands.removebookmark+bm, "submit", "X", "Delete this bookmark", "index-bookmark-delete" }); - } - int i=0; - for (String uri : selectedOtherIndexes) { - HTMLNode removeItem = indexeslist.addChild("li"); - String showuri; - try{ - showuri = (new FreenetURI(uri)).toShortString(); - }catch(MalformedURLException e){ - showuri = uri; - } - removeItem.addChild("input", new String[]{"type", "name", "value", "checked"}, new String[]{"checkbox", "index"+i, uri, "checked", } , showuri); - removeItem.addChild("input", new String[]{"name", "type", "value"}, new String[]{Commands.addindex.toString()+i, "submit", "Add=>" }); - i++; - } - indexeslist.addChild("input", new String[]{"name", "type", "value"}, new String[]{"extraindexcount", "hidden", ""+selectedOtherIndexes.size()}); - indexeslist.addChild("li") - .addChild("input", new String[]{"name", "type", "value", "class", "title"}, new String[]{"indexuris", "text", "", "index", "URI or path of other index(s) to search on"}); - - - HTMLNode optionsBox = searchForm.addChild("div", "style", "margin: 20px 0px 20px 20px; display: inline-table; text-align: left;", "Options"); - HTMLNode optionsList = optionsBox.addChild("ul", "class", "options-list"); - optionsList.addChild("li") - .addChild("input", new String[]{"name", "type", groupusk?"checked":"size", "title"}, new String[]{"groupusk", "checkbox", "1", "If set, the results are returned grouped by site and edition, this makes the results quicker to scan through but will disrupt ordering on relevance, if applicable to the indexs you are using."}, "Group sites and editions"); - optionsList.addChild("li") - .addChild("input", new String[]{"name", "type", showold?"checked":"size", "title"}, new String[]{"showold", "checkbox", "1", "If set, older editions are shown in the results greyed out, otherwise only the most recent are shown."}, "Show older editions"); - - HTMLNode newIndexInput = optionsBox.addChild("div", new String[]{"class", "style"}, new String[]{"index", "display: inline-table;"}, "Add an index:"); - newIndexInput.addChild("br"); - newIndexInput.addChild("div", "style", "display: inline-block; width: 50px;", "Name:"); - newIndexInput.addChild("input", new String[]{"name", "type", "class", "title", "value"}, new String[]{"addindexname", "text", "index", "Name of the bookmark, this will appear in the list to the left", addindexname}); - newIndexInput.addChild("br"); - newIndexInput.addChild("div", "style", "display: inline-block; width: 50px;", "URI:"); - newIndexInput.addChild("input", new String[]{"name", "type", "class", "title", "value"}, new String[]{"addindexuri", "text", "index", "URI or path of index to add to bookmarks, including the main index filename at the end of a Freenet uri will help Library not to block in order to discover the index type.", addindexuri}); - newIndexInput.addChild("br"); - newIndexInput.addChild("input", new String[]{"name", "type", "value"}, new String[]{"addbookmark", "submit", "Add Bookmark"}); - }else - searchDiv.addChild("#", "No PluginRespirater, so Form cannot be displayed"); - return searchDiv; - } - - - /** - * Draw progress box with bars - * - * // @param search - * // @param indexuri - * // @param request request to get progress from - * - * @return an {@link HTMLNode} representing a progress box - */ - private HTMLNode 
progressBox() throws TaskAbortException{ - HTMLNode progressDiv = new HTMLNode("div", "id", "progress"); - // Search description - HTMLNode progressTable = progressDiv.addChild("table", "width", "100%"); - HTMLNode searchingforCell = progressTable.addChild("tr") - .addChild("td"); - searchingforCell.addChild("#", L10nString.getString("Searching-for")); - searchingforCell.addChild("span", "class", "librarian-searching-for-target") - .addChild("b", query); - searchingforCell.addChild("#", L10nString.getString("in-index")); - searchingforCell.addChild("i", indexstring); - - - // Search status - HTMLNode statusRow = progressTable.addChild("tr"); - statusRow.addChild("td") - .addChild("div", "id", "librarian-search-status") - .addChild("table", new String[]{"id", "class"}, new String[]{"progress-base", "progress-table"}) - .addChild("tbody") - .addChild(progressBar(search, true)); - return progressDiv; - } - - /** - * Put an error on the page, under node, also draws a big grey box around - * the error, unless it is an InvalidSearchException in which case it just shows the description - */ - public static void addError(HTMLNode node, Throwable error, StringBuilder messages){ - if(messages != null && - (error instanceof InvalidSearchException || error instanceof TaskAbortException) && !logMINOR){ // Print description - messages.append(error.getMessage()+"\n"); - // TODO if there is a cause there should be some way to view this if needed for debugging - }else{ // Else print stack trace - HTMLNode error1 = node.addChild("div", "style", "padding:10px;border:5px solid gray;margin:10px", error.toString()); - for (StackTraceElement ste : error.getStackTrace()){ - error1.addChild("br"); - error1.addChild("#", " -- "+ste.toString()); - } - if(error.getCause()!=null) - addError(error1, error.getCause(), messages); - } - } - - - /** - * Draw progress bars and describe progress, CompositeProgess are drawn as a table with each row containing a subProgress - * @param progress The progress to represent - * @return an {@link HTMLNode} representing a progress bar - */ - public static HTMLNode progressBar(Progress progress, boolean canFail) throws TaskAbortException { - synchronized (progress){ - if( progress instanceof CompositeProgress && ((CompositeProgress) progress).getSubProgress()!=null && ((CompositeProgress) progress).getSubProgress().iterator().hasNext()){ - // Put together progress bars for all the subProgress - HTMLNode block = new HTMLNode("#"); - block.addChild("tr").addChild("td", "colspan", "6", progress.getSubject() + " : "+progress.getStatus()); - TaskAbortException firstError = null; - boolean anySuccess = false; - if(canFail && progress instanceof Search) { - if(!(((Search)progress).innerCanFailAndStillComplete())) - canFail = false; - } else canFail = false; - if(((CompositeProgress) progress).getSubProgress() != null) - for (Progress progress1 : ((CompositeProgress) progress).getSubProgress()) { - try { - block.addChild(progressBar(progress1, canFail)); - anySuccess = true; - } catch (TaskAbortException e) { - if(!canFail) throw e; - if(firstError == null) firstError = e; - block.addChild("tr").addChild("td", "colspan", "6", progress1.getSubject() + " : "+L10nString.getString("failed") + " : "+e.getMessage()); - } - } - if(firstError != null && !anySuccess) - throw firstError; - return block; - } else { - // Draw progress bar for single or chained progress - ProgressParts parts; - if(progress instanceof ChainedProgress && ((ChainedProgress)progress).getCurrentProgress()!=null) - parts = 
((ChainedProgress)progress).getCurrentProgress().getParts(); - else - parts = progress.getParts(); - - HTMLNode bar = new HTMLNode("tr"); - bar.addChild("td"); - // search term - bar.addChild("td", progress.getSubject()); - // search stage - bar.addChild("td", progress.getStatus()); - // show fetch progress if fetching something - if(progress.isDone() || progress.getParts().known==0){ - bar.addChild("td", ""); bar.addChild("td"); - }else{ - float fractiondone = parts.getKnownFractionDone(); - int percentage = (int)(((float)100)*fractiondone); // TODO cater for all data and invalid (negative) values - boolean fetchFinalized = parts.finalizedTotal(); - - bar.addChild("td", new String[]{"class", "style"}, new String[]{"progress-bar-outline", "padding: 0px 3px;"}) - .addChild("div", new String[]{"class", "style"}, new String[]{fetchFinalized?"progress-bar-inner-final":"progress-bar-inner-nonfinal", "z-index : -1; width:"+percentage+"%;"}); - bar.addChild("td", fetchFinalized?percentage+"%":"Operation length unknown"); - } - return bar; - } - } - } - - - - public static String path() { - return "/library/"; - } - - public String getQuery() { - return query; - } -} diff --git a/src/plugins/Library/ui/MainPageToadlet.java b/src/plugins/Library/ui/MainPageToadlet.java deleted file mode 100644 index 2f9e383b..00000000 --- a/src/plugins/Library/ui/MainPageToadlet.java +++ /dev/null @@ -1,139 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.ui; - -import plugins.Library.Library; - -import freenet.client.HighLevelSimpleClient; -import freenet.clients.http.PageNode; -import freenet.clients.http.RedirectException; -import freenet.clients.http.Toadlet; -import freenet.clients.http.ToadletContext; -import freenet.clients.http.ToadletContextClosedException; -import freenet.node.NodeClientCore; -import freenet.pluginmanager.PluginRespirator; -import freenet.support.HTMLNode; -import freenet.support.Logger; -import freenet.support.MultiValueTable; -import freenet.support.api.HTTPRequest; - -import java.io.IOException; -import java.net.URI; -import java.util.Collection; - -/** - * Encapsulates the MainPage in a Toadlet - * @author MikeB - */ -public class MainPageToadlet extends Toadlet { - private NodeClientCore core; - private final Library library; - private final PluginRespirator pr; - - public MainPageToadlet(HighLevelSimpleClient client, Library library, NodeClientCore core, PluginRespirator pr) { - super(client); - this.core = core; - this.library = library; - this.pr = pr; - } - - public boolean allowPOSTWithoutPassword() { - return true; - } - - public void handleMethodGET(URI uri, final HTTPRequest request, final ToadletContext ctx) - throws ToadletContextClosedException, IOException, RedirectException { - ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(Library.class.getClassLoader()); - try { - // process the request - String title = L10nString.getString("title") + " (" + Library.plugName + ")"; - MainPage page = MainPage.processGetRequest(request); - if(page == null) - page = new MainPage(library, pr); - else { - String query = page.getQuery(); - if(query != null && query.length() > 0) - title = query + " - " + L10nString.getString("title") + " (" + Library.plugName + ")"; - } - PageNode p = ctx.getPageMaker().getPageNode(title, ctx); 
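The handlers in this toadlet wrap their work in the same context-classloader swap so that classes resolve against the plugin's jar; the pattern in isolation (the helper name is illustrative):

import plugins.Library.Library;

class ContextClassLoaderSwap {
    static void runWithPluginClassLoader(Runnable work) {
        ClassLoader orig = Thread.currentThread().getContextClassLoader();
        Thread.currentThread().setContextClassLoader(Library.class.getClassLoader());
        try {
            work.run(); // e.g. render the page
        } finally {
            Thread.currentThread().setContextClassLoader(orig); // always restore
        }
    }
}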
- // Style - p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); - HTMLNode pageNode = p.outer; - HTMLNode contentNode = p.content; - - MultiValueTable headers = new MultiValueTable(); - page.writeContent(contentNode, headers); - // write reply - writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); - } catch(RuntimeException e) { // this way isnt working particularly well, i think the ctx only gives out one page maker - Logger.error(this, "Runtime Exception writing main page", e); - PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); - // Style - p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); - HTMLNode pageNode = p.outer; - HTMLNode contentNode = p.content; - MainPage errorpage = new MainPage(e, library, pr); - MultiValueTable headers = new MultiValueTable(); - errorpage.writeContent(contentNode, headers); - writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); - } finally { - Thread.currentThread().setContextClassLoader(origClassLoader); - } - } - - public void handleMethodPOST(URI uri, HTTPRequest request, final ToadletContext ctx) throws ToadletContextClosedException, IOException, RedirectException { - ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(Library.class.getClassLoader()); - - boolean hasFormPassword = ctx.hasFormPassword(request); - - try { - // Get the nodes of the page - PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); - // Style - p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); - HTMLNode pageNode = p.outer; - HTMLNode contentNode = p.content; - - MainPage page = MainPage.processPostRequest(request, contentNode, hasFormPassword, ctx.isAllowedFullAccess(), library, pr); - if(page==null) - page = new MainPage(library,pr); - // Process the request - MultiValueTable headers = new MultiValueTable(); - // write reply - page.writeContent(contentNode, headers); - // write reply - writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); - } catch(RuntimeException e) { - Logger.error(this, "Runtime Exception writing main page", e); - PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); - // Style - p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); - HTMLNode pageNode = p.outer; - HTMLNode contentNode = p.content; - // makes a mainpage for showing errors - MainPage errorpage = new MainPage(e, library, pr); - MultiValueTable headers = new MultiValueTable(); - errorpage.writeContent(contentNode, headers); - writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); - } finally { - Thread.currentThread().setContextClassLoader(origClassLoader); - } - } - - - - @Override public String path() { - return MainPage.path(); - } - - public String name() { - return "WelcomeToadlet.searchFreenet"; - } - - public String menu() { - return "FProxyToadlet.categoryBrowsing"; - } -} diff --git a/src/plugins/Library/ui/RelevanceComparator.java b/src/plugins/Library/ui/RelevanceComparator.java deleted file mode 100644 index 179eb91c..00000000 --- a/src/plugins/Library/ui/RelevanceComparator.java +++ /dev/null @@ -1,43 +0,0 @@ -/* This code is part of Freenet. 
It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ - -package plugins.Library.ui; - -import plugins.Library.index.TermEntry; -import plugins.Library.util.IdentityComparator; - -/** - * Compares the relevance of two TermEntrys, extends IdentityComparator so that two unique entries will not return a comparison of 0 - * - * @author MikeB - */ -public class RelevanceComparator extends IdentityComparator { - - - public static final RelevanceComparator comparator = new RelevanceComparator(); - - @Override - public int compare(TermEntry o1, TermEntry o2) { - float rel1 = o1.rel; - float rel2 = o2.rel; - - if(rel1 == rel2) - return super.compare(o1, o2); - else - return (rel1 < rel2) ? +1 : -1; - } - - @Override - public boolean equals(Object o){ - return getClass() == o.getClass(); - } - - @Override - public int hashCode() { - int hash = 7; - return hash; - } - - -} diff --git a/src/plugins/Library/ui/ResultNodeGenerator.java b/src/plugins/Library/ui/ResultNodeGenerator.java deleted file mode 100644 index ac0bc942..00000000 --- a/src/plugins/Library/ui/ResultNodeGenerator.java +++ /dev/null @@ -1,262 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.ui; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermIndexEntry; -import plugins.Library.index.TermPageEntry; -import plugins.Library.index.TermTermEntry; - -import freenet.keys.FreenetURI; -import freenet.support.HTMLNode; -import freenet.support.Logger; - -import java.util.Iterator; -import java.util.Map.Entry; -import java.util.Set; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; - -/** - * Class for parsing and formatting search results, once isDone() return true, the nodes are ready to use - * - * @author MikeB - */ -public class ResultNodeGenerator implements Runnable { - private TreeMap groupmap; - private TreeMap pageset; - private TreeSet relatedTerms; - private TreeSet relatedIndexes; - private Set result; - private boolean groupusk; - private boolean showold; - private boolean js; - private boolean done; - private HTMLNode pageEntryNode; - private RuntimeException exception; - - - /** - * Create the generator - * @param result Set of TermEntrys to format - * @param groupusk whether the sites and editions should be grouped - * @param showold whether to show older editions - * @param js whether - */ - public ResultNodeGenerator(Set result, boolean groupusk, boolean showold, boolean js){ - this.result = result; - this.groupusk = groupusk; - this.showold = showold; - this.js = js; - } - - - public synchronized void run(){ - if(done) - throw new IllegalStateException("ResultNodeGenerator can only be run once."); - - try{ - parseResult(); - generatePageEntryNode(); - }catch(RuntimeException e){ - exception = e; // Exeptions thrown here are stored in case this is being run in a thread, in this case it is thrown in isDone() or iterator() - throw e; - } - done = true; - result = null; - } - - /** - * Return the generated HTMLNode of PageEntrys, only call this after checking isDone() - * @throws RuntimeException if a RuntimeException was caught while generating the node - */ - public HTMLNode getPageEntryNode(){ - if(exception != null) - throw new 
diff --git a/src/plugins/Library/ui/ResultNodeGenerator.java b/src/plugins/Library/ui/ResultNodeGenerator.java
deleted file mode 100644
index ac0bc942..00000000
--- a/src/plugins/Library/ui/ResultNodeGenerator.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.ui;
-
-import plugins.Library.index.TermEntry;
-import plugins.Library.index.TermIndexEntry;
-import plugins.Library.index.TermPageEntry;
-import plugins.Library.index.TermTermEntry;
-
-import freenet.keys.FreenetURI;
-import freenet.support.HTMLNode;
-import freenet.support.Logger;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-/**
- * Class for parsing and formatting search results; once isDone() returns true, the nodes are ready to use
- *
- * @author MikeB
- */
-public class ResultNodeGenerator implements Runnable {
-	private TreeMap<String, TermPageGroupEntry> groupmap;
-	private TreeMap<TermPageEntry, Boolean> pageset;
-	private TreeSet<TermTermEntry> relatedTerms;
-	private TreeSet<TermIndexEntry> relatedIndexes;
-	private Set<TermEntry> result;
-	private boolean groupusk;
-	private boolean showold;
-	private boolean js;
-	private boolean done;
-	private HTMLNode pageEntryNode;
-	private RuntimeException exception;
-
-
-	/**
-	 * Create the generator
-	 * @param result Set of TermEntrys to format
-	 * @param groupusk whether the sites and editions should be grouped
-	 * @param showold whether to show older editions
-	 * @param js whether the client has JavaScript enabled, so that show/hide links can be generated
-	 */
-	public ResultNodeGenerator(Set<TermEntry> result, boolean groupusk, boolean showold, boolean js){
-		this.result = result;
-		this.groupusk = groupusk;
-		this.showold = showold;
-		this.js = js;
-	}
-
-
-	public synchronized void run(){
-		if(done)
-			throw new IllegalStateException("ResultNodeGenerator can only be run once.");
-
-		try{
-			parseResult();
-			generatePageEntryNode();
-		}catch(RuntimeException e){
-			exception = e;	// Exceptions thrown here are stored in case this is being run in a thread; in that case they are rethrown in isDone() or getPageEntryNode()
-			throw e;
-		}
-		done = true;
-		result = null;
-	}
-
-	/**
-	 * Returns the generated HTMLNode of PageEntrys; only call this after checking isDone()
-	 * @throws RuntimeException if a RuntimeException was caught while generating the node
-	 */
-	public HTMLNode getPageEntryNode(){
-		if(exception != null)
-			throw new RuntimeException("RuntimeException thrown in ResultNodeGenerator thread", exception);
-		return pageEntryNode;
-	}
-
-	/**
-	 * Whether this ResultNodeGenerator has finished formatting, so the get methods can be called
-	 * @throws RuntimeException if a RuntimeException was caught while generating the node
-	 */
-	public boolean isDone(){
-		if(exception != null)
-			throw new RuntimeException("RuntimeException thrown in ResultNodeGenerator thread", exception);
-		return done;
-	}
-
-	/**
-	 * Parse result into generator
-	 */
-	private void parseResult(){
-		groupmap = new TreeMap<String, TermPageGroupEntry>();
-		if(!groupusk)
-			pageset = new TreeMap<TermPageEntry, Boolean>(RelevanceComparator.comparator);
-		relatedTerms = new TreeSet<TermTermEntry>(RelevanceComparator.comparator);
-		relatedIndexes = new TreeSet<TermIndexEntry>(RelevanceComparator.comparator);
-
-		Iterator it = result.iterator();
-		while(it.hasNext()){
-			TermEntry o = (TermEntry)it.next();
-			if(o instanceof TermPageEntry){
-				TermPageEntry pageEntry = (TermPageEntry)o;
-				// Put pages into a group hierarchy : USK key/docnames --> USK editions --> Pages
-				String sitebase;
-				long uskEdition = Long.MIN_VALUE;
-				// Get the key and name
-				FreenetURI uri;
-				uri = pageEntry.page;
-				// convert USKs
-				if(uri.isSSKForUSK()){
-					uri = uri.uskForSSK();
-					// Get the USK edition
-					uskEdition = uri.getEdition();
-				}
-				// Get the site base name, key + documentname - uskversion
-				sitebase = uri.setMetaString(null).setSuggestedEdition(0).toString().replaceFirst("/0", "");
-				Logger.minor(this, sitebase);
-
-				// Add site
-				if(!groupmap.containsKey(sitebase))
-					groupmap.put(sitebase, new TermPageGroupEntry(sitebase));
-				TermPageGroupEntry siteGroup = groupmap.get(sitebase);
-				// Add page
-				siteGroup.addPage(uskEdition, pageEntry);
-
-			}else if(o instanceof TermTermEntry){
-				relatedTerms.add((TermTermEntry)o);
-			}else if(o instanceof TermIndexEntry){
-				relatedIndexes.add((TermIndexEntry)o);
-			}else
-				Logger.error(this, "Unknown TermEntry type : "+o.getClass().getName());
-		}
-
-
-		if(!groupusk){	// Move entries from the groupmap to the pageset, marking whether they are the newest
-			for (Iterator<TermPageGroupEntry> it2 = groupmap.values().iterator(); it2.hasNext();) {
-				TermPageGroupEntry groupentry = it2.next();
-				SortedMap<Long, SortedSet<TermPageEntry>> editions = groupentry.getEditions();	// The editions of each site
-				SortedSet<TermPageEntry> newest = editions.get(editions.lastKey());	// get the newest edition
-				for (Iterator<SortedSet<TermPageEntry>> editionIterator = groupentry.getEditions().values().iterator(); editionIterator.hasNext();) {
-					SortedSet<TermPageEntry> edition = editionIterator.next();	// Iterate through all the editions
-					if(!showold && edition != newest)	// If not showing old, skip all but the newest
-						continue;
-					for (TermPageEntry termPageEntry : edition) {
-						pageset.put(termPageEntry, edition==newest);	// Add pages, marking whether they are from the newest edition
-					}
-				}
-			}
-			groupmap = null;
-		}
-	}
-
-	private HTMLNode generateIndexEntryNode(){
-		return new HTMLNode("#", "TermIndexEntry code not done yet");
-	}
-
-	private HTMLNode generateTermEntryNode(){
-		return new HTMLNode("#", "TermTermEntry code not done yet");
-	}
-
-	/**
-	 * Generate node of page results from this generator
-	 */
-	private void generatePageEntryNode(){
-		pageEntryNode = new HTMLNode("div", "id", "results");
-
-		int results = 0;
-		// Loop to separate results into SSK groups
-
-		if(groupmap != null){	// Produce grouped list of pages
-			SortedSet<TermPageGroupEntry> groupSet = new TreeSet<TermPageGroupEntry>(RelevanceComparator.comparator);
-			groupSet.addAll(groupmap.values());
-
-			// Loop over keys
-			Iterator<TermPageGroupEntry> it2 = groupSet.iterator();
-			while (it2.hasNext()) {
-				TermPageGroupEntry
group = it2.next(); - String keybase = group.subj; - SortedMap> siteMap = group.getEditions(); - HTMLNode siteNode = pageEntryNode.addChild("div", "style", "padding-bottom: 6px;"); - // Create a block for old versions of this SSK - HTMLNode siteBlockOldOuter = siteNode.addChild("div", new String[]{"id", "style"}, new String[]{"result-hiddenblock-"+keybase, (!showold?"display:none":"")}); - // put title on block if it has more than one version in it - if(siteMap.size()>1) - siteBlockOldOuter.addChild("a", new String[]{"onClick", "name"}, new String[]{"toggleResult('"+keybase+"')", keybase}).addChild("h3", "class", "result-grouptitle", keybase.replaceAll("\\b.*/(.*)", "$1")); - // inner block for old versions to be hidden - HTMLNode oldEditionContainer = siteBlockOldOuter.addChild("div", new String[]{"class", "style"}, new String[]{"result-hideblock", "border-left: thick black;"}); - // Loop over all editions in this site - Iterator it3 = siteMap.keySet().iterator(); - while(it3.hasNext()){ - long version = it3.next(); - boolean newestVersion = !it3.hasNext(); - if(newestVersion) // put older versions in block, newest outside block - oldEditionContainer = siteNode; - HTMLNode versionCell; - HTMLNode versionNode; - if(siteMap.get(version).size()>1||siteMap.size()>1){ - // table for this version - versionNode = oldEditionContainer.addChild("table", new String[]{"class"}, new String[]{"librarian-result"}); - HTMLNode grouptitle = versionNode.addChild("tr").addChild("td", new String[]{"padding", "colspan"}, new String[]{"0", "3"}); - grouptitle.addChild("h4", "class", (newestVersion?"result-editiontitle-new":"result-editiontitle-old"), keybase.replaceAll("\\b.*/(.*)", "$1")+(version>=0 ? "-"+version:"")); - // Put link to show hidden older versions block if necessary - if(newestVersion && !showold && js && siteMap.size()>1) - grouptitle.addChild("a", new String[]{"href", "onClick"}, new String[]{"#"+keybase, "toggleResult('"+keybase+"')"}, " ["+(siteMap.size()-1)+" older matching versions]"); - HTMLNode versionrow = versionNode.addChild("tr"); - versionrow.addChild("td", "width", "8px"); - // draw black line down the side of the version - versionrow.addChild("td", new String[]{"class"}, new String[]{"sskeditionbracket"}); - - versionCell=versionrow.addChild("td", "style", "padding-left:15px"); - }else - versionCell = oldEditionContainer; - // loop over each result in this version - Iterator it4 = siteMap.get(version).iterator(); - while(it4.hasNext()){ - versionCell.addChild(termPageEntryNode(it4.next(), newestVersion)); - results++; - } - } - } - }else{ // Just produce sorted list of results - for (Iterator> it = pageset.entrySet().iterator(); it.hasNext();) { - Entry entry = it.next(); - TermPageEntry termPageEntry = entry.getKey(); - boolean newestVersion = entry.getValue(); - pageEntryNode.addChild("div").addChild(termPageEntryNode(termPageEntry, newestVersion)); - results++; - } - } - pageEntryNode.addChild("p").addChild("span", "class", "librarian-summary-found", "Found "+results+" results"); - } - - /** - * Returns an {@link HTMLNode} representation of a {@link TermPageEntry} for display in a browser - * @param entry - * @param newestVersion if set, the result is shown in full brightness, if unset the result is greyed out - */ - private HTMLNode termPageEntryNode(TermPageEntry entry,boolean newestVersion) { - FreenetURI uri = entry.page; - String showtitle = entry.title; - String showurl = uri.toShortString(); - if (showtitle == null || showtitle.trim().length() == 0) { - showtitle = 
showurl; - } - String realurl = "/" + uri.toString(); - HTMLNode pageNode = new HTMLNode("div", new String[]{"class", "style"}, new String[]{"result-entry", ""}); - pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realurl, (newestVersion ? "result-title-new" : "result-title-old"), (entry.rel>0)?"Relevance : "+(entry.rel*100)+"%":"Relevance unknown"}, showtitle); - // create usk url - if (uri.isSSKForUSK()) { - String realuskurl = "/" + uri.uskForSSK().toString(); - pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realuskurl, (newestVersion ? "result-uskbutton-new" : "result-uskbutton-old"), realuskurl}, "[ USK ]"); - } - pageNode.addChild("br"); - pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realurl, (newestVersion ? "result-url-new" : "result-url-old"), uri.toString()}, showurl); - - return pageNode; - } -} diff --git a/src/plugins/Library/ui/StaticToadlet.java b/src/plugins/Library/ui/StaticToadlet.java deleted file mode 100644 index 0259d15e..00000000 --- a/src/plugins/Library/ui/StaticToadlet.java +++ /dev/null @@ -1,78 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.ui; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URI; -import java.net.URL; -import java.util.Date; - -import freenet.client.DefaultMIMETypes; -import freenet.client.HighLevelSimpleClient; -import freenet.clients.http.RedirectException; -import freenet.clients.http.Toadlet; -import freenet.clients.http.ToadletContext; -import freenet.clients.http.ToadletContextClosedException; -import freenet.support.api.Bucket; -import freenet.support.api.HTTPRequest; - -import plugins.Library.Library; - - -/** - * Encapsulates a WebPage in a Toadlet - * @author MikeB - */ -public class StaticToadlet extends Toadlet { - - public StaticToadlet(HighLevelSimpleClient client) { - super(client); - } - - /** - * Get the path to this page - */ - @Override - public String path() { - return ROOT_URL; - } - - public static final String ROOT_URL = "/library/static/"; - public static final String ROOT_PATH = "staticfiles/"; - - public void handleMethodGET(URI uri, final HTTPRequest httprequest, final ToadletContext ctx) - throws ToadletContextClosedException, IOException, RedirectException { - String path = uri.getPath(); - if (!path.startsWith(ROOT_URL)) - return; // doh! 
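-		// (What follows: strip the /library/static/ prefix, swap in the
-		// plugin's classloader, copy the matching classpath resource into a
-		// Bucket in 4096-byte chunks, and reply with a MIME type guessed
-		// from the filename; a 404 page is sent if the resource is missing.)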
- path = path.substring(ROOT_URL.length()); - - ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader(); - Thread.currentThread().setContextClassLoader(Library.class.getClassLoader()); - try { - InputStream strm = getClass().getResourceAsStream(ROOT_PATH+path); - if (strm == null) { - this.sendErrorPage(ctx, 404, "Not found", "Could not find " + httprequest.getPath().substring(path().length()) + " in " + path()); - return; - } - Bucket data = ctx.getBucketFactory().makeBucket(strm.available()); - OutputStream os = data.getOutputStream(); - byte[] cbuf = new byte[4096]; - while(true) { - int r = strm.read(cbuf); - if(r == -1) break; - os.write(cbuf, 0, r); - } - strm.close(); - os.close(); - - ctx.sendReplyHeaders(200, "OK", null, DefaultMIMETypes.guessMIMEType(path, false), data.size()); - ctx.writeData(data); - } finally { - Thread.currentThread().setContextClassLoader(origClassLoader); - } - } -} diff --git a/src/plugins/Library/ui/TermPageGroupEntry.java b/src/plugins/Library/ui/TermPageGroupEntry.java deleted file mode 100644 index fbc87ba7..00000000 --- a/src/plugins/Library/ui/TermPageGroupEntry.java +++ /dev/null @@ -1,50 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.ui; - -import java.util.Collections; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.TreeSet; - -import plugins.Library.index.TermEntry; -import plugins.Library.index.TermPageEntry; - -/** - * TODO make this fit the TermEntry contract - * @author MikeB - */ -public class TermPageGroupEntry extends TermEntry { - private final SortedMap> editions = new TreeMap>(); - - TermPageGroupEntry(String sitebase) { - super(sitebase, 0); - } - - @Override - public TermEntry.EntryType entryType() { - throw new UnsupportedOperationException("Not supported yet."); - } - - @Override - public boolean equalsTarget(TermEntry entry) { - throw new UnsupportedOperationException("Not supported yet."); - } - - public void addPage(long uskEdition, TermPageEntry pageEntry) { - // Add Edition - if(!editions.containsKey(uskEdition)) - editions.put(uskEdition, new TreeSet(RelevanceComparator.comparator)); - editions.get(uskEdition).add(pageEntry); - - // TODO HIGH rework this... TermEntry is supposed to be immutable - //if(rel < pageEntry.rel) // TODO enter better algorithm for calculating relevance here - // rel = pageEntry.rel; // relevance should be on a per-edition basis, probably shouldnt use TermEntry at all - } - - SortedMap> getEditions() { - return Collections.unmodifiableSortedMap(editions); - } -} diff --git a/src/plugins/Library/ui/TestInterface.java b/src/plugins/Library/ui/TestInterface.java deleted file mode 100644 index 86e19e79..00000000 --- a/src/plugins/Library/ui/TestInterface.java +++ /dev/null @@ -1,64 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
-*/
-package plugins.Library.ui;
-
-import plugins.Library.util.exec.Execution;
-import plugins.Library.search.Search;
-import plugins.Library.*;
-
-import freenet.support.Logger;
-
-import java.util.HashMap;
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-
-public class TestInterface{
-	static HashMap<String, Execution> requests = new HashMap<String, Execution>();
-	static int requestcount =0;
-
-	public static void main(String[] args){
-		try{
-			Logger.setupStdoutLogging(Logger.MINOR, "Starting logging");
-			BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
-			String command;
-			String param;
-			String line;
-			String index = "../../Freenet/myindex8";
-			Library library = Library.init(null);
-			Search.setup(library, null);
-
-			do{
-				line = br.readLine();
-				command = line.substring(0, 1);
-				param = line.substring(1);
-
-				if("f".equals(command)){
-					requests.put(""+requestcount, library.getIndex(index).getTermEntries(param));
-					System.out.println("Started request "+requestcount);
-					requestcount++;
-				}
-				if("s".equals(command)){
-					Search search = Search.startSearch(param, index);
-					if (search == null)
-						System.out.println("Stopwords too prominent in query");
-					else{
-						requests.put(""+requestcount, search);
-						System.out.println("Started request "+requestcount);
-						requestcount++;
-					}
-				}
-				if("p".equals(command))
-					System.out.println(requests.get(param).toString());
-				if("r".equals(command))
-					System.out.println(requests.get(param).getResult());
-				if("i".equals(command))
-					System.out.println(library.getIndex(index));
-//				if("w".equals(command))
-//					WebUI.resultNodeGrouped(requests.get(param), true, true);
-			}while(!"x".equals(command));
-		}catch(Exception e){
-			e.printStackTrace();
-		}
-	}
-}
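For reference, the harness above is driven by single-letter commands on stdin: the first character of each line selects the command, the rest is the argument. From the dispatch code: f<term> fetches the raw term entries for <term> from the hard-coded index, s<query> starts a Search, p<n> prints the progress of request n, r<n> prints its result, i prints the index object itself, and x exits. A plausible session (output paraphrased, not captured from a real run):

	sfreenet
	Started request 0
	p0
	<progress of request 0>
	r0
	<result set of request 0>
	x
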
diff --git a/src/plugins/Library/ui/WebInterface.java b/src/plugins/Library/ui/WebInterface.java
deleted file mode 100644
index 9c1c8927..00000000
--- a/src/plugins/Library/ui/WebInterface.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Web request handlers
- *
- * @author j16sdiz (1024D/75494252)
- */
-package plugins.Library.ui;
-
-import freenet.client.HighLevelSimpleClient;
-import freenet.clients.http.PageMaker;
-import freenet.clients.http.ToadletContainer;
-import freenet.node.NodeClientCore;
-import freenet.pluginmanager.PluginRespirator;
-import plugins.Library.Library;
-
-
-public class WebInterface {
-	private PageMaker pageMaker;
-	private final ToadletContainer toadletContainer;
-	private final HighLevelSimpleClient client;
-	private final NodeClientCore core;
-	private Library library;
-	private final PluginRespirator pr;
-	private MainPageToadlet pluginsToadlet;
-	private MainPageToadlet mainToadlet;
-	private StaticToadlet staticToadlet;
-
-	/**
-	 * // @param spider
-	 * // @param client
-	 */
-	public WebInterface(Library library, PluginRespirator pr) {
-		this.library = library;
-
-		pageMaker = pr.getPageMaker();
-		this.toadletContainer = pr.getToadletContainer();
-		this.client = pr.getHLSimpleClient();
-		this.core = pr.getNode().clientCore;
-		this.pr = pr;
-	}
-
-	/**
-	 * Load the Library interface into the FProxy interface
-	 */
-	public void load() {
-		//pageMaker.addNavigationCategory("/library/", "Library", "Library", new Main());
-
-		mainToadlet = new MainPageToadlet(client, library, core, pr);
-		toadletContainer.register(mainToadlet, mainToadlet.menu(), mainToadlet.path(), true, mainToadlet.name(), mainToadlet.name(), true, null );
-
-		// I've just realised that the form filter allows access to /plugins/... so /library/ won't be allowed; this is a temporary Toadlet until there is a whitelist for the form filter and /library is on it. TODO put /library on the form filter whitelist
-		pluginsToadlet = new MainPageToadlet(client, library, core, pr);
-		toadletContainer.register(pluginsToadlet, null, "/plugins/plugin.Library.FreesiteSearch", true, null, null, true, null );
-		staticToadlet = new StaticToadlet(client);
-		toadletContainer.register(staticToadlet, null, "/library/static/", true, false);
-
-	}
-
-	/**
-	 * Unload the Library interface from the FProxy interface
-	 */
-	public void unload() {
-		toadletContainer.unregister(mainToadlet);
-		pageMaker.removeNavigationLink(mainToadlet.menu(), mainToadlet.name());
-		toadletContainer.unregister(pluginsToadlet);
-		toadletContainer.unregister(staticToadlet);
-	}
-}
diff --git a/src/plugins/Library/ui/package-info.java b/src/plugins/Library/ui/package-info.java
deleted file mode 100644
index 010f8c8c..00000000
--- a/src/plugins/Library/ui/package-info.java
+++ /dev/null
@@ -1,14 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-
-
-/**
- * Generates the web interface for the Library
- *
- * TODO get rid of code duplication in here, break up methods into smaller ones and cope with all types of TermEntry
- *
- * @author MikeB
- */
-package plugins.Library.ui;
-
diff --git a/src/plugins/Library/ui/staticfiles/detect.js b/src/plugins/Library/ui/staticfiles/detect.js
deleted file mode 100644
index 130b91d3..00000000
--- a/src/plugins/Library/ui/staticfiles/detect.js
+++ /dev/null
@@ -1 +0,0 @@
-window.location='/library/?js&request=0';
diff --git a/src/plugins/Library/ui/staticfiles/script.js b/src/plugins/Library/ui/staticfiles/script.js
deleted file mode 100644
index f0ea4b3b..00000000
--- a/src/plugins/Library/ui/staticfiles/script.js
+++ /dev/null
@@ -1,32 +0,0 @@
-var url = '/library/xml/?request=0&'; // this bit will have to be put into the page being generated dynamically if used again
-var xmlhttp;
-
-function getProgress(){
-	xmlhttp = new XMLHttpRequest();
-	xmlhttp.onreadystatechange=xmlhttpstatechanged;
-	xmlhttp.open('GET', url, true);
-	xmlhttp.send(null);
-}
-
-function xmlhttpstatechanged(){
-	if(xmlhttp.readyState==4){
-		var resp = xmlhttp.responseXML;
-		var progresscontainer = document.getElementById('librarian-search-status');
-		progresscontainer.replaceChild(resp.getElementsByTagName('progress')[0].cloneNode(true), progresscontainer.getElementsByTagName('table')[0]);
-		if(resp.getElementsByTagName('progress')[0].attributes.getNamedItem('RequestState').value=='FINISHED')
-			document.getElementById('results').appendChild(resp.getElementsByTagName('result')[0].cloneNode(true));
-		else if(resp.getElementsByTagName('progress')[0].attributes.getNamedItem('RequestState').value=='ERROR')
-			document.getElementById('errors').appendChild(resp.getElementsByTagName('error')[0].cloneNode(true));
-		else
-			var t = setTimeout('getProgress()', 1000);
-	}
-}
-getProgress();
-
-function toggleResult(key){
-	var togglebox = document.getElementById('result-hiddenblock-'+key);
-	if(togglebox.style.display == 'block')
-		togglebox.style.display = 'none';
-	else
-		togglebox.style.display = 'block';
-}
diff --git a/src/plugins/Library/ui/staticfiles/style.css b/src/plugins/Library/ui/staticfiles/style.css
deleted file mode 100644
index 45c836f2..00000000
--- a/src/plugins/Library/ui/staticfiles/style.css
+++
/dev/null @@ -1,48 +0,0 @@ - -#results { padding-left: 8px; } -.result-group { padding : 0 0 6px; } -.result-grouptitle { font-size: large; font-weight: bold; margin-bottom: 0px; margin-top: 6px; } -.result-editiontitle-new { display:inline; padding-top: 5px; font-size: medium; color: black; } -.result-editiontitle-old { display:inline; padding-top: 5px; font-size: medium; color: darkGrey; } -.result-entry { margin-bottom: 10px; } -.result-sitename {color:black; font-weight:bold; } -.result-table { border-spacing : 5px; } -a.result-title-new:link { color: Blue } -a.result-title-new:visited { color: Purple; } -a.result-title-old:link { color: LightBlue; } -a.result-title-old:visited { color: Orchid; } -a.result-url-new {color:Green; font-size:small; padding-left:15px; } -a.result-url-old {color:LightGreen; font-size:small; padding-left:15px; } -a.result-uskbutton-new {color: #480000; font-variant: small-caps; font-size: small; padding-left: 20px; } -a.result-uskbutton-old {color: #996666; font-variant: small-caps; font-size: small; padding-left: 20px; } -.progress-table {border-spacing:10px 0px; } -table.progress-table td { padding: 5px 15px; } -td.progress-bar-outline { padding: 0px 3px; width:300px; border:1px solid grey; height : 20px; border-spacing: 0px; } -div.progress-bar-inner-final { background-color: red; height:20px; z-index:-1; border-spacing: 0px; } -div.progress-bar-inner-nonfinal { background-color: pink; height:20px; z-index:-1; border-spacing: 0px; } -input.index { font-size: 0.63em; width: 170px; height:1.1em; } -input.index-bookmark-delete { - width: 12px; - height: 11px; - padding-top: 0px; - padding-bottom: 0px; - padding-left: 0px; - padding-right: 0px; - border-top-width: 0px; - border-bottom-width: 0px; - border-left-width: 0px; - border-right-width: 0px; - background-color: DarkSkyBlue; - color: red; - font-size: xx-small; -} -input.index-other-add { } -li.index { } -td.sskeditionbracket { background: black; width: 2px; padding: 0px; } -ul.index-bookmark-list { list-style: none; } -ul.options-list { list-style-type: none; margin-top: 0; } -div#content th, td { border: none; } -div.authorization-box { background: darksalmon; border: 1px solid darkred; padding: 8px; } -div.authorization-box h1 { font-size: 16pt; font-weight: bold; margin-top: 0px; } -div.authorization-box a:link { color: blue; } - diff --git a/src/plugins/Library/util/BTreeMap.java b/src/plugins/Library/util/BTreeMap.java deleted file mode 100644 index 11b21aeb..00000000 --- a/src/plugins/Library/util/BTreeMap.java +++ /dev/null @@ -1,1959 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import plugins.Library.util.CompositeIterable; -import plugins.Library.util.func.Tuples.X2; -import plugins.Library.util.func.Tuples.X3; - -import java.util.Comparator; -import java.util.Iterator; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.Map; -import java.util.AbstractSet; -import java.util.AbstractMap; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.TreeMap; -import java.util.HashMap; -import java.util.Stack; -import java.util.NoSuchElementException; -import java.util.ConcurrentModificationException; - -/** -** General purpose B-tree implementation. '''This class is not a general-use -** {@link SortedMap}'''; for that use {@link TreeMap}. 
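-**
-** A minimal usage sketch (assuming the usual {@code Map} mutators
-** implemented further down in this class; {@code node_min} = 2 is the
-** smallest legal value, see the constructor below):
-**
-**   BTreeMap<String, Integer> map = new BTreeMap<String, Integer>(2);
-**   map.put("carrot", 3);
-**   map.put("apple", 1);
-**   map.put("banana", 2);
-**   assert "apple".equals(map.firstKey()); // sorted, like a TreeMap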
-** -** From [http://en.wikipedia.org/wiki/B-tree wikipedia]: A B-tree is a tree -** data structure that keeps data sorted and allows searches, insertions, and -** deletions in logarithmic amortized time. Unlike self-balancing binary search -** trees, it is optimized for systems that read and write large blocks of data. -** It is most commonly used in databases and filesystems. -** -** In B-trees, internal (non-leaf) nodes can have a variable number of child -** nodes within some pre-defined range. When data is inserted or removed from a -** node, its number of child nodes changes. In order to maintain the -** pre-defined range, internal nodes may be joined or split. Because a range of -** child nodes is permitted, B-trees do not need re-balancing as frequently as -** other self-balancing search trees, but may waste some space, since nodes are -** not entirely full. -** -** A B-tree is kept balanced by requiring that all leaf nodes are at the same -** depth. This depth will increase slowly as elements are added to the tree, -** but an increase in the overall depth is infrequent, and results in all leaf -** nodes being one more node further away from the root. -** -** A B-tree of order m (the maximum number of children for each node) is a tree -** which satisfies the following properties: -** -** * Every node has between m/2-1 and m-1 entries, except for the root node, -** which has between 1 and m-1 entries. -** * All leaves are the same distance from the root. -** * Every non-leaf node with k entries has k+1 subnodes arranged in sorted -** order between the entries (for details, see {@link Node}). -** -** B-trees have substantial advantages over alternative implementations when -** node access times far exceed access times within nodes. This usually occurs -** when most nodes are in secondary storage such as hard drives. By maximizing -** the number of child nodes within each internal node, the height of the tree -** decreases, balancing occurs less often, and efficiency increases. Usually -** this value is set such that each node takes up a full disk block or an -** analogous size in secondary storage. -** -** If {@link #NODE_MIN} is {@linkplain #BTreeMap(int) set to} 2, the resulting -** tree is structurally and algorithmically equivalent to a 2-3-4 tree, which -** is itself equivalent to a red-black tree, which is the implementation of -** {@link TreeMap} in the standard Java Class Library. This class has extra -** overheads due to the B-tree generalisation of the structures and algorithms -** involved, and is therefore only meant to be used to represent a B-tree -** stored in secondary storage, as the above paragraph describes. -** -** NOTE: at the moment there is a slight bug in this implementation that causes -** '''indeterminate behaviour''' when used with a comparator that admits {@code -** null} keys. This is due to {@code null} having special semantics, meaning -** "end of map", for the {@link Node#lkey} and {@link Node#rkey} fields. This -** is NOT an urgent priority to fix, but might be done in the future. -** -** Note: this implementation, like {@link TreeMap}, is not thread-safe. 
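-**
-** For example, {@code new BTreeMap(2)} gives {@code NODE_MIN} = 2, {@code
-** NODE_MAX} = 4, {@code ENT_MIN} = 1 and {@code ENT_MAX} = 3 (see the field
-** definitions below): every non-root node holds 1 to 3 entries, and every
-** non-leaf node has 2 to 4 children, which is exactly the shape of the
-** 2-3-4 tree mentioned above.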
-** -** @author infinity0 -** @see TreeMap -** @see Comparator -** @see Comparable -*/ -public class BTreeMap extends AbstractMap -implements Map, SortedMap/*, NavigableMap, Cloneable, Serializable*/ { - - /* - ** list (most urgent first): - ** - ** TODO HIGH clean up the use of Node._size - ** TODO HIGH make Node's fields private/immutable - ** TODO LOW use Node.{merge|split} instead of the long code in BTreeMap.{merge|split} - ** FIXME LOW {@link ConcurrentModificationException} for the entrySet iterator - ** - *************************************************************************** - ** IMPORTANT NOTE FOR MAINTAINERS: - *************************************************************************** - ** - ** When navigating the tree, either Node.isLeaf() or Node.nodeSize() MUST - ** be called at least once **for each node visited**, before accessing or - ** modifying its contents. This is to give SkeletonBTreeMap the chance to - ** throw a DataNotLoadedException in case the node has not been loaded. - ** - ** Additionally, each node has a Node._size field which is a cache for - ** Node.totalSize(), which calculates the number of entries in it and all - ** its children. To invalidate the cache, set Node._size to -1. This must - ** be done whenever an operation has >0 chance of changing the total size. - ** - ** FIXME LOW atm there is a slight bug in this implementation that causes - ** indeterminate behaviour when used with a comparator that admits null - ** keys. This is due to null having special semantics ("end of map"), for - ** the lkey and rkey fields. This is NOT an urgent priority to fix, but - ** might be done in the future. - ** - ** NULLNOTICE marks the parts of the code which are affected by this logic, - ** as well as all occurences of /\.[lr]key == null/. - */ - - /** - ** Minimum number of children of each node. - */ - final public int NODE_MIN; - - /** - ** Maximum number of children of each node. Equal to {@code NODE_MIN * 2}. - */ - final public int NODE_MAX; - - /** - ** Minimum number of entries in each node. Equal to {@code NODE_MIN - 1}. - */ - final public int ENT_MIN; - - /** - ** Maximum number of entries in each node. Equal to {@code NODE_MAX - 1}. - */ - final public int ENT_MAX; - - /** - ** Comparator for this {@link SortedMap}. - */ - final protected Comparator comparator; - - /** - ** Root node of the tree. The only node that can have less than ENT_MIN - ** entries. - */ - protected Node root = newNode(null, null, true); - - /** - ** Number of entries currently in the map. - */ - protected int size = 0;; - - /** - ** Creates a new empty map, sorted according to the given comparator, and - ** with each non-root node having the given minimum number of subnodes. - ** - ** @param cmp The comparator for the tree, or {@code null} to use the keys' - ** {@link Comparable natural} ordering. - ** @param node_min Minimum number of subnodes in each node - */ - public BTreeMap(Comparator cmp, int node_min) { - if (node_min < 2) { - throw new IllegalArgumentException("The minimum number of subnodes must be set to at least 2"); - } - comparator = cmp; - NODE_MIN = node_min; - NODE_MAX = node_min<<1; - ENT_MIN = NODE_MIN - 1; - ENT_MAX = NODE_MAX - 1; - } - - /** - ** Creates a new empty map, sorted according to the keys' {@link Comparable - ** natural} ordering, and with each non-root node having the given minimum - ** number of subnodes. 
- ** - ** @param node_min Minimum number of subnodes in each node - **/ - public BTreeMap(int node_min) { - this(null, node_min); - } - - /** - ** Returns the subset of the given {@link SortedSet}, exclusively between - ** the given keys. If either key is {@code null}, this will be treated to - ** mean the relevant edge of the set. - ** - ** OPT NORM - */ - public static SortedSet subSet(SortedSet set, K lk, K rk) { - // NULLNOTICE - // JDK6 - if (lk == null) { - return (rk == null)? set: set.headSet(rk); - } else { - SortedSet ss = (rk == null)? set.tailSet(lk): set.subSet(lk, rk); - if (ss.isEmpty() || !compare0(ss.first(), lk, set.comparator())) { return ss; } - K k2 = Sorted.higher(ss, lk); - return (k2 == null)? ss.headSet(lk): ss.tailSet(k2); - // headSet() will create an empty set with the right comparator and of the right class - } - } - - /** - ** DOCUMENT. - */ - public static class PairIterable implements Iterable> { - - final protected E lobj; - final protected E robj; - final protected Iterable ib; - - public PairIterable(E lo, Iterable objs, E ro) { - lobj = lo; - ib = objs; - robj = ro; - } - - public Iterator> iterator() { - return new Iterator>() { - - final Iterator it = ib.iterator(); - E prev = lobj; - - public boolean hasNext() { - return (it.hasNext() || prev != robj); - } - - public X2 next() { - E rk = (!it.hasNext() && prev != robj)? robj: it.next(); - return new X2(prev, prev = rk); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - } - - /** - ** A B-tree node. It has the following structure: - ** - ** * Every entry in a non-leaf node has two adjacent subnodes, holding - ** entries which are all strictly greater and smaller than the entry. - ** * Conversely, every subnode has two adjacent entries, which are strictly - ** greater and smaller than every entry in the subnode. Note: edge - ** subnodes' "adjacent" keys are not part of the node's entries, but - ** inherited from some ancestor parent.) - ** - ** ^ Node ^ - ** | | - ** | V1 V2 V3 V4 V5 V6 V7 V8 | - ** | | | | | | | | | | - ** lkey K1 K2 K3 K4 K5 K6 K7 K8 rkey - ** \ / \ / \ / \ / \ / \ / \ / \ / \ / - ** Node Node Node Node Node Node Node Node Node - ** - ** | - {@link #entries} mappings - ** \ - {@link #rnodes} mappings - ** / - {@link #lnodes} mappings - ** - ** Note: before anyone gets any ideas about making this class static - ** (believe me, I tried), note that {@link TreeMap} '''requires''' the - ** comparator to be passed at construction. The neatest way to do this is - ** to pass either the tree or the comparator to the node's constructor. - ** - ** This is fine in the context of {@link BTreeMap}, but serialisation makes - ** it extremely awkard. Passing the tree requires us to extend {@link - ** plugins.Library.io.serial.Translator} to take a context paramater, and - ** passing the comparator requires us to add checks to ensure that the - ** comparators are equal when the node is re-attached to the tree. - ** - ** And even if this is implemented, nodes still need to know their parent - ** tree (to set {@link plugins.Library.io.serial.Serialiser}s etc) and so you - ** would need to code a secondary initialisation scheme to be called after - ** object construction, and after execution returns from {@link - ** plugins.Library.io.serial.Translator#rev(Object)}. All in all, this is more - ** trouble than it's worth. - ** - ** So I've gone with the non-static class, which means a new {@code - ** Translator} needs to be constructed for each new B-tree. 
This is only - ** two fields above an {@link Object} (if you re-use the same objects for - ** the {@link SkeletonBTreeMap.NodeTranslator#ktr} and {@code - ** SkeletonBTreeMap.NodeTranslator#mtr} fields) and so should be negligible - ** compared to the memory size of the rest of the B-tree. - ** - ** @author infinity0 - */ - protected class Node implements Comparable { - - /** - ** Whether this node is a leaf. - */ - final protected boolean isLeaf; - - /** - ** Map of entries in this node. - */ - final /*private*/ protected SortedMap entries; - - /** - ** Map of entries to their immediate smaller nodes. The greatest node - ** is mapped to by {@link #rkey}. - */ - final /*private*/ protected Map lnodes; - - /** - ** Map of entries to their immediate greater nodes. The smallest node - ** is mapped to by {@link #lkey}. - */ - final /*private*/ protected Map rnodes; - - /** - ** Greatest key smaller than all keys in this node and subnodes. This - ** is {@code null} when the node is at the minimum-key end of the tree. - */ - /*final*/ protected K lkey; - - /** - ** Smallest key greater than all keys in this node and subnodes. This - ** is {@code null} when the node is at the maximum-key end of the tree. - */ - /*final*/ protected K rkey; - - /** - ** Cache for the total number of entries in this node and all subnodes. - ** A negative value means the cache was invalidated. - */ - transient int _size = -1; - - /** - ** Creates a new node for the BTree, with a custom map to store the - ** entries. - ** - ** It is '''assumed''' that the input map is empty; it is up to the - ** caller to ensure that this holds, or if not, then at least put the - ** node into a consistent state before releasing it for use. - ** - ** @param lk The {@code Node#lkey} for the node. - ** @param rk The {@code Node#rkey} for the node. - ** @param lf Whether to create a leaf node - ** @param map A {@link SortedMap} to use to store the entries - */ - protected Node(K lk, K rk, boolean lf, SortedMap map) { - lkey = lk; - rkey = rk; - isLeaf = lf; - entries = map; - // we don't use sentinel Nil elements because that wastes memory due to - // having to maintain dummy rnodes and lnodes maps. - lnodes = (lf)? null: new HashMap(NODE_MAX); - rnodes = (lf)? null: new HashMap(NODE_MAX); - } - - /** - ** Creates a new node for the BTree - ** - ** @param lk The {@code Node#lkey} for the node. - ** @param rk The {@code Node#rkey} for the node. - ** @param lf Whether to create a leaf node - */ - protected Node(K lk, K rk, boolean lf) { - this(lk, rk, lf, new TreeMap(comparator)); - } - - /** - ** Add the given entries and subnodes to this node. The subnodes are - ** added with {@link #addChildNode(BTreeMap.Node)}. - ** - ** It is '''assumed''' that the entries are located exactly exclusively - ** between the nodes (ie. like in an actual node); it is up to the - ** caller to ensure that this holds. - ** - ** @param ent The entries to add - ** @param nodes The subnodes to add - */ - protected void addAll(SortedMap ent, Iterable nodes) { - assert(ent.comparator() == comparator()); - assert(ent.isEmpty() || compareL(lkey, ent.firstKey()) < 0); - assert(ent.isEmpty() || compareR(ent.lastKey(), rkey) < 0); - entries.putAll(ent); - if (!isLeaf) { - for (Node ch: nodes) { - addChildNode(ch); - } - } - } - - /** - ** @return Number of subnodes - */ - public int childCount() { - return (rnodes == null)? 
0: rnodes.size(); - } - - /** - ** @return Number of local entries - */ - public int nodeSize() { - return entries.size(); - } - - /** - ** @return Whether this is a leaf node - */ - public boolean isLeaf() { - return isLeaf; - } - - /** - ** @return Whether the node is a ghost. This cannot be {@code true} for - ** normal maps, but is used by {@link SkeletonBTreeMap}. - */ - public boolean isGhost() { - return entries == null; - } - - /** - ** @return Total number of entries in this node and all subnodes - */ - protected int totalSize() { - if (_size < 0) { - int s = nodeSize(); // makes any future synchronization easier - if (!isLeaf()) { - for (Node n: lnodes.values()) { - s += n.totalSize(); - } - } - _size = s; - } - return _size; - } - - /** - ** Returns the greatest subnode smaller than the given node. - ** - ** It is '''assumed''' that the input is an actual subnode of this - ** node; it is up to the caller to ensure that this holds. - ** - ** @param node The node to lookup its neighbour for - ** @return The neighbour node, or null if the input node is at the edge - ** @throws NullPointerException if this is a leaf node - */ - public Node nodeL(Node node) { - assert(lnodes.get(node.rkey) == node - && rnodes.get(node.lkey) == node); - - return (compare0(lkey, node.lkey))? null: lnodes.get(node.lkey); - } - - /** - ** Returns the smallest subnode greater than the given node. - ** - ** It is '''assumed''' that the input is an actual subnode of this - ** node; it is up to the caller to ensure that this holds. - ** - ** @param node The node to lookup its neighbour for - ** @return The neighbour node, or null if the input node is at the edge - ** @throws NullPointerException if this is a leaf node - */ - public Node nodeR(Node node) { - assert(lnodes.get(node.rkey) == node - && rnodes.get(node.lkey) == node); - - return (compare0(node.rkey, rkey))? null: rnodes.get(node.rkey); - } - - /** - ** Returns the subnode a that given key belongs in. The key must not be - ** local to the node. - ** - ** It is '''assumed''' that the input key is in the correct range; it - ** is up to the caller to ensure that this holds. - ** - ** @param key The key to select the node for - ** @return The subnode for the key, or null if the key is local to the - ** node - ** @throws NullPointerException if this is a leaf node; or if the key - ** is {@code null} and the entries map uses natural order, or - ** its comparator does not tolerate {@code null} keys - */ - public Node selectNode(K key) { - assert(compareL(lkey, key) < 0 && compareR(key, rkey) < 0); - - SortedMap tailmap = entries.tailMap(key); - if (tailmap.isEmpty()) { - return lnodes.get(rkey); - } else { - K next = tailmap.firstKey(); - return (compare(key, next) == 0)? null: lnodes.get(next); - } - } - - /** - ** Attaches a child via its {@code lkey}, {@code rkey} fields. - ** - ** It is '''assumed''' that both keys are in the local entries map, and - ** that there is no node between these keys. It is up to the caller to - ** ensure that this holds. 
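-	 ** For example, after {@code addChildNode(c)} the maps satisfy
-	 ** {@code rnodes.get(c.lkey) == c} and {@code lnodes.get(c.rkey) == c},
-	 ** preserving the invariant (checked by {@code verifyNodeIntegrity}
-	 ** below) that a non-leaf node with n entries maps exactly n+1 children.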
- ** - ** @param child The child to attach - */ - protected void addChildNode(Node child) { - assert(compare0(rkey, child.rkey) || entries.containsKey(child.rkey)); - assert(compare0(lkey, child.lkey) || entries.containsKey(child.lkey)); - assert(lnodes.get(child.rkey) == null); - assert(rnodes.get(child.lkey) == null); - lnodes.put(child.rkey, child); - rnodes.put(child.lkey, child); - } - - private transient Iterable> _iterNodesK; - /** - ** A view of all subnodes with their lkey and rkey, as an {@link - ** Iterable}. Iteration occurs in '''sorted''' order. - ** - ** TODO LOW this method does not check that this node actually has subnodes - ** (ie. non-leaf). This may or may not change in the future. - */ - public Iterable> iterNodesK() { - if (_iterNodesK == null) { - _iterNodesK = new CompositeIterable, X3>(iterKeyPairs(), true) { - @Override protected X3 nextFor(X2 kp) { - return new X3(kp._0, rnodes.get(kp._0), kp._1); - } - }; - } - return _iterNodesK; - } - - private transient Iterable _iterNodes; - /** - ** A view of all subnodes as an {@link Iterable}. Iteration occurs in - ** '''no particular order'''. - ** - ** TODO LOW this method does not check that this node actually has subnodes - ** (ie. non-leaf). This may or may not change in the future. - */ - public Iterable iterNodes() { - if (_iterNodes == null) { - _iterNodes = new CompositeIterable(rnodes.values(), true) { - @Override protected Node nextFor(Node n) { return n; } - }; - } - return _iterNodes; - } - - private transient Iterable _iterKeys; - /** - ** A view of all local keys as an {@link Iterable}. Iteration occurs - ** in '''sorted''' order. - */ - public Iterable iterKeys() { - if (_iterKeys == null) { - _iterKeys = new CompositeIterable(entries.keySet(), true) { - @Override protected K nextFor(K k) { return k; } - }; - } - return _iterKeys; - } - - private transient Iterable _iterAllKeys; - /** - ** A view of all keys (including {@code lkey}, {@code rkey}) as an - ** {@link Iterable}. Iteration occurs in '''sorted''' order. - */ - public Iterable iterAllKeys() { - if (_iterAllKeys == null) { - _iterAllKeys = new Iterable() { - public Iterator iterator() { - return new Iterator() { - - final Iterator it = entries.keySet().iterator(); - byte stage = 0; - - public boolean hasNext() { - return (stage < 2); - } - - public K next() { - switch (stage) { - case 0: - stage = 1; - return lkey; - case 1: - if (it.hasNext()) { return it.next(); } - else { stage = 2; return rkey; } - case 2: - return it.next(); // throws NoSuchElementException - } - throw new AssertionError(); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - }; - } - return _iterAllKeys; - } - - private transient Iterable> _iterKeyPairs; - /** - ** A view of adjacent key pairs (including {@code lkey}, {@code rkey}) - ** as an {@link Iterable}. Iteration occurs in '''sorted''' order. - */ - public Iterable> iterKeyPairs() { - if (_iterKeyPairs == null) { - // TODO NORM replace this with PairIterable when we make lkey/rkey final - _iterKeyPairs = new Iterable>() { - public Iterator> iterator() { - return new Iterator>() { - - final Iterator it = entries.keySet().iterator(); - K lastkey = lkey; - - public boolean hasNext() { - return (it.hasNext() || lastkey != rkey); - } - - public X2 next() { - K rk = (!it.hasNext() && lastkey != rkey)? 
rkey: it.next(); - return new X2(lastkey, lastkey = rk); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - }; - } - return _iterKeyPairs; - } - - /** - ** A view of child nodes exclusively between {@code lk}, {@code rk}, as - ** an {@link Iterable}. Iteration occurs in '''sorted''' order. - ** - ** It is '''assumed''' that the input keys are all local to the node - ** (or its lkey/rkey), and in the correct order; it is up to the caller - ** to ensure that this holds. - ** - ** Returns {@code null} if this is a leaf. - ** - ** @param lk The key on the L-side, exclusive - ** @param rk The key on the R-side, exclusive - */ - protected Iterable iterNodes(K lk, K rk) { - assert(lk == null || rk == null || compare(lk, rk) < 0); - if (isLeaf) { return null; } - // TODO LOW make a proper iterator.. - List nodes = new java.util.ArrayList(); - SortedSet skey = Sorted.keySet(subEntries(lk, rk)); - if (skey.isEmpty()) { return nodes; } - for (K key: skey) { - nodes.add(lnodes.get(key)); - } - nodes.add(rnodes.get(skey.last())); - return nodes; - } - - /** - ** A view of local entries exclusively between {@code lk}, {@code rk}, - ** as a {@link SortedMap}. - ** - ** It is '''assumed''' that the input keys are all local to the node - ** (or its lkey/rkey), and in the correct order; it is up to the caller - ** to ensure that this holds. - ** - ** @param lk The key on the L-side, exclusive - ** @param rk The key on the R-side, exclusive - */ - protected SortedMap subEntries(K lk, K rk) { - assert(lk == null || rk == null || compare(lk, rk) < 0); - if (compare0(lk, lkey)) { - return (compare0(rk, rkey))? entries: entries.headMap(rk); - } else { - SortedSet tset = Sorted.keySet(entries).tailSet(lk); - assert(!tset.isEmpty()); - if (tset.size() == 1) { - // create an empty set with the right comparator and of the right class - return entries.subMap(lk, lk); - } - K k2 = Sorted.higher(tset, lk); - return (compare0(rk, rkey))? entries.tailMap(k2): entries.subMap(k2, rk); - } - } - - /** - ** Merge child nodes between the given keys. The merged node is added - ** to this node, and the old children are discarded. - ** - ** It is '''assumed''' that the input keys are all local to the node, - ** and in the correct order; it is up to the caller to ensure that this - ** holds. - ** - ** @param lk The {@code lkey} of the new node - ** @param keys The keys of this node at which to merge - ** @param rk The {@code rkey} of the new node - */ - protected void merge(K lk, Iterable keys, K rk) { - Iterator it = iterNodes(lk, rk).iterator(); - Node child = it.next(); - assert(child == rnodes.get(lk)); - - for (K k: keys) { - child.entries.put(k, entries.remove(k)); - } - - while (it.hasNext()) { - Node next = it.next(); - child.addAll(next.entries, (next.isLeaf)? null: next.iterNodes()); - } - - child.rkey = rk; - } - - /** - ** Split a child node at the given keys. The split children are added - ** to this node, and the old child is discarded. - ** - ** It is '''assumed''' that the input keys are all local to the node, - ** and in the correct order; it is up to the caller to ensure that this - ** holds. 
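-	 ** For example, splitting the child spanning (lk, rk) at keys {k1, k2}
-	 ** detaches that child and attaches three new children spanning (lk, k1),
-	 ** (k1, k2) and (k2, rk), with k1 and k2 themselves moved up into this
-	 ** node's entries via {@code swapKey()}.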
- ** - ** @param lk The {@code lkey} of the child node - ** @param keys The keys of the child node at which to split - ** @param rk The {@code rkey} of the child node - */ - protected void split(K lk, Iterable keys, K rk) { - Node child = rnodes.get(lk); - assert(compare0(child.rkey, rk)); - - for (K k: keys) { - swapKey(k, child, this); - } - - // to satisfy conditions for addChildNode() - lnodes.remove(rk); - rnodes.remove(lk); - - for (X2 kp: new PairIterable(lk, keys, rk)) { - Node newnode = newNode(kp._0, kp._1, child.isLeaf); - newnode.addAll(child.subEntries(kp._0, kp._1), child.iterNodes(kp._0, kp._1)); - addChildNode(newnode); - } - } - - /** - ** Defines a total ordering on the nodes. This is useful for eg. deciding - ** which nodes to visit first in a breadth-first search algorithm that uses - ** a priority queue ({@link java.util.concurrent.PriorityBlockingQueue}) - ** instead of a FIFO ({@link java.util.concurrent.LinkedBlockingQueue}). - ** - ** A node is "compare-equal" to another iff: - ** - ** * Both their corresponding endpoints are compare-equal. - ** - ** A node is "smaller" than another iff: - ** - ** * Its right endpoint is strictly smaller than the other's, or - ** * Its right endpoint is compare-equal to the other's, and its left - ** endpoint is strictly greater than the other's. (Note that this case - ** would never be reached in a breadth-first search.) - ** - ** Or, equivalently, and perhaps more naturally (but less obvious why it - ** is a total order): - ** - ** * Both its endpoints are strictly smaller than the other's corresponding - ** endpoints, or - ** * Both of its endpoints are contained within the other's endpoints, - ** and one of them strictly so. - ** - ** @param n The other node to compare with - */ - /*@Override**/ public int compareTo(Node n) { - int b = compareR(rkey, n.rkey); - return (b == 0)? compareL(n.lkey, lkey): b; - // the more "natural" version - // return (a == 0)? ((b == 0)? 0: (b < 0)? -1: 1) - // (a < 0)? ((b == 0)? 1: (b < 0)? -1: 1): - // ((b == 0)? -1: (b < 0)? -1: 1); - } - - public String toTreeString(String istr) { - String nistr = istr + "\t"; - StringBuilder s = new StringBuilder(); - s.append(istr).append('(').append(lkey).append(')').append('\n'); - if (isLeaf) { - for (Map.Entry en: entries.entrySet()) { - s.append(istr).append(en.getKey()).append(" : ").append(en.getValue()).append('\n'); - } - } else { - s.append(rnodes.get(lkey).toTreeString(nistr)); - for (Map.Entry en: entries.entrySet()) { - s.append(istr).append(en.getKey()).append(" : ").append(en.getValue()).append('\n'); - s.append(rnodes.get(en.getKey()).toTreeString(nistr)); - } - } - s.append(istr).append('(').append(rkey).append(')').append('\n'); - return s.toString(); - } - - public String getName() { - return "BTreeMap node " + getRange(); - } - - public String getRange() { - return (lkey == null? "*": lkey) + "-" + (rkey == null? "*": rkey); - } - - } - - public String toTreeString() { - return root.toTreeString(""); - } - - /** - ** Compares two keys using the comparator for this tree, or the keys' - ** {@link Comparable natural} ordering if no comparator was given. - ** - ** @throws ClassCastException if the keys cannot be compared by the given - ** comparator, or if they cannot be compared naturally (ie. they - ** don't implement {@link Comparable}) - ** @throws NullPointerException if either of the keys are {@code null}. - */ - final public static int compare(K key1, K key2, Comparator comparator) { - return (comparator != null)? 
comparator.compare(key1, key2): ((Comparable)key1).compareTo(key2); - } - - protected int compare(K key1, K key2) { - return compare(key1, key2, comparator); - } - - /** - ** Tests for compare-equality. Two values are compare-equal if they are - ** both {@code null} or they {@link #compare(Object, Object)} to {@code 0}. - ** We use this rather than {@link #equals(Object)} to maintain sorting - ** order consistency in case the comparator is inconsistent with equals. - ** - ** @throws ClassCastException if the keys cannot be compared by the given - ** comparator, or if they cannot be compared naturally (ie. they - ** don't implement {@link Comparable}) - */ - final public static boolean compare0(K key1, K key2, Comparator comparator) { - return key1 == null? key2 == null: key2 == null? false: - ((comparator != null)? comparator.compare(key1, key2): ((Comparable)key1).compareTo(key2)) == 0; - } - - protected boolean compare0(K key1, K key2) { - return compare0(key1, key2, comparator); - } - - /** - ** Comparison between {@link Node#lkey} values, where {@code null} means - ** "a value smaller than any other". Use this when one of the operands is - ** the {@code lkey} of some node. - ** - ** @throws ClassCastException if the keys cannot be compared by the given - ** comparator, or if they cannot be compared naturally (ie. they - ** don't implement {@link Comparable}) - */ - protected int compareL(K lkey1, K lkey2) { - return (lkey1 == null)? ((lkey2 == null)? 0: -1): - ((lkey2 == null)? 1: compare(lkey1, lkey2, comparator)); - } - - /** - ** Comparison between {@link Node#rkey} values, where {@code null} means - ** "a value greater than any other". Use this when one of the operands is - ** the {@code rkey} of some node. - ** - ** @throws ClassCastException if the keys cannot be compared by the given - ** comparator, or if they cannot be compared naturally (ie. they - ** don't implement {@link Comparable}) - */ - protected int compareR(K rkey1, K rkey2) { - return (rkey2 == null)? ((rkey1 == null)? 0: -1): - ((rkey1 == null)? 1: compare(rkey1, rkey2, comparator)); - } - - /** - ** Simulates an assertion. - ** - ** @param b The test condition - ** @throws IllegalStateException if the test condition is false - */ - final static void verify(boolean b) { - if (!b) { throw new IllegalStateException("Verification failed"); } - } - - /** - ** Package-private debugging method. This one checks node integrity: - ** - ** * lkey < firstkey, lastkey < rkey - ** - ** For non-leaf nodes: - ** - ** * for each pair of adjacent keys (including two null keys either side of - ** the list of keys), it is possible to traverse between the keys, and - ** the single node between them, using the lnodes, rnodes maps and - ** the lkey, rkey pointers. - ** * lnodes' and rnodes' size is 1 greater than that of entries. Ie. there - ** is nothing else in the node beyond what we visited in the previous - ** step. - ** - ** It also checks the BTree node constraints: - ** - ** * The node has at most ENT_MAX entries. - ** * The node has at least ENT_MIN entries, except the root. - ** * The root has at least 1 entry. 
- ** - ** @throws IllegalStateException if the constraints are not satisfied - */ - final void verifyNodeIntegrity(Node node) { - if (node.entries.isEmpty()) { - // don't test node == root since it might be a newly-created detached node - verify(node.lkey == null && node.rkey == null); - verify(node.isLeaf() && node.rnodes == null && node.lnodes == null); - return; - } - - verify(compareL(node.lkey, node.entries.firstKey()) < 0); - verify(compareR(node.entries.lastKey(), node.rkey) < 0); - - int s = node.nodeSize(); - if (!node.isLeaf()) { - Iterator it = node.entries.keySet().iterator(); - verify(it.hasNext()); - - K prev = node.lkey; - K curr = it.next(); - while (curr != node.rkey || prev != node.rkey) { - Node node1 = node.rnodes.get(prev); - verify(node1 != null); - s += node1.totalSize(); - verify(compare0(curr, node1.rkey)); - Node node2 = node.lnodes.get(curr); - verify(node1 == node2 && compare0(node2.lkey, prev)); - prev = curr; - curr = it.hasNext()? it.next(): node.rkey; - } - - verify(node.nodeSize() + 1 == node.rnodes.size()); - verify(node.nodeSize() + 1 == node.lnodes.size()); - } - /* DEBUG if (node._size > 0 && node._size != s) { - System.out.println(node._size + " vs " + s); - System.out.println(node.toTreeString("\t")); - }*/ - verify(node._size < 0 || node._size == s); - - verify(node.nodeSize() <= ENT_MAX); - // don't test node == root since it might be a newly-created detached node - verify(node.lkey == null && node.rkey == null && node.nodeSize() >= 1 - || node.nodeSize() >= ENT_MIN); - } - - /** - ** Package-private debugging method. This one checks BTree constraints for - ** the subtree rooted at a given node: - ** - ** * All nodes follow the node constraints, listed above. - ** * All leaves appear in the same level - ** - ** @return depth of all leaves - ** @throws IllegalStateException if the constraints are not satisfied - */ - final int verifyTreeIntegrity(Node node) { - verifyNodeIntegrity(node); - if (node.isLeaf()) { - return 0; - } else { - // breath-first search would take up too much memory for a broad BTree - int depth = -1; - for (Node n: node.lnodes.values()) { - int d = verifyTreeIntegrity(n); - if (depth < 0) { depth = d; } - verify(d == depth); - } - return depth + 1; - } - } - - /** - ** Package-private debugging method. This one checks BTree constraints for - ** the entire tree. - ** - ** @throws IllegalStateException if the constraints are not satisfied - */ - final void verifyTreeIntegrity() { - verifyTreeIntegrity(root); - } - - /** - ** Creates a new node. - ** - ** @param lk The {@code Node#lkey} for the node. - ** @param rk The {@code Node#rkey} for the node. - ** @param lf Whether the node is a leaf. - */ - protected Node newNode(K lk, K rk, boolean lf) { - return new Node(lk, rk, lf); - } - - /** - ** Move the given key from the given source node to the given target node. - ** This method should be used whenever entries need to be moved, so that - ** eg. we can proceed even when the values have not been loaded (such as - ** in a {@link SkeletonBTreeMap}). - ** - ** FIXME HIGH do this for everything in the class. ATM only Node.split() - ** uses it. - */ - protected void swapKey(K key, Node src, Node dst) { - V val = src.entries.remove(key); - dst.entries.put(key, val); - } - - /** - ** Split a maximal child node into two minimal nodes, using the median key - ** as the separator between these new nodes in the parent. If the parent is - ** {@code null}, creates a new {@link Node} and points {@link #root} to it. 
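-	 ** For example, with {@code ENT_MAX} = 3, a full child holding {k1, k2, k3}
-	 ** splits into a new left sibling holding {k1} and the old node keeping
-	 ** {k3}, while the median entry k2 moves up into the parent between them.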
- ** - ** It is '''assumed''' that the child is an actual subnode of the parent, - ** and that it is full. It is up to the caller to ensure that this holds. - ** - ** The exact implementation of this may change from time to time, so the - ** calling code should regard the input subnode as effectively destroyed, - ** and re-get the desired result subnode from calling the appropriate - ** get methods on the parent node. - ** - ** @param parent The node to (re)attach the split subnodes to. - ** @param child The subnode to split - */ - private K split(Node parent, Node child) { - assert(child.nodeSize() == ENT_MAX); - assert(parent == null? (child.lkey == null && child.rkey == null): - (parent.nodeSize() < ENT_MAX - && !parent.isLeaf() - && parent.rnodes.get(child.lkey) == child - && parent.lnodes.get(child.rkey) == child)); - - if (parent == null) { - parent = root = newNode(null, null, false); - parent.addChildNode(child); - } - parent._size = child._size = -1; - - Node lnode = newNode(null, null, child.isLeaf()); - K mkey; - - if (child.isLeaf()) { - // this is just the same as the code in the else block, but with leaf - // references to rnodes and lnodes removed (since they are null) - Iterator> it = child.entries.entrySet().iterator(); - for (int i=0; i entry = it.next(); it.remove(); - K key = entry.getKey(); - - lnode.entries.put(key, entry.getValue()); - } - Map.Entry median = it.next(); it.remove(); - mkey = median.getKey(); - - lnode.lkey = child.lkey; - lnode.rkey = child.lkey = mkey; - - parent.rnodes.put(lnode.lkey, lnode); - parent.lnodes.put(child.rkey, child); - parent.entries.put(mkey, median.getValue()); - parent.rnodes.put(mkey, child); - parent.lnodes.put(mkey, lnode); - - } else { - lnode.rnodes.put(child.lkey, child.rnodes.remove(child.lkey)); - Iterator> it = child.entries.entrySet().iterator(); - for (int i=0; i entry = it.next(); it.remove(); - K key = entry.getKey(); - - lnode.entries.put(key, entry.getValue()); - lnode.lnodes.put(key, child.lnodes.remove(key)); - lnode.rnodes.put(key, child.rnodes.remove(key)); - } - Map.Entry median = it.next(); it.remove(); - mkey = median.getKey(); - lnode.lnodes.put(mkey, child.lnodes.remove(mkey)); - - lnode.lkey = child.lkey; - lnode.rkey = child.lkey = mkey; - - parent.rnodes.put(lnode.lkey, lnode); - parent.lnodes.put(child.rkey, child); - parent.entries.put(mkey, median.getValue()); - parent.rnodes.put(mkey, child); - parent.lnodes.put(mkey, lnode); - } - - assert(parent.rnodes.get(mkey) == child); - assert(parent.lnodes.get(mkey) == lnode); - return mkey; - } - - /** - ** Merge two minimal child nodes into a maximal node, using the key that - ** separates them in the parent as the key that joins the halfnodes in the - ** merged node. If the parent is the {@link #root} and the merge makes it - ** empty, point {@code root} to the new merged node. - ** - ** It is '''assumed''' that both childs are actual subnodes of the parent, - ** that they are adjacent in the parent, and that they are minimally full. - ** It is up to the caller to ensure that this holds. - ** - ** The exact implementation of this may change from time to time, so the - ** calling code should regard both input subnodes as effectively destroyed, - ** and re-get the desired result subnode from calling the appropriate - ** get methods on the parent node. - ** - ** @param parent The node to (re)attach the merge node to. 
- ** @param lnode The smaller subnode to merge - ** @param rnode The greater subnode to merge - */ - private K merge(Node parent, Node lnode, Node rnode) { - assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges - assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); - assert(lnode.nodeSize() == ENT_MIN); - assert(rnode.nodeSize() == ENT_MIN); - assert(parent == root && parent.nodeSize() > 0 - || parent.nodeSize() > ENT_MIN); - assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode - && parent.lnodes.get(rnode.lkey) == lnode); - parent._size = lnode._size = rnode._size = -1; - - K mkey = rnode.lkey; // same as lnode.rkey; - - if (rnode.isLeaf()) { - // this is just the same as the code in the else block, but with leaf - // references to rnodes and lnodes removed (since they are null) - rnode.entries.putAll(lnode.entries); - - rnode.entries.put(mkey, parent.entries.remove(mkey)); - rnode.lkey = lnode.lkey; - - parent.rnodes.remove(mkey); - parent.lnodes.remove(mkey); - parent.rnodes.put(lnode.lkey, rnode); - - } else { - rnode.entries.putAll(lnode.entries); - rnode.lnodes.putAll(lnode.lnodes); - rnode.rnodes.putAll(lnode.rnodes); - - rnode.entries.put(mkey, parent.entries.remove(mkey)); - rnode.lnodes.put(mkey, lnode.lnodes.get(mkey)); - rnode.rnodes.put(lnode.lkey, lnode.rnodes.get(lnode.lkey)); - rnode.lkey = lnode.lkey; - - parent.rnodes.remove(mkey); - parent.lnodes.remove(mkey); - parent.rnodes.put(lnode.lkey, rnode); - - } - - if (parent == root && parent.entries.isEmpty()) { - assert(parent.lkey == null && parent.rkey == null - && rnode.lkey == null && rnode.rkey == null); - root = rnode; - } - - assert(parent.rnodes.get(rnode.lkey) == rnode); - assert(parent.lnodes.get(rnode.rkey) == rnode); - return mkey; - } - - /** - ** Performs a rotate operation towards the smaller node. A rotate operation - ** is where: - ** - ** * an entry M from the parent node is moved to a subnode - ** * an entry S from an adjacent subnode is moved to fill gap left by M - ** * a subnode N associated with S is cut from M and attached to S - ** - ** with the operands chosen appropriately to maintain tree constraints. - ** - ** Here, M is the key that separates {@code lnode} and {@code rnode}, S is - ** the smallest key of {@code rnode}, and N its smallest subnode. - ** - ** It is '''assumed''' that both childs are actual subnodes of the parent, - ** that they are adjacent in the parent, and that the child losing an entry - ** has more than {@link #ENT_MIN} entries. It is up to the caller to ensure - ** that this holds. - ** - ** @param parent The parent node of the children. 
- ** @param lnode The smaller subnode, which accepts an entry from the parent - ** @param rnode The greater subnode, which loses an entry to the parent - */ - private K rotateL(Node parent, Node lnode, Node rnode) { - assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges - assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); - assert(rnode.nodeSize() >= lnode.nodeSize()); - assert(rnode.nodeSize() > ENT_MIN); - assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode - && parent.lnodes.get(rnode.lkey) == lnode); - parent._size = lnode._size = rnode._size = -1; - - K mkey = rnode.lkey; - K skey = rnode.entries.firstKey(); - - lnode.entries.put(mkey, parent.entries.remove(mkey)); - parent.entries.put(skey, rnode.entries.remove(skey)); - parent.rnodes.put(skey, parent.rnodes.remove(mkey)); - parent.lnodes.put(skey, parent.lnodes.remove(mkey)); - - lnode.rkey = rnode.lkey = skey; - - if (!lnode.isLeaf()) { - lnode.rnodes.put(mkey, rnode.rnodes.remove(mkey)); - lnode.lnodes.put(skey, rnode.lnodes.remove(skey)); - } - - assert(parent.rnodes.get(skey) == rnode); - assert(parent.lnodes.get(skey) == lnode); - return mkey; - } - - /** - ** Performs a rotate operation towards the greater node. A rotate operation - ** is where: - ** - ** * an entry M from the parent node is moved to a subnode - ** * an entry S from an adjacent subnode is moved to fill gap left by M - ** * a subnode N associated with S is cut from M and attached to S - ** - ** with the operands chosen appropriately to maintain tree constraints. - ** - ** Here, M is the key that separates {@code lnode} and {@code rnode}, S is - ** the greatest key of {@code rnode}, and N its greatest subnode. - ** - ** It is '''assumed''' that both childs are actual subnodes of the parent, - ** that they are adjacent in the parent, and that the child losing an entry - ** has more than {@link #ENT_MIN} entries. It is up to the caller to ensure - ** that this holds. - ** - ** @param parent The parent node of the children. - ** @param lnode The smaller subnode, which loses an entry to the parent - ** @param rnode The greater subnode, which accepts an entry from the parent - */ - private K rotateR(Node parent, Node lnode, Node rnode) { - assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges - assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); - assert(lnode.nodeSize() >= rnode.nodeSize()); - assert(lnode.nodeSize() > ENT_MIN); - assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode - && parent.lnodes.get(rnode.lkey) == lnode); - parent._size = lnode._size = rnode._size = -1; - - K mkey = lnode.rkey; - K skey = lnode.entries.lastKey(); - - rnode.entries.put(mkey, parent.entries.remove(mkey)); - parent.entries.put(skey, lnode.entries.remove(skey)); - parent.lnodes.put(skey, parent.lnodes.remove(mkey)); - parent.rnodes.put(skey, parent.rnodes.remove(mkey)); - - lnode.rkey = rnode.lkey = skey; - - if (!rnode.isLeaf()) { - rnode.lnodes.put(mkey, lnode.lnodes.remove(mkey)); - rnode.rnodes.put(skey, lnode.rnodes.remove(skey)); - } - - assert(parent.rnodes.get(skey) == rnode); - assert(parent.lnodes.get(skey) == lnode); - return mkey; - } - - /** - ** Returns the number of entries contained in the root node. - */ - public int sizeRoot() { - return root.nodeSize(); - } - - public int nodeMin() { - return NODE_MIN; - } - - public int entMax() { - return ENT_MAX; - } - - /** - ** Gives a quick estimate of the height of the tree. 
This method will tend
- ** to over-estimate rather than underestimate.
- **
- ** The exact formula is {@code 1 + floor(log(size)/log(NODE_MIN))}. I
- ** couldn't be bothered trying to prove that this is a firm upper bound;
- ** feel free.
- */
- public int heightEstimate() {
-     // log(0) is -infinity, so special-case the empty tree
-     return 1 + (size == 0? 0: (int)Math.floor(Math.log(size) / Math.log(NODE_MIN)));
- }
-
- /**
- ** Returns the minimum number of nodes (at a single level) needed for the
- ** given number of entries.
- */
- public int minNodesFor(int entries) {
-     // find the smallest k such that k * ENT_MAX + (k-1) >= n
-     // ie. k * NODE_MAX > n :: k = floor( n / NODE_MAX ) + 1
-     return entries / NODE_MAX + 1;
- }
-
- /**
- ** Returns the minimum number of separator keys (at a single level) needed
- ** for the given number of entries.
- */
- public int minKeysFor(int entries) {
-     // 1 less than minNodesFor()
-     return entries / NODE_MAX;
- }
-
- /**
- ** Returns the entry at a particular (zero-based) index.
- */
- public Map.Entry<K, V> getEntry(int index) {
-     if (index < 0 || index >= size) {
-         throw new IndexOutOfBoundsException("Index outside of range [0," + size + ")");
-     }
-
-     int current = 0;
-     Node node = root;
-
-     for (;;) {
-         if (node.isLeaf()) {
-             for (Map.Entry<K, V> en: node.entries.entrySet()) {
-                 if (current == index) { return en; }
-                 ++current;
-             }
-             throw new IllegalStateException("BTreeMap getEntry method is buggy, please report.");
-         }
-
-         // OPT LOW better way to do this than linear iteration through the entries.
-         // this performs badly when large nodes are accessed repeatedly.
-         //
-         // one way would be to insert the last sum-key pair we calculate at each node, into
-         // a cache of SoftReference<SortedMap<Integer, K>> for that node. then, next time we visit
-         // the same node, we can use this cache to generate a submap - ie.
-         // node.entries.tailMap(cache.get(cache.tailMap(index).firstKey())) // firstEntry().getValue() is Java 6 only :(
-         // - and iterate only over this submap (don't forget to set "current" to the right value)
-         //
-         Node nextnode = node.rnodes.get(node.lkey);
-         int next = current + nextnode.totalSize();
-         if (index < next) { node = nextnode; continue; }
-         current = next;
-
-         for (Map.Entry<K, V> en: node.entries.entrySet()) {
-             if (current == index) { return en; }
-             ++current;
-
-             nextnode = node.rnodes.get(en.getKey());
-             next = current + nextnode.totalSize();
-             if (index < next) { node = nextnode; break; }
-             current = next;
-         }
-
-         assert(current <= index);
-     }
- }
-
- /*========================================================================
- public interface Map
- ========================================================================*/
-
- @Override public int size() {
-     return size;
- }
-
- @Override public boolean isEmpty() {
-     return size == 0;
- }
-
- @Override public void clear() {
-     root = newNode(null, null, true);
-     size = 0;
- }
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation just descends the tree, returning {@code true} if it
- ** finds the given key.
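Concrete numbers for the size helpers above (illustrative; assuming NODE_MIN = 4, hence NODE_MAX = 8 and ENT_MAX = 7, the usual B-tree sizing):

    // heightEstimate() with size = 1000:
    //   1 + floor(log(1000)/log(4)) = 1 + floor(4.98...) = 5
    // minNodesFor(100): smallest k with k*ENT_MAX + (k-1) >= 100,
    //   ie. 8k - 1 >= 100, so k = 100/8 + 1 = 13
    // minKeysFor(100): 100/8 = 12 separator keys between those 13 nodes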
- **
- ** @throws ClassCastException key cannot be compared with the keys
- **         currently in the map
- ** @throws NullPointerException key is {@code null} and this map uses
- **         natural order, or its comparator does not tolerate {@code null}
- **         keys
- */
- @Override public boolean containsKey(Object k) {
-     K key = (K) k;
-     Node node = root;
-
-     for (;;) {
-         if (node.isLeaf()) {
-             return node.entries.containsKey(key);
-         }
-
-         Node nextnode = node.selectNode(key);
-         if (nextnode == null) { // key is in this (non-leaf) node
-             return true;
-         }
-
-         node = nextnode;
-     }
- }
-
- /* provided by AbstractMap
- @Override public boolean containsValue(Object value) {
-     throw new UnsupportedOperationException("not implemented");
- }*/
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation just descends the tree, returning the value for the
- ** given key if it can be found.
- **
- ** @throws ClassCastException key cannot be compared with the keys
- **         currently in the map
- ** @throws NullPointerException key is {@code null} and this map uses
- **         natural order, or its comparator does not tolerate {@code null}
- **         keys
- */
- @Override public V get(Object k) {
-     K key = (K) k;
-     Node node = root;
-
-     for (;;) {
-         if (node.isLeaf()) {
-             return node.entries.get(key);
-         }
-
-         Node nextnode = node.selectNode(key);
-         if (nextnode == null) { // key separates two subnodes, ie. it is in this node
-             return node.entries.get(key);
-         }
-
-         node = nextnode;
-     }
- }
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation uses the BTree single-pass insertion algorithm. We
- ** descend the tree, restructuring it as needed so that we can insert an
- ** entry into a leaf.
- **
- ** Start at the root node. At each stage of the algorithm, we restructure
- ** the tree so that the next node we reach has fewer than {@link #ENT_MAX}
- ** entries, and the insertion can occur without breaking constraints.
- **
- ** (*) If the number of entries is {@link #ENT_MAX}, then perform {@link
- ** #split(Node, Node) split} on the node. Both the newly created nodes now
- ** have fewer than {@link #ENT_MAX} entries; pick the appropriate halfnode
- ** for the rest of the operation, depending on the original input key.
- **
- ** If the node is a leaf, insert the value and stop. Otherwise, if the key
- ** is not already in the node, select the appropriate subnode and repeat
- ** from (*).
- **
- ** Otherwise, the key is already in this node: replace the value and stop.
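For instance, overfilling the root triggers exactly one split under the insertion algorithm just described (usage sketch; assumes node_min = 2 and the conventional ENT_MAX = 2*node_min - 1 = 3, which this class does not state explicitly):

    BTreeMap<Integer, Integer> t = new BTreeMap<Integer, Integer>(2); // node_min = 2
    t.put(1, 1); t.put(2, 2); t.put(3, 3);
    // root is now a full leaf: t.sizeRoot() == 3 == ENT_MAX
    t.put(4, 4);
    // put() split the old root on the way down; the new root holds the median:
    // t.sizeRoot() == 1 and t.size() == 4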
- **
- ** @throws ClassCastException key cannot be compared with the keys
- **         currently in the map
- ** @throws NullPointerException key is {@code null} and this map uses
- **         natural order, or its comparator does not tolerate {@code null}
- **         keys
- */
- @Override public V put(K key, V value) {
-     if (key == null) { throw new UnsupportedOperationException("Sorry, this BTreeMap implementation can't handle null keys, even if the comparator supports it."); }
-     Node node = root, parent = null;
-
-     for (;;) {
-         node._size = -1; // pre-emptively invalidate node size cache
-
-         if (node.nodeSize() == ENT_MAX) {
-             K median = split(parent, node);
-             if (parent == null) { parent = root; }
-
-             node = parent.selectNode(key);
-             if (node == null) { return parent.entries.put(key, value); }
-         }
-         assert(node.nodeSize() < ENT_MAX);
-
-         if (node.isLeaf()) {
-             int sz = node.nodeSize();
-             V v = node.entries.put(key, value);
-             if (node.nodeSize() != sz) { ++size; } // update tree size counter
-             return v;
-         }
-
-         Node nextnode = node.selectNode(key);
-         if (nextnode == null) { // key is already in the node
-             return node.entries.put(key, value);
-         }
-
-         parent = node;
-         node = nextnode;
-     }
- }
-
- /**
- ** {@inheritDoc}
- **
- ** This implementation uses the BTree single-pass deletion algorithm. We
- ** descend the tree, restructuring it as needed so that we can delete the
- ** entry from a leaf.
- **
- ** Start at the root node. At each stage of the algorithm, we restructure
- ** the tree so that the next node we reach has more than {@link #ENT_MIN}
- ** entries, and the deletion can occur without breaking constraints.
- **
- ** (*) If the node is not the root, and if the number of entries is equal
- ** to {@link #ENT_MIN}, select its two siblings (L and R). Perform one of
- ** the following operations (ties being broken arbitrarily):
- **
- ** * {@link #merge(Node, Node, Node) merge} with a sibling, if that sibling
- **   also has only {@link #ENT_MIN} entries
- ** * {@link #rotateL(Node, Node, Node) rotateL} with R, if the R subnode
- **   has more entries than L (or equal)
- ** * {@link #rotateR(Node, Node, Node) rotateR} with L, if the L subnode
- **   has more entries than R (or equal)
- **
- ** The selected node now has more than {@link #ENT_MIN} entries.
- **
- ** If the node is a leaf, remove the value and stop. Otherwise, if the key
- ** is not already in the node, select the appropriate subnode and repeat
- ** from (*).
- **
- ** Otherwise, select the two subnodes (L and R) that this key separates.
- ** Perform one of the following operations (ties being broken arbitrarily):
- **
- ** * {@link #merge(Node, Node, Node) merge}, if both subnodes have {@link
- **   #ENT_MIN} entries.
- ** * {@link #rotateL(Node, Node, Node) rotateL}, if the R subnode has more
- **   entries than L (or equal)
- ** * {@link #rotateR(Node, Node, Node) rotateR}, if the L subnode has more
- **   entries than R (or equal)
- **
- ** The node that the key ended up in now has more than {@link #ENT_MIN}
- ** entries (and will be selected for the next stage).
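The implementation below packs this decision table into a single nested conditional; an equivalent if/else rendering, for readability only (L and R are the sibling sizes, -1 when that sibling does not exist):

    if (L < 0) {                         // no left sibling: must use R
        if (R == ENT_MIN) { merge(parent, node, rnode); }
        else { rotateL(parent, node, rnode); }
    } else if (R < 0) {                  // no right sibling: must use L
        if (L == ENT_MIN) { merge(parent, lnode, node); }
        else { rotateR(parent, lnode, node); }
    } else if (R > L) {
        rotateL(parent, node, rnode);    // right sibling has spare entries
    } else if (L > R) {
        rotateR(parent, lnode, node);    // left sibling has spare entries
    } else if ((size & 1) == 1) {        // tie: break "randomly" on size parity
        if (R == ENT_MIN) { merge(parent, node, rnode); }
        else { rotateL(parent, node, rnode); }
    } else {
        if (L == ENT_MIN) { merge(parent, lnode, node); }
        else { rotateR(parent, lnode, node); }
    }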
- ** - ** @throws ClassCastException key cannot be compared with the keys - ** currently in the map - ** @throws NullPointerException key is {@code null} and this map uses - ** natural order, or its comparator does not tolerate {@code null} - ** keys - */ - @Override public V remove(Object k) { - K key = (K) k; - Node node = root, parent = null; - - for (;;) { - node._size = -1; // pre-emptively invalidate node size cache - - if (node != root && node.nodeSize() == ENT_MIN) { - Node lnode = parent.nodeL(node), rnode = parent.nodeR(node); - int L = (lnode == null)? -1: lnode.nodeSize(); - int R = (rnode == null)? -1: rnode.nodeSize(); - - K kk = // in java, ?: must be used in a statement :| - // lnode doesn't exist - (L < 0)? ((R == ENT_MIN)? merge(parent, node, rnode): - rotateL(parent, node, rnode)): - // rnode doesn't exist - (R < 0)? ((L == ENT_MIN)? merge(parent, lnode, node): - rotateR(parent, lnode, node)): - // pick the node with more entries - (R > L)? rotateL(parent, node, rnode): - (L > R)? rotateR(parent, lnode, node): - // otherwise pick one at "random" - (size&1) == 1? (R == ENT_MIN? merge(parent, node, rnode): - rotateL(parent, node, rnode)): - (L == ENT_MIN? merge(parent, lnode, node): - rotateR(parent, lnode, node)); - node = parent.selectNode(key); - assert(node != null); - } - assert(node == root || node.nodeSize() >= ENT_MIN); - - if (node.isLeaf()) { // leaf node - int sz = node.nodeSize(); - V v = node.entries.remove(key); - if (node.nodeSize() != sz) { --size; } // update tree size counter - return v; - } - - Node nextnode = node.selectNode(key); - if (nextnode == null) { // key is already in the node - Node lnode = node.lnodes.get(key), rnode = node.rnodes.get(key); - int L = lnode.nodeSize(), R = rnode.nodeSize(); - - K kk = - // both lnode and rnode must exist, so - // pick the one with more entries - (R > L)? rotateL(node, lnode, rnode): - (L > R)? rotateR(node, lnode, rnode): - // otherwise pick one at "random" - (size&1) == 1? (R == ENT_MIN? merge(node, lnode, rnode): - rotateL(node, lnode, rnode)): - (L == ENT_MIN? merge(node, lnode, rnode): - rotateR(node, lnode, rnode)); - nextnode = node.selectNode(key); - assert(nextnode != null); - } - - parent = node; - node = nextnode; - } - } - - /** - ** {@inheritDoc} - ** - ** This implementation iterates over the given map's {@code entrySet}, - ** adding each mapping in turn, except for when {@code this} map is empty, - ** and the input map is a non-empty {@link SortedMap}. In this case, it - ** uses the BTree bulk-loading algorithm: - ** - ** * distribute all the entries of the map across the least number of nodes - ** possible, excluding the entries that will act as separators between - ** these nodes - ** * repeat for the separator entries, and use them to join the nodes from - ** the previous level appropriately to form a node at this level - ** * repeat until there are no more entries on a level, at which point use - ** the (single) node from the previous level as the root - ** - ** (The optimisation is also used if the input map is {@code this}.) 
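In usage terms (illustrative sketch; the fast path fires only when this tree is empty and the source is a SortedMap over the same comparator, or when the source is the tree itself):

    BTreeMap<String, Integer> tree = new BTreeMap<String, Integer>(64);
    SortedMap<String, Integer> input = new TreeMap<String, Integer>();
    // ... fill input ...
    tree.putAll(input);  // empty tree + SortedMap: bulk-load, minimal node count
    tree.putAll(input);  // tree no longer empty: plain entry-by-entry putAll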
- **
- ** @param t mappings to be stored in this map
- */
- @Override public void putAll(Map<? extends K, ? extends V> t) {
-     // t == this to support restructure()
-     if (t.isEmpty()) {
-         return;
-     } else if (t == this || isEmpty() && t instanceof SortedMap) {
-         SortedMap<K, V> map = (SortedMap<K, V>)t, nextmap;
-         Map<K, Node> lnodes = null, nextlnodes;
-
-         if (!(comparator == null? map.comparator() == null: comparator.equals(map.comparator()))) {
-             super.putAll(map);
-             return;
-         }
-
-         while (map.size() > 0) {
-             int k = minNodesFor(map.size());
-
-             nextlnodes = new HashMap<K, Node>(k<<1);
-             nextmap = new TreeMap<K, V>(comparator);
-
-             Iterator<Map.Entry<K, V>> it = map.entrySet().iterator();
-             K prevkey = null; // NULLNOTICE
-
-             // allocate all entries into these k nodes, except the k-1 separators
-             for (Integer n: Integers.allocateEvenly(map.size()-k+1, k)) {
-                 // put n entries into a new node
-                 Map.Entry<K, V> en = makeNode(it, n, prevkey, lnodes, nextlnodes);
-                 if (en != null) { nextmap.put(prevkey = en.getKey(), en.getValue()); }
-             }
-
-             lnodes = nextlnodes;
-             map = nextmap;
-         }
-
-         assert(lnodes.size() == 1);
-         root = lnodes.get(null);
-         size = t.size(); // map is empty by now; the input (or this) carries the count
-
-     } else {
-         super.putAll(t);
-     }
- }
-
- /**
- ** Helper method for the bulk-loading algorithm.
- **
- ** @param it Iterator over the entries at the current level
- ** @param n Number of entries to add to the new node
- ** @param prevkey Last key encountered before calling this method
- ** @param lnodes Complete map of keys to their lnodes at this level
- ** @param nextlnodes In-construction lnodes map for the next level
- */
- private Map.Entry<K, V> makeNode(Iterator<Map.Entry<K, V>> it, int n, K prevkey, Map<K, Node> lnodes, Map<K, Node> nextlnodes) {
-     Node node;
-     if (lnodes == null) { // create leaf nodes
-         node = newNode(prevkey, null, true);
-         for (int i=0; i<n; ++i) {
-             Map.Entry<K, V> en = it.next();
-             K key = en.getKey();
-             node.entries.put(key, en.getValue());
-             prevkey = key;
-         }
-     } else {
-         node = newNode(prevkey, null, false);
-         for (int i=0; i<n; ++i) {
-             Map.Entry<K, V> en = it.next();
-             K key = en.getKey();
-             node.entries.put(key, en.getValue());
-             Node subnode = lnodes.get(key);
-             node.rnodes.put(prevkey, subnode);
-             node.lnodes.put(key, subnode);
-             prevkey = key;
-         }
-     }
-
-     Map.Entry<K, V> next;
-     K key;
-     if (it.hasNext()) {
-         next = it.next();
-         key = next.getKey();
-     } else {
-         next = null;
-         key = null;
-     }
-
-     if (lnodes != null) {
-         Node subnode = lnodes.get(key);
-         node.rnodes.put(prevkey, subnode);
-         node.lnodes.put(key, subnode);
-     }
-     node.rkey = key;
-     nextlnodes.put(key, node);
-
-     return next;
- }
-
- /**
- ** Restructures the tree, distributing the entries evenly between the leaf
- ** nodes. This method merely calls {@link #putAll(Map)} with {@code this}.
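One round of the bulk-loading loop above, in numbers (illustrative; NODE_MAX = 8, so minNodesFor(100) = 13):

    // Bottom level for 100 entries: k = 13 nodes, 100 - 12 = 88 entries in them,
    // the other 12 entries become separators for the next level up.
    for (Integer n : Integers.allocateEvenly(88, 13)) {
        // n is 7 or 6 (88 = 10*7 + 3*6), with the 7s dispersed evenly;
        // makeNode(...) consumes n entries plus the one separator that follows
    }
    // Next round: map.size() == 12, minNodesFor(12) == 2, and so on up to the root.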
- */ - public void restructure() { - putAll(this); - } - - private Set> entrySet = null; - @Override public Set> entrySet() { - if (entrySet == null) { - entrySet = new AbstractSet>() { - - @Override public int size() { return BTreeMap.this.size(); } - - @Override public Iterator> iterator() { - // FIXME LOW - this does NOT yet throw ConcurrentModificationException - // use a modCount counter - return new Iterator>() { - - Stack nodestack = new Stack(); - Stack>> itstack = new Stack>>(); - - Node cnode = BTreeMap.this.root; - Iterator> centit = cnode.entries.entrySet().iterator(); - - K lastkey = null; - boolean removeok = false; - - // DEBUG ONLY, remove when unneeded - /*public String toString() { - StringBuilder s = new StringBuilder(); - for (Node n: nodestack) { - s.append(n.getRange()).append(", "); - } - return "nodestack: [" + s + "]; cnode: " + cnode.getRange() + "; lastkey: " + lastkey; - }*/ - - /*@Override**/ public boolean hasNext() { - // TODO LOW ideally iterate in the reverse order - for (Iterator> it: itstack) { - if (it.hasNext()) { return true; } - } - if (centit.hasNext()) { return true; } - if (!cnode.isLeaf()) { return true; } - return false; - } - - /*@Override**/ public Map.Entry next() { - if (cnode.isLeaf()) { - while (!centit.hasNext()) { - if (nodestack.empty()) { - assert(itstack.empty()); - throw new NoSuchElementException(); - } - cnode = nodestack.pop(); - centit = itstack.pop(); - } - } else { - while (!cnode.isLeaf()) { - // NULLNOTICE lastkey initialised to null, so this will get the right node - // even at the smaller edge of the map - Node testnode = cnode.rnodes.get(lastkey); - testnode.isLeaf(); // trigger DataNotLoadedException if this is a GhostNode - // node OK, proceed - nodestack.push(cnode); - itstack.push(centit); - cnode = testnode; - centit = cnode.entries.entrySet().iterator(); - } - } - Map.Entry next = centit.next(); lastkey = next.getKey(); - removeok = true; - return next; - } - - /*@Override**/ public void remove() { - if (!removeok) { - throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed."); - } - // OPT LOW this could probably be a *lot* more efficient... - BTreeMap.this.remove(lastkey); - - // we need to find our position in the tree again, since there may have - // been structural modifications to it. 
- // OPT LOW have a "structual modifications counter" that is incremented by - // split, merge, rotateL, rotateR, and do the below only if it changes - nodestack.clear(); - itstack.clear(); - cnode = BTreeMap.this.root; - centit = cnode.entries.entrySet().iterator(); - - K stopkey = null; - while(!cnode.isLeaf()) { - stopkey = findstopkey(stopkey); - nodestack.push(cnode); - itstack.push(centit); - // NULLNOTICE stopkey initialised to null, so this will get the right node - // even at the smaller edge of the map - cnode = cnode.rnodes.get(stopkey); - centit = cnode.entries.entrySet().iterator(); - } - - lastkey = findstopkey(stopkey); - removeok = false; - } - - private K findstopkey(K stopkey) { - SortedMap headmap = cnode.entries.headMap(lastkey); - if (!headmap.isEmpty()) { - stopkey = headmap.lastKey(); - while (compare(centit.next().getKey(), stopkey) < 0); - } - return stopkey; - } - - }; - } - - @Override public void clear() { - BTreeMap.this.clear(); - } - - @Override public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) { return false; } - Map.Entry e = (Map.Entry)o; - Object value = BTreeMap.this.get(e.getKey()); - return value != null && value.equals(e.getValue()); - } - - @Override public boolean remove(Object o) { - if (contains(o)) { - Map.Entry e = (Map.Entry)o; - BTreeMap.this.remove(e.getKey()); - return true; - } - return false; - } - - }; - } - return entrySet; - } - - /* provided by AbstractMap - ** OPT LOW - the default clear() method is inefficient - @Override public Set keySet() { - throw new UnsupportedOperationException("not implemented"); - }*/ - - /* provided by AbstractMap - ** OPT LOW - the default clear() method is inefficient - @Override public Collection values() { - throw new UnsupportedOperationException("not implemented"); - }*/ - - /*======================================================================== - public interface SortedMap - ========================================================================*/ - - /*@Override**/ public Comparator comparator() { - return comparator; - } - - /*@Override**/ public K firstKey() { - Node node = root; - for (;;) { - K fkey = node.entries.firstKey(); - if (node.isLeaf()) { return fkey; } - node = node.lnodes.get(fkey); - } - } - - /*@Override**/ public K lastKey() { - Node node = root; - for (;;) { - K lkey = node.entries.lastKey(); - if (node.isLeaf()) { return lkey; } - node = node.rnodes.get(lkey); - } - } - - /** - ** {@inheritDoc} - ** - ** Not yet implemented - throws {@link UnsupportedOperationException} - */ - /*@Override**/ public SortedMap headMap(K rkey) { - throw new UnsupportedOperationException("not implemented"); - } - - /** - ** {@inheritDoc} - ** - ** Not yet implemented - throws {@link UnsupportedOperationException} - */ - /*@Override**/ public SortedMap tailMap(K lkey) { - throw new UnsupportedOperationException("not implemented"); - } - - /** - ** {@inheritDoc} - ** - ** Not yet implemented - throws {@link UnsupportedOperationException} - */ - /*@Override**/ public SortedMap subMap(K lkey, K rkey) { - throw new UnsupportedOperationException("not implemented"); - } - -} diff --git a/src/plugins/Library/util/BTreeSet.java b/src/plugins/Library/util/BTreeSet.java deleted file mode 100644 index 75e24054..00000000 --- a/src/plugins/Library/util/BTreeSet.java +++ /dev/null @@ -1,76 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version) {. 
See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Comparator; -import java.util.Set; -import java.util.SortedSet; - -/** -** A B-tree set implementation backed by a {@link BTreeMap}. DOCUMENT -** -** @author infinity0 -*/ -public class BTreeSet extends SortedMapSet> -implements Set, SortedSet/*, NavigableSet, Cloneable, Serializable*/ { - - /** - ** Creates a new empty set, sorted according to the given comparator, and - ** with each non-root node having the given minimum number of subnodes. - ** - ** @param cmp The comparator for the tree, or {@code null} to use the keys' - ** {@link Comparable natural} ordering. - ** @param node_min Minimum number of subnodes in each node - */ - public BTreeSet(Comparator cmp, int node_min) { - super(new BTreeMap(cmp, node_min)); - } - - /** - ** Creates a new empty set, sorted according to the keys' {@link Comparable - ** natural} ordering, and with each non-root node having the given minimum - ** number of subnodes. - ** - ** @param node_min Minimum number of subnodes in each node - **/ - public BTreeSet(int node_min) { - super(new BTreeMap(node_min)); - } - - /** - ** Protected constructor for use by the {@link SkeletonBTreeSet} - ** constructors. - */ - protected BTreeSet(BTreeMap m) { - super(m); - } - - /** - ** Returns the number of entries contained in the root node. - */ - public int sizeRoot() { - return bkmap.sizeRoot(); - } - - public int nodeMin() { - return bkmap.nodeMin(); - } - - public int entMax() { - return bkmap.entMax(); - } - - public int heightEstimate() { - return bkmap.heightEstimate(); - } - - /** - ** Returns the element at a particular (zero-based) index. - */ - public E get(int i) { - return bkmap.getEntry(i).getKey(); - } - - -} diff --git a/src/plugins/Library/util/BytePrefixKey.java b/src/plugins/Library/util/BytePrefixKey.java deleted file mode 100644 index ae0efad0..00000000 --- a/src/plugins/Library/util/BytePrefixKey.java +++ /dev/null @@ -1,147 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import plugins.Library.util.PrefixTree.PrefixKey; -import plugins.Library.util.PrefixTree.AbstractPrefixKey; - -import java.util.Arrays; - -/** -** A PrefixKey backed by an array of bytes. -** -** @author infinity0 -*/ -abstract public class BytePrefixKey> extends AbstractPrefixKey implements PrefixKey { - - /** - ** Returns the string representation of a byte array. - */ - public static String bytesToHex(byte[] hash) { - StringBuilder buf = new StringBuilder(); - for (int i = 0; i < hash.length; i++) { - int h = (hash[i] >> 4) & 0x0F; - int l = hash[i] & 0x0F; - buf.append((char)((0<=h && h<=9)? '0'+h: 'a'+h-10)); - buf.append((char)((0<=l && l<=9)? '0'+l: 'a'+l-10)); - } - return buf.toString(); - } - - /** - ** Returns the byte array representation of a hex string. 
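The two helpers are inverses of each other, for example (illustrative values):

    byte[] raw = new byte[] { (byte)0xDE, (byte)0xAD, (byte)0xBE, (byte)0xEF };
    String hex = BytePrefixKey.bytesToHex(raw);    // "deadbeef"
    byte[] back = BytePrefixKey.hexToBytes(hex);   // defined just below
    assert java.util.Arrays.equals(raw, back);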
- */ - public static byte[] hexToBytes(String hex) { - byte[] bs = new byte[hex.length()>>1]; - for (int i=0, j=0; i clone(); - - /*@Override**/ public int symbols() { - return 256; - } - - /*@Override**/ public int size() { - return hash.length; - } - - /*@Override**/ public int get(int i) { - return hash[i] & 0xFF; - } - - /*@Override**/ public void set(int i, int v) { - hash[i] = (byte)v; - } - - /*@Override**/ public void clear(int i) { - hash[i] = 0; - } - - /*======================================================================== - public class Object - ========================================================================*/ - - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (o instanceof BytePrefixKey) { - return Arrays.equals(hash, ((BytePrefixKey)o).hash); - } - return false; - } - - @Override public int hashCode() { - return Arrays.hashCode(hash); - } - -} diff --git a/src/plugins/Library/util/CompositeIterable.java b/src/plugins/Library/util/CompositeIterable.java deleted file mode 100644 index 1fa8cd20..00000000 --- a/src/plugins/Library/util/CompositeIterable.java +++ /dev/null @@ -1,68 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Iterator; - -/** -** An {@link Iterable} backed by another {@link Iterable}. -** -** @param Type of source -** @param Type of target -** @author infinity0 -*/ -abstract public class CompositeIterable implements Iterable { - - /** - ** The backing {@link Iterable}. - */ - final protected Iterable ib; - - /** - ** Whether the iterator supports {@link Iterator#remove()}. - */ - final public boolean immutable; - - /** - ** Create a new mutable iterable backed by the given iterable. Note that - ** mutability will only have an effect if the backing iterator is also - ** mutable. - */ - public CompositeIterable(Iterable i) { - ib = i; - immutable = false; - } - - /** - ** Create a new iterable backed by the given iterable, with the given - ** mutability setting. Note that mutability will only have an effect if the - ** backing iterator is also mutable. - */ - public CompositeIterable(Iterable i, boolean immute) { - ib = i; - immutable = immute; - } - - /*@Override**/ public Iterator iterator() { - return immutable? - new Iterator() { - final Iterator it = ib.iterator(); - /*@Override**/ public boolean hasNext() { return it.hasNext(); } - /*@Override**/ public T next() { return CompositeIterable.this.nextFor(it.next()); } - /*@Override**/ public void remove() { throw new UnsupportedOperationException("Immutable iterator"); } - }: - new Iterator() { - final Iterator it = ib.iterator(); - /*@Override**/ public boolean hasNext() { return it.hasNext(); } - /*@Override**/ public T next() { return CompositeIterable.this.nextFor(it.next()); } - /*@Override**/ public void remove() { it.remove(); } - }; - } - - /** - ** Returns an object of the target type given an object of the source type. - */ - abstract protected T nextFor(S elem); - -} diff --git a/src/plugins/Library/util/DataNotLoadedException.java b/src/plugins/Library/util/DataNotLoadedException.java deleted file mode 100644 index 1212c5ea..00000000 --- a/src/plugins/Library/util/DataNotLoadedException.java +++ /dev/null @@ -1,64 +0,0 @@ -/* This code is part of Freenet. 
It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -/** -** Thrown when data hasn't been loaded yet, eg. when a {@link Skeleton} hasn't -** been fully loaded into memory. Exceptions should be constructed such that -** a method call to {@link Skeleton#inflate()} on {@link #getParent()} with -** {@link #getKey()} as its argument will load the data and prevent an -** exception from being thrown the next time round the data is accessed. -** -** Ideally, this would be {@code DataNotLoadedException}, where {@code K} is -** the type parameter for {@link Skeleton}, but java does not currently allow -** parametrised classes to extend {@link Throwable}. -** -** @author infinity0 -** @see Skeleton -*/ -public class DataNotLoadedException extends RuntimeException { - - /** - ** The parent container of the not-yet-loaded data. - */ - final Skeleton parent; - - /** - ** The key, if any, that the not-yet-loaded data was associated with. - */ - final Object key; - - /** - ** The metadata for the not-yet-loaded data. - */ - final Object meta; - - public DataNotLoadedException(String s, Throwable t, Skeleton p, Object k, Object v) { - super(s, t); - parent = p; - key = k; - meta = v; - } - - public DataNotLoadedException(Throwable t, Skeleton p, Object k, Object v) { - this(null, t, p, k, v); - } - - public DataNotLoadedException(String s, Skeleton p, Object k, Object v) { - this(s, null, p, k, v); - } - - public DataNotLoadedException(String s, Skeleton p, Object k) { - this(s, null, p, k, null); - } - - public DataNotLoadedException(String s, Skeleton p) { - this(s, null, p, null, null); - } - - public Skeleton getParent() { return parent; } - public Object getKey() { return key; } - public Object getValue() { return meta; } - -} diff --git a/src/plugins/Library/util/IdentityComparator.java b/src/plugins/Library/util/IdentityComparator.java deleted file mode 100644 index 4320731b..00000000 --- a/src/plugins/Library/util/IdentityComparator.java +++ /dev/null @@ -1,84 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Comparator; -import java.util.SortedSet; -import java.util.IdentityHashMap; -import java.util.HashMap; - -/** -** A comparator that is consistent with the identity operator on objects. The -** ordering is arbitrary, but consistent with the general contract for -** compare methods. This is useful when you want to have a sorted collection -** like {@link SortedSet}, but want to be able to have two objects (that would -** otherwise compare equal) to be present in the collection simultaneously. -** -** This implementation will remain consistent even if it encounters two objects -** with the *same* identity hash code, by assigning a unique 64-bit id to each -** distinct object, as it encounters such objects. -** -** Since identity hashcodes are generated arbitrarily by the JVM, the ordering -** imposed by this comparator will change between different runs of the JVM. -** Hence, the exact ordering must not be treated as an instrinsic or immutable -** property of the underlying collection. 
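What this buys in practice (usage sketch, not from the source):

    // Two distinct objects that are equal under natural ordering can coexist:
    SortedSet<Object> set = new TreeSet<Object>(IdentityComparator.comparator);
    set.add(new String("x"));
    set.add(new String("x")); // a second, distinct "x"
    // set.size() == 2, since compare() returns 0 only for the very same object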
-** -** The intended use of this class is for another class to subclass it, or to -** have a {@link Comparable#compareTo(Object)} call the method of the singleton -** provided in the {@link #comparator} field. -** -** @author infinity0 -** @see System#identityHashCode(Object) -** @see Comparator#compare(Object, Object) -** @see Comparable#compareTo(Object) -*/ -abstract public class IdentityComparator implements Comparator { - - /** - ** A singleton comparator for use by {@link Comparable#compareTo(Object)}. - */ - final public static IdentityComparator comparator = new IdentityComparator() {}; - - /** - ** Keeps track of objects with the same identity hashcode, and the unique - ** IDs assigned to them by the comparator. This map is *ONLY* used if the - ** comparator encounters two distinct objects with the same identity - ** hashcodes. - */ - final private static IdentityHashMap objectid = new IdentityHashMap(4); - - /** - ** Counts the number of objects that have a given identity hashcode. This - ** map is *ONLY* used if the comparator encounters two distinct objects - ** with the same identity hashcodes. - */ - final private static HashMap idcounter = new HashMap(4); - - /** - ** Compare two objects by identity. - */ - public int compare(T o1, T o2) { - if (o1 == o2) { return 0; } - int h1 = System.identityHashCode(o1); - int h2 = System.identityHashCode(o2); - if (h1 != h2) { - return (h1 > h2)? 1: -1; - } else { - synchronized (IdentityComparator.class) { - Long counter = idcounter.get(h1); - if (counter == null) { counter = 0L; } - - Long l1 = objectid.get(o1); - Long l2 = objectid.get(o2); - if (l1 == null) { l1 = counter++; objectid.put(o1, l1); } - if (l2 == null) { l2 = counter++; objectid.put(o2, l2); } - - idcounter.put(h1, counter); - assert((long)l1 != (long)l2); - return (l1 > l2)? 1: -1; - } - } - } - -} diff --git a/src/plugins/Library/util/Integers.java b/src/plugins/Library/util/Integers.java deleted file mode 100644 index c81d7d5b..00000000 --- a/src/plugins/Library/util/Integers.java +++ /dev/null @@ -1,82 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Iterator; -import java.util.NoSuchElementException; - -/** -** Various integer methods. -** -** @author infinity0 -*/ -final public class Integers { - - private Integers() { } - - // TODO LOW "allocateEvenlyRandom" ie. distributely evenly but with ties - // being broken randomly, in the style of dithering - - /** - ** Allocate {@code total} resources evenly over {@code num} recipients. - ** - ** @return An {@link Iterable} of {@link Integer}, that "contains" {@code - ** num} elements, each of which is either {@code k+1} or {@code k}. - ** The {@code k+1} will themselves also be dispersed evenly - ** throughout the iteration. 
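Worked through the implementation that follows, allocateEvenly(7, 3) gives k = 2, r = 1 and step = 3.0, so the iteration yields 2, 3, 2, the single k+1 landing in the middle:

    for (Integer n : Integers.allocateEvenly(7, 3)) {
        System.out.print(n + " "); // prints: 2 3 2
    }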
- */ - public static Iterable allocateEvenly(final int total, final int num) { - if (total < 0) { - throw new IllegalArgumentException("Total must be non-negative."); - } - if (num <= 0) { - throw new IllegalArgumentException("Count must be positive."); - } - - return new Iterable() { - - final int k = total / num; - final int r = total % num; - final double step = (double)num / r; - - public Iterator iterator() { - return new Iterator() { - double d = step / 2; - int i = 0; - - /*@Override**/ public boolean hasNext() { - return i < num; - } - - /*@Override**/ public Integer next() { - if (i >= num) { - throw new NoSuchElementException(); - } else if ((int)d == i++) { - d += step; - return k+1; - } else { - return k; - } - } - - /*@Override**/ public void remove() { - throw new UnsupportedOperationException(); - } - - }; - } - - @Override public String toString() { - char[] xs = new char[num]; - int j=0; - for (Integer ii: this) { - xs[j++] = (ii == k)? ' ': '+'; - } - return total + "/" + num + ": " + k + "+[" + new String(xs) + "]"; - } - - }; - } - -} diff --git a/src/plugins/Library/util/Maps.java b/src/plugins/Library/util/Maps.java deleted file mode 100644 index 34678982..00000000 --- a/src/plugins/Library/util/Maps.java +++ /dev/null @@ -1,132 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import static plugins.Library.util.Maps.$; - -import java.util.Map.Entry; // WORKAROUND javadoc bug #4464323 -import java.util.Map; - -/** -** Methods for maps. -** -** @author infinity0 -*/ -final public class Maps { - - /** - ** A simple {@link Entry} with no special properties. - */ - public static class BaseEntry implements Map.Entry { - - final public K key; - protected V val; - - public BaseEntry(K k, V v) { key = k; val = v; } - - public K getKey() { return key; } - public V getValue() { return val; } - public V setValue(V n) { V o = val; val = n; return o; } - - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof Map.Entry)) { return false; } - Map.Entry en = (Map.Entry)o; - return (key == null? en.getKey() == null: key.equals(en.getKey())) && (val == null? en.getValue() == null : val.equals(en.getValue())); - } - - @Override public int hashCode() { - return (key == null? 0: key.hashCode()) ^ (val == null? 0: val.hashCode()); - } - - } - - /** - ** A {@link Entry} whose value cannot be modified. - */ - public static class ImmutableEntry extends BaseEntry { - - public ImmutableEntry(K k, V v) { super(k, v); } - - /** - ** @throws UnsupportedOperationException always - */ - @Override public V setValue(V n) { - throw new UnsupportedOperationException("ImmutableEntry: cannot modify value after creation"); - } - - } - - /** - ** A {@link Entry} whose {@link Object#equals(Object)} and {@link - ** Object#hashCode()} are defined purely in terms of the key, which is - ** immutable in the entry. - ** - ** Note: technically this breaks the contract of {@link Entry}. Use only - ** if you know what you're doing. - */ - public static class KeyEntry extends BaseEntry { - - public KeyEntry(K k, V v) { super(k, v); } - - /** - ** Whether the object is also a {@link KeyEntry} and has the same - ** {@code #key} as this entry. 
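These entry classes pair with the static factories defined just below (usage sketch; the exact generic signature of Maps.of is an assumption):

    import static plugins.Library.util.Maps.$;
    import static plugins.Library.util.Maps.$$;

    Map.Entry<String, Integer> a = $("one", 1);   // plain mutable entry
    Map.Entry<String, Integer> b = $$("two", 2);  // immutable: setValue() throws
    Map<String, Integer> m = Maps.of(TreeMap.class, $("x", 1), $("y", 2));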
- */ - @Override public boolean equals(Object o) { - if (!(o instanceof KeyEntry)) { return false; } - KeyEntry en = (KeyEntry)o; - return key == null && en.key == null || key.equals(en.key); - } - - @Override public int hashCode() { - return key == null ? 0 : 1 + key.hashCode(); - } - - } - - private Maps() { } - - /** - ** Returns a new {@link BaseEntry} with the given key and value. - */ - public static Map.Entry $(final K k, final V v) { - return new BaseEntry(k, v); - } - - /** - ** Returns a new {@link ImmutableEntry} with the given key and value. - */ - public static Map.Entry $$(final K k, final V v) { - return new ImmutableEntry(k, v); - } - - /** - ** Returns a new {@link KeyEntry} with the given key and value. - */ - public static Map.Entry $K(K k, V v) { - return new KeyEntry(k, v); - } - - public static Map of(Class mapcl, Map.Entry... items) { - try { - Map map = mapcl.newInstance(); - for (Map.Entry en: items) { - map.put(en.getKey(), en.getValue()); - } - return map; - } catch (InstantiationException e) { - throw new IllegalArgumentException("Could not instantiate map class", e); - } catch (IllegalAccessException e) { - throw new IllegalArgumentException("Could not access map class", e); - } - } - - /*final private static Map testmap = of(SkeletonTreeMap.class, - $("test1", "test1"), - $("test2", "test2"), - $("test3", "test3") - );*/ - -} diff --git a/src/plugins/Library/util/PrefixTree.java b/src/plugins/Library/util/PrefixTree.java deleted file mode 100644 index 252023c9..00000000 --- a/src/plugins/Library/util/PrefixTree.java +++ /dev/null @@ -1,615 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Set; -import java.util.Iterator; - -/** -** Trie-like map or map-like structure (eg. multimap) with each node mapping -** keys that match a given prefix. Each node in the tree will admit a certain -** maximum number of mappings before it starts creating subtrees. -** -** @deprecated -** @author infinity0 -*/ -abstract public class PrefixTree { - - /** - ** The prefix that all keys in this tree must match. - */ - final protected K prefix; - - /** - ** The length of the prefix for this tree. - */ - final protected int preflen; - - /** - ** Array holding the child trees. There is one cell in the array for each - ** symbol in the alphabet of the key. For all i: {@link #child}[i] != null, - ** or {@link #child}[i].{@link #lastIndex()} == i. - */ - final protected PrefixTree[] child; - - /** - ** The size of the {@link #child} array. - */ - final public transient int subtreesMax; - - /** - ** Maximum size of ({@link #sizeLocal()} + {@link #child}) before we start - ** to create subtrees. - */ - final public int capacityLocal; - - /** - ** Number of subtrees. At all times, this should equal the number of - ** non-null members of {@link #child}. - */ - protected int subtrees = 0; - - /** - ** Number of elements contained in the tree. At all times, it should equal - ** the sum of the size of the subtrees, plus the {@link #sizeLocal()}. - */ - protected int size = 0; - - /** - ** Counts the number of mappings in each prefix group, meaning the set of - ** all mappings whose keys give the same key.get(preflen). At all times, - ** the sum the array elements should equal the size field. 
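A concrete snapshot of these counters (illustrative, alphabet of 4 symbols):

    // capacityLocal = 6, subtreesMax = 4
    // sizePrefix = { 2, 9, 1, 0 }            => size = 12
    // child      = { null, T1, null, null }  => subtrees = 1, sizePrefix[1] == T1.size()
    // sizeLocal() = 2 + 1 + 0 = 3            (groups whose child slot is null)
    // sizeLeft()  = capacityLocal - (sizeLocal() + subtrees) = 6 - 4 = 2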
Additionally, - ** for all i: ({@link #child}[i] != null) implies ({@link #sizePrefix}[i] - ** == {@link #child}[i].size()); and {@link #sizeLocal()} == sum{ {@link - ** #sizePrefix}[j] : {@link #child}[j] == null } - */ - protected int sizePrefix[]; - - /** - ** Cache for {@link #smallestChild()}. At all times, this should either be - ** -1, or be the index of the subtree with the smallest size. If -1, then - ** either there are no subtrees (check {@link #subtrees} == 0) or the cache - ** has been invalidated. - */ - protected transient int smch_ = -1; - - protected PrefixTree(K p, int len, int maxsz, PrefixTree[] chd) { - if (chd != null) { - // if the child array is null, we assume the caller knows what they're - // doing... this is needed for SkeletonPrefixTreeMap.DummyChild - if (chd.length != p.symbols()) { - throw new IllegalArgumentException("The child array must be able to exactly hold all its potential children, of which there are " + p.symbols()); - } - for (PrefixTree c: chd) { - if (c != null) { throw new IllegalArgumentException("Child array to attach must be empty."); } - } - } - if (maxsz < p.symbols()) { - throw new IllegalArgumentException("This tree must be able to hold all its potential children, of which there are " + p.symbols()); - } - if (len > p.size()) { - throw new IllegalArgumentException("The key is shorter than the length specified."); - } - if (len < 0) { - throw new IllegalArgumentException("Length cannot be negative."); - } - - prefix = p; - prefix.clearFrom(len); - preflen = len; - - child = chd; - sizePrefix = (child == null)? null: new int[child.length]; - - subtreesMax = p.symbols(); - capacityLocal = maxsz; - } - - /** - ** Return the space left in the local map. - */ - public int sizeLeft() { - return capacityLocal - (sizeLocal() + subtrees); - } - - /** - ** Returns the prefix in string form. - */ - public String prefixString() { - // OPTIMISE maybe have prefixString save into a cache? or too bothersome... - String ps = prefix.toString(); - return ps.substring(0, ps.length() * preflen / prefix.size()); - } - - /** - ** Returns the value at the last significant index of the prefix - */ - public int lastIndex() { - return prefix.get(preflen-1); - } - - /** - ** Returns the index of the smallest child. - */ - protected int smallestChild() { - if (smch_ < 0 && subtrees > 0) { - for (int i=0; i 0); - assert(subtrees < subtreesMax); - - int mcount = -1; - int msym = 0; - for (int i=0; i mcount) { - mcount = sizePrefix[i]; - msym = i; - } - } - - Object[] mkeys = new Object[mcount]; - - // TODO make this use Sorted methods after this class implements Sorted - int i = 0; - for (K ikey: keySetLocal()) { - int isym = ikey.get(preflen); - if (isym < msym) { - continue; - } else if (isym == msym) { - mkeys[i] = ikey; - ++i; - } else if (isym > msym) { - break; - } - } - //assert(i == mkeys.length); - assert(i <= mkeys.length); // Multimaps should be <=, Maps should be == - assert(child[msym] == null); - - child[msym] = makeSubTree(msym); - ++subtrees; - - for (i=0; i -1 && sizePrefix[msym] < sizePrefix[smch_]) { - smch_ = msym; - } - - return msym; - } - - /** - ** Put all entries of a subtree into this node, if there is enough space. - ** (If there is enough space, then this implies that the subtree itself has - ** no child trees, due to the size constraints in the constructor.) - ** - ** @param ch Subtree to free - ** @return Whether there was enough space, and the operation completed. 
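Continuing that snapshot through bindSubTree() above and freeSubTree() below (illustrative):

    // bindSubTree(): the largest unbound group is i = 0 (2 entries), so it is
    // pushed into a new subtree T0:
    //   child = { T0, T1, null, null }, subtrees = 2, sizeLocal() = 1
    // freeSubTree(0): succeeds only if sizePrefix[0] <= sizeLeft() + 1, pulling
    // T0's entries back into the local map and nulling the slot again.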
- */ - private boolean freeSubTree(int ch) { - if (sizePrefix[ch] <= sizeLeft()+1) { - // cache invalidated due to child removal - if (ch == smch_) { smch_ = -1; } - - transferSubtreeToLocal(child[ch]); - child[ch] = null; - --subtrees; - return true; - } - return false; - } - - /** - ** After a put operation, move some entries into new subtrees, with big - ** subtrees taking priority, until there is enough space to fit all the - ** remaining entries. That is, ensure that there exists sz such that: - ** - {@link #sizePrefix}[{@link #smallestChild()}] == sz - ** - sz > {@link #sizeLeft()} - ** - and that for all i: - ** - ({@link #child}[i] == null) implies {@link #sizePrefix}[i] <= sz - ** - ({@link #child}[i] != null) implies {@link #sizePrefix}[i] >= sz - ** - ** @param i Subgroup of element that was inserted - */ - protected void reshuffleAfterPut(int i) { - ++sizePrefix[i]; ++size; - - if (child[i] != null) { - // cache possibly invalidated due to put operation on child - if (i == smch_) { smch_ = -1; } - - } else { - if (sizeLeft() < 0) { - bindSubTree(); - // clearly, a maximum of one subtree can be freed here - freeSubTree(smallestChild()); // smallestChild() exists due to bindSubTree - - } else { - int sm = smallestChild(); - if (sm > -1 && sizePrefix[sm] < sizePrefix[i]) { - // Let sz be the size of the smallest child, which is (*) greater or - // equal to the size of the largest non-child subgroup. Then: - // - before the put, the maximum space left in the data structure was sz-1 - // since we have sz > sizeLeft() - // - before the put, the maximum size of subgroup i was sz, due to (*) - // - so the maximum space left after binding this subgroup is 2sz-1 - // (any elements added due to the put are irrelevant to this) - // - now, the minimum size of two subtrees is 2sz > 2sz-1 - // - so a maximum of one subtree can be freed after binding the subgroup - int j = bindSubTree(); - assert(i == j); - freeSubTree(sm); - } - } - } - } - - /** - ** Reshuffle after a put operation of many elements, keeping the same - ** constraints. The elements must belong to the same subgroup. - ** - ** @param i Subgroup of elements - ** @param n Number of elements - */ - protected void reshuffleAfterPut(int i, int n) { - // logic of reshuffleAfterPut is independent of how many elements are - // actually added during the put operation - sizePrefix[i] += (n-1); size += (n-1); - reshuffleAfterPut(i); - } - - /** - ** After a remove operation, merge some subtrees into this node, with small - ** subtrees taking priority, until there is no more space to fit any more - ** subtrees. That is, ensure that there exists sz such that: - ** - {@link #sizePrefix}[{@link #smallestChild()}] == sz - ** - sz > {@link #sizeLeft()} - ** - and that for all i: - ** - ({@link #child}[i] == null) implies {@link #sizePrefix}[i] <= sz - ** - ({@link #child}[i] != null) implies {@link #sizePrefix}[i] >= sz - ** - ** @param i Subgroup of element that was removed - */ - protected void reshuffleAfterRemove(int i) { - --sizePrefix[i]; --size; - - if (child[i] != null) { - if (smch_ < 0 || i == smch_) { - freeSubTree(smallestChild()); // smallestChild() exists since child[i] != null - - } else if (sizePrefix[i] < sizePrefix[smch_]) { - // we potentially have a new (real) smallestChild, but wait and see... 
- - if (!freeSubTree(i)) { - // if the tree wasn't freed then we have a new smallestChild - smch_ = i; - } - // else, if the tree was freed, then freeSubTree() would not have reset - // smch_, since it still pointed to the old value (which was - // incorrect before the method call, but now correct, so nothing needs to - // be done). - } - // else, the smallestChild hasn't changed, so no more trees can be freed. - - } else { - int t = smallestChild(); - if (t > -1) { freeSubTree(t); } - } - } - - /** - ** Reshuffle after a remove operation of many elements, keeping the same - ** constraints. The elements must belong to the same subgroup. - ** - ** @param i Subgroup of elements - ** @param n Number of elements - */ - protected void reshuffleAfterRemove(int i, int n) { - // Let sz be the size of the smallest child, which is (*) greater or equal - // to the size of the largest non-child subgroup. Then, in the case where - // child[i] == null: - // - the maximum space left in the data structure is sz-1, since we have - // sz > sizeLeft() - // - the maximum space freed by a remove operation is sz, due to (*) - // - so the maximum space left after a remove operation is 2sz-1 - // - now, the minimum size of two subtrees is 2sz > 2sz-1 - // - so a maximum of one subtree can be freed after a remove operation - // and in the case where child[i] != null, the same logic applies as for - // one-element removal - sizePrefix[i] -= (n-1); size -= (n-1); - reshuffleAfterRemove(i); - } - - /** - ** Make a subtree with the appropriate prefix. For effeciency, this method - ** assumes that {@link #child}[i] does not already have a tree attached; it - ** is up to the calling code to ensure that this holds. - ** - ** @param msym The last symbol in the prefix for the subtree. - ** @return The subtree. - */ - abstract protected PrefixTree makeSubTree(int msym); - - /** - ** Transfer the value for a key to the appropriate subtree. For efficiency, - ** this method assumes that {@link #child}[i] already exists and that its - ** prefix matches the key; it is up to the calling code to ensure that this - ** holds. - */ - abstract protected void transferLocalToSubtree(int i, K key); - - /** - ** Transfer all mappings of a subtree to the local map. For efficiency, the - ** subtree is assumed to be an actual direct subtree of this map, ie. the - ** same object as {@link #child}[i] for some i; it is up to the calling - ** code to ensure that this holds. - */ - abstract protected void transferSubtreeToLocal(PrefixTree ch); - - /** - ** Select the object which handles keys with i as the next prefix element. - ** Should return {@link #child}[i] if and only if {@link #child}[i] != null - ** and a pointer to the local map otherwise. - */ - abstract protected Object selectNode(int i); - - /** - ** Returns the local map. - */ - abstract protected Object getLocalMap(); - - /** - ** Clears the local map. - */ - abstract protected void clearLocal(); - - /** - ** Returns the set of keys of the local map. - */ - abstract protected Set keySetLocal(); - - /** - ** Returns the size of the local map. - */ - abstract public int sizeLocal(); - - /** - ** TODO Returns the set of keys of the whole node. 
- */ - abstract protected Set keySet(); - - - /*======================================================================== - public interface Map, Multimap - ========================================================================*/ - - public void clear() { - size = 0; - clearLocal(); - smch_ = -1; - for (int i=0; i ch: child) { - if (ch == null) { continue; } - sum += ch.hashCode(); - } - return sum; - } - - - /** - ** Defines an interface that provides prefix-related operations, such as - ** returning the next component of a key, - ** - ** @author infinity0 - */ - public interface PrefixKey> extends Cloneable, Comparable { - - public PrefixKey clone(); - - /** - ** Returns the number of possible symbols at each cell of the key. This - ** should return the same value for any instance. - */ - public int symbols(); - - /** - ** Returns the size of the key. This should return the same value for - ** any instance. - */ - public int size(); - - /** - ** Gets one cell of the key. - */ - public int get(int i); - - /** - ** Sets one cell of the key. - */ - public void set(int i, int v); - - /** - ** Clears one cell of the key. - */ - public void clear(int i); - - /** - ** Returns a new key with a new value set for one of the cells. - */ - public K spawn(int i, int v); - - /** - ** Clears all cells from a given index. - */ - public void clearFrom(int len); - - /** - ** Whether two keys have matching prefixes. - ** - ** @param p The key to match against - ** @param len Length of prefix to match - */ - public boolean match(K p, int len); - - } - - /** - ** Provides implementations of various higher-level functionalities of - ** PrefixKey in terms of lower-level ones. - ** - ** @author infinity0 - */ - abstract public static class AbstractPrefixKey> implements PrefixKey { - - @Override abstract public AbstractPrefixKey clone(); - - public K spawn(int i, int v) { - K p = (K)clone(); - p.set(i, v); - return p; - } - - public void clearFrom(int len) { - for (int i=len; i y) ? 1: -1; } - } - return 0; - } - - } - - /** - ** TODO Provides an iterator over the PrefixTree. incomplete - */ - abstract public class PrefixTreeKeyIterator implements Iterator { - - final Iterator iterLocal; - Iterator iterChild; - - protected int index = 0; - protected K nextLocal = null; - - protected PrefixTreeKeyIterator() { - iterLocal = keySetLocal().iterator(); - while (index < subtreesMax && child[index++] == null); - if (index < subtreesMax) { iterChild = child[index].keySet().iterator(); } - if (iterLocal.hasNext()) { nextLocal = iterLocal.next(); } - } - - public boolean hasNext() { - return nextLocal != null || iterChild != null; - } - - public K next() { - while (index < subtreesMax || nextLocal != null) { - if (nextLocal != null && nextLocal.get(preflen) < index) { - K k = nextLocal; - nextLocal = (iterLocal.hasNext())? iterLocal.next(): null; - return k; - - } else if (iterChild.hasNext()) { - return iterChild.next(); - - } else { - do { ++index; } while (index < subtreesMax && child[index] == null); - iterChild = (index < subtreesMax)? child[index].keySet().iterator(): null; - continue; - - } - } - throw new java.util.NoSuchElementException(); - } - - public void remove() { - throw new UnsupportedOperationException("Not implemented."); - } - - } - -} diff --git a/src/plugins/Library/util/Skeleton.java b/src/plugins/Library/util/Skeleton.java deleted file mode 100644 index 652e808a..00000000 --- a/src/plugins/Library/util/Skeleton.java +++ /dev/null @@ -1,73 +0,0 @@ -/* This code is part of Freenet. 
It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import plugins.Library.io.serial.Serialiser; -import plugins.Library.util.exec.TaskAbortException; - -/** -** Defines an interface for an extension of a data structure which is only -** partially loaded into memory. Operations on the missing data should throw -** {@link DataNotLoadedException}. -** -** @author infinity0 -** @see Serialiser -** @see DataNotLoadedException -*/ -public interface Skeleton> { - - /** - ** Whether the skeleton is fully loaded and has no data missing. - */ - public boolean isLive(); - - /** - ** Whether the skeleton is bare and has no data loaded at all. - */ - public boolean isBare(); - - /** - ** Get the serialiser for this skeleton. - */ - public S getSerialiser(); - - /** - ** Get the meta data associated with this skeleton. - */ - public Object getMeta(); - - /** - ** Set the meta data associated with this skeleton. - */ - public void setMeta(Object m); - - /** - ** Inflate the entire skeleton so that after the method call, {@link - ** #isLive()} returns true. - */ - public void inflate() throws TaskAbortException; - - /** - ** Deflate the entire skeleton so that after the method call, {@link - ** #isBare()} returns true. - */ - public void deflate() throws TaskAbortException; - - /** - ** Partially inflate the skeleton based on some parameter object. This - ** method may or may not also inflate other parts of the skeleton. - ** - ** @param param The parameter for the partial inflate. - */ - public void inflate(K param) throws TaskAbortException; - - /** - ** Partially deflate the skeleton based on some parameter object. This - ** method may or may not also deflate other parts of the skeleton. - ** - ** @param param The parameter for the partial deflate. - */ - public void deflate(K param) throws TaskAbortException; - -} diff --git a/src/plugins/Library/util/SkeletonBTreeMap.java b/src/plugins/Library/util/SkeletonBTreeMap.java deleted file mode 100644 index 84628770..00000000 --- a/src/plugins/Library/util/SkeletonBTreeMap.java +++ /dev/null @@ -1,1774 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
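Before moving on: the Skeleton contract deleted above is small but easy to misread. isBare() and isLive() are the two extremes and inflate()/deflate() move between them; a partially loaded skeleton is neither. A toy, non-generic sketch of just the invariants (hypothetical class, no Serialiser involved, and it collapses partial loading since it holds a single value):

    class BoxSkeleton {
        private String value;    // null when unloaded
        private String stored;   // stand-in for the serialised form
        boolean isLive() { return value != null; }
        boolean isBare() { return value == null; }
        void deflate() { stored = value; value = null; }  // afterwards isBare() holds
        void inflate() { value = stored; }                // afterwards isLive() holds
        String get() {
            // the real structures throw DataNotLoadedException here
            if (value == null) { throw new IllegalStateException("data not loaded"); }
            return value;
        }
    }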
*/ -package plugins.Library.util; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.IterableSerialiser; -import plugins.Library.io.serial.ScheduledSerialiser; -import plugins.Library.io.serial.MapSerialiser; -import plugins.Library.io.serial.Translator; -import plugins.Library.io.DataFormatException; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.exec.TaskCompleteException; -import plugins.Library.util.func.Tuples.X2; -import plugins.Library.util.func.Tuples.X3; - -import java.util.AbstractSet; -import java.util.Comparator; -import java.util.Iterator; -import java.util.Collection; -import java.util.Map; -import java.util.List; -import java.util.LinkedHashMap; -import java.util.ArrayList; -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.Stack; - -// TODO NORM tidy this -import java.util.Queue; -import java.util.PriorityQueue; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Executor; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import plugins.Library.util.exec.Progress; -import plugins.Library.util.exec.ProgressParts; -import plugins.Library.util.exec.BaseCompositeProgress; -import plugins.Library.io.serial.Serialiser; -import plugins.Library.io.serial.ProgressTracker; -import plugins.Library.util.exec.TaskCompleteException; -import plugins.Library.util.BTreeMap.Node; -import plugins.Library.util.concurrent.Scheduler; - -import java.util.Collections; -import java.util.SortedSet; -import java.util.SortedMap; -import java.util.TreeSet; -import java.util.TreeMap; -import java.util.HashMap; - -import freenet.support.Logger; -import plugins.Library.util.Sorted; -import plugins.Library.util.concurrent.BoundedPriorityBlockingQueue; -import plugins.Library.util.concurrent.ExceptionConvertor; -import plugins.Library.util.concurrent.Notifier; -import plugins.Library.util.concurrent.ObjectProcessor; -import plugins.Library.util.concurrent.Executors; -import plugins.Library.util.event.TrackingSweeper; -import plugins.Library.util.event.CountingSweeper; -import plugins.Library.util.func.Closure; -import plugins.Library.util.func.SafeClosure; -import static plugins.Library.util.Maps.$K; - -/** -** {@link Skeleton} of a {@link BTreeMap}. DOCUMENT -** -** TODO HIGH get rid of uses of rnode.get(K). All other uses of rnode, as well -** as lnode and entries, have already been removed. This will allow us to -** re-implement the Node class. -** -** To the maintainer: this class is very very unstructured and a lot of the -** functionality should be split off elsewhere. Feel free to move stuff around. -** -** @author infinity0 -*/ -public class SkeletonBTreeMap extends BTreeMap implements SkeletonMap { - - /* - ** Whether entries are "internal to" or "contained within" nodes, ie. - ** are the entries for a node completely stored (including values) with - ** that node in the serialised representation, or do they refer to other - ** serialised data that is external to the node? - ** - ** This determines whether a {@link TreeMap} or a {@link SkeletonTreeMap} - ** is used to back the entries in a node. - ** - ** Eg. a {@code BTreeMap>} would have this - ** {@code true} for the map, and {@code false} for the map backing the set. 
-	*/
-	//final protected boolean internal_entries;
-	/*
-	** OPT HIGH disable for now, since I can't think of a good way to
-	** implement this tidily.
-	**
-	** three options:
-	**
-	** 0 have SkeletonNode use TreeMap when int_ent is true rather than
-	**   SkeletonTreeMap but this will either break the Skeleton contract of
-	**   deflate(), which expects isBare() to be true afterwards, or it will
-	**   break the contract of isBare(), if we modify that method to return
-	**   true for TreeMaps instead.
-	**
-	**   - pros: uses an existing class, efficient
-	**   - cons: breaks contracts (no foreseeable way to avoid), complicated
-	**     to implement
-	**
-	** 1 have another class that extends SkeletonTreeMap which has one single
-	**   boolean value isDeflated, alias *flate(K) to *flate(), and all those
-	**   functions do is set that boolean. then override get() etc to throw
-	**   DNLEx depending on the value of that boolean; and have SkeletonNode
-	**   use this class when int_ent is true.
-	**
-	**   - pros: simple, efficient OPT HIGH
-	**   - cons: requires YetAnotherClass
-	**
-	** 2 don't have the internal_entries, and just use a dummy serialiser that
-	**   copies task.data to task.meta for push tasks, and vice versa for pull
-	**   tasks.
-	**
-	**   - pros: simple to implement
-	**   - cons: a hack, inefficient
-	**
-	** for now using option 2, will probably implement option 1 at some point..
-	*/
-
-	/**
-	** Serialiser for the node objects.
-	*/
-	protected IterableSerialiser<SkeletonNode> nsrl;
-
-	/**
-	** Serialiser for the value objects.
-	*/
-	protected MapSerialiser<K, V> vsrl;
-
-	public void setSerialiser(IterableSerialiser<SkeletonNode> n, MapSerialiser<K, V> v) {
-		if ((nsrl != null || vsrl != null) && !isLive()) {
-			throw new IllegalStateException("Cannot change the serialiser when the structure is not live.");
-		}
-		nsrl = n;
-		vsrl = v;
-		((SkeletonNode)root).setSerialiser();
-	}
-
-	static volatile boolean logMINOR;
-	static volatile boolean logDEBUG;
-
-	static {
-		Logger.registerClass(SkeletonBTreeMap.class);
-	}
-
-	final public Comparator<PullTask<SkeletonNode>> CMP_PULL = new Comparator<PullTask<SkeletonNode>>() {
-		/*@Override**/ public int compare(PullTask<SkeletonNode> t1, PullTask<SkeletonNode> t2) {
-			return ((GhostNode)t1.meta).compareTo((GhostNode)t2.meta);
-		}
-	};
-
-	final public Comparator<PushTask<SkeletonNode>> CMP_PUSH = new Comparator<PushTask<SkeletonNode>>() {
-		/*@Override**/ public int compare(PushTask<SkeletonNode> t1, PushTask<SkeletonNode> t2) {
-			return t1.data.compareTo(t2.data);
-		}
-	};
-
-	final public Comparator<Map.Entry<K, V>> CMP_ENTRY = new Comparator<Map.Entry<K, V>>() {
-		/*@Override**/ public int compare(Map.Entry<K, V> t1, Map.Entry<K, V> t2) {
-			return SkeletonBTreeMap.this.compare(t1.getKey(), t2.getKey());
-		}
-	};
-
-	public class SkeletonNode extends Node implements Skeleton<K, IterableSerialiser<SkeletonNode>> {
-
-		protected int ghosts = 0;
-
-		protected SkeletonNode(K lk, K rk, boolean lf, SkeletonTreeMap<K, V> map) {
-			super(lk, rk, lf, map);
-			setSerialiser();
-		}
-
-		protected SkeletonNode(K lk, K rk, boolean lf) {
-			this(lk, rk, lf, new SkeletonTreeMap<K, V>(comparator));
-		}
-
-		protected SkeletonNode(K lk, K rk, boolean lf, SkeletonTreeMap<K, V> map, Collection<GhostNode> gh) {
-			this(lk, rk, lf, map);
-			_size = map.size();
-			if (!lf) {
-				if (map.size()+1 != gh.size()) {
-					throw new IllegalArgumentException("SkeletonNode: in constructing " + getName() + ", got size mismatch: map:" + map.size() + "; gh:" + gh.size());
-				}
-				Iterator<GhostNode> it = gh.iterator();
-				for (X2<K, K> kp: iterKeyPairs()) {
-					GhostNode ghost = it.next();
-					ghost.lkey = kp._0;
-					ghost.rkey = kp._1;
-					ghost.parent = this;
-					_size += ghost._size;
-					addChildNode(ghost);
-				}
-				ghosts = gh.size();
-			}
-		}
-
-		/**
-		** Set the value-serialiser for this node and all subnodes to
match - ** the one assigned for the entire tree. - */ - public void setSerialiser() { - ((SkeletonTreeMap)entries).setSerialiser(vsrl); - if (!isLeaf()) { - for (Node n: iterNodes()) { - if (!n.isGhost()) { - ((SkeletonNode)n).setSerialiser(); - } - } - } - } - - /** - ** Create a {@link GhostNode} object that represents this node. - */ - public GhostNode makeGhost(Object meta) { - GhostNode ghost = new GhostNode(lkey, rkey, totalSize()); - ghost.setMeta(meta); - return ghost; - } - - /*@Override**/ public Object getMeta() { return null; } - - /*@Override**/ public void setMeta(Object m) { } - - /*@Override**/ public IterableSerialiser getSerialiser() { return nsrl; } - - /*@Override**/ public boolean isLive() { - if (ghosts > 0 || !((SkeletonTreeMap)entries).isLive()) { return false; } - if (!isLeaf()) { - for (Node n: iterNodes()) { - SkeletonNode skel = (SkeletonNode)n; - if (!skel.isLive()) { return false; } - } - } - return true; - } - - /*@Override**/ public boolean isBare() { - if (!isLeaf()) { - if (ghosts < childCount()) { - return false; - } - } - return ((SkeletonTreeMap)entries).isBare(); - } - - /** - ** {@inheritDoc} - */ - @Override protected void addChildNode(Node child) { - super.addChildNode(child); - if (child.isGhost()) { ++ghosts; } - } - - /** - ** DOCUMENT. - */ - protected void setChildNode(Node child) { - assert(lnodes.containsKey(child.rkey)); - assert(rnodes.containsKey(child.lkey)); - assert(lnodes.get(child.rkey) == rnodes.get(child.lkey)); - lnodes.put(child.rkey, child); - rnodes.put(child.lkey, child); - } - - /** - ** Attaches a child {@link GhostNode}. - ** - ** It is '''assumed''' that there is already a {@link SkeletonNode} in - ** its place; the ghost will replace it. It is up to the caller to - ** ensure that this holds. - ** - ** @param ghost The GhostNode to attach - */ - protected void attachGhost(GhostNode ghost) { - assert(!rnodes.get(ghost.lkey).isGhost()); - ghost.parent = this; - setChildNode(ghost); - ++ghosts; - } - - /** - ** Attaches a child {@link SkeletonNode}. - ** - ** It is '''assumed''' that there is already a {@link GhostNode} in its - ** place; the skeleton will replace it. It is up to the caller to - ** ensure that this holds. - ** - ** @param skel The SkeletonNode to attach - */ - protected void attachSkeleton(SkeletonNode skel) { - assert(rnodes.get(skel.lkey).isGhost()); - setChildNode(skel); - --ghosts; - } - - /*@Override**/ public void deflate() throws TaskAbortException { - if (!isLeaf()) { - List> tasks = new ArrayList>(childCount() - ghosts); - for (Node node: iterNodes()) { - if (node.isGhost()) { continue; } - if (!((SkeletonNode)node).isBare()) { - ((SkeletonNode)node).deflate(); - } - tasks.add(new PushTask((SkeletonNode)node)); - } - - nsrl.push(tasks); - for (PushTask task: tasks) { - try { - attachGhost((GhostNode)task.meta); - } catch (RuntimeException e) { - throw new TaskAbortException("Could not deflate BTreeMap Node " + getRange(), e); - } - } - } - ((SkeletonTreeMap)entries).deflate(); - assert(isBare()); - } - - // OPT make this parallel - /*@Override**/ public void inflate() throws TaskAbortException { - ((SkeletonTreeMap)entries).inflate(); - if (!isLeaf()) { - for (Node node: iterNodes()) { - inflate(node.lkey, true); - } - } - assert(isLive()); - } - - /*@Override**/ public void inflate(K key) throws TaskAbortException { - inflate(key, false); - } - - /** - ** Deflates the node to the immediate right of the given key. - ** - ** Expects metadata to be of type {@link GhostNode}. 
-		**
-		** @param key The key
-		*/
-		/*@Override**/ public void deflate(K key) throws TaskAbortException {
-			if (isLeaf()) { return; }
-			Node node = rnodes.get(key);
-			if (node.isGhost()) { return; }
-
-			if (!((SkeletonNode)node).isBare()) {
-				throw new IllegalStateException("Cannot deflate non-bare BTreeMap node");
-			}
-
-			PushTask<SkeletonNode> task = new PushTask<SkeletonNode>((SkeletonNode)node);
-			try {
-				nsrl.push(task);
-				attachGhost((GhostNode)task.meta);
-
-			// TODO LOW maybe just ignore all non-error abortions
-			} catch (TaskCompleteException e) {
-				assert(node.isGhost());
-			} catch (RuntimeException e) {
-				throw new TaskAbortException("Could not deflate BTreeMap Node " + node.getRange(), e);
-			}
-		}
-
-		/**
-		** Inflates the node to the immediate right of the given key.
-		**
-		** Passes metadata of type {@link GhostNode}.
-		**
-		** @param key The key
-		** @param auto Whether to recursively inflate the node's subnodes.
-		*/
-		public void inflate(K key, boolean auto) throws TaskAbortException {
-			if (isLeaf()) { return; }
-			Node node = rnodes.get(key);
-			if (!node.isGhost()) { return; }
-
-			PullTask<SkeletonNode> task = new PullTask<SkeletonNode>(node);
-			try {
-				nsrl.pull(task);
-				postPullTask(task, this);
-				if (auto) { task.data.inflate(); }
-
-			} catch (TaskCompleteException e) {
-				assert(!node.isGhost());
-			} catch (DataFormatException e) {
-				throw new TaskAbortException("Could not inflate BTreeMap Node " + node.getRange(), e);
-			} catch (RuntimeException e) {
-				throw new TaskAbortException("Could not inflate BTreeMap Node " + node.getRange(), e);
-			}
-		}
-
-	}
-
-	public class GhostNode extends Node {
-
-		/**
-		** Points to the parent {@link SkeletonNode}.
-		**
-		** Maintaining this field's value is a bitch, so I've tried to remove uses
-		** of this from the code. Currently, it is only used for the parent field of
-		** a {@link DataFormatException}, which lets us retrieve the serialiser,
-		** progress, etc etc etc. TODO NORM somehow find another way of doing that
-		** so we can get rid of it completely.
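The ghosts counter maintained by addChildNode()/attachGhost()/attachSkeleton() above is what makes the bare/live checks cheap on a non-leaf node. Stripped-down bookkeeping, as a hypothetical stand-in (the real isBare()/isLive() also consult the local SkeletonTreeMap):

    class GhostCount {
        int ghosts, children;
        boolean isBare() { return ghosts == children; }  // every child swapped out
        boolean isLive() { return ghosts == 0; }         // every child present
        void attachGhost()    { ++ghosts; }              // SkeletonNode -> GhostNode
        void attachSkeleton() { --ghosts; }              // GhostNode -> SkeletonNode
    }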
- */ - protected SkeletonNode parent; - - protected Object meta; - - protected GhostNode(K lk, K rk, SkeletonNode p, int s) { - super(lk, rk, false, null); - parent = p; - _size = s; - } - - protected GhostNode(K lk, K rk, int s) { - this(lk, rk, null, s); - } - - public Object getMeta() { - return meta; - } - - public void setMeta(Object m) { - meta = m; - } - - @Override public int nodeSize() { - throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); - } - - @Override public int childCount() { - throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); - } - - @Override public boolean isLeaf() { - throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); - } - - @Override public Node nodeL(Node n) { - // this method-call should never be reached in the B-tree algorithm - throw new AssertionError("GhostNode: called nodeL()"); - } - - @Override public Node nodeR(Node n) { - // this method-call should never be reached in the B-tree algorithm - throw new AssertionError("GhostNode: called nodeR()"); - } - - @Override public Node selectNode(K key) { - // this method-call should never be reached in the B-tree algorithm - throw new AssertionError("GhostNode: called selectNode()"); - } - - } - - public SkeletonBTreeMap(Comparator cmp, int node_min) { - super(cmp, node_min); - } - - public SkeletonBTreeMap(int node_min) { - super(node_min); - } - - /** - ** Post-processes a {@link PullTask} and returns the {@link SkeletonNode} - ** pulled. - ** - ** The tree will be in a consistent state after the operation, if it was - ** in a consistent state before it. - */ - protected SkeletonNode postPullTask(PullTask task, SkeletonNode parent) throws DataFormatException { - SkeletonNode node = task.data; - GhostNode ghost = (GhostNode)task.meta; - if (!compare0(ghost.lkey, node.lkey) || !compare0(ghost.rkey, node.rkey)) { - throw new DataFormatException("BTreeMap Node lkey/rkey does not match", null, node); - } - - parent.attachSkeleton(node); - return node; - } - - /** - ** Post-processes a {@link PushTask} and returns the {@link GhostNode} - ** pushed. - ** - ** The tree will be in a consistent state after the operation, if it was - ** in a consistent state before it. 
-	*/
-	protected GhostNode postPushTask(PushTask<SkeletonNode> task, SkeletonNode parent) {
-		GhostNode ghost = (GhostNode)task.meta;
-		parent.attachGhost(ghost);
-		return ghost;
-	}
-
-	@Override protected Node newNode(K lk, K rk, boolean lf) {
-		return new SkeletonNode(lk, rk, lf);
-	}
-
-	@Override protected void swapKey(K key, Node src, Node dst) {
-		SkeletonTreeMap.swapKey(key, (SkeletonTreeMap<K, V>)src.entries, (SkeletonTreeMap<K, V>)dst.entries);
-	}
-
-	/*========================================================================
-	  public interface SkeletonMap
-	  ========================================================================*/
-
-	/*@Override**/ public Object getMeta() { return null; }
-
-	/*@Override**/ public void setMeta(Object m) { }
-
-	/*@Override**/ public MapSerialiser<K, V> getSerialiser() { return vsrl; }
-
-	/*@Override**/ public boolean isLive() {
-		return ((SkeletonNode)root).isLive();
-	}
-
-	/*@Override**/ public boolean isBare() {
-		return ((SkeletonNode)root).isBare();
-	}
-
-	/*@Override**/ public void deflate() throws TaskAbortException {
-		((SkeletonNode)root).deflate();
-	}
-
-	// TODO NORM tidy this; this should probably go in a serialiser
-	// and then we will access the Progress of a submap with a task whose
-	// metadata is (lkey, rkey), or something..(PROGRESS)
-	BaseCompositeProgress pr_inf = new BaseCompositeProgress();
-	public BaseCompositeProgress getProgressInflate() { return pr_inf; } // REMOVE ME
-	/**
-	** Parallel bulk-inflate. At the moment, this will inflate all the values
-	** of each node too.
-	**
-	** Not yet thread safe, but ideally it should be. See source for details.
-	*/
-	/*@Override**/ public void inflate() throws TaskAbortException {
-
-		// TODO NORM adapt the algorithm to track partial loads of submaps (SUBMAP)
-		// TODO NORM if we do that, we'll also need to make it thread-safe. (THREAD)
-		// TODO NORM and do the PROGRESS stuff whilst we're at it
-
-		if (!(nsrl instanceof ScheduledSerialiser)) {
-			// TODO LOW could just use the code below - since the Scheduler would be
-			// unavailable, the tasks could be executed in the current thread, and the
-			// priority queue's comparator would turn it into depth-first search
-			// automatically.
-			((SkeletonNode)root).inflate();
-			return;
-		}
-
-		final Queue<SkeletonNode> nodequeue = new PriorityQueue<SkeletonNode>();
-
-		Map<PullTask<SkeletonNode>, ProgressTracker<SkeletonNode, ?>> ids = null;
-		ProgressTracker<SkeletonNode, ?> ntracker = null;
-
-		if (nsrl instanceof Serialiser.Trackable) {
-			ids = new LinkedHashMap<PullTask<SkeletonNode>, ProgressTracker<SkeletonNode, ?>>();
-			ntracker = ((Serialiser.Trackable<SkeletonNode>)nsrl).getTracker();
-			// PROGRESS make a ProgressTracker track this instead of "pr_inf".
-			pr_inf.setSubProgress(ProgressTracker.makePullProgressIterable(ids));
-			pr_inf.setSubject("Pulling all entries in B-tree");
-		}
-
-		final ObjectProcessor<PullTask<SkeletonNode>, SkeletonNode, TaskAbortException> proc_pull
-			= ((ScheduledSerialiser<SkeletonNode>)nsrl).pullSchedule(
-				new PriorityBlockingQueue<PullTask<SkeletonNode>>(0x10, CMP_PULL),
-				new LinkedBlockingQueue<X2<PullTask<SkeletonNode>, TaskAbortException>>(0x10),
-				new HashMap<PullTask<SkeletonNode>, SkeletonNode>()
-			);
-		//System.out.println("Using scheduler");
-		//int DEBUG_pushed = 0, DEBUG_popped = 0;
-
-		try {
-			nodequeue.add((SkeletonNode)root);
-
-			// FIXME HIGH make a copy of the deflated root so that we can restore it if the
-			// operation fails
-
-			do {
-				//System.out.println("pushed: " + DEBUG_pushed + "; popped: " + DEBUG_popped);
-
-				// handle the inflated tasks and attach them to the tree.
-				// THREAD progress tracker should prevent this from being run twice for the
-				// same node, but what if we didn't use a progress tracker? hmm...
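With the serialiser, progress tracking and the ObjectProcessor machinery stripped away, the loop that follows is a breadth-first traversal that materialises ghost children as it reaches them. A self-contained toy version for orientation only (ToyNode is invented; the real code batches pulls through the scheduler instead of loading inline):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    class ToyNode {
        final List<ToyNode> loaded = new ArrayList<ToyNode>();
        final List<ToyNode> ghosts = new ArrayList<ToyNode>(); // unloaded children

        static void inflateAll(ToyNode root) {
            Queue<ToyNode> queue = new ArrayDeque<ToyNode>();
            queue.add(root);
            while (!queue.isEmpty()) {
                ToyNode n = queue.remove();
                // (real code: also inflate n's local entry map here)
                for (ToyNode g : n.ghosts) {   // "pull" each ghost, then recurse into it
                    n.loaded.add(g);
                    queue.add(g);
                }
                n.ghosts.clear();
            }
        }
    }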
- while (proc_pull.hasCompleted()) { - X3, SkeletonNode, TaskAbortException> res = proc_pull.accept(); - PullTask task = res._0; - SkeletonNode parent = res._1; - TaskAbortException ex = res._2; - if (ex != null) { - assert(!(ex instanceof plugins.Library.util.exec.TaskInProgressException)); // by contract of ScheduledSerialiser - if (!(ex instanceof TaskCompleteException)) { - // TODO LOW maybe dump it somewhere else and throw it at the end... - throw ex; - } - // retrieve the inflated SkeletonNode and add it to the queue... - GhostNode ghost = (GhostNode)task.meta; - // THREAD race condition here... if another thread has inflated the task - // but not yet attached the inflated node to the tree, the assertion fails. - // could check to see if the Progress for the Task still exists, but the - // performance of this depends on the GC freeing weak referents quickly... - assert(!parent.rnodes.get(ghost.lkey).isGhost()); - nodequeue.add((SkeletonNode)parent.rnodes.get(ghost.lkey)); - } else { - SkeletonNode node = postPullTask(task, parent); - nodequeue.add(node); - } - //++DEBUG_popped; - } - - // go through the nodequeue and add any child ghost nodes to the tasks queue - while (!nodequeue.isEmpty()) { - SkeletonNode node = nodequeue.remove(); - // TODO HIGH this needs to be asynchronous - ((SkeletonTreeMap)node.entries).inflate(); // SUBMAP here - - if (node.isLeaf()) { continue; } - for (Node next: node.iterNodes()) { // SUBMAP here - if (!next.isGhost()) { - SkeletonNode skel = (SkeletonNode)next; - if (!skel.isLive()) { nodequeue.add(skel); } - continue; - } - PullTask task = new PullTask((GhostNode)next); - if (ids != null) { ids.put(task, ntracker); } - ObjectProcessor.submitSafe(proc_pull, task, node); - //++DEBUG_pushed; - } - } - - Thread.sleep(0x40); // FIXME HIGH - // we really should put this higher, but BIndexTest tries to inflate a - // of trees in serial and with a 1 second-wait between each, this would - // take ages - } while (proc_pull.hasPending()); - - pr_inf.setEstimate(ProgressParts.TOTAL_FINALIZED); - - } catch (DataFormatException e) { - throw new TaskAbortException("Bad data format", e); - } catch (InterruptedException e) { - throw new TaskAbortException("interrupted", e); - } finally { - proc_pull.close(); - //System.out.println("pushed: " + DEBUG_pushed + "; popped: " + DEBUG_popped); - //assert(DEBUG_pushed == DEBUG_popped); - } - } - - /*@Override**/ public void deflate(K key) throws TaskAbortException { - throw new UnsupportedOperationException("not implemented"); - } - - /*@Override**/ public void inflate(K key) throws TaskAbortException { - inflate(key, false); - } - - /*@Override**/ public void inflate(K key, boolean deflateRest) throws TaskAbortException { - // TODO NORM tidy up - // OPT LOW could write a more efficient version by keeping track of - // the already-inflated nodes so get() doesn't keep traversing down the - // tree - would only improve performance from O(log(n)^2) to O(log(n)) so - // not that big a priority - for (;;) { - try { - if(deflateRest) - getDeflateRest(key); - else - get(key); - break; - } catch (DataNotLoadedException e) { - e.getParent().inflate(e.getKey()); - } - } - } - - /** - ** {@inheritDoc} - ** - ** This implementation just descends the tree, returning the value for the - ** given key if it can be found. 
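The retry idiom in inflate(K, boolean) just above (catch DataNotLoadedException, inflate exactly the missing subtree, try again) is the intended access pattern for all the partially loaded structures here. Isolated as a sketch; the helper class is invented, but the exception calls mirror the deleted code's own usage:

    class LazyLookup {
        static <K, V> V get(SkeletonBTreeMap<K, V> tree, K key)
                throws plugins.Library.util.exec.TaskAbortException {
            for (;;) {
                try {
                    return tree.get(key);               // throws if a subtree is missing
                } catch (DataNotLoadedException e) {
                    e.getParent().inflate(e.getKey());  // pull only the missing piece
                }
            }
        }
    }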
-	* @throws TaskAbortException
-	**
-	** @throws ClassCastException key cannot be compared with the keys
-	**         currently in the map
-	** @throws NullPointerException key is {@code null} and this map uses
-	**         natural order, or its comparator does not tolerate {@code null}
-	**         keys
-	*/
-	public V getDeflateRest(Object k) throws TaskAbortException {
-		K key = (K) k;
-		Node node = root;
-
-		for (;;) {
-			if (node.isLeaf()) {
-				return node.entries.get(key);
-			}
-
-			Node nextnode = node.selectNode(key);
-			if (nextnode == null) {
-				for(Node sub : node.iterNodes()) {
-					if(sub != nextnode && sub instanceof SkeletonBTreeMap.SkeletonNode) {
-						((SkeletonNode)sub).deflate();
-						((SkeletonNode)node).deflate(sub.lkey);
-					}
-				}
-				return node.entries.get(key);
-			}
-
-			node = nextnode;
-		}
-	}
-
-	/**
-	** @param putmap Entries to insert into this map
-	** @param remkey Keys to remove from this map
-	** @see #update(SortedSet, SortedSet, SortedMap, Closure)
-	*/
-	public void update(SortedMap<K, V> putmap, SortedSet<K> remkey) throws TaskAbortException {
-		update(null, remkey, putmap, null, new TaskAbortExceptionConvertor());
-	}
-
-	/**
-	** @param putkey Keys to insert into this map
-	** @param remkey Keys to remove from this map
-	** @param value_handler Closure to retrieve the value for each putkey
-	** @see #update(SortedSet, SortedSet, SortedMap, Closure)
-	*/
-	public <X extends Exception> void update(SortedSet<K> putkey, SortedSet<K> remkey, Closure<Map.Entry<K, V>, X> value_handler, ExceptionConvertor<X> conv) throws TaskAbortException {
-		update(putkey, remkey, null, value_handler, conv);
-	}
-
-	/**
-	** The executor backing {@link #VALUE_EXECUTOR}.
-	*/
-	private static Executor value_exec = null;
-
-	/**
-	* Separate executor for value handlers. Value handlers can themselves call
-	* update() and end up polling, so we need to keep them separate from the
-	* threads that do the actual work! */
-	final public static Executor VALUE_EXECUTOR = new Executor() {
-		/*@Override**/ public void execute(Runnable r) {
-			synchronized (Executors.class) {
-				if (value_exec == null) {
-					value_exec = new ThreadPoolExecutor(
-						0, 0x40, 60, TimeUnit.SECONDS,
-						new LinkedBlockingQueue<Runnable>(),
-						new ThreadPoolExecutor.CallerRunsPolicy()
-					);
-				}
-			}
-			value_exec.execute(r);
-		}
-	};
-
-	/**
-	** The executor backing {@link #DEFLATE_EXECUTOR}.
-	*/
-	private static Executor deflate_exec = null;
-
-	/**
-	* Separate executor for deflating. We don't want update()s to prevent actual
-	* pushes. */
-	final public static Executor DEFLATE_EXECUTOR = new Executor() {
-		/*@Override**/ public void execute(Runnable r) {
-			synchronized (Executors.class) {
-				if (deflate_exec == null) {
-					deflate_exec = new ThreadPoolExecutor(
-						0, 0x40, 60, TimeUnit.SECONDS,
-						new LinkedBlockingQueue<Runnable>(),
-						new ThreadPoolExecutor.CallerRunsPolicy()
-					);
-				}
-			}
-			deflate_exec.execute(r);
-		}
-	};
-
-	/**
-	** Asynchronously updates a remote B-tree. This uses two-pass merge/split
-	** algorithms (as opposed to the one-pass algorithms of the standard {@link
-	** BTreeMap}) since it assumes a copy-on-write backing data store, where
-	** the advantages (concurrency, etc) of one-pass algorithms disappear, and
-	** the disadvantages (unneeded merges/splits) remain.
-	**
-	** Unlike the (ideal design goal of the) inflate/deflate methods, this is
-	** designed to accept only one concurrent update. It is up to the
-	** caller to ensure that this holds. TODO NORM enforce this somehow
-	**
-	** Currently, this method assumes that root.isBare() holds. TODO NORM enforce
-	** this..
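For orientation, the synchronous entry point above is driven roughly like this (hypothetical keys and values; remkey must stay empty since removal is unimplemented, and the tree is expected to be bare beforehand). Assumes the usual java.util imports:

    void merge(SkeletonBTreeMap<String, Integer> tree) throws TaskAbortException {
        SortedMap<String, Integer> put = new TreeMap<String, Integer>();
        put.put("foo", 1);
        put.put("bar", 2);
        SortedSet<String> rem = new TreeSet<String>(); // removal unsupported: keep empty
        tree.update(put, rem);
    }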
- ** - ** This is a wrapper method to deal with the occasional key rejection due to - ** trying to add too many keys to a single node. It also checks parameters. - ** - ** Note: {@code remkey} is not implemented yet. - ** - ** @throws UnsupportedOperationException if {@code remkey} is not empty - */ - protected void update( - SortedSet putkey, SortedSet remkey, - final SortedMap putmap, Closure, X> value_handler, - ExceptionConvertor conv - ) throws TaskAbortException { - - // Check parameters. - - if (value_handler == null) { - // synchronous value callback - null, remkey, putmap, null - assert(putkey == null); - putkey = Sorted.keySet(putmap); - } else { - // asynchronous value callback - putkey, remkey, null, closure - assert(putmap == null); - } - - if (remkey != null && !remkey.isEmpty()) { - throw new UnsupportedOperationException("SkeletonBTreeMap: update() currently only supports merge operations"); - } - - // Handle keys rejected due to node too small for the number of keys we are adding to it. - - while(true) { - - final SortedSet rejected; - if(value_handler == null) - rejected = null; - else - rejected = new TreeSet(); - - update(putkey, putmap, value_handler, conv, rejected); - - if(rejected == null || rejected.isEmpty()) return; - System.err.println("Rejected keys: "+rejected.size()+" - re-running merge with rejected keys."); - putkey = rejected; - } - } - - protected void update( - SortedSet putkey, - final SortedMap putmap, Closure, X> value_handler, - ExceptionConvertor conv, final SortedSet rejected - ) throws TaskAbortException { - - // Avoid polling. - final Notifier notifier = new Notifier(); - - /* - ** The code below might seem confusing at first, because the action of - ** the algorithm on a single node is split up into several asynchronous - ** parts, which are not visually adjacent. Here is a more contiguous - ** description of what happens to a single node between being inflated - ** and then eventually deflated. - ** - ** Life cycle of a node: - ** - ** - node gets popped from proc_pull - ** - enter InflateChildNodes - ** - subnodes get pushed into proc_pull - ** - (recurse for each subnode) - ** - subnode gets popped from proc_pull - ** - etc - ** - split-subnodes get pushed into proc_push - ** - wait for all: split-subnodes get popped from proc_push - ** - enter SplitNode - ** - for each item in the original node's DeflateNode - ** - release the item and acquire it on - ** - the parent's DeflateNode if the item is a separator - ** - a new DeflateNode if the item is now in a split-node - ** - for each split-node: - ** - wait for all: values get popped from proc_val; SplitNode to close() - ** - enter DeflateNode - ** - split-node gets pushed into proc_push (except for root) - ** - */ - - // TODO LOW - currently, the algorithm will automatically completely - // inflate each node as it is pulled in from the network (ie. inflate - // all of its values). this is not necessary, since not all of the - // values may be updated. it would be possible to make it work more - // efficiently, which would save some network traffic, but this would - // be quite fiddly. also, the current way allows the Packer to be more - // aggressive in packing the values into splitfiles. - - // FIXME NORM - currently, there is a potential deadlock issue here, - // because proc_{pull,push,val} all use the same ThreadPoolExecutor to - // execute tasks. if the pool fills up with manager tasks {pull,push} - // then worker tasks {val} cannot execute, resulting in deadlock. 
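The deadlock risk flagged above comes from manager tasks (pull/push) and worker tasks (val) sharing one pool; the rest of this comment sketches the disjoint-pool fix, which would look something like the following. Hypothetical sketch only, with arbitrary sizes; this is not what the current code does:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class SeparatedPools {
        final ExecutorService managers = Executors.newFixedThreadPool(8); // pull/push tasks
        final ExecutorService workers  = Executors.newCachedThreadPool(); // val tasks
    }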
at - // present, this is avoided by having ObjectProcessor.maxconc less than - // the poolsize, but future developers may change this unknowingly. - // - // a proper solution would be to have manager and worker tasks use - // different thread pools, ie. different ThreadPoolExecutors. care - // needs to be taken when doing this because the call can be recursive; - // ie. if the values are also SkeletonBTreeMaps, then the workers might - // start child "manager" tasks by calling value.update() - // - // arguably a better solution would be to not use threads, and use - // the async interface (i should have done this when i first coded it). - // see doc/todo.txt for details - - final ObjectProcessor, SafeClosure, TaskAbortException> proc_pull - = ((ScheduledSerialiser)nsrl).pullSchedule( - new PriorityBlockingQueue>(0x10, CMP_PULL), - new LinkedBlockingQueue, TaskAbortException>>(8), - new HashMap, SafeClosure>() - ); - proc_pull.setNotifier(notifier); - - final ObjectProcessor, CountingSweeper, TaskAbortException> proc_push - = ((ScheduledSerialiser)nsrl).pushSchedule( - new PriorityBlockingQueue>(0x10, CMP_PUSH), - new LinkedBlockingQueue, TaskAbortException>>(0x10), - new HashMap, CountingSweeper>() - ); - proc_push.setNotifier(notifier); - - /** - ** Deposit for a value-retrieval operation - */ - class DeflateNode extends TrackingSweeper> implements Runnable, SafeClosure> { - - /** The node to be pushed, when run() is called. */ - final SkeletonNode node; - /** A closure for the parent node. This is called when all of its children (of - ** which this.node is one) have been pushed. */ - final CountingSweeper parNClo; - private boolean deflated; - - protected DeflateNode(SkeletonNode n, CountingSweeper pnc) { - super(true, true, new TreeSet(comparator), null); - parNClo = pnc; - node = n; - } - - /** - ** Push this node. This is run when this sweeper is cleared, which - ** happens after all the node's local values have been obtained, - ** '''and''' the node has passed through SplitNode (which closes - ** this sweeper). - */ - public void run() { - try { - deflate(); - } catch (TaskAbortException e) { - throw new RuntimeException(e); // FIXME HIGH - } - assert(node.isBare()); - if (parNClo == null) { - // do not deflate the root - assert(node == root); - return; - } - ObjectProcessor.submitSafe(proc_push, new PushTask(node), parNClo); - } - - /** - ** Update the key's value in the node. Runs whenever an entry is - ** popped from proc_val. - */ - public void invoke(Map.Entry en) { - assert(node.entries.containsKey(en.getKey())); - node.entries.put(en.getKey(), en.getValue()); - if(logMINOR) Logger.minor(this, "New value for key "+en.getKey()+" : "+en.getValue()+" in "+node+" parent = "+parNClo); - } - - public void deflate() throws TaskAbortException { - synchronized(this) { - if(deflated) return; - deflated = true; - } - ((SkeletonTreeMap)node.entries).deflate(); - } - - } - - // must be located after DeflateNode's class definition - // The proc_val output queue must comfortably cover everything generated by a single invocation of InflateChildNodes, or else we will deadlock. - // Note that this is all deflated sub-trees, so they are relatively small - some of the other queues - // handle much larger structures in their counters. - final ObjectProcessor, DeflateNode, X> proc_val = (value_handler == null)? 
null - : new ObjectProcessor, DeflateNode, X>( - new BoundedPriorityBlockingQueue>(0x10, CMP_ENTRY), - new LinkedBlockingQueue, X>>(ENT_MAX*3), - new HashMap, DeflateNode>(), - value_handler, VALUE_EXECUTOR, conv, notifier // These can block so pool them separately. - ).autostart(); - - final Comparator CMP_DEFLATE = new Comparator() { - - public int compare(DeflateNode arg0, DeflateNode arg1) { - return arg0.node.compareTo(arg1.node); - } - - }; - - final ObjectProcessor proc_deflate = - new ObjectProcessor( - new BoundedPriorityBlockingQueue(0x10, CMP_DEFLATE), - new LinkedBlockingQueue>(), - new HashMap(), - new Closure() { - - public void invoke(DeflateNode param) throws TaskAbortException { - param.deflate(); - } - - }, DEFLATE_EXECUTOR, new TaskAbortExceptionConvertor(), notifier).autostart(); - - // Limit it to 2 at once to minimise memory usage. - // The actual inserts will occur in parallel anyway. - proc_deflate.setMaxConc(2); - - // Dummy constant for SplitNode - final SortedMap EMPTY_SORTEDMAP = new TreeMap(comparator); - - /** - ** Deposit for a PushTask - */ - class SplitNode extends CountingSweeper implements Runnable { - - /** The node to be split, when run() is called. */ - final SkeletonNode node; - /** The node's parent, where new nodes/keys are attached during the split process. */ - /*final*/ SkeletonNode parent; - /** The value-closure for this node. This keeps track of all unhandled values - * in this node. If this node is split, and the values have not been handled, - * they are passed onto the value-closures of the relevant split nodes (or - * that of the parent node, if the key turns out to be a separator key). */ - final DeflateNode nodeVClo; - /** The closure for the parent node, also a CountingSweeper. We keep a - * reference to this so we can add the new split nodes to it. */ - /*final*/ SplitNode parNClo; - /** The value-closure for the parent node. The comment for nodeVClo explains - * why we need a reference to this. */ - /*final*/ DeflateNode parVClo; - - protected SplitNode(SkeletonNode n, SkeletonNode p, DeflateNode vc, SplitNode pnc, DeflateNode pvc) { - super(true, false); - node = n; - parent = p; - nodeVClo = vc; - parNClo = pnc; - parVClo = pvc; - } - - /** - ** Closes the node's DeflateNode, and (if appropriate) splits the - ** node and updates the deposits for each key moved. This is run - ** after all its children have been deflated. - */ - public void run() { - assert(node.ghosts == node.childCount()); - - // All subnodes have been deflated, so nothing else can possibly add keys - // to this node. - nodeVClo.close(); - - int sk = minKeysFor(node.nodeSize()); - // No need to split - if (sk == 0) { - if (nodeVClo.isCleared()) { - try { - proc_deflate.submit(nodeVClo, nodeVClo.node); - } catch (InterruptedException e) { - // Impossible ? 
- throw new RuntimeException(e); - } - } - return; - } - - if (parent == null) { - assert(parNClo == null && parVClo == null); - // create a new parent, parNClo, parVClo - // similar stuff as for InflateChildNodes but no merging - parent = new SkeletonNode(null, null, false); - parent.addAll(EMPTY_SORTEDMAP, Collections.singleton(node)); - parVClo = new DeflateNode(parent, null); - parNClo = new SplitNode(parent, null, parVClo, null, null); - parNClo.acquire(node); - parNClo.close(); - root = parent; - } - - Collection keys = Sorted.select(Sorted.keySet(node.entries), sk); - parent.split(node.lkey, keys, node.rkey); - Iterable nodes = parent.iterNodes(node.lkey, node.rkey); - - // WORKAROUND unnecessary cast here - bug in SunJDK6; works fine on OpenJDK6 - SortedSet held = (SortedSet)nodeVClo.view(); - // if synchronous, all values should have already been handled - assert(proc_val != null || held.size() == 0); - - // reassign appropriate keys to parent sweeper - for (K key: keys) { - if (!held.contains(key)) { continue; } - reassignKeyToSweeper(key, parVClo); - } - - //System.out.println("parent:"+parent.getRange()+"\nseps:"+keys+"\nheld:"+held); - parNClo.open(); - - // for each split-node, create a sweeper that will run when all its (k,v) - // pairs have been popped from value_complete - for (Node nn: nodes) { - SkeletonNode n = (SkeletonNode)nn; - DeflateNode vClo = new DeflateNode(n, parNClo); - - // reassign appropriate keys to the split-node's sweeper - SortedSet subheld = subSet(held, n.lkey, n.rkey); - //try { - assert(subheld.isEmpty() || compareL(n.lkey, subheld.first()) < 0 && compareR(subheld.last(), n.rkey) < 0); - //} catch (AssertionError e) { - // System.out.println(n.lkey + " " + subheld.first() + " " + subheld.last() + " " + n.rkey); - // throw e; - //} - for (K key: subheld) { - reassignKeyToSweeper(key, vClo); - } - vClo.close(); - // if no keys were added - if (vClo.isCleared()) { - try { - proc_deflate.submit(vClo, vClo.node); - } catch (InterruptedException e) { - // Impossible ? - throw new RuntimeException(e); - } - } - - parNClo.acquire(n); - } - - // original (unsplit) node had a ticket on the parNClo sweeper, release it - parNClo.release(node); - - parNClo.close(); - assert(!parNClo.isCleared()); // we always have at least one node to deflate - } - - /** - ** When we move a key to another node (eg. to the parent, or to a new node - ** resulting from the split), we must deassign it from the original node's - ** sweeper and reassign it to the sweeper for the new node. - ** - ** NOTE: if the overall co-ordinator algorithm is ever made concurrent, - ** this section MUST be made atomic - ** - ** @param key The key - ** @param clo The sweeper to reassign the key to - */ - private void reassignKeyToSweeper(K key, DeflateNode clo) { - clo.acquire(key); - //assert(((UpdateValue)value_closures.get(key)).node == node); - proc_val.update($K(key, (V)null), clo); - // FIXME what if it has already run??? - if(logMINOR) Logger.minor(this, "Reassigning key "+key+" to "+clo+" on "+this+" parent="+parent+" parent split node "+parNClo+" parent deflate node "+parVClo); - // nodeVClo.release(key); - // this is unnecessary since nodeVClo() will only be used if we did not - // split its node (and never called this method) - } - - } - - /** - ** Deposit for a PullTask - */ - class InflateChildNodes implements SafeClosure { - - /** The parent node, passed to closures which need it. */ - final SkeletonNode parent; - /** The keys to put into this node. 
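SplitNode and DeflateNode above both hang off the sweeper machinery (CountingSweeper/TrackingSweeper): acquire a ticket per outstanding child or key, release it as each completes, and fire a continuation once the sweeper has been closed and drained. A toy equivalent (hypothetical class, not the real API):

    class ToySweeper {
        private int open = 0;
        private boolean closed = false;
        private final Runnable onCleared;
        ToySweeper(Runnable r) { onCleared = r; }
        synchronized void acquire() { ++open; }              // one ticket per pending item
        synchronized void release() {
            if (--open == 0 && closed) { onCleared.run(); }  // last ticket fires it
        }
        synchronized void close() {                          // no more tickets will be issued
            closed = true;
            if (open == 0) { onCleared.run(); }
        }
    }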
*/
-		final SortedSet<K> putkey;
-		/** The closure for the parent node, passed to closures which need it. */
-		final SplitNode parNClo;
-		/** The value-closure for the parent node, passed to closures which need it. */
-		final DeflateNode parVClo;
-
-		protected InflateChildNodes(SkeletonNode p, SortedSet<K> ki, SplitNode pnc, DeflateNode pvc) {
-			parent = p;
-			putkey = ki;
-			parNClo = pnc;
-			parVClo = pvc;
-		}
-
-		protected InflateChildNodes(SortedSet<K> ki) {
-			this(null, ki, null, null);
-		}
-
-		/**
-		** Merge the relevant parts of the map into the node, and inflate its
-		** children. Runs whenever a node is popped from proc_pull.
-		*/
-		public void invoke(SkeletonNode node) {
-			assert(compareL(node.lkey, putkey.first()) < 0);
-			assert(compareR(putkey.last(), node.rkey) < 0);
-
-			// FIXME HIGH make this asynchronous
-			try { ((SkeletonTreeMap<K, V>)node.entries).inflate(); } catch (TaskAbortException e) { throw new RuntimeException(e); }
-
-			// closure to be called when all local values have been obtained
-			DeflateNode vClo = new DeflateNode(node, parNClo);
-
-			// closure to be called when all subnodes have been handled
-			SplitNode nClo = new SplitNode(node, parent, vClo, parNClo, parVClo);
-
-			// invalidate every totalSize cache directly after we inflate it
-			node._size = -1;
-
-			// OPT LOW if putkey is empty then skip
-
-			// each key in putkey is either added to the local entries, or delegated to
-			// the relevant child node.
-			if (node.isLeaf()) {
-				// add all keys into the node, since there are no children.
-
-				if (proc_val == null) {
-					// OPT: could use a splice-merge here. for TreeMap, there is not an
-					// easy way of doing this, nor will it likely make a lot of a difference.
-					// however, if we re-implement SkeletonNode, this might become relevant.
-					for (K key: putkey) {
-						if(putmap.get(key) == null) throw new NullPointerException();
-						node.entries.put(key, putmap.get(key));
-					}
-				} else {
-					if(putkey.size() < proc_val.outputCapacity()) {
-						for (K key: putkey) { handleLocalPut(node, key, vClo); }
-					} else {
-						// Add as many as we can, then put the rest into rejected.
-						TreeSet<K> accepted = new TreeSet<K>(Sorted.select(putkey, proc_val.outputCapacity()/2));
-						List<SortedSet<K>> notAccepted = Sorted.split(putkey, accepted, new TreeSet<K>()); // FIXME NullSet ???
-						System.err.println("Too many keys to add to a single node: We can add "+proc_val.outputCapacity()+" but are being asked to add "+putkey.size()+" - adding "+accepted.size()+" and rejecting the rest.");
-						for (K key: accepted) { handleLocalPut(node, key, vClo); }
-						synchronized(rejected) {
-							for(SortedSet<K> set : notAccepted) {
-								rejected.addAll(set);
-							}
-						}
-					}
-				}
-
-			} else {
-				// only add keys that already exist locally in the node. other keys
-				// are delegated to the relevant child node.
-
-				SortedSet<K> fkey = new TreeSet<K>(comparator);
-				Iterable<SortedSet<K>> range = Sorted.split(putkey, Sorted.keySet(node.entries), fkey);
-
-				if (proc_val == null) {
-					for (K key: fkey) {
-						if(putmap.get(key) == null) throw new NullPointerException();
-						node.entries.put(key, putmap.get(key));
-					}
-				} else {
-					for (K key: fkey) { handleLocalPut(node, key, vClo); }
-				}
-
-				for (SortedSet<K> rng: range) {
-					GhostNode n;
-					try {
-						n = (GhostNode)node.selectNode(rng.first());
-					} catch (ClassCastException e) {
-						// This has been seen in practice. I have no idea what it means.
-						// FIXME HIGH !!!
- Logger.error(this, "Node is already loaded?!?!?!: "+node.selectNode(rng.first())); - continue; - } - PullTask task = new PullTask(n); - - // possibly re-design CountingSweeper to not care about types, or have acquire() instead - nClo.acquire((SkeletonNode)null); // dirty hack. FIXME LOW - // WORKAROUND unnecessary cast here - bug in SunJDK6; works fine on OpenJDK6 - ObjectProcessor.submitSafe(proc_pull, task, (SafeClosure)new InflateChildNodes(node, rng, nClo, vClo)); - } - } - - nClo.close(); - if (nClo.isCleared()) { nClo.run(); } // eg. if no child nodes need to be modified - } - - /** - ** Handle a planned local put to the node. - ** - ** Keys added locally have their values set to null. These will be updated - ** with the correct values via UpdateValue, when the value-getter completes - ** its operation on the key. We add the keys now, **before** the value is - ** obtained, so that SplitNode can work out how to split the node as early - ** as possible. - ** - ** @param n The node to put the key into - ** @param key The key - ** @param vClo The sweeper that tracks if the key's value has been obtained - */ - private void handleLocalPut(SkeletonNode n, K key, DeflateNode vClo) { - V oldval = n.entries.put(key, null); - vClo.acquire(key); - if(logMINOR) Logger.minor(this, "handleLocalPut for key "+key+" old value "+oldval+" for deflate node "+vClo+" - passing to proc_val"); - ObjectProcessor.submitSafe(proc_val, $K(key, oldval), vClo); - } - - private void handleLocalRemove(SkeletonNode n, K key, TrackingSweeper> vClo) { - throw new UnsupportedOperationException("not implemented"); - } - - } - - proc_pull.setName("pull"); - proc_push.setName("push"); - if (proc_val != null) { proc_val.setName("val"); } - proc_deflate.setName("deflate"); - - try { - - (new InflateChildNodes(putkey)).invoke((SkeletonNode)root); - - // FIXME HIGH make a copy of the deflated root so that we can restore it if the - // operation fails - int olds = size; - - boolean progress = true; - int count = 0; - int ccount = 0; - - do { - - //System.out.println(System.identityHashCode(this) + " " + proc_pull + " " + proc_push + " " + ((proc_val == null)? "": proc_val)); - - // Only sleep if we run out of jobs. - if((!progress) && (count++ > 10)) { - count = 0; -// if(ccount++ > 10) { - System.out.println(/*System.identityHashCode(this) + " " + */proc_val + " " + proc_pull + " " + proc_push+ " "+proc_deflate); -// ccount = 0; -// } - notifier.waitUpdate(1000); - } - progress = false; - - boolean loop = false; - while (proc_push.hasCompleted()) { - X3, CountingSweeper, TaskAbortException> res = proc_push.accept(); - PushTask task = res._0; - CountingSweeper sw = res._1; - TaskAbortException ex = res._2; - if (ex != null) { - // FIXME HIGH - throw new UnsupportedOperationException("SkeletonBTreeMap.update(): PushTask aborted; handler not implemented yet", ex); - } - - postPushTask(task, ((SplitNode)sw).node); - sw.release(task.data); - if (sw.isCleared()) { ((Runnable)sw).run(); } - loop = true; - progress = true; - } - if(loop) continue; - - //System.out.println(System.identityHashCode(this) + " " + proc_push + " " + ((proc_val == null)? 
"": proc_val+ " ") + proc_pull); - - if(proc_deflate.hasCompleted()) { - X3 res = proc_deflate.accept(); - DeflateNode sw = res._0; - TaskAbortException ex = res._2; - if(ex != null) - // FIXME HIGH - throw ex; - sw.run(); - progress = true; - continue; - } - if(loop) continue; - - if (proc_val != null && proc_val.hasCompleted()) { - X3, DeflateNode, X> res = proc_val.accept(); - Map.Entry en = res._0; - DeflateNode sw = res._1; - X ex = res._2; - if (ex != null) { - // FIXME HIGH - throw new UnsupportedOperationException("SkeletonBTreeMap.update(): value-retrieval aborted; handler not implemented yet", ex); - } - - sw.invoke(en); - sw.release(en.getKey()); - if (sw.isCleared()) { - proc_deflate.submit(sw, sw.node); - } - progress = true; - continue; - } - - if (proc_pull.hasCompleted()) { - X3, SafeClosure, TaskAbortException> res = proc_pull.accept(); - PullTask task = res._0; - SafeClosure clo = res._1; - TaskAbortException ex = res._2; - if (ex != null) { - // FIXME HIGH - throw new UnsupportedOperationException("SkeletonBTreeMap.update(): PullTask aborted; handler not implemented yet", ex); - } - - SkeletonNode node = postPullTask(task, ((InflateChildNodes)clo).parent); - clo.invoke(node); - progress = true; - continue; - } - - } while (proc_pull.hasPending() || proc_push.hasPending() || (proc_val != null && proc_val.hasPending()) || proc_deflate.hasPending()); - - size = root.totalSize(); - - } catch (RuntimeException e) { - throw new TaskAbortException("Task failed", e); - } catch (DataFormatException e) { - throw new TaskAbortException("Bad data format", e); - } catch (InterruptedException e) { - throw new TaskAbortException("interrupted", e); - } finally { - proc_pull.close(); - proc_push.close(); - if (proc_val != null) { proc_val.close(); } - proc_deflate.close(); - } - - } - - - /** - ** Creates a translator for the nodes of the B-tree. This method is - ** necessary because {@link NodeTranslator} is a non-static class. - ** - ** For an in-depth discussion on why that class is not static, see the - ** class description for {@link BTreeMap.Node}. - ** - ** TODO LOW maybe store these in a WeakHashSet or something... will need to - ** code equals() and hashCode() for that - ** - ** @param ktr Translator for the keys - ** @param mtr Translator for each node's local entries map - */ - public NodeTranslator makeNodeTranslator(Translator ktr, Translator, R> mtr) { - return new NodeTranslator(ktr, mtr); - } - - /************************************************************************ - ** DOCUMENT. - ** - ** For an in-depth discussion on why this class is not static, see the - ** class description for {@link BTreeMap.Node}. - ** - ** @param Target type of key-translator - ** @param Target type of map-translater - ** @author infinity0 - */ - public class NodeTranslator implements Translator> { - - /** - ** An optional translator for the keys. - */ - final Translator ktr; - - /** - ** An optional translator for each node's local entries map. - */ - final Translator, R> mtr; - - public NodeTranslator(Translator k, Translator, R> m) { - ktr = k; - mtr = m; - } - - /*@Override**/ public Map app(SkeletonNode node) { - if (!node.isBare()) { - throw new IllegalStateException("Cannot translate non-bare node " + node.getRange() + "; size:" + node.nodeSize() + "; ghosts:" + node.ghosts + "; root:" + (root == node)); - } - Map map = new LinkedHashMap(); - map.put("lkey", (ktr == null)? node.lkey: ktr.app(node.lkey)); - map.put("rkey", (ktr == null)? 
node.rkey: ktr.app(node.rkey)); - map.put("entries", (mtr == null)? node.entries: mtr.app((SkeletonTreeMap)node.entries)); - - if (!node.isLeaf()) { - Map subnodes = new LinkedHashMap(); - for (X3 next: node.iterNodesK()) { - GhostNode gh = (GhostNode)(next._1); - subnodes.put(gh.getMeta(), gh.totalSize()); - } - map.put("subnodes", subnodes); - } - return map; - } - - /*@Override**/ public SkeletonNode rev(Map map) throws DataFormatException { - try { - boolean notleaf = map.containsKey("subnodes"); - List gh = null; - if (notleaf) { - Map subnodes = (Map)map.get("subnodes"); - gh = new ArrayList(subnodes.size()); - for (Map.Entry en: subnodes.entrySet()) { - GhostNode ghost = new GhostNode(null, null, null, en.getValue()); - ghost.setMeta(en.getKey()); - gh.add(ghost); - } - } - SkeletonNode node = new SkeletonNode( - (ktr == null)? (K)map.get("lkey"): ktr.rev((Q)map.get("lkey")), - (ktr == null)? (K)map.get("rkey"): ktr.rev((Q)map.get("rkey")), - !notleaf, - (mtr == null)? (SkeletonTreeMap)map.get("entries") - : mtr.rev((R)map.get("entries")), - gh - ); - if (!node.isBare()) { - throw new IllegalStateException("map-translator did not produce a bare SkeletonTreeMap: " + mtr); - } - - verifyNodeIntegrity(node); - return node; - } catch (ClassCastException e) { - throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); - } catch (IllegalArgumentException e) { - throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); - } catch (IllegalStateException e) { - throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); - } - } - - } - - - /************************************************************************ - ** {@link Translator} with access to the members of {@link BTreeMap}. - ** DOCUMENT. - ** - ** @author infinity0 - */ - public static class TreeTranslator implements Translator, Map> { - - final Translator ktr; - final Translator, ?> mtr; - - public TreeTranslator(Translator k, Translator, ?> m) { - ktr = k; - mtr = m; - } - - /*@Override**/ public Map app(SkeletonBTreeMap tree) { - if (tree.comparator() != null) { - throw new UnsupportedOperationException("Sorry, this translator does not (yet) support comparators"); - } - Map map = new LinkedHashMap(); - map.put("node_min", tree.NODE_MIN); - map.put("size", tree.size); - Map rmap = tree.makeNodeTranslator(ktr, mtr).app((SkeletonBTreeMap.SkeletonNode)tree.root); - map.put("entries", rmap.get("entries")); - if (!tree.root.isLeaf()) { - map.put("subnodes", rmap.get("subnodes")); - } - return map; - } - - /*@Override**/ public SkeletonBTreeMap rev(Map map) throws DataFormatException { - try { - SkeletonBTreeMap tree = new SkeletonBTreeMap((Integer)map.get("node_min")); - tree.size = (Integer)map.get("size"); - // map.put("lkey", null); // NULLNOTICE: get() gives null which matches - // map.put("rkey", null); // NULLNOTICE: get() gives null which matches - tree.root = tree.makeNodeTranslator(ktr, mtr).rev(map); - if (tree.size != tree.root.totalSize()) { - throw new DataFormatException("Mismatched sizes - tree: " + tree.size + "; root: " + tree.root.totalSize(), null, null); - } - return tree; - } catch (ClassCastException e) { - throw new DataFormatException("Could not build SkeletonBTreeMap from data", e, map, null, null); - } - } - - } - - public String toString() { - // Default toString for an AbstractCollection dumps everything underneath it. - // We don't want that here, especially as they may not be loaded. 
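All the Translator pairs here follow the same shape: app() projects the live structure down to a serialisable form, and rev() rebuilds it, rejecting malformed input (the real implementations wrap failures in DataFormatException). The smallest possible instance of the pattern, purely illustrative and not tied to the B-tree:

    class IntStringTranslator {
        public String app(Integer n) { return Integer.toString(n); }
        public Integer rev(String s) {
            return Integer.valueOf(s); // throws NumberFormatException on bad input
        }
    }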
- return getClass().getName() + "@" + System.identityHashCode(this)+":size="+size; - } - - /** - * FIXME implement entrySet() along similar lines - auto-activation and auto-deactivation. - * keySet() achieves a slightly different purpose. It never activates values. - * - * NOTE: This is not called keySet() because some callers may not want us to auto-deflate. - */ - private Set keySet = null; - public Set keySetAutoDeflate() { - if (keySet == null) { - keySet = new AbstractSet() { - - @Override public int size() { return SkeletonBTreeMap.this.size(); } - - @Override public Iterator iterator() { - // FIXME LOW - this does NOT yet throw ConcurrentModificationException - // use a modCount counter - return new Iterator() { - - Stack nodestack = new Stack(); - Stack>> itstack = new Stack>>(); - - SkeletonNode cnode = (SkeletonNode)(SkeletonBTreeMap.this.root); - Iterator> centit = cnode.entries.entrySet().iterator(); - - K lastkey = null; - boolean removeok = false; - - - // DEBUG ONLY, remove when unneeded - /*public String toString() { - StringBuilder s = new StringBuilder(); - for (Node n: nodestack) { - s.append(n.getRange()).append(", "); - } - return "nodestack: [" + s + "]; cnode: " + cnode.getRange() + "; lastkey: " + lastkey; - }*/ - - /*@Override**/ public boolean hasNext() { - // TODO LOW ideally iterate in the reverse order - for (Iterator> it: itstack) { - if (it.hasNext()) { return true; } - } - if (centit.hasNext()) { return true; } - if (!cnode.isLeaf()) { return true; } - try { - deflate(); - } catch (TaskAbortException e) { - throw new RuntimeException(e); - } - return false; - } - - /*@Override**/ public K next() { - if (cnode.isLeaf()) { - while (!centit.hasNext()) { - if (nodestack.empty()) { - assert(itstack.empty()); - throw new NoSuchElementException(); - } - SkeletonNode parent = nodestack.pop(); - try { - cnode.deflate(); - parent.deflate(cnode.lkey); - } catch (TaskAbortException e) { - throw new RuntimeException(e); - } - cnode = parent; - centit = itstack.pop(); - } - } else { - while (!cnode.isLeaf()) { - // NULLNOTICE lastkey initialised to null, so this will get the right node - // even at the smaller edge of the map - Node testnode = cnode.rnodes.get(lastkey); - if(testnode.isGhost()) { - try { - cnode.inflate(testnode.lkey, false); - } catch (TaskAbortException e) { - throw new RuntimeException(e); - } - testnode = cnode.rnodes.get(lastkey); - assert(testnode instanceof SkeletonBTreeMap.SkeletonNode); - } - assert(!testnode.isGhost()); - SkeletonNode n = (SkeletonNode) testnode; - // node OK, proceed - nodestack.push(cnode); - itstack.push(centit); - cnode = n; - centit = cnode.entries.entrySet().iterator(); - } - } - if(!centit.hasNext()) { - try { - deflate(); - } catch (TaskAbortException e) { - throw new RuntimeException(e); - } - // We will throw but we may as well clean up in case anyone decides to be clever. - } - K next = centit.next().getKey(); lastkey = next; - removeok = true; - return next; - } - - /*@Override**/ public void remove() { - throw new UnsupportedOperationException(); - // FIXME -// if (!removeok) { -// throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed."); -// } -// // OPT LOW this could probably be a *lot* more efficient... -// BTreeMap.this.remove(lastkey); -// -// // we need to find our position in the tree again, since there may have -// // been structural modifications to it. 
-// // OPT LOW have a "structual modifications counter" that is incremented by -// // split, merge, rotateL, rotateR, and do the below only if it changes -// nodestack.clear(); -// itstack.clear(); -// cnode = BTreeMap.this.root; -// centit = cnode.entries.entrySet().iterator(); -// -// K stopkey = null; -// while(!cnode.isLeaf()) { -// stopkey = findstopkey(stopkey); -// nodestack.push(cnode); -// itstack.push(centit); -// // NULLNOTICE stopkey initialised to null, so this will get the right node -// // even at the smaller edge of the map -// cnode = cnode.rnodes.get(stopkey); -// centit = cnode.entries.entrySet().iterator(); -// } -// -// lastkey = findstopkey(stopkey); -// removeok = false; - } - - private K findstopkey(K stopkey) { - SortedMap headmap = cnode.entries.headMap(lastkey); - if (!headmap.isEmpty()) { - stopkey = headmap.lastKey(); - while (compare(centit.next().getKey(), stopkey) < 0); - } - return stopkey; - } - - }; - } - - @Override public void clear() { - SkeletonBTreeMap.this.clear(); - } - - @Override public boolean contains(Object o) { - Object value = SkeletonBTreeMap.this.get(o); - return value != null; - } - - @Override public boolean remove(Object o) { - if (contains(o)) { - SkeletonBTreeMap.this.remove(o); - return true; - } - return false; - } - - }; - } - return keySet; - } - - - -} diff --git a/src/plugins/Library/util/SkeletonBTreeSet.java b/src/plugins/Library/util/SkeletonBTreeSet.java deleted file mode 100644 index 33647eec..00000000 --- a/src/plugins/Library/util/SkeletonBTreeSet.java +++ /dev/null @@ -1,177 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import plugins.Library.io.DataFormatException; -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.IterableSerialiser; -import plugins.Library.io.serial.MapSerialiser; -import plugins.Library.io.serial.Translator; -import plugins.Library.util.exec.TaskAbortException; - -import java.util.Comparator; -import java.util.Collection; -import java.util.Map; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.ArrayList; - -/** -** {@link Skeleton} of a {@link BTreeSet}. DOCUMENT -** -** TODO LOW make this implement Skeleton, ?> ? - ie. -** deflate(int, int) to get the relevant subset? -** -** @author infinity0 -*/ -public class SkeletonBTreeSet extends BTreeSet /*implements Skeleton>*/ { - - // TODO NORM when we implement internal_entries in SkeletonBTreeMap, then - // we can have it automatically set to TRUE here. - - public void setSerialiser(IterableSerialiser.SkeletonNode> n, MapSerialiser v) { - ((SkeletonBTreeMap)bkmap).setSerialiser(n, v); - } - - public SkeletonBTreeSet(Comparator cmp, int node_min) { - super(new SkeletonBTreeMap(cmp, node_min)); - } - - public SkeletonBTreeSet(int node_min) { - super(new SkeletonBTreeMap(node_min)); - } - - protected SkeletonBTreeSet(SkeletonBTreeMap m) { - super(m); - } - - /*======================================================================== - public interface Skeleton TODO NORM actually implement this... 
-	  ========================================================================*/
-
-	public boolean isBare() {
-		return ((SkeletonBTreeMap<E, E>)bkmap).isBare();
-	}
-
-	public boolean isLive() {
-		return ((SkeletonBTreeMap<E, E>)bkmap).isLive();
-	}
-
-	public void deflate() throws TaskAbortException {
-		((SkeletonBTreeMap<E, E>)bkmap).deflate();
-	}
-
-	public void inflate() throws TaskAbortException {
-		((SkeletonBTreeMap<E, E>)bkmap).inflate();
-	}
-
-	// TODO NORM tidy this - see SkeletonBTreeMap.inflate() for details
-	public plugins.Library.util.exec.BaseCompositeProgress getProgressInflate() {
-		return ((SkeletonBTreeMap<E, E>)bkmap).pr_inf;
-	}
-
-	// TODO NORM tidy this, etc. note that we don't need async update() because
-	// we don't have a value that can be updated (it's always just the key)
-	public void update(SortedSet<E> putset, SortedSet<E> remset) throws TaskAbortException {
-		((SkeletonBTreeMap<E, E>)bkmap).update(null, remset, new SortedSetMap<E, SortedSet<E>>(putset), null, new TaskAbortExceptionConvertor());
-	}
-
-	/**
-	** Creates a translator for the nodes of the B-tree. This method just calls
-	** the {@link SkeletonBTreeMap#makeNodeTranslator(Translator, Translator)
-	** corresponding method} for the map backing this set.
-	**
-	** @param ktr Translator for the keys
-	** @param mtr Translator for each node's local entries map
-	*/
-	public <Q, R> SkeletonBTreeMap<E, E>.NodeTranslator<Q, R> makeNodeTranslator(Translator<E, Q> ktr, Translator<SkeletonTreeMap<E, E>, R> mtr) {
-		return ((SkeletonBTreeMap<E, E>)bkmap).makeNodeTranslator(ktr, mtr);
-	}
-
-
-	/************************************************************************
-	** {@link Translator} between a {@link Collection} and a {@link Map} of
-	** keys to themselves. Used by {@link TreeSetTranslator}.
-	**
-	** OPT HIGH make this return a *view* instead of a copy
-	**
-	** @author infinity0
-	*/
-	abstract public static class MapCollectionTranslator<E, M extends SkeletonTreeMap<E, E>, C extends Collection<E>>
-	implements Translator<M, C> {
-
-		public static <E, M extends SkeletonTreeMap<E, E>, C extends Collection<E>> C app(M src, C dst) {
-			for (E k: src.keySet()) { dst.add(k); }
-			return dst;
-		}
-
-		public static <E, M extends SkeletonTreeMap<E, E>, C extends Collection<E>> M rev(C src, M dst) {
-			for (E e: src) { dst.putGhost(e, e); }
-			return dst;
-		}
-
-	}
-
-
-	/************************************************************************
-	** Translator for the {@link java.util.TreeMap} backing a {@link
-	** java.util.TreeSet}. This will turn a map into a list and vice versa,
-	** so that the serialised representation doesn't store duplicates.
-	**
-	** maybe this belongs in a SkeletonTreeSet? if that's ever implemented..
-	**
-	** @author infinity0
-	*/
-	public static class TreeSetTranslator<E>
-	extends MapCollectionTranslator<E, SkeletonTreeMap<E, E>, Collection<E>> {
-
-		public Collection<E> app(SkeletonTreeMap<E, E> src) {
-			Collection<E> dst = new ArrayList<E>(src.size());
-			app(src, dst);
-			return dst;
-		}
-
-		public SkeletonTreeMap<E, E> rev(Collection<E> src) {
-			// TODO LOW make it use a comparator that can be passed to the constructor
-			SkeletonTreeMap<E, E> dst = new SkeletonTreeMap<E, E>();
-			rev(src, dst);
-			assert(dst.isBare());
-			return dst;
-		}
-
-	}
-
-
-	/************************************************************************
-	** {@link Translator} with access to the members of {@link BTreeSet}.
-	**
-	** This implementation just serialises the map backing the set.
-	**
-	** @author infinity0
-	*/
-	public static class TreeTranslator<E> implements Translator<SkeletonBTreeSet<E>, Map<String, Object>> {
-
-		final SkeletonBTreeMap.TreeTranslator<E, E> trans;
-
-		public TreeTranslator(Translator<E, ?> k, Translator<SkeletonTreeMap<E, E>, ?
extends Collection<E>> m) {
-			trans = new SkeletonBTreeMap.TreeTranslator<E, E>(k, m);
-		}
-
-		/*@Override**/ public Map<String, Object> app(SkeletonBTreeSet<E> tree) {
-			return trans.app((SkeletonBTreeMap<E, E>)tree.bkmap);
-		}
-
-		/*@Override**/ public SkeletonBTreeSet<E> rev(Map<String, Object> tree) throws DataFormatException {
-			return new SkeletonBTreeSet<E>(trans.rev(tree));
-		}
-
-	}
-
-	public String toString() {
-		// Default toString for an AbstractCollection dumps everything underneath it.
-		// We don't want that here, especially as they may not be loaded.
-		return getClass().getName() + "@" + System.identityHashCode(this);
-	}
-
-}
diff --git a/src/plugins/Library/util/SkeletonMap.java b/src/plugins/Library/util/SkeletonMap.java
deleted file mode 100644
index 0e4bc7a1..00000000
--- a/src/plugins/Library/util/SkeletonMap.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.util;
-
-import plugins.Library.io.serial.MapSerialiser;
-import plugins.Library.util.exec.TaskAbortException;
-
-import java.util.Map;
-
-/**
-** A {@link Skeleton} of a {@link Map}.
-**
-** @author infinity0
-*/
-public interface SkeletonMap<K, V> extends Map<K, V>, Skeleton<K, MapSerialiser<K, V>> {
-
-	/**
-	** {@inheritDoc}
-	**
-	** In other words, for all keys k: {@link Map#get(Object) get(k)} must
-	** return the appropriate value.
-	*/
-	/*@Override**/ public boolean isLive();
-
-	/**
-	** {@inheritDoc}
-	**
-	** In other words, for all keys k: {@link Map#get(Object) get(k)} must
-	** throw {@link DataNotLoadedException}.
-	*/
-	/*@Override**/ public boolean isBare();
-
-	/*@Override**/ public MapSerialiser<K, V> getSerialiser();
-
-	/**
-	** {@inheritDoc}
-	**
-	** For a {@code SkeletonMap}, this inflates the value for a key so that
-	** after the method call, {@link Map#get(Object) get(key)} will not throw
-	** {@link DataNotLoadedException}.
-	**
-	** @param key The key whose value to inflate.
-	*/
-	/*@Override**/ public void inflate(K key) throws TaskAbortException;
-
-	/**
-	** {@inheritDoc}
-	**
-	** For a {@code SkeletonMap}, this deflates the value for a key so that
-	** after the method call, {@link Map#get(Object) get(k)} will throw a
-	** {@link DataNotLoadedException}.
-	**
-	** @param key The key whose value to deflate.
-	*/
-	/*@Override**/ public void deflate(K key) throws TaskAbortException;
-
-}
diff --git a/src/plugins/Library/util/SkeletonTreeMap.java b/src/plugins/Library/util/SkeletonTreeMap.java
deleted file mode 100644
index 866fb2bc..00000000
--- a/src/plugins/Library/util/SkeletonTreeMap.java
+++ /dev/null
@@ -1,949 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL.
*/ -package plugins.Library.util; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.Translator; -import plugins.Library.io.serial.MapSerialiser; -import plugins.Library.io.DataFormatException; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.util.exec.TaskCompleteException; - -import java.util.Iterator; -import java.util.Comparator; -import java.util.Collection; -import java.util.Set; -import java.util.Map; -import java.util.AbstractCollection; -import java.util.AbstractSet; -import java.util.AbstractMap; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.HashMap; - -import freenet.support.Logger; - -/** -** A {@link SkeletonMap} of a {@link TreeMap}. DOCUMENT -** -** Note: this implementation, like {@link TreeMap}, is not thread-safe. -** -** TODO NORM "issub" should not be necessary, but need to check bounds on -** submaps before trying to remove an item. -** -** @author infinity0 -*/ -public class SkeletonTreeMap extends TreeMap -implements Map, SortedMap, SkeletonMap, Cloneable { - - // We don't actually use any of the methods or fields of TreeMap directly, - // but we need to extend it to pretend that this "is" a TreeMap. - - /** - ** The backing map. This map is only ever modified structurally - ie. the - ** {@link SkeletonValue} for a given key is never overwritten; only its - ** contents are. This ensures correct behaviour for the {@link - ** UnwrappingIterator} class. - */ - final protected TreeMap> skmap; - - /** - ** The meta data for this skeleton. - */ - protected Object mapmeta; - - /** - ** Keeps track of the number of ghost values in the map. - */ - protected transient int ghosts; - - public SkeletonTreeMap() { - skmap = new TreeMap>(); - } - - public SkeletonTreeMap(Comparator c) { - skmap = new TreeMap>(c); - } - - public SkeletonTreeMap(Map m) { - skmap = new TreeMap>(); - putAll(m); - } - - public SkeletonTreeMap(SortedMap m) { - skmap = new TreeMap>(m.comparator()); - putAll(m); - } - - public SkeletonTreeMap(SkeletonTreeMap m) { - skmap = new TreeMap>(m.comparator()); - for (Map.Entry> en: m.skmap.entrySet()) { - skmap.put(en.getKey(), en.getValue().clone()); - } - ghosts = m.ghosts; - } - - /** - ** Set the metadata for the {@link SkeletonValue} for a given key. - */ - public Object putGhost(K key, Object o) { - if(key == null) throw new IllegalArgumentException("No null keys"); - if (o == null) { - throw new IllegalArgumentException("Cannot put a null dummy into the map. 
Use put(K, V) to mark an object as loaded."); - } - SkeletonValue sk = skmap.get(key); - if (sk == null) { - skmap.put(key, sk = new SkeletonValue(null, o, false)); - ++ghosts; // One new ghost - return null; - } else { - if (sk.isLoaded()) { ++ghosts; } - return sk.setGhost(o); - } - } - - protected MapSerialiser serialiser; - public void setSerialiser(MapSerialiser s) { - if (serialiser != null && !isLive()) { - throw new IllegalStateException("Cannot change the serialiser when the structure is not live."); - } - serialiser = s; - } - - public static void swapKey(K key, SkeletonTreeMap src, SkeletonTreeMap dst) { - if (dst.containsKey(key)) { throw new IllegalArgumentException("SkeletonTreeMap.swapKey: key " + key + " already exists in target map"); } - if (!src.containsKey(key)) { throw new IllegalArgumentException("SkeletonTreeMap.swapKey: key " + key + " does not exist in source map"); } - SkeletonValue sk = src.skmap.remove(key); - if (!sk.isLoaded()) { --src.ghosts; ++dst.ghosts; } - dst.skmap.put(key, sk); - } - - /*======================================================================== - public interface SkeletonMap - ========================================================================*/ - - /*@Override**/ public boolean isLive() { - return ghosts == 0; - } - - /*@Override**/ public boolean isBare() { - return ghosts == skmap.size(); - } - - /*@Override**/ public MapSerialiser getSerialiser() { - return serialiser; - } - - /*@Override**/ public Object getMeta() { - return mapmeta; - } - - /*@Override**/ public void setMeta(Object m) { - mapmeta = m; - } - - /*@Override**/ public void inflate() throws TaskAbortException { - if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } - if (isLive()) { return; } - - Map> tasks = new HashMap>(size()<<1); - // load only the tasks that need pulling - for (Map.Entry> en: skmap.entrySet()) { - SkeletonValue skel = en.getValue(); - if (skel.isLoaded()) { continue; } - tasks.put(en.getKey(), new PullTask(skel.meta())); - } - serialiser.pull(tasks, mapmeta); - - for (Map.Entry> en: tasks.entrySet()) { - assert(skmap.get(en.getKey()) != null); - // TODO NORM atm old metadata is retained, could update? - put(en.getKey(), en.getValue().data); - } - } - - /*@Override**/ public void deflate() throws TaskAbortException { - if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } - if (isBare()) { return; } - - Map> tasks = new HashMap>(size()<<1); - // load all the tasks, most MapSerialisers need this info - for (Map.Entry> en: skmap.entrySet()) { - SkeletonValue skel = en.getValue(); - if(skel.data() == null) { - if(skel.isLoaded()) - // FIXME is a null loaded value legal? Should it be? Disallowing nulls is not an unreasonable policy IMHO... - // Currently nulls are used in SkeletonBTreeMap.update() as placeholders, but we do not want them to be serialised... - throw new TaskAbortException("Skeleton value in "+this+" : isLoaded() = true, data = null, meta = "+skel.meta()+" for "+en.getKey(), new NullPointerException()); - else if(skel.meta() == null) - throw new TaskAbortException("Skeleton value in "+this+" : isLoaded() = false, data = null, meta = null for "+en.getKey(), new NullPointerException()); - else { - // MapSerialiser understands PushTask's where data == null and metadata != null - // So just let it through. - // It will be fine after push() has completed. 
-				}
-			}
-			tasks.put(en.getKey(), new PushTask<V>(skel.data(), skel.meta()));
-			if (skel.data() instanceof SkeletonBTreeSet) {
-				// NOTE debug leftover; ss is never used
-				SkeletonBTreeSet ss = (SkeletonBTreeSet)skel.data();
-			}
-		}
-		serialiser.push(tasks, mapmeta);
-
-		for (Map.Entry<K, PushTask<V>> en: tasks.entrySet()) {
-			assert(skmap.get(en.getKey()) != null);
-			putGhost(en.getKey(), en.getValue().meta);
-		}
-	}
-
-	/*@Override**/ public void inflate(K key) throws TaskAbortException {
-		if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); }
-
-		SkeletonValue<V> skel = skmap.get(key);
-		if (skel == null) {
-			throw new IllegalArgumentException("Key " + key + " does not belong to the map");
-		} else if (skel.isLoaded()) {
-			return;
-		}
-
-		Map<K, PullTask<V>> tasks = new HashMap<K, PullTask<V>>();
-		tasks.put(key, new PullTask<V>(skel.meta()));
-
-		try {
-			serialiser.pull(tasks, mapmeta);
-
-			put(key, tasks.remove(key).data);
-			// TODO NORM atm old metadata is retained, could update?
-			if (tasks.isEmpty()) { return; }
-
-			for (Map.Entry<K, PullTask<V>> en: tasks.entrySet()) {
-				// other keys may also have been inflated, so add them, but only if the
-				// generated metadata match.
-				PullTask<V> t = en.getValue();
-				if(t.data == null)
-					throw new DataFormatException("Inflate got null from PullTask for "+key+" on "+this, null, null, tasks, en.getKey());
-				SkeletonValue<V> sk = skmap.get(en.getKey());
-				if (sk == null) { throw new DataFormatException("SkeletonTreeMap got unexpected extra data from the serialiser.", null, sk, tasks, en.getKey()); }
-				if (sk.meta().equals(t.meta)) {
-					if (!sk.isLoaded()) { --ghosts; }
-					sk.set(t.data);
-				}
-				// if they don't match, then the data was only partially inflated
-				// (note: at the time of coding, only SplitPacker does this, and
-				// that is deprecated) so ignore for now. (needs a new data
-				// structure to allow partial inflates of values...)
-			}
-
-		} catch (TaskCompleteException e) {
-			assert(skmap.get(key).isLoaded());
-		} catch (DataFormatException e) {
-			throw new TaskAbortException("Could not complete inflate operation", e);
-		}
-	}
-
-	/*@Override**/ public void deflate(K key) throws TaskAbortException {
-		if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); }
-
-		SkeletonValue<V> skel = skmap.get(key);
-		if (skel == null || !skel.isLoaded()) { return; }
-
-		Map<K, PushTask<V>> tasks = new HashMap<K, PushTask<V>>(size()<<1);
-		// add the data we want to deflate, plus all already-deflated data
-		// OPTIMISE think of a way to let the serialiser signal what
-		// information it needs? will be quite painful to implement that...
-		tasks.put(key, new PushTask<V>(skel.data(), skel.meta()));
-		for (Map.Entry<K, SkeletonValue<V>> en: skmap.entrySet()) {
-			skel = en.getValue();
-			if (skel.isLoaded()) { continue; }
-			assert(skel.data() == null);
-			tasks.put(en.getKey(), new PushTask<V>(null, skel.meta()));
-		}
-		serialiser.push(tasks, mapmeta);
-
-		for (Map.Entry<K, PushTask<V>> en: tasks.entrySet()) {
-			assert(skmap.get(en.getKey()) != null);
-			// the serialiser may have pulled and re-pushed some of the tasks;
-			// eg. Packer does this. so set the metadata for all of the tasks
-			// (unchanged tasks should have been discarded anyway)
-			putGhost(en.getKey(), en.getValue().meta);
-		}
-	}
-
-	/************************************************************************
-	** {@link Translator} with access to the members of {@link TreeMap}.
-	**
-	** This implementation provides static methods to translate between this
-	** class and a map from ({@link String} forms of the key) to ({@link
-	** Task#meta metadata} of the values).
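The pull/push protocol that inflate() and deflate() drive above is easiest to see from the serialiser's side. Below is a minimal in-memory MapSerialiser, a sketch only: it assumes the interface is exactly the pull/push pair called above, and that PullTask/PushTask expose public data/meta fields, as the surrounding code suggests.

	import java.util.HashMap;
	import java.util.Map;

	// Sketch: keeps pushed values in memory, handing back the store key as the
	// "metadata" that deflate() folds back into the skeleton via putGhost().
	class InMemoryMapSerialiser<K, V> implements MapSerialiser<K, V> {

		private final Map<Object, V> store = new HashMap<Object, V>();
		private int counter;

		public void pull(Map<K, PullTask<V>> tasks, Object mapmeta) throws TaskAbortException {
			for (PullTask<V> task: tasks.values()) {
				if (!store.containsKey(task.meta)) {
					throw new TaskAbortException("No such datum: " + task.meta, null);
				}
				task.data = store.get(task.meta);
			}
		}

		public void push(Map<K, PushTask<V>> tasks, Object mapmeta) throws TaskAbortException {
			for (PushTask<V> task: tasks.values()) {
				if (task.data == null) { continue; } // already a ghost; its old metadata stays valid
				Object key = Integer.valueOf(++counter);
				store.put(key, task.data);
				task.meta = key; // deflate() reads this back and putGhost()s it
			}
		}

	}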
-	**
-	** TODO LOW atm this can't handle custom comparators
-	**
-	** @author infinity0
-	*/
-	abstract public static class TreeMapTranslator<K, V>
-	implements Translator<SkeletonTreeMap<K, V>, Map<String, Object>> {
-
-		/**
-		** Forward translation. If the given translator is {@code null},
-		** it will use {@link Object#toString()}.
-		**
-		** @param map The data structure to translate
-		** @param intm A map to populate with the translated mappings
-		** @param ktr An optional translator between key and {@link String}.
-		*/
-		public static <K, V> Map<String, Object> app(SkeletonTreeMap<K, V> map, Map<String, Object> intm, Translator<K, String> ktr) {
-			if (!map.isBare()) {
-				throw new IllegalArgumentException("Data structure is not bare. Try calling deflate() first.");
-			}
-			if (map.comparator() != null) {
-				throw new UnsupportedOperationException("Sorry, this translator does not (yet) support comparators");
-			}
-			// FIXME LOW maybe get rid of intm and just always use HashMap
-			if (ktr != null) {
-				for (Map.Entry<K, SkeletonValue<V>> en: map.skmap.entrySet()) {
-					intm.put(ktr.app(en.getKey()), en.getValue().meta());
-				}
-			} else {
-				for (Map.Entry<K, SkeletonValue<V>> en: map.skmap.entrySet()) {
-					intm.put(en.getKey().toString(), en.getValue().meta());
-				}
-			}
-			return intm;
-		}
-
-		/**
-		** Backward translation. If no translator is given, a direct cast will
-		** be attempted.
-		**
-		** @param intm The map of translated mappings to extract
-		** @param map The data structure to populate with metadata
-		** @param ktr A translator between key and {@link String}
-		*/
-		public static <K, V> SkeletonTreeMap<K, V> rev(Map<String, Object> intm, SkeletonTreeMap<K, V> map, Translator<K, String> ktr) throws DataFormatException {
-			if (ktr == null) {
-				try {
-					for (Map.Entry<String, Object> en: intm.entrySet()) {
-						map.putGhost((K)en.getKey(), en.getValue());
-					}
-				} catch (ClassCastException e) {
-					throw new DataFormatException("TreeMapTranslator: reverse translation failed. Try supplying a non-null key-translator.", e, intm, null, null);
-				}
-			} else {
-				for (Map.Entry<String, Object> en: intm.entrySet()) {
-					map.putGhost(ktr.rev(en.getKey()), en.getValue());
-				}
-			}
-			return map;
-		}
-
-	}
-
-	/*========================================================================
-	  public interface Map
-	  ========================================================================*/
-
-	@Override public int size() { return skmap.size(); }
-
-	@Override public boolean isEmpty() { return skmap.isEmpty(); }
-
-	@Override public void clear() {
-		skmap.clear();
-		ghosts = 0;
-	}
-
-	@Override public Comparator<? super K> comparator() { return skmap.comparator(); }
-
-	@Override public boolean containsKey(Object key) { return skmap.containsKey(key); }
-
-	@Override public boolean containsValue(Object value) {
-		if (!isLive()) {
-			// TODO LOW maybe make this work even when map is partially loaded
-			throw new DataNotLoadedException("TreeMap not fully loaded.", this);
-		} else {
-			for (SkeletonValue<V> v: skmap.values()) {
-				if (value.equals(v.data())) { return true; }
-			}
-			return false;
-		}
-	}
-
-	@Override public V get(Object key) {
-		SkeletonValue<V> sk = skmap.get(key);
-		if (sk == null) { return null; }
-		if (!sk.isLoaded()) { throw new DataNotLoadedException("Data not loaded for key " + key + ": " + sk.meta(), this, key, sk.meta()); }
-		return sk.data();
-	}
-
-	/**
-	** {@inheritDoc}
-	**
-	** NOTE: if the value for the key hasn't been loaded yet, then this method
-	** will return **null** instead of returning the actual previous value
-	** (that hasn't been loaded yet).
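Concretely (a sketch; the metadata here is just a placeholder string):

	SkeletonTreeMap<String, String> m = new SkeletonTreeMap<String, String>();
	m.putGhost("k", "some-metadata");  // entry exists, but its value is not loaded
	assert(m.isBare());                // every entry is a ghost
	String old = m.put("k", "value");  // marks the entry as loaded
	assert(old == null);               // the ghost's previous value was never in memory
	assert(m.isLive());                // no ghosts remain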
- ** - ** TODO LOW could code a setStrictChecksMode() or something to have this - ** method throw {@link DataNotLoadedException} in such circumstances, at - ** the user's discretion. This applies for CombinedEntry.setValue() too. - */ - @Override public V put(K key, V value) { - if(key == null) throw new NullPointerException(); - SkeletonValue sk = skmap.get(key); - if (sk == null) { - skmap.put(key, sk = new SkeletonValue(value, null, true)); - // ghosts is unchanged. - return null; - } else { - if (!sk.isLoaded()) { --ghosts; } - return sk.set(value); - } - } - - @Override public void putAll(Map map) { - SortedMap> putmap; - if (map instanceof SkeletonTreeMap) { - putmap = ((SkeletonTreeMap)map).skmap; - } else if (map instanceof SkeletonTreeMap.UnwrappingSortedSubMap) { - putmap = ((UnwrappingSortedSubMap)map).bkmap; - } else { - for (Map.Entry en: map.entrySet()) { - put((K)en.getKey(), (V)en.getValue()); - } - return; - } - int g = 0; - for (Map.Entry> en: putmap.entrySet()) { - SkeletonValue sk = en.getValue(); - SkeletonValue old = skmap.put(en.getKey(), sk); - if (old == null) { - if (!sk.isLoaded()) { ++g; } - } else { - if (old.isLoaded()) { - if (!sk.isLoaded()) { ++g; } - } else { - if (sk.isLoaded()) { --g; } - } - } - } - ghosts += g; - } - - /** - ** {@inheritDoc} - ** - ** NOTE: if the value for the key hasn't been loaded yet, then this method - ** will return **null** instead of returning the actual previous value - ** (that hasn't been loaded yet). - ** - ** TODO LOW could code a setStrictChecksMode() or something to have this - ** method throw {@link DataNotLoadedException} in such circumstances, at - ** the user's discretion. - */ - @Override public V remove(Object key) { - SkeletonValue sk = skmap.remove(key); - if (sk == null) { return null; } - if (!sk.isLoaded()) { --ghosts; } - return sk.data(); - } - - private transient Set> entries; - @Override public Set> entrySet() { - if (entries == null) { - entries = new UnwrappingEntrySet(skmap, false); - } - return entries; - } - - private transient Set keys; - @Override public Set keySet() { - if (keys == null) { - // OPT NORM make this implement a SortedSet - keys = new AbstractSet() { - - @Override public int size() { return skmap.size(); } - - @Override public Iterator iterator() { - return new UnwrappingIterator(skmap.entrySet().iterator(), UnwrappingIterator.KEY, false); - } - - @Override public void clear() { SkeletonTreeMap.this.clear(); } - - @Override public boolean contains(Object o) { - return SkeletonTreeMap.this.containsKey(o); - } - - @Override public boolean remove(Object o) { - boolean c = contains(o); - SkeletonTreeMap.this.remove(o); - return c; - } - - }; - } - return keys; - } - - private transient Collection values; - @Override public Collection values() { - if (values == null) { - values = new AbstractCollection() { - - @Override public int size() { return SkeletonTreeMap.this.size(); } - - @Override public Iterator iterator() { - return new UnwrappingIterator(skmap.entrySet().iterator(), UnwrappingIterator.VALUE, false); - } - - @Override public void clear() { SkeletonTreeMap.this.clear(); } - - }; - } - return values; - } - - @Override public K firstKey() { - return skmap.firstKey(); - } - - @Override public K lastKey() { - return skmap.lastKey(); - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method is limited in its - ** functionality, and will throw {@link UnsupportedOperationException} for - ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. 
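For example (a sketch): the view supports the lookups that SkeletonBTreeMap needs, while structural modification is refused, since it could desynchronise the ghost counter.

	SkeletonTreeMap<Integer, String> m = new SkeletonTreeMap<Integer, String>();
	for (int i = 0; i < 10; ++i) { m.put(i, "v" + i); }
	SortedMap<Integer, String> sub = m.subMap(3, 7);
	assert(sub.firstKey() == 3);
	assert(sub.lastKey() == 6);
	try {
		sub.remove(4);   // reaches UnwrappingIterator.remove(), which rejects submap views
		assert(false);
	} catch (UnsupportedOperationException expected) { }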
- */ - @Override public SortedMap subMap(K fr, K to) { - return new UnwrappingSortedSubMap(skmap.subMap(fr, to)); - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method is limited in its - ** functionality, and will throw {@link UnsupportedOperationException} for - ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. - */ - @Override public SortedMap headMap(K to) { - return new UnwrappingSortedSubMap(skmap.headMap(to)); - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method is limited in its - ** functionality, and will throw {@link UnsupportedOperationException} for - ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. - */ - @Override public SortedMap tailMap(K fr) { - return new UnwrappingSortedSubMap(skmap.tailMap(fr)); - } - - - /*======================================================================== - public class Object - ========================================================================*/ - - @Override public boolean equals(Object o) { - if (o instanceof SkeletonTreeMap) { - return skmap.equals(((SkeletonTreeMap)o).skmap); - } - return super.equals(o); - } - - /* provided by AbstractMap - @Override public int hashCode() { - throw new UnsupportedOperationException("not implemented"); - }*/ - - @Override public Object clone() { - return new SkeletonTreeMap(this); - } - - // public String toString() { return super.toString(); } - - - /************************************************************************ - ** {@link Iterator} that will throw {@link DataNotLoadedException} when it - ** encounters such data. Precise behaviour: - ** - ** * The key iterator will never throw an exception - ** * The entry iterator will never throw an exception, but the {@link - ** UnwrappingEntry#getValue()} method of the entry will throw one if - ** data is not loaded at the time of the method call. This allows you to - ** load ''and retrieve'' the data during the middle of an iteration, by - ** polling the {@code getValue()} method of the entry. - ** * The value iterator will throw an exception if the value is not loaded, - ** and keep throwing this exception for subsequent method calls, pausing - ** the iteration until the value is loaded. - ** - ** The {@link #remove()} method will set the {@link #ghosts} field - ** correctly as long as (and only if) access to (the {@link SkeletonValue} - ** corresponding to the item being removed) is either synchronized, or - ** contained within one thread. - ** - ** @author infinity0 - */ - protected class UnwrappingIterator implements Iterator { - - final protected Iterator>> iter; - - /** - ** Last entry returned by the backing iterator. Used by {@link - ** #remove()} to update the {@link #ghosts} counter correctly. - */ - protected Map.Entry> last; - - final protected static int KEY = 0; - final protected static int VALUE = 1; - final protected static int ENTRY = 2; - - /** - ** Whether this is a view of a submap {@link SkeletonTreeMap}. Currently, - ** this determines whether attempts to modify the view will fail, since - ** it's a bitch to keep track of the ghost counter. - */ - final protected boolean issub; - protected boolean gotvalue; - - /** - ** Type of iterator. - ** - ** ;{@link #KEY} : Key iterator - ** ;{@link #VALUE} : Value iterator - ** ;{@link #ENTRY} : Entry iterator - */ - final protected int type; - - /** - ** Construct an iterator backed by the given iterator over the entries - ** of the backing map. 
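The pause-and-resume contract of the value iterator described above can be consumed like this. A sketch only: it assumes DataNotLoadedException exposes the key it is constructed with through a getKey()-style accessor.

	import java.util.ArrayList;
	import java.util.Iterator;
	import java.util.List;

	// Drains all values, inflating each ghost as the iterator reports it.
	static <K, V> List<V> drainValues(SkeletonTreeMap<K, V> map) throws TaskAbortException {
		List<V> out = new ArrayList<V>();
		Iterator<V> it = map.values().iterator();
		while (it.hasNext()) {
			V v;
			while (true) {
				try { v = it.next(); break; }
				catch (DataNotLoadedException e) {
					map.inflate((K)e.getKey()); // hypothetical accessor; the same entry is then re-served
				}
			}
			out.add(v);
		}
		return out;
	}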
- ** - ** @param it The backing iterator - ** @param t The {@link #type} of iterator - */ - protected UnwrappingIterator(Iterator>> it, int t, boolean sub) { - assert(t == KEY || t == VALUE || t == ENTRY); - type = t; - iter = it; - issub = sub; - } - - public boolean hasNext() { - return iter.hasNext(); - } - - public T next() { - switch (type) { - case KEY: - last = iter.next(); - return (T)last.getKey(); - case VALUE: - if (last == null || gotvalue) { last = iter.next(); } - SkeletonValue skel = last.getValue(); - if (!(gotvalue = skel.isLoaded())) { - throw new DataNotLoadedException("SkeletonTreeMap: Data not loaded for key " + last.getKey() + ": " + skel.meta(), SkeletonTreeMap.this, last.getKey(), skel.meta()); - } - return (T)skel.data(); - case ENTRY: - last = iter.next(); - return (T)new UnwrappingEntry(last); - } - throw new AssertionError(); - } - - public void remove() { - if (issub) { throw new UnsupportedOperationException("not implemented"); } - if (last == null) { throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed."); } - if (!last.getValue().isLoaded()) { --ghosts; } - iter.remove(); - last = null; - } - - - /************************************************************************ - ** {@link java.util.Map.Entry} backed by one whose value is wrapped inside a {@link - ** SkeletonValue}. This class will unwrap the value when it is required, - ** throwing {@link DataNotLoadedException} as necessary. - ** - ** @author infinity0 - */ - protected class UnwrappingEntry implements Map.Entry { - - final K key; - final SkeletonValue skel; - - protected UnwrappingEntry(Map.Entry> en) { - key = en.getKey(); - skel = en.getValue(); - } - - protected void verifyLoaded() { - if (!skel.isLoaded()) { - throw new DataNotLoadedException("SkeletonTreeMap: Data not loaded for key " + key + ": " + skel.meta(), SkeletonTreeMap.this, key, skel.meta()); - } - } - - public K getKey() { - return key; - } - - public V getValue() { - verifyLoaded(); - return skel.data(); - } - - /** - ** See also note for {@link SkeletonTreeMap#put(Object, Object)}. - */ - public V setValue(V value) { - if (!skel.isLoaded()) { --ghosts; } - return skel.set(value); - } - - @Override public int hashCode() { - verifyLoaded(); - return (key==null? 0: key.hashCode()) ^ - (skel.data()==null? 0: skel.data().hashCode()); - } - - @Override public boolean equals(Object o) { - if (!(o instanceof Map.Entry)) { return false; } - verifyLoaded(); - Map.Entry en = (Map.Entry)o; - return (key==null? en.getKey()==null: key.equals(en.getKey())) && - (skel.data()==null? en.getValue()==null: skel.data().equals(en.getValue())); - } - - } - - } - - - /************************************************************************ - ** Submap of a {@link SkeletonTreeMap}. Currently, this only implements the - ** {@link #firstKey()}, {@link #lastKey()} and {@link #isEmpty()} methods. - ** This is because it's a bitch to implement an entire {@link SortedMap}, - ** and I only needed those three methods for {@link SkeletonBTreeMap} to - ** work. Feel free to expand it. 
- ** - ** @author infinity0 - */ - protected class UnwrappingSortedSubMap extends AbstractMap implements SortedMap { - - final SortedMap> bkmap; - - protected UnwrappingSortedSubMap(SortedMap> sub) { - bkmap = sub; - } - - @Override public int size() { - return bkmap.size(); - } - - @Override public boolean isEmpty() { - return bkmap.isEmpty(); - } - - /*@Override**/ public Comparator comparator() { - return bkmap.comparator(); - } - - /*@Override**/ public K firstKey() { - return bkmap.firstKey(); - } - - /*@Override**/ public K lastKey() { - return bkmap.lastKey(); - } - - private transient Set> entries; - @Override public Set> entrySet() { - if (entries == null) { - entries = new UnwrappingEntrySet(bkmap, true); - } - return entries; - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method has the {@link - ** UnwrappingSortedSubMap same limitations} as this class. - */ - /*@Override**/ public SortedMap subMap(K fr, K to) { - return new UnwrappingSortedSubMap(bkmap.subMap(fr, to)); - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method has the {@link - ** UnwrappingSortedSubMap same limitations} as this class. - */ - /*@Override**/ public SortedMap headMap(K to) { - return new UnwrappingSortedSubMap(bkmap.headMap(to)); - } - - /** - ** {@inheritDoc} - ** - ** NOTE: the sorted map returned by this method has the {@link - ** UnwrappingSortedSubMap same limitations} as this class. - */ - /*@Override**/ public SortedMap tailMap(K fr) { - return new UnwrappingSortedSubMap(bkmap.tailMap(fr)); - } - - } - - - /************************************************************************ - ** DOCUMENT - ** - ** @author infinity0 - */ - protected class UnwrappingEntrySet extends AbstractSet> { - - final protected SortedMap> bkmap; - /** - ** Whether this is a view of a submap {@link SkeletonTreeMap}. Currently, - ** this determines whether attempts to modify the view will fail, since - ** it's a bitch to keep track of the ghost counter. 
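A usage sketch (Maps.$$ is the entry factory this codebase uses elsewhere, eg. in SortedSetMap below): lookups on loaded entries behave normally, while a ghost entry makes contains(Object), implemented below, throw DataNotLoadedException.

	SkeletonTreeMap<String, String> m = new SkeletonTreeMap<String, String>();
	m.put("a", "1");
	m.putGhost("b", "meta-b");
	Set<Map.Entry<String, String>> es = m.entrySet();
	assert(es.contains(Maps.$$("a", "1")));  // loaded entry: ordinary lookup
	try {
		es.contains(Maps.$$("b", "1"));      // ghost entry: its value must be loaded first
		assert(false);
	} catch (DataNotLoadedException expected) { }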
- */ - final protected boolean issub; - - protected UnwrappingEntrySet(SortedMap> bk, boolean sub) { - bkmap = bk; - issub = sub; - } - - @Override public int size() { return bkmap.size(); } - - @Override public Iterator> iterator() { - return new UnwrappingIterator>(bkmap.entrySet().iterator(), UnwrappingIterator.ENTRY, issub); - } - - @Override public void clear() { - if (issub) { throw new UnsupportedOperationException("not implemented"); } - SkeletonTreeMap.this.clear(); - } - - @Override public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) { return false; } - Map.Entry en = (Map.Entry)o; - Object key = en.getKey(); Object val = en.getValue(); - // return SkeletonTreeMap.this.get(e.getKey()).equals(e.getValue()); - SkeletonValue sk = bkmap.get(key); - if (sk == null) { return false; } - if (!sk.isLoaded()) { throw new DataNotLoadedException("Data not loaded for key " + key + ": " + sk.meta(), SkeletonTreeMap.this, key, sk.meta()); } - return sk.data() == null && val == null || sk.data().equals(val); - } - - @Override public boolean remove(Object o) { - if (issub) { throw new UnsupportedOperationException("not implemented"); } - // SkeletonTreeMap.remove() called, which automatically updates _ghosts - boolean c = contains(o); - if (c) { SkeletonTreeMap.this.remove(((Map.Entry)o).getKey()); } - return c; - } - - } - - public String toString() { - if(this.isLive()) - return getClass().getName() + "@" + System.identityHashCode(this) +":" + super.toString(); - else - return getClass().getName() + "@" + System.identityHashCode(this); - } - - - -} - -/************************************************************************ -** A class that wraps the associated value (which may not be loaded) of a -** key in a {@link SkeletonTreeMap}, along with metadata and its loaded -** status. -** -** meta and data are private with accessors to ensure proper encapsulation -** i.e. the only way data can be != null is if isLoaded(), and the only way -** meta can be != null is if !isLoaded(). Note that with the methods and the -** class final, there should be no performance penalty. -** -** @author infinity0 -*/ -final class SkeletonValue implements Cloneable { - - private Object meta; - private V data; - - private boolean isLoaded; - - public SkeletonValue(V d, Object o, boolean loaded) { - if(loaded) { - assert(o == null); - // Null data is allowed as we use it in SkeletonBTreeMap.update() as a placeholder. - isLoaded = true; - data = d; - } else { - assert(d == null); - assert(o != null); // null metadata is invalid, right? - isLoaded = false; - meta = o; - } - } - - public final V data() { - return data; - } - - public final Object meta() { - return meta; - } - - public final boolean isLoaded() { - return isLoaded; - } - - /** - ** Set the data and mark the value as loaded. - */ - public V set(V v) { - V old = data; - data = v; - // meta = null; TODO LOW decide what to do with this. - isLoaded = true; - return old; - } - - /** - ** Set the metadata and mark the value as not loaded. 
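The resulting state machine, sketched from within the same package ("CHK-1"/"CHK-2" stand in for whatever metadata the serialiser produces):

	SkeletonValue<String> sv = new SkeletonValue<String>(null, "CHK-1", false); // ghost: metadata only
	assert(!sv.isLoaded() && sv.data() == null);
	sv.set("payload");     // loaded: data set (set() currently leaves the old meta in place)
	assert(sv.isLoaded() && "payload".equals(sv.data()));
	sv.setGhost("CHK-2");  // ghost again: data cleared, metadata replaced
	assert(!sv.isLoaded() && sv.data() == null);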
- */ - public Object setGhost(Object m) { - if(m == null) throw new NullPointerException(); - Object old = meta; - meta = m; - data = null; - isLoaded = false; - return old; - } - - @Override public String toString() { - return "(" + data + ", " + meta + ", " + isLoaded + ")"; - } - - @Override public SkeletonValue clone() { - try { - return (SkeletonValue)super.clone(); - } catch (CloneNotSupportedException e) { - throw new AssertionError(e); - } - } - -} - - diff --git a/src/plugins/Library/util/Sorted.java b/src/plugins/Library/util/Sorted.java deleted file mode 100644 index 0b390d51..00000000 --- a/src/plugins/Library/util/Sorted.java +++ /dev/null @@ -1,268 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Set; -import java.util.SortedMap; -import java.util.SortedSet; -import java.util.AbstractSet; -import java.util.ArrayList; - -/** -** Methods on sorted collections -** -** Most of the stuff here is completely fucking pointless and only because we -** need to support Java 5. Otherwise, we can just use standard Navigable* -** methods. :| -** -** @author infinity0 -*/ -final public class Sorted { - - public enum Inclusivity { NONE, LEFT, RIGHT, BOTH } - - private Sorted() { } - - private static class SortedKeySet extends AbstractSet implements SortedSet { - - final SortedMap map; - - public SortedKeySet(SortedMap m) { - map = m; - } - - @Override public int size() { return map.size(); } - - @Override public Iterator iterator() { - return map.keySet().iterator(); - } - - @Override public void clear() { map.clear(); } - - @Override public boolean contains(Object o) { - return map.containsKey(o); - } - - @Override public boolean remove(Object o) { - boolean c = contains(o); - map.remove(o); - return c; - } - - /*@Override**/ public Comparator comparator() { return map.comparator(); } - /*@Override**/ public K first() { return map.firstKey(); } - /*@Override**/ public K last() { return map.lastKey(); } - /*@Override**/ public SortedSet headSet(K r) { return new SortedKeySet(map.headMap(r)); } - /*@Override**/ public SortedSet tailSet(K l) { return new SortedKeySet(map.tailMap(l)); } - /*@Override**/ public SortedSet subSet(K l, K r) { return new SortedKeySet(map.subMap(l, r)); } - - } - - /** - ** Returns the second element in an iterable, or {@code null} if there is - ** no such element. The iterable is assumed to have at least one element. - */ - public static E second(Iterable ib) { - Iterator it = ib.iterator(); - it.next(); - return (it.hasNext())? it.next(): null; - } - - /** - ** Returns the least element (in the given set) >= the given element, or - ** {@code null} if there is no such element. - ** - ** This is identical to Java 6's {@code NavigableSet.ceiling(E)}, but works - ** for {@link SortedSet}. - */ - public static E ceiling(SortedSet set, E el) { - SortedSet tail = set.tailSet(el); - return (tail.isEmpty())? null: tail.first(); - } - - /** - ** Returns the greatest element (in the given set) <= the given element, or - ** {@code null} if there is no such element. - ** - ** This is identical to Java 6's {@code NavigableSet.floor(E)}, but works - ** for {@link SortedSet}. 
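For example (floor() and higher() are defined just below):

	import java.util.Arrays;
	import java.util.SortedSet;
	import java.util.TreeSet;

	SortedSet<Integer> s = new TreeSet<Integer>(Arrays.asList(10, 20, 30));
	assert(Sorted.ceiling(s, 15) == 20);  // least element >= 15
	assert(Sorted.floor(s, 25) == 20);    // greatest element <= 25
	assert(Sorted.higher(s, 20) == 30);   // least element > 20
	assert(Sorted.lower(s, 10) == null);  // nothing < 10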
- */ - public static E floor(SortedSet set, E el) { - SortedSet head; - return (set.contains(el))? el: ((head = set.headSet(el)).isEmpty())? null: head.last(); - } - - /** - ** Returns the least element (in the given set) > the given element, or - ** {@code null} if there is no such element. - ** - ** This is identical to Java 6's {@code NavigableSet.higher(E)}, but works - ** for {@link SortedSet}. - */ - public static E higher(SortedSet set, E el) { - SortedSet tail = set.tailSet(el); - return (set.contains(el))? second(tail): (tail.isEmpty())? null: tail.first(); - } - - /** - ** Returns the greatest element (in the given set) < the given element, or - ** {@code null} if there is no such element. - ** - ** This is identical to Java 6's {@code NavigableSet.lower(E)}, but works - ** for {@link SortedSet}. - */ - public static E lower(SortedSet set, E el) { - SortedSet head = set.headSet(el); - return (head.isEmpty())? null: head.last(); - } - - /** - ** Returns a {@link SortedSet} view of the given {@link SortedMap}'s keys. - ** You would think that the designers of the latter would have overridden - ** the {@link SortedMap#keySet()} method to do this, but noooo... - ** - ** JDK6 remove this pointless piece of shit. - */ - public static SortedSet keySet(SortedMap map) { - Set ks = map.keySet(); - return (ks instanceof SortedSet)? (SortedSet)ks: new SortedKeySet(map); - } - - /** - ** Splits the given sorted set at the given separators. - ** - ** FIXME LOW currently this method assumes that the comparator of the set - ** is consistent with equals(). we should use the SortedSet's comparator() - ** instead, or "natural" ordering. - ** - ** JDK6 use a NavigableSet instead of a SortedSet. - ** - ** @param subj Subject of the split - ** @param sep Separators to split at - ** @param foundsep An empty set which will be filled with the separators - ** that were also contained in the subject set. - ** @return A list of subsets; each subset contains all entries between two - ** adjacent separators, or an edge separator and the corresponding - ** edge of the set. The list is in sorted order. - ** @throws NullPointerException if any of the inputs are {@code null} - */ - public static List> split(SortedSet subj, SortedSet sep, SortedSet foundsep) { - if (!foundsep.isEmpty()) { - throw new IllegalArgumentException("split(): Must provide an empty set to add found separators to"); - } - - if (subj.isEmpty()) { - return Collections.emptyList(); - } else if (sep.isEmpty()) { - return Collections.singletonList(subj); - } - - List> res = new ArrayList>(sep.size()+2); - E csub = subj.first(), csep, nsub, nsep; - - do { - csep = floor(sep, csub); // JDK6 sep.floor(csub); - assert(csub != null); - if (csub.equals(csep)) { - assert(subj.contains(csep)); - foundsep.add(csep); - csub = second(subj.tailSet(csep)); // JDK6 subj.higher(csep); - continue; - } - - nsep = (csep == null)? sep.first(): second(sep.tailSet(csep)); // JDK6 sep.higher(csep); - assert(nsep == null || ((Comparable)csub).compareTo(nsep) < 0); - - nsub = (nsep == null)? subj.last(): floor(subj, nsep); // JDK6 subj.floor(nsep); - assert(nsub != null); - if (nsub.equals(nsep)) { - foundsep.add(nsep); - nsub = lower(subj, nsep); // JDK6 subj.lower(nsep); - } - - assert(csub != null && ((Comparable)csub).compareTo(nsub) <= 0); - assert(csep != null || nsep != null); // we already took care of sep.size() == 0 - res.add( - (csep == null)? subj.headSet(nsep): - (nsep == null)? 
subj.tailSet(csub): - subj.subSet(csub, nsep) - ); - if (nsep == null) { break; } - - csub = second(subj.tailSet(nsub)); // JDK6 subj.higher(nsub); - } while (csub != null); - - return res; - } - - /** - ** Select {@code n} separator elements from the subject sorted set. The - ** elements are distributed evenly amongst the set. - ** - ** @param subj The set to select elements from - ** @param num Number of elements to select - ** @param inc Which sides of the set to select. For example, if this is - ** {@code BOTH}, then the first and last elements will be selected. - ** @return The selected elements - */ - public static List select(SortedSet subj, int num, Inclusivity inc) { - if (num >= 2) { - // test most common first - } else if (num == 1) { - switch (inc) { - case LEFT: return Collections.singletonList(subj.first()); - case RIGHT: return Collections.singletonList(subj.last()); - case BOTH: throw new IllegalArgumentException("select(): can't have num=1 and inc=BOTH"); - // case NONE falls through to main section - } - } else if (num == 0) { - if (inc == Inclusivity.NONE) { - return Collections.emptyList(); - } else { - throw new IllegalArgumentException("select(): can't have num=0 and inc!=NONE"); - } - } else { // num < 0 - throw new IllegalArgumentException("select(): cannot select a negative number of items"); - } - - if (subj.size() < num) { - throw new IllegalArgumentException("select(): cannot select " + num + " elements from a set of size " + subj.size()); - } else if (subj.size() == num) { - return new ArrayList(subj); - } - - List sel = new ArrayList(num); - int n = num; - Iterator it = subj.iterator(); - - switch (inc) { - case NONE: ++n; break; - case BOTH: --n; - case LEFT: sel.add(it.next()); break; - } - - for (Integer s: Integers.allocateEvenly(subj.size() - num, n)) { - for (int i=0; i List select(SortedSet subj, int n) { - return select(subj, n, Inclusivity.NONE); - } - -} diff --git a/src/plugins/Library/util/SortedArraySet.java b/src/plugins/Library/util/SortedArraySet.java deleted file mode 100644 index bf9564e6..00000000 --- a/src/plugins/Library/util/SortedArraySet.java +++ /dev/null @@ -1,219 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version) {. See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import freenet.support.Fields; // JDK6: use this instead of Arrays.binarySearch - -import java.util.Comparator; -import java.util.Iterator; -import java.util.Arrays; -import java.util.Map; -import java.util.Set; -import java.util.SortedSet; -import java.util.SortedMap; -import java.util.AbstractSet; - -/** -** An immutable {@link SortedSet} backed by a sorted array. -** -** DOCUMENT -** -** OPT LOW make a SortedArrayMap out of this, and make the BTreeMap node -** use that (or a SortedArrayTripleMap) instead of TreeMap -** -** @author infinity0 -*/ -public class SortedArraySet extends AbstractSet -implements Set, SortedSet/*, NavigableSet, Cloneable, Serializable*/ { - - /** - ** The backing array. - */ - final protected E[] bkarr; - - /** - ** The comparator used to sort the array. - */ - final protected Comparator comparator; - - /** - ** Left index, inclusive. - */ - final int li; - - /** - ** Right index, exclusive. - */ - final int ri; - - - public SortedArraySet(E[] arr) { - this(arr, null, false); - } - - /** - ** Construct a new set from the given array, sorted according to the given - ** comparator. 
The new set will be '''backed''' by the array, so it must - ** not be changed elsewhere in a program. Ideally, all references to it - ** should be discarded, and all access to the array should occur through - ** this wrapper class. - ** - ** @param arr The backing array - ** @param cmp The comparator used for sorting. A {@code null} value means - ** {@linkplain Comparable natural ordering} is used. - ** @param sorted Whether the array is already sorted. If this is {@code - ** true}, the input array will not be sorted again. - ** @throws IllegalArgumentException if the array contains duplicate - ** elements, or if {@code sorted} is {@code true} but the array is - ** not actually sorted. - */ - public SortedArraySet(E[] arr, Comparator cmp, boolean sorted) { - this(arr, 0, arr.length, cmp); - if (!sorted) { - Arrays.sort(bkarr, noDuplicateComparator(cmp)); - } else { - Comparator dcmp = noDuplicateComparator(cmp); - for (int i=0, j=1; j 0) { - throw new IllegalArgumentException("Array is not sorted"); - } - } - } - } - - protected SortedArraySet(E[] arr, int l, int r, Comparator cmp) { - assert(0 <= l && l <= r && r <= arr.length); - bkarr = arr; - li = l; - ri = r; - comparator = cmp; - } - - public static Comparator noDuplicateComparator(final Comparator cmp) { - if (cmp == null) { - return new Comparator() { - public int compare(E e1, E e2) { - int d = ((Comparable)e1).compareTo(e2); - if (d == 0) { throw new IllegalArgumentException("A set cannot have two duplicate elements"); } - return d; - } - }; - } else { - return new Comparator() { - public int compare(E e1, E e2) { - int d = cmp.compare(e1, e2); - if (d == 0) { throw new IllegalArgumentException("A set cannot have two duplicate elements"); } - return d; - } - }; - } - } - - /*======================================================================== - public interface Set - ========================================================================*/ - - @Override public int size() { - return ri - li; - } - - @Override public boolean isEmpty() { - return ri == li; - } - - @Override public boolean contains(Object o) { - return Arrays.binarySearch(bkarr, li, ri, (E)o, comparator) > 0; - } - - @Override public Iterator iterator() { - return new Iterator() { - int i = li; - - public boolean hasNext() { - return i < ri; - } - - public E next() { - return bkarr[i++]; - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - /* provided by AbstractSet - @Override public Object[] toArray() { } - */ - - /* provided by AbstractSet - @Override public T[] toArray(T[] a) { } - */ - - @Override public boolean add(E o) { - throw new UnsupportedOperationException("This set is immutable"); - } - - @Override public boolean remove(Object o) { - throw new UnsupportedOperationException("This set is immutable"); - } - - /* provided by AbstractSet - @Override public boolean containsAll(Collection c) { } - @Override public boolean addAll(Collection c) { } - @Override public boolean retainAll(Collection c) { } - @Override public boolean removeAll(Collection c) { } - */ - - @Override public void clear() { - if (!isEmpty()) { - throw new UnsupportedOperationException("This set is immutable"); - } - } - - /* provided by AbstractSet - @Override public boolean equals(Object o) { } - @Override public int hashCode() { } - */ - - /*======================================================================== - public interface SortedSet - ========================================================================*/ - - /*@Override**/ public 
Comparator comparator() { - return comparator; - } - - /*@Override**/ public E first() { - return bkarr[li]; - } - - /*@Override**/ public E last() { - return bkarr[ri-1]; - } - - /*@Override**/ public SortedSet headSet(E to) { - int d = Arrays.binarySearch(bkarr, li, ri, to, comparator); - if (d < 0) { d = ~d; } - if (d < li || ri < d) { - throw new IllegalArgumentException("Argument not in this subset's range"); - } - return new SortedArraySet(bkarr, li, d, comparator); - } - - /*@Override**/ public SortedSet tailSet(E fr) { - int d = Arrays.binarySearch(bkarr, li, ri, fr, comparator); - if (d < 0) { d = ~d; } - if (d < li || ri < d) { - throw new IllegalArgumentException("Argument not in this subset's range"); - } - return new SortedArraySet(bkarr, d, ri, comparator); - } - - /*@Override**/ public SortedSet subSet(E fr, E to) { - return tailSet(fr).headSet(to); - } - -} diff --git a/src/plugins/Library/util/SortedMapSet.java b/src/plugins/Library/util/SortedMapSet.java deleted file mode 100644 index 82fff6fb..00000000 --- a/src/plugins/Library/util/SortedMapSet.java +++ /dev/null @@ -1,164 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version) {. See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Comparator; -import java.util.Iterator; -import java.util.Set; -import java.util.Map; -import java.util.SortedSet; -import java.util.SortedMap; -import java.util.AbstractSet; - -/** -** A {@link SortedSet} backed by a {@link SortedMap}, with one additional bonus -** method which I felt was lacking from the {@link Set} interface, namely -** {@link #get(Object)}. -** -** This implementation assumes that for the backing map, looking up items by -** key is more efficient than by value. It is up to the programmer to ensure -** that this holds; most maps are designed to have this property though. -** -** TODO LOW this could be made to extend a new {@code MapSet} class. -** -** @author infinity0 -*/ -public class SortedMapSet> extends AbstractSet -implements Set, SortedSet/*, NavigableSet, Cloneable, Serializable*/ { - - /** - ** {@link SortedMap} backing this {@link SortedSet}. - */ - final protected M bkmap; - - /** - ** Construct a set backed by the given {@link SortedMap}. - ** - ** It is '''assumed''' that every key already in this map, is mapped to - ** itself. It is up to the caller to ensure that this holds. - */ - protected SortedMapSet(M m) { - bkmap = m; - } - - /** - ** Verifies the backing map for the condition described in the constructor. - ** - ** Note: this test is disabled (will always return true) since it is - ** impossible to test this for partially-loaded data structures. Code kept - ** here to remind future programmers of this. - ** - ** @throws UnsupportedOperationException - */ - final static boolean verifyBackingMapIntegrity(SortedMap m) { - /*for (Map.Entry en: m.entrySet()) { - assert(en.getKey() == en.getValue()); - }*/ - throw new UnsupportedOperationException("impossible to implement"); - } - - /** - ** Returns the object reference-identical to the one added into the set. - ** - ** @param o An object "equal" (ie. compares 0) to the one in the set. - ** @return The object contained in the set. 
- ** @throws ClassCastException object cannot be compared with the objects - ** currently in the map - ** @throws NullPointerException o is {@code null} and this map uses - ** natural order, or its comparator does not tolerate {@code null} - ** keys - */ - public E get(Object o) { - return bkmap.get(o); - } - - /*======================================================================== - public interface Set - ========================================================================*/ - - @Override public int size() { - return bkmap.size(); - } - - @Override public boolean isEmpty() { - return bkmap.isEmpty(); - } - - @Override public boolean contains(Object o) { - return bkmap.containsKey(o); - } - - @Override public Iterator iterator() { - return bkmap.keySet().iterator(); - } - - /* provided by AbstractSet - @Override public Object[] toArray() { } - */ - - /* provided by AbstractSet - @Override public T[] toArray(T[] a) { } - */ - - @Override public boolean add(E o) { - if (o == null) { - // BTreeMap doesn't support null keys at the time of coding, but this may change - return !bkmap.containsKey(null)? bkmap.put(null, null) == null: false; - } - return bkmap.put(o, o) == null; - } - - @Override public boolean remove(Object o) { - if (o == null) { - // BTreeMap doesn't support null keys at the time of coding, but this may change - return bkmap.containsKey(null)? bkmap.remove(null) == null: false; - } - return bkmap.remove(o) == o; - } - - /* provided by AbstractSet - @Override public boolean containsAll(Collection c) { } - @Override public boolean addAll(Collection c) { } - @Override public boolean retainAll(Collection c) { } - @Override public boolean removeAll(Collection c) { } - */ - - @Override public void clear() { - bkmap.clear(); - } - - /* provided by AbstractSet - @Override public boolean equals(Object o) { } - @Override public int hashCode() { } - */ - - /*======================================================================== - public interface SortedSet - ========================================================================*/ - - /*@Override**/ public Comparator comparator() { - return bkmap.comparator(); - } - - /*@Override**/ public E first() { - return bkmap.firstKey(); - } - - /*@Override**/ public E last() { - return bkmap.lastKey(); - } - - /*@Override**/ public SortedSet headSet(E to) { - return new SortedMapSet>(bkmap.headMap(to)); - } - - /*@Override**/ public SortedSet tailSet(E fr) { - return new SortedMapSet>(bkmap.tailMap(fr)); - } - - /*@Override**/ public SortedSet subSet(E fr, E to) { - return new SortedMapSet>(bkmap.subMap(fr, to)); - } - -} diff --git a/src/plugins/Library/util/SortedSetMap.java b/src/plugins/Library/util/SortedSetMap.java deleted file mode 100644 index ff14de5e..00000000 --- a/src/plugins/Library/util/SortedSetMap.java +++ /dev/null @@ -1,160 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version) {. See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import java.util.Comparator; -import java.util.Iterator; -import java.util.Collection; -import java.util.Set; -import java.util.Map; -import java.util.SortedSet; -import java.util.SortedMap; -import java.util.AbstractSet; -import java.util.AbstractMap; - -/** -** A {@link SortedMap} view of a {@link SortedSet}, with each key mapped to -** itself. The {@link #put(Object, Object)} method only accepts both arguments -** if they are the same object. 
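A usage sketch, assuming the type parameters <E, S extends SortedSet<E>> implied by the view-returning methods below:

	import java.util.Arrays;
	import java.util.SortedSet;
	import java.util.TreeSet;

	SortedSet<String> set = new TreeSet<String>(Arrays.asList("a", "b"));
	SortedSetMap<String, SortedSet<String>> view =
		new SortedSetMap<String, SortedSet<String>>(set);
	assert("a".equals(view.get("a")));  // keys map to themselves
	view.put("c", "c");                 // same reference on both sides: accepted
	assert(set.contains("c"));          // writes through to the backing set
	try {
		view.put("d", new String("d")); // two distinct references: rejected
		assert(false);
	} catch (IllegalArgumentException expected) { }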
-** -** Note: this class assumes the set's comparator is consistent with {@link -** Object#equals(Object)}. FIXME LOW -** -** @author infinity0 -*/ -public class SortedSetMap> extends AbstractMap -implements Map, SortedMap/*, NavigableMap, Cloneable, Serializable*/ { - - /** - ** {@link SortedSet} backing this {@link SortedMap}. - */ - final protected S bkset; - - /** - ** Construct a map-view of the given set. - */ - public SortedSetMap(S s) { - bkset = s; - } - - /*======================================================================== - public interface Map - ========================================================================*/ - - @Override public int size() { - return bkset.size(); - } - - @Override public boolean isEmpty() { - return bkset.isEmpty(); - } - - @Override public boolean containsKey(Object o) { - return bkset.contains(o); - } - - @Override public boolean containsValue(Object o) { - return bkset.contains(o); - } - - @Override public void clear() { - bkset.clear(); - } - - @Override public E put(E k, E v) { - if (k != v) { - throw new IllegalArgumentException("SortedMapSet: cannot accept a non-self mapping"); - } - return bkset.add(k)? null: v; - } - - @Override public E get(Object o) { - return bkset.contains(o)? (E)o: null; - } - - @Override public E remove(Object o) { - return bkset.remove(o)? (E)o: null; - } - - private transient Set> entries; - @Override public Set> entrySet() { - if (entries == null) { - entries = new AbstractSet>() { - - @Override public int size() { return bkset.size(); } - - @Override public Iterator> iterator() { - return new Iterator>() { - final Iterator it = bkset.iterator(); - /*@Override**/ public boolean hasNext() { return it.hasNext(); } - /*@Override**/ public Map.Entry next() { - E e = it.next(); - return Maps.$$(e, e); - } - /*@Override**/ public void remove() { it.remove(); } - }; - } - - @Override public void clear() { - bkset.clear(); - } - - @Override public boolean contains(Object o) { - if (!(o instanceof Map.Entry)) { return false; } - Map.Entry en = (Map.Entry)o; - if (en.getKey() != en.getValue()) { return false; } - return bkset.contains(en.getKey()); - } - - @Override public boolean remove(Object o) { - boolean c = contains(o); - if (c) { bkset.remove(((Map.Entry)o).getKey()); } - return c; - } - - }; - } - return entries; - } - - @Override public SortedSet keySet() { - // FIXME LOW Map.keySet() should be remove-only - return bkset; - } - - @Override public Collection values() { - // FIXME LOW Map.values() should be remove-only - return bkset; - } - - /*======================================================================== - public interface SortedMap - ========================================================================*/ - - /*@Override**/ public Comparator comparator() { - return bkset.comparator(); - } - - /*@Override**/ public E firstKey() { - return bkset.first(); - } - - /*@Override**/ public E lastKey() { - return bkset.last(); - } - - /*@Override**/ public SortedMap headMap(E to) { - return new SortedSetMap>(bkset.headSet(to)); - } - - /*@Override**/ public SortedMap tailMap(E fr) { - return new SortedSetMap>(bkset.tailSet(fr)); - } - - /*@Override**/ public SortedMap subMap(E fr, E to) { - return new SortedSetMap>(bkset.subSet(fr, to)); - } - - -} diff --git a/src/plugins/Library/util/TaskAbortExceptionConvertor.java b/src/plugins/Library/util/TaskAbortExceptionConvertor.java deleted file mode 100644 index 5cefdd02..00000000 --- a/src/plugins/Library/util/TaskAbortExceptionConvertor.java +++ /dev/null @@ 
-1,13 +0,0 @@ -package plugins.Library.util; - -import plugins.Library.util.concurrent.ExceptionConvertor; -import plugins.Library.util.exec.TaskAbortException; - -public class TaskAbortExceptionConvertor implements - ExceptionConvertor { - - public TaskAbortException convert(RuntimeException e) { - return new TaskAbortException(e.getMessage(), e); - } - -} diff --git a/src/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java b/src/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java deleted file mode 100644 index d9e4c802..00000000 --- a/src/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java +++ /dev/null @@ -1,196 +0,0 @@ -package plugins.Library.util.concurrent; - -import java.util.Collection; -import java.util.Comparator; -import java.util.Iterator; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.TimeUnit; - -public class BoundedPriorityBlockingQueue extends PriorityBlockingQueue { - - int maxSize; - - /* Locking strategy: - * We do not have access to the parent's lock object. - * We need to be able to wait for the queue to become non-full, meaning we need to be able to signal it. - * We need to be able to do so reliably i.e. we need a lock surrounding it. - * We could do this with a Lock and a Condition, but the easiest solution is just to lock on an Object. - * Because callers might expect us to use an internal lock, we'll use a separate object. - */ - protected final Object fullLock = new Object(); - - public BoundedPriorityBlockingQueue(int capacity) { - super(capacity); - this.maxSize = capacity; - } - - public BoundedPriorityBlockingQueue(int capacity, Comparator comp) { - super(capacity, comp); - this.maxSize = capacity; - } - - // add() calls offer() - // put() calls offer() - - @Override - public void put(E o) { - synchronized(fullLock) { - if(super.size() < maxSize) { - super.offer(o); - return; - } - // We have to wait. - while(true) { - try { - fullLock.wait(); - } catch (InterruptedException e) { - // Ignore. - // PriorityBlockingQueue.offer doesn't throw it so we can't throw it. - } - if(super.size() < maxSize) { - super.offer(o); - return; - } - } - } - } - - @Override - public boolean offer(E o, long timeout, TimeUnit unit) { - synchronized(fullLock) { - if(super.size() < maxSize) { - super.offer(o); - return true; - } - long waitTime = unit.toMillis(timeout); - if(waitTime <= 0) waitTime = 0; - long now = System.currentTimeMillis(); - // We have to wait. - long end = now + waitTime; - while(now < end) { - try { - fullLock.wait((int)(Math.min(Integer.MAX_VALUE, end - now))); - } catch (InterruptedException e) { - // Ignore. - // PriorityBlockingQueue.offer doesn't throw it so we can't throw it. 
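-				// Falling through after an ignored interrupt (or a spurious
-				// wakeup) re-checks the queue size and recomputes the
-				// remaining wait from the fixed deadline `end`, so the total
-				// timeout can never be extended.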
- } - if(super.size() < maxSize) { - super.offer(o); - return true; - } - now = System.currentTimeMillis(); - } - return false; - } - } - - @Override - public boolean offer(E o) { - synchronized(fullLock) { - if(super.size() < maxSize) { - super.offer(o); - return true; - } else return false; - } - } - - public E poll() { - E o = super.poll(); - if(o == null) return null; - synchronized(fullLock) { - fullLock.notifyAll(); - } - return o; - } - - public E poll(long timeout, TimeUnit unit) throws InterruptedException { - E o = super.poll(timeout, unit); - if(o == null) return null; - synchronized(fullLock) { - fullLock.notifyAll(); - } - return o; - } - - public E take() throws InterruptedException { - E o = super.take(); - synchronized(fullLock) { - fullLock.notifyAll(); - } - return o; - } - - // peek() is safe. - // size() is safe. - - public int remainingCapacity() { - synchronized(fullLock) { - return maxSize - size(); - } - } - - public boolean remove(Object o) { - if(super.remove(o)) { - synchronized(fullLock) { - fullLock.notifyAll(); - } - return true; - } else return false; - } - - // contains() is safe - // toArray() is safe - // toString() is safe - - public int drainTo(Collection c) { - int moved = super.drainTo(c); - if(moved > 0) { - synchronized(fullLock) { - fullLock.notifyAll(); - } - } - return moved; - } - - public int drainTo(Collection c, int maxElements) { - int moved = super.drainTo(c, maxElements); - if(moved > 0) { - synchronized(fullLock) { - fullLock.notifyAll(); - } - } - return moved; - } - - public void clear() { - super.clear(); - synchronized(fullLock) { - fullLock.notifyAll(); - } - } - - public Iterator iterator() { - final Iterator underlying = super.iterator(); - return new Iterator() { - - public boolean hasNext() { - return underlying.hasNext(); - } - - public E next() { - return underlying.next(); - } - - public void remove() { - underlying.remove(); - synchronized(fullLock) { - fullLock.notifyAll(); - } - } - - }; - } - - // writeObject() is safe - -} diff --git a/src/plugins/Library/util/concurrent/ExceptionConvertor.java b/src/plugins/Library/util/concurrent/ExceptionConvertor.java deleted file mode 100644 index 62454895..00000000 --- a/src/plugins/Library/util/concurrent/ExceptionConvertor.java +++ /dev/null @@ -1,7 +0,0 @@ -package plugins.Library.util.concurrent; - -public interface ExceptionConvertor { - - public X convert(RuntimeException e); - -} diff --git a/src/plugins/Library/util/concurrent/Executors.java b/src/plugins/Library/util/concurrent/Executors.java deleted file mode 100644 index b8d9513f..00000000 --- a/src/plugins/Library/util/concurrent/Executors.java +++ /dev/null @@ -1,73 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.concurrent; - -import plugins.Library.util.func.SafeClosure; - -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; // WORKAROUND javadoc bug #4464323 -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; - -/** -** Class providing various {@link Executor}s. -** -** @author infinity0 -*/ -public class Executors { - - /** - ** A {@link SafeClosure} for shutting down an {@link ExecutorService}. 
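
A usage sketch for the BoundedPriorityBlockingQueue above: put() blocks at capacity until a removal notifyAll()s on fullLock, while the plain offer() fails fast. The demo class is ours, and the <Integer> type argument stands in for the generic parameter that the diff rendering has stripped:

    // Demonstrates the fullLock protocol of the bounded wrapper above.
    public class BoundedQueueDemo {
        public static void main(String[] args) throws InterruptedException {
            final BoundedPriorityBlockingQueue<Integer> q =
                    new BoundedPriorityBlockingQueue<Integer>(2);
            q.put(3);
            q.put(1);                         // queue is now at capacity
            System.out.println(q.offer(2));   // false: full, and offer() never blocks

            new Thread() {
                @Override public void run() {
                    try { Thread.sleep(100); } catch (InterruptedException e) { }
                    q.poll();                 // drains 1, then notifyAll()s fullLock
                }
            }.start();

            q.put(2);                         // blocks until the poll() above frees a slot
            System.out.println(q.poll());     // prints 2 (1 was drained by the helper)
        }
    }
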
(If - ** the input is not an {@link ExecutorService}, does nothing.) - */ - final public static SafeClosure CLEANUP_EXECSERV = new SafeClosure() { - /*@Override**/ public void invoke(Executor exec) { - if (!(exec instanceof ExecutorService)) { return; } - ExecutorService x = (ExecutorService)exec; - x.shutdown(); - try { - while (!x.awaitTermination(1, TimeUnit.SECONDS)); - } catch (InterruptedException e) { } - } - }; - - /** - ** A JVM-wide executor that objects/classes can reference when they don't - ** want to create a new executor themselves. This is a wrapper around a - ** real executor, which can be set ({@link #setDefaultExecutor(Executor)}) - ** without calling methods on every object/class that uses the wrapper. - ** If no backing executor has been set by the time the first call to {@link - ** Executor#execute(Runnable)} is made, a {@link ThreadPoolExecutor} is - ** created, with a thread-cache size of 64, timeout of 1s, and rejection - ** policy of "{@link CallerRunsPolicy caller runs}". - */ - final public static Executor DEFAULT_EXECUTOR = new Executor() { - /*@Override**/ public void execute(Runnable r) { - synchronized (Executors.class) { - if (default_exec == null) { - default_exec = new ThreadPoolExecutor( - 1, 0x40, 60, TimeUnit.SECONDS, - new LinkedBlockingQueue(), - new ThreadPoolExecutor.CallerRunsPolicy() - ); - } - } - default_exec.execute(r); - } - }; - - public static synchronized void setDefaultExecutor(Executor e) { - default_exec = e; - } - - /** - ** The executor backing {@link #DEFAULT_EXECUTOR}. - */ - private static Executor default_exec = null; - - private Executors() { } - -} diff --git a/src/plugins/Library/util/concurrent/Notifier.java b/src/plugins/Library/util/concurrent/Notifier.java deleted file mode 100644 index c1a6fd7f..00000000 --- a/src/plugins/Library/util/concurrent/Notifier.java +++ /dev/null @@ -1,31 +0,0 @@ -package plugins.Library.util.concurrent; - -public class Notifier { - - private boolean notified; - - public synchronized void notifyUpdate() { - notified = true; - notifyAll(); - } - - public synchronized void waitUpdate(int maxWait) { - long start = -1; - long now = -1; - while(!notified) { - if(start == -1) { - start = now = System.currentTimeMillis(); - } else { - now = System.currentTimeMillis(); - } - if(start + maxWait <= now) return; - try { - wait((start + maxWait - now)); - } catch (InterruptedException e) { - // Ignore - } - } - notified = false; - } - -} diff --git a/src/plugins/Library/util/concurrent/ObjectProcessor.java b/src/plugins/Library/util/concurrent/ObjectProcessor.java deleted file mode 100644 index b0049a34..00000000 --- a/src/plugins/Library/util/concurrent/ObjectProcessor.java +++ /dev/null @@ -1,400 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
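
Two observations about the DEFAULT_EXECUTOR above, with a sketch. First, a ThreadPoolExecutor fed by an unbounded LinkedBlockingQueue never grows past its core size, so core size 1 is the effective concurrency limit and the rejection policy only matters after shutdown; also, the keep-alive in the code is 60 seconds, not the 1s the comment mentions. Second, the field holding the backing executor is read outside the synchronized block, so safe publication needs a volatile field (or a read under the same lock). A minimal sketch of the same lazy-init wrapper; the class and field names are ours:

    import java.util.concurrent.Executor;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    // A process-wide executor wrapper that lazily creates its backing pool
    // on first use. The volatile field makes the unsynchronized fast-path
    // read safe; without it, another thread may see a stale null or a
    // half-published pool.
    final class LazyDefaultExecutor implements Executor {
        private static volatile Executor backing;   // safe publication

        public void execute(Runnable r) {
            Executor e = backing;
            if (e == null) {
                synchronized (LazyDefaultExecutor.class) {
                    if (backing == null) {
                        // With an unbounded queue the pool never grows past
                        // corePoolSize, so the core size is the real limit.
                        backing = new ThreadPoolExecutor(
                                1, 0x40, 60, TimeUnit.SECONDS,
                                new LinkedBlockingQueue<Runnable>(),
                                new ThreadPoolExecutor.CallerRunsPolicy());
                    }
                    e = backing;
                }
            }
            e.execute(r);
        }
    }
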
*/ -package plugins.Library.util.concurrent; - -import static plugins.Library.util.func.Tuples.X2; -import static plugins.Library.util.func.Tuples.X3; - -import java.lang.ref.WeakReference; -import java.util.Iterator; -import java.util.Map; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.Executor; -import java.util.concurrent.RejectedExecutionException; - -import plugins.Library.util.func.Closure; -import plugins.Library.util.func.SafeClosure; -import freenet.support.Logger; - -/** -** A class that wraps around an {@link Executor}, for processing any given type -** of object, not just {@link Runnable}. Each object must be accompanied by a -** secondary "deposit" object, which is returned with the object when it has -** been processed. Any exceptions thrown are also returned. -** -** @param Type of object to be processed -** @param Type of object to be used as a deposit -** @param Type of exception thrown by {@link #clo} -** @author infinity0 -*/ -public class ObjectProcessor implements Scheduler { - - final protected BlockingQueue in; - final protected BlockingQueue> out; - final protected Map dep; - final protected Closure clo; - final protected Executor exec; - final protected ExceptionConvertor convertor; - // Allows a group of ObjectProcessor's to all notify the same object so that the caller doesn't have to poll. - protected Notifier notifier; - - protected volatile boolean open = true; - protected int dispatched = 0; - protected int completed = 0; - protected int started = 0; - - private static volatile boolean logMINOR; - private static volatile boolean logDEBUG; - - static { - Logger.registerClass(ObjectProcessor.class); - } - - // Most ObjectProcessor's are likely to be autostart()'ed, and this way we can - // still use ConcurrentMap for pending while having garbage collection. - private final WeakReference myRef = new WeakReference(this); - - // TODO NORM make a more intelligent way of adjusting this - final public static int default_maxconc = 0x28; - int maxconc = default_maxconc; - - final protected SafeClosure> postProcess = new SafeClosure>() { - /*@Override**/ public void invoke(X2 res) { - try { - out.put(res); - if(notifier != null) notifier.notifyUpdate(); - synchronized(ObjectProcessor.this) { ++completed; } - } catch (InterruptedException e) { - throw new UnsupportedOperationException(); - } - } - }; - - public int outputCapacity() { - return out.remainingCapacity(); - } - - public int outputSize() { - return out.size(); - } - - final private static ConcurrentMap, Boolean> pending = new ConcurrentHashMap, Boolean>(); - // This must only be modified in a static synchronized block - private static Thread auto = null; - - /** - ** Constructs a new processor. The processor itself will be thread-safe - ** as long as the queues and deposit map are not exposed to other threads, - ** and the closure's invoke method is also thread-safe. - ** - ** If the {@code closure} parameter is {@code null}, it is expected that - ** {@link #createJobFor(Object)} will be overridden appropriately. - ** - ** @param input Queue for input items - ** @param output Queue for output/error items - ** @param deposit Map for item deposits - ** @param closure Closure to call on each item - ** @param executor Executor to run each closure call - ** @param autostart Whether to start an {@link #auto()} autohandler - ** @param notifier A Notifier to tell whenever something has happened - e.g. 
results are ready. - */ - public ObjectProcessor( - BlockingQueue input, BlockingQueue> output, Map deposit, - Closure closure, Executor executor, ExceptionConvertor conv, Notifier n - ) { - in = input; - out = output; - dep = deposit; - clo = closure; - exec = executor; - convertor = conv; - notifier = n; - } - - public ObjectProcessor( - BlockingQueue input, BlockingQueue> output, Map deposit, - Closure closure, Executor executor, ExceptionConvertor conv - ) { - this(input, output, deposit, closure, executor, conv, null); - } - - // FIXME should notifier be volatile? If we set it before starting, maybe that's not necessary? - public void setNotifier(Notifier n) { - this.notifier = n; - } - - public synchronized void setMaxConc(int x) { - maxconc = x; - } - - /** - ** Safely submits the given item and deposit to the given processer. Only - ** use this when the input queue's {@link BlockingQueue#put(Object)} method - ** does not throw {@link InterruptedException}, such as that of {@link - ** java.util.concurrent.PriorityBlockingQueue}. - */ - public static void submitSafe(ObjectProcessor proc, T item, E deposit) { - try { - proc.submit(item, deposit); - } catch (InterruptedException e) { - throw new IllegalArgumentException("ObjectProcessor: abuse of submitSafe(). Blame the programmer, who did not know what they were doing", e); - } - } - - /** - ** Submits an item for processing, with the given deposit. - ** - ** @throws IllegalStateException if the processor has already been {@link - ** #close() closed} - ** @throws IllegalArgumentException if the item is already being held - */ - public void submit(T item, E deposit) throws InterruptedException { - synchronized(this) { - if (!open) { throw new IllegalStateException("ObjectProcessor: not open"); } - if (dep.containsKey(item)) { - throw new IllegalArgumentException("ObjectProcessor: object " + item + " already submitted"); - } - - dep.put(item, deposit); - } - // in.put() can block. Don't hold the outer lock during pushing. - // Note that this can result in more stuff being in dep than is in in. This is okay, assuming that - // we don't have an infinite number of calling threads. - in.put(item); - } - - /** - ** Updates the deposit for a given item. - ** - ** @throws IllegalStateException if the processor has already been {@link - ** #close() closed} - ** @throws IllegalArgumentException if the item is not currently being held - */ - public synchronized void update(T item, E deposit) { - if (!open) { throw new IllegalStateException("ObjectProcessor: not open"); } - if (!dep.containsKey(item)) { - throw new IllegalArgumentException("ObjectProcessor: object " + item + " not yet submitted"); - } - - dep.put(item, deposit); - } - - /** - ** Retrieved a processed item, along with its deposit and any exception - ** that caused processing to abort. - */ - public X3 accept() throws InterruptedException { - X2 item = out.take(); - // DO NOT hold the lock while blocking on out. - synchronized(this) { - return X3(item._0, dep.remove(item._0), item._1); - } - } - - /** - ** Whether there are any unprocessed items (including completed tasks not - ** yet retrieved by the submitter). - */ - public synchronized boolean hasPending() { - return !dep.isEmpty(); - } - - /** - ** Whether there are any completed items that have not yet been retrieved. - */ - public synchronized boolean hasCompleted() { - return !out.isEmpty(); - } - - /** - ** Number of unprocessed tasks. 
- */ - public synchronized int size() { - return dep.size(); - } - - /** - ** Retrieves an item by calling {@link BlockingQueue#take()} on the input - ** queue. If this succeeds, a job is {@linkplain #createJobFor(Object) - ** created} for it, and sent to {@link #exec} to be executed. - ** - ** This method is provided for completeness, in case anyone needs it; - ** {@link #auto()} should be adequate for most purposes. - ** - ** @throws InterruptedExeception if interrupted whilst waiting - */ - public synchronized void dispatchTake() throws InterruptedException { - throw new UnsupportedOperationException("not implemented"); - /* - * TODO NORM first needs a way of obeying maxconc - T item = in.take(); - exec.execute(createJobFor(item)); - ++dispatched; - */ - } - - /** - ** Retrieves an item by calling {@link BlockingQueue#poll()} on the input - ** queue. If this succeeds, a job is {@linkplain #createJobFor(Object) - ** created} for it, and sent to {@link #exec} to be executed. - ** - ** This method is provided for completeness, in case anyone needs it; - ** {@link #auto()} should be adequate for most purposes. - ** - ** @return Whether a task was retrieved and executed - */ - public boolean dispatchPoll() { - synchronized(this) { - if (dispatched - completed >= maxconc) { return false; } - } - // DO NOT hold the lock while blocking on in. - T item = in.poll(); - synchronized(this) { - if (item == null) { return false; } - exec.execute(createJobFor(item)); - ++dispatched; - return true; - } - } - - /** - ** Creates a {@link Runnable} to process the item and push it onto the - ** output queue, along with any exception that aborted the process. - ** - ** The default implementation invokes {@link #clo} on the item, and then - ** adds the appropriate data onto the output queue. - */ - protected Runnable createJobFor(final T item) { - if (clo == null) { - throw new IllegalStateException("ObjectProcessor: no closure given, but createJobFor() was not overidden"); - } - return new Runnable() { - /*@Override**/ public void run() { - X ex = null; - synchronized(ObjectProcessor.this) { ++started; } - RuntimeException ee = null; - try { clo.invoke(item); } - // FIXME NORM this could throw RuntimeException - catch (RuntimeException e) { - Logger.error(this, "Caught "+e, e); - System.err.println("In ObjProc-"+name+" : "+e); - e.printStackTrace(); - ex = convertor.convert(e); - } - catch (Exception e) { ex = (X)e; } - postProcess.invoke(X2(item, ex)); - } - }; - } - - /** - ** Start a new thread to run the {@link #pending} processors, if one is not - ** already running. - */ - private static synchronized void ensureAutoHandler() { - if (auto != null) { return; } - auto = new Thread() { - @Override public void run() { - final int timeout = 4; - int t = timeout; - while (!pending.isEmpty() && (t=timeout) == timeout || t-- > 0) { - for (Iterator> it = pending.keySet().iterator(); it.hasNext();) { - WeakReference ref = it.next(); - ObjectProcessor proc = ref.get(); - if(proc == null) { - it.remove(); - continue; - } - try { - boolean o = proc.open; - while (proc.dispatchPoll()); - if (!o) { it.remove(); } - } catch (RejectedExecutionException e) { - // FIXME NORM - // neither Executors.DEFAULT_EXECUTOR nor Freenet's in-built executors - // throw this, so this is not a high priority - Logger.error(this, "REJECTED EXECUTION", e); - } - } - try { - // FIXME why do we sleep here at all? - // FIXME for small jobs, such as merging to disk in SpiderIndexUploader, this can - // dominate the execution time! 
- // FIXME the executors are perfectly capable of limiting the number of threads running... - Thread.sleep(100); - } catch (InterruptedException e) { - // TODO LOW log this somewhere - } - // System.out.println("pending " + pending.size()); - - if (t > 0) { continue; } - synchronized (ObjectProcessor.class) { - // if auto() was called just before we entered this synchronized block, - // then its ensureAutoHandler() would have done nothing. so we want to keep - // this thread running to take care of the new addition. - if (!pending.isEmpty()) { continue; } - // otherwise we can safely discard this thread, since ensureAutoHandler() - // cannot be called as long as we are in this block. - auto = null; - return; - } - } - throw new AssertionError("ObjectProcessor: bad exit in autohandler. this is a bug; please report."); - } - }; - auto.start(); - } - - /** - ** Add this processor to the collection of {@link #pending} processes, and - ** makes sure there is a thread to handle them. - ** - ** @return Whether the processor was not already being handled. - */ - public boolean auto() { - Boolean r = ObjectProcessor.pending.put(myRef, Boolean.TRUE); - ObjectProcessor.ensureAutoHandler(); - return r == null; - } - - /** - ** Call {@link #auto()} and return {@code this}. - */ - public ObjectProcessor autostart() { - auto(); - return this; - } - - /** - ** Stop accepting new submissions or deposit updates. Held items can still - ** be processed and retrieved, and if an {@linkplain #auto() auto-handler} - ** is running, it will run until all such items have been processed. - */ - /*@Override**/ public void close() { - open = false; - } - - // public class Object - - /** - ** {@inheritDoc} - ** - ** This implementation just calls {@link #close()}. - */ - @Override public void finalize() { - close(); - } - - - protected String name; - public void setName(String n) { - name = n; - } - @Override public String toString() { - return "ObjProc-" + name + ":{" + size() + "|" + dispatched + "|" + started + "|" + completed + "}"; - } - -} diff --git a/src/plugins/Library/util/concurrent/Scheduler.java b/src/plugins/Library/util/concurrent/Scheduler.java deleted file mode 100644 index e1759445..00000000 --- a/src/plugins/Library/util/concurrent/Scheduler.java +++ /dev/null @@ -1,21 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.concurrent; - -/** -** An interface for a general class that accepts objects to be acted on. -** -** TODO NORM maybe just get rid of this since we now have ObjectProcessor, or -** import its methods here? -** -** @author infinity0 -*/ -public interface Scheduler extends java.io.Closeable { - - /** - ** Stop accepting objects (but continue any started actions). - */ - public void close(); - -} diff --git a/src/plugins/Library/util/event/AbstractSweeper.java b/src/plugins/Library/util/event/AbstractSweeper.java deleted file mode 100644 index 3eaa8d03..00000000 --- a/src/plugins/Library/util/event/AbstractSweeper.java +++ /dev/null @@ -1,118 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
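
The ObjectProcessor above is essentially a submit/accept pipeline: items travel in with a deposit, a closure runs on an executor, and (item, deposit, error) triples come back out. A condensed standalone sketch of that pattern, with all names ours and error propagation and close() handling omitted:

    import java.util.Map;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;

    // Condensed sketch of the ObjectProcessor pattern above.
    final class MiniProcessor<T, E> {
        private final BlockingQueue<T> out = new LinkedBlockingQueue<T>();
        private final ConcurrentMap<T, E> deposits = new ConcurrentHashMap<T, E>();
        private final ExecutorService exec =
                java.util.concurrent.Executors.newFixedThreadPool(4); // JDK class, not the one above

        void submit(final T item, E deposit) {
            if (deposits.putIfAbsent(item, deposit) != null)
                throw new IllegalArgumentException("already submitted: " + item);
            exec.execute(new Runnable() {
                public void run() {
                    // ... process item here ...
                    out.add(item);               // hand the item back when done
                }
            });
        }

        // Mirrors accept(): blocks for a finished item, then detaches and
        // returns the deposit that travelled with it.
        Map.Entry<T, E> accept() throws InterruptedException {
            T item = out.take();
            return new java.util.AbstractMap.SimpleEntry<T, E>(item, deposits.remove(item));
        }
    }
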
*/ -package plugins.Library.util.event; - -/** -** A partial implementation of {@link Sweeper}, defining some high-level -** methods in terms of lower ones. -** -** This implementation is '''not''' thread-safe. -** -** @author infinity0 -*/ -abstract public class AbstractSweeper implements Sweeper { - - protected State state; - final public boolean once; - - /** - ** Construct a new sweeper. - ** - ** @param autostart Whether to construct the sweeper already open - ** @param onceonly Whether the sweeper can only be opened once - */ - protected AbstractSweeper(boolean autostart, boolean onceonly) { - state = (autostart)? State.OPEN: State.NEW; - once = onceonly; - } - - /** - ** Add an object to the underlying collection. Implementations need not - ** check for a valid state; this is done by {@link #acquire(Object)}. - ** - ** @return Whether the object was added - */ - abstract protected boolean add(T object); - - /** - ** Remove an object from the underlying collection. Implementations need not - ** check for a valid state; this is done by {@link #release(Object)}. - ** - ** @return Whether the object was removed - */ - abstract protected boolean remove(T object); - - /** - ** Helper method for implementations that also implement {@link Iterable}. - ** - ** @param it The iterator to remove an element from - */ - protected void releaseFrom(java.util.Iterator it) { - if (state == State.NEW) { throw new IllegalStateException("Sweeper: not yet opened"); } - if (state == State.CLEARED) { throw new IllegalStateException("Sweeper: already cleared"); } - - it.remove(); - if (state == State.CLOSED && !it.hasNext()) { state = State.CLEARED; } - } - - /*======================================================================== - public interface Sweeper - ========================================================================*/ - - /** - ** {@inheritDoc} - ** - ** This implementation also throws {@link IllegalStateException} if {@code - ** once} is {@code true} and the sweeper has already been opened once. 
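
A usage sketch of the Sweeper lifecycle (NEW, then OPEN, then CLOSED, then CLEARED), using the CountingSweeper subclass deleted later in this patch. The <String> type argument stands in for the type parameter that the diff rendering has stripped, and the main() wrapper is ours:

    // Drives a sweeper through its whole lifecycle.
    public class SweeperLifecycleDemo {
        public static void main(String[] args) {
            CountingSweeper<String> sw = new CountingSweeper<String>(false, true);
            sw.open();                   // NEW -> OPEN
            sw.acquire("job-1");
            sw.acquire("job-2");
            sw.close();                  // OPEN -> CLOSED: no further acquire()s
            sw.release("job-1");
            sw.release("job-2");         // count reaches 0: CLOSED -> CLEARED
            System.out.println(sw.isCleared());   // true
        }
    }
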
- */ - /*@Override**/ public void open() { - if (state != State.NEW && (once || state != State.CLOSED)) { throw new IllegalStateException("Sweeper: already opened or cleared"); } - state = State.OPEN; - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public boolean acquire(T object) { - if (state != State.OPEN) { throw new IllegalStateException("Sweeper: not open"); } - - return add(object); - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public boolean release(T object) { - if (state == State.NEW) { throw new IllegalStateException("Sweeper: not yet opened"); } - if (state == State.CLEARED) { throw new IllegalStateException("Sweeper: already cleared"); } - - boolean b = remove(object); - if (state == State.CLOSED && size() == 0) { state = State.CLEARED; } - return b; - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public void close() { - if (state != State.OPEN) { throw new IllegalStateException("Sweeper: not open"); } - state = State.CLOSED; - if (size() == 0) { state = State.CLEARED; } - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public State getState() { - return state; - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public boolean isCleared() { - return state == State.CLEARED; - } - -} diff --git a/src/plugins/Library/util/event/CountingSweeper.java b/src/plugins/Library/util/event/CountingSweeper.java deleted file mode 100644 index f883209d..00000000 --- a/src/plugins/Library/util/event/CountingSweeper.java +++ /dev/null @@ -1,56 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.event; - -/** -** A {@link Sweeper} which only counts the number of objects added. It does -** not care about which objects these are; it assumes the user handles this. -** -** @author infinity0 -*/ -public class CountingSweeper extends AbstractSweeper { - - protected int count = 0; - - /** - ** Construct a new sweeper. - ** - ** @param autostart Whether to construct the sweeper already open - ** @param onceonly Whether the sweeper can only be opened once - */ - public CountingSweeper(boolean autostart, boolean onceonly) { - super(autostart, onceonly); - } - - /** - ** {@inheritDoc} - ** - ** This implementation will always return true, since it does not keep - ** track of the actual count in the collection. - */ - @Override protected boolean add(T object) { - ++count; - return true; - } - - /** - ** {@inheritDoc} - ** - ** This implementation will always return true, since it does not keep - ** track of the actual count in the collection. - */ - @Override protected boolean remove(T object) { - if (count == 0) { throw new IllegalStateException("CountingSweeper: no objects to release: " + object); } - --count; - return true; - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public int size() { - return count; - } - -} diff --git a/src/plugins/Library/util/event/Sweeper.java b/src/plugins/Library/util/event/Sweeper.java deleted file mode 100644 index ea8de10c..00000000 --- a/src/plugins/Library/util/event/Sweeper.java +++ /dev/null @@ -1,70 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.event; - -/** -** A class which holds a group of objects, eg. 
for the purposes of delaying an -** action until all objects have been released. -** -** @author infinity0 -*/ -public interface Sweeper extends java.io.Closeable { - - public enum State { NEW, OPEN, CLOSED, CLEARED } - - /** - ** Start accepting objects. If the trigger condition is reached whilst the - ** sweeper is accepting objects, execution should be delayed until {@link - ** #close()} is called. - ** - ** @throws IllegalStateException if the current state is not {@link - ** State#NEW NEW} or {@link State#CLOSED CLOSED} - */ - public void open(); - - /** - ** Acquire the given object for tracking. - ** - ** @return Whether the object was acquired - ** @throws IllegalStateException if the current state is not {@link - ** State#OPEN OPEN} - */ - public boolean acquire(T object); - - /** - ** Release the given object from being tracked. - ** - ** When the sweeper is {@link State#CLOSED CLOSED} and there are no - ** more objects to track, it becomes {@link State#CLEARED CLEARED}. - ** - ** @return Whether the object was released - ** @throws IllegalStateException if the current state is not {@link - ** State#OPEN OPEN} or {@link State#CLOSED CLOSED}. - */ - public boolean release(T object); - - /** - ** Stop accepting objects. - ** - ** @throws IllegalStateException if the current state is not {@link - ** State#OPEN OPEN}. - */ - public void close(); - - /** - ** Returns the current state. - */ - public State getState(); - - /** - ** Returns whether the sweeper is {@link State#CLEARED CLEARED}. - */ - public boolean isCleared(); - - /** - ** Returns the number of objects held by the sweeper. - */ - public int size(); - -} diff --git a/src/plugins/Library/util/event/TrackingSweeper.java b/src/plugins/Library/util/event/TrackingSweeper.java deleted file mode 100644 index a1e00849..00000000 --- a/src/plugins/Library/util/event/TrackingSweeper.java +++ /dev/null @@ -1,112 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.event; - -import java.util.Collections; -import java.util.Iterator; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.SortedSet; - -/** -** A {@link Sweeper} which uses a given {@link Collection} to keep track of the -** objects added. It also allows iteration through these objects, and supports -** calls to {@link Iterator#remove()}. -** -** @author infinity0 -*/ -public class TrackingSweeper> extends AbstractSweeper implements Iterable { - - final protected C objs; - final protected C objs_i; - - /** - ** Construct a new sweeper. - ** - ** If {@code coll_immute} is {@code null}, then {@code coll} will be used - ** in its place. This has a slight performance advantage over a wrapping - ** "immutable" view; but only use it when trusted code is going to access - ** the sweeper. - ** - ** @param autostart Whether to construct the sweeper already open - ** @param onceonly Whether the sweeper can only be opened once - ** @param coll A {@link Collection} to hold the items in - ** @param coll_immute An immutable view of the same collection - */ - public TrackingSweeper(boolean autostart, boolean onceonly, C coll, C coll_immute) { - super(autostart, onceonly); - if (!coll.isEmpty()) { - throw new IllegalArgumentException("TrackingSweeper: cannot use a non-empty collection"); - } - objs = coll; - objs_i = (coll_immute == null)? 
coll: coll_immute; - } - - /** - ** Construct a new sweeper, automatically inferring an immutable view using - ** {@link #inferImmutable(Collection)} - ** - ** @param autostart Whether to construct the sweeper already open - ** @param coll A {@link Collection} to hold the items in - */ - public TrackingSweeper(boolean autostart, boolean onceonly, C coll) { - this(autostart, onceonly, coll, inferImmutable(coll)); - } - - /** - ** Attempts to infer an immutable view of the given collection, using the - ** available static creators given in {@link Collections}. - ** - ** Note that this method will always return an immutable {@link Collection} - ** if the input is not a {@link SortedSet}, {@link Set}, or {@link List}; - ** this may or may not be what you want. - */ - public static > C inferImmutable(C coll) { - if (coll instanceof SortedSet) { - return (C)Collections.unmodifiableSortedSet((SortedSet)coll); - } else if (coll instanceof Set) { - return (C)Collections.unmodifiableSet((Set)coll); - } else if (coll instanceof List) { - return (C)Collections.unmodifiableList((List)coll); - } else { - return (C)Collections.unmodifiableCollection(coll); - } - } - - public C view() { - return objs_i; - } - - /** - ** {@inheritDoc} - */ - @Override protected boolean add(T object) { - return objs.add(object); - } - - /** - ** {@inheritDoc} - */ - @Override protected boolean remove(T object) { - return objs.remove(object); - } - - /** - ** {@inheritDoc} - */ - /*@Override**/ public int size() { - return objs.size(); - } - - /*@Override**/ public Iterator iterator() { - return new Iterator() { - final Iterator it = objs.iterator(); - /*@Override**/ public boolean hasNext() { return it.hasNext(); } - /*@Override**/ public T next() { return it.next(); } - /*@Override**/ public void remove() { releaseFrom(it); } - }; - } - -} diff --git a/src/plugins/Library/util/exec/AbstractExecution.java b/src/plugins/Library/util/exec/AbstractExecution.java deleted file mode 100644 index 7b8a4c0f..00000000 --- a/src/plugins/Library/util/exec/AbstractExecution.java +++ /dev/null @@ -1,176 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -import java.util.Date; -import java.util.Set; -import java.util.HashSet; - -/** -** A partial implementation of {@link Execution}, defining some higher-level -** functionality in terms of lower-level ones. -** -** To implement {@link Execution} fully from this class, the programmer needs to -** implement the following methods: -** -** * {@link Progress#getParts()} -** -** The programmer must call {@link #setResult(Object)} and {@link -** #setError(TaskAbortException)} to set the error and result fields correctly. -** If these methods are overridden, they '''must''' call the overriden ones -** using {@code super}. -** -** The programmer might also wish to override the following: -** -** * {@link #join()} -** -** @author MikeB -** @author infinity0 -*/ -public abstract class AbstractExecution implements Execution { - - protected Date start; - protected Date stop = null; - - /** - ** Keeps track of the acceptors. - */ - final protected Set> accept = new HashSet>(); - - /** - ** Subject of the execution. - */ - final protected String subject; - - /** - ** Holds the error that caused the operation to abort, if any. If not - ** {@code null}, then thrown by {@link #getResult()}. 
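
The inferImmutable() dispatch in the TrackingSweeper above can be reproduced standalone. One subtlety worth showing: the caller should pin the type variable C to an interface type, because the compiler inserts a cast to the erasure of whatever C is inferred as. The demo class and method names are ours:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;
    import java.util.Set;
    import java.util.SortedSet;
    import java.util.TreeSet;

    // Reproduction of the inferImmutable() dispatch: the most specific
    // Collections.unmodifiable* wrapper is chosen by runtime type.
    public class InferImmutableDemo {
        @SuppressWarnings("unchecked")
        static <T, C extends Collection<T>> C inferImmutable(C coll) {
            if (coll instanceof SortedSet) { return (C)Collections.unmodifiableSortedSet((SortedSet<T>)coll); }
            if (coll instanceof Set) { return (C)Collections.unmodifiableSet((Set<T>)coll); }
            if (coll instanceof List) { return (C)Collections.unmodifiableList((List<T>)coll); }
            return (C)Collections.unmodifiableCollection(coll);
        }

        public static void main(String[] args) {
            // The explicit witness pins C to an interface type. If C were
            // inferred as TreeSet, the compiler-inserted cast at this call
            // site would fail: the wrapper is a SortedSet, not a TreeSet.
            SortedSet<String> view =
                    InferImmutableDemo.<String, SortedSet<String>>inferImmutable(new TreeSet<String>());
            try {
                view.add("x");
            } catch (UnsupportedOperationException e) {
                System.out.println("immutable view rejected the write");
            }
        }
    }
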
- */ - private TaskAbortException error; - - /** - ** Holds the result of the operation once it completes. If {@link #error} - ** is {@code null}, then returned by {@link #getResult()}. - */ - private V result; - - /** - ** Create an {@code Execution} with the given subject. - ** - ** @param subj The subject - */ - public AbstractExecution(String subj) { - this.subject = subj; - setStartDate(); - } - - protected synchronized void setStartDate() { - if (start != null) { throw new IllegalStateException("Execution is already running"); } - start = new Date(); - for (ExecutionAcceptor acc: accept) { offerStarted(acc); } - } - - protected void offerStarted(ExecutionAcceptor acc) { - try { - acc.acceptStarted(this); - } catch (RuntimeException e) { - // FIXME NORM log this somewhere - } - } - - protected synchronized void setResult(V res) { - if (stop != null) { throw new IllegalStateException("Execution has already finished."); } - result = res; - stop = new Date(); - notifyAll(); - for (ExecutionAcceptor acc: accept) { offerDone(acc); } - } - - protected void offerDone(ExecutionAcceptor acc) { - try { - acc.acceptDone(this, result); - } catch (RuntimeException e) { - // FIXME NORM log this somewhere - } - } - - protected synchronized void setError(TaskAbortException err) { - if (stop != null) { throw new IllegalStateException("Execution has already finished."); } - error = err; - stop = new Date(); - notifyAll(); - for (ExecutionAcceptor acc: accept) { offerAborted(acc); } - } - - protected void offerAborted(ExecutionAcceptor acc) { - try { - acc.acceptAborted(this, error); - } catch (RuntimeException e) { - // FIXME NORM log this somewhere - } - } - - /*======================================================================== - public interface Progress - ========================================================================*/ - - /*@Override**/ public String getSubject() { - return subject; - } - - abstract public String getStatus(); - - abstract public ProgressParts getParts() throws TaskAbortException; - - /*@Override**/ public synchronized boolean isDone() throws TaskAbortException { - if (error != null) { throw error; } - return result != null; - } - - /*@Override**/ public synchronized boolean isStarted() { - return start != null; - } - - /*@Override**/ public synchronized void join() throws InterruptedException, TaskAbortException { - while (stop == null) { wait(); } - if (error != null) { throw error; } - } - - /*======================================================================== - public interface Execution - ========================================================================*/ - - /*@Override**/ public Date getStartDate() { - return start; - } - - /*@Override**/ public long getTimeElapsed() { - return System.currentTimeMillis() - start.getTime(); - } - - /*@Override**/ public long getTimeTaken() { - if (stop == null) { return -1; } - return stop.getTime() - start.getTime(); - } - - /** - ** {@inheritDoc} - ** - ** This implementation returns {@link #result} if {@link #error} is {@code - ** null}, otherwise it throws it. 
-	*/
-	/*@Override**/ public V getResult() throws TaskAbortException {
-		if (error != null) { throw error; }
-		return result;
-	}
-
-	/*@Override**/ public synchronized void addAcceptor(ExecutionAcceptor<V> acc) {
-		accept.add(acc);
-		// trigger the event if the task is already done/aborted
-		if (start != null) { offerStarted(acc); }
-		if (error != null) { offerAborted(acc); }
-		if (result != null) { offerDone(acc); }
-	}
-
-}
diff --git a/src/plugins/Library/util/exec/BaseCompositeProgress.java b/src/plugins/Library/util/exec/BaseCompositeProgress.java
deleted file mode 100644
index 050f3f02..00000000
--- a/src/plugins/Library/util/exec/BaseCompositeProgress.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/* This code is part of Freenet. It is distributed under the GNU General
- * Public License, version 2 (or at your option any later version). See
- * http://www.gnu.org/ for further details of the GPL. */
-package plugins.Library.util.exec;
-
-/**
-** A progress that accumulates its data from the given group of progresses.
-**
-** TODO NORM perhaps synchronize
-**
-** DOCUMENT
-**
-** @author infinity0
-*/
-public class BaseCompositeProgress implements CompositeProgress {
-
-	protected String subject = "??";
-
-	protected Iterable<? extends Progress> subprogress = java.util.Collections.emptyList();
-
-	protected int esttype = ProgressParts.ESTIMATE_UNKNOWN;
-
-	public BaseCompositeProgress() { }
-
-	/**
-	** Sets the iterable to accumulate progress information from. There are
-	** several static helper methods you can use to create progress iterables
-	** that are backed by a collection of tracker ids; see the rest of the
-	** documentation for this class.
-	**
-	** @param subs The subprogresses to accumulate information from
-	*/
-	public void setSubProgress(Iterable<? extends Progress> subs) {
-		if (subs == null) {
-			throw new IllegalArgumentException("Can't set a null progress iterable");
-		}
-		subprogress = subs;
-	}
-
-	public void setSubject(String s) {
-		subject = s;
-	}
-
-	/**
-	** @see ProgressParts#totalest
-	*/
-	public void setEstimate(int est) {
-		// TODO NORM size check?
-		esttype = est;
-	}
-
-	/*========================================================================
-	  public interface Progress
-	 ========================================================================*/
-
-	/*@Override**/ public String getSubject() {
-		return subject;
-	}
-
-	/*@Override**/ public String getStatus() {
-		try {
-			return getParts().toString();
-		} catch (TaskAbortException e) {
-			assert(e.isError()); // in-progress / complete tasks should have been rm'd from the underlying iterable
-			return "Aborted: " + e.getMessage();
-		}
-	}
-
-	/*@Override**/ public ProgressParts getParts() throws TaskAbortException {
-		return ProgressParts.getParts(subprogress, esttype);
-	}
-
-	/*@Override**/ public Iterable<? extends Progress> getSubProgress() {
-		// TODO NORM make this check that subprogress is immutable, and if not return a wrapper
-		return subprogress;
-	}
-
-	/*@Override**/ public boolean isStarted() {
-		for (Progress p: subprogress) {
-			if (p != null && p.isStarted()) { return true; }
-		}
-		return false;
-	}
-
-	/*@Override**/ public boolean isDone() throws TaskAbortException {
-		for (Progress p: subprogress) {
-			if (p == null || !p.isDone()) { return false; }
-		}
-		return true;
-	}
-
-	/**
-	** {@inheritDoc}
-	**
-	** This method just checks whether at least one subprogress is not yet done.
- */ - /*@Override**/ public boolean isPartiallyDone() { - for (Progress p: subprogress) { - try { - if (p != null && !p.isDone()) { return true; } - } catch (TaskAbortException e) { - continue; - } - } - return false; - } - - /*@Override**/ public void join() throws InterruptedException, TaskAbortException { - int s = 1; - for (;;) { - boolean foundnull = false; - for (Progress p: subprogress) { - if (p == null) { foundnull = true; continue; } - p.join(); - } - if (!foundnull) { break; } - // yes, this is a really shit way of doing this. but the best alternative I - // could come up with was the ugly commented-out stuff in ProgressTracker. - - // anyhow, Progress objects are usually created very very soon after its - // tracking ID is added to the collection backing the Iterable, - // so this shouldn't happen that often. - - Thread.sleep(s*1000); - // extend the sleep time with reducing probability, or reset it to 1 - if (s == 1 || Math.random() < 1/Math.log(s)/4) { ++s; } else { s = 1; } - } - } - - /* - // moved here from ParallelSerialiser. might be useful. - protected void joinAll(Iterable

<P> plist) throws InterruptedException, TaskAbortException { - Iterator<P>
it = plist.iterator(); - while (it.hasNext()) { - P p = it.next(); - try { - p.join(); - } catch (TaskAbortException e) { - if (e.isError()) { - throw e; - } else { - // TODO LOW perhaps have a handleNonErrorAbort() that can be overridden - it.remove(); - } - } - } - } - */ - -} diff --git a/src/plugins/Library/util/exec/ChainedProgress.java b/src/plugins/Library/util/exec/ChainedProgress.java deleted file mode 100644 index 2529064d..00000000 --- a/src/plugins/Library/util/exec/ChainedProgress.java +++ /dev/null @@ -1,42 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Interface representing a {@link Progress} made up a sequence of progresses, -** each occuring one after the other, and whose completion usually (this is -** not enforced) depends on all of the subprogresses completing successfully. -** -** @author infinity0 -*/ -public interface ChainedProgress extends Progress { - - /** - ** {@inheritDoc} - ** - ** Implementations should return {@link ProgressParts#getParts(Iterable, - ** int)}, with the first argument being the collection of already - ** completed progresses plus the current one. - */ - /*@Override**/ public ProgressParts getParts() throws TaskAbortException; - - /** - ** Gets the latest subprogress. The earlier ones should all be complete. - ** Note: '''this can return {@code null}''' which means the next stage - ** hasn't started yet. - */ - public Progress getCurrentProgress(); - - /* - - display would look something like - - {as for Progress.this} - +----------------------------------------------------+ - | {as for getCurrentProgress()} | - +----------------------------------------------------+ - - */ - -} diff --git a/src/plugins/Library/util/exec/CompositeProgress.java b/src/plugins/Library/util/exec/CompositeProgress.java deleted file mode 100644 index 37353c05..00000000 --- a/src/plugins/Library/util/exec/CompositeProgress.java +++ /dev/null @@ -1,54 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Interface representing a {@link Progress} made up of several subprogresses, -** which are generally (though not necessarily) independent. -** -** @author infinity0 -*/ -public interface CompositeProgress extends Progress { - - /** - ** {@inheritDoc} - ** - ** Implementations should return {@link ProgressParts#getSubParts(Iterable, - ** boolean)}, with the first argument being the same underlying collection - ** as {@link #getSubProgress()}. - ** - ** TODO HIGH: BaseCompositeProgress actually uses {@link - ** ProgressParts#getParts(Iterable, int)}.... should we change the above - ** spec? - */ - /*@Override**/ public ProgressParts getParts() throws TaskAbortException; - - /** - ** Gets all the Progress objects backing this progress. The iterator - ** should not support {@link java.util.Iterator#remove()}. - */ - public Iterable getSubProgress(); - - /** - ** Whether the tasks has partially completed. (Implementations at their - ** discretion might chooseto take this to mean "enough tasks to be - ** acceptable", rather "any one task".) 
- */ - public boolean isPartiallyDone(); - - /* - display would look something like: - - {as for (Progress)this} - [ details ] - +--------------------------------------------------+ - | for (Progress p: getSubProgress()) | - | {as for p} | - +--------------------------------------------------+ - - */ - - -} - diff --git a/src/plugins/Library/util/exec/Execution.java b/src/plugins/Library/util/exec/Execution.java deleted file mode 100644 index aee8b54f..00000000 --- a/src/plugins/Library/util/exec/Execution.java +++ /dev/null @@ -1,76 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -import java.util.Date; -//import java.util.concurrent.Future; - -/** -** Interface for an execution being handled concurrently to the threads which -** need to know its status and results. -** -** TODO LOW maybe have {@code Execution} and {@code K getSubject()} -** -** @param Type of result of the execution -** @author MikeB -** @author infinity0 -*/ -public interface Execution extends Progress/*, Future*/ { - - /** - ** Returns the Date object representing the time at which this request was - ** started. - */ - public Date getStartDate(); - - /** - ** Get the number of milliseconds elapsed since the request was started. - */ - public long getTimeElapsed(); - - /** - ** Get the number of milliseconds it took for the execution to complete or - ** abort, or {@code -1} if it has not yet done so. - */ - public long getTimeTaken(); - - /** - ** Gets the result of this operation, or throws the error that caused it to - ** abort. - */ - public V getResult() throws TaskAbortException; - - /** - ** Attach an {@link ExecutionAcceptor} to this execution. - ** - ** Implementations should trigger the appropriate methods on all acceptors - ** as the corresponding events are triggered, and on newly-added acceptors - ** if events have already happened, ''in the same order in which they - ** happened'''. (This is slightly different from a listener, where past - ** events are missed by newly-added listeners.) - ** - ** Generally, adding an acceptor twice should not give different results, - ** but this is up to the implementation. Unchecked exceptions (ie. {@link - ** RuntimeException}s) thrown by any acceptor should be caught (and ideally - ** handled). - */ - public void addAcceptor(ExecutionAcceptor acc); - - /*======================================================================== - public interface Future - ========================================================================*/ - - // TODO LOW could retrofit this if it's ever needed... - - //*@Override**/ public boolean cancel(boolean mayInterruptIfRunning); - - //*@Override**/ public V get(); - - //*@Override**/ public V get(long timeout, TimeUnit unit); - - //*@Override**/ public boolean isCancelled(); - - //*@Override**/ public boolean isDone(); - -} diff --git a/src/plugins/Library/util/exec/ExecutionAcceptor.java b/src/plugins/Library/util/exec/ExecutionAcceptor.java deleted file mode 100644 index 1bb7b3e0..00000000 --- a/src/plugins/Library/util/exec/ExecutionAcceptor.java +++ /dev/null @@ -1,20 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
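
The completion protocol behind AbstractExecution's setResult()/setError()/join() trio boils down to publishing under the monitor and calling notifyAll(); a condensed standalone sketch, with all names illustrative:

    import java.util.Date;

    // Condensed sketch of the completion protocol: results and errors are
    // published under the monitor, and join() blocks until either appears.
    final class MiniExecution<V> {
        private V result;
        private Exception error;
        private Date stop;                    // non-null once finished

        synchronized void setResult(V res) {
            if (stop != null) throw new IllegalStateException("already finished");
            result = res;
            stop = new Date();
            notifyAll();                      // wake every thread blocked in join()
        }

        synchronized void setError(Exception err) {
            if (stop != null) throw new IllegalStateException("already finished");
            error = err;
            stop = new Date();
            notifyAll();
        }

        synchronized V join() throws Exception {
            while (stop == null) { wait(); }  // loop guards against spurious wakeups
            if (error != null) { throw error; }
            return result;
        }
    }

Any number of threads can block in join(); the event replay specified for addAcceptor() above is just this same state inspected at registration time, delivered in the order the events originally happened.
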
*/ -package plugins.Library.util.exec; - -/** -** Accepts state-changes on an execution. -** -** @param The specific type of execution, if any. -** @author infinity0 -*/ -public interface ExecutionAcceptor { - - public void acceptStarted(Execution opn); - - public void acceptDone(Execution opn, V result); - - public void acceptAborted(Execution opn, TaskAbortException abort); - -} diff --git a/src/plugins/Library/util/exec/Progress.java b/src/plugins/Library/util/exec/Progress.java deleted file mode 100644 index 95110c10..00000000 --- a/src/plugins/Library/util/exec/Progress.java +++ /dev/null @@ -1,54 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** An abstraction of the progress of a task. -** -** @author infinity0 -*/ -public interface Progress { - - /** - ** Returns the subject / name of the task, if any. - */ - public String getSubject(); - - /** - ** Returns the current completion status. - */ - public String getStatus(); - - /** - ** Get the details of this progress as a {@link ProgressParts} object, or - ** throw an exception if the task has aborted. - */ - public ProgressParts getParts() throws TaskAbortException; - - /** - ** Whether the progress has started. - */ - public boolean isStarted(); - - /** - ** Whether the progress has completed successfully, or throw an exception - ** if it has aborted. - */ - public boolean isDone() throws TaskAbortException; - - /** - ** Wait for the progress to finish. - */ - public void join() throws InterruptedException, TaskAbortException; - - /* - - display would look something like: - - getName(): getStatus() - | done| started| known| estimate| - - */ - -} diff --git a/src/plugins/Library/util/exec/ProgressParts.java b/src/plugins/Library/util/exec/ProgressParts.java deleted file mode 100644 index f1438d02..00000000 --- a/src/plugins/Library/util/exec/ProgressParts.java +++ /dev/null @@ -1,303 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -import java.util.Formatter; - -/** -** Immutable class representing the parts of a progress. -** -** @author infinity0 -*/ -public class ProgressParts { - - /** - ** The value for {@link #totalest} representing an unknown estimate. - */ - final public static int ESTIMATE_UNKNOWN = -2; - - /** - ** The value for {@link #totalest} representing a finalized estimate. - */ - final public static int TOTAL_FINALIZED = -1; - - /** - ** Parts done. This is never greater than parts started. - */ - final public int done; - - /** - ** Parts started. This is never greater than parts known. - */ - final public int started; - - /** - ** Parts known. If there is an estimate, this is never greater than it. - */ - final public int known; - - /** - ** Total estimated parts. Special values: - ** - ** ;{@code -1} : The final total is known and is equal to {@link #known}. - ** ;{@code -2} : No estimate is available. - ** - ** (All other negative values are invalid.) - */ - final public int totalest; - - /** - ** Constructs a new {@code ProgressPart} from the given arguments, fixing - ** any values which are greater than their next neighbour (For example, - ** 5/4/3/2 -> 2/2/2/2, and 3/7/4/? -> 3/4/4/?). 
- ** - ** @param d Parts {@linkplain #done} - ** @param s Parts {@linkplain #started} - ** @param k Parts {@linkplain #known} - ** @param t {@linkplain #totalest Estimated} total parts - */ - public static ProgressParts normalise(int d, int s, int k, int t) { - if (t >= 0 && k > t) { k = t; } - if (s > k) { s = k; } - if (d > s) { d = s; } - return new ProgressParts(d, s, k, t); - } - - /** - ** Constructs a new {@code ProgressParts} from the given arguments, fixing - ** any values which are greater than their next neighbour (for example, - ** 5/4/3/2 -> 2/2/2/2, and 3/7/4/? -> 3/4/4/?). - ** - ** @param d Parts {@linkplain #done}, same as parts {@linkplain #started} - ** @param k Parts {@linkplain #known} - */ - public static ProgressParts normalise(int d, int k) { - return normalise(d, d, k, ESTIMATE_UNKNOWN); - } - - /** - ** Constructs a new {@code ProgressParts} from the given arguments. - ** - ** @param d Parts {@linkplain #done} - ** @param s Parts {@linkplain #started} - ** @param k Parts {@linkplain #known} - ** @param t {@linkplain #totalest Estimated} total parts - ** @throws IllegalArgumentException if the constraints for the fields are - ** not met. - */ - public ProgressParts(int d, int s, int k, int t) { - if (0 > d || d > s || s > k || (t >= 0 && k > t) || (t < 0 && t != ESTIMATE_UNKNOWN && t != TOTAL_FINALIZED)) { - throw new IllegalArgumentException("ProgressParts (" + d + "/" + s + "/" + k + "/" + t + ") must obey the contract 0 <= done <= started <= known <= totalest or totalest == ESTIMATE_UNKNOWN or TOTAL_FINALIZED"); - } - done = d; - started = s; - known = k; - totalest = t; - } - - /** - ** Constructs a new {@code ProgressParts} from the given arguments. - ** - ** @param d Parts {@linkplain #done} - ** @param s Parts {@linkplain #started} - ** @param k Parts {@linkplain #known} - ** @throws IllegalArgumentException if the constraints for the fields are - ** not met. - */ - public ProgressParts(int d, int s, int k) { - this(d, s, k, ESTIMATE_UNKNOWN); - } - - /** - ** Constructs a new {@code ProgressParts} from the given arguments. - ** - ** @param d Parts {@linkplain #done}, same as parts {@linkplain #started} - ** @param k Parts {@linkplain #known} - ** @throws IllegalArgumentException if the constraints for the fields are - ** not met. - */ - public ProgressParts(int d, int k) { - this(d, d, k, ESTIMATE_UNKNOWN); - } - - /** - ** Whether the total is finalized. ({@code totalest == TOTAL_FINALIZED}) - */ - final public boolean finalizedTotal() { - return totalest == TOTAL_FINALIZED; - } - - /** - ** Whether an estimate exists. ({@code totalest != ESTIMATE_UNKNOWN}) - */ - final public boolean hasEstimate() { - return totalest != ESTIMATE_UNKNOWN; - } - - /** - ** Whether the task is done, ie. whether {@code done == started == known} - ** and the total is {@linkplain #totalest finalized}. - */ - final public boolean isDone() { - return done == started && started == known && totalest == TOTAL_FINALIZED; - } - - /** - ** Returns the parts done as a fraction of the parts known. - */ - final public float getKnownFractionDone() { - return known == 0? 0.0f: (float)done / known; - } - - /** - ** Returns the parts started as a fraction of the parts known. - */ - final public float getKnownFractionStarted() { - return known == 0? 0.0f: (float)started / known; - } - - /** - ** Returns the parts done as a fraction of the estimated total parts. - ** Note: this will return negative (ie. invalid) if there is no estimate.
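Callers should therefore guard on hasEstimate() before trusting the estimated fractions; a minimal sketch:

    ProgressParts pp = ProgressParts.normalise(3, 4); // 3/3/4, no estimate
    float f = pp.hasEstimate()? pp.getEstimatedFractionDone()
                              : pp.getKnownFractionDone(); // 0.75f here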
- */ - final public float getEstimatedFractionDone() { - return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? getKnownFractionDone(): (float)done / totalest; - } - - /** - ** Returns the parts started as a fraction of the estimated total parts. - ** Note: this will return negative (ie. invalid) if there is no estimate. - */ - final public float getEstimatedFractionStarted() { - return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? getKnownFractionStarted(): (float)started / totalest; - } - - /** - ** Returns the parts known as a fraction of the estimated total parts. - ** Note: this will return negative (ie. invalid) if there is no estimate. - */ - final public float getEstimatedFractionKnown() { - return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? 1.0f: (float)known / totalest; - } - - /** - ** Returns a summary of the progress in the form {@code d/s/k/t??}. - ** - ** * If {@code done == started}, only one will be included. - ** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate - ** will be omitted. - ** * If there is no estimate, only the {@code ??} will be included. - */ - @Override final public String toString() { - String s = done + "/"; - if (done != started) { s += started + "/"; } - s += known; - if (!finalizedTotal()) { - s += hasEstimate()? "/" + totalest + "??" : "/??"; - } - return s; - } - - /** - ** Returns a summary of the progress in the form {@code known_fraction_done - ** (estimated_fraction_done)}. - ** - ** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate - ** will be omitted. - ** * If there is no estimate, only the {@code ??} will be included. - */ - final public String toFractionString() { - Formatter f = new Formatter(); - if (finalizedTotal()) { - f.format("%.4f", getKnownFractionDone()); - } else if (!hasEstimate()) { - f.format("%.4f (??)", getKnownFractionDone()); - } else { - f.format("%.4f (%.4f??)", getKnownFractionDone(), getEstimatedFractionDone()); - } - return f.toString(); - } - - /** - ** Returns a summary of the progress in the form {@code known_percent_done - ** (estimated_percent_done)}. - ** - ** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate - ** will be omitted. - ** * If there is no estimate, only the {@code ??} will be included. - */ - final public String toPercentageString() { - Formatter f = new Formatter(); - if (finalizedTotal()) { - f.format("%.2f", getKnownFractionDone()*100); - } else if (!hasEstimate()) { - f.format("%.2f (??)", getKnownFractionDone()*100); - } else { - f.format("%.2f (%.2f??)", getKnownFractionDone()*100, getEstimatedFractionDone()*100); - } - return f.toString(); - } - - - /** - ** Constructs a new {@code ProgressParts} based on the total number of - ** parts (done, started, known) in the given iterable. Total estimated - ** parts is calculated based on the value of {@code try_to_be_smart}: - ** - ** ;{@code true} : try to extrapolate an estimated total parts based on the - ** number of subprogresses having estimates, the sum of these estimates, - ** and the total number of subprogresses. - ** - ** ;{@code false} : an estimate will only be supplied if all subprogresses - ** have estimates.
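For example, a composite task might aggregate its children like this (p1 and p2 are assumed to be Progress instances for two subtasks):

    // getSubParts may throw TaskAbortException if any child has aborted
    List<Progress> subs = Arrays.asList(p1, p2); // java.util imports assumed
    ProgressParts total = ProgressParts.getSubParts(subs, true);
    System.out.println(total); // eg. "3/7/12/20??" per toString() above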
- */ - public static ProgressParts getSubParts(Iterable<? extends Progress> subprogress, boolean try_to_be_smart) throws TaskAbortException { - int d = 0, s = 0, k = 0, t = 0; - int num = 0, unknown = 0; - boolean totalfinalized = true; - for (Progress p: subprogress) { - ++num; - if (p == null) { ++unknown; continue; } - ProgressParts parts = p.getParts(); - d += parts.done; - s += parts.started; - k += parts.known; - switch (parts.totalest) { - case ESTIMATE_UNKNOWN: - ++unknown; - totalfinalized = false; - break; - case TOTAL_FINALIZED: - t += parts.known; - break; - default: - totalfinalized = false; - t += parts.totalest; - } - } - if (num == unknown) { - t = ESTIMATE_UNKNOWN; - } else if (unknown > 0) { - t = (try_to_be_smart)? (int)Math.round(t * num / (float)(num-unknown)): ESTIMATE_UNKNOWN; - } - return new ProgressParts(d, s, k, (totalfinalized)? TOTAL_FINALIZED: t); - } - - /** - ** Constructs a new {@code ProgressParts} based on the number of progresses - ** (done, started, known) in the given iterable, with the given total - ** estimate. - */ - public static ProgressParts getParts(Iterable<? extends Progress> subprogress, int estimate) throws TaskAbortException { - int d = 0, s = 0, k = 0; - for (Progress p: subprogress) { - ++k; - if (p == null) { continue; } - ++s; - if (p.isDone()) { ++d; } - } - return new ProgressParts(d, s, k, estimate); - } - -} diff --git a/src/plugins/Library/util/exec/SimpleProgress.java b/src/plugins/Library/util/exec/SimpleProgress.java deleted file mode 100644 index de1f7426..00000000 --- a/src/plugins/Library/util/exec/SimpleProgress.java +++ /dev/null @@ -1,239 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Basic progress implementation. The number of parts known is implicitly equal -** to the number of parts started. -** -** '''USE OF THIS CLASS IS PRONE TO DEADLOCK - MAKE SURE YOU MATCH UP YOUR -** CALLS TO {@link #addPartDone()} AND {@link #addPartKnown(int, boolean)}.''' -** I will deal with this at some point, and make this class easier to use... -** possibly have a setDone() method like AtomicProgress has... -** -** NOTE: this implementation considers the task '''done''' when a call to -** {@link #addPartDone()} triggers the condition {@code pdone == known} when -** {@link #finalizedTotal()} is true. '''As soon as''' this occurs, all threads -** blocked on {@link #join()} will unblock and assume the task has finished. -** To prevent this from happening unexpectedly, it is recommended to call -** {@link #enteredSerialiser()} and {@link #exitingSerialiser()} appropriately -** '''for each serialiser visited'''. Failure to follow this advice may result -** in deadlock or task data being retrieved before the task is properly -** complete. For more details, see the source code. -** -** TODO NORM perhaps synchronize -** -** @author infinity0 -*/ -public class SimpleProgress implements Progress { - - /* - ** More details about the note in the class description: - ** - ** If you pass the progress object onto a child serialiser, but do extra - ** processing of the task data after the child returns, it is **vital** - ** that you call enteredSerialiser() and exitingSerialiser().
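A sketch of the matched-call discipline this warning describes, assuming `progress` is the SimpleProgress for the current task and that this serialiser is the leaf which finalises the total:

    progress.enteredSerialiser();          // known += 1 on entry
    progress.addPartKnown(parts, true);    // the leaf serialiser finalises the total
    for (int i = 0; i < parts; ++i) {
        // ... handle one part of the task ...
        progress.addPartDone();
    }
    progress.exitingSerialiser();          // matches enteredSerialiser(); may complete the task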
Otherwise, - ** if any child serialiser triggers the aforementioned condition, it will - ** cause all threads blocked on join() to unblock and think the task has - ** been completed, before the extra processing has occurred. ie: - ** - ** * Parent: [progress is at 8/8, not final] - ** * Parent: subsrl.pullLive(task, progress) - ** * Child: addPartKnown(8, true); [progress is at 8/16, final] - ** * Child: addPartDone() * 8; [progress is at 16/16, final] - ** * **all threads blocked on join() will unblock** - ** * Parent: yay, child has done its stuff, now I need to do some extra - ** stuff to actually complete the task - ** * other threads: "wtf? data's changing randomly!?" - ** - ** (We don't wait for the child serialiser to finish to finalise the total, - ** because we want the total to be finalised as soon as possible. Generally, we - ** let the leaf serialiser finalise the total.) - */ - - protected String subject = "??"; - - protected String status = null; - - /** - ** Number of parts done. - */ - protected volatile int pdone; - - /** - ** Number of parts known. For {@link SimpleProgress}, this is implicitly - ** also the number of parts started. - */ - protected volatile int known; - - /** - ** Estimated total number of parts. - ** - ** @see ProgressParts#totalest - */ - protected volatile int estimate = ProgressParts.ESTIMATE_UNKNOWN; - - private boolean inprogress = true; // this variable is only used in synchronized blocks so doesn't need to be volatile - - private volatile TaskAbortException abort = null; - - public SimpleProgress() { } - - public boolean finalizedTotal() { - return estimate == ProgressParts.TOTAL_FINALIZED; - } - - /** - ** Add a done part. If {@code pdone == known} is triggered while {@link - ** #finalizedTotal()} is true, then the task is deemed to be completed and - ** all threads blocked on {@link #join()} will be released. - */ - public synchronized void addPartDone() { - if (pdone == known) { - throw new IllegalStateException("Can't increase parts done above parts known"); - } - pdone++; - if (finalizedTotal() && pdone == known) { - inprogress = false; - notifyAll(); - } - } - - /** - ** Add some known parts, and either finalise the total or invalidate the - ** previous estimate. - */ - public synchronized void addPartKnown(int parts, boolean finalise) { - if (parts < 0) { - throw new IllegalArgumentException("Can't add a negative number of parts."); - } else if (finalise) { - if (finalizedTotal() && parts > 0) { - throw new IllegalArgumentException("Total already finalised"); - } - // update estimate first to maintain ProgressParts contract - // in case another thread reads this concurrently - estimate = known + parts; - known = estimate; - estimate = ProgressParts.TOTAL_FINALIZED; - } else { - if (finalizedTotal()) { - throw new IllegalArgumentException("Cannot un-finalise a final total!"); - } - estimate = ProgressParts.ESTIMATE_UNKNOWN; - known += parts; - } - } - - /** - ** Set an estimate. This should be used carefully due to the automatic - ** thread-unblocking behaviour of {@link #addPartDone()}. Specifically, you - ** should '''never''' give {@link ProgressParts#TOTAL_FINALIZED} as the - ** argument to this method. - ** - ** @see ProgressParts#totalest - */ - public synchronized void setEstimate(int e) { - if (e < known) { - throw new IllegalArgumentException("Can't give a lower estimate than what is already known."); - } - estimate = e; - } - - /** - ** This method should be called first thing after entering a serialiser.
- ** Otherwise, deadlock may result, or thread-safety may be broken. - */ - public void enteredSerialiser() { - addPartKnown(1, false); - } - - /** - ** This method should be called last thing before leaving a serialiser. - ** Otherwise, deadlock may result, or thread-safety may be broken. - ** - ** (Note: normal exits; not exceptions. Ie. call this in a {@code try} - ** block, not a {@code finally} block.) - */ - public void exitingSerialiser() { - addPartDone(); - } - - public synchronized void abort(TaskAbortException e) throws TaskAbortException { - abort = e; - inprogress = false; - notifyAll(); - throw e; - } - - public void setSubject(String s) { - if (s == null) { - throw new IllegalArgumentException("Can't set a null progress subject"); - } - subject = s; - } - - public void setStatus(String s) { - status = s; - } - - public void setStatusDefault() { - status = null; - } - - /*======================================================================== - public interface Progress - ========================================================================*/ - - /*@Override**/ public String getSubject() { - return subject; - } - - /** - ** {@inheritDoc} - ** - ** This implementation returns (in order of priority): the message for - ** {@link #abort}, {@link #status}, or a string constructed from {@link - ** #pdone}, {@link #known} and {@link #estimate}. - */ - /*@Override**/ public String getStatus() { - if (abort != null) { return abort.getMessage(); } - if (status != null) { return status; } - try { - return getParts().toString(); - } catch (TaskAbortException e) { - // abort could have been set between lines 1 and 4 - return abort.getMessage(); - } - } - - /*@Override**/ public ProgressParts getParts() throws TaskAbortException { - // updates are made such that the ProgressParts contract isn't broken even - // in mid-update so we don't need to synchronize here. - if (abort != null) { throw new TaskAbortException("Task failed", abort); } - return new ProgressParts(pdone, known, known, estimate); - } - - /** - ** {@inheritDoc} - ** - ** This implementation returns {@code true}; it's assumed that the task has - ** already started by the time you construct one of these objects. - */ - /*@Override**/ public boolean isStarted() { - return true; - } - - /*@Override**/ public boolean isDone() throws TaskAbortException { - // updates are made such that the ProgressParts contract isn't broken even - // in mid-update so we don't need to synchronize here. - if (abort != null) { throw new TaskAbortException("Task failed", abort); } - return finalizedTotal() && pdone == known; - } - - /*@Override**/ public synchronized void join() throws InterruptedException, TaskAbortException { - while (inprogress) { wait(); } - if (abort != null) { throw new TaskAbortException("Task failed", abort); } - } - -} diff --git a/src/plugins/Library/util/exec/TaskAbortException.java b/src/plugins/Library/util/exec/TaskAbortException.java deleted file mode 100644 index 4100fd8b..00000000 --- a/src/plugins/Library/util/exec/TaskAbortException.java +++ /dev/null @@ -1,85 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Thrown when a task aborts. DOCUMENT -** -** @author infinity0 -*/ -public class TaskAbortException extends Exception { - - /** - ** Whether the abortion was due to an error condition.
Defaults to {@code - ** true}. A {@code false} value generally means that the task was itself - ** aborted, but that the intended effects of the task can be treated as - ** having been accomplished, eg. {@link TaskCompleteException}. - ** - ** Multiple-task serialisers should handle abortions as follows: non-error - ** abortions result in the task being removed from the taskgroup; error - ** abortions are rethrown as-is. - */ - final protected boolean error; - - /** - ** Whether the failure is temporary, and the client should try again at - ** a later time. Eg. this should be {@code true} for abortions caused by - ** {@link java.io.IOException}, and {@code false} for abortions caused by - ** {@link plugins.Library.io.DataFormatException}. Defaults to {@code - ** false}. - */ - final protected boolean retry; - - /** - ** Constructs a new exception with the specified parameters. - ** - ** @param s The detail message - ** @param t The cause - ** @param e Whether the current abortion is {@link #error}. - ** @param r Whether a {@link #retry} is likely to succeed. - */ - public TaskAbortException(String s, Throwable t, boolean e, boolean r) { - super(s, t); - error = e; - retry = r; - } - - /** - ** Constructs a new exception with the specified parameters, marked as - ** error. - ** - ** @param s The detail message - ** @param t The cause - ** @param r Whether a {@link #retry} is likely to succeed. - */ - public TaskAbortException(String s, Throwable t, boolean r) { - this(s, t, true, r); - } - - /** - ** Constructs a new exception with the specified parameters, marked as - ** error and non-retry. - ** - ** @param s The detail message - ** @param t The cause - */ - public TaskAbortException(String s, Throwable t) { - this(s+" : "+t.getMessage(), t, true, false); - } - - /** - ** @see #error - */ - public boolean isError() { - return error; - } - - /** - ** @see #retry - */ - public boolean shouldRetry() { - return retry; - } - -} diff --git a/src/plugins/Library/util/exec/TaskCompleteException.java b/src/plugins/Library/util/exec/TaskCompleteException.java deleted file mode 100644 index b81de820..00000000 --- a/src/plugins/Library/util/exec/TaskCompleteException.java +++ /dev/null @@ -1,20 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Thrown when a task aborts due to the task already having been done, eg. by -** another thread. -** -** @author infinity0 -*/ -public class TaskCompleteException extends TaskAbortException { - - // EXPAND - - public TaskCompleteException(String s) { - super(s, null, false, false); - } - -} diff --git a/src/plugins/Library/util/exec/TaskInProgressException.java b/src/plugins/Library/util/exec/TaskInProgressException.java deleted file mode 100644 index 5ec800b1..00000000 --- a/src/plugins/Library/util/exec/TaskInProgressException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.exec; - -/** -** Thrown when a task is already in progress elsewhere.
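Taken together, a consumer of these exception classes might triage aborts roughly as follows. This is a sketch only: pushTask() is a hypothetical action that can throw TaskAbortException, and the enclosing method is assumed to declare InterruptedException and TaskAbortException.

    try {
        pushTask();
    } catch (TaskInProgressException e) {
        e.join(); // block until the other thread finishes, then treat as complete
    } catch (TaskCompleteException e) {
        // non-error abort: the work was already done elsewhere
    } catch (TaskAbortException e) {
        if (e.isError() && e.shouldRetry()) {
            // transient failure, eg. an I/O problem: the caller may requeue the task
        } else {
            throw e; // permanent error abortions are rethrown as-is
        }
    }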
-** -** @author infinity0 -*/ -public class TaskInProgressException extends TaskAbortException { - - final Progress prog; - - public TaskInProgressException(String s, Progress p) { - super(s, null, false, false); - prog = p; - } - - public TaskInProgressException(Progress p) { - super(null, null, false, false); - prog = p; - } - - public Progress getProgress() { - return prog; - } - - /** - ** DOCUMENT - */ - public TaskCompleteException join() throws InterruptedException, TaskAbortException { - prog.join(); - return new TaskCompleteException("Completed task: " + prog.getSubject()); - } - -} diff --git a/src/plugins/Library/util/exec/package-info.java b/src/plugins/Library/util/exec/package-info.java deleted file mode 100644 index 24a5a855..00000000 --- a/src/plugins/Library/util/exec/package-info.java +++ /dev/null @@ -1,9 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains classes to represent and monitor execution of tasks. -** -** @author infinity0 -*/ -package plugins.Library.util.exec; diff --git a/src/plugins/Library/util/func/Closure.java b/src/plugins/Library/util/func/Closure.java deleted file mode 100644 index d32ca796..00000000 --- a/src/plugins/Library/util/func/Closure.java +++ /dev/null @@ -1,40 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.func; - -/** -** An object representing a partially-evaluated subroutine. This is useful in -** many situations, for example when we have a general subroutine that takes -** several parameters, and we want to create a derivative subroutine for which -** a subset of these parameters is fixed (this is sometimes referred to as the -** "environment"). The derivative is sometimes called a "curried function". -** -** This idea can be applied to implement continuations in an algorithm that -** blocks waiting for some of its parameters to arrive. -** -** This interface defines a checked exception to be thrown from the {@link -** #invoke(Object)} method; if you call it on a general {@code Closure} then -** you will also need to catch {@link Exception}. If you don't need to do this, -** use {@link SafeClosure} instead. -** -** (Unfortunately, using a type parameter to indicate the type of exception -** thrown is unacceptable; the signature {@code throws T} disallows throwing -** multiple types of exception, whereas {@code throws Exception} allows it.) -** -** @param <P>
Type of the input parameter. If you need more than one input -** parameter, consider using the classes in {@link Tuples}. -** @param <E> Type of the thrown exception(s). If your implementation throws -** more than one type of exception, it's recommended to give the closest -** superclass of all the exceptions, though {@link Exception} is always -** safe. If your implementation throws no checked exceptions, consider -** using {@link SafeClosure} instead of this class. -** @author infinity0 -** @see Closure -** (computer science) -*/ -public interface Closure<P, E extends Exception> { - - public void invoke(P param) throws E; - -} diff --git a/src/plugins/Library/util/func/SafeClosure.java b/src/plugins/Library/util/func/SafeClosure.java deleted file mode 100644 index 1235781c..00000000 --- a/src/plugins/Library/util/func/SafeClosure.java +++ /dev/null @@ -1,17 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.func; - -/** -** A {@link Closure} that cannot throw a checked exception. -** -** @param <P>
Type of the input parameter. If you want a closure with more than -** one input parameter, consider using the classes in {@link Tuples}. -** @author infinity0 -*/ -public interface SafeClosure<P>
extends Closure { - - public void invoke(P param); - -} diff --git a/src/plugins/Library/util/func/Tuples.java b/src/plugins/Library/util/func/Tuples.java deleted file mode 100644 index ec080174..00000000 --- a/src/plugins/Library/util/func/Tuples.java +++ /dev/null @@ -1,160 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util.func; - -/** -** Tuple classes that retain type-safety through generics. These classes can -** be used to create throwaway anonymous types that can be referred back to, -** unlike anonymous classes. They are also useful for when you want to have a -** {@link Closure} with more than 1 input parameter. -** -** Example usage: -** -** import static plugins.Library.func.Tuples.*; -** import plugins.Library.func.Closure; -** -** ... -** -** Closure> my_closure = new -** Closure>() { -** -** public void invoke(X2 param) { -** -** ... -** -** } -** -** } -** -** my_closure.invoke(X2(3, "test")); -** -** Note that (for n > 0) each n-tuple class extends the (n-1)-tuple class, -** recursively, so that you can give any (k+e)-tuple to a {@code Closure} that -** takes a k-tuple, assuming the types also match. -** -** TODO NORM override equals() and hashCode() for all of these -** -** @author infinity0 -*/ -final public class Tuples { - - private Tuples() {} - - /** - ** An immutable 0-tuple. - */ - public static class X0 { - @Override public String toString() { return "()"; } - @Override public int hashCode() { return 0; } - @Override public boolean equals(Object o) { return o instanceof X0 && !(o instanceof X1); } - } - - /** - ** An immutable 1-tuple. - */ - public static class X1 extends X0 { - final public T0 _0; - public X1(T0 v0) { super(); _0 = v0; } - @Override public String toString() { return "(" + _0 + ")"; } - @Override public int hashCode() { return _0.hashCode() + 1; } - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof X1) || o instanceof X2) { return false; } - @SuppressWarnings("unchecked") X1 x = (X1)o; - return (_0 == null? x._0 == null: _0.equals(x._0)); - } - } - - /** - ** An immutable 2-tuple. - */ - public static class X2 extends X1 { - final public T1 _1; - public X2(T0 v0, T1 v1) { super(v0); _1 = v1; } - @Override public String toString() { return "(" + _0 + ", " + _1 + ")"; } - @Override public int hashCode() { return _0.hashCode() + _1.hashCode() + 2; } - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof X2) || o instanceof X3) { return false; } - @SuppressWarnings("unchecked") X2 x = (X2)o; - return (_0 == null? x._0 == null: _0.equals(x._0)) - && (_1 == null? x._1 == null: _1.equals(x._1)); - } - } - - /** - ** An immutable 3-tuple. - */ - public static class X3 extends X2 { - final public T2 _2; - public X3(T0 v0, T1 v1, T2 v2) { super(v0, v1); _2 = v2; } - @Override public String toString() { return "(" + _0 + ", " + _1 + ", " + _2 + ")"; } - @Override public int hashCode() { return _0.hashCode() + _1.hashCode() + _2.hashCode() + 3; } - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof X3) || o instanceof X4) { return false; } - @SuppressWarnings("unchecked") X3 x = (X3)o; - return (_0 == null? x._0 == null: _0.equals(x._0)) - && (_1 == null? x._1 == null: _1.equals(x._1)) - && (_2 == null? 
x._2 == null: _2.equals(x._2)); - } - } - - /** - ** An immutable 4-tuple. - */ - public static class X4 extends X3 { - final public T3 _3; - public X4(T0 v0, T1 v1, T2 v2, T3 v3) { super(v0, v1, v2); _3 = v3; } - @Override public String toString() { return "(" + _0 + ", " + _1 + ", " + _2 + ", " + _3 + ")"; } - @Override public int hashCode() { return _0.hashCode() + _1.hashCode() + _2.hashCode() + _3.hashCode() + 4; } - @Override public boolean equals(Object o) { - if (o == this) { return true; } - if (!(o instanceof X4) /*|| o instanceof X5*/) { return false; } - @SuppressWarnings("unchecked") X4 x = (X4)o; - return (_0 == null? x._0 == null: _0.equals(x._0)) - && (_1 == null? x._1 == null: _1.equals(x._1)) - && (_2 == null? x._2 == null: _2.equals(x._2)) - && (_3 == null? x._3 == null: _3.equals(x._3)); - } - } - - final private static X0 X0 = new X0(); - - /** - ** Returns a {@link X0 0-tuple}. - */ - public static X0 X0() { - return X0; - } - - /** - ** Creates a new {@link X1 1-tuple} from the given parameters. - */ - public static X1 X1(T0 v0) { - return new X1(v0); - } - - /** - ** Creates a new {@link X2 2-tuple} from the given parameters. - */ - public static X2 X2(T0 v0, T1 v1) { - return new X2(v0, v1); - } - - /** - ** Creates a new {@link X3 3-tuple} from the given parameters. - */ - public static X3 X3(T0 v0, T1 v1, T2 v2) { - return new X3(v0, v1, v2); - } - - /** - ** Creates a new {@link X4 4-tuple} from the given parameters. - */ - public static X4 X4(T0 v0, T1 v1, T2 v2, T3 v3) { - return new X4(v0, v1, v2, v3); - } - -} diff --git a/src/plugins/Library/util/func/package-info.java b/src/plugins/Library/util/func/package-info.java deleted file mode 100644 index a801b916..00000000 --- a/src/plugins/Library/util/func/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains classes that implement various functional programming concepts in -** Java. -** -** @author infinity0 -** @see Functional -** programming -*/ -package plugins.Library.util.func; diff --git a/src/plugins/Library/util/package-info.java b/src/plugins/Library/util/package-info.java deleted file mode 100644 index fcc63aa7..00000000 --- a/src/plugins/Library/util/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -/** -** Contains various general-purpose utility classes. These do not depend on -** any other part of the plugin code, and may be useful to outside projects. -** -** @author infinity0 -*/ -package plugins.Library.util; diff --git a/test/plugins/Library/Tester.java b/test/plugins/Library/Tester.java deleted file mode 100644 index bb16a4a8..00000000 --- a/test/plugins/Library/Tester.java +++ /dev/null @@ -1,381 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
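A compact, compilable sketch of the Closure-with-Tuples pattern described above, using SafeClosure to avoid the checked exception:

    import static plugins.Library.util.func.Tuples.*;
    import plugins.Library.util.func.SafeClosure;

    SafeClosure<X2<Integer, String>> clo = new SafeClosure<X2<Integer, String>>() {
        public void invoke(X2<Integer, String> p) {
            System.out.println(p._0 + ": " + p._1); // the 2-tuple's fields
        }
    };
    clo.invoke(X2(3, "test")); // X2(...) is the static factory from Tuples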
*/ -package plugins.Library; - -import plugins.Library.client.*; -import plugins.Library.util.exec.*; -import plugins.Library.util.func.Closure; -import plugins.Library.index.*; -import plugins.Library.io.*; -import plugins.Library.io.serial.*; -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.util.*; -import plugins.Library.*; - -import freenet.keys.FreenetURI; -import freenet.node.RequestStarter; - -import java.util.*; -import java.io.*; -import java.net.*; - - -/** -** Various on-freenet tests. -** -** @author infinity0 -*/ -public class Tester { - - - public static String runTest(Library lib, String test) { - if (test.equals("push_index")) { - return testPushIndex(); - } else if (test.equals("push_progress")) { - return testPushProgress(); - } else if (test.equals("autodetect")) { - return testAutoDetect(lib); - } else if (test.equals("push_merge")) { - return testPushAndMergeIndex(); - } - return "Add &plugins.Library.Tester=<test name> to the URL to run a test.\n"+"Tests: push_index, push_progress, autodetect, push_merge"; - } - - final public static String PAGE_START = "\n\n"; - final public static String PAGE_END = "\n"; - - - public static String testAutoDetect(Library library) { - List indexes = Arrays.asList( - "CHK@MIh5-viJQrPkde5gmRZzqjBrqOuh~Wbjg02uuXJUzgM,rKDavdwyVF9Z0sf5BMRZsXj7yiWPFUuewoe0CPesvXE,AAIC--8", - "SSK@5hH~39FtjA7A9~VXWtBKI~prUDTuJZURudDG0xFn3KA,GDgRGt5f6xqbmo-WraQtU54x4H~871Sho9Hz6hC-0RA,AQACAAE/Search-20", - "SSK@US6gHsNApDvyShI~sBHGEOplJ3pwZUDhLqTAas6rO4c,3jeU5OwV0-K4B6HRBznDYGvpu2PRUuwL0V110rn-~8g,AQACAAE/freenet-index-2", - "SSK@jzLqXo2AnlWw2CHWAkOB~LmbvowLXn9UhAyz7n3FuAs,PG6wEqQeWw2KNQ6ZbNJti1mn9iXSdIwdRkEK7IyfrlE,AQACAAE/testing-0" - ); - - StringBuilder s = new StringBuilder(); - for (String index: indexes) { - try { - Class t = library.getIndexType(new FreenetURI(index)); - s.append("

").append(t.getName()).append(": ").append(index).append("

"); - } catch (Exception e) { - appendError(s, e); - } - } - return s.toString(); - } - - volatile static Thread push_progress_thread; - volatile static SimpleProgress push_progress_progress = new SimpleProgress(); - volatile static Throwable push_progress_error; - volatile static Date push_progress_start; - volatile static FreenetURI push_progress_endURI; - volatile static FreenetURI push_progress_insertURI; - public static String testPushProgress() { - if (push_progress_thread == null) { - push_progress_thread = new Thread() { - YamlReaderWriter yamlrw = new YamlReaderWriter(); - FreenetArchiver> arx = Library.makeArchiver(yamlrw, "text/yaml", 0x10000, RequestStarter.INTERACTIVE_PRIORITY_CLASS); - - @Override public void run() { - push_progress_start = new Date(); - Map testmap = new TreeMap(); - for(int i=0; i<0x10000; ++i) { - testmap.put(""+i, i); - } - try { - push_progress_insertURI = new FreenetURI("USK@EArbDzzgEeTOgOn-I2BND0-tgH6ql-XqRYSvPxZhFHo,PG6wEqQeWw2KNQ6ZbNJti1mn9iXSdIwdRkEK7IyfrlE,AQECAAE/testing/0"); - PushTask> task = new PushTask>(testmap, push_progress_insertURI); - arx.pushLive(task, push_progress_progress); - push_progress_endURI = (FreenetURI)task.meta; - } catch (TaskAbortException e) { - push_progress_error = e; - } catch (MalformedURLException e) { - push_progress_error = e; - } - } - }; - push_progress_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { - public void uncaughtException(Thread t, Throwable e) { - push_progress_error = e; - } - }); - push_progress_thread.start(); - } - - StringBuilder s = new StringBuilder(); - s.append(PAGE_START); - appendTimeElapsed(s, push_progress_start); - if (push_progress_insertURI != null) { s.append("

Insert: ").append(push_progress_insertURI.toString()).append("

\n"); } - s.append("

").append(push_progress_progress.getStatus()).append("

\n"); - appendResultURI(s, push_progress_endURI); - try { - ProgressParts parts = push_progress_progress.getParts(); - s.append("

").append(parts.done); - s.append(" ").append(parts.known); - s.append(" ").append(parts.finalizedTotal()).append("

\n"); - } catch (TaskAbortException e) { - appendError(s, push_progress_error); - } - s.append(PAGE_END); - return s.toString(); - } - - - volatile static Thread push_index_thread; - volatile static String push_index_status = ""; - volatile static FreenetURI push_index_endURI; - volatile static Date push_index_start; - volatile static Throwable push_index_error; - volatile static Set push_index_words = new TreeSet(Arrays.asList( - "Lorem", "ipsum", "dolor", "sit", "amet", "consectetur", "adipisicing", - "elit", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore", - "et", "dolore", "magna", "aliqua", "Ut", "enim", "ad", "minim", - "veniam", "quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi", - "ut", "aliquip", "ex", "ea", "commodo", "consequat", "Duis", "aute", - "irure", "dolor", "in", "reprehenderit", "in", "voluptate", "velit", - "esse", "cillum", "dolore", "eu", "fugiat", "nulla", "pariatur", - "Excepteur", "sint", "occaecat", "cupidatat", "non", "proident", "sunt", - "in", "culpa", "qui", "officia", "deserunt", "mollit", "anim", "id", "est", - "laborum" - )); - public static String testPushIndex() { - if (push_index_thread == null) { - push_index_start = new Date(); - push_index_thread = new Thread() { - ProtoIndexSerialiser srl = ProtoIndexSerialiser.forIndex(push_index_endURI, RequestStarter.INTERACTIVE_PRIORITY_CLASS); - ProtoIndex idx; - Random rand = new Random(); - - @Override public void run() { - try { - idx = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0); - } catch (java.net.MalformedURLException e) { - throw new AssertionError(e); - } - ProtoIndexComponentSerialiser.get().setSerialiserFor(idx); - - for (String key: push_index_words) { - SkeletonBTreeSet entries = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - ProtoIndexComponentSerialiser.get().setSerialiserFor(entries); - int n = rand.nextInt(0x200) + 0x200; - for (int j=0; j> en: idx.ttab.entrySet()) { - push_index_status = "Deflating entry " + en.getKey() + " (" + en.getValue().size() + " entries)"; - en.getValue().deflate(); - } - push_index_status = "Deflating the term table"; - idx.ttab.deflate(); - PushTask task1 = new PushTask(idx); - push_index_status = "Deflating the index"; - srl.push(task1); - push_index_status = "Done!"; - push_index_endURI = (FreenetURI)task1.meta; - } catch (TaskAbortException e) { - push_index_error = e; - } - } - }; - push_index_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { - public void uncaughtException(Thread t, Throwable e) { - push_index_error = e; - } - }); - push_index_thread.start(); - } - - StringBuilder s = new StringBuilder(); - s.append(PAGE_START); - s.append("

Pushing index with terms: ").append(push_index_words.toString()).append("

\n"); - appendTimeElapsed(s, push_index_start); - s.append("

").append(push_index_status).append("

"); - appendError(s, push_index_error); - appendResultURI(s, push_index_endURI); - s.append(PAGE_END); - return s.toString(); - } - - volatile static int push_phase; - static final List generatedURIs = new ArrayList(); - - public static String testPushAndMergeIndex() { - - // Split it up. - int length = push_index_words.size(); - final int divideInto = 5; - final TreeSet[] phaseWords = new TreeSet[5]; - for(int i=0;i(); - int x = 0; - for(String s : push_index_words) { - phaseWords[x++].add(s.toLowerCase()); - if(x == divideInto) x = 0; - } - - if (push_index_thread == null) { - push_index_start = new Date(); - - final File cacheDir = new File("tester-cache-temp"); - cacheDir.mkdir(); - FreenetArchiver.setCacheDir(cacheDir); - - push_index_thread = new Thread() { - ProtoIndexSerialiser srl = ProtoIndexSerialiser.forIndex(push_index_endURI, RequestStarter.INTERACTIVE_PRIORITY_CLASS); - ProtoIndex idx; - Random rand = new Random(); - - @Override public void run() { - - try { - idx = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0); - } catch (java.net.MalformedURLException e) { - throw new AssertionError(e); - } - ProtoIndexComponentSerialiser.get().setSerialiserFor(idx); - - try { - for (Map.Entry> en: idx.ttab.entrySet()) { - push_index_status = "Deflating entry " + en.getKey() + " (" + en.getValue().size() + " entries)"; - en.getValue().deflate(); - } - push_index_status = "Deflating the term table"; - idx.ttab.deflate(); - PushTask task1 = new PushTask(idx); - push_index_status = "Deflating the index"; - srl.push(task1); - push_index_status = "Done!"; - push_index_endURI = (FreenetURI)task1.meta; - synchronized(generatedURIs) { - generatedURIs.add(push_index_endURI); - } - } catch (TaskAbortException e) { - push_index_error = e; - return; - } - - for(push_phase = 0; push_phase < divideInto; push_phase++) { - - // Merge new data in. - - long mergeStartTime = System.currentTimeMillis(); - // generate new set to merge - final SortedSet randAdd = phaseWords[push_phase]; - final Map> newtrees = new HashMap>(); - int entriesadded = 0; - for (String k: randAdd) { - SortedSet set = new TreeSet(); - entriesadded += fillEntrySet(k, set); - newtrees.put(k, set); - } - - // async merge - Closure>, TaskAbortException> clo = new - Closure>, TaskAbortException>() { - /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { - String key = entry.getKey(); - SkeletonBTreeSet tree = entry.getValue(); - //System.out.println("handling " + key + ((tree == null)? 
" (new)":" (old)")); - if (tree == null) { - entry.setValue(tree = makeEntryTree()); - } - assert(tree.isBare()); - tree.update(newtrees.get(key), null); - assert(tree.isBare()); - //System.out.println("handled " + key); - } - }; - try { - assert(idx.ttab.isBare()); - idx.ttab.update(randAdd, null, clo, new TaskAbortExceptionConvertor()); - assert(idx.ttab.isBare()); - PushTask task4 = new PushTask(idx); - srl.push(task4); - - FreenetArchiver arch = (FreenetArchiver) srl.getChildSerialiser(); - arch.waitForAsyncInserts(); - - long mergeEndTime = System.currentTimeMillis(); - - System.out.print(entriesadded + " entries merged in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", "); - push_index_status = "Done!"; - push_index_endURI = (FreenetURI)task4.meta; - synchronized(generatedURIs) { - generatedURIs.add(push_index_endURI); - } - } catch (TaskAbortException e) { - push_index_error = e; - return; - } - - } - - } - }; - push_index_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { - public void uncaughtException(Thread t, Throwable e) { - push_index_error = e; - } - }); - push_index_thread.start(); - } - - StringBuilder s = new StringBuilder(); - s.append(PAGE_START); - s.append("

Pushing index with terms:

    \n"); - for(int i=0;i").append(phaseWords[i].toString()).append("\n"); - s.append("
\n"); - appendTimeElapsed(s, push_index_start); - s.append("

").append(push_index_status).append("

"); - appendError(s, push_index_error); - appendResultURI(s, push_index_endURI); - synchronized(generatedURIs) { - s.append("

Phase: "+push_phase+" URIs generated: "+generatedURIs.size()+"

    "); - for(FreenetURI uri : generatedURIs) - s.append("
  • "+uri.toASCIIString()+"
  • "); - s.append("
"); - } - s.append(PAGE_END); - return s.toString(); - } - - protected static int fillEntrySet(String key, SortedSet tree) { - int n = Generators.rand.nextInt(0x20) + 0x20; - for (int j=0; j makeEntryTree() { - SkeletonBTreeSet tree = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - ProtoIndexComponentSerialiser.get().setSerialiserFor(tree); - return tree; - } - - public static void appendError(StringBuilder s, Throwable th) { - if (th == null) { return; } - ByteArrayOutputStream bs = new ByteArrayOutputStream(0x1000); - th.printStackTrace(new PrintWriter(bs, true)); - s.append("
").append(bs.toString()).append("
\n"); - } - - public static void appendTimeElapsed(StringBuilder s, Date start) { - if (start == null) { return; } - s.append("

").append(System.currentTimeMillis() - start.getTime()).append("ms elapsed

\n"); - } - - public static void appendResultURI(StringBuilder s, FreenetURI u) { - if (u != null) { s.append("

Result: ").append(u.toString()).append("

\n"); } - } - - -} diff --git a/test/plugins/Library/index/BIndexTest.java b/test/plugins/Library/index/BIndexTest.java deleted file mode 100644 index e3fadcf6..00000000 --- a/test/plugins/Library/index/BIndexTest.java +++ /dev/null @@ -1,381 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -import junit.framework.TestCase; -import static plugins.Library.util.Generators.rand; - -import plugins.Library.util.*; -import plugins.Library.util.func.*; -import plugins.Library.util.exec.*; -import plugins.Library.io.serial.*; -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.index.*; - -import freenet.keys.FreenetURI; - -import java.util.*; -import java.io.*; - -/** -** TODO most of this code needs to go into WriteableIndex at some point. -** -** @author infinity0 -*/ -public class BIndexTest extends TestCase { - - // mininum number of entries in a b-tree node. - final public static int node_size = 0x04; - - // base number of keys in an index. the final size (after async-update) will be around double this. - final public static int index_size = 0x80; - - // base number of entries for a key. the actual size (per key) varies between 1x-2x this. - final public static int entry_size = 0x20; - - final public boolean extensive = Boolean.getBoolean("extensiveTesting"); - - final public static int it_full = 4; - final public static int it_partial = 2; - final public static boolean fuller = false; - - // TODO HIGH code a test that stores a huge on-disk index with repeated calls to update() - // this could be done in the same test as the progress test - final public static boolean disabled_progress = true; - - static { - ProtoIndex.BTREE_NODE_MIN = node_size; - System.out.println("ProtoIndex B-tree node_min set to " + ProtoIndex.BTREE_NODE_MIN); - } - - long time = 0; - - public long timeDiff() { - long oldtime = time; - time = System.currentTimeMillis(); - return time - oldtime; - } - - public BIndexTest() { - f = new File("BindexTest"); - f.mkdir(); - srl = ProtoIndexSerialiser.forIndex(f); - csrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, srl.getChildSerialiser()); - } - - private final File f; - private final ProtoIndexSerialiser srl; - private final ProtoIndexComponentSerialiser csrl; - private ProtoIndex idx; - - Set randomWords = new HashSet(Arrays.asList( - "Lorem", "ipsum", "dolor", "sit", "amet,", "consectetur", "adipisicing", - "elit,", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore", - "et", "dolore", "magna", "aliqua.", "Ut", "enim", "ad", "minim", - "veniam,", "quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi", - "ut", "aliquip", "ex", "ea", "commodo", "consequat.", "Duis", "aute", - "irure", "dolor", "in", "reprehenderit", "in", "voluptate", "velit", - "esse", "cillum", "dolore", "eu", "fugiat", "nulla", "pariatur.", - "Excepteur", "sint", "occaecat", "cupidatat", "non", "proident,", "sunt", - "in", "culpa", "qui", "officia", "deserunt", "mollit", "anim", "id", "est", - "laborum." 
- )); - - protected void newTestSkeleton() { - try { - idx = new ProtoIndex(new FreenetURI("CHK@yeah"), "test", null, null, 0); - } catch (java.net.MalformedURLException e) { - assertTrue(false); - } - csrl.setSerialiserFor(idx); - timeDiff(); - } - - protected SkeletonBTreeSet makeEntryTree() { - SkeletonBTreeSet tree = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - csrl.setSerialiserFor(tree); - return tree; - } - - protected int fillEntrySet(String key, SortedSet tree) { - int n = rand.nextInt(entry_size) + entry_size; - for (int j=0; j> tree) { - int total = 0; - for (int i=0; i entries = makeEntryTree(); - total += fillEntrySet(key, entries); - tree.put(key, entries); - } - return total; - } - - protected SortedSet randomMixset(Set set) { - SortedSet sub = new TreeSet(); - for (String s: set) { - sub.add((rand.nextInt(2) == 0)? s: Generators.rndKey()); - } - return sub; - } - - public void fullInflateDeflateUpdate() throws TaskAbortException { - newTestSkeleton(); - int totalentries = fillRootTree(idx.ttab); - System.out.print(totalentries + " entries generated in " + timeDiff() + " ms, "); - - // make a copy of the tree - Map> origtrees = new TreeMap>(); - for (Map.Entry> en: idx.ttab.entrySet()) { - origtrees.put(en.getKey(), new TreeSet(en.getValue())); - } - assertTrue(origtrees.equals(idx.ttab)); - - // full deflate - for (SkeletonBTreeSet entries: idx.ttab.values()) { - entries.deflate(); - assertTrue(entries.isBare()); - } - idx.ttab.deflate(); - assertTrue(idx.ttab.isBare()); - assertFalse(idx.ttab.isLive()); - PushTask task1 = new PushTask(idx); - srl.push(task1); - System.out.println("deflated in " + timeDiff() + " ms, root at " + task1.meta + "."); - - if (fuller) { - // full inflate - PullTask task2 = new PullTask(task1.meta); - srl.pull(task2); - idx = task2.data; - idx.ttab.inflate(); - assertTrue(idx.ttab.isLive()); - assertFalse(idx.ttab.isBare()); - System.out.print("inflated in " + timeDiff() + " ms, "); - - // full deflate (1) - for (SkeletonBTreeSet entries: idx.ttab.values()) { - // inflating the root tree does not automatically inflate the trees for - // each entry, so these should already be bare - assertTrue(entries.isBare()); - } - idx.ttab.deflate(); - assertTrue(idx.ttab.isBare()); - assertFalse(idx.ttab.isLive()); - PushTask task3 = new PushTask(idx); - srl.push(task3); - System.out.println("re-deflated in " + timeDiff() + " ms, root at " + task3.meta + "."); - } - - // generate new set to merge - final SortedSet randAdd = randomMixset(origtrees.keySet()); - final Map> newtrees = new HashMap>(); - int entriesadded = 0; - for (String k: randAdd) { - SortedSet set = new TreeSet(); - entriesadded += fillEntrySet(k, set); - newtrees.put(k, set); - } - - // async merge - Closure>, TaskAbortException> clo = new - Closure>, TaskAbortException>() { - /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { - String key = entry.getKey(); - SkeletonBTreeSet tree = entry.getValue(); - //System.out.println("handling " + key + ((tree == null)? 
" (new)":" (old)")); - if (tree == null) { - entry.setValue(tree = makeEntryTree()); - } - assertTrue(tree.isBare()); - tree.update(newtrees.get(key), null); - assertTrue(tree.isBare()); - //System.out.println("handled " + key); - } - }; - assertTrue(idx.ttab.isBare()); - idx.ttab.update(randAdd, null, clo, new TaskAbortExceptionConvertor()); - assertTrue(idx.ttab.isBare()); - PushTask task4 = new PushTask(idx); - srl.push(task4); - System.out.print(entriesadded + " entries merged in " + timeDiff() + " ms, root at " + task4.meta + ", "); - - // Iterate it. - Iterator keys = idx.ttab.keySetAutoDeflate().iterator(); - long count = 0; - String prev = null; - while(keys.hasNext()) { - count++; - String x = keys.next(); - assertFalse("Inconsistent iteration: Previous was "+prev+" this is "+x, prev != null && x.compareTo(prev) <= 0); - prev = x; - } - assertTrue(count == idx.ttab.size()); - System.out.println("Iterated keys, total is "+count+" size is "+idx.ttab.size()); - assertTrue(idx.ttab.isBare()); - - // full inflate (2) - PullTask task5 = new PullTask(task4.meta); - srl.pull(task5); - idx = task5.data; - idx.ttab.inflate(); - assertTrue(idx.ttab.isLive()); - assertFalse(idx.ttab.isBare()); - System.out.println("re-inflated in " + timeDiff() + " ms."); - - // merge added trees into backup, and test against the stored version - for (Map.Entry> en: newtrees.entrySet()) { - String key = en.getKey(); - SortedSet tree = origtrees.get(key); - if (tree == null) { - origtrees.put(key, tree = new TreeSet()); - } - tree.addAll(en.getValue()); - } - System.out.println("validating merge. this will take a few minutes, mostly due to Thread.sleep(). just be patient :-)"); - for (SkeletonBTreeSet entries: idx.ttab.values()) { - // FIXME HIGH make some way of doing this in parallel, maybe - entries.inflate(); - assertTrue(entries.isLive()); - } - assertTrue(origtrees.equals(idx.ttab)); - System.out.println("merge validated in " + timeDiff() + " ms."); - - System.out.println(""); - - } - - public void testBasicMulti() throws TaskAbortException { - if (!extensive) { return; } - for (int i=0; i entries = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - csrl.setSerialiserFor(entries); - - int n = rand.nextInt(0xF0) + 0x10; - totalentries += n; - - for (int j=0; j entries: idx.ttab.values()) { - entries.deflate(); - assertTrue(entries.isBare()); - } - idx.ttab.deflate(); - assertTrue(idx.ttab.isBare()); - assertFalse(idx.ttab.isLive()); - PushTask task = new PushTask(idx); - srl.push(task); - - System.out.print("deflated in " + timeDiff() + " ms, root at " + task.meta + ", "); - - PullTask tasq = new PullTask(task.meta); - srl.pull(tasq); - - for (String s: randomWords) { - //assertFalse(test.isLive()); // might be live if inflate(key) inflates some other keys too - idx.ttab.inflate(s); - idx.ttab.get(s); - assertFalse(idx.ttab.isBare()); - } - assertTrue(idx.ttab.isLive()); - assertFalse(idx.ttab.isBare()); - System.out.println("inflated all terms separately in " + timeDiff() + " ms"); - - } - - public void testPartialInflateMulti() throws TaskAbortException { - if (!extensive) { return; } - for (int i=0; i entries = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); - csrl.setSerialiserFor(entries); - - int n = rand.nextInt(0x200) + 0x200; - totalentries += n; - - for (int j=0; j entries: idx.ttab.values()) { - entries.deflate(); - assertTrue(entries.isBare()); - } - idx.ttab.deflate(); - assertTrue(idx.ttab.isBare()); - assertFalse(idx.ttab.isLive()); - PushTask task = new PushTask(idx); - srl.push(task); - 
- System.out.print("deflated in " + timeDiff() + " ms, root at " + task.meta + ", "); - plugins.Library.io.serial.FileArchiver.setTestMode(); - - System.out.println("Requesting entries for term " + sterm); - Execution> rq1 = idx.getTermEntries(sterm); - Execution> rq2 = idx.getTermEntries(sterm); - Execution> rq3 = idx.getTermEntries(sterm); - - assertTrue(rq1 == rq2); - assertTrue(rq2 == rq3); - assertTrue(rq3 == rq1); - - Set entries; - while ((entries = rq1.getResult()) == null) { - System.out.println(rq1.getStatus()); - try { Thread.sleep(1000); } catch (InterruptedException x) { } - } - - int count=0; - for (TermEntry en: entries) { - ++count; - } - assertTrue(count == entries.size()); - System.out.println(count + " entries successfully got in " + rq1.getTimeElapsed() + "ms"); - - } - -} diff --git a/test/plugins/Library/index/TermEntryTest.java b/test/plugins/Library/index/TermEntryTest.java deleted file mode 100644 index fec1256f..00000000 --- a/test/plugins/Library/index/TermEntryTest.java +++ /dev/null @@ -1,121 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.index; - -import junit.framework.TestCase; - -import plugins.Library.io.serial.Serialiser.*; -import plugins.Library.io.serial.FileArchiver; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.io.serial.Packer; - -import plugins.Library.io.YamlReaderWriter; - -import freenet.keys.FreenetURI; - -import java.util.Arrays; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; -import java.util.UUID; -import java.net.MalformedURLException; -import java.io.*; - -/** -** @author infinity0 -*/ -public class TermEntryTest extends TestCase { - - final static TermTermEntry w = new TermTermEntry("test", 0.8f, "lol"); - final static TermIndexEntry x; - final static TermPageEntry z; - final static TermPageEntry v; - static { - try { - x = new TermIndexEntry("test", 0.8f, new FreenetURI("CHK@MIh5-viJQrPkde5gmRZzqjBrqOuh~Wbjg02uuXJUzgM,rKDavdwyVF9Z0sf5BMRZsXj7yiWPFUuewoe0CPesvXE,AAIC--8")); - z = new TermPageEntry("lol", 0.8f, new FreenetURI("CHK@9eDo5QWLQcgSuDh1meTm96R4oE7zpoMBuV15jLiZTps,3HJaHbdW~-MtC6YsSkKn6I0DTG9Z1gKDGgtENhHx82I,AAIC--8"), null); - v = new TermPageEntry("lol", 0.8f, new FreenetURI("CHK@9eDo5QWLQcgSuDh1meTm96R4oE7zpoMBuV15jLiZTps,3HJaHbdW~-MtC6YsSkKn6I0DTG9Z1gKDGgtENhHx82I,AAIC--8"), "title", null); - } catch (MalformedURLException e) { - throw new AssertionError(); - } - } - final static TermTermEntry y = new TermTermEntry("test", 0.8f, "lol2"); - - public void testBasic() throws TaskAbortException { - File f = new File("TermEntryTest"); - f.mkdir(); - FileArchiver> ym = new FileArchiver>(new YamlReaderWriter(), "test", null, ".yml", f); - - Map map = new HashMap(); - - List l = new ArrayList(); - l.add(w); - l.add(w); - l.add(x); - l.add(y); - l.add(z); - map.put("test", l); - try { - map.put("test2", new Packer.BinInfo(new FreenetURI("http://127.0.0.1:8888/CHK@WtWIvOZXLVZkmDrY5929RxOZ-woRpRoMgE8rdZaQ0VU,rxH~D9VvOOuA7bCnVuzq~eux77i9RR3lsdwVHUgXoOY,AAIC--8/Library.jar"), 123)); - } catch (java.net.MalformedURLException e) { - assert(false); - } - - ym.push(new PushTask>(map)); - PullTask> pt = new PullTask>(""); - try{ - ym.pull(pt); - } catch (Exception e) { - e.printStackTrace(); - } - - assertTrue(pt.data instanceof Map); - Map m 
= pt.data; - assertTrue(m.get("test") instanceof List); - List ll = (List)m.get("test"); - assertTrue(ll.get(0) instanceof TermTermEntry); - assertTrue(ll.get(1) == ll.get(0)); - // NOTE these tests fail in snakeYAML 1.2 and below, fixed in hg - assertTrue(ll.get(2) instanceof TermIndexEntry); - assertTrue(ll.get(3) instanceof TermTermEntry); - - assertTrue(m.get("test2") instanceof Packer.BinInfo); - Packer.BinInfo inf = (Packer.BinInfo)m.get("test2"); - assertTrue(inf.getID() instanceof FreenetURI); - } - - public void testBinaryReadWrite() throws IOException, TaskAbortException { - TermEntryReaderWriter rw = TermEntryReaderWriter.getInstance(); - ByteArrayOutputStream bo = new ByteArrayOutputStream(); - DataOutputStream oo = new DataOutputStream(bo); - rw.writeObject(v, oo); - rw.writeObject(w, oo); - rw.writeObject(x, oo); - rw.writeObject(y, oo); - rw.writeObject(z, oo); - oo.close(); - ByteArrayInputStream bi = new ByteArrayInputStream(bo.toByteArray()); - DataInputStream oi = new DataInputStream(bi); - TermEntry v1 = rw.readObject(oi); - TermEntry w1 = rw.readObject(oi); - TermEntry x1 = rw.readObject(oi); - TermEntry y1 = rw.readObject(oi); - TermEntry z1 = rw.readObject(oi); - oi.close(); - assertEqualButNotIdentical(v, v1); - assertEqualButNotIdentical(w, w1); - assertEqualButNotIdentical(x, x1); // this will fail before fred@a6e73dbbaa7840bd20d5e3fb95cd2c678a106e85 - assertEqualButNotIdentical(y, y1); - assertEqualButNotIdentical(z, z1); - } - - public static void assertEqualButNotIdentical(Object a, Object b) { - assertTrue(a != b); - assertTrue(a.equals(b)); - assertTrue(a.hashCode() == b.hashCode()); - } - -} diff --git a/test/plugins/Library/io/serial/PackerTest.java b/test/plugins/Library/io/serial/PackerTest.java deleted file mode 100644 index 7687f763..00000000 --- a/test/plugins/Library/io/serial/PackerTest.java +++ /dev/null @@ -1,84 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.io.serial; - -import junit.framework.TestCase; - -import plugins.Library.util.Generators; -import plugins.Library.util.SkeletonTreeMap; -import plugins.Library.util.exec.TaskAbortException; -import plugins.Library.io.serial.Packer.Bin; -import plugins.Library.io.serial.Serialiser.*; - -import java.util.Map; -import java.util.List; -import java.util.Iterator; -import java.util.HashSet; -import java.util.HashMap; - -/** -** PRIORITY actually write some tests for this... 
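-**
-** What Packer is doing here: pushed tasks are grouped into bins whose total
-** weight is capped at NODE_MAX (64), the weight of each HashSet being its
-** size() via the Scale override below; the dummy IterableSerialiser then just
-** prints each packed bin. So tasks whose weights sum to W should come out in
-** at least ceil(W / 64) bins, e.g. (sketch of the call testBasic() makes):
-**
-**   srl.push(generateTasks(new int[]{1,2,3,4,5}), null);  // total weight 15, could fit one bin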
-** -** @author infinity0 -*/ -public class PackerTest extends TestCase { - - final static public int NODE_MAX = 64; - - final public static Packer srl = new Packer( - new IterableSerialiser>() { - - public void pull(Iterable>> t) {} - public void push(Iterable>> t) { - for (PushTask> task: t) { - System.out.print("["); - for (Map.Entry en: task.data.entrySet()) { - System.out.print(en.getKey() + ": " + en.getValue().size() + ", "); - } - System.out.println("]"); - } - } - - public void pull(PullTask> t) {} - public void push(PushTask> t) {} - - }, - new Packer.Scale() { - @Override public int weigh(HashSet elem) { - return elem.size(); - } - }, - NODE_MAX - ); - - protected Map> generateTasks(int[] sizes) { - String meta = "dummy metadata"; - Map> tasks = new HashMap>(); - for (int size: sizes) { - HashSet hs = new HashSet(size>>1); - for (int i=0; i(hs, meta)); - } - return tasks; - } - - public void testBasic() throws TaskAbortException { - -// for (int i=0; i<16; ++i) { - // do this several times since random UUID changes the order of the task map - - srl.push(generateTasks(new int[]{1,2,3,4,5}), null); - srl.push(generateTasks(new int[]{1,2,3,4,5,6,7,8,9,10,11,12}), null); - srl.push(generateTasks(new int[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}), null); - -// } - - } - - // TODO write some more tests for this... - - -} diff --git a/test/plugins/Library/io/serial/YamlMapTest.java b/test/plugins/Library/io/serial/YamlMapTest.java deleted file mode 100644 index 414e8a21..00000000 --- a/test/plugins/Library/io/serial/YamlMapTest.java +++ /dev/null @@ -1,123 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
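-
- The YamlMapTest below exercises SnakeYAML's two extension points: an
- ExtendedRepresenter that dumps Custom as a "!Custom" scalar, and an
- ExtendedConstructor that rebuilds a Custom from that scalar on load.
- A rough sketch of the same round trip against the old 1.x Loader/Dumper
- API this test targets:
-
-   Yaml yaml = new Yaml(new Loader(new ExtendedConstructor()),
-                        new Dumper(new ExtendedRepresenter(), new DumperOptions()));
-   String doc = yaml.dump(new Custom("x"));   // emits roughly: !Custom 'x'
-   Custom back = (Custom) yaml.load(doc);     // rebuilt via ConstructCustom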
*/ -package plugins.Library.io.serial; - -import junit.framework.TestCase; - -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.Yaml; -import org.yaml.snakeyaml.error.YAMLException; -import org.yaml.snakeyaml.Loader; -import org.yaml.snakeyaml.Dumper; -import org.yaml.snakeyaml.DumperOptions; -import org.yaml.snakeyaml.representer.Representer; -import org.yaml.snakeyaml.representer.Represent; -import org.yaml.snakeyaml.nodes.Node; -import org.yaml.snakeyaml.nodes.ScalarNode; -import org.yaml.snakeyaml.nodes.MappingNode; -import org.yaml.snakeyaml.constructor.Constructor; -import org.yaml.snakeyaml.constructor.Construct; -import org.yaml.snakeyaml.constructor.ConstructorException; - -import java.io.*; -import java.util.*; - -/** -** @author infinity0 -*/ -public class YamlMapTest extends TestCase { - - public void testYamlMap() throws IOException { - Map data = new TreeMap(); - data.put("key1", new Bean()); - data.put("key2", new Bean()); - data.put("key3", new Custom("test")); - data.put("key4", new Wrapper("test", new Custom("test"))); - - Yaml yaml = new Yaml(new Loader(new ExtendedConstructor()), - new Dumper(new ExtendedRepresenter(), new DumperOptions())); - File file = new File("beantest.yml"); - - FileOutputStream os = new FileOutputStream(file); - yaml.dump(data, new OutputStreamWriter(os)); - os.close(); - - FileInputStream is = new FileInputStream(file); - Object o = yaml.load(new InputStreamReader(is)); - is.close(); - - assertTrue(o instanceof Map); - Map m = (Map)o; - assertTrue(m.get("key1") instanceof Bean); - assertTrue(m.get("key2") instanceof Bean); // NOTE these tests fail in snakeYAML 1.2 and below, fixed in 1.3 - assertTrue(m.get("key3") instanceof Custom); - assertTrue(m.get("key4") instanceof Wrapper); - } - - public static class Bean { - private String a; - public Bean() { a = ""; } - public String getA() { return a; } - public void setA(String s) { a = s; } - - } - - public static class Wrapper { - private String a; - private Custom b; - public Wrapper(String s, Custom bb) { a = s; b = bb; } - public Wrapper() { } - public String getA() { return a; } - public void setA(String s) { a = s; } - public Custom getB() { return b; } - public void setB(Custom bb) { b = bb; } - } - - public static class Custom { - final private String str; - public Custom(String s) { - str = s; - } - // the presence of this constructor causes 1.4-snapshot to fail - // fixed in 1.4-rc1 - public Custom(Integer i) { - str = ""; - } - // the absence of this constructor causes 1.3 to fail - // fixed in 1.4-rc1 - public Custom(Custom c) { - str = c.str; - } - public String toString() { return str; } - } - - - public static class ExtendedRepresenter extends Representer { - public ExtendedRepresenter() { - this.representers.put(Custom.class, new RepresentCustom()); - } - - private class RepresentCustom implements Represent { - public Node representData(Object data) { - return representScalar("!Custom", ((Custom) data).toString()); - } - } - } - - - public static class ExtendedConstructor extends Constructor { - public ExtendedConstructor() { - this.yamlConstructors.put("!Custom", new ConstructCustom()); - } - - private class ConstructCustom implements Construct { - public Object construct(Node node) { - String str = (String) constructScalar((ScalarNode)node); - return new Custom(str); - } - public void construct2ndStep(Node node, Object object) { } - } - } - -} diff --git a/test/plugins/Library/util/BTreeMapTest.java b/test/plugins/Library/util/BTreeMapTest.java deleted file mode 100644 
index c555a59a..00000000 --- a/test/plugins/Library/util/BTreeMapTest.java +++ /dev/null @@ -1,97 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import junit.framework.TestCase; - -import java.util.*; - -/** -** @author infinity0 -*/ -public class BTreeMapTest extends SortedMapTestSkeleton { - - @Override public SortedMap makeTestMap() { - return new BTreeMap(0x40); - } - - final public static int sz0 = 0x400; - final public static int sz1 = sz0<<2; //sz0<<6; - - public void testBasic() { - - BTreeMap testmap = new BTreeMap(0x40); - Map backmap = new HashMap(); - try { - for (int i=0; i it = backmap.keySet().iterator(); - for (int i=0; i backmap = new TreeMap(); - for (int i=0; i testmap = new BTreeMap(2); - testmap.putAll(backmap); - testmap.verifyTreeIntegrity(); - //if (n<10) { System.out.println(testmap.toTreeString()); } - } - - } - - public void testNumericIndexes() { - - BTreeMap testmap = new BTreeMap(0x40); - int i=0; - - for (i=0; i ts = (new BTreeMap(0x40)).subSet(new TreeSet( - Arrays.asList("91665799", "93fc4806", "94ff78b2", "952225c8", "9678e897", "96bb4208", - "989c8d3f", "9a1f0877", "9faea63f", "9fec4192", "a19e7d4e", "a61a2c10", - "a6c681ec", "a72d4e4e", "a845d2cb")), "96bb4208", "9fec4192"); - assertTrue(ts.first().equals("989c8d3f")); - assertTrue(ts.last().equals("9faea63f")); - } - -} diff --git a/test/plugins/Library/util/BytePrefixKeyTest.java b/test/plugins/Library/util/BytePrefixKeyTest.java deleted file mode 100644 index b951fb7b..00000000 --- a/test/plugins/Library/util/BytePrefixKeyTest.java +++ /dev/null @@ -1,25 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import junit.framework.TestCase; -import static plugins.Library.util.Generators.rand; - -import java.util.Random; -import java.util.Arrays; - -/** -** @author infinity0 -*/ -public class BytePrefixKeyTest extends TestCase { - - public void testBasic() { - for (int i=0; i<256; ++i) { - byte[] bs = new byte[32]; - rand.nextBytes(bs); - assertTrue(Arrays.equals(bs, BytePrefixKey.hexToBytes(BytePrefixKey.bytesToHex(bs)))); - } - } - -} diff --git a/test/plugins/Library/util/Generators.java b/test/plugins/Library/util/Generators.java deleted file mode 100644 index 70c97faf..00000000 --- a/test/plugins/Library/util/Generators.java +++ /dev/null @@ -1,43 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.util; - -import junit.framework.TestCase; - -import plugins.Library.index.*; - -import freenet.keys.FreenetURI; - -import java.util.UUID; -import java.util.Random; -import java.net.MalformedURLException; - -/** -** @author infinity0 -*/ -final public class Generators { - - final public static Random rand; - - static { - long seed = System.currentTimeMillis(); - rand = new Random(seed); - System.err.println("Generators.rand initialised with seed " + seed); - } - - private Generators() {} - - public static String rndStr() { - return UUID.randomUUID().toString(); - } - - public static String rndKey() { - return rndStr().substring(0,8); - } - - public static TermPageEntry rndEntry(String key) { - return new TermPageEntry(key, (float)Math.random(), FreenetURI.generateRandomCHK(rand), null); - } - -} diff --git a/test/plugins/Library/util/IntegersTest.java b/test/plugins/Library/util/IntegersTest.java deleted file mode 100644 index 5b035517..00000000 --- a/test/plugins/Library/util/IntegersTest.java +++ /dev/null @@ -1,71 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import junit.framework.TestCase; -import static plugins.Library.util.Generators.rand; - -import java.util.Random; -import java.util.Arrays; - -/** -** @author infinity0 -*/ -public class IntegersTest extends TestCase { - - private int[] totals = { - 10, - 6, - 10 - }; - private int[] nums = { - 5, - 4, - 6 - }; - private int[][] shares = { - {2,2,2,2,2}, - {1,2,1,2}, - {2,1,2,2,1,2} - }; - - public void testAllocateEvenlyPredefined() { - for (int i=0; i it = Integers.allocateEvenly(total, num); - //System.out.println(it); - - int j=0; - for (Integer ii: it) { - share[j++] = ii; - } - - int k = total / num; - int r = total % num; - Arrays.sort(share); - assertTrue(share[0] == k); - assertTrue(share[num-r-1] == k); - if (r > 0) { - assertTrue(share[num-r] == k+1); - assertTrue(share[num-1] == k+1); - } - } - } - -} diff --git a/test/plugins/Library/util/SkeletonTreeMapTest.java b/test/plugins/Library/util/SkeletonTreeMapTest.java deleted file mode 100644 index 5573a2e1..00000000 --- a/test/plugins/Library/util/SkeletonTreeMapTest.java +++ /dev/null @@ -1,126 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.util; - -import junit.framework.TestCase; - -import java.util.Map; -import java.util.SortedMap; - -/** -** @author infinity0 -*/ -public class SkeletonTreeMapTest extends SortedMapTestSkeleton { - - SkeletonTreeMap skelmap; - - protected void setUp() { - skelmap = new SkeletonTreeMap(); - for (int i=0; i<1024; ++i) { - skelmap.putGhost(Generators.rndKey(), Boolean.FALSE); - } - } - - protected void tearDown() { - } - - public void fillSkelMap() { - for (String s: skelmap.keySet()) { - skelmap.put(s, 123); - } - } - - @Override public SortedMap makeTestMap() { - return new SkeletonTreeMap(); - } - - public void testBasic() { - skelmap = new SkeletonTreeMap(); - skelmap.putGhost("l0l", Boolean.FALSE); - skelmap.putGhost("l1l", Boolean.FALSE); - skelmap.putGhost("l2l", Boolean.FALSE); - - assertTrue(skelmap.firstKey().equals("l0l")); - assertTrue(skelmap.lastKey().equals("l2l")); - - try { - assertTrue(skelmap.get("zzz") == null); - assertTrue(skelmap.get("123") == null); - skelmap.get("l0l"); - } catch (DataNotLoadedException e) { - assertTrue(e.getParent() == skelmap); - assertTrue(e.getKey().equals("l0l")); - } - - skelmap.put("l0l", new Integer(123)); - assertTrue(skelmap.get("l0l") == 123); - - assertTrue(skelmap.size() == 3); - assertTrue(skelmap.remove("l0l") == 123); - assertTrue(skelmap.size() == 2); - - try { - skelmap.get("l1l"); - } catch (DataNotLoadedException e) { - assertTrue(e.getParent() == skelmap); - assertTrue(e.getKey().equals("l1l")); - } - - } - - public void testIncompleteEntrySet() { - try { - for (Map.Entry en: skelmap.entrySet()) { - assertTrue(skelmap.entrySet().contains(en)); - } - } catch (DataNotLoadedException e) { - assertTrue(skelmap.firstKey() == e.getKey()); - } - - skelmap.put(skelmap.firstKey(), 123); - try { - for (Map.Entry en: skelmap.entrySet()) { - assertTrue(skelmap.entrySet().contains(en)); - } - } catch (DataNotLoadedException e) { - assertTrue(skelmap.firstKey() != e.getKey()); - } - - fillSkelMap(); - for (Map.Entry en: skelmap.entrySet()) { - assertTrue(skelmap.entrySet().contains(en)); - } - } - - public void testIncompleteValues() { - int i = 0; - - try { - for (Integer en: skelmap.values()) { - assertTrue(skelmap.values().contains(en)); - ++i; - } - } catch (DataNotLoadedException e) { - assertTrue(e.getValue() != null); - assertTrue(i == 0); - } - - skelmap.put(skelmap.firstKey(), 123); - i = 0; - try { - for (Integer en: skelmap.values()) { - assertTrue(skelmap.values().contains(en)); - ++i; - } - } catch (DataNotLoadedException e) { - assertTrue(i == 1); - } - - fillSkelMap(); - for (Integer en: skelmap.values()) { - assertTrue(skelmap.values().contains(en)); - } - } - -} diff --git a/test/plugins/Library/util/SortedArraySetTest.java b/test/plugins/Library/util/SortedArraySetTest.java deleted file mode 100644 index 3c1a2271..00000000 --- a/test/plugins/Library/util/SortedArraySetTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.util; - -import junit.framework.TestCase; - -import java.util.SortedSet; - -/** -** @author infinity0 -*/ -public class SortedArraySetTest extends TestCase { - - SortedArraySet marr = new SortedArraySet(new Integer[]{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}); - - public void testSubsets() { - assertTrue(marr.first() == 0); - assertTrue(marr.last() == 15); - assertTrue(marr.size() == 16); - - SortedSet head = marr.headSet(8); - SortedSet tail = marr.tailSet(8); - SortedSet sub = marr.subSet(4, 12); - - assertTrue(head.first() == 0); - assertTrue(head.last() == 7); - assertTrue(head.size() == 8); - - assertTrue(tail.first() == 8); - assertTrue(tail.last() == 15); - assertTrue(tail.size() == 8); - - assertTrue(sub.first() == 4); - assertTrue(sub.last() == 11); - assertTrue(sub.size() == 8); - } - - public void testNoDuplicates() { - try { - SortedArraySet arr = new SortedArraySet(new Integer[]{0,1,1,2,3,4,5,6,7,8,9,10,11,12,13,14}); - fail("failed to detect an array with duplicates in"); - } catch (RuntimeException e) { - assertTrue(e instanceof IllegalArgumentException); - } - } - - public void testSortedDetection() { - try { - SortedArraySet arr = new SortedArraySet(new Integer[]{0,1,3,2,4,5,6,7,8,9,10,11,12,13,14,15}, null, true); - fail("failed to detect an unsorted array"); - } catch (RuntimeException e) { - assertTrue(e instanceof IllegalArgumentException); - } - } - -} diff --git a/test/plugins/Library/util/SortedMapTestSkeleton.java b/test/plugins/Library/util/SortedMapTestSkeleton.java deleted file mode 100644 index daf687f7..00000000 --- a/test/plugins/Library/util/SortedMapTestSkeleton.java +++ /dev/null @@ -1,141 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. 
*/ -package plugins.Library.util; - -import junit.framework.TestCase; - -import java.util.Map; -import java.util.SortedMap; -import java.util.Iterator; - -/** -** TODO maybe make some tailMap, headMap, subMap tests -** -** @author infinity0 -*/ -abstract public class SortedMapTestSkeleton extends TestCase { - - protected SortedMap testmap; - - abstract protected SortedMap makeTestMap(); - - public void fillTestMap() { - testmap = makeTestMap(); - for (int i=0; i<0x1000; ++i) { - testmap.put(Generators.rndStr(), Generators.rand.nextInt()); - } - } - - public void testEntrySet() { - // test that entrySet is properly backed by the map - fillTestMap(); - assertTrue(testmap.entrySet() == testmap.entrySet()); - assertTrue(testmap.size() == testmap.entrySet().size()); - testmap.put(testmap.firstKey(), 123); - Map.Entry entry = null; - for (Map.Entry en: testmap.entrySet()) { entry = en; break; } - assertTrue(testmap.entrySet().contains(entry)); - entry.setValue(124); - assertTrue(testmap.get(testmap.firstKey()) == 124); - assertTrue(testmap.entrySet().contains(entry)); - - int s = testmap.size(), i = 0; - for (Map.Entry en: testmap.entrySet()) { ++i; } - assertTrue(s == i); - i = 0; - - Map.Entry e = null; - while (testmap.size() > 0) { - // get first entry - for (Map.Entry en: testmap.entrySet()) { e = en; break; } - assertTrue(e.getKey() == testmap.firstKey()); - testmap.entrySet().remove(e); - ++i; - } - assertTrue(s == i); - } - - public void testKeySet() { - // test that keySet is properly backed by the map - fillTestMap(); - assertTrue(testmap.keySet() == testmap.keySet()); - assertTrue(testmap.size() == testmap.keySet().size()); - int s = testmap.size(), i = 0; - for (String en: testmap.keySet()) { ++i; } - assertTrue(s == i); - i = 0; - String e = null; - while (testmap.size() > 0) { - // get first entry - for (String en: testmap.keySet()) { e = en; break; } - assertTrue(e.equals(testmap.firstKey())); - testmap.keySet().remove(e); - ++i; - } - assertTrue(s == i); - } - - public void testValues() { - // test that values is properly backed by the map - fillTestMap(); - assertTrue(testmap.values() == testmap.values()); - assertTrue(testmap.size() == testmap.values().size()); - int s = testmap.size(), i = 0; - for (Integer en: testmap.values()) { ++i; } - assertTrue(s == i); - i = 0; - Integer e = null; - while (testmap.size() > 0) { - // get first entry - for (Integer en: testmap.values()) { e = en; break; } - assertTrue(e.equals(testmap.get(testmap.firstKey()))); - testmap.values().remove(e); - ++i; - } - assertTrue(s == i); - } - - public void testEntrySetIterator() { - // test that entrySet.iterator is properly backed by the map - fillTestMap(); - Iterator> it = testmap.entrySet().iterator(); - int s=testmap.size(), i=0; - while (it.hasNext()) { - assertTrue(it.next().getKey() == testmap.firstKey()); - it.remove(); - ++i; - } - assertTrue(i == s); - assertTrue(testmap.size() == 0); - } - - public void testKeySetIterator() { - // test that keySet.iterator is properly backed by the map - fillTestMap(); - Iterator it = testmap.keySet().iterator(); - int s=testmap.size(), i=0; - while (it.hasNext()) { - assertTrue(it.next().equals(testmap.firstKey())); - it.remove(); - ++i; - } - assertTrue(i == s); - assertTrue(testmap.size() == 0); - } - - public void testValuesIterator() { - // test that values.iterator is properly backed by the map - fillTestMap(); - Iterator it = testmap.values().iterator(); - int s=testmap.size(), i=0; - while (it.hasNext()) { - 
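- // values() of a SortedMap iterates in key order, so each next() must equal
- // the value at the current firstKey(); remove() must write through to the map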
assertTrue(it.next().equals(testmap.get(testmap.firstKey()))); - it.remove(); - ++i; - } - assertTrue(i == s); - assertTrue(testmap.size() == 0); - } - -} diff --git a/test/plugins/Library/util/SortedTest.java b/test/plugins/Library/util/SortedTest.java deleted file mode 100644 index 2dffc627..00000000 --- a/test/plugins/Library/util/SortedTest.java +++ /dev/null @@ -1,190 +0,0 @@ -/* This code is part of Freenet. It is distributed under the GNU General - * Public License, version 2 (or at your option any later version). See - * http://www.gnu.org/ for further details of the GPL. */ -package plugins.Library.util; - -import junit.framework.TestCase; -import static plugins.Library.util.Generators.rand; - -import plugins.Library.util.Sorted.Inclusivity; - -import java.util.Random; -import java.util.Iterator; -import java.util.Arrays; -import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; - -/** -** @author infinity0 -*/ -public class SortedTest extends TestCase { - - final public static int rruns = 0x80; - final public static int rsizelow = 0x40; // must be > 8 - - private Integer[] split_sep = { 8, 16, 24, 32 }; - private Integer[][] split_subj = { - {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, - {4,5,6,7,8,9,10,11,12}, - {19,24,25}, - {17,19,21}, - {16,24}, - {4,12,20,28,36}, - {12,20,28}, - {4,8,12,16,20,24,28,32,36} - }; - private Integer[][][] split_res = { - {{1,7},{9,15}}, - {{4,7},{9,12}}, - {{19,19},{25,25}}, - {{17,21}}, - {}, - {{4,4},{12,12},{20,20},{28,28},{36,36}}, - {{12,12},{20,20},{28,28}}, - {{4,4},{12,12},{20,20},{28,28},{36,36}}, - }; - private Integer[][] split_fsep = { - {8}, - {8}, - {24}, - {}, - {16,24}, - {}, - {}, - {8,16,24,32} - }; - - public void testSplitPredefined() { - SortedSet sep = new TreeSet(Arrays.asList(split_sep)); - - for (int i=0; i fsep = new TreeSet(); - List> res = Sorted.split(new TreeSet(Arrays.asList(split_subj[i])), sep, fsep); - - Iterator> rit = res.iterator(); - for (int j=0; j range = rit.next(); - assertTrue(range.first().equals(split_res[i][j][0])); - assertTrue(range.last().equals(split_res[i][j][1])); - } - assertFalse(rit.hasNext()); - - assertTrue(Arrays.deepEquals(split_fsep[i], fsep.toArray())); - Iterator fit = fsep.iterator(); - for (int j=0; j res = Sorted.select(new TreeSet(Arrays.asList(select_subj[i])), 2, incs[j]); - assertTrue(Arrays.deepEquals(select_res[i][j], res.toArray())); - } - } - } - - public void verifySplit(SortedSet subj, SortedSet sep) { - // FIXME LOW this method assumes comparator is consistent with equals() and - // there are no null entries - SortedSet fsep = new TreeSet(); - List> subs = null; - try { - subs = Sorted.split(subj, sep, fsep); - } catch (Error e) { - System.out.println(subj); - System.out.println(sep); - throw e; - } - - try { - Iterator> lit = subs.iterator(); - Iterator pit = fsep.iterator(); - Iterator subit = null; - String cursep = pit.hasNext()? pit.next(): null; - for (Iterator it = subj.iterator(); it.hasNext(); ) { - String key = it.next(); - if (subit != null && subit.hasNext()) { - assertTrue(key.equals(subit.next())); - continue; - } - if (key.equals(cursep)) { - cursep = pit.hasNext()? 
pit.next(): null; - continue; - } - assertTrue(lit.hasNext()); - subit = lit.next().iterator(); - assertTrue(subit.hasNext()); - assertTrue(key.equals(subit.next())); - } - assertFalse(lit.hasNext()); - assertFalse(pit.hasNext()); - } catch (Error e) { - System.out.println(subj); - System.out.println(sep); - System.out.println(fsep); - System.out.println(subs); - throw e; - } - } - - protected void randomSelectSplit(int n, int k) { - SortedSet subj = new TreeSet(); - for (int i=0; i sep = new TreeSet(); - List candsep = Sorted.select(subj, k); - assertTrue(candsep.size() == k); - for (String key: candsep) { - sep.add((rand.nextInt(2) == 0)? key: Generators.rndKey()); - } - assertTrue(sep.size() == k); - - verifySplit(subj, sep); - } - - public void testRandomSelectSplit() { - int f = 0x10; - for (int n=0; n void testGenericSweeper(S sw1, S sw2) { - testAfterNew(sw1); - testCloseClear(sw1); - testAfterClear(sw1); - - testAfterNew(sw2); - testReleaseClear(sw2); - testAfterClear(sw2); - } - - public void testIterableSweeper(S sw1, S sw2) { - testAfterNew(sw1); - testCloseClearIterable(sw1); - testAfterClear(sw1); - - testAfterNew(sw2); - testReleaseClearIterable(sw2); - testAfterClear(sw2); - } - - @SuppressWarnings("unchecked") - public void testAfterNew(final Sweeper sw) { - assertTrue(sw.getState() == Sweeper.State.NEW); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when not open"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[1]); } }, "acquiring when not open"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[2]); } }, "acquiring when not open"); - testIllegalStateException(new Runnable() { public void run() { sw.release(testobj[0]); } }, "releasing when not open"); - testIllegalStateException(new Runnable() { public void run() { sw.close(); } }, "closing when not open"); - assertTrue(sw.size() == 0); - assertTrue(sw.getState() == Sweeper.State.NEW); - sw.open(); - testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening twice"); - assertTrue(sw.getState() == Sweeper.State.OPEN); - } - - @SuppressWarnings("unchecked") - public void testCloseClear(final Sweeper sw) { - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - sw.release(testobj[0]); - sw.release(testobj[1]); - sw.release(testobj[2]); - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - sw.release(testobj[0]); - sw.release(testobj[1]); - sw.release(testobj[2]); - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.close(); - assertTrue(sw.getState() == Sweeper.State.CLEARED); - } - - @SuppressWarnings("unchecked") - public void testCloseClearIterable(final S sw) { - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.close(); - assertTrue(sw.getState() == Sweeper.State.CLEARED); - } - - @SuppressWarnings("unchecked") - public void testReleaseClear(final Sweeper sw) { - assertTrue(sw.getState() == 
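- /* state machine these helpers walk: NEW -> open() -> OPEN -> close(); close()
- gives CLOSED while objects are still held and the final release() then yields
- CLEARED, whereas close() with nothing held (testCloseClear) goes straight to
- CLEARED */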
Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - sw.release(testobj[0]); - sw.release(testobj[1]); - sw.release(testobj[2]); - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.close(); - assertTrue(sw.getState() == Sweeper.State.CLOSED); - testAfterClose(sw); - sw.release(testobj[0]); - sw.release(testobj[1]); - sw.release(testobj[2]); - assertTrue(sw.getState() == Sweeper.State.CLEARED); - } - - @SuppressWarnings("unchecked") - public void testReleaseClearIterable(final S sw) { - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } - assertTrue(sw.getState() == Sweeper.State.OPEN); - sw.acquire(testobj[0]); - sw.acquire(testobj[1]); - sw.acquire(testobj[2]); - sw.close(); - assertTrue(sw.getState() == Sweeper.State.CLOSED); - testAfterClose(sw); - for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } - assertTrue(sw.getState() == Sweeper.State.CLEARED); - } - - @SuppressWarnings("unchecked") - public void testAfterClose(final Sweeper sw) { - assertTrue(sw.getState() == Sweeper.State.CLOSED); - int s = sw.size(); - assertTrue(s > 0); - testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening when closed"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when closed"); - assertTrue(sw.getState() == Sweeper.State.CLOSED); - assertTrue(sw.size() == s); - } - - @SuppressWarnings("unchecked") - public void testAfterClear(final Sweeper sw) { - assertTrue(sw.getState() == Sweeper.State.CLEARED); - testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening when cleared"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when cleared"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[1]); } }, "acquiring when cleared"); - testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[2]); } }, "acquiring when cleared"); - testIllegalStateException(new Runnable() { public void run() { sw.release(testobj[0]); } }, "releasing when cleared"); - testIllegalStateException(new Runnable() { public void run() { sw.close(); } }, "closing when cleared"); - assertTrue(sw.size() == 0); - assertTrue(sw.getState() == Sweeper.State.CLEARED); - } - - public void testIllegalStateException(Runnable r, String s) { - try { - r.run(); - fail("Failed to throw correct exception when: " + s); - } catch (IllegalStateException e) { - /* correct */ - } catch (RuntimeException e) { - fail("Failed to throw correct exception when: " + s); - } - } - -} From 174d9ba662cec692cb297ed4306d2b3b34ec76c1 Mon Sep 17 00:00:00 2001 From: DC* Date: Fri, 14 Aug 2020 23:31:27 -0300 Subject: [PATCH 03/10] Gradle java source structure --- build.gradle | 99 + src/main/java/plugins/Library/Index.java | 30 + src/main/java/plugins/Library/Library.java | 780 +++++++ src/main/java/plugins/Library/Main.java | 199 ++ .../java/plugins/Library/SpiderIndexURIs.java | 121 + .../plugins/Library/SpiderIndexUploader.java | 969 ++++++++ src/main/java/plugins/Library/Version.java | 17 + .../java/plugins/Library/VirtualIndex.java | 20 + .../java/plugins/Library/WriteableIndex.java | 
32 + .../Library/client/FreenetArchiver.java | 608 +++++ .../plugins/Library/index/ProtoIndex.java | 356 +++ .../index/ProtoIndexComponentSerialiser.java | 586 +++++ .../Library/index/ProtoIndexSerialiser.java | 230 ++ .../java/plugins/Library/index/TermEntry.java | 137 ++ .../Library/index/TermEntryReaderWriter.java | 124 ++ .../plugins/Library/index/TermIndexEntry.java | 58 + .../plugins/Library/index/TermPageEntry.java | 240 ++ .../plugins/Library/index/TermTermEntry.java | 54 + .../java/plugins/Library/index/URIEntry.java | 75 + .../java/plugins/Library/index/URIKey.java | 48 + .../plugins/Library/index/package-info.java | 10 + .../Library/index/xml/FindRequest.java | 224 ++ .../Library/index/xml/LibrarianHandler.java | 237 ++ .../Library/index/xml/MainIndexParser.java | 245 +++ .../Library/index/xml/URLUpdateHook.java | 7 + .../java/plugins/Library/index/xml/Util.java | 75 + .../plugins/Library/index/xml/XMLIndex.java | 861 ++++++++ .../Library/index/xml/package-info.java | 9 + .../Library/io/DataFormatException.java | 44 + .../plugins/Library/io/ObjectBlueprint.java | 520 +++++ .../Library/io/ObjectStreamReader.java | 21 + .../Library/io/ObjectStreamWriter.java | 21 + .../plugins/Library/io/YamlReaderWriter.java | 226 ++ .../plugins/Library/io/serial/Archiver.java | 54 + .../Library/io/serial/FileArchiver.java | 198 ++ .../Library/io/serial/IterableSerialiser.java | 53 + .../Library/io/serial/LiveArchiver.java | 46 + .../Library/io/serial/MapSerialiser.java | 50 + .../plugins/Library/io/serial/Packer.java | 995 +++++++++ .../Library/io/serial/ParallelSerialiser.java | 271 +++ .../Library/io/serial/ProgressTracker.java | 255 +++ .../io/serial/ScheduledSerialiser.java | 53 + .../plugins/Library/io/serial/Serialiser.java | 161 ++ .../plugins/Library/io/serial/Translator.java | 44 + .../Library/io/serial/package-info.java | 11 + .../java/plugins/Library/package-info.java | 13 + .../search/InvalidSearchException.java | 19 + .../plugins/Library/search/ResultSet.java | 511 +++++ .../java/plugins/Library/search/Search.java | 654 ++++++ .../Library/search/SearchTokenizer.java | 205 ++ .../plugins/Library/search/SearchUtil.java | 55 + .../Library/search/inter/IndexQuery.java | 65 + .../Library/search/inter/Interdex.java | 198 ++ .../Library/search/inter/TermResults.java | 56 + .../plugins/Library/search/package-info.java | 15 + .../java/plugins/Library/ui/L10nString.java | 72 + .../java/plugins/Library/ui/MainPage.java | 523 +++++ .../plugins/Library/ui/MainPageToadlet.java | 139 ++ .../Library/ui/RelevanceComparator.java | 43 + .../Library/ui/ResultNodeGenerator.java | 262 +++ .../plugins/Library/ui/StaticToadlet.java | 78 + .../Library/ui/TermPageGroupEntry.java | 50 + .../plugins/Library/ui/TestInterface.java | 64 + .../java/plugins/Library/ui/WebInterface.java | 67 + .../java/plugins/Library/ui/package-info.java | 14 + .../plugins/Library/ui/staticfiles/detect.js | 1 + .../plugins/Library/ui/staticfiles/script.js | 32 + .../plugins/Library/ui/staticfiles/style.css | 48 + .../java/plugins/Library/util/BTreeMap.java | 1959 +++++++++++++++++ .../java/plugins/Library/util/BTreeSet.java | 76 + .../plugins/Library/util/BytePrefixKey.java | 147 ++ .../Library/util/CompositeIterable.java | 68 + .../Library/util/DataNotLoadedException.java | 64 + .../Library/util/IdentityComparator.java | 84 + .../java/plugins/Library/util/Integers.java | 82 + src/main/java/plugins/Library/util/Maps.java | 132 ++ .../java/plugins/Library/util/PrefixTree.java | 615 ++++++ .../java/plugins/Library/util/Skeleton.java 
| 73 + .../Library/util/SkeletonBTreeMap.java | 1774 +++++++++++++++ .../Library/util/SkeletonBTreeSet.java | 177 ++ .../plugins/Library/util/SkeletonMap.java | 58 + .../plugins/Library/util/SkeletonTreeMap.java | 949 ++++++++ .../java/plugins/Library/util/Sorted.java | 268 +++ .../plugins/Library/util/SortedArraySet.java | 219 ++ .../plugins/Library/util/SortedMapSet.java | 164 ++ .../plugins/Library/util/SortedSetMap.java | 160 ++ .../util/TaskAbortExceptionConvertor.java | 13 + .../BoundedPriorityBlockingQueue.java | 196 ++ .../util/concurrent/ExceptionConvertor.java | 7 + .../Library/util/concurrent/Executors.java | 73 + .../Library/util/concurrent/Notifier.java | 31 + .../util/concurrent/ObjectProcessor.java | 400 ++++ .../Library/util/concurrent/Scheduler.java | 21 + .../Library/util/event/AbstractSweeper.java | 118 + .../Library/util/event/CountingSweeper.java | 56 + .../plugins/Library/util/event/Sweeper.java | 70 + .../Library/util/event/TrackingSweeper.java | 112 + .../Library/util/exec/AbstractExecution.java | 176 ++ .../util/exec/BaseCompositeProgress.java | 150 ++ .../Library/util/exec/ChainedProgress.java | 42 + .../Library/util/exec/CompositeProgress.java | 54 + .../plugins/Library/util/exec/Execution.java | 76 + .../Library/util/exec/ExecutionAcceptor.java | 20 + .../plugins/Library/util/exec/Progress.java | 54 + .../Library/util/exec/ProgressParts.java | 303 +++ .../Library/util/exec/SimpleProgress.java | 239 ++ .../Library/util/exec/TaskAbortException.java | 85 + .../util/exec/TaskCompleteException.java | 20 + .../util/exec/TaskInProgressException.java | 37 + .../Library/util/exec/package-info.java | 9 + .../plugins/Library/util/func/Closure.java | 40 + .../Library/util/func/SafeClosure.java | 17 + .../plugins/Library/util/func/Tuples.java | 160 ++ .../Library/util/func/package-info.java | 12 + .../plugins/Library/util/package-info.java | 10 + src/test/java/plugins/Library/Tester.java | 381 ++++ .../plugins/Library/index/BIndexTest.java | 381 ++++ .../plugins/Library/index/TermEntryTest.java | 121 + .../plugins/Library/io/serial/PackerTest.java | 84 + .../Library/io/serial/YamlMapTest.java | 123 ++ .../plugins/Library/util/BTreeMapTest.java | 97 + .../Library/util/BytePrefixKeyTest.java | 25 + .../java/plugins/Library/util/Generators.java | 43 + .../plugins/Library/util/IntegersTest.java | 71 + .../Library/util/SkeletonTreeMapTest.java | 126 ++ .../Library/util/SortedArraySetTest.java | 57 + .../Library/util/SortedMapTestSkeleton.java | 141 ++ .../java/plugins/Library/util/SortedTest.java | 190 ++ .../Library/util/event/SweepersTest.java | 177 ++ 129 files changed, 24345 insertions(+) create mode 100644 build.gradle create mode 100644 src/main/java/plugins/Library/Index.java create mode 100644 src/main/java/plugins/Library/Library.java create mode 100644 src/main/java/plugins/Library/Main.java create mode 100644 src/main/java/plugins/Library/SpiderIndexURIs.java create mode 100644 src/main/java/plugins/Library/SpiderIndexUploader.java create mode 100644 src/main/java/plugins/Library/Version.java create mode 100644 src/main/java/plugins/Library/VirtualIndex.java create mode 100644 src/main/java/plugins/Library/WriteableIndex.java create mode 100644 src/main/java/plugins/Library/client/FreenetArchiver.java create mode 100644 src/main/java/plugins/Library/index/ProtoIndex.java create mode 100644 src/main/java/plugins/Library/index/ProtoIndexComponentSerialiser.java create mode 100644 src/main/java/plugins/Library/index/ProtoIndexSerialiser.java create mode 100644 
src/main/java/plugins/Library/index/TermEntry.java create mode 100644 src/main/java/plugins/Library/index/TermEntryReaderWriter.java create mode 100644 src/main/java/plugins/Library/index/TermIndexEntry.java create mode 100644 src/main/java/plugins/Library/index/TermPageEntry.java create mode 100644 src/main/java/plugins/Library/index/TermTermEntry.java create mode 100644 src/main/java/plugins/Library/index/URIEntry.java create mode 100644 src/main/java/plugins/Library/index/URIKey.java create mode 100644 src/main/java/plugins/Library/index/package-info.java create mode 100644 src/main/java/plugins/Library/index/xml/FindRequest.java create mode 100644 src/main/java/plugins/Library/index/xml/LibrarianHandler.java create mode 100644 src/main/java/plugins/Library/index/xml/MainIndexParser.java create mode 100644 src/main/java/plugins/Library/index/xml/URLUpdateHook.java create mode 100644 src/main/java/plugins/Library/index/xml/Util.java create mode 100644 src/main/java/plugins/Library/index/xml/XMLIndex.java create mode 100644 src/main/java/plugins/Library/index/xml/package-info.java create mode 100644 src/main/java/plugins/Library/io/DataFormatException.java create mode 100644 src/main/java/plugins/Library/io/ObjectBlueprint.java create mode 100644 src/main/java/plugins/Library/io/ObjectStreamReader.java create mode 100644 src/main/java/plugins/Library/io/ObjectStreamWriter.java create mode 100644 src/main/java/plugins/Library/io/YamlReaderWriter.java create mode 100644 src/main/java/plugins/Library/io/serial/Archiver.java create mode 100644 src/main/java/plugins/Library/io/serial/FileArchiver.java create mode 100644 src/main/java/plugins/Library/io/serial/IterableSerialiser.java create mode 100644 src/main/java/plugins/Library/io/serial/LiveArchiver.java create mode 100644 src/main/java/plugins/Library/io/serial/MapSerialiser.java create mode 100644 src/main/java/plugins/Library/io/serial/Packer.java create mode 100644 src/main/java/plugins/Library/io/serial/ParallelSerialiser.java create mode 100644 src/main/java/plugins/Library/io/serial/ProgressTracker.java create mode 100644 src/main/java/plugins/Library/io/serial/ScheduledSerialiser.java create mode 100644 src/main/java/plugins/Library/io/serial/Serialiser.java create mode 100644 src/main/java/plugins/Library/io/serial/Translator.java create mode 100644 src/main/java/plugins/Library/io/serial/package-info.java create mode 100644 src/main/java/plugins/Library/package-info.java create mode 100644 src/main/java/plugins/Library/search/InvalidSearchException.java create mode 100644 src/main/java/plugins/Library/search/ResultSet.java create mode 100644 src/main/java/plugins/Library/search/Search.java create mode 100755 src/main/java/plugins/Library/search/SearchTokenizer.java create mode 100644 src/main/java/plugins/Library/search/SearchUtil.java create mode 100644 src/main/java/plugins/Library/search/inter/IndexQuery.java create mode 100644 src/main/java/plugins/Library/search/inter/Interdex.java create mode 100644 src/main/java/plugins/Library/search/inter/TermResults.java create mode 100644 src/main/java/plugins/Library/search/package-info.java create mode 100644 src/main/java/plugins/Library/ui/L10nString.java create mode 100644 src/main/java/plugins/Library/ui/MainPage.java create mode 100644 src/main/java/plugins/Library/ui/MainPageToadlet.java create mode 100644 src/main/java/plugins/Library/ui/RelevanceComparator.java create mode 100644 src/main/java/plugins/Library/ui/ResultNodeGenerator.java create mode 100644 
src/main/java/plugins/Library/ui/StaticToadlet.java create mode 100644 src/main/java/plugins/Library/ui/TermPageGroupEntry.java create mode 100644 src/main/java/plugins/Library/ui/TestInterface.java create mode 100644 src/main/java/plugins/Library/ui/WebInterface.java create mode 100644 src/main/java/plugins/Library/ui/package-info.java create mode 100644 src/main/java/plugins/Library/ui/staticfiles/detect.js create mode 100644 src/main/java/plugins/Library/ui/staticfiles/script.js create mode 100644 src/main/java/plugins/Library/ui/staticfiles/style.css create mode 100644 src/main/java/plugins/Library/util/BTreeMap.java create mode 100644 src/main/java/plugins/Library/util/BTreeSet.java create mode 100644 src/main/java/plugins/Library/util/BytePrefixKey.java create mode 100644 src/main/java/plugins/Library/util/CompositeIterable.java create mode 100644 src/main/java/plugins/Library/util/DataNotLoadedException.java create mode 100644 src/main/java/plugins/Library/util/IdentityComparator.java create mode 100644 src/main/java/plugins/Library/util/Integers.java create mode 100644 src/main/java/plugins/Library/util/Maps.java create mode 100644 src/main/java/plugins/Library/util/PrefixTree.java create mode 100644 src/main/java/plugins/Library/util/Skeleton.java create mode 100644 src/main/java/plugins/Library/util/SkeletonBTreeMap.java create mode 100644 src/main/java/plugins/Library/util/SkeletonBTreeSet.java create mode 100644 src/main/java/plugins/Library/util/SkeletonMap.java create mode 100644 src/main/java/plugins/Library/util/SkeletonTreeMap.java create mode 100644 src/main/java/plugins/Library/util/Sorted.java create mode 100644 src/main/java/plugins/Library/util/SortedArraySet.java create mode 100644 src/main/java/plugins/Library/util/SortedMapSet.java create mode 100644 src/main/java/plugins/Library/util/SortedSetMap.java create mode 100644 src/main/java/plugins/Library/util/TaskAbortExceptionConvertor.java create mode 100644 src/main/java/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java create mode 100644 src/main/java/plugins/Library/util/concurrent/ExceptionConvertor.java create mode 100644 src/main/java/plugins/Library/util/concurrent/Executors.java create mode 100644 src/main/java/plugins/Library/util/concurrent/Notifier.java create mode 100644 src/main/java/plugins/Library/util/concurrent/ObjectProcessor.java create mode 100644 src/main/java/plugins/Library/util/concurrent/Scheduler.java create mode 100644 src/main/java/plugins/Library/util/event/AbstractSweeper.java create mode 100644 src/main/java/plugins/Library/util/event/CountingSweeper.java create mode 100644 src/main/java/plugins/Library/util/event/Sweeper.java create mode 100644 src/main/java/plugins/Library/util/event/TrackingSweeper.java create mode 100644 src/main/java/plugins/Library/util/exec/AbstractExecution.java create mode 100644 src/main/java/plugins/Library/util/exec/BaseCompositeProgress.java create mode 100644 src/main/java/plugins/Library/util/exec/ChainedProgress.java create mode 100644 src/main/java/plugins/Library/util/exec/CompositeProgress.java create mode 100644 src/main/java/plugins/Library/util/exec/Execution.java create mode 100644 src/main/java/plugins/Library/util/exec/ExecutionAcceptor.java create mode 100644 src/main/java/plugins/Library/util/exec/Progress.java create mode 100644 src/main/java/plugins/Library/util/exec/ProgressParts.java create mode 100644 src/main/java/plugins/Library/util/exec/SimpleProgress.java create mode 100644 
src/main/java/plugins/Library/util/exec/TaskAbortException.java create mode 100644 src/main/java/plugins/Library/util/exec/TaskCompleteException.java create mode 100644 src/main/java/plugins/Library/util/exec/TaskInProgressException.java create mode 100644 src/main/java/plugins/Library/util/exec/package-info.java create mode 100644 src/main/java/plugins/Library/util/func/Closure.java create mode 100644 src/main/java/plugins/Library/util/func/SafeClosure.java create mode 100644 src/main/java/plugins/Library/util/func/Tuples.java create mode 100644 src/main/java/plugins/Library/util/func/package-info.java create mode 100644 src/main/java/plugins/Library/util/package-info.java create mode 100644 src/test/java/plugins/Library/Tester.java create mode 100644 src/test/java/plugins/Library/index/BIndexTest.java create mode 100644 src/test/java/plugins/Library/index/TermEntryTest.java create mode 100644 src/test/java/plugins/Library/io/serial/PackerTest.java create mode 100644 src/test/java/plugins/Library/io/serial/YamlMapTest.java create mode 100644 src/test/java/plugins/Library/util/BTreeMapTest.java create mode 100644 src/test/java/plugins/Library/util/BytePrefixKeyTest.java create mode 100644 src/test/java/plugins/Library/util/Generators.java create mode 100644 src/test/java/plugins/Library/util/IntegersTest.java create mode 100644 src/test/java/plugins/Library/util/SkeletonTreeMapTest.java create mode 100644 src/test/java/plugins/Library/util/SortedArraySetTest.java create mode 100644 src/test/java/plugins/Library/util/SortedMapTestSkeleton.java create mode 100644 src/test/java/plugins/Library/util/SortedTest.java create mode 100644 src/test/java/plugins/Library/util/event/SweepersTest.java diff --git a/build.gradle b/build.gradle new file mode 100644 index 00000000..709d745e --- /dev/null +++ b/build.gradle @@ -0,0 +1,99 @@ +buildscript { + dependencies { + classpath files('gradle/gradle-witness.jar') + } +} + +apply plugin: 'java' +apply plugin: 'witness' +apply plugin: 'maven-publish' +apply plugin: 'application' + +compileJava { + sourceCompatibility = 1.7 + targetCompatibility = 1.7 +} + +version = "37" + +repositories { + mavenLocal() + maven { url "http://4thline.org/m2" } + maven { url 'https://mvn.freenetproject.org' } + jcenter() +} + +configurations { + extraLibs +} + +dependencies { + compile group: 'org.freenetproject', name: 'fred', version: 'build+' + compile group: 'org.yaml', name: 'snakeyaml', version: '1.5' + testImplementation 'junit:junit:4.13' +} + +def gitrev +task buildInfo { + try { + def cmd = "git describe --always --abbrev=4 --dirty" + def proc = cmd.execute() + gitrev = proc.text.trim() + } catch (java.io.IOException e) { + gitrev = "@unknown@" + } +} + +test { + useJUnit() +} + +application { + mainClassName = 'plugins.Library.ui.TestInterface' +} + +jar { + manifest { + attributes 'Plugin-Main-Class': 'plugins.Library.Main', + 'Main-Class': 'plugins.Library.ui.TestInterface', + 'Required-Node-Version': '1239', + 'Implementation-Version': version, + 'Built-By': System.getProperty('user.name'), + 'Built-Date': new Date(), + 'Built-JDK': System.getProperty('java.version') + } + from('src') { + include '**/*.txt' + include '**/*.properties' + } + from (configurations.extraLibs.collect { it.isDirectory() ? 
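+ /* fat-jar step: directory entries on extraLibs are copied as-is, while archive
+ entries are exploded with zipTree() so their classes land inside the plugin jar */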
it : zipTree(it) }) { + exclude "META-INF/*.SF" + exclude "META-INF/*.DSA" + exclude "META-INF/*.RSA" + } + preserveFileTimestamps = false + reproducibleFileOrder = true + duplicatesStrategy = "exclude" + archiveName = 'freenet-Library.jar' +} + +publishing { + publications { + mavenJava(MavenPublication) { + groupId 'org.freenetproject.plugins' + artifactId "Library" + version version + from components.java + } + } + repositories { + maven { + url "s3://mvn.freenetproject.org/" + credentials(AwsCredentials) { + accessKey System.getenv('AWS_ACCESS_KEY_ID') + secretKey System.getenv('AWS_SECRET_ACCESS_KEY') + } + } + } +} + diff --git a/src/main/java/plugins/Library/Index.java b/src/main/java/plugins/Library/Index.java new file mode 100644 index 00000000..196ffb1a --- /dev/null +++ b/src/main/java/plugins/Library/Index.java @@ -0,0 +1,30 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library; + +import plugins.Library.index.TermEntry; +import plugins.Library.index.URIEntry; +import plugins.Library.util.exec.Execution; + +import freenet.keys.FreenetURI; + +import java.util.Set; + +/** +** Represents the data for an index. +** +** @author infinity0 +*/ +public interface Index { + + /** + ** Non-blocking fetch of the entries associated with a given term. + ** + ** DOCUMENT + */ + public Execution> getTermEntries(String term); + + public Execution getURIEntry(FreenetURI uri); + +} diff --git a/src/main/java/plugins/Library/Library.java b/src/main/java/plugins/Library/Library.java new file mode 100644 index 00000000..9a35b099 --- /dev/null +++ b/src/main/java/plugins/Library/Library.java @@ -0,0 +1,780 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.net.MalformedURLException; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import plugins.Library.client.FreenetArchiver; +import plugins.Library.index.ProtoIndex; +import plugins.Library.index.ProtoIndexSerialiser; +import plugins.Library.index.xml.URLUpdateHook; +import plugins.Library.index.xml.XMLIndex; +import plugins.Library.io.ObjectStreamReader; +import plugins.Library.io.ObjectStreamWriter; +import plugins.Library.io.serial.Serialiser.PullTask; +import plugins.Library.search.InvalidSearchException; +import plugins.Library.util.exec.TaskAbortException; + +import freenet.client.FetchContext; +import freenet.client.FetchException; +import freenet.client.FetchException.FetchExceptionMode; +import freenet.client.FetchResult; +import freenet.client.FetchWaiter; +import freenet.client.HighLevelSimpleClient; +import freenet.client.async.ClientContext; +import freenet.client.async.ClientGetter; +import freenet.client.async.PersistenceDisabledException; +import freenet.client.async.USKCallback; +import freenet.client.async.USKManager; +import freenet.client.async.USKRetriever; +import freenet.client.async.USKRetrieverCallback; +import freenet.client.events.ClientEvent; +import freenet.client.events.ClientEventListener; +import freenet.client.events.ExpectedMIMEEvent; +import freenet.keys.FreenetURI; +import freenet.keys.USK; +import freenet.node.NodeClientCore; +import freenet.node.RequestClient; +import freenet.node.RequestStarter; +import freenet.pluginmanager.PluginRespirator; +import freenet.pluginmanager.PluginStore; +import freenet.support.Executor; +import freenet.support.Logger; +import freenet.support.io.FileUtil; + + +/** + * Library class is the api for others to use search facilities, it is used by the interfaces + * @author MikeB + */ +final public class Library implements URLUpdateHook { + + public static final String BOOKMARK_PREFIX = "bookmark:"; + public static final String DEFAULT_INDEX_SITE = BOOKMARK_PREFIX + "liberty-of-information" + " " + BOOKMARK_PREFIX + "free-market-free-people" + " " + + BOOKMARK_PREFIX + "gotcha" + " " + BOOKMARK_PREFIX + "wanna" + " " + BOOKMARK_PREFIX + "wanna.old" + " " + BOOKMARK_PREFIX + "gogo"; + private static int version = 36; + public static final String plugName = "Library " + getVersion(); + + public static String getPlugName() { + return plugName; + } + + public static long getVersion() { + return version; + } + + /** + ** Library singleton. 
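+ **
+ ** Only one instance may exist per node: init() below throws
+ ** IllegalStateException on a second call. Caller-side sketch of plugin
+ ** startup (variable names assumed):
+ **
+ **   Library lib = Library.init(pluginRespirator);  // loads bookmarks, subscribes their USKs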
+ */ + private static Library lib; + + public synchronized static Library init(PluginRespirator pr) { + if (lib != null) { + throw new IllegalStateException("Library already initialised"); + } + lib = new Library(pr); + return lib; + } + + final private PluginRespirator pr; + final private Executor exec; + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + public static final RequestClient REQUEST_CLIENT = new RequestClient() { + + @Override + public boolean persistent() { + return false; + } + + @Override + public boolean realTimeFlag() { + return false; + } + + }; + + static { + Logger.registerClass(Library.class); + } + + private final PluginStore store; + + static final String STOREKEY = "indexuris"; + + /** + * Method to setup Library class so it has access to PluginRespirator, and load bookmarks + * TODO pull bookmarks from disk + */ + private Library(PluginRespirator pr){ + this.pr = pr; + PluginStore ps; + if(pr!=null) { + this.exec = pr.getNode().executor; + try { + ps = pr.getStore(); + } catch (PersistenceDisabledException e) { + ps = null; + } + } else { + this.exec = null; + ps = null; + } + USKManager uskManager = pr.getNode().clientCore.clientContext.uskManager; + store = ps; + if(store != null && store.subStores.containsKey(STOREKEY)) { + for(Map.Entry entry : store.subStores.get(STOREKEY).strings.entrySet()) { + String name = entry.getKey(); + String target = entry.getValue(); + bookmarks.put(name, target); + } + } + + File persistentFile = new File("LibraryPersistent"); + boolean migrated = false; + if(persistentFile.canRead()){ + try { + ObjectInputStream is = new ObjectInputStream(new FileInputStream(persistentFile)); // These are annoying but it's better than nothing + bookmarks = (Map)is.readObject(); + is.close(); + FileUtil.secureDelete(persistentFile); + Logger.error(this, "Moved LibraryPersistent contents into database and securely deleted old file."); + migrated = true; + } catch (ClassNotFoundException ex) { + Logger.error(this, "Error trying to read bookmarks Map from file.", ex); + } catch (IOException ex) { + Logger.normal(this, "Error trying to read Library persistent data.", ex); + } + } + boolean needNewWanna = false; + for(Map.Entry entry : bookmarks.entrySet()) { + String name = entry.getKey(); + String target = entry.getValue(); + if(name.equals("wanna") && target.startsWith("USK@5hH~39FtjA7A9")) { + name = "wanna.old"; + needNewWanna = true; + } + FreenetURI uri; + long edition = -1; + try { + uri = new FreenetURI(target); + if(uri.isUSK()) + edition = uri.getEdition(); + } catch (MalformedURLException e) { + Logger.error(this, "Invalid bookmark URI: "+target+" for "+name, e); + continue; + } + if(uri.isUSK()) { + BookmarkCallback callback = new BookmarkCallback(name, uri.getAllMetaStrings(), edition); + bookmarkCallbacks.put(name, callback); + USK u; + try { + u = USK.create(uri); + } catch (MalformedURLException e) { + Logger.error(this, "Invalid bookmark USK: "+target+" for "+name, e); + continue; + } + uskManager.subscribe(u, callback, false, rcBulk); + callback.ret = uskManager.subscribeContent(u, callback, false, pr.getHLSimpleClient().getFetchContext(), RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, rcBulk); + } + } + if(bookmarks.isEmpty() || needNewWanna || !bookmarks.containsKey("gotcha") || + !bookmarks.containsKey("liberty-of-information") || + !bookmarks.containsKey("free-market-free-people")) { + if(!bookmarks.containsKey("liberty-of-information")) + addBookmark("liberty-of-information", 
"USK@5YCPzcs60uab6VSdiKcyvo-G-r-ga2UWCyOzWHaPoYE,a1zEPCf0qkUFQxftFZK5xmxYdxt0JErDb2aJgwG8s~4,AQACAAE/index.yml/25"); + if(!bookmarks.containsKey("free-market-free-people")) + addBookmark("free-market-free-people", "USK@X4lMQ51bXPSicAgbR~XdFzDyizYHYrvzStdeUrIFhes,0ze4TAqd~RdAMZMsshybHZFema3ZP3id4sgN3H8969g,AQACAAE/index.yml/4"); + if(!bookmarks.containsKey("gotcha")) + addBookmark("gotcha", "USK@zcAnAgT-xp5LnnK28-Lc7Qt-GU7pNKnVdkmU4-HCCBc,s2jiTh8~O9MtnGdVqJqgnKGrXrosK8rArcZ8A49hprY,AQACAAE/index.yml/6"); + if(!bookmarks.containsKey("wanna.old")) + addBookmark("wanna.old", "USK@5hH~39FtjA7A9~VXWtBKI~prUDTuJZURudDG0xFn3KA,GDgRGt5f6xqbmo-WraQtU54x4H~871Sho9Hz6hC-0RA,AQACAAE/Search/25/index.xml"); + if(!bookmarks.containsKey("freenetindex")) + addBookmark("freenetindex", "USK@US6gHsNApDvyShI~sBHGEOplJ3pwZUDhLqTAas6rO4c,3jeU5OwV0-K4B6HRBznDYGvpu2PRUuwL0V110rn-~8g,AQACAAE/freenet-index/5/index.xml"); + if(!bookmarks.containsKey("gogo")) + addBookmark("gogo", "USK@shmVvDhwivG1z1onSA5efRl3492Xyoov52hrC0tF6uI,wpMhseMpFHPLpXYbV8why1nfBXf2XdSQkzVaemFOEsA,AQACAAE/index.yml/51"); + if(!bookmarks.containsKey("wanna")) + addBookmark("wanna", "USK@gxuHPaicqxlpPPagBKPVPraZ4bwLdMYBc5vipkWGh3E,08ExdmvZzB8Hfi6H6crbiuCd2~ikWDIpJ8dvr~tLp7k,AQACAAE/index.yml/82"); + migrated = true; + Logger.normal(this, "Added default indexes"); + } + if(migrated) + saveState(); + } + + public synchronized void saveState(){ + if(store == null) return; + PluginStore inner; + inner = store.subStores.get(STOREKEY); + if(inner == null) + store.subStores.put(STOREKEY, inner = new PluginStore()); + inner.strings.clear(); + inner.strings.putAll(bookmarks); + try { + pr.putStore(store); + if(logMINOR) Logger.minor(this, "Stored state to database"); + } catch (PersistenceDisabledException e) { + // Not much we can do... + } + } + + /* FIXME + * Parallel fetch from the same index is not currently supported. This means that the only reliable way to + * do e.g. an intersection search is to run two completely separate fetches. Otherwise we get assertion + * errors in e.g. BTreePacker.preprocessPullBins from TaskInProgressException's we can't handle from + * ProgressTracker.addPullProgress, when the terms are in the same bucket. We should make it possible to + * search for multiple terms in the same btree, but for now, turning off caching is the only viable option. + */ + +// /** +// ** Holds all the read-indexes. +// */ +// private Map rtab = new HashMap(); +// +// /** +// ** Holds all the writeable indexes. +// */ +// private Map wtab = new HashMap(); +// + /** + ** Holds all the bookmarks (aliases into the rtab). + */ + private Map bookmarks = new HashMap(); + + private Map bookmarkCallbacks = new HashMap(); + + /** + ** Get the index type giving a {@code FreenetURI}. This must not contain + ** a metastring (end with "/") or be a USK. 
+ */ + public Class getIndexType(FreenetURI indexuri) throws FetchException { + if(indexuri.lastMetaString()!=null && indexuri.lastMetaString().equals(XMLIndex.DEFAULT_FILE)) + return XMLIndex.class; + if(indexuri.lastMetaString()!=null && indexuri.lastMetaString().equals(ProtoIndex.DEFAULT_FILE)) + return ProtoIndex.class; + if(indexuri.isUSK() && indexuri.getDocName().equals(ProtoIndex.DEFAULT_FILE)) + return ProtoIndex.class; + + NodeClientCore core = pr.getNode().clientCore; + HighLevelSimpleClient hlsc = core.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); + + List uris = Arrays.asList(indexuri, + indexuri.pushMetaString(""), + indexuri.pushMetaString(ProtoIndex.DEFAULT_FILE), + indexuri.pushMetaString(XMLIndex.DEFAULT_FILE)); + + for (FreenetURI uri: uris) { + + for(int i=0;i<5;i++) { + ClientContext cctx = core.clientContext; + FetchContext fctx = hlsc.getFetchContext(); + FetchWaiter fw = new FetchWaiter(REQUEST_CLIENT); + final ClientGetter gu = hlsc.fetch(uri, 0x10000, fw, fctx); + gu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx); + + final Class[] c = new Class[1]; + hlsc.addEventHook(new ClientEventListener() { + /*@Override**/ public void receive(ClientEvent ce, ClientContext context) { + if (!(ce instanceof ExpectedMIMEEvent)) { return; } + synchronized(c) { + String type = ((ExpectedMIMEEvent)ce).expectedMIMEType; + Logger.normal(this, "Expected type in index: "+type); + try { + c[0] = getIndexTypeFromMIME(type); + gu.cancel(context); + } catch (UnsupportedOperationException e) { + // Ignore + } + } + } + }); + + try { + FetchResult res = fw.waitForCompletion(); + return getIndexTypeFromMIME(res.getMimeType()); + + } catch (FetchException e) { + if (e.getMode() == FetchExceptionMode.CANCELLED) { + synchronized(c) { + if(c[0] != null) + return c[0]; + else + throw new UnsupportedOperationException("Unable to get mime type or got an invalid mime type for index"); + } + } else if(e.newURI != null) { + uri = e.newURI; + continue; + } + } + } + + } + throw new UnsupportedOperationException("Could not find appropriate data for index"); + } + + public Class getIndexTypeFromMIME(String mime) { + if (mime.equals(ProtoIndex.MIME_TYPE)) { + //return "YAML index"; + return ProtoIndex.class; + } else if (mime.equals(XMLIndex.MIME_TYPE)) { + //return "XML index"; + return XMLIndex.class; + } else { + throw new UnsupportedOperationException("Unknown mime-type for index: "+mime); + } + } + + +/* + KEYEXPLORER slightly more efficient version that depends on KeyExplorer + + /** + ** Get the index type giving a {@code FreenetURI}. This should have been + ** passed through {@link KeyExplorerUtils#sanitizeURI(List, String)} at + ** some point - ie. it must not contain a metastring (end with "/") or be + ** a USK. 
+ * / + public Class getIndexType(FreenetURI uri) + throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException { + GetResult getresult = KeyExplorerUtils.simpleGet(pr, uri); + byte[] data = BucketTools.toByteArray(getresult.getData()); + + if (getresult.isMetaData()) { + try { + Metadata md = Metadata.construct(data); + + if (md.isArchiveManifest()) { + if (md.getArchiveType() == ARCHIVE_TYPE.TAR) { + return getIndexTypeFromManifest(uri, false, true); + + } else if (md.getArchiveType() == ARCHIVE_TYPE.ZIP) { + return getIndexTypeFromManifest(uri, true, false); + + } else { + throw new UnsupportedOperationException("not implemented - unknown archive manifest"); + } + + } else if (md.isSimpleManifest()) { + return getIndexTypeFromManifest(uri, false, false); + } + + return getIndexTypeFromSimpleMetadata(md); + + } catch (MetadataParseException e) { + throw new RuntimeException(e); + } + } else { + throw new UnsupportedOperationException("Found data instead of metadata; I do not have enough intelligence to decode this."); + } + } + + public Class getIndexTypeFromSimpleMetadata(Metadata md) { + String mime = md.getMIMEType(); + if (mime.equals(ProtoIndex.MIME_TYPE)) { + //return "YAML index"; + return ProtoIndex.class; + } else if (mime.equals(XMLIndex.MIME_TYPE)) { + //return "XML index"; + return XMLIndex.class; + } else { + throw new UnsupportedOperationException("Unknown mime-type for index"); + } + } + + public Class getIndexTypeFromManifest(FreenetURI furi, boolean zip, boolean tar) + throws FetchException, IOException, MetadataParseException, LowLevelGetException, KeyListenerConstructionException { + + boolean automf = true, deep = true, ml = true; + Metadata md = null; + + if (zip) + md = KeyExplorerUtils.zipManifestGet(pr, furi); + else if (tar) + md = KeyExplorerUtils.tarManifestGet(pr, furi, ".metadata"); + else { + md = KeyExplorerUtils.simpleManifestGet(pr, furi); + if (ml) { + md = KeyExplorerUtils.splitManifestGet(pr, md); + } + } + + if (md.isSimpleManifest()) { + // a subdir + HashMap docs = md.getDocuments(); + Metadata defaultDoc = md.getDefaultDocument(); + + if (defaultDoc != null) { + //return "(default doc method) " + getIndexTypeFromSimpleMetadata(defaultDoc); + return getIndexTypeFromSimpleMetadata(defaultDoc); + } + + if (docs.containsKey(ProtoIndex.DEFAULT_FILE)) { + //return "(doclist method) YAML index"; + return ProtoIndex.class; + } else if (docs.containsKey(XMLIndex.DEFAULT_FILE)) { + //return "(doclist method) XML index"; + return XMLIndex.class; + } else { + throw new UnsupportedOperationException("Could not find a supported index in the document-listings for " + furi.toString()); + } + } + + throw new UnsupportedOperationException("Parsed metadata but did not reach a simple manifest: " + furi.toString()); + } +*/ + public Class getIndexType(File f) { + if (f.getName().endsWith(ProtoIndexSerialiser.FILE_EXTENSION)) + return ProtoIndex.class; + if (f.isDirectory() && new File(f, XMLIndex.DEFAULT_FILE).canRead()) + return XMLIndex.class; + + throw new UnsupportedOperationException("Could not determine default index file under the path given: " + f); + } + + public Object getAddressTypeFromString(String indexuri) { + try { + // return KeyExplorerUtils.sanitizeURI(new ArrayList(), indexuri); KEYEXPLORER + // OPT HIGH if it already ends with eg. 
*Index.DEFAULT_FILE, don't strip + // the MetaString, and have getIndexType behave accordingly + FreenetURI tempURI = new FreenetURI(indexuri); +// if (tempURI.hasMetaStrings()) { tempURI = tempURI.setMetaString(null); } +// if (tempURI.isUSK()) { tempURI = tempURI.sskForUSK(); } + return tempURI; + } catch (MalformedURLException e) { + File file = new File(indexuri); + if (file.canRead()) { + return file; + } else { + throw new UnsupportedOperationException("Could not recognise index type from string: " + indexuri); + } + } + } + + /** + * Add a new bookmark, + * TODO there should be a separate version for untrusted adds from freesites which throws some Security Exception + * @param name of new bookmark + * @param uri of new bookmark + * @return reference of new bookmark + */ + public String addBookmark(String name, String uri) { + FreenetURI u; + USK uskNew = null; + BookmarkCallback callback = null; + long edition = -1; + try { + u = new FreenetURI(uri); + if(u.isUSK()) { + uskNew = USK.create(u); + edition = uskNew.suggestedEdition; + } + } catch (MalformedURLException e) { + Logger.error(this, "Invalid new uri "+uri); + return null; + } + String old; + synchronized(this) { + old = bookmarks.put(name, uri); + callback = bookmarkCallbacks.get(name); + if(callback == null) { + bookmarkCallbacks.put(name, callback = new BookmarkCallback(name, u.getAllMetaStrings(), edition)); + old = null; + } + saveState(); + } + boolean isSame = false; + USKManager uskManager = pr.getNode().clientCore.clientContext.uskManager; + if(old != null) { + try { + FreenetURI uold = new FreenetURI(old); + if(uold.isUSK()) { + USK usk = USK.create(uold); + if(!(uskNew != null && usk.equals(uskNew, false))) { + uskManager.unsubscribe(usk, callback); + uskManager.unsubscribeContent(usk, callback.ret, true); + } else + isSame = true; + } + + } catch (MalformedURLException e) { + // Ignore + } + } + if(!isSame) { + uskManager.subscribe(uskNew, callback, false, rcBulk); + callback.ret = uskManager.subscribeContent(uskNew, callback, false, pr.getHLSimpleClient().getFetchContext(), RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS, rcBulk); + } + return name; + } + + final RequestClient rcBulk = new RequestClient() { + + public boolean persistent() { + return false; + } + + public boolean realTimeFlag() { + return false; + } + + }; + + private class BookmarkCallback implements USKRetrieverCallback, USKCallback { + + private final String bookmarkName; + private String[] metaStrings; + USKRetriever ret; + private long origEdition; + + public BookmarkCallback(String name, String[] allMetaStrings, long origEdition) { + this.bookmarkName = name; + this.metaStrings = allMetaStrings; + } + + public short getPollingPriorityNormal() { + return RequestStarter.UPDATE_PRIORITY_CLASS; + } + + public short getPollingPriorityProgress() { + return RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS; + } + + public void onFound(USK origUSK, long edition, FetchResult data) { + data.asBucket().free(); + if(logMINOR) Logger.minor(this, "Bookmark "+bookmarkName+" : fetching edition "+edition); + } + + public void onFoundEdition(long l, USK key, ClientContext context, boolean metadata, short codec, byte[] data, boolean newKnownGood, boolean newSlotToo) { + if(l < origEdition) { + Logger.error(this, "Wrong edition: "+l+" less than "+origEdition); + return; + } + if(newKnownGood) { + String uri = key.copy(l).getURI().setMetaString(metaStrings).toString(); + if(logMINOR) Logger.minor(this, "Bookmark "+bookmarkName+" new last known good edition 
"+l+" uri is now "+uri); + addBookmark(bookmarkName, uri); + } + } + + } + + public synchronized void removeBookmark(String name) { + bookmarks.remove(name); + saveState(); + } + + public synchronized String getBookmark(String bm) { + return bookmarks.get(bm); + } + /** + * Returns the Set of bookmark names, unmodifiable + * @return The set of bookmarks + */ + public Set bookmarkKeys() { + return Collections.unmodifiableSet(bookmarks.keySet()); + } + + /** + * Returns a set of Index objects one for each of the uri's specified + * gets an existing one if its there else makes a new one + * + * @param indexuris list of index specifiers separated by spaces + * @return Set of Index objects + */ + public final ArrayList getIndices(String indexuris) throws InvalidSearchException, TaskAbortException { + String[] uris = indexuris.split("[ ;]"); + ArrayList indices = new ArrayList(uris.length); + + for ( String uri : uris){ + indices.add(getIndex(uri, null)); + } + return indices; + } + + // See comments near rtab. Can't use in parallel so not acceptable. +// /** +// * Method to get all of the instatiated Indexes +// */ +// public final Iterable getAllIndices() { +// return rtab.values(); +// } +// + public final Index getIndex(String indexuri) throws InvalidSearchException, TaskAbortException { + return getIndex(indexuri, null); + } + + /** + * Gets an index using its id in the form {type}:{uri}
+ * known types are xml, bookmark + * @param indexid + * @return Index object + * @throws plugins.Library.util.InvalidSearchException + */ + /** + * Returns an Index object for the uri specified, + * gets an existing one if its there else makes a new one + * + * TODO : identify all index types so index doesn't need to refer to them directly + * @param indexuri index specifier + * @return Index object + */ + public final Index getIndex(String indexuri, String origIndexName) throws InvalidSearchException, TaskAbortException { + Logger.normal(this, "Getting index "+indexuri); + indexuri = indexuri.trim(); + if (indexuri.startsWith(BOOKMARK_PREFIX)){ + indexuri = indexuri.substring(BOOKMARK_PREFIX.length()); + if (bookmarks.containsKey(indexuri)) + return getIndex(bookmarks.get(indexuri), indexuri); + else + throw new InvalidSearchException("Index bookmark '"+indexuri+" does not exist"); + } + + // See comments near rtab. Can't use in parallel so caching is dangerous. +// if (rtab.containsKey(indexuri)) +// return rtab.get(indexuri); +// + Class indextype; + Index index; + Object indexkey; + + try{ + indexkey = getAddressTypeFromString(indexuri); + }catch(UnsupportedOperationException e){ + throw new TaskAbortException("Did not recognise index type in : \""+indexuri+"\"", e); + } + + long edition = -1; + + try { + if (indexkey instanceof File) { + indextype = getIndexType((File)indexkey); + } else if (indexkey instanceof FreenetURI) { + // TODO HIGH make this non-blocking + FreenetURI uri = (FreenetURI)indexkey; + if(uri.isUSK()) + edition = uri.getEdition(); + indextype = getIndexType(uri); + } else { + throw new AssertionError(); + } + + if (indextype == ProtoIndex.class) { + // TODO HIGH this *must* be non-blocking as it fetches the whole index root + PullTask task = new PullTask(indexkey); + ProtoIndexSerialiser.forIndex(indexkey, RequestStarter.INTERACTIVE_PRIORITY_CLASS).pull(task); + index = task.data; + + } else if (indextype == XMLIndex.class) { + index = new XMLIndex(indexuri, edition, pr, this, origIndexName); + + } else { + throw new AssertionError(); + } + + // See comments near rtab. Can't use in parallel so caching is dangerous. + //rtab.put(indexuri, index); + Logger.normal(this, "Loaded index type " + indextype.getName() + " at " + indexuri); + + return index; + + } catch (FetchException e) { + throw new TaskAbortException("Failed to fetch index " + indexuri+" : "+e, e, true); // can retry +/* KEYEXPLORER + } catch (IOException e) { + throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry + + } catch (LowLevelGetException e) { + throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry + + } catch (KeyListenerConstructionException e) { + throw new TaskAbortException("Failed to fetch index " + indexuri, e, true); // can retry + + } catch (MetadataParseException e) { + throw new TaskAbortException("Failed to parse index " + indexuri, e); +*/ + } catch (UnsupportedOperationException e) { + throw new TaskAbortException("Failed to parse index " + indexuri+" : "+e, e); + + } catch (RuntimeException e) { + throw new TaskAbortException("Failed to load index " + indexuri+" : "+e, e); + + } + } + + + /** + ** Create a {@link FreenetArchiver} connected to the core of the + ** singleton's {@link PluginRespirator}. + ** + ** @throws IllegalStateException if the singleton has not been initialised + ** or if it does not have a respirator. 
+ */ + public static FreenetArchiver + makeArchiver(ObjectStreamReader r, ObjectStreamWriter w, String mime, int size, short priorityClass) { + if (lib == null || lib.pr == null) { + throw new IllegalStateException("Cannot archive to freenet without a fully live Library plugin connected to a freenet node."); + } else { + return new FreenetArchiver(lib.pr.getNode().clientCore, r, w, mime, size, priorityClass); + } + } + + /** + ** Create a {@link FreenetArchiver} connected to the core of the + ** singleton's {@link PluginRespirator}. + ** + ** @throws IllegalStateException if the singleton has not been initialised + ** or if it does not have a respirator. + */ + public static FreenetArchiver + makeArchiver(S rw, String mime, int size, short priorityClass) { + return Library.makeArchiver(rw, rw, mime, size, priorityClass); + } + + public static String convertToHex(byte[] data) { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < data.length; i++) { + int halfbyte = (data[i] >>> 4) & 0x0F; + int two_halfs = 0; + do { + if ((0 <= halfbyte) && (halfbyte <= 9)) + buf.append((char) ('0' + halfbyte)); + else + buf.append((char) ('a' + (halfbyte - 10))); + halfbyte = data[i] & 0x0F; + } while (two_halfs++ < 1); + } + return buf.toString(); + } + + //this function will return the String representation of the MD5 hash for the input string + public static String MD5(String text) { + try { + MessageDigest md = MessageDigest.getInstance("MD5"); + byte[] b = text.getBytes("UTF-8"); + md.update(b, 0, b.length); + byte[] md5hash = md.digest(); + return convertToHex(md5hash); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void update(String updateContext, String indexuri) { + addBookmark(updateContext, indexuri); + } +} diff --git a/src/main/java/plugins/Library/Main.java b/src/main/java/plugins/Library/Main.java new file mode 100644 index 00000000..600cb330 --- /dev/null +++ b/src/main/java/plugins/Library/Main.java @@ -0,0 +1,199 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library; + +import freenet.node.RequestStarter; +import freenet.pluginmanager.PluginReplySender; +import freenet.support.MutableBoolean; +import freenet.support.SimpleFieldSet; +import freenet.support.api.Bucket; +import freenet.support.io.BucketTools; +import freenet.support.io.Closer; +import freenet.support.io.FileBucket; +import freenet.support.io.FileUtil; +import freenet.support.io.LineReadingInputStream; +import freenet.support.io.NativeThread; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.logging.Level; + +import plugins.Library.client.FreenetArchiver; +import plugins.Library.index.ProtoIndex; +import plugins.Library.index.ProtoIndexComponentSerialiser; +import plugins.Library.index.ProtoIndexSerialiser; +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermPageEntry; +import plugins.Library.search.Search; +import plugins.Library.ui.WebInterface; +import plugins.Library.util.SkeletonBTreeMap; +import plugins.Library.util.SkeletonBTreeSet; +import plugins.Library.util.TaskAbortExceptionConvertor; +import plugins.Library.util.concurrent.Executors; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.func.Closure; + +import freenet.pluginmanager.FredPlugin; +import freenet.pluginmanager.FredPluginL10n; +import freenet.pluginmanager.FredPluginRealVersioned; +import freenet.pluginmanager.FredPluginThreadless; +import freenet.pluginmanager.FredPluginVersioned; +import freenet.pluginmanager.PluginNotFoundException; +import freenet.pluginmanager.PluginRespirator; +import freenet.support.Executor; +import freenet.client.InsertException; +import freenet.keys.FreenetURI; +import freenet.keys.InsertableClientSSK; +import freenet.l10n.BaseL10n.LANGUAGE; + +import freenet.pluginmanager.FredPluginFCP; +import freenet.support.Logger; +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.security.MessageDigest; +import plugins.Library.index.TermEntryReaderWriter; +import plugins.Library.index.xml.LibrarianHandler; +import plugins.Library.io.serial.LiveArchiver; +import plugins.Library.io.serial.Serialiser.PullTask; +import plugins.Library.io.serial.Serialiser.PushTask; + +/** + * Library class is the api for others to use search facilities, it is used by the interfaces + * @author MikeB + */ +public class Main implements FredPlugin, FredPluginVersioned, freenet.pluginmanager.FredPluginHTTP, // TODO remove this later + FredPluginRealVersioned, FredPluginThreadless, FredPluginL10n, FredPluginFCP { + + private static PluginRespirator pr; + private Library library; + private WebInterface webinterface; + private SpiderIndexUploader uploader; + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(Main.class); + } + + public static PluginRespirator getPluginRespirator() { + return pr; + } + + // FredPluginL10n + public void setLanguage(freenet.l10n.BaseL10n.LANGUAGE lang) { + // TODO implement + } + + // FredPluginVersioned + public String 
getVersion() { + return library.getVersion() + " " + Version.vcsRevision(); + } + + // FredPluginRealVersioned + public long getRealVersion() { + return library.getVersion(); + } + + // FredPluginHTTP + // TODO remove this later + public String handleHTTPGet(freenet.support.api.HTTPRequest request) { + Throwable th; + try { + Class tester = Class.forName("plugins.Library.Tester"); + java.lang.reflect.Method method = tester.getMethod("runTest", Library.class, String.class); + try { + return (String)method.invoke(null, library, request.getParam("plugins.Library.Tester")); + } catch (java.lang.reflect.InvocationTargetException e) { + throw e.getCause(); + } + } catch (ClassNotFoundException e) { + return "
<p>To use Library, go to Browsing -> Search Freenet in the main menu in FProxy.</p><p>This page is only where the test suite would be, if it had been compiled in (give -Dtester= to ant).</p>
"; + } catch (Throwable t) { + th = t; + } + java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream(); + th.printStackTrace(new java.io.PrintStream(bytes)); + return "<pre>" + bytes + "</pre>
"; + } + // TODO remove this later + public String handleHTTPPost(freenet.support.api.HTTPRequest request) { return null; } + + // FredPlugin + public void runPlugin(PluginRespirator pr) { + Main.pr = pr; + Executor exec = pr.getNode().executor; + library = Library.init(pr); + Search.setup(library, exec); + Executors.setDefaultExecutor(exec); + webinterface = new WebInterface(library, pr); + webinterface.load(); + uploader = new SpiderIndexUploader(pr); + uploader.start(); + } + + public void terminate() { + webinterface.unload(); + } + + public String getString(String key) { + return key; + } + + private static String convertToHex(byte[] data) { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < data.length; i++) { + int halfbyte = (data[i] >>> 4) & 0x0F; + int two_halfs = 0; + do { + if ((0 <= halfbyte) && (halfbyte <= 9)) + buf.append((char) ('0' + halfbyte)); + else + buf.append((char) ('a' + (halfbyte - 10))); + halfbyte = data[i] & 0x0F; + } while (two_halfs++ < 1); + } + return buf.toString(); + } + + //this function will return the String representation of the MD5 hash for the input string + public static String MD5(String text) { + try { + MessageDigest md = MessageDigest.getInstance("MD5"); + byte[] b = text.getBytes("UTF-8"); + md.update(b, 0, b.length); + byte[] md5hash = md.digest(); + return convertToHex(md5hash); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void handle(PluginReplySender replysender, SimpleFieldSet params, final Bucket data, int accesstype) { + if("pushBuffer".equals(params.get("command"))){ + uploader.handlePushBuffer(params, data); + } else if("getSpiderURI".equals(params.get("command"))) { + uploader.handleGetSpiderURI(replysender); + } else { + Logger.error(this, "Unknown command : \""+params.get("command")); + } + } + +} diff --git a/src/main/java/plugins/Library/SpiderIndexURIs.java b/src/main/java/plugins/Library/SpiderIndexURIs.java new file mode 100644 index 00000000..46ce8edd --- /dev/null +++ b/src/main/java/plugins/Library/SpiderIndexURIs.java @@ -0,0 +1,121 @@ +package plugins.Library; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; + +import freenet.keys.FreenetURI; +import freenet.keys.InsertableClientSSK; +import freenet.pluginmanager.PluginRespirator; +import freenet.support.Logger; +import freenet.support.io.Closer; + +class SpiderIndexURIs { + private FreenetURI privURI; + private FreenetURI pubURI; + private long edition; + private final PluginRespirator pr; + + SpiderIndexURIs(PluginRespirator pr) { + this.pr = pr; + } + + synchronized long setEdition(long newEdition) { + if(newEdition < edition) return edition; + else return edition = newEdition; + } + + synchronized FreenetURI loadSSKURIs() { + if(privURI == null) { + File f = new File(SpiderIndexUploader.PRIV_URI_FILENAME); + FileInputStream fis = null; + InsertableClientSSK privkey = null; + boolean newPrivKey = false; + try { + fis = new FileInputStream(f); + BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); + privURI = new FreenetURI(br.readLine()).setDocName("index.yml"); // Else InsertableClientSSK doesn't like it. 
+ privkey = InsertableClientSSK.create(privURI); + Logger.debug(this, "Read old privkey"); + this.pubURI = privkey.getURI(); + Logger.debug(this, "Recovered URI from disk, pubkey is "+pubURI); + fis.close(); + fis = null; + } catch (IOException e) { + // Ignore + } finally { + Closer.close(fis); + } + if(privURI == null) { + InsertableClientSSK key = InsertableClientSSK.createRandom(pr.getNode().random, "index.yml"); + privURI = key.getInsertURI(); + pubURI = key.getURI(); + newPrivKey = true; + Logger.debug(this, "Created new keypair, pubkey is "+pubURI); + } + FileOutputStream fos = null; + if(newPrivKey) { + try { + fos = new FileOutputStream(new File(SpiderIndexUploader.PRIV_URI_FILENAME)); + OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); + osw.write(privURI.toASCIIString()); + osw.close(); + fos = null; + } catch (IOException e) { + Logger.error(this, "Failed to write new private key : ", e); + } finally { + Closer.close(fos); + } + } + try { + fos = new FileOutputStream(new File(SpiderIndexUploader.PUB_URI_FILENAME)); + OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); + osw.write(pubURI.toASCIIString()); + osw.close(); + fos = null; + } catch (IOException e) { + Logger.error(this, "Failed to write new pubkey", e); + } finally { + Closer.close(fos); + } + try { + fis = new FileInputStream(new File(SpiderIndexUploader.EDITION_FILENAME)); + BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); + try { + edition = Long.parseLong(br.readLine()); + } catch (NumberFormatException e) { + edition = 0; + } + Logger.debug(this, "Edition: "+edition); + fis.close(); + fis = null; + } catch (IOException e) { + // Ignore + edition = 0; + } finally { + Closer.close(fis); + } + } + return privURI; + } + + synchronized FreenetURI getPrivateUSK() { + return loadSSKURIs().setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(edition); + } + + /** Will return edition -1 if no successful uploads so far, otherwise the correct edition. */ + synchronized FreenetURI getPublicUSK() { + loadSSKURIs(); + return pubURI.setKeyType("USK").setDocName(SpiderIndexUploader.INDEX_DOCNAME).setSuggestedEdition(getLastUploadedEdition()); + } + + private synchronized long getLastUploadedEdition() { + /** If none uploaded, return -1, otherwise return the last uploaded version. 
*/ + return edition-1; + } + +} \ No newline at end of file diff --git a/src/main/java/plugins/Library/SpiderIndexUploader.java b/src/main/java/plugins/Library/SpiderIndexUploader.java new file mode 100644 index 00000000..9b853337 --- /dev/null +++ b/src/main/java/plugins/Library/SpiderIndexUploader.java @@ -0,0 +1,969 @@ +package plugins.Library; + +import java.io.BufferedReader; +import java.io.EOFException; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.net.MalformedURLException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.logging.Level; + +import plugins.Library.client.FreenetArchiver; +import plugins.Library.index.ProtoIndex; +import plugins.Library.index.ProtoIndexComponentSerialiser; +import plugins.Library.index.ProtoIndexSerialiser; +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermEntryReaderWriter; +import plugins.Library.io.serial.LiveArchiver; +import plugins.Library.io.serial.Serialiser.PullTask; +import plugins.Library.io.serial.Serialiser.PushTask; +import plugins.Library.util.SkeletonBTreeMap; +import plugins.Library.util.SkeletonBTreeSet; +import plugins.Library.util.TaskAbortExceptionConvertor; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.func.Closure; +import freenet.client.InsertException; +import freenet.keys.FreenetURI; +import freenet.node.RequestStarter; +import freenet.pluginmanager.PluginNotFoundException; +import freenet.pluginmanager.PluginReplySender; +import freenet.pluginmanager.PluginRespirator; +import freenet.support.Logger; +import freenet.support.MutableBoolean; +import freenet.support.SimpleFieldSet; +import freenet.support.TimeUtil; +import freenet.support.api.Bucket; +import freenet.support.io.BucketTools; +import freenet.support.io.Closer; +import freenet.support.io.FileBucket; +import freenet.support.io.FileUtil; +import freenet.support.io.LineReadingInputStream; + +public class SpiderIndexUploader { + + SpiderIndexUploader(PluginRespirator pr) { + this.pr = pr; + spiderIndexURIs = new SpiderIndexURIs(pr); + } + + static boolean logMINOR; + static { + Logger.registerClass(SpiderIndexUploader.class); + } + + private final PluginRespirator pr; + private Object freenetMergeSync = new Object(); + private boolean freenetMergeRunning = false; + private boolean diskMergeRunning = false; + + private final ArrayList toMergeToDisk = new ArrayList(); + static final int MAX_HANDLING_COUNT = 5; + // When pushing is broken, allow max handling to reach this level before stalling forever to prevent running out of disk space. + private int PUSH_BROKEN_MAX_HANDLING_COUNT = 10; + // Don't use too much disk space, take into account fact that Spider slows down over time. + + private boolean pushBroken; + + /** The temporary on-disk index. We merge stuff into this until it exceeds a threshold size, then + * we create a new diskIdx and merge the old one into the idxFreenet. */ + ProtoIndex idxDisk; + /** idxDisk gets merged into idxFreenet this long after the last merge completed. 
*/ + static final long MAX_TIME = 24*60*60*1000L; + /** idxDisk gets merged into idxFreenet after this many incoming updates from Spider. */ + static final int MAX_UPDATES = 16; + /** idxDisk gets merged into idxFreenet after it has grown to this many terms. + * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must + * fit into memory during the merge process. */ + static final int MAX_TERMS = 100*1000; + /** idxDisk gets merged into idxFreenet after it has grown to this many terms. + * Note that the entire main tree of terms (not the sub-trees with the positions and urls in) must + * fit into memory during the merge process. */ + static final int MAX_TERMS_NOT_UPLOADED = 10*1000; + /** Maximum size of a single entry, in TermPageEntry count, on disk. If we exceed this we force an + * insert-to-freenet and move on to a new disk index. The problem is that the merge to Freenet has + * to keep the whole of each entry in RAM. This is only true for the data being merged in - the + * on-disk index - and not for the data on Freenet, which is pulled on demand. SCALABILITY */ + static final int MAX_DISK_ENTRY_SIZE = 10000; + /** Like pushNumber, the number of the current disk dir, used to create idxDiskDir. */ + private int dirNumber; + static final String DISK_DIR_PREFIX = "library-temp-index-"; + /** Directory the current idxDisk is saved in. */ + File idxDiskDir; + private int mergedToDisk; + + ProtoIndexSerialiser srl = null; + FreenetURI lastUploadURI = null; + String lastDiskIndexName; + /** The uploaded index on Freenet. This never changes, it just gets updated. */ + ProtoIndex idxFreenet; + + private final SpiderIndexURIs spiderIndexURIs; + + long pushNumber; + static final String LAST_URL_FILENAME = "library.index.lastpushed.chk"; + static final String PRIV_URI_FILENAME = "library.index.privkey"; + static final String PUB_URI_FILENAME = "library.index.pubkey"; + static final String EDITION_FILENAME = "library.index.next-edition"; + + static final String LAST_DISK_FILENAME = "library.index.lastpushed.disk"; + + static final String BASE_FILENAME_PUSH_DATA = "library.index.data."; + + /** Merge from the Bucket chain to the on-disk idxDisk. */ + protected void wrapMergeToDisk() { + spiderIndexURIs.loadSSKURIs(); + boolean first = true; + while(true) { + final Bucket data; + synchronized(freenetMergeSync) { + if(pushBroken) { + Logger.error(this, "Pushing broken"); + return; + } + if(first && diskMergeRunning) { + Logger.error(this, "Already running a handler!"); + return; + } else if((!first) && (!diskMergeRunning)) { + Logger.error(this, "Already running yet runningHandler is false?!"); + return; + } + first = false; + if(toMergeToDisk.size() == 0) { + if(logMINOR) Logger.minor(this, "Nothing to handle"); + diskMergeRunning = false; + freenetMergeSync.notifyAll(); + return; + } + data = toMergeToDisk.remove(0); + freenetMergeSync.notifyAll(); + diskMergeRunning = true; + } + try { + mergeToDisk(data); + } catch (Throwable t) { + // Failed. + synchronized(freenetMergeSync) { + diskMergeRunning = false; + pushBroken = true; + freenetMergeSync.notifyAll(); + } + if(t instanceof RuntimeException) + throw (RuntimeException)t; + if(t instanceof Error) + throw (Error)t; + } + } + } + + // This is a member variable because it is huge, and having huge stuff in local variables seems to upset the default garbage collector. + // It doesn't need to be synchronized because it's always used from mergeToDisk, which never runs in parallel. 
+ private Map> newtrees; + // Ditto + private SortedSet terms; + + ProtoIndexSerialiser srlDisk = null; + private ProtoIndexComponentSerialiser leafsrlDisk; + + private long lastMergedToFreenet = -1; + + /** Merge a bucket of TermEntry's into an on-disk index. */ + private void mergeToDisk(Bucket data) { + + boolean newIndex = false; + + if(idxDiskDir == null) { + newIndex = true; + if(!createDiskDir()) return; + } + + if(!makeDiskDirSerialiser()) return; + + // Read data into newtrees and trees. + long entriesAdded = readTermsFrom(data); + + if(terms.size() == 0) { + Logger.debug(this, "Nothing to merge"); + synchronized(this) { + newtrees = null; + terms = null; + } + return; + } + + // Merge the new data to the disk index. + + try { + final MutableBoolean maxDiskEntrySizeExceeded = new MutableBoolean(); + maxDiskEntrySizeExceeded.value = false; + long mergeStartTime = System.currentTimeMillis(); + if(newIndex) { + if(createDiskIndex()) + maxDiskEntrySizeExceeded.value = true; + } else { + // async merge + Closure>, TaskAbortException> clo = + createMergeFromNewtreesClosure(maxDiskEntrySizeExceeded); + assert(idxDisk.ttab.isBare()); + Logger.debug(this, "Merging "+terms.size()+" terms, tree.size = "+idxDisk.ttab.size()+" from "+data+"..."); + idxDisk.ttab.update(terms, null, clo, new TaskAbortExceptionConvertor()); + + } + // Synchronize anyway so garbage collector knows about it. + synchronized(this) { + newtrees = null; + terms = null; + } + assert(idxDisk.ttab.isBare()); + PushTask task4 = new PushTask(idxDisk); + srlDisk.push(task4); + + long mergeEndTime = System.currentTimeMillis(); + Logger.debug(this, entriesAdded + " entries merged to disk in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", "); + // FileArchiver produces a String, which is a filename not including the prefix or suffix. + String uri = (String)task4.meta; + lastDiskIndexName = uri; + Logger.normal(this, "Pushed new index to file "+uri); + if(writeStringTo(new File(LAST_DISK_FILENAME), uri) && + writeStringTo(new File(idxDiskDir, LAST_DISK_FILENAME), uri)) { + // Successfully uploaded and written new status. Can delete the incoming data. + data.free(); + } + + maybeMergeToFreenet(maxDiskEntrySizeExceeded); + } catch (TaskAbortException e) { + Logger.error(this, "Failed to upload index for spider: ", e); + e.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + } + } + + /** We have just written a Bucket of new data to an on-disk index. We may or may not want to + * upload to an on-Freenet index, depending on how big the data is etc. If we do, we will need + * to create a new on-disk index. + * @param maxDiskEntrySizeExceeded A flag object which is set (off-thread) if any single term + * in the index is very large. + */ + private void maybeMergeToFreenet(MutableBoolean maxDiskEntrySizeExceeded) { + // Maybe chain to mergeToFreenet ??? 
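// The trigger condition below is dense; the same policy written out as a pure
// predicate over this class's threshold constants (a readability sketch,
// hypothetical method name):
//
//   boolean shouldUploadToFreenet(long terms, int merges, boolean termTooBig,
//                                 long lastUpload, long now) {
//       return termTooBig                                  // one term too big for RAM
//           || merges > MAX_UPDATES                        // too many Spider pushes
//           || terms > MAX_TERMS_NOT_UPLOADED              // disk index grown large
//           || (lastUpload > 0 && terms > MAX_TERMS)
//           || (lastUpload > 0 && now - lastUpload > MAX_TIME); // too long since last upload
//   }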
+ + boolean termTooBig = false; + synchronized(maxDiskEntrySizeExceeded) { + termTooBig = maxDiskEntrySizeExceeded.value; + } + + mergedToDisk++; + if((lastMergedToFreenet > 0 && idxDisk.ttab.size() > MAX_TERMS) || + (idxDisk.ttab.size() > MAX_TERMS_NOT_UPLOADED) + || (mergedToDisk > MAX_UPDATES) || termTooBig || + (lastMergedToFreenet > 0 && (System.currentTimeMillis() - lastMergedToFreenet) > MAX_TIME)) { + + final ProtoIndex diskToMerge = idxDisk; + final File dir = idxDiskDir; + Logger.debug(this, "Exceeded threshold, starting new disk index and starting merge from disk to Freenet..."); + mergedToDisk = 0; + lastMergedToFreenet = -1; + idxDisk = null; + srlDisk = null; + leafsrlDisk = null; + idxDiskDir = null; + lastDiskIndexName = null; + + synchronized(freenetMergeSync) { + while(freenetMergeRunning) { + if(pushBroken) return; + Logger.normal(this, "Need to merge to Freenet, but last merge not finished yet. Waiting..."); + try { + freenetMergeSync.wait(); + } catch (InterruptedException e) { + // Ignore + } + } + if(pushBroken) return; + freenetMergeRunning = true; + } + + Runnable r = new Runnable() { + + public void run() { + try { + mergeToFreenet(diskToMerge, dir); + } catch (Throwable t) { + Logger.error(this, "Merge to Freenet failed: ", t); + t.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + } finally { + synchronized(freenetMergeSync) { + freenetMergeRunning = false; + if(!pushBroken) + lastMergedToFreenet = System.currentTimeMillis(); + freenetMergeSync.notifyAll(); + } + } + } + + }; + pr.getNode().executor.execute(r, "Library: Merge data from disk to Freenet"); + } else { + Logger.debug(this, "Not merging to Freenet yet: "+idxDisk.ttab.size()+" terms in index, "+mergedToDisk+" merges, "+(lastMergedToFreenet <= 0 ? "never merged to Freenet" : ("last merged to Freenet "+TimeUtil.formatTime(System.currentTimeMillis() - lastMergedToFreenet))+"ago")); + } + } + + private boolean writeURITo(File filename, FreenetURI uri) { + return writeStringTo(filename, uri.toString()); + } + + private boolean writeStringTo(File filename, String uri) { + FileOutputStream fos = null; + try { + fos = new FileOutputStream(filename); + OutputStreamWriter osw = new OutputStreamWriter(fos, "UTF-8"); + osw.write(uri.toString()); + osw.close(); + fos = null; + return true; + } catch (IOException e) { + Logger.error(this, "Failed to write to "+filename+" : "+uri, e); + return false; + } finally { + Closer.close(fos); + } + } + + private String readStringFrom(File file) { + String ret; + FileInputStream fis = null; + try { + fis = new FileInputStream(file); + BufferedReader br = new BufferedReader(new InputStreamReader(fis, "UTF-8")); + ret = br.readLine(); + fis.close(); + fis = null; + return ret; + } catch (IOException e) { + // Ignore + return null; + } finally { + Closer.close(fis); + } + } + + private FreenetURI readURIFrom(File file) { + String s = readStringFrom(file); + if(s != null) { + try { + return new FreenetURI(s); + } catch (MalformedURLException e) { + // Ignore. + } + } + return null; + } + + /** Create a callback object which will do the merging of individual terms. This will be called + * for each term as it is unpacked from the existing on-disk index. It then merges in new data + * from newtrees and writes the subtree for the term back to disk. Most of the work is done in + * update() below. + * @param maxDiskEntrySizeExceeded Will be set if any single term is so large that we need to + * upload to Freenet immediately. 
*/ + private Closure>, TaskAbortException> createMergeFromNewtreesClosure(final MutableBoolean maxDiskEntrySizeExceeded) { + return new + Closure>, TaskAbortException>() { + /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { + String key = entry.getKey(); + SkeletonBTreeSet tree = entry.getValue(); + if(logMINOR) Logger.minor(this, "Processing: "+key+" : "+tree); + if(tree != null) + Logger.debug(this, "Merging data (on disk) in term "+key); + else + Logger.debug(this, "Adding new term to disk index: "+key); + //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)")); + if (tree == null) { + entry.setValue(tree = makeEntryTree(leafsrlDisk)); + } + assert(tree.isBare()); + SortedSet toMerge = newtrees.get(key); + tree.update(toMerge, null); + if(toMerge.size() > MAX_DISK_ENTRY_SIZE) + synchronized(maxDiskEntrySizeExceeded) { + maxDiskEntrySizeExceeded.value = true; + } + toMerge = null; + newtrees.remove(key); + assert(tree.isBare()); + if(logMINOR) Logger.minor(this, "Updated: "+key+" : "+tree); + //System.out.println("handled " + key); + } + }; + } + + /** Create a new on-disk index from terms and newtrees. + * @return True if the size of any one item in the index is so large that we must upload + * immediately to Freenet. + * @throws TaskAbortException If something broke catastrophically. */ + private boolean createDiskIndex() throws TaskAbortException { + boolean tooBig = false; + // created a new index, fill it with data. + // DON'T MERGE, merge with a lot of data will deadlock. + // FIXME throw in update() if it will deadlock. + for(String key : terms) { + SkeletonBTreeSet tree = makeEntryTree(leafsrlDisk); + SortedSet toMerge = newtrees.get(key); + tree.addAll(toMerge); + if(toMerge.size() > MAX_DISK_ENTRY_SIZE) + tooBig = true; + toMerge = null; + tree.deflate(); + assert(tree.isBare()); + idxDisk.ttab.put(key, tree); + } + idxDisk.ttab.deflate(); + return tooBig; + } + + /** Read the TermEntry's from the Bucket into newtrees and terms, and set up the index + * properties. + * @param data The Bucket containing TermPageEntry's etc serialised with TermEntryReaderWriter. + */ + private long readTermsFrom(Bucket data) { + FileWriter w = null; + newtrees = new HashMap>(); + terms = new TreeSet(); + int entriesAdded = 0; + InputStream is = null; + try { + Logger.normal(this, "Bucket of buffer received, "+data.size()+" bytes"); + is = data.getInputStream(); + SimpleFieldSet fs = new SimpleFieldSet(new LineReadingInputStream(is), 1024, 512, true, true, true); + idxDisk.setName(fs.get("index.title")); + idxDisk.setOwnerEmail(fs.get("index.owner.email")); + idxDisk.setOwner(fs.get("index.owner.name")); + idxDisk.setTotalPages(fs.getLong("totalPages", -1)); + try{ + while(true){ // Keep going til an EOFExcepiton is thrown + TermEntry readObject = TermEntryReaderWriter.getInstance().readObject(is); + SortedSet set = newtrees.get(readObject.subj); + if(set == null) + newtrees.put(readObject.subj, set = new TreeSet()); + set.add(readObject); + terms.add(readObject.subj); + entriesAdded++; + } + }catch(EOFException e){ + // EOF, do nothing + } + } catch (IOException ex) { + java.util.logging.Logger.getLogger(Main.class.getName()).log(Level.SEVERE, null, ex); + } finally { + Closer.close(is); + } + return entriesAdded; + } + + /** Create a directory for an on-disk index. + * @return False if something broke and we can't continue. 
*/ + private boolean createDiskDir() { + dirNumber++; + idxDiskDir = new File(DISK_DIR_PREFIX + Integer.toString(dirNumber)); + Logger.normal(this, "Created new disk dir for merging: "+idxDiskDir); + if(!(idxDiskDir.mkdir() || idxDiskDir.isDirectory())) { + Logger.error(this, "Unable to create new disk dir: "+idxDiskDir); + synchronized(this) { + pushBroken = true; + return false; + } + } + return true; + } + + /** Set up the serialisers for an on-disk index. + * @return False if something broke and we can't continue. */ + private boolean makeDiskDirSerialiser() { + if(srlDisk == null) { + srlDisk = ProtoIndexSerialiser.forIndex(idxDiskDir); + LiveArchiver,SimpleProgress> archiver = + (LiveArchiver,SimpleProgress>)(srlDisk.getChildSerialiser()); + leafsrlDisk = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, archiver); + if(lastDiskIndexName == null) { + try { + idxDisk = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0L); + } catch (java.net.MalformedURLException e) { + throw new AssertionError(e); + } + // FIXME more hacks: It's essential that we use the same FileArchiver instance here. + leafsrlDisk.setSerialiserFor(idxDisk); + } else { + try { + PullTask pull = new PullTask(lastDiskIndexName); + Logger.debug(this, "Pulling previous index "+lastDiskIndexName+" from disk so can update it."); + srlDisk.pull(pull); + Logger.debug(this, "Pulled previous index "+lastDiskIndexName+" from disk - updating..."); + idxDisk = pull.data; + if(idxDisk.getSerialiser().getLeafSerialiser() != archiver) + throw new IllegalStateException("Different serialiser: "+idxFreenet.getSerialiser()+" should be "+leafsrl); + } catch (TaskAbortException e) { + Logger.error(this, "Failed to download previous index for spider update: "+e, e); + e.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + return false; + } + } + } + return true; + } + + static final String INDEX_DOCNAME = "index.yml"; + + private ProtoIndexComponentSerialiser leafsrl; + + /** Merge a disk dir to an on-Freenet index. Usually called on startup, i.e. we haven't just + * created the on-disk index so we need to setup the ProtoIndex etc. */ + protected void mergeToFreenet(File diskDir) { + ProtoIndexSerialiser s = ProtoIndexSerialiser.forIndex(diskDir); + LiveArchiver,SimpleProgress> archiver = + (LiveArchiver,SimpleProgress>)(s.getChildSerialiser()); + ProtoIndexComponentSerialiser leaf = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, archiver); + String f = this.readStringFrom(new File(diskDir, LAST_DISK_FILENAME)); + if(f == null) { + if(diskDir.list().length == 0) { + Logger.debug(this, "Directory "+diskDir+" is empty. 
Nothing to merge."); + diskDir.delete(); + return; + } + // Ignore + Logger.error(this, "Unable to merge old data "+diskDir); + return; + } else { + Logger.debug(this, "Continuing old bucket: "+f); + } + + ProtoIndex idxDisk = null; + try { + PullTask pull = new PullTask(f); + Logger.debug(this, "Pulling previous index "+f+" from disk so can update it."); + s.pull(pull); + Logger.debug(this, "Pulled previous index "+f+" from disk - updating..."); + idxDisk = pull.data; + if(idxDisk.getSerialiser().getLeafSerialiser() != archiver) + throw new IllegalStateException("Different serialiser: "+idxDisk.getSerialiser()+" should be "+archiver); + } catch (TaskAbortException e) { + Logger.error(this, "Failed to download previous index for spider update: "+e, e); + e.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + return; + } + mergeToFreenet(idxDisk, diskDir); + } + + private final Object inflateSync = new Object(); + + /** Merge from an on-disk index to an on-Freenet index. + * @param diskToMerge The on-disk index. + * @param diskDir The folder the on-disk index is stored in. + */ + protected void mergeToFreenet(ProtoIndex diskToMerge, File diskDir) { + Logger.debug(this, "Merging on-disk index to Freenet: "+diskDir); + if(lastUploadURI == null) { + lastUploadURI = readURIFrom(new File(LAST_URL_FILENAME)); + } + setupFreenetCacheDir(); + + makeFreenetSerialisers(); + + updateOverallMetadata(diskToMerge); + + final SkeletonBTreeMap> newtrees = diskToMerge.ttab; + + // Do the upload + + // async merge + Closure>, TaskAbortException> clo = + createMergeFromTreeClosure(newtrees); + try { + long mergeStartTime = System.currentTimeMillis(); + assert(idxFreenet.ttab.isBare()); + Iterator it = + diskToMerge.ttab.keySetAutoDeflate().iterator(); + TreeSet terms = new TreeSet(); + while(it.hasNext()) terms.add(it.next()); + Logger.debug(this, "Merging "+terms.size()+" terms from disk to Freenet..."); + assert(terms.size() == diskToMerge.ttab.size()); + assert(idxFreenet.ttab.isBare()); + assert(diskToMerge.ttab.isBare()); + long entriesAdded = terms.size(); + // Run the actual merge. + idxFreenet.ttab.update(terms, null, clo, new TaskAbortExceptionConvertor()); + assert(idxFreenet.ttab.isBare()); + // Deflate the main tree. + newtrees.deflate(); + assert(diskToMerge.ttab.isBare()); + + // Push the top node to a CHK. + PushTask task4 = new PushTask(idxFreenet); + task4.meta = FreenetURI.EMPTY_CHK_URI; + srl.push(task4); + + // Now wait for the inserts to finish. They are started asynchronously in the above merge. + FreenetArchiver> arch = + (FreenetArchiver>) srl.getChildSerialiser(); + arch.waitForAsyncInserts(); + + long mergeEndTime = System.currentTimeMillis(); + Logger.debug(this, entriesAdded + " entries merged in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", "); + FreenetURI uri = (FreenetURI)task4.meta; + lastUploadURI = uri; + Logger.debug(this, "Uploaded new index to "+uri); + if(writeURITo(new File(LAST_URL_FILENAME), uri)) { + newtrees.deflate(); + diskToMerge = null; + terms = null; + Logger.debug(this, "Finished with disk index "+diskDir); + FileUtil.removeAll(diskDir); + } + + // Create the USK to redirect to the CHK at the top of the index. 
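// Worked example of the edition bookkeeping in uploadUSKForFreenetIndex():
// if the redirect insert lands at, say, USK@<pubkey>/index.yml/6, the code
// calls setEdition(6 + 1) so the next upload is attempted at edition 7, while
// getPublicUSK() advertises getLastUploadedEdition() = 7 - 1 = 6, the edition
// actually on Freenet ("<pubkey>" and the edition number are placeholders).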
+ uploadUSKForFreenetIndex(uri); + + } catch (TaskAbortException e) { + Logger.error(this, "Failed to upload index for spider: "+e, e); + e.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + } + } + + private void uploadUSKForFreenetIndex(FreenetURI uri) { + FreenetURI privUSK = spiderIndexURIs.getPrivateUSK(); + try { + FreenetURI tmp = pr.getHLSimpleClient().insertRedirect(privUSK, uri); + long ed; + synchronized(freenetMergeSync) { + ed = spiderIndexURIs.setEdition(tmp.getEdition()+1); + } + Logger.debug(this, "Uploaded index as USK to "+tmp); + + writeStringTo(new File(EDITION_FILENAME), Long.toString(ed)); + + } catch (InsertException e) { + e.printStackTrace(); + Logger.error(this, "Failed to upload USK for index update", e); + } + } + + /** Create a Closure which will merge the subtrees from one index (on disk) into the subtrees + * of another index (on Freenet). It will be called with each subtree from the on-Freenet + * index, and will merge data from the relevant on-disk subtree. Both subtrees are initially + * deflated, and should be deflated when we leave the method, to avoid running out of memory. + * @param newtrees The on-disk tree of trees to get data from. + * @return + */ + private Closure>, TaskAbortException> createMergeFromTreeClosure(final SkeletonBTreeMap> newtrees) { + return new + Closure>, TaskAbortException>() { + /*@Override**/ public void invoke(Map.Entry> entry) throws TaskAbortException { + String key = entry.getKey(); + SkeletonBTreeSet tree = entry.getValue(); + if(logMINOR) Logger.minor(this, "Processing: "+key+" : "+tree); + //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)")); + boolean newTree = false; + if (tree == null) { + entry.setValue(tree = makeEntryTree(leafsrl)); + newTree = true; + } + assert(tree.isBare()); + SortedSet data; + // Can't be run in parallel. + synchronized(inflateSync) { + newtrees.inflate(key, true); + SkeletonBTreeSet entries; + entries = newtrees.get(key); + // CONCURRENCY: Because the lower-level trees are packed by the top tree, the bottom + // trees (SkeletonBTreeSet's) are not independant of each other. When the newtrees + // inflate above runs, it can deflate a tree that is still in use by another instance + // of this callback. Therefore we must COPY IT AND DEFLATE IT INSIDE THE LOCK. + entries.inflate(); + data = new TreeSet(entries); + entries.deflate(); + assert(entries.isBare()); + } + if(tree != null) + + if(newTree) { + tree.addAll(data); + assert(tree.size() == data.size()); + Logger.debug(this, "Added data to Freenet for term "+key+" : "+data.size()); + } else { + int oldSize = tree.size(); + tree.update(data, null); + // Note that it is possible for data.size() + oldSize != tree.size(), because we might be merging data we've already merged. + // But most of the time it will add up. + Logger.debug(this, "Merged data to Freenet in term "+key+" : "+data.size()+" + "+oldSize+" -> "+tree.size()); + } + tree.deflate(); + assert(tree.isBare()); + if(logMINOR) Logger.minor(this, "Updated: "+key+" : "+tree); + } + }; + } + + /** Update the overall metadata for the on-Freenet index from the on-disk index. */ + private void updateOverallMetadata(ProtoIndex diskToMerge) { + idxFreenet.setName(diskToMerge.getName()); + idxFreenet.setOwnerEmail(diskToMerge.getOwnerEmail()); + idxFreenet.setOwner(diskToMerge.getOwner()); + // This is roughly accurate, it might not be exactly so if we process a bit out of order. 
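// Worked example of the accumulation below: totalPages uses -1 as an "unknown"
// sentinel (see fs.getLong("totalPages", -1) in readTermsFrom), so the
// Math.max clamp stops an unknown count from subtracting one: merging a
// 1200-page disk index over an unknown total gives 1200 + max(0, -1) = 1200,
// and over a known total of 5000 gives 1200 + max(0, 5000) = 6200. The page
// counts are illustrative.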
+ idxFreenet.setTotalPages(diskToMerge.getTotalPages() + Math.max(0,idxFreenet.getTotalPages())); + } + + /** Setup the serialisers for uploading to Freenet. These convert tree nodes to and from blocks + * on Freenet, essentially. */ + private void makeFreenetSerialisers() { + if(srl == null) { + srl = ProtoIndexSerialiser.forIndex(lastUploadURI, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS); + LiveArchiver,SimpleProgress> archiver = + (LiveArchiver,SimpleProgress>)(srl.getChildSerialiser()); + leafsrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_DEFAULT, archiver); + if(lastUploadURI == null) { + try { + idxFreenet = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0L); + } catch (java.net.MalformedURLException e) { + throw new AssertionError(e); + } + // FIXME more hacks: It's essential that we use the same FreenetArchiver instance here. + leafsrl.setSerialiserFor(idxFreenet); + } else { + try { + PullTask pull = new PullTask(lastUploadURI); + Logger.debug(this, "Pulling previous index "+lastUploadURI+" so can update it."); + srl.pull(pull); + Logger.debug(this, "Pulled previous index "+lastUploadURI+" - updating..."); + idxFreenet = pull.data; + if(idxFreenet.getSerialiser().getLeafSerialiser() != archiver) + throw new IllegalStateException("Different serialiser: "+idxFreenet.getSerialiser()+" should be "+leafsrl); + } catch (TaskAbortException e) { + Logger.error(this, "Failed to download previous index for spider update: "+e, e); + e.printStackTrace(); + synchronized(freenetMergeSync) { + pushBroken = true; + } + return; + } + } + } + } + + /** Set up the on-disk cache, which keeps a copy of everything we upload to Freenet, so we + * won't need to re-download it, which can be very slow and doesn't always succeed. 
*/ + private void setupFreenetCacheDir() { + if(FreenetArchiver.getCacheDir() == null) { + File dir = new File("library-spider-pushed-data-cache"); + dir.mkdir(); + FreenetArchiver.setCacheDir(dir); + } + } + + protected static SkeletonBTreeSet makeEntryTree(ProtoIndexComponentSerialiser leafsrl) { + SkeletonBTreeSet tree = new SkeletonBTreeSet(ProtoIndex.BTREE_NODE_MIN); + leafsrl.setSerialiserFor(tree); + return tree; + } + + public void start() { + final String[] oldToMerge; + synchronized(freenetMergeSync) { + oldToMerge = new File(".").list(new FilenameFilter() { + + public boolean accept(File arg0, String arg1) { + if(!(arg1.toLowerCase().startsWith(BASE_FILENAME_PUSH_DATA))) return false; + File f = new File(arg0, arg1); + if(!f.isFile()) return false; + if(f.length() == 0) { f.delete(); return false; } + String s = f.getName().substring(BASE_FILENAME_PUSH_DATA.length()); + pushNumber = Math.max(pushNumber, Long.parseLong(s)+1); + return true; + } + + }); + } + final String[] dirsToMerge; + synchronized(freenetMergeSync) { + dirsToMerge = new File(".").list(new FilenameFilter() { + + public boolean accept(File arg0, String arg1) { + if(!(arg1.toLowerCase().startsWith(DISK_DIR_PREFIX))) return false; + File f = new File(arg0, arg1); + String s = f.getName().substring(DISK_DIR_PREFIX.length()); + dirNumber = Math.max(dirNumber, Integer.parseInt(s)+1); + return true; + } + + }); + } + if(oldToMerge != null && oldToMerge.length > 0) { + Logger.debug(this, "Found "+oldToMerge.length+" buckets of old index data to merge..."); + Runnable r = new Runnable() { + + public void run() { + synchronized(freenetMergeSync) { + for(String filename : oldToMerge) { + File f = new File(filename); + toMergeToDisk.add(new FileBucket(f, true, false, false, true)); + } + } + wrapMergeToDisk(); + } + + }; + pr.getNode().executor.execute(r, "Library: handle index data from previous run"); + } + if(dirsToMerge != null && dirsToMerge.length > 0) { + Logger.debug(this, "Found "+dirsToMerge.length+" disk trees of old index data to merge..."); + Runnable r = new Runnable() { + + public void run() { + synchronized(freenetMergeSync) { + while(freenetMergeRunning) { + if(pushBroken) return; + Logger.normal(this, "Need to merge to Freenet, but last merge not finished yet. Waiting..."); + try { + freenetMergeSync.wait(); + } catch (InterruptedException e) { + // Ignore + } + } + if(pushBroken) return; + freenetMergeRunning = true; + } + try { + for(String filename : dirsToMerge) { + File f = new File(filename); + mergeToFreenet(f); + } + } finally { + synchronized(freenetMergeSync) { + freenetMergeRunning = false; + if(!pushBroken) + lastMergedToFreenet = System.currentTimeMillis(); + freenetMergeSync.notifyAll(); + } + } + + } + + }; + pr.getNode().executor.execute(r, "Library: handle trees from previous run"); + } + } + + public void handlePushBuffer(SimpleFieldSet params, Bucket data) { + + if(data.size() == 0) { + Logger.error(this, "Bucket of data ("+data+") to push is empty", new Exception("error")); + data.free(); + return; + } + + // Process data off-thread, but only one load at a time. + // Hence it won't stall Spider unless we get behind. 
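+		// This is a bounded producer/consumer handoff: Spider (the producer) blocks in this
+		// method once toMergeToDisk holds more than MAX_HANDLING_COUNT buckets, and the single
+		// background merge thread drains the queue. A minimal sketch of the same idiom, with
+		// hypothetical names rather than the fields of this class:
+		//
+		//   synchronized (lock) {
+		//       while (queue.size() > MAX && !broken) lock.wait(); // producer back-pressure
+		//       queue.add(bucket);
+		//   }
+		//   // consumer: take a bucket off the queue, then lock.notifyAll() when done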
+ + long pn; + synchronized(this) { + pn = pushNumber++; + } + + final File pushFile = new File(BASE_FILENAME_PUSH_DATA+pn); + Bucket output = new FileBucket(pushFile, false, false, false, true); + try { + BucketTools.copy(data, output); + data.free(); + Logger.debug(this, "Written data to "+pushFile); + } catch (IOException e1) { + e1.printStackTrace(); + Logger.error(this, "Unable to back up push data #"+pn, e1); + output = data; + } + + synchronized(freenetMergeSync) { + boolean waited = false; + while(toMergeToDisk.size() > MAX_HANDLING_COUNT && !pushBroken) { + Logger.error(this, "Spider feeding us data too fast, waiting for background process to finish. Ahead of us in the queue: "+toMergeToDisk.size()); + try { + waited = true; + freenetMergeSync.wait(); + } catch (InterruptedException e) { + // Ignore + } + } + toMergeToDisk.add(output); + if(pushBroken) { + if(toMergeToDisk.size() < PUSH_BROKEN_MAX_HANDLING_COUNT) + // We have written the data, it will be recovered after restart. + Logger.error(this, "Pushing is broken, failing"); + else { + // Wait forever to prevent running out of disk space. + // Spider is single threaded. + // FIXME: Use an error return or a throwable to shut down Spider. + while(true) { + try { + freenetMergeSync.wait(); + } catch (InterruptedException e) { + // Ignore + } + } + } + return; + } + if(waited) + Logger.error(this, "Waited for previous handler to go away, moving on..."); + //if(freenetMergeRunning) return; // Already running, no need to restart it. + if(diskMergeRunning) return; // Already running, no need to restart it. + } + Runnable r = new Runnable() { + + public void run() { +// wrapMergeToFreenet(); + wrapMergeToDisk(); + } + + }; + pr.getNode().executor.execute(r, "Library: Handle data from Spider"); + } + + public FreenetURI getPublicUSKURI() { + return spiderIndexURIs.getPublicUSK(); + } + + public void handleGetSpiderURI(PluginReplySender replysender) { + FreenetURI uri = getPublicUSKURI(); + SimpleFieldSet sfs = new SimpleFieldSet(true); + sfs.putSingle("reply", "getSpiderURI"); + sfs.putSingle("publicUSK", uri.toString(true, false)); + try { + replysender.send(sfs); + } catch (PluginNotFoundException e) { + // Race condition, ignore. + } + } +} \ No newline at end of file diff --git a/src/main/java/plugins/Library/Version.java b/src/main/java/plugins/Library/Version.java new file mode 100644 index 00000000..042ee3cf --- /dev/null +++ b/src/main/java/plugins/Library/Version.java @@ -0,0 +1,17 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library; + +/** + * Necessary to be able to use pluginmanager's versions + */ +public class Version { + + private static final String vcsRevision = "@custom@"; + + public static String vcsRevision() { + return vcsRevision; + } + +} diff --git a/src/main/java/plugins/Library/VirtualIndex.java b/src/main/java/plugins/Library/VirtualIndex.java new file mode 100644 index 00000000..9fae7c91 --- /dev/null +++ b/src/main/java/plugins/Library/VirtualIndex.java @@ -0,0 +1,20 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library; + +import plugins.Library.index.TermEntry; +import plugins.Library.index.URIEntry; +import plugins.Library.util.exec.Execution; + +/** +** Represents a virtual index that gets its data from another plugin. +** +** @author infinity0 +*/ +public interface VirtualIndex extends Index { + + + + +} diff --git a/src/main/java/plugins/Library/WriteableIndex.java b/src/main/java/plugins/Library/WriteableIndex.java new file mode 100644 index 00000000..5debe962 --- /dev/null +++ b/src/main/java/plugins/Library/WriteableIndex.java @@ -0,0 +1,32 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library; + +import plugins.Library.index.TermEntry; +import plugins.Library.index.URIEntry; +import plugins.Library.util.exec.Execution; + +import freenet.keys.FreenetURI; + +import java.util.Collection; +import java.util.Set; + +/** +** Represents a writable Index. +** +** TODO +** +** @author infinity0 +*/ +public interface WriteableIndex extends Index { + + public Execution putTermEntries(Collection entries); + + public Execution remTermEntries(Collection entries); + + public Execution putURIEntries(Collection entries); + + public Execution remURIEntries(Collection entries); + +} diff --git a/src/main/java/plugins/Library/client/FreenetArchiver.java b/src/main/java/plugins/Library/client/FreenetArchiver.java new file mode 100644 index 00000000..4401bbd6 --- /dev/null +++ b/src/main/java/plugins/Library/client/FreenetArchiver.java @@ -0,0 +1,608 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.client; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashSet; + +import plugins.Library.Library; +import plugins.Library.io.ObjectStreamReader; +import plugins.Library.io.ObjectStreamWriter; +import plugins.Library.io.serial.LiveArchiver; +import plugins.Library.util.exec.ProgressParts; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.TaskAbortException; + +import freenet.client.ClientMetadata; +import freenet.client.FetchException; +import freenet.client.FetchException.FetchExceptionMode; +import freenet.client.FetchResult; +import freenet.client.HighLevelSimpleClient; +import freenet.client.InsertBlock; +import freenet.client.InsertContext; +import freenet.client.InsertException; +import freenet.client.async.BaseClientPutter; +import freenet.client.async.ClientContext; +import freenet.client.async.ClientPutCallback; +import freenet.client.async.ClientPutter; +import freenet.client.async.PersistenceDisabledException; +import freenet.client.events.ClientEvent; +import freenet.client.events.ClientEventListener; +import freenet.client.events.SplitfileProgressEvent; +import freenet.crypt.SHA256; +import freenet.keys.CHKBlock; +import freenet.keys.FreenetURI; +import freenet.node.NodeClientCore; +import freenet.node.RequestClient; +import freenet.node.RequestStarter; +import freenet.support.Base64; +import freenet.support.Logger; +import freenet.support.SimpleReadOnlyArrayBucket; +import freenet.support.SizeUtil; +import freenet.support.api.Bucket; +import freenet.support.api.RandomAccessBucket; +import freenet.support.io.BucketTools; +import freenet.support.io.Closer; +import freenet.support.io.FileBucket; +import freenet.support.io.ResumeFailedException; + +/** +** Converts between a map of {@link String} to {@link Object}, and a freenet +** key. An {@link ObjectStreamReader} and an {@link ObjectStreamWriter} is +** used to do the hard work once the relevant streams have been established, +** from temporary {@link Bucket}s. +** +** Supports a local cache. +** FIXME: NOT GARBAGE COLLECTED! Eventually will fill up all available disk space! +** +** @author infinity0 +*/ +public class FreenetArchiver +implements LiveArchiver { + + final protected NodeClientCore core; + final protected ObjectStreamReader reader; + final protected ObjectStreamWriter writer; + + final protected String default_mime; + final protected int expected_bytes; + public final short priorityClass; + public final boolean realTimeFlag; + private static File cacheDir; + /** If true, we will insert data semi-asynchronously. That is, we will start the + * insert, with ForceEncode enabled, and return the URI as soon as possible. The + * inserts will continue in the background, and before inserting the final USK, + * the caller should call waitForAsyncInserts(). + * + * FIXME SECURITY: This should be relatively safe, because it's all CHKs, as + * long as the content isn't easily predictable. + * + * FIXME PERFORMANCE: This dirty hack is unnecessary if we split up + * IterableSerialiser.push into a start phase and a stop phase. + * + * The main purpose of this mechanism is to minimise memory usage: Normally the + * data being pushed remains in RAM while we do the insert, which can take a very + * long time. 
 */
+	static final boolean SEMI_ASYNC_PUSH = true;
+
+	private final HashSet<PushCallback> semiAsyncPushes = new HashSet<PushCallback>();
+	private final ArrayList<InsertException> pushesFailed = new ArrayList<InsertException>();
+	private long totalBytesPushing;
+
+	public static void setCacheDir(File dir) {
+		cacheDir = dir;
+	}
+
+	public static File getCacheDir() {
+		return cacheDir;
+	}
+
+	public FreenetArchiver(NodeClientCore c, ObjectStreamReader r, ObjectStreamWriter w, String mime, int size, short priority) {
+		if (c == null) {
+			throw new IllegalArgumentException("Can't create a FreenetArchiver with a null NodeClientCore!");
+		}
+		this.priorityClass = priority;
+		// FIXME pass it in somehow
+		if(priorityClass <= RequestStarter.IMMEDIATE_SPLITFILE_PRIORITY_CLASS)
+			realTimeFlag = true;
+		else
+			realTimeFlag = false;
+		core = c;
+		reader = r;
+		writer = w;
+		default_mime = mime;
+		expected_bytes = size;
+	}
+
+	public FreenetArchiver(NodeClientCore c, S rw, String mime, int size, short priority) {
+		this(c, rw, rw, mime, size, priority);
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation expects metadata of type {@link FreenetURI}.
+	*
+	* FIXME OPT NORM: Two-phase pull: First pull to a bucket, then construct the data.
+	* The reason for this is memory usage: We can limit the number in the second
+	* phase according to downstream demand (e.g. blocking queues in ObjectProcessor's),
+	* so that the amount of stuff kept in memory is limited, while still allowing an
+	* (almost?) unlimited number in the fetching data phase. See comments in ParallelSerialiser.createPullJob.
+	*/
+	/*@Override**/ public void pullLive(PullTask<T> task, final SimpleProgress progress) throws TaskAbortException {
+		// FIXME make retry count configurable by client metadata somehow
+		// clearly a web UI fetch wants it limited; a merge might want it unlimited
+		HighLevelSimpleClient hlsc = core.makeClient(priorityClass, false, false);
+		Bucket tempB = null; InputStream is = null;
+
+		long startTime = System.currentTimeMillis();
+
+		FreenetURI u;
+		byte[] initialMetadata;
+		String cacheKey;
+
+		if(task.meta instanceof FreenetURI) {
+			u = (FreenetURI) task.meta;
+			initialMetadata = null;
+			cacheKey = u.toString(false, true);
+		} else {
+			initialMetadata = (byte[]) task.meta;
+			u = FreenetURI.EMPTY_CHK_URI;
+			cacheKey = Base64.encode(SHA256.digest(initialMetadata));
+		}
+
+		for(int i=0;i<10;i++) {
+			// USK redirects should not happen really but can occasionally due to race conditions.
+
+			try {
+				try {
+
+					if(cacheDir != null && cacheDir.exists() && cacheDir.canRead()) {
+						File cached = new File(cacheDir, cacheKey);
+						if(cached.exists() && cached.length() != 0) {
+							tempB = new FileBucket(cached, true, false, false, false);
+							Logger.debug(this, "Fetching block for FreenetArchiver from disk cache: "+cacheKey);
+						}
+					}
+
+					if(tempB == null) {
+
+						if(initialMetadata != null)
+							Logger.debug(this, "Fetching block for FreenetArchiver from metadata ("+cacheKey+")");
+						else
+							Logger.debug(this, "Fetching block for FreenetArchiver from network: "+u);
+
+						if (progress != null) {
+							hlsc.addEventHook(new SimpleProgressUpdater(progress));
+						}
+
+						// code for async fetch - may be useful elsewhere
+						//ClientContext cctx = core.clientContext;
+						//FetchContext fctx = hlsc.getFetchContext();
+						//FetchWaiter fw = new FetchWaiter();
+						//ClientGetter gu = hlsc.fetch(furi, false, null, false, fctx, fw);
+						//gu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx, null);
+						//FetchResult res = fw.waitForCompletion();
+
+						FetchResult res;
+
+						// bookkeeping: detects bugs in the SplitfileProgressEvent handler
+					if (progress != null) {
+						ProgressParts prog_old = progress.getParts();
+						if(initialMetadata != null)
+							res = hlsc.fetchFromMetadata(new SimpleReadOnlyArrayBucket(initialMetadata));
+						else
+							res = hlsc.fetch(u);
+						ProgressParts prog_new = progress.getParts();
+						if (prog_old.known - prog_old.done != prog_new.known - prog_new.done) {
+							Logger.error(this, "Inconsistency when tracking split file progress (pulling): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done);
+							System.err.println("Inconsistency when tracking split file progress (pulling): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done);
+						}
+						progress.addPartKnown(0, true);
+					} else {
+						if(initialMetadata != null)
+							res = hlsc.fetchFromMetadata(new SimpleReadOnlyArrayBucket(initialMetadata));
+						else
+							res = hlsc.fetch(u);
+					}
+
+					tempB = res.asBucket();
+				} else {
+					// Make sure SimpleProgress.join() doesn't stall.
+					if(progress != null) {
+						progress.addPartKnown(1, true);
+						progress.addPartDone();
+					}
+				}
+				long endTime = System.currentTimeMillis();
+				Logger.debug(this, "Fetched block for FreenetArchiver in "+(endTime-startTime)+"ms.");
+				is = tempB.getInputStream();
+				task.data = (T)reader.readObject(is);
+				is.close();
+
+			} catch (FetchException e) {
+				if(e.mode == FetchExceptionMode.PERMANENT_REDIRECT && e.newURI != null) {
+					u = e.newURI;
+					continue;
+				}
+				throw new TaskAbortException("Failed to fetch content", e, true);
+
+			} catch (IOException e) {
+				throw new TaskAbortException("Failed to read content from local tempbucket", e, true);
+
+			} catch (RuntimeException e) {
+				throw new TaskAbortException("Failed to complete task: ", e);
+
+			}
+			} catch (TaskAbortException e) {
+				if (progress != null) { progress.abort(e); }
+				throw e;
+
+			} finally {
+				Closer.close(is);
+				Closer.close(tempB);
+			}
+			break;
+		}
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation produces metadata of type {@link FreenetURI}.
+	**
+	** If the input metadata is an insert URI (SSK or USK), it will be replaced
+	** by its corresponding request URI. Otherwise, the data will be inserted
+	** as a CHK. Note that since {@link FreenetURI} is immutable, the {@link
+	** FreenetURI#suggestedEdition} of a USK is '''not''' automatically
+	** incremented.
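+	** (Illustrative consequence for callers: after an insert under a USK, the caller must bump
+	** the edition itself; the spider index uploader earlier in this patch does so via
+	** spiderIndexURIs.setEdition(tmp.getEdition()+1) after insertRedirect().)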
+	*/
+	/*@Override**/ public void pushLive(PushTask<T> task, final SimpleProgress progress) throws TaskAbortException {
+		HighLevelSimpleClient hlsc = core.makeClient(priorityClass, false, false);
+		RandomAccessBucket tempB = null; OutputStream os = null;
+
+
+		try {
+			ClientPutter putter = null;
+			PushCallback cb = null;
+			try {
+				tempB = core.tempBucketFactory.makeBucket(expected_bytes, 2);
+				os = tempB.getOutputStream();
+				writer.writeObject(task.data, os);
+				os.close(); os = null;
+				tempB.setReadOnly();
+
+				boolean insertAsMetadata;
+				FreenetURI target;
+
+				if(task.meta instanceof FreenetURI) {
+					insertAsMetadata = false;
+					target = (FreenetURI) task.meta;
+				} else {
+					insertAsMetadata = true;
+					target = FreenetURI.EMPTY_CHK_URI;
+				}
+				InsertBlock ib = new InsertBlock(tempB, new ClientMetadata(default_mime), target);
+
+				Logger.debug(this, "Inserting block for FreenetArchiver...");
+				long startTime = System.currentTimeMillis();
+
+				// code for async insert - may be useful elsewhere
+				//ClientContext cctx = core.clientContext;
+				//InsertContext ictx = hlsc.getInsertContext(true);
+				//PutWaiter pw = new PutWaiter();
+				//ClientPutter pu = hlsc.insert(ib, false, null, false, ictx, pw);
+				//pu.setPriorityClass(RequestStarter.INTERACTIVE_PRIORITY_CLASS, cctx, null);
+				//FreenetURI uri = pw.waitForCompletion();
+
+				// bookkeeping: detects bugs in the SplitfileProgressEvent handler
+				ProgressParts prog_old = null;
+				if(progress != null)
+					prog_old = progress.getParts();
+
+				// FIXME make retry count configurable by client metadata somehow
+				// unlimited for push/merge
+				InsertContext ctx = hlsc.getInsertContext(false);
+				ctx.maxInsertRetries = -1;
+				// Early encode is normally a security risk.
+				// Hopefully it isn't here.
+				ctx.earlyEncode = true;
+
+				String cacheKey = null;
+
+//				if(!SEMI_ASYNC_PUSH) {
+//					// Actually report progress.
+//					if (progress != null) {
+//						hlsc.addEventHook(new SimpleProgressUpdater(progress));
+//					}
+//					uri = hlsc.insert(ib, false, null, priorityClass, ctx);
+//					if (progress != null)
+//						progress.addPartKnown(0, true);
+//				} else {
+					// Do NOT report progress. Pretend we are done as soon as
+					// we have the URI. This allows us to minimise memory usage
+					// without yet splitting up IterableSerialiser.push() and
+					// doing it properly. FIXME
+					if(progress != null)
+						progress.addPartKnown(1, true);
+					cb = new PushCallback(progress, ib);
+					putter = new ClientPutter(cb, ib.getData(), FreenetURI.EMPTY_CHK_URI, ib.clientMetadata,
+							ctx, priorityClass,
+							false, null, false, core.clientContext, null, insertAsMetadata ?
CHKBlock.DATA_LENGTH : -1); + cb.setPutter(putter); + long tStart = System.currentTimeMillis(); + try { + core.clientContext.start(putter); + } catch (PersistenceDisabledException e) { + // Impossible + } + WAIT_STATUS status = cb.waitFor(); + if(status == WAIT_STATUS.FAILED) { + cb.throwError(); + } else if(status == WAIT_STATUS.GENERATED_URI) { + FreenetURI uri = cb.getURI(); + task.meta = uri; + cacheKey = uri.toString(false, true); + Logger.debug(this, "Got URI for asynchronous insert: "+uri+" size "+tempB.size()+" in "+(System.currentTimeMillis() - cb.startTime)); + } else { + Bucket data = cb.getGeneratedMetadata(); + byte[] buf = BucketTools.toByteArray(data); + data.free(); + task.meta = buf; + cacheKey = Base64.encode(SHA256.digest(buf)); + Logger.debug(this, "Got generated metadata ("+buf.length+" bytes) for asynchronous insert size "+tempB.size()+" in "+(System.currentTimeMillis() - cb.startTime)); + } + if(progress != null) + progress.addPartDone(); +// } + + if(progress != null) { + ProgressParts prog_new = progress.getParts(); + if (prog_old.known - prog_old.done != prog_new.known - prog_new.done) { + Logger.error(this, "Inconsistency when tracking split file progress (pushing): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); + System.err.println("Inconsistency when tracking split file progress (pushing): "+prog_old.known+" of "+prog_old.done+" -> "+prog_new.known+" of "+prog_new.done); + } + } + + task.data = null; + + if(cacheKey != null && cacheDir != null && cacheDir.exists() && cacheDir.canRead()) { + File cached = new File(cacheDir, cacheKey); + Bucket cachedBucket = new FileBucket(cached, false, false, false, false); + BucketTools.copy(tempB, cachedBucket); + } + + if(SEMI_ASYNC_PUSH) + tempB = null; // Don't free it here. + + } catch (InsertException e) { + if(cb != null) { + synchronized(this) { + if(semiAsyncPushes.remove(cb)) + totalBytesPushing -= cb.size(); + } + } + throw new TaskAbortException("Failed to insert content", e, true); + + } catch (IOException e) { + throw new TaskAbortException("Failed to write content to local tempbucket", e, true); + + } catch (RuntimeException e) { + throw new TaskAbortException("Failed to complete task: ", e); + + } + } catch (TaskAbortException e) { + if (progress != null) { progress.abort(e); } + throw e; + + } finally { + Closer.close(os); + Closer.close(tempB); + } + } + + enum WAIT_STATUS { + FAILED, + GENERATED_URI, + GENERATED_METADATA; + } + + public class PushCallback implements ClientPutCallback { + + public final long startTime = System.currentTimeMillis(); + private ClientPutter putter; + private FreenetURI generatedURI; + private Bucket generatedMetadata; + private InsertException failed; + // See FIXME's in push(), IterableSerialiser. + // We don't do real progress, we pretend we're done when push() returns. 
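+		// PushCallback behaves as a one-shot latch: exactly one of onGeneratedURI(),
+		// onGeneratedMetadata() or onFailure() fires, and waitFor() reports which.
+		// A sketch of the intended use, mirroring pushLive() above (no new behaviour):
+		//
+		//   PushCallback cb = new PushCallback(progress, ib);
+		//   cb.setPutter(putter);
+		//   core.clientContext.start(putter);
+		//   if (cb.waitFor() == WAIT_STATUS.FAILED) cb.throwError();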
+// private final SimpleProgress progress; + private final long size; + private final InsertBlock ib; + + public PushCallback(SimpleProgress progress, InsertBlock ib) { +// this.progress = progress; + this.ib = ib; + size = ib.getData().size(); + } + + public long size() { + return size; + } + + public synchronized void setPutter(ClientPutter put) { + putter = put; + synchronized(FreenetArchiver.this) { + if(semiAsyncPushes.add(this)) + totalBytesPushing += size; + Logger.debug(this, "Pushing "+totalBytesPushing+" bytes on "+semiAsyncPushes.size()+" inserters"); + } + } + + public synchronized WAIT_STATUS waitFor() { + while(generatedURI == null && generatedMetadata == null && failed == null) { + try { + wait(); + } catch (InterruptedException e) { + // Ignore + } + } + if(failed != null) return WAIT_STATUS.FAILED; + if(generatedURI != null) return WAIT_STATUS.GENERATED_URI; + return WAIT_STATUS.GENERATED_METADATA; + } + + public synchronized void throwError() throws InsertException { + if(failed != null) throw failed; + } + + public synchronized FreenetURI getURI() { + return generatedURI; + } + + public synchronized Bucket getGeneratedMetadata() { + return generatedMetadata; + } + + @Override + public void onFailure(InsertException e, BaseClientPutter state) { + Logger.error(this, "Failed background insert ("+generatedURI+"), now running: "+semiAsyncPushes.size()+" ("+SizeUtil.formatSize(totalBytesPushing)+")."); + synchronized(this) { + failed = e; + notifyAll(); + } + synchronized(FreenetArchiver.this) { + if(semiAsyncPushes.remove(this)) + totalBytesPushing -= size; + pushesFailed.add(e); + FreenetArchiver.this.notifyAll(); + } + if(ib != null) + ib.free(); + } + + @Override + public void onFetchable(BaseClientPutter state) { + // Ignore + } + + @Override + public synchronized void onGeneratedURI(FreenetURI uri, BaseClientPutter state) { + generatedURI = uri; + notifyAll(); + } + + @Override + public void onSuccess(BaseClientPutter state) { + synchronized(FreenetArchiver.this) { + if(semiAsyncPushes.remove(this)) + totalBytesPushing -= size; + Logger.debug(this, "Completed background insert ("+generatedURI+") in "+(System.currentTimeMillis()-startTime)+"ms, now running: "+semiAsyncPushes.size()+" ("+SizeUtil.formatSize(totalBytesPushing)+")."); + FreenetArchiver.this.notifyAll(); + } + if(ib != null) + ib.free(); +// if(progress != null) progress.addPartKnown(0, true); + + } + + @Override + public synchronized void onGeneratedMetadata(Bucket metadata, + BaseClientPutter state) { + generatedMetadata = metadata; + notifyAll(); + } + + @Override + public void onResume(ClientContext context) throws ResumeFailedException { + // Ignore. 
+ } + + @Override + public RequestClient getRequestClient() { + return Library.REQUEST_CLIENT; + } + + } + + + + /*@Override**/ public void pull(PullTask task) throws TaskAbortException { + pullLive(task, null); + } + + /*@Override**/ public void push(PushTask task) throws TaskAbortException { + pushLive(task, null); + } + + public static class SimpleProgressUpdater implements ClientEventListener { + + final int[] splitfile_blocks = new int[2]; + final SimpleProgress progress; + + public SimpleProgressUpdater(SimpleProgress prog) { + progress = prog; + } + + @Override public void receive(ClientEvent ce, ClientContext context) { + progress.setStatus(ce.getDescription()); + if (!(ce instanceof SplitfileProgressEvent)) { return; } + + // update the progress "parts" counters + SplitfileProgressEvent evt = (SplitfileProgressEvent)ce; + int new_succeeded = evt.succeedBlocks; + int new_total = evt.minSuccessfulBlocks; + // fetch can go over 100% + if (new_succeeded > new_total) { + Logger.normal(this, "Received SplitfileProgressEvent greater than 100%: " + evt.getDescription()); + new_succeeded = new_total; + } + synchronized (splitfile_blocks) { + int old_succeeded = splitfile_blocks[0]; + int old_total = splitfile_blocks[1]; + try { + progress.addPartKnown(new_total - old_total, evt.finalizedTotal); // throws IllegalArgumentException + int n = new_succeeded - old_succeeded; + if (n == 1) { + progress.addPartDone(); + } else if (n != 0) { + Logger.normal(this, "Received SplitfileProgressEvent out-of-order: " + evt.getDescription()); + for (int i=0; i extra; + + + + final public /* DEBUG protected*/ SkeletonBTreeMap> ttab; + final protected SkeletonBTreeMap> utab; + + + public ProtoIndex(FreenetURI id, String n, String owner, String ownerEmail, long pages) { + this(id, n, owner, ownerEmail, pages, new Date(), new HashMap(), + new SkeletonBTreeMap>(BTREE_NODE_MIN), + new SkeletonBTreeMap>(BTREE_NODE_MIN)/*, + //filtab = new SkeletonPrefixTreeMap(new Token(), TKTAB_MAX)*/ + ); + } + + protected ProtoIndex(FreenetURI id, String n, String owner, String ownerEmail, long pages, Date m, Map x, + SkeletonBTreeMap> u, + SkeletonBTreeMap> t/*, + SkeletonMap f*/ + ) { + reqID = id; + name = (n == null)? "": n; + modified = m; + extra = x; + indexOwnerName = owner; + indexOwnerEmail = ownerEmail; + totalPages = pages; + + //filtab = f; + ttab = t; + utab = u; + } + + // TODO NORM maybe have more general class than ProtoIndexSerialiser + + protected int serialFormatUID; + protected ProtoIndexComponentSerialiser serialiser; + public ProtoIndexComponentSerialiser getSerialiser() { + return serialiser; + } + + public void setSerialiser(ProtoIndexComponentSerialiser srl) { + // FIXME LOW test for isLive() here... 
or something + serialFormatUID = srl.serialFormatUID; + serialiser = srl; + } + + + + + private Map>> getTermEntriesProgress = new + HashMap>>(); + + public Execution> getTermEntries(String term) { + Execution> request = getTermEntriesProgress.get(term); + if (request == null) { + request = new getTermEntriesHandler(term); + getTermEntriesProgress.put(term, request); + exec.execute((getTermEntriesHandler)request); + } + return request; + } + + + + + public Execution getURIEntry(FreenetURI uri) { + throw new UnsupportedOperationException("not implemented"); + } + + + public class getTermEntriesHandler extends AbstractExecution> implements Runnable, ChainedProgress { + // TODO NORM have a Runnable field instead of extending Runnable + // basically, redesign this entire class and series of classes + + final Map trackers = new LinkedHashMap(); + Object current_meta; + ProgressTracker current_tracker; + + protected getTermEntriesHandler(String t) { + super(t); + } + + @Override public ProgressParts getParts() throws TaskAbortException { + // TODO NORM tidy this up + Set result = getResult(); + int started = trackers.size(); + int known = started; + int done = started - 1; + // FIXME is ++known valid here? I get an exception with ProgressParts(0/1/0/2) without it, + // which presumably means last != null while trackers.size() == 0. + if (last != null) { ++started; ++done; ++known; } + if (done < 0) { done = 0; } + int estimate = (result != null)? ProgressParts.TOTAL_FINALIZED: Math.max(ttab.heightEstimate()+1, known); + return new ProgressParts(done, started, known, estimate); + } + + @Override public String getStatus() { + Progress cur = getCurrentProgress(); + return (cur == null)? "Starting next stage...": cur.getSubject() + ": " + cur.getStatus(); + } + + /*@Override**/ public Progress getCurrentProgress() { + return last != null? last: current_tracker == null? null: current_tracker.getPullProgressFor(current_meta); + } + + // TODO HIGH tidy this - see SkeletonBTreeMap.inflate() for details + Progress last = null; + /*@Override**/ public void run() { + try { + // get the root container + SkeletonBTreeSet root; + for (;;) { + try { + root = ttab.get(subject); + break; + } catch (DataNotLoadedException d) { + Skeleton p = d.getParent(); + trackers.put(current_meta = d.getValue(), current_tracker = ((Serialiser.Trackable)p.getSerialiser()).getTracker()); + p.inflate(d.getKey()); + } + } + + if (root == null) { + // TODO HIGH better way to handle this + throw new TaskAbortException("Index does not contain term " + subject, new Exception("Index does not contain term " + subject)); + } + last = root.getProgressInflate(); // REMOVE ME + root.inflate(); + + // Post-process relevance. 
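+				// This is inverse-document-frequency style weighting: the rarer a term, the
+				// bigger its boost. Hypothetical numbers: with totalPages = 10000, a term
+				// matching 100 pages gets multiplier ln(10000/100), about 4.6, while a term
+				// matching 9000 pages gets ln(10000/9000), about 0.1. A negative multiplier
+				// is only possible if specific > total, which the check below treats as an error.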
+ double multiplier = 1.0; + if(totalPages > 0) { + long total = totalPages; // Number of pages total + long specific = root.size(); // Number of pages in this entry + multiplier = Math.log(((double)total) / ((double)specific)); + if(multiplier < 0) { + Logger.error(this, "Negative multiplier!: "+multiplier+" total = "+total+" specific = "+root.size()); + multiplier = 1.0; + } else + Logger.normal(this, "Correcting results: "+multiplier); + } + Set entries = wrapper(root, multiplier); + + setResult(entries); + + } catch (TaskAbortException e) { + setError(e); + return; + } + } + + private Set wrapper(final SkeletonBTreeSet root, final double relAdjustment) { + return new AbstractSet() { + + public boolean add(TermEntry arg0) { + throw new UnsupportedOperationException(); + } + + public boolean addAll(Collection arg0) { + throw new UnsupportedOperationException(); + } + + public void clear() { + throw new UnsupportedOperationException(); + } + + public boolean contains(Object arg0) { + return root.contains(arg0); + } + + public boolean containsAll(Collection arg0) { + return root.containsAll(arg0); + } + + public boolean isEmpty() { + return root.isEmpty(); + } + + public Iterator iterator() { + final Iterator entries = root.iterator(); + return new Iterator() { + public boolean hasNext() { + return entries.hasNext(); + } + + public TermEntry next() { + TermEntry t = entries.next(); + if(t instanceof TermPageEntry && relAdjustment != 1.0) { + // Adjust relevance + return new TermPageEntry((TermPageEntry)t, (float)(relAdjustment*t.rel)); + } else + return t; + } + + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + public boolean remove(Object arg0) { + throw new UnsupportedOperationException(); + } + + public boolean removeAll(Collection arg0) { + throw new UnsupportedOperationException(); + } + + public boolean retainAll(Collection arg0) { + throw new UnsupportedOperationException(); + } + + public int size() { + return root.size(); + } + + }; + } + + } + + + public void setName(String indexName) { + this.name = indexName; + } + + public void setTotalPages(long total) { + totalPages = total; + } + + public void setOwner(String owner) { + this.indexOwnerName = owner; + } + + public void setOwnerEmail(String address) { + this.indexOwnerEmail = address; + } + + public String getName() { + return name; + } + + public String getOwnerEmail() { + return indexOwnerEmail; + } + + public String getOwner() { + return indexOwnerName; + } + + public long getTotalPages() { + return totalPages; + } + + + +} diff --git a/src/main/java/plugins/Library/index/ProtoIndexComponentSerialiser.java b/src/main/java/plugins/Library/index/ProtoIndexComponentSerialiser.java new file mode 100644 index 00000000..73d6f4f4 --- /dev/null +++ b/src/main/java/plugins/Library/index/ProtoIndexComponentSerialiser.java @@ -0,0 +1,586 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.index; + +import plugins.Library.Library; +import plugins.Library.client.FreenetArchiver; +import plugins.Library.util.SkeletonTreeMap; +import plugins.Library.util.SkeletonBTreeMap; +import plugins.Library.util.SkeletonBTreeSet; +import plugins.Library.util.exec.ProgressParts; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.BaseCompositeProgress; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.exec.TaskInProgressException; +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.io.serial.Serialiser; +import plugins.Library.io.serial.Translator; +import plugins.Library.io.serial.ProgressTracker; +import plugins.Library.io.serial.Archiver; +import plugins.Library.io.serial.IterableSerialiser; +import plugins.Library.io.serial.MapSerialiser; +import plugins.Library.io.serial.LiveArchiver; +import plugins.Library.io.serial.ParallelSerialiser; +import plugins.Library.io.serial.Packer; +import plugins.Library.io.serial.Packer.Scale; // WORKAROUND javadoc bug #4464323 +import plugins.Library.io.serial.FileArchiver; +import plugins.Library.io.DataFormatException; +import plugins.Library.io.YamlReaderWriter; + +import freenet.keys.FreenetURI; +import freenet.node.RequestStarter; + +import java.io.File; +import java.util.Collection; +import java.util.Set; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.LinkedHashMap; +import java.util.HashMap; +import java.util.TreeSet; +import java.util.TreeMap; +import java.util.Date; + +/** +** Serialiser for the components of a ProtoIndex. +** +** DOCUMENT +** +** @author infinity0 +*/ +public class ProtoIndexComponentSerialiser { + + final protected static HashMap + srl_fmt = new HashMap(); + + final protected static int FMT_FREENET_SIMPLE = 0x2db3c940; + public final static int FMT_FILE_LOCAL = 0xd439e29a; + + public final static int FMT_DEFAULT = FMT_FREENET_SIMPLE; + + /** + ** Converts between a low-level object and a byte stream. + */ + final protected static YamlReaderWriter yamlrw = new YamlReaderWriter(); + + /** + ** Translator for the local entries of a node of the ''term table''. + */ + final protected static Translator>, Map> + ttab_keys_mtr = new TreeMapTranslator>(null); + + /** + ** Translator for the local entries of a node of the ''B-tree'' for a + ** ''term''. + */ + final protected static Translator, Collection> + term_data_mtr = new SkeletonBTreeSet.TreeSetTranslator(); + + /** + ** Translator for {@link URIKey}. + */ + final protected static Translator + utab_keys_ktr = new Translator() { + public String app(URIKey k) { return k.toString(); } + public URIKey rev(String s) { return new URIKey(URIKey.hexToBytes(s)); } + }; + + /** + ** Translator for the local entries of a node of the ''uri table''. + */ + final protected static Translator>, Map> + utab_keys_mtr = new TreeMapTranslator>(utab_keys_ktr); + + /** + ** Serialiser for the ''targets'' of the values stored in a node of the + ** ''B-tree'' for a ''term''. In this case, the values are the actual + ** targets and are stored inside the node, so we use a dummy. + */ + final protected static MapSerialiser term_dummy = new DummySerialiser(); + + /** + ** Serialiser for the ''targets'' of the values stored in a node of the + ** ''B-tree'' for a ''urikey''. In this case, the values are the actual + ** targets and are stored inside the node, so we use a dummy. 
+ */ + final protected static MapSerialiser uri_dummy = new DummySerialiser(); + + /** + ** {@link Scale} for the root node of the ''B-tree'' that holds + ** ''term entries'' for a ''term''. + */ + final protected static Packer.Scale> + term_data_scale = new Packer.Scale>() { + @Override public int weigh(SkeletonBTreeSet element) { + return element.sizeRoot(); + } + }; + + /** + ** {@link Scale} for the root node of the ''B-tree'' that holds + ** ''uri-entry mappings'' for a ''urikey''. + */ + final protected static Packer.Scale> + uri_data_scale = new Packer.Scale>() { + @Override public int weigh(SkeletonBTreeMap element) { + return element.sizeRoot(); + } + }; + + /** + ** Get the instance responsible for the given format ID. + ** + ** @param archiver The LiveArchiver to use, can be null. + ** @throws UnsupportedOperationException if the format ID is unrecognised + ** @throws IllegalStateException if the requirements for creating the + ** instance (eg. existence of a freenet node) are not met. + */ + public synchronized static ProtoIndexComponentSerialiser get(int fmtid, LiveArchiver, SimpleProgress> archiver) { + ProtoIndexComponentSerialiser srl = srl_fmt.get(fmtid); + if (srl == null || srl.leaf_arx != null && srl.leaf_arx != archiver) { + srl = new ProtoIndexComponentSerialiser(fmtid, archiver); + if(archiver == null) + srl_fmt.put(fmtid, srl); + } + return srl; + } + + /** + ** Get the instance responsible for the default format ID. + ** + ** @throws IllegalStateException if the requirements for creating the + ** instance (eg. existence of a freenet node) are not met. + */ + public static ProtoIndexComponentSerialiser get() { + return get(FMT_DEFAULT, null); + } + + /** + ** Unique code for each instance of this class. + */ + final protected int serialFormatUID; + + /** + ** Archiver for the lowest level (leaf). + */ + final protected LiveArchiver, SimpleProgress> + leaf_arx; + + public LiveArchiver, SimpleProgress> getLeafSerialiser() { + return leaf_arx; + } + + /** + ** Serialiser for the ''targets'' of the values stored in a node of the + ** ''term table''. In this case, each target is the root node of the + ** ''B-tree'' that holds ''term entries'' for the ''term'' mapping to the + ** value. + */ + final protected BTreePacker, EntryGroupSerialiser>> + ttab_data; + + /** + ** Serialiser for the ''targets'' of the values stored in a node of the + ** ''uri table''. In this case, each target is the root node of the + ** ''B-tree'' that holds ''uri-entry mappings'' for the ''urikey'' mapping + ** to the value. + */ + final protected BTreePacker, EntryGroupSerialiser>> + utab_data; + + /** + ** Constructs a new instance using the given format. + * @param archiver + ** + ** @throws UnsupportedOperationException if the format ID is unrecognised + ** @throws IllegalStateException if the requirements for creating the + ** instance (eg. existence of a freenet node) are not met. 
+ */ + protected ProtoIndexComponentSerialiser(int fmtid, LiveArchiver, SimpleProgress> archiver) { + if(archiver != null) { + leaf_arx = archiver; + } else { + switch (fmtid) { + case FMT_FREENET_SIMPLE: + short priorityClass = RequestStarter.INTERACTIVE_PRIORITY_CLASS; + if(archiver instanceof FreenetArchiver) + priorityClass = ((FreenetArchiver)archiver).priorityClass; + leaf_arx = Library.makeArchiver(yamlrw, ProtoIndex.MIME_TYPE, 0x180 * ProtoIndex.BTREE_NODE_MIN, priorityClass); + break; + case FMT_FILE_LOCAL: + leaf_arx = new FileArchiver>(yamlrw, true, YamlReaderWriter.FILE_EXTENSION, "", "", null); + break; + default: + throw new UnsupportedOperationException("Unknown serial format id"); + } + } + + serialFormatUID = fmtid; + + ttab_data = new BTreePacker, EntryGroupSerialiser>>( + new EntryGroupSerialiser>( + leaf_arx, + null, + new SkeletonBTreeSet.TreeTranslator(null, term_data_mtr) { + @Override public SkeletonBTreeSet rev(Map tree) throws DataFormatException { + return setSerialiserFor(super.rev(tree)); + } + } + ), + term_data_scale, + ProtoIndex.BTREE_ENT_MAX + ); + + utab_data = new BTreePacker, EntryGroupSerialiser>>( + new EntryGroupSerialiser>( + leaf_arx, + null, + new SkeletonBTreeMap.TreeTranslator(null, null) { + @Override public SkeletonBTreeMap rev(Map tree) throws DataFormatException { + return setSerialiserFor(super.rev(tree)); + } + } + ), + uri_data_scale, + ProtoIndex.BTREE_ENT_MAX + ); + } + + /** + ** Set the serialisers for the ''uri table'' and the ''term table'' on an + ** index. + */ + public ProtoIndex setSerialiserFor(ProtoIndex index) { + // set serialisers on the ttab + BTreeNodeSerialiser> ttab_keys = new BTreeNodeSerialiser>( + "term listings", + leaf_arx, + index.ttab.makeNodeTranslator(null, ttab_keys_mtr) + ); + index.ttab.setSerialiser(ttab_keys, ttab_data); + + // set serialisers on the utab + BTreeNodeSerialiser> utab_keys = new BTreeNodeSerialiser>( + "uri listings", + leaf_arx, + index.utab.makeNodeTranslator(utab_keys_ktr, utab_keys_mtr) + ); + index.utab.setSerialiser(utab_keys, utab_data); + + // set serialiser on the index + index.setSerialiser(this); + return index; + } + + /** + ** Set the serialiser for the ''B-tree'' that holds the ''uri-entry + ** mappings'' for a ''urikey''. + */ + public SkeletonBTreeMap setSerialiserFor(SkeletonBTreeMap entries) { + BTreeNodeSerialiser uri_keys = new BTreeNodeSerialiser( + "uri entries", + leaf_arx, + entries.makeNodeTranslator(null, null) // no translator needed as FreenetURI and URIEntry are both directly serialisable by YamlReaderWriter + ); + entries.setSerialiser(uri_keys, uri_dummy); + return entries; + } + + /** + ** Set the serialiser for the ''B-tree'' that holds the ''term entries'' + ** for a ''term''. + */ + public SkeletonBTreeSet setSerialiserFor(SkeletonBTreeSet entries) { + BTreeNodeSerialiser term_keys = new BTreeNodeSerialiser( + "term entries", + leaf_arx, + entries.makeNodeTranslator(null, term_data_mtr) + ); + entries.setSerialiser(term_keys, term_dummy); + return entries; + } + + + /************************************************************************ + ** Generic {@link SkeletonTreeMap} translator. 
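+	 ** (For example, utab_keys_mtr above pairs this with utab_keys_ktr, so URIKey keys are
+	 ** written out as hex strings; ttab_keys_mtr passes a null key translator, so String
+	 ** keys go through unchanged.)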
+ ** + ** @author infinity0 + */ + public static class TreeMapTranslator + extends SkeletonTreeMap.TreeMapTranslator { + + final protected Translator ktr; + + public TreeMapTranslator(Translator k) { + ktr = k; + } + + /*@Override**/ public Map app(SkeletonTreeMap map) { + return app(map, new TreeMap(), ktr); + } + + /*@Override**/ public SkeletonTreeMap rev(Map map) throws DataFormatException { + return rev(map, new SkeletonTreeMap(), ktr); + } + } + + + /************************************************************************ + ** Serialiser for a general B-tree node. This can be used for both a + ** {@link SkeletonBTreeMap} and a {@link SkeletonBTreeSet}; they use the + ** same node class. + ** + ** @author infinity0 + */ + public static class BTreeNodeSerialiser + extends ParallelSerialiser.SkeletonNode, SimpleProgress> + implements Archiver.SkeletonNode>, + Serialiser.Translate.SkeletonNode, Map>, + Serialiser.Composite, SimpleProgress>> { + + final protected Translator.SkeletonNode, Map> trans; + final protected LiveArchiver, SimpleProgress> subsrl; + + protected String name; + + /** + ** Constructs a new serialiser. A new one must be constructed for each + ** BTree being serialised. + ** + ** @param n Description of what the map stores. This is used in the + ** progress report. + ** @param t Translator for the node - create this using {@link + ** SkeletonBTreeMap#makeNodeTranslator(Translator, Translator)}. + */ + public BTreeNodeSerialiser(String n, LiveArchiver, SimpleProgress> s, SkeletonBTreeMap.NodeTranslator t) { + super(new ProgressTracker.SkeletonNode, SimpleProgress>(SimpleProgress.class)); + subsrl = s; + trans = t; + name = n; + } + + /*@Override**/ public Translator.SkeletonNode, Map> getTranslator() { + return trans; + } + + /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { + return subsrl; + } + + /*@Override**/ public void pullLive(PullTask.SkeletonNode> task, SimpleProgress p) throws TaskAbortException { + p.enteredSerialiser(); + try { + SkeletonBTreeMap.GhostNode ghost = (SkeletonBTreeMap.GhostNode)task.meta; + p.setSubject("Pulling " + name + ": " + ghost.getRange()); + PullTask> serialisable = new PullTask>(ghost.getMeta()); + subsrl.pullLive(serialisable, p); + ghost.setMeta(serialisable.meta); task.data = trans.rev(serialisable.data); + p.exitingSerialiser(); + } catch (RuntimeException e) { + p.abort(new TaskAbortException("Could not pull B-tree node", e)); + } catch (DataFormatException e) { + p.abort(new TaskAbortException("Could not pull B-tree node", e)); + } + } + + /*@Override**/ public void pushLive(PushTask.SkeletonNode> task, SimpleProgress p) throws TaskAbortException { + p.enteredSerialiser(); + try { + p.setSubject("Pushing " + name + ": " + task.data.getRange()); + Map intermediate = trans.app(task.data); + PushTask> serialisable = new PushTask>(intermediate, task.meta); + subsrl.pushLive(serialisable, p); + task.meta = task.data.makeGhost(serialisable.meta); + p.exitingSerialiser(); + } catch (RuntimeException e) { + p.abort(new TaskAbortException("Could not push B-tree node", e)); + } + } + + } + + + /************************************************************************ + ** Serialiser for the entries contained within a B-tree node. 
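+	 ** (Values are weighed with the supplied {@link Packer.Scale}, here the size of each
+	 ** subtree's root node, and packed into bins of capacity ProtoIndex.BTREE_ENT_MAX;
+	 ** each bin is then handled by the EntryGroupSerialiser below.)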
+ ** + ** @author infinity0 + */ + public static class BTreePacker> & Serialiser.Trackable>> + extends Packer + implements MapSerialiser, + Serialiser.Trackable { + + final protected ProgressTracker tracker; + final protected S subsrl; + + public BTreePacker(S s, Packer.Scale sc, int cap) { + super(s, sc, cap); + subsrl = s; + tracker = new ProgressTracker(BaseCompositeProgress.class); + } + + /*@Override**/ public ProgressTracker getTracker() { + return tracker; + } + + @Override protected void preprocessPullBins(Map> tasks, Collection>> bintasks) { + for (Map.Entry> en: tasks.entrySet()) { + try { + BaseCompositeProgress p = tracker.addPullProgress(en.getValue()); + p.setSubProgress(ProgressTracker.makePullProgressIterable(subsrl.getTracker(), bintasks)); + p.setEstimate(ProgressParts.TOTAL_FINALIZED); + p.setSubject("Pulling root container for " + en.getKey()); + } catch (TaskInProgressException e) { + throw new AssertionError(e); + } + } + } + + @Override protected void preprocessPushBins(Map> tasks, Collection>> bintasks) { + for (Map.Entry> en: tasks.entrySet()) { + try { + BaseCompositeProgress p = tracker.addPushProgress(en.getValue()); + p.setSubProgress(ProgressTracker.makePushProgressIterable(subsrl.getTracker(), bintasks)); + p.setEstimate(ProgressParts.TOTAL_FINALIZED); + p.setSubject("Pushing root container for " + en.getKey()); + } catch (TaskInProgressException e) { + throw new AssertionError(e); + } + } + } + + } + + + /************************************************************************ + ** Serialiser for the bins containing (B-tree root nodes) that was packed + ** by the {@link BTreePacker}. + ** + ** @author infinity0 + */ + public static class EntryGroupSerialiser + extends ParallelSerialiser, SimpleProgress> + implements IterableSerialiser>, + Serialiser.Composite, SimpleProgress>> { + + final protected LiveArchiver, SimpleProgress> subsrl; + final protected Translator ktr; + final protected Translator> btr; + + public EntryGroupSerialiser(LiveArchiver, SimpleProgress> s, Translator k, Translator> b) { + super(new ProgressTracker, SimpleProgress>(SimpleProgress.class)); + subsrl = s; + ktr = k; + btr = b; + } + + /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { + return subsrl; + } + + /*@Override**/ public void pullLive(PullTask> task, SimpleProgress p) throws TaskAbortException { + p.enteredSerialiser(); + try { + p.setSubject("Pulling root container " + task.meta); + PullTask> t = new PullTask>(task.meta); + subsrl.pullLive(t, p); + + Map map = new HashMap(t.data.size()<<1); + try { + for (Map.Entry en: t.data.entrySet()) { + map.put((ktr == null)? (K)en.getKey(): ktr.rev(en.getKey()), btr.rev((Map)en.getValue())); + } + } catch (ClassCastException e) { + // FIXME NORM more meaningful error message + throw new DataFormatException("Exception in converting data", e, t.data, null, null); + } + + task.data = map; + p.exitingSerialiser(); + } catch (RuntimeException e) { + p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); + } catch (DataFormatException e) { + p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); + } + } + + /*@Override**/ public void pushLive(PushTask> task, SimpleProgress p) throws TaskAbortException { + p.enteredSerialiser(); + try { + p.setSubject("Pushing root container for keys " + task.data); + Map conv = new HashMap(); + for (Map.Entry mp: task.data.entrySet()) { + conv.put((ktr == null)? 
(String)mp.getKey(): ktr.app(mp.getKey()), btr.app(mp.getValue())); + } + PushTask> t = new PushTask>(conv, task.meta); + subsrl.pushLive(t, p); + task.meta = t.meta; + p.exitingSerialiser(); + } catch (RuntimeException e) { + p.abort(new TaskAbortException("Failed task: " + p.getSubject(), e)); + } + } + + } + + + /************************************************************************ + ** Dummy serialiser. + ** + ** A SkeletonBTreeMap node was designed to hold values externally, whereas + ** (a B-tree node in the (BTreeSet container for a term's results)) and + ** (a B-tree node in the (BTreeMap container for a uri's results)) hold + ** them internally. This class simplifies the logic required; see the + ** source code of {@link SkeletonBTreeMap} for more details. + ** + ** @deprecated Avoid using this class. It will be removed after option 1 + ** (see the source code of {@link SkeletonBTreeMap}) is coded. + ** @author infinity0 + */ + public static class DummySerialiser + implements IterableSerialiser, + MapSerialiser { + + + public void pull(PullTask task) { + task.data = (T)task.meta; + } + + public void push(PushTask task) { + task.meta = task.data; + } + + public void pull(Iterable> tasks) { + for (PullTask task: tasks) { + task.data = (T)task.meta; + } + } + + public void push(Iterable> tasks) { + for (PushTask task: tasks) { + task.meta = task.data; + } + } + + public void pull(Map> tasks, Object mapmeta) { + for (PullTask task: tasks.values()) { + if(task.meta != null) + task.data = (T)task.meta; + } + } + + public void push(Map> tasks, Object mapmeta) { + for (PushTask task: tasks.values()) { + if(task.data != null) + task.meta = task.data; + } + } + + } + + + +} diff --git a/src/main/java/plugins/Library/index/ProtoIndexSerialiser.java b/src/main/java/plugins/Library/index/ProtoIndexSerialiser.java new file mode 100644 index 00000000..09c1a10a --- /dev/null +++ b/src/main/java/plugins/Library/index/ProtoIndexSerialiser.java @@ -0,0 +1,230 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.index; + +import plugins.Library.Library; +import plugins.Library.client.FreenetArchiver; +import plugins.Library.util.SkeletonBTreeMap; +import plugins.Library.util.SkeletonBTreeSet; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.io.serial.LiveArchiver; +import plugins.Library.io.serial.Serialiser; +import plugins.Library.io.serial.Translator; +import plugins.Library.io.serial.Archiver; +import plugins.Library.io.serial.FileArchiver; +import plugins.Library.io.YamlReaderWriter; +import plugins.Library.io.DataFormatException; + +import freenet.keys.FreenetURI; + +import java.util.Collection; +import java.util.Set; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.LinkedHashMap; +import java.util.HashMap; +import java.util.TreeSet; +import java.util.TreeMap; +import java.util.Date; +import java.io.File; + +/** +** Serialiser for ProtoIndex +** +** DOCUMENT +** +** @author infinity0 +*/ +public class ProtoIndexSerialiser +implements Archiver, + Serialiser.Composite>>, // TODO NORM make this a LiveArchiver + Serialiser.Translate>/*, + Serialiser.Trackable*/ { + + final public static String MIME_TYPE = YamlReaderWriter.MIME_TYPE; + final public static String FILE_EXTENSION = YamlReaderWriter.FILE_EXTENSION; + + final protected Translator> + trans; + + final protected LiveArchiver, SimpleProgress> + subsrl; + + public ProtoIndexSerialiser(LiveArchiver, SimpleProgress> s) { + subsrl = s; + trans = new IndexTranslator(subsrl); + } + + /* FIXME HIGH: Parallelism in fetching multiple words for the same query. + * A single serialiser means when we fetch two words for the same query, and they both end up in the same + * bucket, we get an AssertionError when we fetch the bucket twice in ProgressTracker.addPullProgress. + * So the solution, for the time being, is simply to use two separate serialisers. */ + +// final protected static HashMap, ProtoIndexSerialiser> +// srl_cls = new HashMap, ProtoIndexSerialiser>(); + + public static ProtoIndexSerialiser forIndex(Object o, short priorityClass) { + if (o instanceof FreenetURI) { + return forIndex((FreenetURI)o, priorityClass); + } else if (o instanceof File) { + return forIndex((File)o); + } else { + throw new UnsupportedOperationException("Don't know how to retrieve index for object " + o); + } + } + + public static ProtoIndexSerialiser forIndex(FreenetURI uri, short priorityClass) { +// ProtoIndexSerialiser srl = srl_cls.get(FreenetURI.class); +// if (srl == null) { +// // java's type-inference isn't that smart, see +// FreenetArchiver> arx = Library.makeArchiver(ProtoIndexComponentSerialiser.yamlrw, MIME_TYPE, 0x80 * ProtoIndex.BTREE_NODE_MIN); +// srl_cls.put(FreenetURI.class, srl = new ProtoIndexSerialiser(arx)); +// } +// return srl; + + // One serialiser per application. See comments above re srl_cls. 
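+		// Typical usage, as in the merge code earlier in this patch (no new API assumed):
+		//   ProtoIndexSerialiser srl = ProtoIndexSerialiser.forIndex(uri, RequestStarter.BULK_SPLITFILE_PRIORITY_CLASS);
+		//   PullTask<ProtoIndex> pull = new PullTask<ProtoIndex>(uri);
+		//   srl.pull(pull);
+		//   ProtoIndex idx = pull.data;  // deserialised index skeleton, ready to inflate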
+ // java's type-inference isn't that smart, see + FreenetArchiver> arx = Library.makeArchiver(ProtoIndexComponentSerialiser.yamlrw, MIME_TYPE, 0x80 * ProtoIndex.BTREE_NODE_MIN, priorityClass); + return new ProtoIndexSerialiser(arx); + } + + public static ProtoIndexSerialiser forIndex(File prefix) { +// ProtoIndexSerialiser srl = srl_cls.get(File.class); +// if (srl == null) { +// srl_cls.put(File.class, srl = new ProtoIndexSerialiser(new FileArchiver>(ProtoIndexComponentSerialiser.yamlrw, true, FILE_EXTENSION))); +// } +// return srl; + + // One serialiser per application. See comments above re srl_cls. + return new ProtoIndexSerialiser(new FileArchiver>(ProtoIndexComponentSerialiser.yamlrw, true, FILE_EXTENSION, "", "", prefix)); + } + + /*@Override**/ public LiveArchiver, SimpleProgress> getChildSerialiser() { + return subsrl; + } + + /*@Override**/ public Translator> getTranslator() { + return trans; + } + + /*@Override**/ public void pull(PullTask task) throws TaskAbortException { + PullTask> serialisable = new PullTask>(task.meta); + subsrl.pull(serialisable); + task.meta = serialisable.meta; + if (task.meta instanceof FreenetURI) { // if not FreenetURI, skip this silently so we can test on local files + serialisable.data.put("reqID", task.meta); + } + try { + task.data = trans.rev(serialisable.data); + } catch (DataFormatException e) { + throw new TaskAbortException("Could not construct index from data", e); + } + } + + /*@Override**/ public void push(PushTask task) throws TaskAbortException { + PushTask> serialisable = new PushTask>(trans.app(task.data)); + serialisable.meta = serialisable.data.remove("insID"); + subsrl.push(serialisable); + task.meta = serialisable.meta; + } + + public static class IndexTranslator + implements Translator> { + + /** + ** Term-table translator + */ + Translator>, Map> ttrans = new + SkeletonBTreeMap.TreeTranslator>(null, new + ProtoIndexComponentSerialiser.TreeMapTranslator>(null)); + + /** + ** URI-table translator + */ + Translator>, Map> utrans = new + SkeletonBTreeMap.TreeTranslator>(null, new + ProtoIndexComponentSerialiser.TreeMapTranslator>(null)); + + private LiveArchiver, SimpleProgress> subsrl; + + public IndexTranslator(LiveArchiver, SimpleProgress> subsrl) { + this.subsrl = subsrl; + } + + /** + ** {@inheritDoc} + ** + ** Note: the resulting map will contain the insert SSK URI under the + ** key {@code insID}. The intention is for push methods to remove this + ** and add it to the task metadata. '''Failure to do this could result + ** in the insert URI being published'''. + ** + ** FIXME NORM maybe make this more secure, eg. wrap it in a + ** UnserialisableWrapper or something that makes YAML throw an + ** exception if it is accidentally passed to it. + */ + /*@Override**/ public Map app(ProtoIndex idx) { + if (!idx.ttab.isBare() || !idx.utab.isBare()) { + throw new IllegalArgumentException("Data structure is not bare. 
Try calling deflate() first."); + } + Map map = new LinkedHashMap(); + map.put("serialVersionUID", idx.serialVersionUID); + map.put("serialFormatUID", idx.serialFormatUID); + map.put("insID", idx.insID); + map.put("name", idx.name); + map.put("ownerName", idx.indexOwnerName); + map.put("ownerEmail", idx.indexOwnerEmail); + map.put("totalPages", new Long(idx.totalPages)); + map.put("modified", idx.modified); + map.put("extra", idx.extra); + map.put("utab", utrans.app(idx.utab)); + map.put("ttab", ttrans.app(idx.ttab)); + return map; + } + + /*@Override**/ public ProtoIndex rev(Map map) throws DataFormatException { + long magic = (Long)map.get("serialVersionUID"); + + if (magic == ProtoIndex.serialVersionUID) { + try { + // FIXME yet more hacks related to the lack of proper asynchronous FreenetArchiver... + ProtoIndexComponentSerialiser cmpsrl = ProtoIndexComponentSerialiser.get((Integer)map.get("serialFormatUID"), subsrl); + FreenetURI reqID = (FreenetURI)map.get("reqID"); + String name = (String)map.get("name"); + String ownerName = (String)map.get("ownerName"); + String ownerEmail = (String)map.get("ownerEmail"); + // FIXME yaml idiocy??? It seems to give a Long if the number is big enough to need one, and an Integer otherwise. + long totalPages; + Object o = map.get("totalPages"); + if(o instanceof Long) + totalPages = (Long)o; + else // Integer + totalPages = (Integer)o; + Date modified = (Date)map.get("modified"); + Map extra = (Map)map.get("extra"); + SkeletonBTreeMap> utab = utrans.rev((Map)map.get("utab")); + SkeletonBTreeMap> ttab = ttrans.rev((Map)map.get("ttab")); + + return cmpsrl.setSerialiserFor(new ProtoIndex(reqID, name, ownerName, ownerEmail, totalPages, modified, extra, utab, ttab)); + + } catch (ClassCastException e) { + // TODO LOW maybe find a way to pass the actual bad data to the exception + throw new DataFormatException("Badly formatted data", e, null); + + } catch (UnsupportedOperationException e) { + throw new DataFormatException("Unrecognised format ID", e, map.get("serialFormatUID"), map, "serialFormatUID"); + + } + + } else { + throw new DataFormatException("Unrecognised serial ID", null, magic, map, "serialVersionUID"); + } + } + + } + +} diff --git a/src/main/java/plugins/Library/index/TermEntry.java b/src/main/java/plugins/Library/index/TermEntry.java new file mode 100644 index 00000000..e5832b5a --- /dev/null +++ b/src/main/java/plugins/Library/index/TermEntry.java @@ -0,0 +1,137 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index; + +/** +** Represents data associated with a given subject {@link String} term. +** +** Subclasses '''must''' override the following methods to use information +** specific to that subclass: +** +** * {@link #entryType()} +** * {@link #compareTo(TermEntry)} +** * {@link #equals(Object)} +** * {@link #hashCode()} +** +** @author infinity0 +*/ +abstract public class TermEntry implements Comparable { + + final static long serialVersionUID = 0xF23194B7F015560CL; + + public enum EntryType { + INDEX, TERM, PAGE + }; + + /** + ** Subject term of this entry. + */ + final public String subj; + + /** + ** Relevance rating. Must be in the half-closed interval (0,1]. 
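+	 ** (Stated as code, the intended invariant for this field is roughly
+	 ** {@code rel == 0 || (0 < rel && rel <= 1)}, with 0 as the "unset"
+	 ** sentinel; note the constructor below currently enforces only the
+	 ** lower bound, per the FIXME there.)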
+ ** a relevance of 0 indicates that the relevance is unset + */ + final public float rel; + + public TermEntry(String s, float r) { + if (s == null) { + throw new IllegalArgumentException("can't have a null subject!"); + } + if (r < 0/* || r > 1*/) { // FIXME: I don't see how our relevance algorithm can be guaranteed to produce relevance <1. + throw new IllegalArgumentException("Relevance must be in the half-closed interval (0,1]. Supplied: " + r); + } + subj = s.intern(); + rel = r; + } + + public TermEntry(TermEntry t, float newRel) { + this.subj = t.subj; + this.rel = newRel; + } + + /** + ** Returns the type of TermEntry. This '''must''' be constant for + ** instances of the same class, and different between classes. + */ + abstract public EntryType entryType(); + + /** + ** {@inheritDoc} + ** + ** Compares two entries, based on how useful they might be to the end user. + ** + ** This implementation sorts by order of descending relevance, then by + ** order of ascending {@link #entryType()}. + ** + ** Subclasses '''must overridde this method'' to also use information + ** specific to the subclass. + ** + ** @throws IllegalArgumentException if the entries have different subjects + */ + /*@Override**/ public int compareTo(TermEntry o) { + if (this == o) { return 0; } + int c = subj.compareTo(o.subj); + if (c != 0) { return c; } + if (rel != o.rel) { return (rel > o.rel)? -1: 1; } + + EntryType a = entryType(), b = o.entryType(); + return a.compareTo(b); + } + + /** + ** {@inheritDoc} + ** + ** This implementation tests whether the run-time classes of the argument + ** is identical to the run-time class of this object. If they are, then + ** it tests the relevance and subject fields. + ** + ** Subclasses '''must overridde this method'' to also use information + ** specific to the subclass. + */ + @Override public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) { return false; } + TermEntry en = (TermEntry)o; + return rel == en.rel && subj.equals(en.subj); + } + + /** + ** Returns whether the parts of this TermEntry other than the subject are equal + ** + ** Subclasses '''must overridde this method'' to use information + ** specific to the subclass. + */ + public abstract boolean equalsTarget(TermEntry entry); + + /** + ** {@inheritDoc} + ** + ** This implementation XORs the hashcode of {@link #subj} and {@link #rel}. + ** + ** Subclasses '''must overridde this method'' to also use information + ** specific to the subclass. + */ + @Override public int hashCode() { + return subj.hashCode() ^ Float.floatToIntBits(rel); + } + + @Override public String toString() { + return subj + ":" + rel; + } + + + /* + ** Calculates an accumulated score to sort the entry by, using the formula + ** relevance^3 * quality; in other words, relevance is (for sorting + ** results) 3 times as important as quality. For example, for (rel, qual) + ** we have (0.85, 0.95) < (1.0, 0.6) < (0.85, 1.00) + * / + public float score() { + if (score_ == null) { + score_ = new Float(rel * rel* rel * qual); + } + return score_; + }*/ + +} diff --git a/src/main/java/plugins/Library/index/TermEntryReaderWriter.java b/src/main/java/plugins/Library/index/TermEntryReaderWriter.java new file mode 100644 index 00000000..1b73fd36 --- /dev/null +++ b/src/main/java/plugins/Library/index/TermEntryReaderWriter.java @@ -0,0 +1,124 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). 
See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index; + +import plugins.Library.io.DataFormatException; +import plugins.Library.io.ObjectStreamReader; +import plugins.Library.io.ObjectStreamWriter; + +import freenet.keys.FreenetURI; + +import java.util.Map; +import java.util.HashMap; + +import java.io.InputStream; +import java.io.OutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +/** +** Reads and writes {@link TermEntry}s in binary form, for performance. +** +** @author infinity0 +*/ +public class TermEntryReaderWriter implements ObjectStreamReader, ObjectStreamWriter { + + final private static TermEntryReaderWriter instance = new TermEntryReaderWriter(); + + protected TermEntryReaderWriter() {} + + public static TermEntryReaderWriter getInstance() { + return instance; + } + + /*@Override**/ public TermEntry readObject(InputStream is) throws IOException { + return readObject(new DataInputStream(is)); + } + + public TermEntry readObject(DataInputStream dis) throws IOException { + long svuid = dis.readLong(); + if (svuid != TermEntry.serialVersionUID) { + throw new DataFormatException("Incorrect serialVersionUID", null, svuid); + } + int type = dis.readInt(); + String subj = dis.readUTF(); + float rel = dis.readFloat(); + TermEntry.EntryType[] types = TermEntry.EntryType.values(); + if (type < 0 || type >= types.length) { + throw new DataFormatException("Unrecognised entry type", null, type); + } + switch (types[type]) { + case TERM: + return new TermTermEntry(subj, rel, dis.readUTF()); + case INDEX: + return new TermIndexEntry(subj, rel, FreenetURI.readFullBinaryKeyWithLength(dis)); + case PAGE: + FreenetURI page = FreenetURI.readFullBinaryKeyWithLength(dis); + int size = dis.readInt(); + String title = null; + if (size < 0) { + title = dis.readUTF(); + size = ~size; + } + Map pos = new HashMap(size<<1); + for (int i=0; i p : enn.positionsMap().entrySet()) { + dos.writeInt(p.getKey()); + if(p.getValue() == null) + dos.writeUTF(""); + else + dos.writeUTF(p.getValue()); + } + } else { + for(int x : enn.positionsRaw()) { + dos.writeInt(x); + dos.writeUTF(""); + } + } + } + return; + } + } + +} diff --git a/src/main/java/plugins/Library/index/TermIndexEntry.java b/src/main/java/plugins/Library/index/TermIndexEntry.java new file mode 100644 index 00000000..a239c9d9 --- /dev/null +++ b/src/main/java/plugins/Library/index/TermIndexEntry.java @@ -0,0 +1,58 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index; + +import plugins.Library.index.TermEntry.EntryType; + +import freenet.keys.FreenetURI; + +/** +** A {@link TermEntry} that associates a subject term with another index. +** +** @author infinity0 +*/ +public class TermIndexEntry extends TermEntry { + + /** + ** Index target of this entry. 
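+	 ** (For illustration, with hypothetical values: an entry constructed as
+	 ** {@code new TermIndexEntry("freenet", 0.8f, uri)} records that the
+	 ** index rooted at {@code uri} also holds results for the term
+	 ** {@code "freenet"}, with relevance 0.8.)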
+ */ + final public FreenetURI index; + + public TermIndexEntry(String s, float r, FreenetURI i) { + super(s, r); + if (i == null) { + throw new IllegalArgumentException("can't have a null index"); + } + index = i.intern(); + } + + /*======================================================================== + abstract public class TermEntry + ========================================================================*/ + + @Override public EntryType entryType() { + assert(getClass() == TermIndexEntry.class); + return EntryType.INDEX; + } + + @Override public int compareTo(TermEntry o) { + int a = super.compareTo(o); + if (a != 0) { return a; } + // OPT NORM make a more efficient way of comparing these + return index.toString().compareTo(((TermIndexEntry)o).index.toString()); + } + + @Override public boolean equals(Object o) { + return o == this || super.equals(o) && index.equals(((TermIndexEntry)o).index); + } + + @Override public boolean equalsTarget(TermEntry entry) { + return entry == this || (entry instanceof TermIndexEntry) && index.equals(((TermIndexEntry)entry).index); + } + + @Override public int hashCode() { + return super.hashCode() ^ index.hashCode(); + } + +} diff --git a/src/main/java/plugins/Library/index/TermPageEntry.java b/src/main/java/plugins/Library/index/TermPageEntry.java new file mode 100644 index 00000000..31604ffe --- /dev/null +++ b/src/main/java/plugins/Library/index/TermPageEntry.java @@ -0,0 +1,240 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index; + +import plugins.Library.index.TermEntry.EntryType; + +import freenet.keys.FreenetURI; +import freenet.support.SortedIntSet; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.SortedSet; + +/** +** A {@link TermEntry} that associates a subject term with a final target +** {@link FreenetURI} that satisfies the term. +** +** @author infinity0 +*/ +public class TermPageEntry extends TermEntry { + + /** + ** URI of the target + */ + final public FreenetURI page; + + /** Positions where the term occurs. May be null if we don't have that data. + * Specified as SortedSet for ObjectBlueprint but will really always be a SortedIntSet. */ + final public Set positions; + + /** + ** Map from positions in the text to a fragment of text around where it occurs. + ** Only non-null if we have the fragments of text (we may have positions but not details), + ** to save memory. + */ + final public Map posFragments; + + /** + ** Here for backwards-compatibility with the old URIWrapper class. + */ + final public String title; + + /** + ** Standard constructor. + ** + ** @param s Subject of the entry + ** @param r Relevance of the entry + ** @param u {@link FreenetURI} of the page + ** @param p Map of positions (where the term appears) to context (fragment + ** surrounding it). + */ + public TermPageEntry(String s, float r, FreenetURI u, Map p) { + this(s, r, u, (String)null, p); + } + + /** + ** Extended constructor with additional {@code title} field for old-style + ** indexes. 
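+	 ** (Hypothetical example: a map {@code {3 -> "...quick brown fox..."}}
+	 ** records that the term occurs at position 3, with the surrounding
+	 ** fragment kept for display. Passing {@code null} means no position
+	 ** data is available, in which case both {@link #positions} and
+	 ** {@link #posFragments} are left null.)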
+ ** + ** @param s Subject of the entry + ** @param r Relevance of the entry + ** @param u {@link FreenetURI} of the page + ** @param t Title or description of the page + ** @param p Map of positions (where the term appears) to context (fragment + ** surrounding it). + */ + public TermPageEntry(String s, float r, FreenetURI u, String t, Map p) { + super(s, r); + if (u == null) { + throw new IllegalArgumentException("can't have a null page"); + } + page = u.intern(); // OPT LOW make the translator use the same URI object as from the URI table? + title = t == null ? null : t.intern(); + if(p == null) { + posFragments = null; + positions = null; + } else { + posFragments = Collections.unmodifiableMap(p); + int[] pos = new int[p.size()]; + int x = 0; + for(Integer i : p.keySet()) + pos[x++] = i; + Arrays.sort(pos); + positions = new SortedIntSet(pos); + } + } + + /** + ** For serialisation. + */ + public TermPageEntry(String s, float r, FreenetURI u, String t, Set pos, Map frags) { + super(s, r); + if (u == null) { + throw new IllegalArgumentException("can't have a null page"); + } + page = u.intern(); // OPT LOW make the translator use the same URI object as from the URI table? + title = t; + if(pos != null) { + if(pos instanceof SortedIntSet) + this.positions = (SortedIntSet) pos; + else { + Integer[] p = pos.toArray(new Integer[pos.size()]); + int[] pp = new int[p.length]; + for(int i=0;i positionsMap() { + if(positions == null) return null; + if(posFragments != null) return posFragments; + HashMap ret = new HashMap(positions.size()); + if(positions instanceof SortedIntSet) { + int[] array = ((SortedIntSet)positions).toArrayRaw(); + for(int x : array) + ret.put(x, null); + return ret; + } else { + Integer[] array = positions.toArray(new Integer[positions.size()]); + if(!(positions instanceof SortedSet)) + Arrays.sort(array); + for(int x : array) + ret.put(x, null); + return ret; + } + } + + public boolean hasPosition(int i) { + return positions.contains(i); + } + + public ArrayList positions() { + if(positions instanceof SortedIntSet) { + int[] array = ((SortedIntSet)positions).toArrayRaw(); + ArrayList pos = new ArrayList(array.length); + for(int x : array) + pos.add(x); + return pos; + } else { + Integer[] array = positions.toArray(new Integer[positions.size()]); + if(!(positions instanceof SortedSet)) + Arrays.sort(array); + ArrayList ret = new ArrayList(positions.size()); + for(int i=0;i terms; + + public URIEntry(FreenetURI u) { + subject = u; + date_checked = new Date(); + terms = new HashSet(); + } + + public FreenetURI getSubject() { + return subject; + } + + public void setSubject(FreenetURI u) { + subject = u; + } + + public float getQuality() { + return qual; + } + + public void setQuality(float q) { + if (q < 0 || q > 1) { + throw new IllegalArgumentException("Relevance must be in the closed interval [0,1]."); + } + qual = q; + } + + public Set getTerms() { + return terms; + } + + public void setTerms(Set t) { + terms = (t == null)? new HashSet(): t; + } + +} diff --git a/src/main/java/plugins/Library/index/URIKey.java b/src/main/java/plugins/Library/index/URIKey.java new file mode 100644 index 00000000..f4c217a1 --- /dev/null +++ b/src/main/java/plugins/Library/index/URIKey.java @@ -0,0 +1,48 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.index; + +import plugins.Library.util.BytePrefixKey; + +import freenet.keys.FreenetURI; +import freenet.keys.BaseClientKey; +import freenet.keys.ClientKey; + +/** +** A {@link BytePrefixKey} backed by the 32-byte routing key for the {@link +** freenet.keys.Key NodeKey} constructed from a {@link FreenetURI}. +** +** @author infinity0 +*/ +public class URIKey extends BytePrefixKey { + + public URIKey() { + super(0x20); + } + + public URIKey(byte[] h) { + super(0x20, h); + } + + public URIKey(FreenetURI u) throws java.net.MalformedURLException { + super(0x20, getNodeRoutingKey(u)); + } + + public static byte[] getNodeRoutingKey(FreenetURI u) throws java.net.MalformedURLException { + try { + return ((ClientKey)BaseClientKey.getBaseKey(u.isUSK()? u.sskForUSK(): u)).getNodeKey().getRoutingKey(); + } catch (ClassCastException e) { + throw new UnsupportedOperationException("Could not get the node routing key for FreenetURI " + u + ". Only CHK/SSK/USK/KSKs are supported."); + } + } + + /*======================================================================== + public interface BytePrefixKey + ========================================================================*/ + + @Override public URIKey clone() { + return new URIKey(hash); + } + +} diff --git a/src/main/java/plugins/Library/index/package-info.java b/src/main/java/plugins/Library/index/package-info.java new file mode 100644 index 00000000..4afd4344 --- /dev/null +++ b/src/main/java/plugins/Library/index/package-info.java @@ -0,0 +1,10 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +/** +** Contains implementations of the index interfaces from the library package, +** as well as classes representing data held within an index. +** +** @author infinity0 +*/ +package plugins.Library.index; diff --git a/src/main/java/plugins/Library/index/xml/FindRequest.java b/src/main/java/plugins/Library/index/xml/FindRequest.java new file mode 100644 index 00000000..74550fcb --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/FindRequest.java @@ -0,0 +1,224 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index.xml; + +import freenet.client.events.ClientEvent; +import freenet.client.events.ExpectedFileSizeEvent; +import freenet.client.events.ExpectedHashesEvent; +import freenet.client.events.ExpectedMIMEEvent; +import freenet.client.events.SendingToNetworkEvent; +import freenet.client.events.SplitfileCompatibilityModeEvent; +import freenet.client.events.SplitfileProgressEvent; +import freenet.support.Logger; + +import java.util.Collections; +import java.util.ArrayList; +import java.util.Set; + +import plugins.Library.util.exec.AbstractExecution; +import plugins.Library.util.exec.ChainedProgress; +import plugins.Library.util.exec.Execution; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.ProgressParts; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.index.TermPageEntry; +import plugins.Library.index.TermEntry; + + +/** + * An {@link Execution} implementation for xmlindex operations which have distinct parts working in serial + * eg a search for 1 term on 1 index. 
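+ * (Sketch of the usual lifecycle, per the Stages enum below:
+ * UNSTARTED -> FETCHROOT -> FETCHSUBINDEX -> PARSE -> DONE; getParts()
+ * derives overall progress from the ordinal of the current stage plus the
+ * fetch events relayed into the current SubProgress.)
+ *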
Only used for XMLIndex + * @author MikeB + */ +public class FindRequest extends AbstractExecution> implements Comparable, ChainedProgress, Execution> { + private Set resultnotfinished; + private SubProgress currentProgress; + + public enum Stages { + UNSTARTED("Not Started"), + FETCHROOT("Fetching Index Root"), + FETCHSUBINDEX("Fetching Subindex"), + PARSE("Parsing Subindex"), + DONE("Done"); + + public final String description; + Stages(String desc){ + this.description = desc; + } + }; + + /** + * Create Request of stated type & subject + * @param subject + */ + public FindRequest(String subject) { + super(subject); + currentProgress = new SubProgress(Stages.UNSTARTED); + } + + /** + * Log Exception for this request + */ + @Override + public void setError(TaskAbortException e) { + super.setError(e); + } + + /** + * Stores an unfinished result, call setFinished() to mark as complete + * @param result + */ + //@Override + public void setResult(Set result){ + resultnotfinished = result; + } + + /** + * Moves the unfinished result so that the Progress is marked as finsihed + * TODO get rid of this way of doing it + */ + public void setFinished(){ + super.setResult(Collections.unmodifiableSet(resultnotfinished)); + resultnotfinished = null; + setStage(Stages.DONE); + } + + public int compareTo(Execution right){ + return subject.compareTo(right.getSubject()); + } + + @Override + public ProgressParts getParts() throws TaskAbortException { + int stage = currentProgress.stage.ordinal(); + int end = Stages.DONE.ordinal(); + return ProgressParts.normalise(stage, (stage getUnfinishedResult() { + return resultnotfinished; + } + + /** + * Set the stage number, between 0 & 4 inclusive TODO : now stage is not used in Progress make this an enum + * @param stage The stage number to set + */ + void setStage(Stages stage) { + currentProgress= new SubProgress(stage); + } + + /** + * Update the ProgressParts of the currentProgress of all the FindRequests in the List with the contents of the ClientEvent + * @param receivingEvent + * @param ce + */ + static void updateWithEvent(ArrayList receivingEvent, ClientEvent ce) { + for (FindRequest findRequest : receivingEvent) { + findRequest.partsFromEvent(ce); + } + } + + /** + * Update the ProgressParts of the currentProgress with the ClientEvent + * @param ce + */ + private void partsFromEvent(ClientEvent ce) { + currentProgress.partsFromEvent(ce); + } + + @Override + public String toString() { + try { + return "FindRequest: " + getStatus() + " " + getParts() + " - "+currentProgress; + } catch (TaskAbortException ex) { + return "Error forming FindRequest string"; + } + } + + /** + * The FindRequest is a chained request so it has + */ + class SubProgress implements Progress { // TODO finish this + private ProgressParts parts = ProgressParts.normalise(0, 1, 2, -1); + boolean done = false; + public final Stages stage; + + /** + * Constructs a SubProgress for specified stage with a default unstarted ProgressParts + * @param stage + */ + public SubProgress (Stages stage){ + this.stage = stage; + parts = ProgressParts.normalise(0, 0, 1, ProgressParts.ESTIMATE_UNKNOWN); + } + + public String getSubject() { + return stage.description; + } + + public String getStatus() { + return stage.description; + } + + public ProgressParts getParts() throws TaskAbortException { + return parts; + } + + public boolean isStarted() { + return true; + } + + public boolean isDone() throws TaskAbortException { + return parts.isDone(); + } + + /** + * Not used + * @throws 
java.lang.InterruptedException + */ + public void join() throws InterruptedException, TaskAbortException { + throw new UnsupportedOperationException("Not supported."); + } + + /** + * Sets the Progress of this to the contents of the CLientEvent + * @param ce + */ + public void partsFromEvent(ClientEvent ce) { +// Logger.normal(this, "Setting parts in "+this+" from event "+ce.getClass()); + if(ce instanceof SplitfileProgressEvent){ + SplitfileProgressEvent spe = (SplitfileProgressEvent)ce; + parts = ProgressParts.normalise(spe.succeedBlocks, spe.minSuccessfulBlocks, spe.minSuccessfulBlocks, spe.finalizedTotal?ProgressParts.TOTAL_FINALIZED:ProgressParts.ESTIMATE_UNKNOWN); + }else if(ce instanceof SendingToNetworkEvent || ce instanceof ExpectedMIMEEvent || ce instanceof ExpectedFileSizeEvent || ce instanceof SplitfileCompatibilityModeEvent || ce instanceof ExpectedHashesEvent) { + // Ignore. + return; + }else{ + parts = ProgressParts.normalise(0, 0); + Logger.error(this, "Fetch progress will not update due to unrecognised ClientEvent : "+ce.getClass().getName()); + } + } + + @Override + public String toString(){ + return parts.toString(); + } + } +} diff --git a/src/main/java/plugins/Library/index/xml/LibrarianHandler.java b/src/main/java/plugins/Library/index/xml/LibrarianHandler.java new file mode 100644 index 00000000..4f0b6327 --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/LibrarianHandler.java @@ -0,0 +1,237 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index.xml; + +import plugins.Library.index.TermPageEntry; + +import freenet.support.Logger; +import freenet.keys.FreenetURI; + +import org.xml.sax.Attributes; +import org.xml.sax.Locator; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; + +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.HashMap; + +/** + * Required for using SAX parser on XML indices + * + * @author swati + * @author MikeB + * + */ +public class LibrarianHandler extends DefaultHandler { + private boolean processingWord; + + // The data about the pages referenced in this subindex + /** file id -> uri */ + private HashMap uris; + /** file id -> title */ + private HashMap titles; + /** file id -> wordCount */ + private HashMap wordCounts; + + // About the whole index + private int totalFileCount; + + // Requests and matches being made + private List requests; + private List wordMatches; + + private int inWordFileCount; + + // About the file tag being processed + private StringBuilder characters; + private String inFileTitle; + private FreenetURI inFileURI; + private int inFileWordCount; + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(LibrarianHandler.class); + } + + /** + * Construct a LibrarianHandler to look for many terms + * @param requests the requests wanting to be resolved by this LibrarianHandler, results are written back to them + * @throws java.lang.Exception + */ + public LibrarianHandler(List requests) { + this.requests = new ArrayList(requests); + for (FindRequest r : requests){ + r.setResult(new HashSet()); + } + } + + @Override public void setDocumentLocator(Locator value) { + + } + + @Override public void endDocument() throws SAXException { + } + + @Override public void 
startDocument() throws SAXException { + if(uris==null || titles ==null){ + uris = new HashMap(); + titles = new HashMap(); + wordCounts = new HashMap(); + } + } + + @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) + throws SAXException { + if(requests.size()==0&&wordMatches.size()==0) + return; + if (rawName == null) { + rawName = localName; + } + String elt_name = rawName; + + if (elt_name.equals("files")){ + processingWord = false; + String fileCount = attrs.getValue("", "totalFileCount"); + if(fileCount != null) + this.totalFileCount = Integer.parseInt(fileCount); + if(logMINOR) Logger.minor(this, "totalfilecount = "+this.totalFileCount); + } + if (elt_name.equals("keywords")) + processingWord = true; + /* + * looks for the word in the given subindex file if the word is found then the parser + * fetches the corresponding fileElements + */ + if (elt_name.equals("word")) { + try { + wordMatches = null; + String match = attrs.getValue("v"); + if (requests!=null){ + wordMatches = new ArrayList(); + for (Iterator it = requests.iterator(); it.hasNext();) { + FindRequest r = it.next(); + if (match.equals(r.getSubject())){ + wordMatches.add(r); + it.remove(); + if(logMINOR) Logger.minor(this, "found word match "+wordMatches); + } + } + if (attrs.getValue("fileCount")!=null) + inWordFileCount = Integer.parseInt(attrs.getValue("fileCount")); + } + } catch (Exception e) { + throw new SAXException(e); + } + } + + if (elt_name.equals("file")) { + if (processingWord == true && wordMatches!=null) { + try{ + String suri = uris.get(attrs.getValue("id")); + inFileURI = new FreenetURI(suri); + synchronized(this){ + if(titles.containsKey(attrs.getValue("id"))) + { + inFileTitle = titles.get(attrs.getValue("id")); + if ((suri).equals(inFileTitle)) + inFileTitle = null; + } + else + inFileTitle = null; + if(wordCounts.containsKey(attrs.getValue("id"))) + inFileWordCount = wordCounts.get(attrs.getValue("id")).intValue(); + else + inFileWordCount = -1; + + characters = new StringBuilder(); + } + } + catch (Exception e) { + Logger.error(this, "Index format may be outdated " + e.toString(), e); + } + + } else if (processingWord == false) { + try { + String id = attrs.getValue("id"); + String key = attrs.getValue("key"); + int l = attrs.getLength(); + String title; + synchronized (this) { + if (l >= 3) { + try { + title = attrs.getValue("title"); + titles.put(id, title); + } catch (Exception e) { + Logger.error(this, "Index Format not compatible " + e.toString(), e); + } + try { + String wordCountString = attrs.getValue("wordCount"); + if(wordCountString != null) { + int wordCount = Integer.parseInt(attrs.getValue("wordCount")); + wordCounts.put(id, wordCount); + } + } catch (Exception e) { + //if(logMINOR) Logger.minor(this, "No wordcount found " + e.toString(), e); + } + } + uris.put(id, key); + } + } + catch (Exception e) { + Logger.error(this, "File id and key could not be retrieved. 
May be due to format clash", e); + } + } + } + } + + @Override public void characters(char[] ch, int start, int length) { + if(processingWord && wordMatches!= null && characters!=null){ + characters.append(ch, start, length); + } + } + + @Override + public void endElement(String namespaceURI, String localName, String qName) { + if(processingWord && wordMatches != null){ + HashMap termpositions = null; + if(characters!=null){ + String[] termposs = characters.toString().split(","); + termpositions = new HashMap(); + for (String pos : termposs) { + try{ + termpositions.put(Integer.valueOf(pos), null); + }catch(NumberFormatException e){ + Logger.error(this, "Position in index not an integer :"+pos, e); + } + } + characters = null; + } + + + + for(FindRequest match : wordMatches){ + Set result = match.getUnfinishedResult(); + float relevance = 0; + +// if(logMINOR) Logger.minor(this, "termcount "+termpositions.size()+" filewordcount = "+inFileWordCount); + if(termpositions!=null && termpositions.size()>0 && inFileWordCount>0 ){ + relevance = (float)(termpositions.size()/(float)inFileWordCount); + if( totalFileCount > 0 && inWordFileCount > 0) + relevance *= Math.log( (float)totalFileCount/(float)inWordFileCount); + //if(logMINOR) Logger.minor(this, "Set relevance of "+pageEntry.getTitle()+" to "+pageEntry.rel+" - "+pageEntry.toString()); + } + + TermPageEntry pageEntry = new TermPageEntry(match.getSubject(), relevance, inFileURI, inFileTitle, termpositions); + result.add(pageEntry); + //if(logMINOR) Logger.minor(this, "added "+inFileURI+ " to "+ match); + } + } + } +} diff --git a/src/main/java/plugins/Library/index/xml/MainIndexParser.java b/src/main/java/plugins/Library/index/xml/MainIndexParser.java new file mode 100644 index 00000000..9e14c99a --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/MainIndexParser.java @@ -0,0 +1,245 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.index.xml; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import org.xml.sax.Attributes; +import org.xml.sax.Locator; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; + +/** + * Main Index Parser + * + * Parser for master index index.xml. This does _NOT_ support index file generate by + * XMLSpider earlier then version 8 (Sep-2007). 
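+ * (A minimal document this parser accepts, reconstructed from the handler
+ * logic below for illustration; element and attribute names come from the
+ * code, values are hypothetical:)
+ *
+ * <pre>
+ * &lt;main_index&gt;
+ *   &lt;header&gt;
+ *     &lt;title&gt;My index&lt;/title&gt;
+ *     &lt;owner&gt;alice&lt;/owner&gt;
+ *   &lt;/header&gt;
+ *   &lt;keywords&gt;
+ *     &lt;subIndex key="0a1b2c"/&gt;
+ *   &lt;/keywords&gt;
+ * &lt;/main_index&gt;
+ * </pre>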
+ * + * @author j16sdiz (1024D/75494252) + */ +public class MainIndexParser extends DefaultHandler { + protected enum V1State { + /** Anything else */ + MAIN, + /** Inside <header> */ + HEADER, + /** One-level inside <header>, such as <title> */ + HEADER_INNER, + /** Inside <keywords> */ + KEYWORDS + } + + protected class V1Handler extends DefaultHandler { + V1State state; + String headerKey; + StringBuilder headerValue; + + @Override + public void startDocument() { + state = V1State.MAIN; + headerKey = null; + headerValue = null; + } + + @Override + public void characters(char ch[], int start, int length) throws SAXException { + if (state != V1State.HEADER_INNER || headerKey == null) + return; + headerValue.append(ch, start, length); + } + + @Override + public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { + switch (state) { + case MAIN: + mainStartElement(uri, localName, qName, attributes); + break; + case HEADER: + headerStartElement(uri, localName, qName, attributes); + break; + case HEADER_INNER: + headerInnerStartElement(uri, localName, qName, attributes); + break; + case KEYWORDS: + keywordsStartElement(uri, localName, qName, attributes); + break; + } + } + + private void mainStartElement(String uri, String localName, String qName, Attributes attributes) + throws SAXException { + if ("main_index".equals(localName)) { + state = V1State.MAIN; + } else if ("header".equals(localName)) { + state = V1State.HEADER; + } else if ("keywords".equals(localName)) { + state = V1State.KEYWORDS; + } else if ("prefix".equals(localName)) { + // ignore + } else { + throw new SAXException("Bad tag <" + localName + "> in main @" + location()); + } + } + + private void headerStartElement(String uri, String localName, String qName, Attributes attributes) { + headerKey = localName; + headerValue = new StringBuilder(); + state = V1State.HEADER_INNER; + } + + private void headerInnerStartElement(String uri, String localName, String qName, Attributes attributes) + throws SAXException { + throw new SAXException("Bad tag <" + localName + "> in header @" + location()); + } + + private void keywordsStartElement(String uri, String localName, String qName, Attributes attributes) + throws SAXException { + if (!"subIndex".equals(localName)) + throw new SAXException("Bad tag <" + localName + "> in keywords @" + location()); + + String key = attributes.getValue("", "key"); + if (key == null || key.length() < 1 || key.length() > 32 || !key.matches("^[0-9a-fA-F]*$")) + throw new SAXException("Bad tag @" + location()); + key = key.toLowerCase(Locale.US); + subIndice.add(key); + } + + @Override + public void endElement(String uri, String localName, String qName) throws SAXException { + switch (state) { + case MAIN: + mainEndElement(uri, localName, qName); + break; + case HEADER: + headerEndElement(uri, localName, qName); + break; + case HEADER_INNER: + headerInnerEndElement(uri, localName, qName); + break; + case KEYWORDS: + keywordsEndElement(uri, localName, qName); + break; + } + } + + private void mainEndElement(String uri, String localName, String qName) throws SAXException { + if (!"main_index".equals(localName) && !"prefix".equals(localName)) + throw new SAXException("Bad close tag in main @" + location()); + } + + private void headerEndElement(String uri, String localName, String qName) throws SAXException { + if (!"header".equals(localName)) + throw new SAXException("Bad close tag in header @" + location()); + + state = V1State.MAIN; + } + + private void 
headerInnerEndElement(String uri, String localName, String qName) throws SAXException { + if (!localName.equals(headerKey)) + throw new SAXException("Bad close tag in header @" + location()); + header.put(headerKey, headerValue.toString()); + headerKey = null; + headerValue = null; + + state = V1State.HEADER; + } + + private void keywordsEndElement(String uri, String localName, String qName) throws SAXException { + if ("subIndex".equals(localName)) { + // ignore + } else if ("keywords".equals(localName)) { + state = V1State.MAIN; + } else + throw new SAXException("Bad close tag in keywords @" + location()); + } + } + + protected int version; + protected Map header; + protected Set subIndice; + protected Set siteIndice; + protected DefaultHandler handler; + protected Locator locator; + + @Override + public void characters(char ch[], int start, int length) throws SAXException { + handler.characters(ch, start, length); + } + + @Override + public void setDocumentLocator(Locator locator) { + this.locator = locator; + } + + @Override + public void startDocument() { + header = new HashMap(); + subIndice = new HashSet(); + siteIndice = new HashSet(); + handler = null; + } + + @Override + public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException { + if (handler == null) { + version = detectVersion(uri, localName, qName, attributes); + + switch (version) { + case 1: + handler = new V1Handler(); + break; + default: + throw new SAXException("Unsupported version @(" + location() + " : " + version); + } + + handler.startDocument(); + } + handler.startElement(uri, localName, qName, attributes); + } + + @Override + public void endElement(String uri, String localName, String qName) throws SAXException { + handler.endElement(uri, localName, qName); + } + + @Override + public void endDocument() throws SAXException { + handler.endDocument(); + } + + private int detectVersion(String uri, String localName, String qName, Attributes attributes) throws SAXException { + if ("main_index".equals(localName)) { + return 1; + } else { + throw new SAXException("Unknown tag @" + location() + " : " + localName); + } + } + + private String location() { + if (locator != null) + return "(" + locator.getLineNumber() + "," + locator.getColumnNumber() + ")"; + else + return "()"; + } + + public int getVersion() { + return version; + } + + public String getHeader(String key) { + return header.get(key); + } + + public Set getSubIndice() { + return subIndice; + } + + public Set getSiteIndice() { + return siteIndice; + } +} diff --git a/src/main/java/plugins/Library/index/xml/URLUpdateHook.java b/src/main/java/plugins/Library/index/xml/URLUpdateHook.java new file mode 100644 index 00000000..a59e3ada --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/URLUpdateHook.java @@ -0,0 +1,7 @@ +package plugins.Library.index.xml; + +public interface URLUpdateHook { + + void update(String updateContext, String indexuri); + +} diff --git a/src/main/java/plugins/Library/index/xml/Util.java b/src/main/java/plugins/Library/index/xml/Util.java new file mode 100644 index 00000000..83004461 --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/Util.java @@ -0,0 +1,75 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.index.xml; + +import java.io.File; +import java.net.MalformedURLException; + +import freenet.client.FetchException; +import freenet.client.FetchResult; +import freenet.client.HighLevelSimpleClient; +import freenet.keys.FreenetURI; +import freenet.support.Logger; +import freenet.support.api.Bucket; +import freenet.support.io.FileBucket; + +/** + * Utility class. + * TODO get rid of this + * @deprecated move these classes out to somewhere else + * @author j16sdiz (1024D/75494252) + */ +public class Util { + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(Util.class); + } + + public static Bucket fetchBucket(String uri, HighLevelSimpleClient hlsc) throws FetchException, MalformedURLException { + // try local file first + File file = new File(uri); + if (file.exists() && file.canRead()) + return new FileBucket(file, true, false, false, false); + else if (hlsc==null) + throw new NullPointerException("No client or file "+uri+" found"); + + // FreenetURI, try to fetch from freenet + if(logMINOR) Logger.minor(Util.class, "Fetching "+uri); + FreenetURI u = new FreenetURI(uri); + FetchResult res; + while (true) { + try { + res = hlsc.fetch(u); + break; + } catch (FetchException e) { + if (e.newURI != null) { + u = e.newURI; + continue; + } else + throw e; + } + } + + return res.asBucket(); + } + + public static boolean isValid(String uri) { + // try local file first + File file = new File(uri); + if (!file.exists() || !file.canRead()) { + // FreenetURI, try to fetch from freenet + try{ + FreenetURI u = new FreenetURI(uri); + }catch(MalformedURLException e){ + return false; + } + } + return true; + } +} + + diff --git a/src/main/java/plugins/Library/index/xml/XMLIndex.java b/src/main/java/plugins/Library/index/xml/XMLIndex.java new file mode 100644 index 00000000..2e03796e --- /dev/null +++ b/src/main/java/plugins/Library/index/xml/XMLIndex.java @@ -0,0 +1,861 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.index.xml; + +import plugins.Library.Library; +import plugins.Library.Index; +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermPageEntry; +import plugins.Library.index.URIEntry; +import plugins.Library.search.InvalidSearchException; +import plugins.Library.util.exec.Execution; +import plugins.Library.util.exec.TaskAbortException; + +import freenet.support.Fields; +import freenet.support.Logger; +import freenet.support.api.Bucket; +import freenet.support.io.FileBucket; +import freenet.support.io.ResumeFailedException; +import freenet.client.async.ClientGetCallback; +import freenet.client.async.ClientGetter; +import freenet.client.async.ClientContext; +import freenet.client.events.ClientEventListener; +import freenet.client.events.ClientEvent; +import freenet.client.FetchException; +import freenet.client.HighLevelSimpleClient; +import freenet.client.FetchResult; +import freenet.node.RequestStarter; +import freenet.node.RequestClient; +import freenet.keys.FreenetURI; + +import freenet.pluginmanager.PluginRespirator; +import freenet.support.Executor; + +import javax.xml.parsers.ParserConfigurationException; +import javax.xml.parsers.SAXParser; +import javax.xml.parsers.SAXParserFactory; + +import org.xml.sax.Attributes; +import org.xml.sax.Locator; +import org.xml.sax.SAXException; +import org.xml.sax.helpers.DefaultHandler; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.File; +import java.io.OutputStream; +import java.net.MalformedURLException; +import java.util.logging.Level; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.List; +import java.util.TreeMap; +import java.util.SortedMap; +import java.util.HashMap; + + +/** + * The xml index format + * @author MikeB + */ +public class XMLIndex implements Index, ClientGetCallback { + + final public static String MIME_TYPE = "application/xml"; + final public static String DEFAULT_FILE = "index.xml"; + + private PluginRespirator pr; + private Executor executor; + private ClientGetter rootGetter; + private int fetchFailures = 0; + public enum FetchStatus{UNFETCHED, FETCHING, FETCHED, FAILED} + protected FetchStatus fetchStatus = FetchStatus.UNFETCHED; + protected HashMap indexMeta = new HashMap(); + // TODO it could be tidier if this was converted to a FreenetURI + protected String indexuri; + protected long origEdition; + private URLUpdateHook updateHook; + private String updateContext; + + private HighLevelSimpleClient hlsc; + + /** + * Index format version: + *
+	 * <ul>
+	 * <li>1 = XMLSpider 8 to 33</li>
+	 * </ul>
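+	 * (The version is read via MainIndexParser.detectVersion(), which
+	 * currently returns 1 when the root element is main_index.)
+	 *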
+ * Earlier format are no longer supported. + */ + private int version; + private List subIndiceList; + private SortedMap subIndice; + private ArrayList waitingOnMainIndex = new ArrayList(); + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(XMLIndex.class); + } + + /** + * Create an XMLIndex from a URI + * @param baseURI + * Base URI of the index (exclude the index.xml part) + */ + public XMLIndex(String baseURI, long edition, PluginRespirator pr, URLUpdateHook hook, String context) throws InvalidSearchException { + this.pr = pr; + this.origEdition = edition; + this.updateHook = hook; + this.updateContext = context; + if (pr!=null) + executor = pr.getNode().executor; + + if (baseURI.endsWith(DEFAULT_FILE)) + baseURI = baseURI.replace(DEFAULT_FILE, ""); + if (!baseURI.endsWith("/")) + baseURI += "/"; + indexuri = baseURI; + + // check if index is valid file or URI + if(!Util.isValid(baseURI)) + throw new InvalidSearchException(baseURI + " is neither a valid file nor valid Freenet URI"); + + + if(pr!=null){ + hlsc = pr.getNode().clientCore.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); + hlsc.addEventHook(mainIndexListener); + } + } + + /** + * process the bucket containing the main index file + * @param bucket + */ + private void processRequests(Bucket bucket){ + try { + InputStream is = bucket.getInputStream(); + parse(is); + is.close(); + fetchStatus = FetchStatus.FETCHED; + for(FindRequest req : waitingOnMainIndex) + setdependencies(req); + waitingOnMainIndex.clear(); + }catch(Exception e){ + fetchStatus = FetchStatus.FAILED; + for (FindRequest findRequest : waitingOnMainIndex) { + findRequest.setError(new TaskAbortException("Failure parsing "+toString(), e)); + } + Logger.error(this, indexuri, e); + } finally { + bucket.free(); + } + } + + /** + * Listener to receive events when fetching the main index + */ + ClientEventListener mainIndexListener = new ClientEventListener(){ + /** + * Hears an event. 
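+		 * (For example, a SplitfileProgressEvent arriving while the root
+		 * index is being fetched is fanned out, via
+		 * FindRequest.updateWithEvent(waitingOnMainIndex, ce), to every
+		 * request queued in waitingOnMainIndex, so each pending search
+		 * reports the same fetch progress.)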
+ **/ + public void receive(ClientEvent ce, ClientContext context){ + FindRequest.updateWithEvent(waitingOnMainIndex, ce); +// Logger.normal(this, "Updated with event : "+ce.getDescription()); + } + }; + + /** + * Callback for when index fetching completes + */ + /** Called on successful fetch */ + public void onSuccess(FetchResult result, ClientGetter state){ +// Logger.normal(this, "Fetch successful " + toString()); + processRequests(result.asBucket()); + } + + /** Called on failed/canceled fetch */ + public void onFailure(FetchException e, ClientGetter state){ + fetchFailures++; + if(fetchFailures < 20 && e.newURI!=null){ + try { + if(logMINOR) Logger.minor(this, "Trying new URI: "+e.newURI); + indexuri = e.newURI.setMetaString(new String[]{""}).toString(); + if(origEdition != -1 && e.newURI.getEdition() < origEdition) { + Logger.error(this, "Redirect to earlier edition?!?!?!?: "+e.newURI.getEdition()+" from "+origEdition); + } else { + if(logMINOR) Logger.minor(this, "Trying new URI: "+e.newURI+" : "+indexuri); + startFetch(true); + if(updateHook != null && updateContext != null) + updateHook.update(updateContext, indexuri); + return; + } + } catch (FetchException ex) { + e = ex; + } catch (MalformedURLException ex) { + Logger.error(this, "what?", ex); + } + } + fetchStatus = FetchStatus.FAILED; + for (FindRequest findRequest : waitingOnMainIndex) { + findRequest.setError(new TaskAbortException("Failure fetching rootindex of "+toString(), e)); + } + Logger.error(this, "Fetch failed on "+toString() + " -- state = "+state, e); + } + + /** + * Fetch main index & process if local or fetch in background with callback if Freenet URI + * @throws freenet.client.FetchException + * @throws java.net.MalformedURLException + */ + private synchronized void startFetch(boolean retry) throws FetchException, MalformedURLException { + if ((!retry) && (fetchStatus != FetchStatus.UNFETCHED && fetchStatus != FetchStatus.FAILED)) + return; + fetchStatus = FetchStatus.FETCHING; + String uri = indexuri + DEFAULT_FILE; + + + // try local file first + File file = new File(uri); + if (file.exists() && file.canRead()) { + processRequests(new FileBucket(file, true, false, false, false)); + return; + } + + if(logMINOR) Logger.minor(this, "Fetching "+uri); + // FreenetURI, try to fetch from freenet + FreenetURI u = new FreenetURI(uri); + while (true) { + try { + rootGetter = hlsc.fetch(u, -1, this, hlsc.getFetchContext().clone()); + Logger.normal(this, "Fetch started : "+toString()); + break; + } catch (FetchException e) { + if (e.newURI != null) { + u = e.newURI; + if(logMINOR) Logger.minor(this, "New URI: "+uri); + continue; + } else + throw e; + } + } + } + + /** + * Parse the xml in the main index to read fields for this object + * @param is InputStream for main index file + * @throws org.xml.sax.SAXException + * @throws java.io.IOException + */ + private void parse(InputStream is) throws SAXException, IOException { + SAXParserFactory factory = SAXParserFactory.newInstance(); +// Logger.normal(this, "Parsing main index"); + + try { + factory.setNamespaceAware(true); + factory.setFeature("http://xml.org/sax/features/namespaces", true); + factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true); + + SAXParser saxParser = factory.newSAXParser(); + MainIndexParser parser = new MainIndexParser(); + saxParser.parse(is, parser); + + version = parser.getVersion(); + if (version == 1) { + indexMeta.put("title", parser.getHeader("title")); + indexMeta.put("ownerName", parser.getHeader("owner")); + 
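// (Illustration: these keys mirror MainIndexParser's header children;
+				// e.g. an <owner>alice</owner> header element surfaces here as
+				// indexMeta.get("ownerName").equals("alice"), "alice" being a
+				// hypothetical value.)
+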
indexMeta.put("ownerEmail", parser.getHeader("email")); + + subIndiceList = new ArrayList(); + subIndice = new TreeMap(); + + for (String key : parser.getSubIndice()) { + subIndiceList.add(key); + String stillbase; + try{ + FreenetURI furi = new FreenetURI(indexuri); + stillbase = ( furi.isUSK() ? furi.sskForUSK() : furi ).toString(); + }catch(MalformedURLException e){ + stillbase = indexuri; + } + subIndice.put(key, new SubIndex(stillbase, "index_" + key + ".xml")); + } + Collections.sort(subIndiceList); + } + } catch (ParserConfigurationException e) { + Logger.error(this, "SAX ParserConfigurationException", e); + throw new SAXException(e); + } + } + + @Override + public String toString(){ + String output = "Index:[ "+indexuri+" "+fetchStatus+" "+waitingOnMainIndex+"\n\t"+subIndice + (rootGetter==null?"]":(" GETTING("+rootGetter.toString()+")]")); + //for (SubIndex s : subIndice) + // output = output+"\n -"+s; + return output; + } + + /** + * Gets the SubIndex object which should hold the keyword + */ + private SubIndex getSubIndex(String keyword) { + String md5 = Library.MD5(keyword); + int idx = Collections.binarySearch(subIndiceList, md5); + if (idx < 0) + idx = -idx - 2; + return subIndice.get(subIndiceList.get(idx)); + } + + /** + * Find the term in this Index + */ + public synchronized Execution> getTermEntries(String term){ + try { + FindRequest request = new FindRequest(term); + setdependencies(request); + notifyAll(); + return request; + } catch (FetchException ex) { + Logger.error(this, "Trying to find " + term, ex); + return null; + } catch (MalformedURLException ex) { + Logger.error(this, "Trying to find " + term, ex); + return null; + } + } + + public Execution getURIEntry(FreenetURI uri){ + throw new UnsupportedOperationException("getURIEntry not Implemented in XMLIndex"); + } + + /** + * Puts request into the dependency List of either the main index or the + * subindex depending on whether the main index is availiable + * @param request + * @throws freenet.client.FetchException + * @throws java.net.MalformedURLException + */ + private synchronized void setdependencies(FindRequest request) throws FetchException, MalformedURLException{ +// Logger.normal(this, "setting dependencies for "+request+" on "+this.toString()); + if (fetchStatus!=FetchStatus.FETCHED){ + waitingOnMainIndex.add(request); + request.setStage(FindRequest.Stages.FETCHROOT); + startFetch(false); + }else{ + request.setStage(FindRequest.Stages.FETCHSUBINDEX); + SubIndex subindex = getSubIndex(request.getSubject()); + subindex.addRequest(request); +// Logger.normal(this, "STarting "+getSubIndex(request.getSubject())+" to look for "+request.getSubject()); + if(executor!=null) + executor.execute(subindex, "Subindex:"+subindex.getFileName()); + else + (new Thread(subindex, "Subindex:"+subindex.getFileName())).start(); + } + } + + + /** + * @return the uri of this index prefixed with "xml:" to show what type it is + */ + public String getIndexURI(){ + return indexuri; + } + + private class SubIndex implements Runnable { + String indexuri, filename; + private final ArrayList waitingOnSubindex=new ArrayList(); + private final ArrayList parsingSubindex = new ArrayList(); + FetchStatus fetchStatus = FetchStatus.UNFETCHED; + HighLevelSimpleClient hlsc; + Bucket bucket; + Exception error; + + /** + * Listens for progress on a subIndex fetch + */ + ClientEventListener subIndexListener = new ClientEventListener(){ + /** + * Hears an event and updates those Requests waiting on this subindex fetch + **/ + public void 
receive(ClientEvent ce, ClientContext context){ + FindRequest.updateWithEvent(waitingOnSubindex, ce); +// Logger.normal(this, "Updated with event : "+ce.getDescription()); + } + }; + + SubIndex(String indexuri, String filename){ + this.indexuri = indexuri; + this.filename = filename; + + if(pr!=null){ + hlsc = pr.getNode().clientCore.makeClient(RequestStarter.INTERACTIVE_PRIORITY_CLASS, false, false); + hlsc.addEventHook(subIndexListener); + } + } + + String getFileName(){ + return filename; + } + + FetchStatus getFetchStatus(){ + return fetchStatus; + } + + /** + * Add a request to the List of requests looking in this subindex + * @param request + */ + void addRequest(FindRequest request){ + if(fetchStatus==FetchStatus.FETCHED) + request.setStage(FindRequest.Stages.PARSE); + else + request.setStage(FindRequest.Stages.FETCHSUBINDEX); + synchronized(waitingOnSubindex){ + waitingOnSubindex.add(request); + } + } + + @Override + public String toString(){ + return filename+" "+fetchStatus+" "+waitingOnSubindex; + } + + + public synchronized void run(){ + try{ + while(waitingOnSubindex.size()>0){ + if(fetchStatus==FetchStatus.UNFETCHED || fetchStatus == FetchStatus.FAILED){ + try { + fetchStatus = FetchStatus.FETCHING; + // TODO tidy the fetch stuff + bucket = Util.fetchBucket(indexuri + filename, hlsc); + fetchStatus = FetchStatus.FETCHED; + + } catch (Exception e) { // TODO tidy the exceptions + //java.net.MalformedURLException + //freenet.client.FetchException + String msg = indexuri + filename + " could not be opened: " + e.toString(); + Logger.error(this, msg, e); + throw new TaskAbortException(msg, e); + } + }else if(fetchStatus==FetchStatus.FETCHED){ + parseSubIndex(); + } else { + break; + } + } + } catch (TaskAbortException e) { + fetchStatus = FetchStatus.FAILED; + this.error = e; + Logger.error(this, "Dropping from subindex run loop", e); + for (FindRequest r : parsingSubindex) + r.setError(e); + for (FindRequest r : waitingOnSubindex) + r.setError(e); + } + } + + public void parseSubIndex() throws TaskAbortException { + synchronized(parsingSubindex){ + // Transfer all requests waiting on this subindex to the parsing list + synchronized (waitingOnSubindex){ + parsingSubindex.addAll(waitingOnSubindex); + waitingOnSubindex.removeAll(parsingSubindex); + } + // Set status of all those about to be parsed to PARSE + for(FindRequest r : parsingSubindex) + r.setStage(FindRequest.Stages.PARSE); + + // Multi-stage parse to minimise memory usage. + + // Stage 1: Extract the declaration (first tag), copy everything before "". + // Copy the declaration, plus everything between the two (inclusive) to another bucket. + + Bucket mainBucket, filesBucket; + + try { + InputStream is = bucket.getInputStream(); + mainBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1); + filesBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1); + OutputStream mainOS = new BufferedOutputStream(mainBucket.getOutputStream()); + OutputStream filesOS = new BufferedOutputStream(filesBucket.getOutputStream()); + //OutputStream mainOS = new BufferedOutputStream(new FileOutputStream("main.tmp")); + //OutputStream filesOS = new BufferedOutputStream(new FileOutputStream("files.tmp")); + + BufferedInputStream bis = new BufferedInputStream(is); + + byte greaterThan = ">".getBytes("UTF-8")[0]; + byte[] filesPrefix = "'s for the specific ID's. 
+ + is = filesBucket.getInputStream(); + StageThreeHandler stageThreeHandler = new StageThreeHandler(); + saxParser.parse(is, stageThreeHandler); + if(logMINOR) Logger.minor(this, "Finished stage three XML parse"); + is.close(); + + Logger.minor(this, "parsing finished "+ parsingSubindex.toString()); + for (FindRequest findRequest : parsingSubindex) { + findRequest.setFinished(); + } + parsingSubindex.clear(); + } catch (Exception err) { + Logger.error(this, "Error parsing "+filename, err); + throw new TaskAbortException("Could not parse XML: ", err); + } + } + } + + class WordMatch { + public WordMatch(ArrayList searches, int inWordFileCount) { + this.searches = searches; + this.inWordFileCount = inWordFileCount; + } + final List searches; + int inWordFileCount; + } + + class FileMatch { + public FileMatch(String id2, HashMap termpositions2, WordMatch thisWordMatch) { + id = id2; + termpositions = termpositions2; + word = thisWordMatch; + } + final String id; + final HashMap termpositions; + final WordMatch word; + } + + Map> idToFileMatches = new HashMap>(); + + int totalFileCount = -1; + + // Parse the main XML file, including the keywords list. + // We do not see the files list. + class StageTwoHandler extends DefaultHandler { + + private boolean processingWord; + + // Requests and matches being made + private List requests; + private List wordMatches; + + private int inWordFileCount; + + // About the file tag being processed + private StringBuilder characters; + + private String match; + + private ArrayList fileMatches = new ArrayList(); + + private String id; + + private WordMatch thisWordMatch; + + StageTwoHandler() { + this.requests = new ArrayList(parsingSubindex); + for (FindRequest r : parsingSubindex){ + r.setResult(new HashSet()); + } + } + + @Override public void setDocumentLocator(Locator value) { + + } + + @Override public void endDocument() throws SAXException { + } + + @Override public void startDocument() throws SAXException { + // Do nothing + } + + @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) + throws SAXException { + if(requests.size()==0&&(wordMatches == null || wordMatches.size()==0)) + return; + if (rawName == null) { + rawName = localName; + } + String elt_name = rawName; + + if (elt_name.equals("keywords")) + processingWord = true; + + /* + * looks for the word in the given subindex file if the word is found then the parser + * fetches the corresponding fileElements + */ + if (elt_name.equals("word")) { + try { + fileMatches.clear(); + wordMatches = null; + match = attrs.getValue("v"); + if (requests!=null){ + + for (Iterator it = requests.iterator(); it.hasNext();) { + FindRequest r = it.next(); + if (match.equals(r.getSubject())){ + if(wordMatches == null) + wordMatches = new ArrayList(); + wordMatches.add(r); + it.remove(); + Logger.minor(this, "found word match "+wordMatches); + } + } + if(wordMatches != null) { + if (attrs.getValue("fileCount")!=null) + inWordFileCount = Integer.parseInt(attrs.getValue("fileCount")); + thisWordMatch = new WordMatch(new ArrayList(wordMatches), inWordFileCount); + } + } + } catch (Exception e) { + throw new SAXException(e); + } + } + + if (elt_name.equals("file")) { + if (processingWord == true && wordMatches!=null) { + try{ + id = attrs.getValue("id"); + characters = new StringBuilder(); + } + catch (Exception e) { + Logger.error(this, "Index format may be outdated " + e.toString(), e); + } + + } + } + } + + @Override public void characters(char[] ch, int start, 
int length) { + if(processingWord && wordMatches!= null && characters!=null){ + characters.append(ch, start, length); + } + } + + @Override + public void endElement(String namespaceURI, String localName, String qName) { + if(processingWord && wordMatches != null && qName.equals("file")){ + HashMap termpositions = null; + if(characters!=null){ + String[] termposs = characters.toString().split(","); + termpositions = new HashMap(); + for (String pos : termposs) { + try{ + termpositions.put(Integer.valueOf(pos), null); + }catch(NumberFormatException e){ + Logger.error(this, "Position in index not an integer :"+pos, e); + } + } + characters = null; + } + + FileMatch thisFile = new FileMatch(id, termpositions, thisWordMatch); + + ArrayList matchList = idToFileMatches.get(id); + if(matchList == null) { + matchList = new ArrayList(); + idToFileMatches.put(id, matchList); + } + if(logMINOR) Logger.minor(this, "Match: id="+id+" for word "+match); + matchList.add(thisFile); + } + + } + + } + + class StageThreeHandler extends DefaultHandler { + + @Override public void setDocumentLocator(Locator value) { + + } + + @Override public void endDocument() throws SAXException { + } + + @Override public void startElement(String nameSpaceURI, String localName, String rawName, Attributes attrs) + throws SAXException { + + if(idToFileMatches.isEmpty()) + return; + if (rawName == null) { + rawName = localName; + } + String elt_name = rawName; + + if (elt_name.equals("files")){ + String fileCount = attrs.getValue("", "totalFileCount"); + if(fileCount != null) + totalFileCount = Integer.parseInt(fileCount); + Logger.minor(this, "totalfilecount = "+totalFileCount); + } + + if (elt_name.equals("file")) { + try { + String id = attrs.getValue("id"); + + ArrayList matches = idToFileMatches.get(id); + + if(matches != null) { + + for(FileMatch match : matches) { + + String key = attrs.getValue("key"); + int l = attrs.getLength(); + String title = null; + int wordCount = -1; + if (l >= 3) { + try { + title = attrs.getValue("title"); + } catch (Exception e) { + Logger.error(this, "Index Format not compatible " + e.toString(), e); + } + try { + String wordCountString = attrs.getValue("wordCount"); + if(wordCountString != null) { + wordCount = Integer.parseInt(attrs.getValue("wordCount")); + } + } catch (Exception e) { + //Logger.minor(this, "No wordcount found " + e.toString(), e); + } + } + + for(FindRequest req : match.word.searches) { + + Set result = req.getUnfinishedResult(); + float relevance = 0; + + if(logDEBUG) Logger.debug(this, "termcount "+(match.termpositions == null ? 0 : match.termpositions.size())+" filewordcount = "+wordCount); + if(match.termpositions!=null && match.termpositions.size()>0 && wordCount>0 ){ + relevance = (float)(match.termpositions.size()/(float)wordCount); + if( totalFileCount > 0 && match.word.inWordFileCount > 0) + relevance *= Math.log( (float)totalFileCount/(float)match.word.inWordFileCount); + if(logDEBUG) Logger.debug(this, "Set relevance of "+title+" to "+relevance+" - "+key); + } + + TermPageEntry pageEntry = new TermPageEntry(req.getSubject(), relevance, new FreenetURI(key), title, match.termpositions); + result.add(pageEntry); + //Logger.minor(this, "added "+inFileURI+ " to "+ match); + } + + } + } + + } + catch (Exception e) { + Logger.error(this, "File id and key could not be retrieved. May be due to format clash", e); + } + } + } + + } + + } + + @Override + public void onResume(ClientContext context) throws ResumeFailedException { + // Ignore. Requests not persistent. 
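+		// Worked example of StageThreeHandler's relevance formula above
+		// (numbers are illustrative): a term occurring at 5 positions in a
+		// 1000-word page gives 5/1000 = 0.005; if the index holds 10000 files
+		// and the word occurs in 100 of them, the IDF factor is
+		// Math.log(10000/100) ~= 4.605, so relevance = 0.005 * 4.605 ~= 0.023.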
+	}
+
+	@Override
+	public RequestClient getRequestClient() {
+		return Library.REQUEST_CLIENT;
+	}
+
+}
diff --git a/src/main/java/plugins/Library/index/xml/package-info.java b/src/main/java/plugins/Library/index/xml/package-info.java
new file mode 100644
index 00000000..6ef67706
--- /dev/null
+++ b/src/main/java/plugins/Library/index/xml/package-info.java
@@ -0,0 +1,9 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+
+/**
+ * Where classes which only apply to XML indexes stay
+ */
+package plugins.Library.index.xml;
+
diff --git a/src/main/java/plugins/Library/io/DataFormatException.java b/src/main/java/plugins/Library/io/DataFormatException.java
new file mode 100644
index 00000000..6f7aeb4b
--- /dev/null
+++ b/src/main/java/plugins/Library/io/DataFormatException.java
@@ -0,0 +1,44 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io;
+
+/**
+** Thrown when data is not in a recognised format.
+**
+** @author infinity0
+*/
+public class DataFormatException extends java.io.IOException {
+
+	/**
+	** The parent, if any, of the unrecognised data.
+	*/
+	final Object parent;
+
+	/**
+	** The key, if any, that the data is associated with.
+	*/
+	final Object key;
+
+	/**
+	** The unrecognised data.
+	*/
+	final Object data;
+
+	public DataFormatException(String s, Throwable t, Object v, Object p, Object k) {
+		super(s);
+		initCause(t);
+		data = v;
+		parent = p;
+		key = k;
+	}
+
+	public DataFormatException(String s, Throwable t, Object v) {
+		this(s, t, v, null, null);
+	}
+
+	public Object getParent() { return parent; }
+	public Object getKey() { return key; }
+	public Object getValue() { return data; }
+
+}
diff --git a/src/main/java/plugins/Library/io/ObjectBlueprint.java b/src/main/java/plugins/Library/io/ObjectBlueprint.java
new file mode 100644
index 00000000..36969847
--- /dev/null
+++ b/src/main/java/plugins/Library/io/ObjectBlueprint.java
@@ -0,0 +1,520 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io;
+
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import java.util.AbstractMap;
+import java.util.AbstractSet;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.IdentityHashMap;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+
+/**
+** An (immutable) blueprint for an object. This class provides methods to
+** construct new objects from a list or map of property parameters, and to
+** present (currently immutable) map-views of existing objects.
+**
+** @author infinity0
+*/
+public class ObjectBlueprint<T> {
+
+	// TODO LOW maybe make the map-view mutable
+	// TODO LOW move this to another package, maybe util.reflect
+
+	/**
+	** The class that this blueprint represents.
+	*/
+	final protected Class<T> cls;
+
+	/**
+	** Map of properties to their method names. The keys are the property
+	** names; the values are the names of the nullary methods to invoke to
+	** retrieve the property-values. If any map-value is null, then the field
+	** with the same name as its map-key will be used instead.
+	*/
+	final protected Map<String, String> properties;
+
+	/**
+	** Map of properties to their corresponding fields. This is automatically
+	** generated from the {@link #properties} map.
+	*/
+	final protected Map<String, Field> prop_fields = new HashMap<String, Field>();
+
+	/**
+	** Map of properties to their corresponding getter methods. This is
+	** automatically generated from the {@link #properties} map.
+	*/
+	final protected Map<String, Method> prop_methods = new HashMap<String, Method>();
+
+	/**
+	** Map of properties to their corresponding types. This is automatically
+	** generated from the {@link #properties} map.
+	*/
+	final protected Map<String, Class<?>> param_type = new LinkedHashMap<String, Class<?>>();
+
+	/**
+	** A constructor for the class, whose parameter types are the values of
+	** {@link #param_type} in the same iteration order. This is automatically
+	** inferred from the class object, and must exist (otherwise construction
+	** of the blueprint will throw {@link NoSuchMethodException}).
+	*/
+	final protected Constructor<T> constructor;
+
+	/**
+	** Constructs a blueprint from the given {@link #properties} map. If you
+	** don't have the map ready at hand, and it's a mixture of methods and
+	** fields, consider using {@link #init(Class)} instead.
+	**
+	** The constructor's parameter types are given by {@code ctor_params}. This
+	** '''must be a subset''' of {@code blueprint}; otherwise the blueprint
+	** constructor will not be able to determine the types of those parameters.
+	** For example, if your class has two fields {@code int field_a} and
+	** {@code float field_b}, and {@code blueprint} maps {"field_b":null,
+	** "field_a":null}, and {@code ctor_params} lists ["field_b",
+	** "field_a"], then this method will search for the constructor with
+	** parameters {@code (float, int)}. If it doesn't exist, {@link
+	** NoSuchMethodException} will be thrown.
+	**
+	** If {@code ctor_params} is {@code null}, the parameter types will instead
+	** be inferred from the keys of the properties map; the end result will be
+	** the same as if it was equal to {@code blueprint.keySet()}.
+	**
+	** @param c The class to represent
+	** @param blueprint The map of properties
+	** @param ctor_params Properties corresponding to the input parameters of
+	**        the object constructor.
+	** @throws NoSuchFieldException if Java reflection can't find an inferred
+	**         field
+	** @throws NoSuchMethodException if Java reflection can't find a given
+	**         method or the inferred constructor
+	** @throws NullPointerException if {@code c} or {@code blueprint} is {@code
+	**         null}
+	*/
+	public ObjectBlueprint(Class<T> c, Map<String, String> blueprint, Iterable<String> ctor_params)
+	throws NoSuchFieldException, NoSuchMethodException {
+		if (c == null || blueprint == null) { throw new NullPointerException(); }
+		cls = c;
+		properties = blueprint;
+		setProperties();
+		if (ctor_params == null) {
+			constructor = cls.getConstructor(param_type.values().toArray(new Class<?>[param_type.size()]));
+		} else {
+			List<Class<?>> types = new ArrayList<Class<?>>();
+			for (String property: ctor_params) { types.add(param_type.get(property)); }
+			constructor = cls.getConstructor(types.toArray(new Class<?>[types.size()]));
+		}
+	}
+
+	/**
+	** Constructs a blueprint from the given {@link #properties} map. If you
+	** don't have the map ready at hand, and it's a mixture of methods and
+	** fields, consider using {@link #init(Class)} instead.
+ ** + ** This constructor just delegates to {@link #ObjectBlueprint(Class, Map, + ** Iterable) ObjectBlueprint(c, blueprint, null)}. + ** + ** @see #ObjectBlueprint(Class, Map, Iterable) + */ + public ObjectBlueprint(Class c, Map blueprint) + throws NoSuchFieldException, NoSuchMethodException { + this(c, blueprint, (Iterable)null); + } + + /** + ** Constructs a blueprint from the given collection of field names. This + ** constructor just creates the appropriate blueprint map to delegate to + ** {@link #ObjectBlueprint(Class, Map)}. + ** + ** @see #ObjectBlueprint(Class, Map) + */ + public ObjectBlueprint(Class c, Collection fields) + throws NoSuchFieldException, NoSuchMethodException { + this(c, makePropertiesFromFields(fields)); + } + + /** + ** Helper method for {@link #ObjectBlueprint(Class, Collection)}. + */ + private static Map makePropertiesFromFields(Collection fields) { + Map blueprint = new LinkedHashMap(); + for (String s: fields) { + blueprint.put(s, null); + } + return blueprint; + } + + /** + ** Constructs a blueprint from the given properties map and the given + ** object constructor. If you don't have the map ready at hand, and it's a + ** mixture of methods and fields, consider using {@link #init(Class)} + ** instead. + ** + ** If {@code ctor} is {@code null}, then any calls to the blueprint's + ** {@link #objectFromMap(Map)} and {@link #newInstance(Object[])} + ** will throw {@link NullPointerException}. + ** + ** @param c The class to represent + ** @param blueprint The map of properties + ** @param ctor The object constructor + ** @throws NoSuchFieldException if Java reflection can't find an inferred + ** field + ** @throws NoSuchMethodException if Java reflection can't find a given + ** method or the inferred constructor + ** @throws NullPointerException if {@code c} or {@code blueprint} is {@code + ** null} + */ + public ObjectBlueprint(Class c, Map blueprint, Constructor ctor) + throws NoSuchFieldException, NoSuchMethodException { + if (c == null || blueprint == null) { throw new NullPointerException(); } + cls = c; + properties = blueprint; + setProperties(); + constructor = ctor; + } + + /** + ** Helper method for the constructors. Sets {@link #prop_fields}, + ** {@link #prop_methods} and {@link #param_type} from {@link #properties}. + */ + private void setProperties() throws NoSuchFieldException, NoSuchMethodException { + assert(properties != null); + prop_fields.clear(); + prop_methods.clear(); + param_type.clear(); + + for (Map.Entry en: properties.entrySet()) { + String property = en.getKey(); + String method_name = en.getValue(); + + if (method_name == null || method_name.length() == 0) { + Field f = cls.getField(property); + prop_fields.put(property, f); + param_type.put(property, f.getType()); + + } else { + Method m = cls.getMethod(method_name); + if (m.getParameterTypes().length > 0) { + throw new IllegalArgumentException("Bad blueprint: getter method " +method_name+ " for property " +property+ " takes more than 0 arguments"); + } + prop_methods.put(property, m); + param_type.put(property, m.getReturnType()); + } + } + } + + /** + ** Returns a builder for an {@link ObjectBlueprint} for the given class. 
+ ** This should make construction of blueprints neater in source code, eg: + ** + ** ObjectBlueprint bprint = + ** ObjectBlueprint.init(MyClass.class) + ** .addFields("name", "origin", "date") + ** .addMethod("height", "getHeight") + ** .addMethod("width", "getWidth") + ** .build(); + ** + ** + ** @throws NullPointerException if {@code cls} is {@code null} + ** @see Builder + ** Pattern + */ + public static Builder init(Class cls) { + return new Builder(cls); + } + + public Class getObjectClass() { + return cls; + } + + public Constructor getObjectConstructor() { + return constructor; + } + + /** + ** Constructs a new object by invoking the inferred constructor with the + ** given list of arguments. + ** + ** @throws NullPointerException if no constructor was supplied to this + ** class' own constructor. + ** @see Constructor#newInstance(Object[]) + */ + public T newInstance(Object... initargs) throws InstantiationException, IllegalAccessException, InvocationTargetException { + return constructor.newInstance(initargs); + } + + /** + ** Map of primitive classes to their object classes. + */ + final private static Map, Class> boxes = new IdentityHashMap, Class>(); + static { + boxes.put(boolean.class, Boolean.class); + boxes.put(byte.class, Byte.class); + boxes.put(char.class, Character.class); + boxes.put(short.class, Short.class); + boxes.put(int.class, Integer.class); + boxes.put(long.class, Long.class); + boxes.put(float.class, Float.class); + boxes.put(double.class, Double.class); + } + + /** + ** Casts a value to the object type for the given primitive type. + ** + ** @param cls The primitive class + ** @param val The value to cast + ** @throws ClassCastException if the cast cannot be made + ** @throws IllegalArgumentException if the input class is not a primitive + */ + protected static Object boxCast(Class cls, Object val) throws ClassCastException { + Class tcls = boxes.get(cls); + if (tcls == null) { + throw new IllegalArgumentException("Input class must be a primitive type."); + } + return tcls.cast(val); + } + + /** + ** Constructs a new object from the given map of properties and their + ** desired values. + ** + ** @param map Map of properties to their desired values. + ** @throws NullPointerException if no constructor has been set + ** @see Constructor#newInstance(Object[]) + */ + public T objectFromMap(Map map) throws InstantiationException, IllegalAccessException, InvocationTargetException { + Object[] initargs = new Object[param_type.size()]; + int i=0; + for (Map.Entry> en: param_type.entrySet()) { + String property = en.getKey(); + Class type = en.getValue(); + Object value = map.get(property); + try { + if (type.isPrimitive()) { + value = boxCast(type, value); + } else { + value = type.cast(value); + } + } catch (ClassCastException e) { + throw new IllegalArgumentException("Parameter for property " +property+ " is not of the correct type should be "+type+" but is "+value.getClass(), e); + } + initargs[i++] = value; + } + return constructor.newInstance(initargs); + } + + /** + ** Returns a map-view of an object. The keys are its properties as defined + ** by the blueprint, and the values are the values of those properties. + ** The map is backed by the object, so changes to the object (if any) are + ** reflected in the map. The map itself is immutable, however (for now; + ** this might be changed later). 
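+	**
+	** A usage sketch (the {@code Point} class, its public {@code x} and
+	** {@code y} fields, and its {@code (int, int)} constructor are
+	** hypothetical):
+	**
+	**   ObjectBlueprint<Point> bp = new ObjectBlueprint<Point>(Point.class, Arrays.asList("x", "y"));
+	**   Map<String, Object> view = bp.objectAsMap(new Point(1, 2));   // {x=1, y=2}
+	**   Point copy = bp.objectFromMap(view);                          // round-trip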
+ ** + ** NOTE: the below implementation is technically not optimal since {@link + ** AbstractMap}'s remove methods (and that of its entry/key/value views) + ** actually iterate over the map looking for the key. However, this + ** shouldn't be an issue since the map doesn't support remove (calling it + ** will only throw {@link UnsupportedOperationException} when the key is + ** found), and most objects only have a small (<20) number of properties. + ** If you find that you need such functionality, feel free to implement a + ** more optimal solution. + ** + ** @param object The object to view as a map. + */ + public Map objectAsMap(final T object) { + + return new AbstractMap() { + + final private Set> entrySet = new AbstractSet>() { + + @Override public int size() { + return properties.size(); + } + + @Override public Iterator> iterator() { + return new Iterator>() { + final Iterator> props = properties.entrySet().iterator(); + + /*@Override**/ public boolean hasNext() { + return props.hasNext(); + } + + /*@Override**/ public Map.Entry next() { + Map.Entry en = props.next(); + final String property = en.getKey(); + final String method_name = en.getValue(); + return new Map.Entry() { + /*@Override**/ public String getKey() { return property; } + /*@Override**/ public Object getValue() { return get(property, method_name); } + /*@Override**/ public Object setValue(Object o) { throw new UnsupportedOperationException("Cannot modify an object in this way."); } + }; + } + + /*@Override**/ public void remove() { + throw new UnsupportedOperationException("Cannot modify an object in this way."); + } + + }; + } + + }; + + @Override public int size() { + return properties.size(); + } + + @Override public boolean containsKey(Object property) { + return properties.containsKey(property); + } + + @Override public Object get(Object property) { + return get((String)property, properties.get(property)); + } + + protected Object get(String property, String method_name) { + try { + if (method_name == null || method_name.length() == 0) { + return prop_fields.get(property).get(object); + } else { + return prop_methods.get(method_name).invoke(object); + } + } catch (IllegalAccessException e) { + throw new IllegalStateException(e); + } catch (InvocationTargetException e) { + throw new IllegalStateException(e); + } + } + + @Override public Set> entrySet() { + return entrySet; + } + + }; + } + + /** + ** Builder for a {@link ObjectBlueprint}. This class is protected; use + ** {@link ObjectBlueprint#init(Class)} to create one of these. + */ + protected static class Builder { + + final public Class cls; + + protected Iterable ctor_params; + + protected Constructor constructor; + + protected boolean use_ctor; + + /** + ** Properties map. This uses {@link LinkedHashMap}, which iterates through + ** the properties in the same order in which they were added to the builder. + */ + final protected Map props = new LinkedHashMap(); + + /** + ** Construct a builder for an {@link ObjectBlueprint} for the given class. 
+ ** + ** @throws NullPointerException if {@code c} is {@code null} + */ + public Builder(Class c) { + if (c == null) { throw new NullPointerException(); } + cls = c; + } + + /** + ** @return {@code this} + */ + public Builder addMethod(String property, String method_name) { + props.put(property, method_name); + return this; + } + + /** + ** @return {@code this} + */ + public Builder addField(String field) { + props.put(field, null); + return this; + } + + /** + ** @return {@code this} + */ + public Builder addMethods(Map properties) { + props.putAll(properties); + return this; + } + + /** + ** @return {@code this} + */ + public Builder addFields(Collection fields) { + for (String field: fields) { + props.put(field, null); + } + return this; + } + + /** + ** @return {@code this} + */ + public Builder addFields(String... fields) { + for (String field: fields) { + props.put(field, null); + } + return this; + } + + /** + ** Sets an object constructor, and tell {@link #build()} to use {@link + ** ObjectBlueprint#ObjectBlueprint(Class, Map, Constructor)}. + ** + ** @return {@code this} + */ + public Builder setConstructor(Constructor ctor) { + use_ctor = true; + constructor = ctor; + return this; + } + + /** + ** Sets the constructor parameters, and tell {@link #build()} to use {@link + ** ObjectBlueprint#ObjectBlueprint(Class, Map, Iterable)}. (This is the + ** default). + ** + ** @return {@code this} + */ + public Builder setCtorParams(Iterable params) { + use_ctor = false; + ctor_params = params; + return this; + } + + /** + ** Build a blueprint from the properties given to the builder so far. + ** + ** @return The built blueprint + ** @see ObjectBlueprint#ObjectBlueprint(Class, Map) + */ + public ObjectBlueprint build() throws NoSuchFieldException, NoSuchMethodException { + if (use_ctor) { + return new ObjectBlueprint(cls, props, constructor); + } else { + return new ObjectBlueprint(cls, props, ctor_params); + } + } + + } + +} diff --git a/src/main/java/plugins/Library/io/ObjectStreamReader.java b/src/main/java/plugins/Library/io/ObjectStreamReader.java new file mode 100644 index 00000000..05b46786 --- /dev/null +++ b/src/main/java/plugins/Library/io/ObjectStreamReader.java @@ -0,0 +1,21 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io; + +import java.io.InputStream; +import java.io.IOException; + +/** +** Simpler version of {@link java.io.ObjectInput} that has only one method. +** +** @author infinity0 +*/ +public interface ObjectStreamReader { + + /** + ** Read and return the object from the given stream. + */ + public T readObject(InputStream is) throws IOException; + +} diff --git a/src/main/java/plugins/Library/io/ObjectStreamWriter.java b/src/main/java/plugins/Library/io/ObjectStreamWriter.java new file mode 100644 index 00000000..1843c740 --- /dev/null +++ b/src/main/java/plugins/Library/io/ObjectStreamWriter.java @@ -0,0 +1,21 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io; + +import java.io.OutputStream; +import java.io.IOException; + +/** +** Simpler version of {@link java.io.ObjectOutput} that has only one method. 
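+**
+** A minimal implementation sketch (hypothetical):
+**
+**   class Utf8StringWriter implements ObjectStreamWriter<String> {
+**       public void writeObject(String o, OutputStream os) throws IOException {
+**           os.write(o.getBytes("UTF-8"));
+**       }
+**   }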
+** +** @author infinity0 +*/ +public interface ObjectStreamWriter { + + /** + ** Write the given object to the given stream. + */ + public void writeObject(T o, OutputStream os) throws IOException; + +} diff --git a/src/main/java/plugins/Library/io/YamlReaderWriter.java b/src/main/java/plugins/Library/io/YamlReaderWriter.java new file mode 100644 index 00000000..4c3118e6 --- /dev/null +++ b/src/main/java/plugins/Library/io/YamlReaderWriter.java @@ -0,0 +1,226 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io; + +import plugins.Library.io.DataFormatException; + +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.error.YAMLException; +import org.yaml.snakeyaml.error.Mark; +import org.yaml.snakeyaml.Loader; +import org.yaml.snakeyaml.Dumper; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.representer.Representer; +import org.yaml.snakeyaml.representer.Represent; +import org.yaml.snakeyaml.nodes.Node; +import org.yaml.snakeyaml.nodes.ScalarNode; +import org.yaml.snakeyaml.nodes.MappingNode; +import org.yaml.snakeyaml.constructor.Constructor; +import org.yaml.snakeyaml.constructor.AbstractConstruct; + +import java.util.Collections; +import java.util.Arrays; +import java.util.Map; +import java.util.LinkedHashMap; +import java.util.concurrent.Semaphore; +import java.io.File; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.IOException; + +/* class definitions added to the extended Yaml processor */ +import plugins.Library.io.serial.Packer; +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermPageEntry; +import plugins.Library.index.TermIndexEntry; +import plugins.Library.index.TermTermEntry; +import freenet.keys.FreenetURI; + + +/** +** Converts between an object and a stream containing a YAML document. By +** default, this uses a {@link Yaml} processor with additional object and tag +** definitions relevant to the Library plugin. +** +** (Ideally this would implement {@link java.io.ObjectInput} and {@link +** java.io.ObjectOutput} but they have too many methods to bother with...) +** +** @see Yaml +** @see Loader +** @see Dumper +** @author infinity0 +*/ +public class YamlReaderWriter +implements ObjectStreamReader, ObjectStreamWriter { + + final public static String MIME_TYPE = "text/yaml"; + final public static String FILE_EXTENSION = ".yml"; + + final static int MAX_PARALLEL = 1; // Limited by memory mainly. If memory is no object it could be limited by threads. + // Each Yaml instance uses a *significant* amount of memory... 
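+	// The field below implements a simple throttle; the idiom used by
+	// readObject()/writeObject() further down is:
+	//
+	//     parallelLimiter.acquireUninterruptibly();
+	//     try { /* build a Yaml instance and load/dump */ }
+	//     finally { parallelLimiter.release(); }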
+ static final Semaphore parallelLimiter = new Semaphore(MAX_PARALLEL); + + public YamlReaderWriter() { + } + + /*@Override**/ public Object readObject(InputStream is) throws IOException { + parallelLimiter.acquireUninterruptibly(); + try { + return makeYAML().load(new InputStreamReader(is, "UTF-8")); + } catch (YAMLException e) { + throw new DataFormatException("Yaml could not process the stream: " + is, e, is, null, null); + } finally { + parallelLimiter.release(); + } + } + + /*@Override**/ public void writeObject(Object o, OutputStream os) throws IOException { + parallelLimiter.acquireUninterruptibly(); + try { + makeYAML().dump(o, new OutputStreamWriter(os, "UTF-8")); + } catch (YAMLException e) { + throw new DataFormatException("Yaml could not process the object", e, o, null, null); + } finally { + parallelLimiter.release(); + } + } + + /** We do NOT keep this thread-local, because the Composer is only cleared after + * the next call to load(), so it can persist with a lot of useless data if we + * then use a different thread. So lets just construct them as needed. */ + private Yaml makeYAML() { + DumperOptions opt = new DumperOptions(); + opt.setWidth(Integer.MAX_VALUE); + opt.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + return new Yaml(new Loader(new ExtendedConstructor()), + new Dumper(new ExtendedRepresenter(), opt)); + } + + final public static ObjectBlueprint tebp_term; + final public static ObjectBlueprint tebp_index; + final public static ObjectBlueprint tebp_page; + static { + try { + tebp_term = new ObjectBlueprint(TermTermEntry.class, Arrays.asList("subj", "rel", "term")); + tebp_index = new ObjectBlueprint(TermIndexEntry.class, Arrays.asList("subj", "rel", "index")); + tebp_page = new ObjectBlueprint(TermPageEntry.class, Arrays.asList("subj", "rel", "page", "title", "positions", "posFragments")); + } catch (NoSuchFieldException e) { + throw new AssertionError(e); + } catch (NoSuchMethodException e) { + throw new AssertionError(e); + } + } + + /************************************************************************ + ** DOCUMENT + */ + public static class ExtendedRepresenter extends Representer { + + public ExtendedRepresenter() { + this.representers.put(FreenetURI.class, new Represent() { + /*@Override**/ public Node representData(Object data) { + return representScalar("!FreenetURI", ((FreenetURI) data).toString()); + } + }); + this.representers.put(Packer.BinInfo.class, new Represent() { + /*@Override**/ public Node representData(Object data) { + Packer.BinInfo inf = (Packer.BinInfo)data; + Map map = Collections.singletonMap(inf.getID(), inf.getWeight()); + return representMapping("!BinInfo", map, true); + } + }); + this.representers.put(TermTermEntry.class, new RepresentTermEntry(tebp_term)); + this.representers.put(TermIndexEntry.class, new RepresentTermEntry(tebp_index)); + this.representers.put(TermPageEntry.class, new RepresentTermEntry(tebp_page)); + } + + public class RepresentTermEntry implements Represent { + + final ObjectBlueprint blueprint; + final String tag; + + public RepresentTermEntry(ObjectBlueprint bp) { + blueprint = bp; + tag = "!" 
+ bp.getObjectClass().getSimpleName(); + } + + /*@Override**/ public Node representData(Object data) { + return representMapping(tag, blueprint.objectAsMap((T)data), true); + } + + } + + } + + + /************************************************************************ + ** DOCUMENT + */ + public static class ExtendedConstructor extends Constructor { + public ExtendedConstructor() { + this.yamlConstructors.put("!FreenetURI", new AbstractConstruct() { + /*@Override**/ public Object construct(Node node) { + String uri = (String) constructScalar((ScalarNode)node); + try { + return new FreenetURI(uri); + } catch (java.net.MalformedURLException e) { + throw new ConstructorException("while constructing a FreenetURI", node.getStartMark(), "found malformed URI " + uri, null); + } + } + }); + this.yamlConstructors.put("!BinInfo", new AbstractConstruct() { + /*@Override**/ public Object construct(Node node) { + Map map = (Map) constructMapping((MappingNode)node); + if (map.size() != 1) { + throw new ConstructorException("while constructing a Packer.BinInfo", node.getStartMark(), "found incorrectly sized map data " + map, null); + } + for (Map.Entry en: map.entrySet()) { + return new Packer.BinInfo(en.getKey(), (Integer)en.getValue()); + } + throw new AssertionError(); + } + }); + this.yamlConstructors.put("!TermTermEntry", new ConstructTermEntry(tebp_term)); + this.yamlConstructors.put("!TermIndexEntry", new ConstructTermEntry(tebp_index)); + this.yamlConstructors.put("!TermPageEntry", new ConstructTermEntry(tebp_page)); + } + + public class ConstructTermEntry extends AbstractConstruct { + + final ObjectBlueprint blueprint; + + public ConstructTermEntry(ObjectBlueprint bp) { + blueprint = bp; + } + + /*@Override**/ public Object construct(Node node) { + Map map = (Map)constructMapping((MappingNode)node); + map.put("rel", new Float(((Double)map.get("rel")).floatValue())); + try { + return blueprint.objectFromMap(map); + } catch (Exception e) { + //java.lang.InstantiationException + //java.lang.IllegalAccessException + //java.lang.reflect.InvocationTargetException + throw new ConstructorException("while constructing a " + blueprint.getObjectClass().getSimpleName(), node.getStartMark(), "could not instantiate map " + map, null, e); + } + } + + } + } + + // for some reason SnakeYAML doesn't make this class publicly constructible + public static class ConstructorException extends org.yaml.snakeyaml.constructor.ConstructorException { + public ConstructorException(String context, Mark contextMark, String problem, Mark problemMark) { + super(context, contextMark, problem, problemMark); + } + public ConstructorException(String context, Mark contextMark, String problem, Mark problemMark, Throwable cause) { + super(context, contextMark, problem, problemMark, cause); + } + } + + +} diff --git a/src/main/java/plugins/Library/io/serial/Archiver.java b/src/main/java/plugins/Library/io/serial/Archiver.java new file mode 100644 index 00000000..62ddd136 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/Archiver.java @@ -0,0 +1,54 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.exec.TaskAbortException; + +import java.util.Collection; +import java.util.Map; + +/** +** An interface that handles a single {@link Serialiser.Task}. 
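+**
+** A minimal in-memory sketch of an implementation (hypothetical), keyed on
+** {@code task.meta}:
+**
+**   class MapArchiver<T> implements Archiver<T> {
+**       final Map<Object, T> store = new HashMap<Object, T>();
+**       public void pull(PullTask<T> t) { t.data = store.get(t.meta); }
+**       public void push(PushTask<T> t) { store.put(t.meta, t.data); }
+**   }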
+** +** @author infinity0 +*/ +public interface Archiver extends Serialiser { + + /** + ** Execute a {@link PullTask}, returning only when the task is done. + ** + ** Implementations of this method which act on a recursive data structure + ** that is designed to be partially loaded (such as {@link + ** plugins.Library.util.SkeletonMap}), should only pull the minimal data + ** necessary to give a consistent instance of that data structure. This is + ** to provide a finer degree of control over the pulling process, which is + ** likely to be needed by such a data structure. + ** + ** In other words, the data structure should not be automatically populated + ** after it is formed. + ** + ** @param task The task to execute + */ + public void pull(PullTask task) throws TaskAbortException; + + /** + ** Execute a {@link PushTask}, returning only when the task is done. + ** + ** Implementations of this method which act on a recursive data structure + ** that is designed to be partially loaded (such as {@link + ** plugins.Library.util.SkeletonMap}), should ensure that the data passed + ** into this method is minimal in terms of that data structure. This is to + ** provide a finer degree of control over the pulling process, which is + ** likely to be needed by such a data structure. + ** + ** If the data is not minimal, implementations should throw {@link + ** IllegalArgumentException} rather than automatically depopulating + ** the data structure. + ** + ** @param task The task to execute + */ + public void push(PushTask task) throws TaskAbortException; + +} diff --git a/src/main/java/plugins/Library/io/serial/FileArchiver.java b/src/main/java/plugins/Library/io/serial/FileArchiver.java new file mode 100644 index 00000000..34290471 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/FileArchiver.java @@ -0,0 +1,198 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.channels.FileLock; + +import plugins.Library.io.ObjectStreamReader; +import plugins.Library.io.ObjectStreamWriter; +import plugins.Library.io.serial.Serialiser.Task; +import plugins.Library.util.exec.SimpleProgress; +import plugins.Library.util.exec.TaskAbortException; + +/** +** Converts between a map of {@link String} to {@link Object}, and a file on +** disk. An {@link ObjectStreamReader} and an {@link ObjectStreamWriter} is +** used to do the hard work once the relevant streams have been established. +** +** This class expects {@link Task#meta} to be of type {@link String}, or an +** array whose first element is of type {@link String}. 
+** +** @author infinity0 +*/ +public class FileArchiver +implements Archiver, LiveArchiver { + + // DEBUG + private static boolean testmode = false; + public static void setTestMode() { System.out.println("FileArchiver will now randomly pause 5-10s for each task, to simulate network speeds"); testmode = true; } + public static void randomWait(SimpleProgress p) { + int t = (int)(Math.random()*5+5); + p.addPartKnown(t, true); + for (int i=0; i FileArchiver(S rw, String pre, String suf, String ext, File parent) { + this(rw, rw, pre, suf, ext, parent); + } + + public FileArchiver(S rw, boolean rnd, String ext, String prefix, String suffix, File parent) { + this(rw, rw, rnd, ext, prefix, suffix, parent); + } + + public FileArchiver(ObjectStreamReader r, ObjectStreamWriter w, String pre, String suf, String ext, File parent) { + reader = r; + writer = w; + prefix = (pre == null)? "": pre; + suffix = (suf == null)? "": suf; + extension = (ext == null)? "": ext; + random = false; + parentDir = parent; + } + + public FileArchiver(ObjectStreamReader r, ObjectStreamWriter w, boolean rnd, String ext, String prefix, String suffix, File parent) { + reader = r; + writer = w; + this.suffix = suffix; + this.prefix = prefix; + extension = (ext == null)? "": ext; + random = rnd; + parentDir = parent; + } + + protected File getFile(Object meta) { + if (meta instanceof File) { return (File)meta; } + + String main = "", part = ""; + if (meta instanceof String) { + main = (String)(meta); + } else if (meta instanceof Object[]) { + Object[] arr = (Object[])meta; + if (arr.length > 0 && arr[0] instanceof String) { + main = (String)arr[0]; + if (arr.length > 1) { + StringBuilder str = new StringBuilder(arr[1].toString()); + for (int i=2; i t) throws TaskAbortException { + File file = getFile(t.meta); + try { + FileInputStream is = new FileInputStream(file); + try { + FileLock lock = is.getChannel().lock(0L, Long.MAX_VALUE, true); // shared lock for reading + try { + t.data = (T)reader.readObject(is); + } finally { + lock.release(); + } + } finally { + try { is.close(); } catch (IOException f) { } + } + } catch (IOException e) { + throw new TaskAbortException("FileArchiver could not complete pull on " + file, e, true); + } catch (RuntimeException e) { + throw new TaskAbortException("FileArchiver could not complete pull on " + file, e); + } + } + + /*@Override**/ public void push(PushTask t) throws TaskAbortException { + if (random) { t.meta = java.util.UUID.randomUUID().toString(); } + File file = getFile(t.meta); + try { + FileOutputStream os = new FileOutputStream(file); + try { + FileLock lock = os.getChannel().lock(); + try { + writer.writeObject(t.data, os); + } finally { + lock.release(); + } + } finally { + try { os.close(); } catch (IOException f) { } + } + } catch (IOException e) { + throw new TaskAbortException("FileArchiver could not complete push on " + file, e, true); + } catch (RuntimeException e) { + throw new TaskAbortException("FileArchiver could not complete push on " + file, e); + } + } + + /*@Override**/ public void pullLive(PullTask t, SimpleProgress p) throws TaskAbortException { + try { + pull(t); + if (testmode) { randomWait(p); } + else { p.addPartKnown(0, true); } + } catch (TaskAbortException e) { + p.abort(e); + } + } + + /*@Override**/ public void pushLive(PushTask t, SimpleProgress p) throws TaskAbortException { + try { + push(t); + if (testmode) { randomWait(p); } + else { p.addPartKnown(0, true); } + } catch (TaskAbortException e) { + p.abort(e); + } + } + +} diff --git 
a/src/main/java/plugins/Library/io/serial/IterableSerialiser.java b/src/main/java/plugins/Library/io/serial/IterableSerialiser.java new file mode 100644 index 00000000..ee457833 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/IterableSerialiser.java @@ -0,0 +1,53 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.exec.TaskAbortException; + +/** +** An interface that handles an iterable group of {@link Serialiser.Task}s. +** +** @author infinity0 +*/ +public interface IterableSerialiser extends Archiver { + + /** + ** Execute everything in a group of {@link PullTask}s, returning only when + ** they are all done. + ** + ** @param tasks The group of tasks to execute + */ + public void pull(Iterable> tasks) throws TaskAbortException; + + // FIXME OPT NORM + // Split up pull() into startPull and endPull. startPull would do the actual + // fetch, probably to a Bucket, and endPull would do the YAML parsing. + // This will allow us to run many slow startPull's at once, while limiting the + // number of memory-consuming endPull's so that we don't end up with a backlog + // of lots of parsed data that we don't yet need. + // See comments in FreenetArchiver.pull, ParallelSerialiser.createPullJob. + + /** + ** Execute everything in a group of {@link PushTask}s, returning only when + ** they are all done. + ** + ** @param tasks The group of tasks to execute + */ + public void push(Iterable> tasks) throws TaskAbortException; + + // FIXME OPT HIGH + // Split up push() into startPush and endPush. startPush will return as soon + // as there are valid .meta values for each task. endPush will wait for the + // actual insert (or whatever) to finish, and then clear the .data. + // Currently we have a dirty hack (for FreenetArchiver), which achieves a + // similar outcome: + // FreenetArchiver returns as soon as it has a URI and pretends it's finished. + // The high level caller using FreenetArchiver must call an explicit + // waitForAsyncInserts() method on it before finishing. + // Packer.push() is more or less unchanged as a result. + // Ultimately this is all necessary because inserts can take a long time and + // we need to limit memory usage. + +} diff --git a/src/main/java/plugins/Library/io/serial/LiveArchiver.java b/src/main/java/plugins/Library/io/serial/LiveArchiver.java new file mode 100644 index 00000000..85d03422 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/LiveArchiver.java @@ -0,0 +1,46 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.TaskAbortException; + +/** +** An interface that handles a single {@link Serialiser.Task} and sends live +** updates of its {@link Progress}. +** +** @author infinity0 +*/ +public interface LiveArchiver extends Archiver { + + /** + ** Executes a {@link PullTask} and update the progress associated with it. + ** {@link TaskAbortException}s are stored in the progress object, rather + ** than thrown. 
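+	**
+	** A typical implementation shape, mirroring {@code FileArchiver.pullLive}
+	** above: perform the blocking pull, then either mark the progress
+	** finished or record the abort on it:
+	**
+	**   try { pull(task); p.addPartKnown(0, true); }
+	**   catch (TaskAbortException e) { p.abort(e); }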
+ ** + ** Implementations must also modify the state of the progress object such + ** that after the operation completes, all threads blocked on {@link + ** Progress#join()} will either return normally or throw the exact {@link + ** TaskAbortException} that caused it to abort. + ** + ** '''If this does not occur, deadlock will result'''. + */ + public void pullLive(PullTask task, P p) throws TaskAbortException; + + /** + ** Executes a {@link PushTask} and update the progress associated with it. + ** {@link TaskAbortException}s are stored in the progress object, rather + ** than thrown. + ** + ** Implementations must also modify the state of the progress object such + ** that after the operation completes, all threads blocked on {@link + ** Progress#join()} will either return normally or throw the exact {@link + ** TaskAbortException} that caused it to abort. + ** + ** '''If this does not occur, deadlock will result'''. + */ + public void pushLive(PushTask task, P p) throws TaskAbortException; + +} diff --git a/src/main/java/plugins/Library/io/serial/MapSerialiser.java b/src/main/java/plugins/Library/io/serial/MapSerialiser.java new file mode 100644 index 00000000..89a65888 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/MapSerialiser.java @@ -0,0 +1,50 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.exec.TaskAbortException; + +import java.util.Map; + +/** +** An interface that handles a map of {@link Serialiser.Task}s. As well as the +** metadata associated with each individual task, each map also has an +** associated metadata for the entire structure. +** +** Data structures which have a map that can be grouped into submaps that each +** have an associated metadata for the entire submap, can just use the methods +** provided here, for each submap-metadata pair. +** +** @author infinity0 +*/ +public interface MapSerialiser extends Serialiser { + + /** + ** Execute everything in a map of {@link PullTask}s, returning only when + ** they are all done. + ** + ** Implementations should only execute the tasks for which the metadata + ** is not null. + ** + ** @param tasks The map of tasks to execute + ** @param mapmeta The map-wide metadata + */ + public void pull(Map> tasks, Object mapmeta) throws TaskAbortException; + + /** + ** Execute everything in a map of {@link PushTask}s, returning only when + ** they are all done. + ** + ** Implementations should only execute the tasks for which the data is not + ** null, but may require that an entire submap (which might include tasks + ** with null data and non-null metadata) be passed into the method in order + ** to perform better optimisation. + ** + ** @param tasks The map of tasks to execute + ** @param mapmeta The map-wide metadata + */ + public void push(Map> tasks, Object mapmeta) throws TaskAbortException; + +} diff --git a/src/main/java/plugins/Library/io/serial/Packer.java b/src/main/java/plugins/Library/io/serial/Packer.java new file mode 100644 index 00000000..363155ec --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/Packer.java @@ -0,0 +1,995 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). 
See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io.serial;
+
+import plugins.Library.io.serial.Serialiser.*;
+import plugins.Library.util.IdentityComparator;
+import plugins.Library.util.concurrent.ObjectProcessor;
+import plugins.Library.util.exec.TaskAbortException;
+import plugins.Library.util.exec.TaskCompleteException;
+
+import java.util.Collections;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.TreeSet;
+
+import freenet.support.Logger;
+
+/**
+** {@link MapSerialiser} that packs a map of weighable elements (eg. objects
+** with a {@code size()} method) into a group of fixed-capacity bins. There are
+** two main modes of operation, depending on the value of {@link #NO_TINY}; in
+** addition to this, there is an {@link #aggression} attribute that affects
+** the speed vs. optimality of the packing.
+**
+** The class requires that the weight of each element is never greater than
+** BIN_CAP, so that an element can always fit entirely into one additional bin;
+** {@link IllegalArgumentException} will be thrown when elements that violate
+** this are encountered.
+**
+** This class is useful for packing together a collection of root B-tree nodes.
+**
+** To use this class, the programmer needs to extend {@link Scale} to override
+** {@link Scale#weigh(Object)}, and pass it into the constructor. The other
+** methods can also be optionally overridden if an alternate metadata format is
+** desired.
+**
+** The programmer might also wish to override the following:
+**
+** * {@link #generator()}
+** * {@link #preprocessPullBins(Map, Collection)}
+** * {@link #preprocessPushBins(Map, Collection)}
+**
+** Note that this class effectively '''disables''' the "duplicate" detection of
+** {@link ProgressTracker}; pull/push tasks are deemed to be equal if their
+** meta/data point to the same object, but this class dynamically generates the
+** meta/data for each bin on each pull/push request. This should not interfere
+** with correctness (as long as the relevant locks are in place); it is just
+** slightly inefficient when requests for the same bin are given to a child
+** serialiser that uses {@link ProgressTracker} to detect duplicates.
+**
+** If you plan on using a child serialiser that uses some other method to
+** detect duplicates, bear in mind that {@link #pullUnloaded(Map, Object)}
+** actually '''depends''' on said behaviour for correctness; see its source
+** code for more details.
+**
+** @author infinity0
+*/
+public class Packer<K, T>
+implements MapSerialiser<K, T>,
+           Serialiser.Composite<IterableSerialiser<Map<K, T>>> {
+
+	private static volatile boolean logMINOR;
+	private static volatile boolean logDEBUG;
+
+	static {
+		Logger.registerClass(Packer.class);
+	}
+
+	/**
+	** Maximum weight of a bin (except one; see {@link #push(Map, Object)} for
+	** details).
+	*/
+	final public int BIN_CAP;
+
+	/**
+	** {@code BIN_CAP}/2 (or 1/2 less than this, if {@code BIN_CAP} is odd).
+	*/
+	final public int BIN_CAPHF;
+
+	/**
+	** Whether the "tiny" (weight less than {@code BIN_CAP/2}) bin will be kept
+	** as a separate bin, or merged into the next-smallest.
+	*/
+	final public boolean NO_TINY;
+
+	/**
+	** The {@link Scale} used to weigh bins and read/make their metadata.
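+	**
+	** For example (a sketch; it assumes, per the class description, that
+	** {@link Scale#weigh(Object)} is the only method that must be overridden):
+	**
+	**   Scale<byte[]> byLength = new Scale<byte[]>() {
+	**       @Override public int weigh(byte[] b) { return b.length; }
+	**   };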
+ */ + final public Scale scale; + + /** + ** How aggressive the bin-packing algorithm is. Eg. will it load bins that + ** are not already loaded, to perform better optimisation? + ** + ** ;0 : discount all unloaded bins (unrecommended) + ** ;1 (default) : discount all unchanged bins after the BFD step + ** ;2 : discount all unchaged bins after the redistribution step + ** ;3 : load all bins before doing any packing (ie. re-pack from scratch) + */ + private Integer aggression = 1; + + final protected IterableSerialiser> subsrl; + public IterableSerialiser> getChildSerialiser() { return subsrl; } + + /** + ** Constructs a Packer with the given parameters. + ** + ** If you are using a child {@link Serialiser} that detects and discards + ** duplicate pull/push requests using a mechanism '''other''' than the + ** default {@link ProgressTracker}, please first read the section relating + ** to this in the class description. + ** + ** @param s The child serialiser + ** @param c The maximum weight of a bin + ** @param n Whether to merge or overlook the "tiny" element + */ + public Packer(IterableSerialiser> s, Scale sc, int c, boolean n) { + if (s == null) { + throw new IllegalArgumentException("Can't have a null child serialiser"); + } + if (c <= 0) { + throw new IllegalArgumentException("Capacity must be greater than zero."); + } + subsrl = s; + scale = sc; + BIN_CAP = c; + BIN_CAPHF = c>>1; + NO_TINY = n; + } + + /** + ** Constructs a Packer with the given parameters and {@link #NO_TINY} + ** set to {@code true}. + ** + ** If you are using a child {@link Serialiser} that detects and discards + ** duplicate pull/push requests using a mechanism '''other''' than the + ** default {@link ProgressTracker}, please first read the section relating + ** to this in the class description. + ** + ** @param s The child serialiser + ** @param c The maximum weight of a bin + */ + public Packer(IterableSerialiser> s, Scale sc, int c) { + this(s, sc, c, true); + } + + /** + ** Atomically set the {@link #aggression}. + */ + public void setAggression(int i) { + if (i < 0 || i > 3) { + throw new IllegalArgumentException("Invalid aggression value: only 0,1,2,3 is allowed."); + } + synchronized (this) { + aggression = i; + } + } + + /** + ** Atomically get the {@link #aggression}. + */ + public int getAggression() { + synchronized (this) { + return aggression; + } + } + + /** + ** Creates a {@link IDGenerator} for use by the rest of the algorithms of + ** this class. + ** + ** The default generator '''requires''' all keys of the backing map to be + ** present in the input task map, since the default {@link IDGenerator} + ** does not have any other way of knowing what bin IDs were handed out in + ** previous push operations, and it must avoid giving out duplicate IDs. + ** + ** You can bypass this requirement by extending {@link IDGenerator} so that + ** it can work out that information (eg. using an algorithm that generates + ** a UUID), and overriding {@link #generator()}. (ID generation based on + ** bin ''content'' is ''not'' supported, and is not a priority at present.) + */ + protected IDGenerator generator() { + return new IDGenerator(); + } + + /** + ** Any tasks that need to be done after the bin tasks have been formed, + ** but before they have been passed to the child serialiser. The default + ** implementation does nothing. 
+	*/
+	protected void preprocessPullBins(Map> tasks, Collection>> bintasks) { }
+
+	/**
+	** Any tasks that need to be done after the bin tasks have been formed,
+	** but before they have been passed to the child serialiser. The default
+	** implementation does nothing.
+	*/
+	protected void preprocessPushBins(Map> tasks, Collection>> bintasks) { }
+
+	/**
+	** Looks through {@code elems} for {@link PushTask}s with null data and
+	** reads their metadata to determine which bins to add them to.
+	** @throws TaskAbortException
+	*/
+	protected SortedSet> initialiseBinSet(SortedSet> bins, Map> elems, Inventory inv, IDGenerator gen, Object mapmeta) throws TaskAbortException {
+
+		Map> binincubator = new HashMap>();
+
+		for (Map.Entry> en: elems.entrySet()) {
+			PushTask task = en.getValue();
+			if (task.data == null) {
+				if (task.meta == null) {
+					throw new IllegalArgumentException("Packer error: null data and null meta for key " + en.getKey());
+				}
+				Object binID = scale.readMetaID(task.meta);
+
+				Set binegg = binincubator.get(binID);
+				if (binegg == null) {
+					binegg = new HashSet();
+					binincubator.put(binID, binegg);
+				}
+				binegg.add(en.getKey());
+			}
+		}
+
+		HashMap> overflow = null;
+		for (Map.Entry> en: binincubator.entrySet()) {
+			Set keys = en.getValue();
+			Bin bin = new Bin(BIN_CAP, inv, gen.registerID(en.getKey()), keys);
+			// Put the data into the bin so we don't repack it unless we need to.
+			// And so that the assertions in the next method work, which assume that the data is packed already (e.g. if we have 2 bins, they contain something).
+			for(K k : keys) {
+				if(bin.add(k)) {
+					if(bin.weight > BIN_CAP) {
+						// Overflow. Let packBestFitDecreasing fit it in somewhere else.
+						bin.remove(k);
+						if(overflow == null) overflow = new HashMap>();
+						overflow.put(k, elems.get(k));
+					}
+				}
+			}
+			bins.add(bin);
+		}
+
+		if(overflow != null) {
+			// Pull the data so that it can be re-packed into a different bin.
+			// Otherwise, data will be null, so it won't be packed by packBestFitDecreasing.
+			System.err.println("Repacking: data has grown, "+overflow.size()+" items don't fit, pulling data to repack");
+			pullUnloaded(overflow, mapmeta);
+		}
+
+		// If the data can shrink, we can have multiple bins smaller than BIN_CAPHF
+		// FIXME: TEST THIS: In current usage, the data can't shrink, so we're okay.
+
+		Bin second;
+		Bin lightest;
+		while(bins.size() > 1 && (second = bins.headSet(lightest = bins.last()).last()).filled() < BIN_CAPHF) {
+			System.err.println("Merging bins: Data has shrunk!");
+			bins.remove(lightest);
+			bins.remove(second);
+			for (K k: lightest) {
+				second.add(k);
+			}
+			bins.add(second);
+		}
+
+		return bins;
+	}
+
+	/**
+	** Looks through {@code elems} for {@link PushTask}s with non-null data and
+	** packs them into the given collection of bins.
+	**
+	** NOTE: this method assumes the conditions described in the description
+	** for this class. It is up to the calling code to ensure that they hold.
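+	**
+	** For example (illustrative weights only): with {@code BIN_CAP} = 10 and
+	** unpacked elements of weights {7, 6, 4, 3}, the elements are visited in
+	** descending order and each is put into the fullest bin that can still
+	** take it, giving the bins {7, 3} and {6, 4}.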
+ */ + protected void packBestFitDecreasing(SortedSet> bins, Map> elems, Inventory inv, IDGenerator gen) { + + if (NO_TINY && !bins.isEmpty()) { + // locate the single bin heavier than BIN_CAP (if it exists), and keep + // moving the heaviest element from that bin into another bin, until that + // bin weighs more than BIN_CAP / 2 (and both bins will weigh between + // BIN_CAP / 2 and BIN_CAP + + // proof that this always succeeds: + // + // - since we are visiting elements in descending order of weights, we will + // never reach an element that takes the total weight of the second bin + // from strictly less than BIN_CAP / 2 to strictly more than BIN_CAP + // - in other words, we will always "hit" our target weight range of + // BIN_CAP / 2 and BIN_CAP, and the other bin will be in this zone too + // (since the single bin would weigh between BIN_CAP and BIN_CAP * 3/2 + // + Bin heaviest = bins.first(); + if (heaviest.filled() > BIN_CAP) { + Bin second = new Bin(BIN_CAP, inv, gen.nextID(), null); + bins.remove(heaviest); + while (second.filled() < BIN_CAPHF) { + K key = heaviest.last(); + heaviest.remove(key); + second.add(key); + } + bins.add(heaviest); + bins.add(second); + } + } + + // heaviest bin is <= BIN_CAP // FIXME convert back to assertion when happy with this + if(!(bins.isEmpty() || bins.first().filled() <= BIN_CAP)) { + System.err.println("Bins:"); + for(Bin bin : bins) + System.err.println("Bin: "+bin.filled()); + assert(false); + } + // 2nd-lightest bin is >= BIN_CAP / 2 // FIXME convert back to assertion when happy with this + if(bins.size() >= 2) { + if(!(bins.headSet(bins.last()).last().filled() >= BIN_CAPHF)) { + System.err.println("Last but one bin should be at least "+BIN_CAPHF); + for(Bin bin : bins) + System.err.println("Bin: "+bin.filled()); + assert(false); + } + } + + // sort keys in descending weight order of their elements + SortedSet sorted = new TreeSet(Collections.reverseOrder(inv)); + for (Map.Entry> en: elems.entrySet()) { + if (en.getValue().data != null) { + sorted.add(en.getKey()); + } + } + + // go through the sorted set and try to fit the keys into the bins + for (K key: sorted) { + int weight = inv.getWeight(key); + + // get all bins that can fit the key; tailSet() should do this efficiently + SortedSet> subbins = bins.tailSet(new DummyBin(BIN_CAP, BIN_CAP - weight)); + + if (subbins.isEmpty()) { + // if no bin can fit it, then start a new bin + Bin bin = new Bin(BIN_CAP, inv, gen.nextID(), null); + bin.add(key); + bins.add(bin); + + } else { + // get the fullest bin that can fit the key + Bin lowest = subbins.first(); + // TreeSet assumes its elements are immutable. + bins.remove(lowest); + lowest.add(key); + bins.add(lowest); + } + } + + // heaviest bin is <= BIN_CAP + assert(bins.isEmpty() || bins.first().filled() <= BIN_CAP); + // 2nd-lightest bin is >= BIN_CAP / 2 + assert(bins.size() < 2 || bins.headSet(bins.last()).last().filled() >= BIN_CAPHF); + + if (NO_TINY && bins.size() > 1) { + // locate the single bin lighter than BIN_CAP / 2 (if it exists), and + // combine it with the next smallest bin + Bin lightest = bins.last(); + if (lightest.filled() < BIN_CAPHF) { + bins.remove(lightest); + Bin second = bins.last(); + bins.remove(second); + for (K k: lightest) { + second.add(k); + } + bins.add(second); + } + } + } + + /** + ** Given a group of bins, attempts to redistribute the items already + ** contained within them to make the weights more even. 
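+	**
+	** For example (illustrative weights only): given bins holding element
+	** weights {5, 4} and {1}, the weight difference is 8; the lightest
+	** element of the heaviest bin (the 4) weighs less than this, so it is
+	** moved across, leaving the two evenly-loaded bins {5} and {1, 4}.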
+	*/
+	protected void redistributeWeights(SortedSet> bins, Inventory inv) {
+
+		// keep track of bins we have finalised
+		List> binsFinal = new ArrayList>(bins.size());
+
+		// special cases
+		if (bins.size() < 2) { return; }
+
+		// proof that the algorithm halts:
+		//
+		// - let D be the weight difference between the heaviest and lightest bins
+		// - let N be the set containing all maximum and minimum bins
+		// - at each stage of the algorithm, |N| strictly decreases, since the
+		//   selected bin pair is either strictly "evened out", or its heavier
+		//   member is discarded
+		// - when |N| decreases to 0, D strictly decreases, and we get a new N
+		// - D is bounded-below by 0
+		// - hence the algorithm terminates
+		//
+		Bin lightest = bins.last();
+		while (!bins.isEmpty()) {
+			// we don't need to find the lightest bin every time
+			Bin heaviest = bins.first();
+			bins.remove(heaviest);
+			int weightdiff = heaviest.filled() - lightest.filled();
+
+			// get the lightest element
+			K feather = heaviest.first();
+			if (inv.getWeight(feather) < weightdiff) {
+				// can be strictly "evened out"
+
+				bins.remove(lightest);
+
+				heaviest.remove(feather);
+				lightest.add(feather);
+				bins.add(heaviest);
+				bins.add(lightest);
+
+				lightest = bins.last();
+
+			} else {
+				// if there does not exist such an element, then it is impossible for the
+				// heaviest bin to be evened out any further, since the loop does not
+				// decrease the load of the lightest bin. so, remove it from the bins set.
+
+				binsFinal.add(heaviest);
+			}
+
+			// TODO NORM some tighter assertions than this
+			assert(bins.isEmpty() || bins.first().filled() - bins.last().filled() <= weightdiff);
+		}
+
+		for (Bin bin: binsFinal) {
+			bins.add(bin);
+		}
+
+	}
+
+	/**
+	** Discards all the bins which remain unchanged from what they were
+	** initialised to contain. This means that we don't re-push something that
+	** is already pushed.
+	*/
+	protected void discardUnchangedBins(SortedSet> bins, Map> elems) {
+		Iterator> it = bins.iterator();
+		while (it.hasNext()) {
+			Bin bin = it.next();
+			if (bin.unchanged()) {
+				it.remove();
+				for (K key: bin) {
+					elems.remove(key);
+				}
+			}
+		}
+	}
+
+	/**
+	** Pulls all PushTasks with null data. This is used when we need to push a
+	** bin that has been assigned to hold the (unloaded) data of these tasks.
+	*/
+	protected void pullUnloaded(Map> tasks, Object meta) throws TaskAbortException {
+		Map> newtasks = new HashMap>(tasks.size()<<1);
+		for (Map.Entry> en: tasks.entrySet()) {
+			PushTask task = en.getValue();
+			if (task.data == null) {
+				newtasks.put(en.getKey(), new PullTask(task.meta));
+			}
+		}
+
+		if (newtasks.isEmpty()) { return; }
+		pull(newtasks, meta);
+
+		for (Map.Entry> en: tasks.entrySet()) {
+			PushTask task = en.getValue();
+			PullTask newtask = newtasks.get(en.getKey());
+			if (task.data != null) { continue; }
+			if (newtask == null) {
+				// this part of the code should never be reached because pull/push tasks
+				// are deemed equal if their meta/data point to the same object; and these
+				// objects are re-created from scratch upon each push/pull operation. so a
+				// child serialiser that extends Serialiser.Trackable should never discard
+				// a pull/push task for a bin as a "duplicate" of another task already in
+				// progress, and hence newtask should never be null.
+
+				// this is an inefficiency that doesn't have an easy workaround due to the
+				// design of the serialiser/progress system, but such cases should be rare.
+ // external locks should prevent any concurrent modification of the bins; + // it is not the job of the Serialiser to prevent this (and Serialisers in + // general have this behaviour, not just this class). + + // if a future programmer uses a child serialiser that behaves differently, + // we throw this exception to warn them: + throw new IllegalStateException("The child serialiser should *not* discard (what it perceives to be) duplicate requests for bins. See the class documentation for details."); + } else if (newtask.data == null) { + throw new IllegalStateException("Packer did not get the expected data while pulling unloaded bins; the pull method (or the child serialiser) is buggy."); + } + task.data = newtask.data; + task.meta = newtask.meta; + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation loads all the data for each bin referred to in the + ** task map, and will add extra tasks to the map if those pulling those + ** bins result in extra data being loaded. + */ + /*@Override**/ public void pull(Map> tasks, Object mapmeta) throws TaskAbortException { + + try { + Inventory inv = new Inventory(this, tasks); + Map>> bintasks = new HashMap>>(); + + // form the list of bin-tasks from the bins referred to by the map-tasks + for (Map.Entry> en: tasks.entrySet()) { + Object binid = scale.readMetaID(en.getValue().meta); + if (!bintasks.containsKey(binid)) { + bintasks.put(binid, new PullTask>(scale.makeBinMeta(mapmeta, binid))); + } + } + + // pull all the bins + preprocessPullBins(tasks, bintasks.values()); + subsrl.pull(bintasks.values()); + + // for each task, grab and remove its element from its bin + for (Map.Entry> en: tasks.entrySet()) { + PullTask task = en.getValue(); + PullTask> bintask = bintasks.get(scale.readMetaID(task.meta)); + if (bintask == null) { + // task was marked as redundant by child serialiser + tasks.remove(en.getKey()); + } else if (bintask.data == null || (task.data = bintask.data.remove(en.getKey())) == null) { + throw new TaskAbortException("Packer did not find the element (" + en.getKey() + ") in the expected bin (" + scale.readMetaID(task.meta) + "). Either the data is corrupt, or the child serialiser is buggy.", new Exception("internal error")); + } + } + + // if there is any leftover data in the bins, load them anyway + Map> leftovers = new HashMap>(); + for (Map.Entry>> en: bintasks.entrySet()) { + PullTask> bintask = en.getValue(); + for (Map.Entry el: bintask.data.entrySet()) { + if (tasks.containsKey(el.getKey())) { + throw new TaskAbortException("Packer found an extra unexpected element (" + el.getKey() + ") inside a bin (" + en.getKey() + "). Either the data is corrupt, or the child serialiser is buggy.", new Exception("internal error")); + } + PullTask task = new PullTask(scale.makeMeta(en.getKey(), scale.weigh(el.getValue()))); + task.data = el.getValue(); + leftovers.put(el.getKey(), task); + } + } + tasks.putAll(leftovers); + if (tasks.isEmpty()) { throw new TaskCompleteException("All tasks appear to be complete"); } + + } catch (RuntimeException e) { + throw new TaskAbortException("Could not complete the pull operation", e); + } + + } + + /** + ** {@inheritDoc} + ** + ** This implementation will pack all tasks with non-null data into bins, + ** taking into account bins already allocated (as defined by the tasks with + ** null data and non-null metadata). 
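+	**
+	** A minimal invocation might look like this (a sketch; the key type, the
+	** data and the map-wide metadata {@code mapmeta} are illustrative):
+	**
+	**   Map<String, PushTask<String>> tasks = new HashMap<String, PushTask<String>>();
+	**   tasks.put("k1", new PushTask<String>("first element"));
+	**   tasks.put("k2", new PushTask<String>("second element"));
+	**   packer.push(tasks, mapmeta);
+	**   // on success, each task's data is cleared and its meta is set to a
+	**   // BinInfo describing the bin the element was packed into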
+ ** + ** There are two main modes of operation: + ** + ** ; {@link #NO_TINY} is {@code false} : + ** Uses the Best-Fit Decreasing bin-packing algorithm, plus an additional + ** redistribution algorithm that "evens out" the bins. Each bin's weight + ** will be between [BIN_CAP/2, BIN_CAP], except for at most one bin. + ** ; {@link #NO_TINY} is {@code true} : + ** As above, but if the exceptional bin exists, it will be merged with + ** the next-smallest bin, if that exists. As before, each bin will weigh + ** between [BIN_CAP/2, BIN_CAP], except for the merged element (if it + ** exists), which will weigh between (BIN_CAP, BIN_CAP*3/2). + ** + ** In addition to this, the {@link #aggression} attribute will affect when + ** fully-unloaded unchanged bins are discarded. + ** + ** The default generator '''requires''' all keys of the backing map to be + ** present in the input task map, since the default {@link IDGenerator} + ** does not have any other way of knowing what bin IDs were handed out in + ** previous push operations, and it must avoid giving out duplicate IDs. + ** + ** You can bypass this requirement by extending {@link IDGenerator} so that + ** it can work out that information (eg. using an algorithm that generates + ** a UUID), and overriding {@link #generator()}. (ID generation based on + ** bin ''content'' is ''not'' supported, and is not a priority at present.) + */ + /*@Override**/ public void push(Map> tasks, Object mapmeta) throws TaskAbortException { + + try { + // read local copy of aggression + int agg = getAggression(); + if(logDEBUG) Logger.debug(this, "Aggression = "+agg+" tasks size = "+tasks.size()); + + IDGenerator gen = generator(); + Inventory inv = new Inventory(this, tasks); + SortedSet> bins = new TreeSet>(); + + // initialise the binset based on the aggression setting + if (agg <= 0) { + // discard all tasks with null data + Iterator> it = tasks.values().iterator(); + while (it.hasNext()) { + PushTask task = it.next(); + if (task.data == null) { + it.remove(); + } + } + } else if (agg <= 2) { + // initialise already-allocated bins from tasks with null data + initialiseBinSet(bins, tasks, inv, gen, mapmeta); + } else { + // pull all tasks with null data + pullUnloaded(tasks, mapmeta); + } + + // pack elements into bins + packBestFitDecreasing(bins, tasks, inv, gen); + if (agg <= 1) { + // discard all bins not affected by the pack operation + discardUnchangedBins(bins, tasks); + } + + // redistribute weights between bins + redistributeWeights(bins, inv); + if (agg <= 2) { + // discard all bins not affected by the redistribution operation + discardUnchangedBins(bins, tasks); + // pull all data that is as yet unloaded + pullUnloaded(tasks, mapmeta); + } + + // form the list of bin-tasks for each bin, using data from the map-tasks + List>> bintasks = new ArrayList>>(bins.size()); + for (Bin bin: bins) { + Map data = new HashMap(bin.size()<<1); + for (K k: bin) { + data.put(k, tasks.get(k).data); + } + bintasks.add(new PushTask>(data, scale.makeBinMeta(mapmeta, bin.id))); + } + + // push all the bins + preprocessPushBins(tasks, bintasks); + subsrl.push(bintasks); + + // set the metadata for all the pushed bins + for (PushTask> bintask: bintasks) { + for (K k: bintask.data.keySet()) { + tasks.get(k).meta = scale.makeMeta(scale.readBinMetaID(bintask.meta), inv.getWeight(k)); + tasks.get(k).data = null; + } + } + + // FIXME PERFORMANCE: + // Split up IterableSerialiser, call the start phase, which generates + // metadata. 
Fill in the final metadata based on this and clear the
+		// tasks.get(k).data. Then wait for the finish.
+
+		} catch (RuntimeException e) {
+			throw new TaskAbortException("Could not complete the push operation", e);
+		}
+
+	}
+
+
+	/************************************************************************
+	** A class that represents a bin with a certain capacity.
+	**
+	** NOTE: this implementation is incomplete, since we don't override the
+	** remove() methods of the sets and iterators returned by the subset and
+	** iterator methods, to also recalculate the weight; but these are never
+	** used here, so it's OK.
+	**
+	** @author infinity0
+	*/
+	protected static class Bin extends TreeSet implements Comparable> {
+
+		final protected Object id;
+		final protected Set orig;
+		final protected int capacity;
+		final protected Inventory inv;
+
+		int weight = 0;
+
+		public Bin(int c, Inventory v, Object i, Set o) {
+			super(v);
+			inv = v;
+			capacity = c;
+			id = i;
+			orig = o;
+		}
+
+		public boolean unchanged() {
+			// proper implementations of equals() should not throw NullPointerException
+			// but check null anyway, just in case...
+			return orig != null && equals(orig);
+		}
+
+		public int filled() {
+			return weight;
+		}
+
+		public int remainder() {
+			return capacity - weight;
+		}
+
+		@Override public boolean add(K c) {
+			if (super.add(c)) { weight += inv.getWeight(c); return true; }
+			return false;
+		}
+
+		@Override public boolean remove(Object c) {
+			if (super.remove(c)) { weight -= inv.getWeight((K)c); return true; }
+			return false;
+		}
+
+		/**
+		** Descending comparator for total bin load, ie. ascending comparator
+		** for remaining weight capacity. {@link DummyBin}s are treated as the
+		** "heaviest", or "least empty", bin for their weight.
+		*/
+		/*@Override**/ public int compareTo(Bin bin) {
+			if (this == bin) { return 0; }
+			int f1 = filled(), f2 = bin.filled();
+			if (f1 != f2) { return (f2 > f1)? 1: -1; }
+			return (bin instanceof DummyBin)? -1: IdentityComparator.comparator.compare(this, bin);
+		}
+
+	}
+
+
+	/************************************************************************
+	** A class that pretends to be a bin with a certain capacity and weight.
+	** This is used when we want such a bin for some purpose (eg. as an
+	** argument to a comparator) but we don't want to have to populate a real
+	** bin to get the desired weight (which might be massive).
+	**
+	** @author infinity0
+	*/
+	protected static class DummyBin extends Bin {
+
+		public DummyBin(int c, int w) {
+			super(c, null, null, null);
+			weight = w;
+		}
+
+		@Override public boolean add(K k) {
+			throw new UnsupportedOperationException("Dummy bins cannot be modified");
+		}
+
+		@Override public boolean remove(Object c) {
+			throw new UnsupportedOperationException("Dummy bins cannot be modified");
+		}
+
+		/**
+		** {@inheritDoc}
+		*/
+		@Override public int compareTo(Bin bin) {
+			if (this == bin) { return 0; }
+			int f1 = filled(), f2 = bin.filled();
+			if (f1 != f2) { return (f2 > f1)? 1: -1; }
+			return (bin instanceof DummyBin)? IdentityComparator.comparator.compare(this, bin): 1;
+		}
+	}
+
+
+	/************************************************************************
+	** A class that provides a "weight" assignment for each key in a given map,
+	** by reading either the data or the metadata of its associated task.
+ ** + ** @author infinity0 + */ + protected static class Inventory extends IdentityComparator { + + final protected Map weights = new HashMap(); + final protected Map> elements; + final protected Packer packer; + final protected Scale scale; + + /** + ** Keeps track of "the exceptional element" as defined in the + ** description for {@link #push(Map, Object)}. + */ + protected K giant; + + // TODO LOW maybe , or something + protected Inventory(Packer pk, Map> elem) { + packer = pk; + scale = pk.scale; + elements = elem; + } + + /** + ** Get the weight assignment for a given key. + */ + public int getWeight(K key) { + Integer i = weights.get(key); + if (i == null) { + Task task = elements.get(key); + if (task == null) { + throw new IllegalArgumentException("This scale does not have a weight for " + key); + } else if (task.data == null) { + i = scale.readMetaWeight(task.meta); + } else { + i = scale.weigh(task.data); + } + if (i > packer.BIN_CAP) { + if (packer.NO_TINY && giant == null) { + giant = key; + } else { + throw new IllegalArgumentException("Element " + key + " greater than the capacity allowed: " + i + "/" + packer.BIN_CAP); + } + } + weights.put(key, i); + } + return i; + } + + /** + ** Compare keys by the weights of the element it maps to. The {@code + ** null} key is treated as the "lightest" key for its weight. + */ + @Override public int compare(K k1, K k2) { + if (k1 == k2) { return 0; } + int a = getWeight(k1), b = getWeight(k2); + if (a != b) { return (a < b)? -1: 1; } + // treat the null key as the "least" element for its weight + return (k1 == null)? -1: (k2 == null)? 1: super.compare(k1, k2); + } + + /** + ** Set the weight for the {@code null} key. This is useful for when + ** you want to obtain a subset using tailSet() or headSet(), but don't + ** have a key with the desired weight at hand. + */ + public K makeDummyObject(int weight) { + weights.put(null, weight); + return null; + } + + } + + + /************************************************************************ + ** Generates unique IDs for the bins. This generator assigns {@link Long}s + ** in sequence; registering any type of integer object reference will cause + ** future automatically-assigned IDs to be greater than that integer. + ** + ** This implementation cannot ensure uniqueness of IDs generated (with + ** respect to ones generated in a previous session), unless they are + ** explicitly registered using {@link #registerID(Object)}. + ** + ** @author infinity0 + */ + public static class IDGenerator { + + protected long nextID; + + /** + ** Register an ID that was assigned in a previous session, so the + ** generator doesn't output that ID again. + */ + public Object registerID(Object o) { + long id; + if (o instanceof Integer) { + id = (Integer)o; + } else if (o instanceof Long) { + id = (Long)o; + } else if (o instanceof Short) { + id = (Short)o; + } else if (o instanceof Byte) { + id = (Byte)o; + } else { + return o; + } + if (id > nextID) { + nextID = id+1; + } + return o; + } + + /** + ** Generate another ID. + */ + public Long nextID() { + return nextID++; + } + + } + + + /************************************************************************ + ** JavaBean representing bin metadata. 
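+	**
+	** For example, {@code new BinInfo("42", 1024)} describes a bin with ID
+	** "42" and total element weight 1024 (both values illustrative).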
+ ** + ** @author infinity0 + */ + public static class BinInfo implements Cloneable { + + protected Object id; + protected int weight; + + public BinInfo() { } + public BinInfo(Object i, int w) { id = i; weight = w; } + public Object getID() { return id; } + public int getWeight() { return weight; } + public void setID(Object i) { id = i; } + public void setWeight(int w) { weight = w; } + + @Override public boolean equals(Object o) { + if (o == this) { return true; } + if (!(o instanceof BinInfo)) { return false; } + BinInfo bi = (BinInfo)o; + return id.equals(bi.id) && weight == bi.weight; + } + + @Override public int hashCode() { + return id.hashCode() + weight*31; + } + + @Override public String toString() { + return "Bin \"" + id + "\" - " + weight + "oz."; + } + + } + + + /************************************************************************ + ** Performs measurements and readings on bins and their elements. + ** + ** To implement this class, the programmer needs to implement the {@link + ** #weigh(Object)} method. + ** + ** @author infinity0 + */ + abstract public static class Scale { + + /** + ** Constructs the metadata for a bin, from the bin ID and the map-wide + ** metadata. + */ + public Object makeBinMeta(Object mapmeta, Object binid) { + return (mapmeta == null)? binid: new Object[]{mapmeta, binid}; + } + + /** + ** Read the bin ID from the given bin's metadata. + */ + public Object readBinMetaID(Object binmeta) { + return (binmeta instanceof Object[])? ((Object[])binmeta)[1]: binmeta; + } + + /** + ** Return the weight of the given element. + */ + abstract public int weigh(T element); + + /** + ** Read the bin ID from the given metadata. + */ + public Object readMetaID(Object meta) { + return ((BinInfo)meta).id; + } + + /** + ** Read the bin weight from the given metadata. + */ + public int readMetaWeight(Object meta) { + return ((BinInfo)meta).weight; + } + + /** + ** Construct the metadata for the given bin ID and weight. + */ + public Object makeMeta(Object id, int weight) { + return new BinInfo(id, weight); + } + + } + +} diff --git a/src/main/java/plugins/Library/io/serial/ParallelSerialiser.java b/src/main/java/plugins/Library/io/serial/ParallelSerialiser.java new file mode 100644 index 00000000..f59cd10a --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/ParallelSerialiser.java @@ -0,0 +1,271 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.TaskAbortExceptionConvertor; +import plugins.Library.util.concurrent.Scheduler; +import plugins.Library.util.concurrent.ObjectProcessor; +import plugins.Library.util.concurrent.Executors; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.exec.TaskInProgressException; +import plugins.Library.util.exec.TaskCompleteException; +import plugins.Library.util.func.SafeClosure; +import static plugins.Library.util.func.Tuples.X2; // also imports the class + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.ConcurrentMap; + +/** +** An {@link IterableSerialiser} that uses threads to handle tasks given to it +** in parallel, and keeps track of task progress. +** +** To implement this class, the programmer must implement the {@link +** LiveArchiver#pullLive(Serialiser.PullTask, Progress)} and {@link +** LiveArchiver#pushLive(Serialiser.PushTask, Progress)} methods. +** +** DOCUMENT (rewritten) +** +** @author infinity0 +*/ +public abstract class ParallelSerialiser +implements IterableSerialiser, + ScheduledSerialiser, + LiveArchiver, + Serialiser.Trackable { + + final static protected ThreadPoolExecutor exec = new ThreadPoolExecutor( + 0, 0x40, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), + new ThreadPoolExecutor.CallerRunsPolicy() // easier than catching RejectedExecutionException, if it ever occurs + ); + + final protected ProgressTracker tracker; + + public ParallelSerialiser(ProgressTracker k) { + if (k == null) { + throw new IllegalArgumentException("ParallelSerialiser must have a progress tracker."); + } + tracker = k; + } + + // return ? extends Progress so as to hide the implementation details of P + /*@Override**/ public ProgressTracker getTracker() { + return tracker; + } + + protected Runnable createJoinRunnable(final K task, final TaskInProgressException e, final SafeClosure> post) { + return new Runnable() { + public void run() { + TaskAbortException ex; + try { + Progress p = e.getProgress(); + p.join(); + ex = new TaskCompleteException("Task complete: " + p.getSubject()); + } catch (InterruptedException e) { + ex = new TaskAbortException("Progress join was interrupted", e, true); + } catch (RuntimeException e) { + ex = new TaskAbortException("failed", e); + } catch (TaskAbortException a) { + ex = a; + } + if (post != null) { post.invoke(X2(task, ex)); } + } + }; + } + + /** + ** DOCUMENT. + ** + ** If {@code post} is {@code null}, it is assumed that {@link Progress} + ** objects are created by the thread that calls this method. Failure to do + ** this '''will result in deadlock'''. + ** + ** Otherwise, this method will create {@link Progress} objects for each + ** task. If they are created before this, '''deadlock will result''' as the + ** handler waits for that progress to complete. + */ + protected Runnable createPullJob(final PullTask task, final SafeClosure, TaskAbortException>> post) { + try { + final P prog = (post != null)? 
tracker.addPullProgress(task): tracker.getPullProgress(task); + return new Runnable() { + public void run() { + TaskAbortException ex = null; + try { pullLive(task, prog); } + catch (RuntimeException e) { ex = new TaskAbortException("failed", e); } + catch (TaskAbortException e) { ex = e; } + if (post != null) { post.invoke(X2(task, ex)); } + // FIXME OPT NORM Two-phase pull (see FreenetArchiver.pull comments): + // All we'd need to do is: + // Bucket = pullFirst(task, prog) + // Take semaphore (purpose is to limit the number of decoded stuff in memory which is not yet wanted) + // pullSecond(task, prog, bucket) + // Call post-process + // Release semaphore + } + }; + } catch (final TaskInProgressException e) { + return createJoinRunnable(task, e, post); + } + } + + /** + ** DOCUMENT. + ** + ** If {@code post} is {@code null}, it is assumed that {@link Progress} + ** objects are created by the thread that calls this method. Failure to do + ** this '''will result in deadlock'''. + ** + ** Otherwise, this method will create {@link Progress} objects for each + ** task. If they are created before this, '''deadlock will result''' as the + ** handler waits for that progress to complete. + */ + protected Runnable createPushJob(final PushTask task, final SafeClosure, TaskAbortException>> post) { + try { + final P prog = (post != null)? tracker.addPushProgress(task): tracker.getPushProgress(task); + return new Runnable() { + public void run() { + TaskAbortException ex = null; + try { pushLive(task, prog); } + catch (RuntimeException e) { ex = new TaskAbortException("failed", e); } + catch (TaskAbortException e) { ex = e; } + if (post != null) { post.invoke(X2(task, ex)); } + } + }; + } catch (final TaskInProgressException e) { + return createJoinRunnable(task, e, post); + } + } + + /*======================================================================== + public interface IterableSerialiser + ========================================================================*/ + + /*@Override**/ public void pull(PullTask task) throws TaskAbortException { + try { + try { + P p = tracker.addPullProgress(task); + exec.execute(createPullJob(task, null)); + p.join(); + } catch (TaskInProgressException e) { + throw e.join(); + } + } catch (InterruptedException e) { + throw new TaskAbortException("Task pull was interrupted", e, true); + } + } + + /*@Override**/ public void push(PushTask task) throws TaskAbortException { + try { + try { + P p = tracker.addPushProgress(task); + exec.execute(createPushJob(task, null)); + p.join(); + } catch (TaskInProgressException e) { + throw e.join(); + } + } catch (InterruptedException e) { + throw new TaskAbortException("Task push was interrupted", e, true); + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation DOCUMENT + */ + /*@Override**/ public void pull(Iterable> tasks) throws TaskAbortException { + List progs = new ArrayList(); + try { + for (Iterator> it = tasks.iterator(); it.hasNext();) { + PullTask task = it.next(); + try { + progs.add(tracker.addPullProgress(task)); + exec.execute(createPullJob(task, null)); + } catch (TaskInProgressException e) { + it.remove(); + progs.add(e.getProgress()); + } + } + for (Progress p: progs) { p.join(); } + // TODO NORM: toad - if it fails, we won't necessarily know until all of the + // other tasks have completed ... is this acceptable? 
+
+		} catch (InterruptedException e) {
+			throw new TaskAbortException("ParallelSerialiser pull was interrupted", e, true);
+		}
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation DOCUMENT
+	*/
+	/*@Override**/ public void push(Iterable> tasks) throws TaskAbortException {
+		List progs = new ArrayList();
+		try {
+			for (Iterator> it = tasks.iterator(); it.hasNext();) {
+				PushTask task = it.next();
+				try {
+					progs.add(tracker.addPushProgress(task));
+					exec.execute(createPushJob(task, null));
+				} catch (TaskInProgressException e) {
+					it.remove();
+					progs.add(e.getProgress());
+				}
+			}
+			for (Progress p: progs) { p.join(); }
+			// TODO NORM: toad - if it fails, we won't necessarily know until all of the
+			// other tasks have completed ... is this acceptable?
+
+		} catch (InterruptedException e) {
+			throw new TaskAbortException("ParallelSerialiser push was interrupted", e, true);
+		}
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation DOCUMENT
+	*/
+	/*@Override**/ public ObjectProcessor, E, TaskAbortException> pullSchedule(
+		BlockingQueue> input,
+		BlockingQueue, TaskAbortException>> output,
+		Map, E> deposit
+	) {
+		return new ObjectProcessor, E, TaskAbortException>(input, output, deposit, null, Executors.DEFAULT_EXECUTOR, new TaskAbortExceptionConvertor()) {
+			@Override protected Runnable createJobFor(PullTask task) {
+				return createPullJob(task, postProcess);
+			}
+		}.autostart();
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation DOCUMENT
+	*/
+	/*@Override**/ public ObjectProcessor, E, TaskAbortException> pushSchedule(
+		BlockingQueue> input,
+		BlockingQueue, TaskAbortException>> output,
+		Map, E> deposit
+	) {
+		return new ObjectProcessor, E, TaskAbortException>(input, output, deposit, null, Executors.DEFAULT_EXECUTOR, new TaskAbortExceptionConvertor()) {
+			@Override protected Runnable createJobFor(PushTask task) {
+				return createPushJob(task, postProcess);
+			}
+		}.autostart();
+	}
+
+}
diff --git a/src/main/java/plugins/Library/io/serial/ProgressTracker.java b/src/main/java/plugins/Library/io/serial/ProgressTracker.java
new file mode 100644
index 00000000..8769193c
--- /dev/null
+++ b/src/main/java/plugins/Library/io/serial/ProgressTracker.java
@@ -0,0 +1,255 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io.serial;
+
+import plugins.Library.io.serial.Serialiser.*;
+import plugins.Library.util.exec.Progress;
+import plugins.Library.util.exec.TaskInProgressException;
+import plugins.Library.util.CompositeIterable;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.WeakHashMap;
+
+/**
+** Keeps track of a task's progress and provides methods to retrieve this data.
+** For this to function properly, the data/meta for push/pull tasks MUST NOT
+** be modified for as long as the task is in operation. This is because their
+** {@link #equals(Object)} method is defined in terms of those fields, so any
+** change to them would interfere with {@link WeakHashMap}'s ability to locate
+** the tasks within the map.
+**
+** TODO HIGH make those fields (PullTask.meta / PushTask.data) final? would
+** involve removing Task class.
+**
+** The progress objects are stored in a {@link WeakHashMap} and are freed
+** automatically by the garbage collector if their corresponding tasks go out
+** of scope '''and''' no other strong references to the progress objects exist.
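+**
+** A typical use might look like this (a sketch; {@code MyProgress} stands
+** for any {@link Progress} implementation with a public nullary constructor,
+** and {@code meta} for some task metadata):
+**
+**   ProgressTracker<String, MyProgress> tracker =
+**       new ProgressTracker<String, MyProgress>(MyProgress.class);
+**   PullTask<String> task = new PullTask<String>(meta);
+**   MyProgress p = tracker.addPullProgress(task);
+**   // throws TaskInProgressException if the task is already tracked;
+**   // later, possibly from another thread:
+**   Progress q = tracker.getPullProgressFor(meta);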
+**
+** @author infinity0
+*/
+public class ProgressTracker {
+
+	/**
+	** Keeps track of the progress of each {@link PullTask}. The key is the
+	** metadata of the task.
+	*/
+	final protected WeakHashMap, P> pullProgress = new WeakHashMap, P>();
+
+	/**
+	** Keeps track of the progress of each {@link PushTask}. The key is the
+	** data of the task.
+	*/
+	final protected WeakHashMap, P> pushProgress = new WeakHashMap, P>();
+
+	/**
+	** The class used to instantiate new progress objects.
+	*/
+	final protected Class progressClass;
+
+	/**
+	** DOCUMENT
+	**
+	** @param cc A class with a nullary constructor that is used to instantiate
+	**           new progress objects. This argument may be null if a subclass
+	**           overrides {@link #newProgress()}.
+	*/
+	public ProgressTracker(Class cc) {
+		try {
+			if (cc != null) { cc.newInstance(); }
+		} catch (InstantiationException e) {
+			throw new IllegalArgumentException("Cannot instantiate class. Make sure it has a nullary constructor.", e);
+		} catch (IllegalAccessException e) {
+			throw new IllegalArgumentException("Cannot instantiate class. Make sure you have access to it.", e);
+		}
+		progressClass = cc;
+	}
+
+	protected P newProgress() {
+		if (progressClass == null) {
+			throw new IllegalStateException("ProgressTracker cannot create progress: No class was given to the constructor, but newProgress() was not overridden.");
+		}
+		try {
+			return progressClass.newInstance();
+		} catch (InstantiationException e) {
+			return null; // constructor should prevent this from ever happening, but the compiler bitches.
+		} catch (IllegalAccessException e) {
+			return null; // constructor should prevent this from ever happening, but the compiler bitches.
+		}
+	}
+
+	// TODO NORM there is probably a better way of doing this...
+	//final protected HashSet> pullWaiters = new HashSet>();
+	//final protected HashSet> pushWaiters = new HashSet>();
+
+	/**
+	** Get the progress of a {@link PullTask} by its metadata.
+	**
+	** NOTE: since progress is stored in a {@link WeakHashMap} this means that
+	** if the task has finished, then this will return null. (FIXME NORM)
+	**
+	** This doesn't have to be a major problem - when a task completes, detect
+	** this, and deny further attempts to retrieve the progress.
+	*/
+	public P getPullProgressFor(Object meta/*, boolean block*/) {
+		return getPullProgress(new PullTask(meta)/*, block*/);
+	}
+
+	/**
+	** Get the progress of a {@link PushTask} by its data.
+	**
+	** NOTE: since progress is stored in a {@link WeakHashMap} this means that
+	** if the task has finished, then this will return null. (FIXME NORM)
+	**
+	** This doesn't have to be a major problem - when a task completes, detect
+	** this, and deny further attempts to retrieve the progress.
+	*/
+	public P getPushProgressFor(T data/*, boolean block*/) {
+		return getPushProgress(new PushTask(data)/*, block*/);
+	}
+
+	/**
+	** Get the progress of a {@link PullTask}.
+	*/
+	public P getPullProgress(PullTask task/*, boolean block*/) {
+		synchronized (pullProgress) {
+			return pullProgress.get(task);
+			/*P prog = pullProgress.get(task);
+			if (prog != null || !block) { return prog; }
+
+			pullWaiters.add(task);
+			while ((prog = pullProgress.get(task)) == null) {
+				pullProgress.wait();
+			}
+			return prog;*/
+		}
+	}
+
+	/*public P getPullProgress(PullTask task) {
+		return getPullProgress(task, false);
+	}*/
+
+	/**
+	** Get the progress of a {@link PushTask}.
+ */ + public P getPushProgress(PushTask task/*, boolean block*/) { + synchronized (pushProgress) { + return pushProgress.get(task); + /*P prog = pushProgress.get(task); + if (prog != null || !block) { return prog; } + + pushWaiters.add(task); + while ((prog = pushProgress.get(task)) == null) { + pushProgress.wait(); + } + return prog;*/ + } + } + + /*public P getPushProgress(PushTask task) { + return getPushProgress(task, false); + }*/ + + /** + ** Creates a new pull progress and keeps track of it. If there is already a + ** progress for the metadata, throws {@link TaskInProgressException}. This + ** ensures that the object returned from this method has not been seen by + ** any other threads. + ** + ** @throws TaskInProgressException + */ + public P addPullProgress(PullTask task) throws TaskInProgressException { + synchronized (pullProgress) { + P p = pullProgress.get(task); + if (p != null) { throw new TaskInProgressException(p); } + pullProgress.put(task, p = newProgress()); + + /*if (pullWaiters.contains(task)) { + pullProgress.notifyAll(); + }*/ + return p; + } + } + + /** + ** Creates a new push progress and keeps track of it. If there is already a + ** progress for the metadata, throws {@link TaskInProgressException}. This + ** ensures that the object returned from this method has not been seen by + ** any other threads. + ** + ** @throws TaskInProgressException + */ + public P addPushProgress(PushTask task) throws TaskInProgressException { + synchronized (pushProgress) { + P p = pushProgress.get(task); + if (p != null) { throw new TaskInProgressException(p); } + pushProgress.put(task, p = newProgress()); + + /*if (pushWaiters.remove(task)) { + pushProgress.notifyAll(); + }*/ + return p; + } + } + + + /** + ** Creates an iterable over the {@link Progress} objects corresponding to + ** the given pull tasks, all tracked by the given tracker. + ** + ** @param tracker The single tracker for all the ids + ** @param tasks The tasks to track + */ + public static Iterable

makePullProgressIterable(final ProgressTracker tracker, final Iterable> tasks) { + return new CompositeIterable, P>(tasks, true) { + @Override public P nextFor(PullTask next) { + return tracker.getPullProgress(next); + } + }; + } + + /** + ** Creates an iterable over the {@link Progress} objects corresponding to + ** the given push tasks, all tracked by the given tracker. + ** + ** @param tracker The single tracker for all the ids + ** @param tasks The tasks to track + */ + public static Iterable

makePushProgressIterable(final ProgressTracker tracker, final Iterable> tasks) { + return new CompositeIterable, P>(tasks, true) { + @Override public P nextFor(PushTask next) { + return tracker.getPushProgress(next); + } + }; + } + + /** + ** Creates an iterable over the {@link Progress} objects corresponding to + ** the given pull tracking ids, each tracked by its own tracker. + ** + ** @param ids Map of tracking ids to their respective trackers + */ + public static Iterable

makePullProgressIterable(final Map, ProgressTracker> ids) { + return new CompositeIterable, ProgressTracker>, P>(ids.entrySet(), true) { + @Override public P nextFor(Map.Entry, ProgressTracker> next) { + return next.getValue().getPullProgress(next.getKey()); + } + }; + } + + /** + ** Creates an iterable over the {@link Progress} objects corresponding to + ** the given push tracking ids, each tracked by its own tracker. + ** + ** @param ids Map of tracking ids to their respective trackers + */ + public static Iterable

makePushProgressIterable(final Map, ProgressTracker> ids) { + return new CompositeIterable, ProgressTracker>, P>(ids.entrySet(), true) { + @Override public P nextFor(Map.Entry, ProgressTracker> next) { + return next.getValue().getPushProgress(next.getKey()); + } + }; + } + +} diff --git a/src/main/java/plugins/Library/io/serial/ScheduledSerialiser.java b/src/main/java/plugins/Library/io/serial/ScheduledSerialiser.java new file mode 100644 index 00000000..09020c8d --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/ScheduledSerialiser.java @@ -0,0 +1,53 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.util.concurrent.Scheduler; +import plugins.Library.util.concurrent.ObjectProcessor; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.func.Tuples.X2; + +import java.util.Map; +import java.util.concurrent.BlockingQueue; + +/** +** An interface for asynchronous task execution. The methods return objects +** for managing and scheduling tasks. +** +** @author infinity0 +*/ +public interface ScheduledSerialiser extends IterableSerialiser { + + /** + ** Creates a {@link ObjectProcessor} for executing {@link PullTask}s, which + ** should be {@linkplain ObjectProcessor#auto()} automatically started. + ** + ** @param input Queue to add task requests to + ** @param output Queue to pop completed tasks from + ** @param deposit Map of tasks to deposits + */ + // TODO LOW public Scheduler pullSchedule( + public ObjectProcessor, E, TaskAbortException> pullSchedule( + BlockingQueue> input, + BlockingQueue, TaskAbortException>> output, + Map, E> deposit + ); + + /** + ** Creates a {@link ObjectProcessor} for executing {@link PushTask}s, which + ** should be {@linkplain ObjectProcessor#auto()} automatically started. + ** + ** @param input Queue to add task requests to + ** @param output Queue to pop completed tasks from + ** @param deposit Map of tasks to deposits + */ + // TODO LOW public Scheduler pushSchedule( + public ObjectProcessor, E, TaskAbortException> pushSchedule( + BlockingQueue> input, + BlockingQueue, TaskAbortException>> output, + Map, E> deposit + ); + +} diff --git a/src/main/java/plugins/Library/io/serial/Serialiser.java b/src/main/java/plugins/Library/io/serial/Serialiser.java new file mode 100644 index 00000000..736d41cf --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/Serialiser.java @@ -0,0 +1,161 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.util.exec.Progress; + +import java.util.Collection; +import java.util.Map; + +/** +** An empty marker interface for serialisation classes. It defines some nested +** subclasses that acts as a unified interface between these classes. +** +** Nested data structures such as {@link plugins.Library.util.SkeletonBTreeMap} +** will generally use the metadata of their children directly (ie. without +** using a {@link Translator} during a serialisation operation. Therefore, it +** is recommended that all metadata passing through this class (and related +** classes) be directly serialisable by an {@link Archiver}. 
This will make the +** task of writing {@link Translator}s much easier, maybe even unnecessary. +** +** The recommended way to ensure this is for it to be either: a primitive or +** Object form of a primitive; an Array, {@link Collection}, or {@link Map}, +** where the elements are also serialisable as defined here; or a Java Bean. +** +** TODO NORM restructure this class, maybe split off into a Serialisers class, +** or a serial.serialiser package... +** +** @author infinity0 +*/ +public interface Serialiser { + + /************************************************************************ + ** Defines a serialisation task for an object. Contains two fields - data + ** and metadata. + */ + abstract public static class Task { + + /** + ** Field for the metadata. This should be serialisable as defined in the + ** description for {@link Serialiser}. + */ + public Object meta = null; + + /** + ** Field for the data. + */ + public T data = null; + + } + + /************************************************************************ + ** Defines a pull task: given some metadata, the task is to retrieve the + ** data for this metadata, possibly updating the metadata in the process. + ** + ** For any single {@code PullTask}, the metadata should uniquely determine + ** what data will be generated during a pull operation, independent of any + ** information from other {@code PullTasks}, even if they are in the same + ** group of tasks given to a compound {@link Serialiser} such as {@link + ** MapSerialiser}. (The converse is not required for {@link PushTask}s.) + ** + ** Consequently, {@code Serialiser}s should be implemented so that its + ** push operations generate metadata for which this property holds. + */ + final public static class PullTask extends Task { + + public PullTask(Object m) { + if (m == null) { throw new IllegalArgumentException("Cowardly refusing to make a PullTask with null metadata."); } + meta = m; + } + + @Override public boolean equals(Object o) { + if (!(o instanceof PullTask)) { return false; } + return meta == ((PullTask)o).meta; + } + + @Override public int hashCode() { + return System.identityHashCode(meta); + } + + } + + /************************************************************************ + ** Defines a push task: given some data and optional metadata, the task is + ** to archive this data and generate the metadata for it in the process. + ** + ** For any single {@code PushTask}, the data may or may not uniquely + ** determine the metadata that will be generated for it during the push + ** operation. For example, compound {@link Serialiser}s that work on groups + ** of tasks may use information from the whole group in order to optimise + ** the operation. (The converse is required for {@link PullTask}s.) + ** + ** Consequently, {@code Serialiser}s may require that extra data is passed + ** to its push operations such that it can... + */ + final public static class PushTask extends Task { + + public PushTask(T d) { + if (d == null) { throw new IllegalArgumentException("Cowardly refusing to make a PushTask with null data."); } + data = d; + } + + public PushTask(T d, Object m) { + // d == null && m != null is allowed in some cases, e.g. MapSerialiser allows us to have some stuff already serialised. 
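+			// (For example, Packer.initialiseBinSet() in this patch is given
+			// PushTasks with null data and a non-null BinInfo meta for elements
+			// that are already packed into a bin but not currently loaded.)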
+ if (d == null && m == null) { throw new IllegalArgumentException("Cowardly refusing to make a PushTask with null data."); } + data = d; + meta = m; + } + + @Override public boolean equals(Object o) { + if (!(o instanceof PushTask)) { return false; } + return data == ((PushTask)o).data; + } + + @Override public int hashCode() { + return System.identityHashCode(data); + } + + } + + /** + ** Represents a serialiser that exposes the progress status of its + ** running operations via a {@link ProgressTracker}. + */ + public interface Trackable extends Serialiser { + + public ProgressTracker getTracker(); + + } + + /** + ** Represents a serialiser which uses a {@link Translator} to do much of + ** its work. This can be used alongside a {@link Serialiser.Composite}. + ** + ** TODO LOW rename this to something better... + */ + public interface Translate extends Serialiser { + + public Translator getTranslator(); + + } + + /** + ** Represents a serialisation process which is divided up into several + ** parts. The basic premise is to convert the target object into another + ** type of object, which can be handled by an already existing {@link + ** Serialiser} (which may also be composite). + ** + ** The conversion can be handled by a {@link Translator}, in which case the + ** class might also implement {@link Serialiser.Translate}. + ** + ** TODO HIGH find a tidy way to make this extend Serialiser + ** "Composite> extends Serialiser" will work... + */ + public interface Composite> /*extends Serialiser*/ { + + public S getChildSerialiser(); + + } + +} diff --git a/src/main/java/plugins/Library/io/serial/Translator.java b/src/main/java/plugins/Library/io/serial/Translator.java new file mode 100644 index 00000000..a6ffd35a --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/Translator.java @@ -0,0 +1,44 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.io.serial; + +import plugins.Library.io.DataFormatException; + +/** +** A class that translates an object into one of another type. Used mostly in +** conjunction with an {@link Serialiser} that can take objects of the latter +** type but not the former type. +** +** @author infinity0 +*/ +public interface Translator { + + /** + ** Apply the translation. + ** + ** Implementations of this method which act on a recursive data structure + ** that is designed to be partially loaded (such as + ** {@link plugins.Library.util.SkeletonMap}), should follow the same + ** guidelines as in {@link Archiver#push(Serialiser.PushTask)}, + ** particularly with regards to throwing {@link IllegalArgumentException}. + ** + ** TODO LOW maybe this could throw {@link DataFormatException} like {@link + ** #rev(Object)}? (probably no point...) + */ + I app(T translatee);// throws DataFormatException; + + /** + ** Reverse the translation. + ** + ** Implementations of this method which act on a recursive data structure + ** that is designed to be partially loaded (such as + ** {@link plugins.Library.util.SkeletonMap}), should follow the same + ** guidelines as in {@link Archiver#pull(Serialiser.PullTask)}. + ** + ** @throws DataFormatException if some aspect of the input prevents the + ** output from being constructed. 
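	**
	** For example, a trivial translator between {@code Integer} and its
	** decimal {@code String} form might look like this (a sketch; the
	** DataFormatException constructor arguments shown are illustrative):
	**
	**   Translator<Integer, String> t = new Translator<Integer, String>() {
	**       public String app(Integer n) { return n.toString(); }
	**       public Integer rev(String s) throws DataFormatException {
	**           try { return Integer.valueOf(s); }
	**           catch (NumberFormatException e) {
	**               throw new DataFormatException("not an integer", e, s);
	**           }
	**       }
	**   };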
+ */ + T rev(I intermediate) throws DataFormatException; + +} diff --git a/src/main/java/plugins/Library/io/serial/package-info.java b/src/main/java/plugins/Library/io/serial/package-info.java new file mode 100644 index 00000000..5bafbe64 --- /dev/null +++ b/src/main/java/plugins/Library/io/serial/package-info.java @@ -0,0 +1,11 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +/** +** Contains the serialisation engine and related classes and interfaces, such +** as {@link plugins.Library.io.serial.ProgressTracker} and {@link +** plugins.Library.io.serial.Translator} +** +** @author infinity0 +*/ +package plugins.Library.io.serial; diff --git a/src/main/java/plugins/Library/package-info.java b/src/main/java/plugins/Library/package-info.java new file mode 100644 index 00000000..fc42a0c2 --- /dev/null +++ b/src/main/java/plugins/Library/package-info.java @@ -0,0 +1,13 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +/** +** Contains interfaces and the main Library class that holds them together. +** The index package and its subpackages contain implementations of these +** interfaces. +** +** @author MikeB +** @author infinity0 +*/ +package plugins.Library; + diff --git a/src/main/java/plugins/Library/search/InvalidSearchException.java b/src/main/java/plugins/Library/search/InvalidSearchException.java new file mode 100644 index 00000000..191e811a --- /dev/null +++ b/src/main/java/plugins/Library/search/InvalidSearchException.java @@ -0,0 +1,19 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ + +package plugins.Library.search; + +/** + * Exception when something is wrong with search parameters but nothing else + * @author MikeB + */ +public class InvalidSearchException extends Exception{ + public InvalidSearchException(String s){ + super (s); + } + + public InvalidSearchException(String string, Exception ex) { + super(string, ex); + } +} diff --git a/src/main/java/plugins/Library/search/ResultSet.java b/src/main/java/plugins/Library/search/ResultSet.java new file mode 100644 index 00000000..b9df2990 --- /dev/null +++ b/src/main/java/plugins/Library/search/ResultSet.java @@ -0,0 +1,511 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.search; + +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermIndexEntry; +import plugins.Library.index.TermTermEntry; +import plugins.Library.index.TermPageEntry; +import plugins.Library.util.exec.Execution; +import plugins.Library.util.exec.TaskAbortException; + +import freenet.support.Logger; + +import java.util.Iterator; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.HashMap; + +/** + * Unmodifiable Set which makes sure all data in results being combined is + * retained and is created as the result of Set operations on other Sets. Does + * not contain references to the original Sets. + *

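+ * For example (an illustrative sketch): an INTERSECTION of {apple, banana}
+ * and {banana, cherry} yields {banana}, with banana's relevance averaged
+ * over the two subresults.
+ *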
+ * The set isn't finalised until a run is finished and isDone() is true.
+ *
+ * @author MikeB
+ */
+public class ResultSet implements Set<TermEntry>, Runnable {
+    private ResultOperation resultOperation;
+    private boolean done = false;
+    private Set<TermEntry>[] subresults;
+    private RuntimeException exception;
+
+    /**
+     * What should be done with the results of the subsearches:
+     * INTERSECTION : ( >2 subRequests) result is the results common to all subsearches
+     * UNION : ( >2 subRequests) result is all results of the subsearches
+     * REMOVE : ( 2 subRequests) result is the results of the first subsearch with the second's results removed
+     * PHRASE : ( >2 subRequests) for a phrase; the subsearches are in the order of the words in the phrase
+     * SINGLE : ( 1 subRequest) to encapsulate a single request
+     * DIFFERENTINDEXES : ( >2 subRequests) for a search made up of requests on multiple indexes
+     */
+    public enum ResultOperation{INTERSECTION, UNION, REMOVE, PHRASE, SINGLE, DIFFERENTINDEXES};
+
+    private HashMap<TermEntry, TermEntry> internal;
+    private final String subject;
+    private final boolean ignoreTAEs;
+
+    /**
+     * @param subject The subject for each of the entries in the Set
+     * @param resultOperation {@link ResultOperation} to be performed on each of the given Sets
+     * @param subRequests List of subrequests whose getResult() sets are combined
+     * @throws TaskAbortException if thrown from a subrequest's getResult() method
+     *
+     * TODO reevaluate relevance for all combinations, and find a way to calculate relevance of phrases
+     */
+    ResultSet(String subject, ResultOperation resultOperation, List<Execution<Set<TermEntry>>> subRequests, boolean ignoreTAEs) throws TaskAbortException {
+        if(resultOperation==ResultOperation.SINGLE && subRequests.size()!=1)
+            throw new IllegalArgumentException(subRequests.size() + " requests supplied with SINGLE operation");
+        if(resultOperation==ResultOperation.REMOVE && subRequests.size()!=2)
+            throw new IllegalArgumentException("Negative operations can only have 2 parameters");
+        if( ( resultOperation==ResultOperation.PHRASE
+                || resultOperation == ResultOperation.INTERSECTION
+                || resultOperation == ResultOperation.UNION
+                || resultOperation == ResultOperation.DIFFERENTINDEXES )
+                && subRequests.size()<2)
+            throw new IllegalArgumentException(resultOperation.toString() + " operations need more than one term");
+
+
+        this.subject = subject;
+        internal = new HashMap<TermEntry, TermEntry>();
+        this.resultOperation = resultOperation;
+
+        // Make sure any TaskAbortExceptions are found here and not when it's run
+        this.ignoreTAEs = ignoreTAEs;
+        subresults = getResultSets(subRequests, ignoreTAEs);
+    }
+
+    public synchronized void run() {
+        if(done)
+            throw new IllegalStateException("This ResultSet has already run and is finalised.");
+
+        try{
+            // Decide what to do
+            switch(resultOperation){
+                case SINGLE: // Just add everything
+                    addAllToEmptyInternal(subresults[0]);
+                    break;
+                case DIFFERENTINDEXES: // Same as UNION currently
+                case UNION: // Add every one without overwriting
+                    unite(subresults);
+                    break;
+                case INTERSECTION: // Add one then retain the others
+                    intersect(subresults);
+                    break;
+                case REMOVE: // Add one then remove the other
+                    exclude(subresults[0], subresults[1]);
+                    break;
+                case PHRASE:
+                    phrase(subresults);
+                    break;
+            }
+        }catch(RuntimeException e){
+            exception = e; // Exceptions thrown here are stored in case this is being run in a thread, in which case the exception is rethrown from isDone() or iterator()
+            throw e;
+        }
+        subresults = null; // forget the subresults
+        done = true;
+    }
+
+    /**
+     * Copy a collection into a ResultSet
+     * @param subject to change the subject of each entry
+     * @param copy Collection to copy
+     */
+    private ResultSet(String subject, Collection<TermEntry> copy, boolean ignoreTAEs) {
+        this.subject = subject;
+        internal = new HashMap<TermEntry, TermEntry>();
+        this.ignoreTAEs = ignoreTAEs;
+        addAllToEmptyInternal(copy);
+    }
+
+
+    // Set interface methods; these follow the standard contract
+
+    public int size() {
+        return internal.size();
+    }
+
+    public boolean isEmpty() {
+        return internal.isEmpty();
+    }
+
+    public boolean contains(Object o) {
+        return internal.containsKey(o);
+    }
+
+    /**
+     * @return Iterator over the Set; remove is unsupported
+     * @throws RuntimeException if a RuntimeException was caught while generating the Set
+     */
+    public Iterator<TermEntry> iterator() {
+        if(exception != null)
+            throw new RuntimeException("RuntimeException thrown in ResultSet thread", exception);
+        return new ResultIterator(internal.keySet().iterator());
+    }
+
+    public Object[] toArray() {
+        return internal.keySet().toArray();
+    }
+
+    public <T> T[] toArray(T[] a) {
+        return internal.keySet().toArray(a);
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public boolean add(TermEntry e) {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public boolean remove(Object o) {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+    public boolean containsAll(Collection<?> c) {
+        return internal.keySet().containsAll(c);
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public boolean addAll(Collection<? extends TermEntry> c) {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public boolean retainAll(Collection<?> c) {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public boolean removeAll(Collection<?> c) {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+    /**
+     * Not supported : Set is unmodifiable
+     * @throws UnsupportedOperationException always
+     */
+    public void clear() {
+        throw new UnsupportedOperationException("Set is unmodifiable.");
+    }
+
+
+
+
+
+    // Private internal functions for changing the set
+    // TODO relevances, extra metadata, check copy constructors
+
+    private void addInternal(TermEntry entry) {
+        internal.put(entry, entry);
+    }
+
+    /**
+     * Add all entries in result to the internal Set, each converted to this
+     * ResultSet's subject, overwriting any previous mappings. If the Set
+     * already has anything inside it you should use addAllInternal below.
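+     * For instance (illustrative): with subject "apple banana", a
+     * TermTermEntry("apple", 0.5f, "fruit") taken from a subresult is re-keyed
+     * as TermTermEntry("apple banana", 0.5f, "fruit") before being added.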
+     * @param result
+     */
+    private void addAllToEmptyInternal(Collection<TermEntry> result) {
+        for (TermEntry termEntry : result) {
+            TermEntry entry = convertEntry(termEntry);
+            addInternal(entry);
+        }
+    }
+
+    /**
+     * Add all entries which are in the first collection but not in the second
+     */
+    private void exclude(Collection<TermEntry> add, Collection<TermEntry> subtract) {
+        for (TermEntry termEntry : add){
+            if(getIgnoreSubject(termEntry, subtract)==null)
+                addInternal(termEntry);
+        }
+    }
+
+    /**
+     * Iterate over all the collections, adding and merging all their entries
+     * @param collections collections to be merged into this one
+     * TODO proper relevance calculation here; currently the relevance of the
+     * first collection added probably carries less weight than the others.
+     * The other three operation types are more important anyway.
+     */
+    private void unite(Collection<TermEntry>... collections) {
+        for(Collection<TermEntry> c : collections)
+            if(c==null)
+                Logger.error(this, "the result was null");
+            else
+                for (TermEntry termEntry : c) {
+                    TermEntry entry = convertEntry(termEntry);
+                    if(contains(entry))
+                        addInternal(mergeEntries(internal.get(entry), entry));
+                    else
+                        addInternal(entry);
+                }
+    }
+
+    /**
+     * Iterate over the first collection, adding those elements which exist in all the other collections
+     * @param collections the collections to intersect
+     */
+    private void intersect(Collection<TermEntry>... collections) {
+        Collection<TermEntry> firstCollection = collections[0];
+        // Iterate over it
+        for (Iterator<TermEntry> it = firstCollection.iterator(); it.hasNext();) {
+            TermEntry termEntry = it.next();
+            // if this term entry is contained in all the other collections, add it
+            float combinedrelevance = termEntry.rel;
+
+            int i;
+            for (i = 1; i < collections.length; i++) {
+                Collection<TermEntry> collection = collections[i];
+                // See if collection contains termEntry
+
+                TermEntry termEntry2 = getIgnoreSubject(termEntry, collection);
+                if ( termEntry2 == null )
+                    break;
+                else // add to combined relevance
+                    combinedrelevance += termEntry2.rel;
+            }
+            if (i==collections.length){
+                TermEntry newEntry = convertEntry(termEntry, combinedrelevance/collections.length);
+                addInternal(newEntry);
+            }
+        }
+    }
+
+    /**
+     * Iterate over the first collection and the term positions of each entry,
+     * keeping those positions which are followed in the other collections, and
+     * keep those entries which still have positions remaining after this process
+     * @param collections
+     */
+    private void phrase(Collection<TermEntry>... 
collections) { + Collection firstCollection = collections[0]; + // Iterate over it + for (TermEntry termEntry : firstCollection) { + if(!(termEntry instanceof TermPageEntry)) + continue; + // if term entry is followed in all the others, add it to this + TermPageEntry termPageEntry = (TermPageEntry)termEntry; + if(!termPageEntry.hasPositions()) + continue; + Map positions = new HashMap(termPageEntry.positionsMap()); + + int i; // Iterate over the other collections, checking for following + for (i = 1; positions != null && i < collections.length && positions.size() > 0; i++) { + Collection collection = collections[i]; + if(collection == null) + continue; // Treat stop words as blanks, dont check + // See if collection follows termEntry + TermPageEntry termPageEntry1 = (TermPageEntry)getIgnoreSubject(termPageEntry, collection); + if(termPageEntry1==null || !termPageEntry1.hasPositions()) // If collection doesnt contain this termpageentry or has not positions, it does not follow + positions = null; + else{ + for (Iterator it = positions.keySet().iterator(); it.hasNext();) { + int posi = it.next(); + if ( !termPageEntry1.hasPosition(posi+i)) + it.remove(); + else + Logger.minor(this, termPageEntry.page + "["+positions.keySet()+"] is followed by "+termPageEntry1.page+"["+termPageEntry1.positions()+"] +"+i); + } + } + } + // if this termentry has any positions remaining, add it + if(positions != null && positions.size() > 0) + addInternal(new TermPageEntry(subject, termPageEntry.rel, termPageEntry.page, termPageEntry.title, positions)); + } + } + + private TermEntry convertEntry(TermEntry termEntry) { + return convertEntry(termEntry, termEntry.rel); + } + + private TermEntry convertEntry(TermEntry termEntry, float rel) { + TermEntry entry; + if (termEntry instanceof TermTermEntry) + entry = new TermTermEntry(subject, rel, ((TermTermEntry)termEntry).term ); + else if (termEntry instanceof TermPageEntry) + entry = new TermPageEntry(subject, rel, ((TermPageEntry)termEntry).page, ((TermPageEntry)termEntry).title, ((TermPageEntry)termEntry).positionsMap() ); + else if (termEntry instanceof TermIndexEntry) + entry = new TermIndexEntry(subject, rel, ((TermIndexEntry)termEntry).index ); + else + throw new UnsupportedOperationException("The TermEntry type " + termEntry.getClass().getName() + " is not currently supported in ResultSet"); + return entry; + } + + + // TODO merge and combine can be cut down + /** + * Merge a group of TermEntries each pair of which(a, b) must be a.equalsTarget + * The new TermEntry created will have the subject of this ResultSet, it + * will try to combine all the optional fields from the TermEntrys being merged, + * with priority being put on those earliest in the arguments + * + * @param entries + * @return The merged entry + */ + private TermEntry mergeEntries(TermEntry... 
entries) { + for (int i = 1; i < entries.length; i++) + if(!entries[0].equalsTarget(entries[i])) + throw new IllegalArgumentException("entries were not equal : "+entries[0].toString()+" & "+entries[i].toString()); + + TermEntry combination = entries[0]; + + // This mess could be replaced by a method in the TermEntrys which returns a new TermEntry with a changed subject + if(combination instanceof TermIndexEntry){ + combination = new TermIndexEntry(subject, entries[0].rel, ((TermIndexEntry)combination).index); + } else if(combination instanceof TermPageEntry){ + combination = new TermPageEntry(subject, entries[0].rel, ((TermPageEntry)combination).page, ((TermPageEntry)combination).title, ((TermPageEntry)combination).positionsMap()); + } else if(combination instanceof TermTermEntry){ + combination = new TermTermEntry(subject, entries[0].rel, ((TermTermEntry)combination).term); + } else + throw new IllegalArgumentException("Unknown type : "+combination.getClass().toString()); + + for (int i = 1; i < entries.length; i++) { + TermEntry termEntry = entries[i]; + combination = combine(combination, termEntry); + } + + return combination; + } + + private TermEntry combine(TermEntry entry1, TermEntry entry2) { + if(!entry1.equalsTarget(entry2)) + throw new IllegalArgumentException("Combine can only be performed on equal TermEntrys"); + + float newRel = entry1.rel / ((entry2.rel == 0)? 1 : 2 ) + entry2.rel / ((entry1.rel == 0)? 1 : 2 ); + + if(entry1 instanceof TermPageEntry){ + TermPageEntry pageentry1 = (TermPageEntry)entry1; + TermPageEntry pageentry2 = (TermPageEntry)entry2; + // Merge positions + Map newPos = null; + if((!pageentry1.hasPositions()) && pageentry2.hasPositions()) + newPos = new HashMap(pageentry2.positionsMap()); + else if(pageentry1.hasPositions()){ + newPos = new HashMap(pageentry1.positionsMap()); + if(pageentry2.hasPositions()) + newPos.putAll(pageentry2.positionsMap()); + } + return new TermPageEntry(pageentry1.subj, newRel, pageentry1.page, (pageentry1.title!=null)?pageentry1.title:pageentry2.title, newPos); + + } else if(entry1 instanceof TermIndexEntry){ + TermIndexEntry castEntry = (TermIndexEntry) entry1; + // nothing to merge, no optional fields except relevance + return new TermIndexEntry(castEntry.subj, newRel, castEntry.index); + + } else if(entry1 instanceof TermTermEntry){ + // nothing to merge, no optional fields except relevance + TermTermEntry castEntry = (TermTermEntry) entry1; + + return new TermTermEntry(castEntry.subj, newRel, castEntry.term); + + } else + throw new UnsupportedOperationException("This type of TermEntry is not yet supported in the combine code : "+entry1.getClass().getName()); + + } + + /** + * Strip all results out of List of requests + * @param subRequests + * @return An array containing the stripped sets + * @throws {@link TaskAbortException} from subRequests' {@link Execution#getResult()} + */ + private Set[] getResultSets(List>> subRequests, boolean ignoreTAEs) throws TaskAbortException{ + Set[] sets = new Set[subRequests.size()]; + int x = 0; + for (int i = 0; i < subRequests.size(); i++) { + if(subRequests.get(i) == null){ + if(resultOperation == ResultOperation.PHRASE) + sets[x++] = null; + else + throw new NullPointerException("Nulls not allowed in subRequests for operations other than phrase."); + } else { + try { + sets[x++] = subRequests.get(i).getResult(); + } catch (TaskAbortException e) { + if(!ignoreTAEs) throw e; + } + } + } + if(x != subRequests.size()) { + Set[] newSets = new Set[x]; + System.arraycopy(sets, 0, newSets, 0, 
newSets.length); + sets = newSets; + } + return sets; + } + + /** + * Gets a TermEntry from the collection which is equal to entry ignoring subject + * @param entry + * @param collection + */ + private TermEntry getIgnoreSubject(TermEntry entry, Collection collection){ + TermEntry result = null; + for (TermEntry termEntry : collection) { + if (entry.equalsTarget(termEntry)){ + result = termEntry; + break; + } + } + return result; + } + + @Override public String toString(){ + return internal.keySet().toString(); + } + + /** + * Returns true if the ResultSet has completed its operations + * @throws RuntimeException if a RuntimeException was caught while generating the Set + */ + public boolean isDone(){ + if(exception != null) + throw new RuntimeException("RuntimeException thrown in ResultSet thread", exception); + return done; + } + + + /** + * Iterator which doesn't allow remove() + */ + class ResultIterator implements Iterator { + Iterator internal; + + private ResultIterator(Iterator iterator) { + internal = iterator; + } + + public boolean hasNext() { + return internal.hasNext(); + } + + public TermEntry next() { + return internal.next(); + } + + public void remove() { + throw new UnsupportedOperationException("Removal not allowed."); + } + + } +} diff --git a/src/main/java/plugins/Library/search/Search.java b/src/main/java/plugins/Library/search/Search.java new file mode 100644 index 00000000..f01a79a9 --- /dev/null +++ b/src/main/java/plugins/Library/search/Search.java @@ -0,0 +1,654 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.search; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.regex.Matcher; + +import plugins.Library.Library; +import plugins.Library.index.TermEntry; +import plugins.Library.search.ResultSet.ResultOperation; +import plugins.Library.ui.ResultNodeGenerator; +import plugins.Library.util.exec.AbstractExecution; +import plugins.Library.util.exec.CompositeProgress; +import plugins.Library.util.exec.Execution; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.ProgressParts; +import plugins.Library.util.exec.TaskAbortException; +import freenet.support.Executor; +import freenet.support.HTMLNode; +import freenet.support.Logger; + +/** + * Performs asynchronous searches over many index or with many terms and search logic + * TODO review documentation + * @author MikeB + */ +public class Search extends AbstractExecution> + implements CompositeProgress, Execution> { + + private static Library library; + private static Executor executor; + + private ResultOperation resultOperation; + + private List>> subsearches; + + private String query; + private String indexURI; + + /** Map of Searches by subject */ + private static HashMap allsearches = new HashMap(); + /** Map of Searches by hashCode */ + private static HashMap searchhashes = new HashMap(); + private ResultSet resultset; + + /** + * Settings for producing result nodes, if true a HTMLNode of the results will be generated after the results are complete which can be accessed via getResultNode() + */ + private boolean formatResult = false; + private boolean htmlgroupusk; + private boolean htmlshowold; 
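+    /** Whether the generated result HTML may use javascript; an assumed meaning, inferred from the corresponding setMakeResultNode() parameter. */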
+ private boolean htmljs; + private ResultNodeGenerator resultNodeGenerator; + private HTMLNode pageEntryNode; + + private enum SearchStatus { Unstarted, Busy, Combining_First, Combining_Last, Formatting, Done }; + private SearchStatus status = SearchStatus.Unstarted; + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(Search.class); + } + + private synchronized static void storeSearch(Search search){ + allsearches.put(search.getSubject(), search); + searchhashes.put(search.hashCode(), search); + } + + private static synchronized void removeSearch(Search search) { + allsearches.remove(search.subject); + searchhashes.remove(search.hashCode()); + } + + /** + * Creates a search for any number of indices, starts and returns the associated Request object + * TODO startSearch with array of indexes + * + * @param search string to be searched + * @param indexuri URI of index(s) to be used + * @return existing Search for this if it exists, new one otherwise or null if query is for a stopword or stop query + * @throws InvalidSearchException if any part of the search is invalid + */ + public static Search startSearch(String search, String indexuri) throws InvalidSearchException, TaskAbortException{ + search = search.toLowerCase(Locale.US).trim(); + if(search.length()==0) + throw new InvalidSearchException("Blank search"); + search = fixCJK(search); + + // See if the same search exists + if (hasSearch(search, indexuri)) + return getSearch(search, indexuri); + + if(logMINOR) Logger.minor(Search.class, "Starting new search for "+search+" in "+indexuri); + + String[] indices = indexuri.split("[ ;]"); + if(indices.length<1 || search.trim().length()<1) + throw new InvalidSearchException("Attempt to start search with no index or terms"); + else if(indices.length==1){ + Search newSearch = splitQuery(search, indexuri); + return newSearch; + }else{ + // create search for multiple terms over multiple indices + ArrayList>> indexrequests = new ArrayList>>(indices.length); + for (String index : indices){ + Search indexsearch = startSearch(search, index); + if(indexsearch==null) + return null; + indexrequests.add(indexsearch); + } + Search newSearch = new Search(search, indexuri, indexrequests, ResultOperation.DIFFERENTINDEXES); + return newSearch; + } + } + + + /** Transform into characters separated by spaces. + * FIXME: I'm sure we could handle this a lot better! */ + private static String fixCJK(String search) { + StringBuffer sb = null; + int offset = 0; + boolean wasCJK = false; + for(;offset < search.length();) { + int character = search.codePointAt(offset); + if(SearchUtil.isCJK(character)) { + if(wasCJK) { + sb.append(' '); // Delimit characters by whitespace so we do an && search. 
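+                    // e.g. (illustrative) "你好" becomes "你 好", which
+                    // splitQuery then treats as an intersection of the two
+                    // single-character terms.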
+ } + if(sb == null) { + sb = new StringBuffer(); + sb.append(search.substring(0, offset)); + } + wasCJK = true; + } else { + wasCJK = false; + } + if(sb != null) + sb.append(Character.toChars(character)); + offset += Character.charCount(character); + } + if(sb != null) + return sb.toString(); + else + return search; + } + + /** + * Creates Search instance depending on the given requests + * + * @param query the query this instance is being used for, only for reference + * @param indexURI the index uri this search is made on, only for reference + * @param requests subRequests of this search + * @param resultOperation Which set operation to do on the results of the subrequests + * @throws InvalidSearchException if the search is invalid + **/ + private Search(String query, String indexURI, List>> requests, ResultOperation resultOperation) + throws InvalidSearchException{ + super(makeString(query, indexURI)); + if(resultOperation==ResultOperation.SINGLE && requests.size()!=1) + throw new InvalidSearchException(requests.size() + " requests supplied with SINGLE operation"); + if(resultOperation==ResultOperation.REMOVE && requests.size()!=2) + throw new InvalidSearchException("Negative operations can only have 2 parameters"); + if( ( resultOperation==ResultOperation.PHRASE + || resultOperation == ResultOperation.INTERSECTION + || resultOperation == ResultOperation.UNION + || resultOperation == ResultOperation.DIFFERENTINDEXES ) + && requests.size()<2) + throw new InvalidSearchException(resultOperation.toString() + " operations need more than one term"); + + query = query.toLowerCase(Locale.US).trim(); + + // Create a temporary list of sub searches then make it unmodifiable + List>> tempsubsearches = new ArrayList>>(); + for (Execution> request : requests) { + if(request != null || resultOperation == ResultOperation.PHRASE) + tempsubsearches.add(request); + else + throw new NullPointerException("Search cannot encapsulate nulls except in the case of a ResultOperation.PHRASE where they are treated as blanks"); + } + subsearches = Collections.unmodifiableList(tempsubsearches); + + this.query = query; + this.indexURI = indexURI; + this.resultOperation = resultOperation; + try { + setStatus(); + } catch (TaskAbortException ex) { + setError(ex); + } + + storeSearch(this); + if(logMINOR) Logger.minor(this, "Created Search object for with subRequests :"+subsearches); + } + + /** + * Encapsulate a request as a Search, only so original query and uri can be stored + * + * @param query the query this instance is being used for, only for reference + * @param indexURI the index uri this search is made on, only for reference + * @param request Request to encapsulate + */ + private Search(String query, String indexURI, Execution> request){ + super(makeString(query, indexURI)); + if(request == null) + throw new NullPointerException("Search cannot encapsulate null (query=\""+query+"\" indexURI=\""+indexURI+"\")"); + query = query.toLowerCase(Locale.US).trim(); + subsearches = Collections.singletonList(request); + + this.query = query; + this.indexURI = indexURI; + this.resultOperation = ResultOperation.SINGLE; + try { + setStatus(); + } catch (TaskAbortException ex) { + setError(ex); + } + storeSearch(this); + } + + + /** + * Splits query into multiple searches, will be used for advanced queries + * @param query search query, can use various different search conventions + * @param indexuri uri for one index + * @return single Search encompassing everything in the query or null if query is a stop word + * @throws 
InvalidSearchException if search query is invalid + */ + private static Search splitQuery(String query, String indexuri) throws InvalidSearchException, TaskAbortException{ + query = query.trim(); + if(query.matches("\\A[\\S&&[^-\"]]*\\Z")){ + // single search term + // return null if stopword + if(SearchUtil.isStopWord(query)) + return null; + Execution> request = library.getIndex(indexuri).getTermEntries(query); + if (request == null) + throw new InvalidSearchException( "Something wrong with query=\""+query+"\" or indexURI=\""+indexuri+"\", maybe something is wrong with the index or it's uri is wrong." ); + return new Search(query, indexuri, request ); + } + + // Make phrase search (hyphen-separated words are also treated as phrases) + if(query.matches("\\A\"[^\"]*\"\\Z") || query.matches("\\A((?:[\\S&&[^-]]+-)+[\\S&&[^-]]+)\\Z")){ + ArrayList>> phrasesearches = new ArrayList>>(); + String[] phrase = query.replaceAll("\"(.*)\"", "$1").split("[\\s-]+"); + if(logMINOR) Logger.minor(Search.class, "Phrase split: "+query); + for (String subquery : phrase){ + Search term = startSearch(subquery, indexuri); + phrasesearches.add(term); + } + // Not really sure how stopwords should be handled in phrases + // currently i'm thinking that they should be treated as blanks + // between other words and ignored in other cases "jesus of nazareth" + // is treated as "jesus nazareth". Whereas "the who" will be + // treated as a stop query as just searching for "who" and purporting + // that the results are representative of "the who" is misleading. + + // this makes sure there are no trailing nulls at the start + while(phrasesearches.size() > 0 && phrasesearches.get(0)==null) + phrasesearches.remove(0); + // this makes sure there are no trailing nulls at the end + while(phrasesearches.size() > 0 && phrasesearches.get(phrasesearches.size()-1)==null) + phrasesearches.remove(phrasesearches.size()-1); + + if(phrasesearches.size()>1) + return new Search(query, indexuri, phrasesearches, ResultOperation.PHRASE); + else + return null; + } + + + + if(logMINOR) Logger.minor(Search.class, "Splitting " + query); + // Remove phrases, place them in arraylist and replace them with references to the arraylist + ArrayList phrases = new ArrayList(); + Matcher nextPhrase = Pattern.compile("\"([^\"]*?)\"").matcher(query); + StringBuffer sb = new StringBuffer(); + while (nextPhrase.find()) + { + String phrase = nextPhrase.group(1); + nextPhrase.appendReplacement(sb, "£"+phrases.size()+"€"); + phrases.add(phrase); + } + nextPhrase.appendTail(sb); + + if(logMINOR) Logger.minor(Search.class, "Phrases removed query: "+sb); + + // Remove the unmatched \" (if any) + String formattedquery = sb.toString().replaceFirst("\"", ""); + if(logMINOR) Logger.minor(Search.class, "Removing the unmatched bracket: "+formattedquery); + + // Treat hyphens as phrases, as they are treated equivalently in spider so this is the most effective way now + nextPhrase = Pattern.compile("((?:[\\S&&[^-]]+-)+[\\S&&[^-]]+)").matcher(formattedquery); + sb.setLength(0); + while (nextPhrase.find()) + { + String phrase = nextPhrase.group(1); + nextPhrase.appendReplacement(sb, "£"+phrases.size()+"€"); + phrases.add(phrase); + } + nextPhrase.appendTail(sb); + + formattedquery = sb.toString(); + if(logMINOR) Logger.minor(Search.class, "Treat hyphenated words as phrases: " + formattedquery); + + // Substitute service symbols. Those which are inside phrases should not be seen as "service" ones. 
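+        // For example (an illustrative trace, not logged output):
+        // "red apple or banana -cake" becomes "red apple||banana -cake"
+        // after the OR pass, "red apple||banana^^(cake)" after the NOT pass,
+        // and "red&&apple||banana^^(cake)" after the AND pass.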
+ formattedquery = formattedquery.replaceAll("\\s+or\\s+", "||"); + if(logMINOR) Logger.minor(Search.class, "OR-subst query : "+formattedquery); + formattedquery = formattedquery.replaceAll("\\s+(?:not\\s*|-)(\\S+)", "^^($1)"); + if(logMINOR) Logger.minor(Search.class, "NOT-subst query : "+formattedquery); + formattedquery = formattedquery.replaceAll("\\s+", "&&"); + if(logMINOR) Logger.minor(Search.class, "AND-subst query : "+formattedquery); + + // Put phrases back in + String[] phraseparts=formattedquery.split("£"); + formattedquery=phraseparts[0]; + for (int i = 1; i < phraseparts.length; i++) { + String string = phraseparts[i]; + if(logMINOR) Logger.minor(Search.class, "replacing phrase "+string.replaceFirst("(\\d+).*", "$1")); + formattedquery += "\""+ phrases.get(Integer.parseInt(string.replaceFirst("(\\d+).*", "$1"))) +"\"" + string.replaceFirst("\\d+€(.*)", "$1"); + } + if(logMINOR) Logger.minor(Search.class, "Phrase back query: "+formattedquery); + + // Make complement search + if (formattedquery.contains("^^(")){ + ArrayList>> complementsearches = new ArrayList>>(); + String[] splitup = formattedquery.split("(\\^\\^\\(|\\))", 3); + Search add = startSearch(splitup[0]+splitup[2], indexuri); + Search subtract = startSearch(splitup[1], indexuri); + if(add==null || subtract == null) + return null; // If 'and' is not to be searched for 'the -john' is not to be searched for, also 'john -the' wouldnt have shown many results anyway + complementsearches.add(add); + complementsearches.add(subtract); + return new Search(query, indexuri, complementsearches, ResultOperation.REMOVE); + } + // Split intersections + if (formattedquery.contains("&&")){ + ArrayList intersectsearches = new ArrayList(); + String[] intersects = formattedquery.split("&&"); + for (String subquery : intersects){ + Search subsearch = startSearch(subquery, indexuri); + if (subsearch != null) // We will assume that searching for 'the big apple' will near enough show the same results as 'big apple', so just ignore 'the' in interseaction + intersectsearches.add(subsearch); + } + switch(intersectsearches.size()){ + case 0: // eg. 'the that' + return null; + case 1 : // eg. 
'cake that' will return a search for 'cake' + return intersectsearches.get(0); + default : + return new Search(query, indexuri, intersectsearches, ResultOperation.INTERSECTION); + } + } + // Split Unions + if (formattedquery.contains("||")){ + ArrayList>> unionsearches = new ArrayList>>(); + String[] unions = formattedquery.split("\\|\\|"); + for (String subquery : unions){ + Search add = startSearch(subquery, indexuri); + if (add == null) // eg a search for 'the or cake' would be almost the same as a search for 'the' and so should be treated as such + return null; + unionsearches.add(add); + } + return new Search(query, indexuri, unionsearches, ResultOperation.UNION); + } + + Logger.error(Search.class, "No split made, "+formattedquery+query); + return null; + } + + + /** + * Sets the parent plugin to be used for logging & plugin api + */ + public static void setup(Library library, Executor executor){ + Search.library = library; + Search.executor = executor; + Search.allsearches = new HashMap(); + } + + /** + * Gets a Search from the Map + * @param search + * @param indexuri + * @return Search or null if not found + */ + public synchronized static Search getSearch(String search, String indexuri){ + if(search==null || indexuri==null) + return null; + search = search.toLowerCase(Locale.US).trim(); + + return allsearches.get(makeString(search, indexuri)); + } + public synchronized static Search getSearch(int searchHash){ + return searchhashes.get(searchHash); + } + + /** + * Looks for a given search in the map of searches + * @param search + * @param indexuri + * @return true if it's found + */ + public static boolean hasSearch(String search, String indexuri){ + if(search==null || indexuri==null) + return false; + search = search.toLowerCase(Locale.US).trim(); + return allsearches.containsKey(makeString(search, indexuri)); + } + + public static boolean hasSearch(int searchHash){ + return searchhashes.containsKey(searchHash); + } + + public static synchronized Map getAllSearches(){ + return Collections.unmodifiableMap(allsearches); + } + + public String getQuery(){ + return query; + } + + public String getIndexURI(){ + return indexURI; + } + + /** + * Creates a string which uniquly identifies this Search object for comparison + * and lookup, wont make false positives but can make false negatives as search and indexuri aren't standardised + * + * @param search + * @param indexuri + */ + public static String makeString(String search, String indexuri){ + return search + "@" + indexuri; + } + + /** + * A descriptive string for logging + */ + @Override + public String toString(){ + return "Search: "+resultOperation+" - " + status + " : "+subject+" : "+subsearches; + } + + /** + * @return List of Progresses this search depends on, it will not return CompositeProgresses + */ + public List getSubProgress(){ + if(logMINOR) Logger.minor(this, toString()); + + if (subsearches == null) + return null; + // Only index splits will allowed as composites + if (resultOperation == ResultOperation.DIFFERENTINDEXES) + return subsearches; + // Everything else is split into leaves + List subprogresses = new ArrayList(); + for (Execution> request : subsearches) { + if(request == null) + continue; + if( request instanceof CompositeProgress && ((CompositeProgress)request).getSubProgress()!=null && ((CompositeProgress) request).getSubProgress().iterator().hasNext()){ + for (Iterator it = ((CompositeProgress)request).getSubProgress().iterator(); it.hasNext();) { + Progress progress1 = it.next(); + 
subprogresses.add(progress1); + } + }else + subprogresses.add(request); + } + return subprogresses; + } + + + /** + * @return true if all are Finished and Result is ready, also stimulates the creation of the result if all subreqquests are complete and the result isn't made + */ + @Override public boolean isDone() throws TaskAbortException{ + setStatus(); + return status == SearchStatus.Done; + } + + /** + * Returns whether the generator has formatted the results + */ + public boolean hasGeneratedResultNode(){ + return pageEntryNode != null; + } + + /** + * After this finishes running, the status of this Search object will be correct, stimulates the creation of the result if all subreqquests are complete and the result isn't made + * @throws plugins.Library.util.exec.TaskAbortException + */ + private synchronized void setStatus() throws TaskAbortException{ + switch (status){ + case Unstarted : // If Unstarted, status -> Busy + status = SearchStatus.Busy; + case Busy : + if(!isSubRequestsComplete()) + for (Execution> request : subsearches) + if(request != null && (!(request instanceof Search) || ((Search)request).status==SearchStatus.Busy)) + return; // If Busy & still waiting for subrequests to complete, status remains Busy + status = SearchStatus.Combining_First; // If Busy and waiting for subrequests to combine, status -> Combining_First + case Combining_First : // for when subrequests are combining + if(!isSubRequestsComplete()) // If combining first and subsearches still haven't completed, remain + return; + // If subrequests have completed start process to combine results + resultset = new ResultSet(subject, resultOperation, subsearches, innerCanFailAndStillComplete()); + if(executor!=null) + executor.execute(resultset, "Library.Search : combining results"); + else + (new Thread(resultset, "Library.Search : combining results")).start(); + status = SearchStatus.Combining_Last; + case Combining_Last : // for when this is combining + if(!resultset.isDone()) + return; // If Combining & combine not finished, status remains as Combining + subsearches = null; // clear the subrequests after they have been combined + // If finished Combining and asked to generate resultnode, start that process + if(formatResult){ + // resultset doesn't exist but subrequests are complete so we can start up a resultset + resultNodeGenerator = new ResultNodeGenerator(resultset, htmlgroupusk, htmlshowold, htmljs); + if(executor!=null) + executor.execute(resultNodeGenerator, "Library.Search : formatting results"); + else + (new Thread(resultNodeGenerator, "Library.Search : formatting results")).start(); + status = SearchStatus.Formatting; // status -> Formatting + }else // If not asked to format output, status -> done + status = SearchStatus.Done; + case Formatting : + if(formatResult){ + // If asked to generate resultnode and still doing that, status remains as Formatting + if(!resultNodeGenerator.isDone()) + return; + // If finished Formatting or not asked to do so, status -> Done + pageEntryNode = resultNodeGenerator.getPageEntryNode(); + resultNodeGenerator = null; + } + status = SearchStatus.Done; + case Done : + // Done , do nothing + } + } + + /** + * @return true if all are Finished, false otherwise + */ + private boolean isSubRequestsComplete() throws TaskAbortException{ + for(Execution> r : subsearches) { + try { + if(r != null && !r.isDone()) + return false; + } catch (TaskAbortException e) { + if(innerCanFailAndStillComplete()) continue; + throw e; + } + } + return true; + } + + + /** + * Return the set 
of results, or null if it is not yet ready.
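+     * Note that the Search is removed from the static maps once its result
+     * has been collected, so a later identical query will start a fresh
+     * Search rather than reuse this one.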
+ * @return Set of TermEntry + */ + @Override public Set getResult() throws TaskAbortException { + try { + if(!isDone()) + return null; + } catch (TaskAbortException e) { + removeSearch(this); + throw e; + } + + removeSearch(this); + Set rs = resultset; + return rs; + } + + public HTMLNode getHTMLNode(){ + try { + if (!isDone() || !formatResult) { + return null; + } + } catch (TaskAbortException ex) { + Logger.error(this, "Error finding out whether this is done", ex); + return null; + } + + removeSearch(this); + HTMLNode pen = pageEntryNode; + pageEntryNode = null; + + return pen; + } + + public synchronized void setMakeResultNode(boolean groupusk, boolean showold, boolean js){ + formatResult = true; + htmlgroupusk = groupusk; + htmlshowold = showold; + htmljs = js; + } + + @Override + public ProgressParts getParts() throws TaskAbortException { + if(subsearches==null) + return ProgressParts.normalise(0, 0); + return ProgressParts.getParts(this.getSubProgress(), ProgressParts.ESTIMATE_UNKNOWN); + } + + @Override + public String getStatus() { + try { + setStatus(); + return status.name(); + } catch (TaskAbortException ex) { + return "Error finding Status"; + } + } + + public boolean isPartiallyDone() { + throw new UnsupportedOperationException("Not supported yet."); + } + + public void remove() { + // FIXME abort the subsearches. + // FIXME in fact this shouldn't be necessary, the TaskAbortException should be propagated upwards??? + // FIXME really we'd want to convert it into a failed status so we could show that one failed, and still show partial results + if(subsearches != null) { + for(Execution> sub : subsearches) { + if(sub instanceof Search) + ((Search)sub).remove(); + } + } + removeSearch(this); + } + + public boolean innerCanFailAndStillComplete() { + switch(resultOperation) { + case DIFFERENTINDEXES: + case UNION: + return true; + } + return false; + } + +} diff --git a/src/main/java/plugins/Library/search/SearchTokenizer.java b/src/main/java/plugins/Library/search/SearchTokenizer.java new file mode 100755 index 00000000..76ef8908 --- /dev/null +++ b/src/main/java/plugins/Library/search/SearchTokenizer.java @@ -0,0 +1,205 @@ +package plugins.Library.search; + +import java.lang.Character.UnicodeBlock; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.Locale; +import static plugins.Library.search.SearchUtil.*; + +/** + * Search Tokenizer + * + * Normalize and tokenize text blocks into words (for latin scripts), + * or single-double-tokens (for CJK). 
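+ * For example (illustrative): "Don't Panic" normalizes and tokenizes to
+ * [don't, panic]; with returnPairs set, a CJK run such as "你好" yields
+ * the tokens 你, 你好, 好.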
+ * + * @author SDiZ + */ +public class SearchTokenizer implements Iterable, Iterator { + private ArrayList mode; + private ArrayList segments; + private int nextPos; + private final boolean returnPairs; + static final int KEEP_NON_LETTER_MIN_CHARS = 3; + static final String allowedMidWord = "'"; + static final String discardIfEndWord = "'"; + + private Iterator cjkTokenizer; + + enum Mode { + UNDEF, LATIN, CJK + }; + + public SearchTokenizer(String text, boolean returnPairs) { + this.returnPairs = returnPairs; + // normalize + text = normalize(text); + + // split code points, filter for letter or digit + final int length = text.length(); + segments = new ArrayList(); + mode = new ArrayList(); + + Mode curMode = Mode.UNDEF; + + StringBuilder sb = new StringBuilder(); + for (int offset = 0; offset < length;) { + final int codepoint = text.codePointAt(offset); + int charCount = Character.charCount(codepoint); + offset += charCount; + + if (Character.isLetterOrDigit(codepoint)) { + boolean isCJK = isCJK(codepoint); + boolean isNum = Character.isDigit(codepoint); + + // add seperator across CJK/latin margin + if (isCJK) { + if (curMode == Mode.LATIN && sb.length() != 0) { + segments.add(sb.toString()); + mode.add(curMode); + sb = new StringBuilder(); + } + curMode = Mode.CJK; + } else if (!isNum) { + if (curMode == Mode.CJK && sb.length() != 0) { + segments.add(sb.toString()); + mode.add(curMode); + sb = new StringBuilder(); + } + curMode = Mode.LATIN; + } + + sb.append(Character.toChars(codepoint)); + } else if (sb.length() != 0) { + boolean passed = false; + if(charCount == 1) { + // Allow apostrophes mid-word. + char c = text.charAt(offset-1); + if(allowedMidWord.indexOf(c) != -1) { + sb.append(c); + passed = true; + } + } + if(!passed) { + // last code point is not 0, add a separator + if(curMode != Mode.UNDEF || sb.length() >= KEEP_NON_LETTER_MIN_CHARS) { + // Words can't end in an apostrophe. + while(sb.length() > 0 && discardIfEndWord.indexOf(sb.charAt(sb.length()-1)) != -1) { + sb.setLength(sb.length()-1); + } + if(sb.length() > 0) { + segments.add(sb.toString()); + mode.add(curMode); + } + } + curMode = Mode.UNDEF; + sb = new StringBuilder(); + } + } + } + + if (sb.length() != 0) { + // Words can't end in an apostrophe. + while(sb.length() > 0 && discardIfEndWord.indexOf(sb.charAt(sb.length()-1)) != -1) { + sb.setLength(sb.length()-1); + } + if(sb.length() > 0) { + segments.add(sb.toString()); + mode.add(curMode); + } + } + } + + public String toString() { + StringBuilder str = new StringBuilder(); + str.append("->["); + + Iterator it = segments.iterator(); + if (it.hasNext()) + str.append(it.next()); + while (it.hasNext()) { + str.append(','); + str.append(it.next()); + } + str.append("]"); + + for (String s : this) { + str.append(';'); + str.append(s); + } + + return str.toString(); + } + + public Iterator iterator() { + return this; + } + + public boolean hasNext() { + return (cjkTokenizer != null && cjkTokenizer.hasNext()) || + (nextPos < segments.size()); + } + + public String next() { + if (cjkTokenizer != null) { + if (cjkTokenizer.hasNext()) + return cjkTokenizer.next(); + cjkTokenizer = null; + } + + Mode curMode = mode.get(nextPos); + String curSeg = segments.get(nextPos); + nextPos++; + + switch (curMode) { + case LATIN: + return curSeg; + + case CJK: + cjkTokenizer = cjkIterator(curSeg, returnPairs); + assert cjkTokenizer.hasNext(); + return cjkTokenizer.next(); + + case UNDEF: + // E.g. a number. We do index these. FIXME should we? Probably yes... 
+ return curSeg; + + default: + assert false; // DOH! + } + return null; + } + + /** Iterate a CJK string. Return characters or characters and pairs of characters. + * @param returnPairs If true, return pairs of characters in between characters: + * C1C2C3C4 -> C1, C1C2, C2, C2C3, C3, C3C4, C4 */ + private Iterator cjkIterator(String cjkText, final boolean returnPairs) { + ArrayList cjkToken = new ArrayList(); + + String lastChar = null; + int length = cjkText.length(); + for (int offset = 0; offset < length;) { + final int codepoint = cjkText.codePointAt(offset); + offset += Character.charCount(codepoint); + + String curChar = new String(Character.toChars(codepoint)); + + if (lastChar != null && returnPairs) + cjkToken.add(lastChar + curChar); + if (isCJK(codepoint)) // skip number embedded in cjk + cjkToken.add(curChar); + lastChar = curChar; + } + + return cjkToken.iterator(); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + + protected String normalize(String text) { + // TODO: JAVA6: normalize to NFKC + // Do upper case first for Turkish and friends + return text.toUpperCase(Locale.US).toLowerCase(Locale.US); + } +} diff --git a/src/main/java/plugins/Library/search/SearchUtil.java b/src/main/java/plugins/Library/search/SearchUtil.java new file mode 100644 index 00000000..65236427 --- /dev/null +++ b/src/main/java/plugins/Library/search/SearchUtil.java @@ -0,0 +1,55 @@ +package plugins.Library.search; + +import java.lang.Character.UnicodeBlock; +import java.util.Arrays; +import java.util.List; + +public class SearchUtil { + public static boolean isCJK(int codePoint) { + UnicodeBlock block = Character.UnicodeBlock.of(codePoint); + return block == UnicodeBlock.CJK_COMPATIBILITY // CJK + || block == UnicodeBlock.CJK_COMPATIBILITY_FORMS // + || block == UnicodeBlock.CJK_COMPATIBILITY_IDEOGRAPHS // + || block == UnicodeBlock.CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT // + || block == UnicodeBlock.CJK_RADICALS_SUPPLEMENT // + || block == UnicodeBlock.CJK_SYMBOLS_AND_PUNCTUATION // + || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS // + || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A // + || block == UnicodeBlock.CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B // + || block == UnicodeBlock.BOPOMOFO // Chinese + || block == UnicodeBlock.BOPOMOFO_EXTENDED // + || block == UnicodeBlock.HANGUL_COMPATIBILITY_JAMO // Korean + || block == UnicodeBlock.HANGUL_JAMO // + || block == UnicodeBlock.HANGUL_SYLLABLES // + || block == UnicodeBlock.KANBUN // Japanese + || block == UnicodeBlock.HIRAGANA // + || block == UnicodeBlock.KANGXI_RADICALS // + || block == UnicodeBlock.KANNADA // + || block == UnicodeBlock.KATAKANA // + || block == UnicodeBlock.KATAKANA_PHONETIC_EXTENSIONS; + } + + private static List stopWords = Arrays.asList(new String[]{ + "the", "and", "that", "have", "for" // English stop words + }); + + public static boolean isStopWord(String word) { + if (stopWords.contains(word)) + return true; + + int len = word.codePointCount(0, word.length()); + if (len < 3 ) { + // too short, is this CJK? 
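+            // A single CJK codepoint is a real word, so never a stop word;
+            // a 2-codepoint token survives only when its second codepoint
+            // is CJK (e.g. a digit followed by a CJK character).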
+ int cp1 = word.codePointAt(0); + if (isCJK(cp1)) + return false; + if (len == 2) { + // maybe digit+CJK, check the second char + int cp2 = word.codePointAt(Character.charCount(cp1)); + return !isCJK(cp2); + } + return true; + } + return false; + } +} diff --git a/src/main/java/plugins/Library/search/inter/IndexQuery.java b/src/main/java/plugins/Library/search/inter/IndexQuery.java new file mode 100644 index 00000000..08c819ec --- /dev/null +++ b/src/main/java/plugins/Library/search/inter/IndexQuery.java @@ -0,0 +1,65 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.search.inter; + +import plugins.Library.Index; + +import java.util.Collections; +import java.util.Set; +import java.util.HashSet; + +/** +** DOCUMENT +** +** @author infinity0 +*/ +public class IndexQuery /* implements Index */ { + + /** + ** Reference to the index. + */ + final public Index index; + + /** + ** Score for this index in the WoT. + */ + final public float WoT_score; + + /** + ** Minimum number of hops this index is from the inner-group of indexes. + */ + protected int WoT_hops; + + /** + ** References to this index that we've seen, from other indexes. + ** + ** TODO decide if this is needed. + */ + protected int WoT_refs; + + /** + ** Mutex for manipulating the below sets. + */ + final protected Object terms_lock = new Object(); + + final protected Set terms_done = new HashSet(); + final protected Set terms_started = new HashSet(); + final protected Set terms_pending; + + public IndexQuery(Index i, float s, Set terms, int hops) { + index = i; + WoT_score = s; + WoT_hops = hops; + terms_pending = new HashSet(terms); + } + + public IndexQuery(Index i, float s, String root_term) { + this(i, s, Collections.singleton(root_term), 0); + } + + public void updateMinimumHops(int h) { + if (h < WoT_hops) { WoT_hops = h; } + } + +} diff --git a/src/main/java/plugins/Library/search/inter/Interdex.java b/src/main/java/plugins/Library/search/inter/Interdex.java new file mode 100644 index 00000000..86226621 --- /dev/null +++ b/src/main/java/plugins/Library/search/inter/Interdex.java @@ -0,0 +1,198 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.search.inter; + +import plugins.Library.index.TermEntry; +import plugins.Library.index.TermIndexEntry; +import plugins.Library.index.TermTermEntry; +import plugins.Library.Index; + +import freenet.keys.FreenetURI; + +import java.util.Set; +import java.util.Map; +import java.util.HashMap; + +/** +** DOCUMENT +** +** @author infinity0 +*/ +public class Interdex /* implements Index */ { + + + + /** + ** Constructs the root set of {@link IndexQuery}s. + */ + protected static Map getRootIndices() { + throw new UnsupportedOperationException("not implemented"); + } + + + + + /** + ** Class to handle a request for a given subject term. + */ + public class InterdexQuery implements Runnable { + + final public String subject; + + /** + ** Table of {@link IndexQuery} objects relevant to this query. + */ + final protected Map queries = new HashMap(); + + /** + ** Table of terms relevant to this query and results retrieved for + ** that particular term. 
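+    ** For example (illustrative): in a query for "tea", this might map both
+    ** "tea" and a discovered related term "chai" to the TermResults gathered
+    ** for each so far.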
+    **
+    */
+    /*final*/ protected Map<String, TermResults> results;
+
+    /**
+    ** Constructs a new InterdexQuery and TODO starts executing it.
+    */
+    public InterdexQuery(String subj) {
+        subject = subj;
+        Map<Index, Float> roots = getRootIndices();
+        for (Map.Entry<Index, Float> en: roots.entrySet()) {
+            Index index = en.getKey();
+            // FreenetURI id = index.reqID;
+            // queries.put(id, new IndexQuery(index, en.getValue(), subj));
+        }
+    }
+
+    /**
+    ** TODO implement.
+    **
+    ** Returns the average perceived relevance of a given term relative to
+    ** a parent subject term. "Perceived" because it uses data from indexes
+    ** in the WoT's nearer rings; "average" because it generates an average
+    ** rating from the individual values encountered.
+    **
+    ** The value is calculated as the weighted mean of all the relevance
+    ** values encountered for this subject-term pair (the weights being the
+    ** normalised trust scores of the indexes which defined the pairing),
+    ** multiplied by three times the fraction of the total trust-mass that
+    ** supplied a rating; this factor is capped at 1.
+    **
+    ** For example, if we have encountered 9 indexes all with equal weight,
+    ** and 2 of them defined rel(S, T) = 0.5, then this method will return
+    ** 0.5 * min(1, 3*2/9) = 1/3.
+    **
+    ** In other words, if less than 1/3 of the total index trust-mass gives
+    ** a relevance rating for a given subject-term pair, the value returned
+    ** by this method will be lower (than it would be if more indexes had
+    ** supplied a rating).
+    **
+    ** We choose 1/3 as an arbitrary compromise between majority-ignorance
+    ** (most people might not know obscure term X is related to obscure
+    ** term Y) and minority-attack (one person might inject information to
+    ** distort a search query; we shouldn't expect WoT to deal with minor
+    ** corner cases). '''This value can be discussed further'''.
+    **
+    ** Formally, let
+    **
+    ** : S := subject
+    ** : T := term
+    ** : A := { all indexes encountered }
+    ** : R := { i ∊ A : i.rel(S, T) is defined }
+    ** : W_i := weight of index i (eg. WoT trust score)
+    **
+    ** Then this method calculates:
+    **
+    ** : ( Σ_{i∊R} W_i · i.rel(S, T) / Σ_{i∊R} W_i ) · min( 1, 3 · Σ_{i∊R} W_i / Σ_{i∊A} W_i )
+    **
+    */
+    public float getAvPercvRelevance(String subj, String term) {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+    /**
+    ** TODO implement. Maybe use JGraphT. DefaultDirectedWeightedGraph
+    ** and DijkstraShortestPath.
+    **
+    ** Returns the effective relevance of all terms relative to the subject
+    ** term for this query.
+    **
+    ** The effective relevance of a term (relative to the subject term) is
+    ** defined as the highest-relevance path from the subject to that term,
+    ** where the relevance of a path is defined as the product of all the
+    ** edges on it, where each edge from S to T has weight equal to their
+    ** {@linkplain #getAvPercvRelevance(String, String) average perceived
+    ** relevance}.
+    **
+    ** This is itself equivalent to the shortest-path problem from the
+    ** subject term to the target term, with each edge from S to T having
+    ** weight {@code -log(R)}, with R being the average perceived relevance
+    ** of S, T.
+    **
+    ** This definition was chosen because there may be an infinite number
+    ** of paths between two terms in the semantic web; we want the "most
+    ** direct" one that exists (by consensus; so we use average perceived
+    ** relevance).
+    */
+    public Map<String, Float> getEffectiveRelevances() {
+        throw new UnsupportedOperationException("not implemented");
+    }
+
+
+    /**
+    ** Returns true when we have enough data to display something
+    ** acceptable to the user. 
The query may still be going on in the + ** background. + ** + ** TODO decide when this is. + */ + /*@Override*/ public boolean isPartiallyDone() { + throw new UnsupportedOperationException("not implemented"); + } + + + public void run() { + + for (;;) { + + while (Math.log(1)!=0 /* completed tasks is not empty */ ) { + // get completed task + + Set entries = null; + + for (TermEntry en: entries) { + + if (en instanceof TermIndexEntry) { + + } else if (en instanceof TermTermEntry) { + + + } else { + + + } + + + } + + } + + while (Math.log(1)!=0 /* pending tasks is not empty */) { + + + + + + + } + + } + } + + } + + +} diff --git a/src/main/java/plugins/Library/search/inter/TermResults.java b/src/main/java/plugins/Library/search/inter/TermResults.java new file mode 100644 index 00000000..2595e7f5 --- /dev/null +++ b/src/main/java/plugins/Library/search/inter/TermResults.java @@ -0,0 +1,56 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.search.inter; + +import java.util.Collection; +import java.util.Map; + +/** +** DOCUMENT +** +** @author infinity0 +*/ +public class TermResults { + + /** + ** Subject term for the original query. + */ + /*final*/ public String query_subject; + + /** + ** Subject term for these results. + */ + /*final*/ public String subject; + + /** + ** Map of parent terms to the relevances that were seen for this term in + ** the results for the parent. + */ + /*final*/ public Map> parents; + + /** + ** Incremental calculation of effective relevance. May not be exact if + ** if additional indexes were encountered referencing this term, after the + ** last update to this field. + */ + protected float av_pcv_rel_; + + /** + ** Get the cached effective relevance. Use this in preference to {@link + ** Interdex#getEffectiveRelevance()} when the search is still ongoing. + ** + ** TODO this should be a good estimate that is always greater than the + ** actual value... + */ + public float getCachedAveragePerceivedRelevance() { + return av_pcv_rel_; + } + + public TermResults(String q, String s) { + query_subject = q; + subject = s; + } + + +} diff --git a/src/main/java/plugins/Library/search/package-info.java b/src/main/java/plugins/Library/search/package-info.java new file mode 100644 index 00000000..229a0d44 --- /dev/null +++ b/src/main/java/plugins/Library/search/package-info.java @@ -0,0 +1,15 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ + +/** + * Handles higher-level search functions such as set operations on the results, + * splitting search queries, searching on multiple indexes. + * + * TODO support for stopwords, maybe it should be an exception passed back from + * getResult() so that that one can be ignored and need to send a message to the user to say it was excluded ( Could be a TermEntry ) + * + * @author MikeB + */ +package plugins.Library.search; + diff --git a/src/main/java/plugins/Library/ui/L10nString.java b/src/main/java/plugins/Library/ui/L10nString.java new file mode 100644 index 00000000..d04c79be --- /dev/null +++ b/src/main/java/plugins/Library/ui/L10nString.java @@ -0,0 +1,72 @@ +/* This code is part of Freenet. 
diff --git a/src/main/java/plugins/Library/ui/L10nString.java b/src/main/java/plugins/Library/ui/L10nString.java
new file mode 100644
index 00000000..d04c79be
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/L10nString.java
@@ -0,0 +1,72 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.ui;
+
+import freenet.l10n.BaseL10n;
+import freenet.l10n.NodeL10n;
+import freenet.l10n.BaseL10n.LANGUAGE;
+
+/**
+ * Feeble attempt at a translation interface; this is best left until there is
+ * a general Plugin method for doing this.
+ *
+ * @author MikeB
+ */
+public class L10nString{
+
+
+	static LANGUAGE lang;
+
+	public static String getString(String key){
+		lang = NodeL10n.getBase().getSelectedLanguage();
+		if("Index".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "Index ";
+			}
+		else if("Searching-for".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "Searching for ";
+			}
+		else if("in-index".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return " in index ";
+			}
+		else if("Search-status".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "Search status : ";
+			}
+		else if("failed".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "Failed";
+			}
+		else if("title".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "Search Freenet";
+			}
+		else if("page-warning".equals(key))
+			switch(lang){
+				case ENGLISH:
+				default:
+					return "WARNING: This search, like the rest of Freenet, is not filtered, and could find offensive or illegal content. Be careful!";
+			}
+		else
+			return key;
+	}
+
+	public static void setLanguage(LANGUAGE newLanguage){
+		lang = newLanguage;
+	}
+}
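The if/switch ladder in L10nString above grows with every key, and every key currently falls through to the same English default. For comparison, a table-driven sketch of the same lookup (hypothetical; not the plugin's API), with the strings taken verbatim from the class above:

```java
import java.util.HashMap;
import java.util.Map;

/** Hypothetical map-backed variant of L10nString.getString above. */
final class L10nTableSketch {

	private static final Map<String, String> ENGLISH = new HashMap<String, String>();
	static {
		ENGLISH.put("Index", "Index ");
		ENGLISH.put("Searching-for", "Searching for ");
		ENGLISH.put("in-index", " in index ");
		ENGLISH.put("Search-status", "Search status : ");
		ENGLISH.put("failed", "Failed");
		ENGLISH.put("title", "Search Freenet");
	}

	/** Falls back to the key itself, like L10nString.getString does. */
	static String getString(String key) {
		String s = ENGLISH.get(key);
		return (s != null) ? s : key;
	}
}
```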
diff --git a/src/main/java/plugins/Library/ui/MainPage.java b/src/main/java/plugins/Library/ui/MainPage.java
new file mode 100644
index 00000000..ddbb17c1
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/MainPage.java
@@ -0,0 +1,523 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.ui;
+
+import plugins.Library.Library;
+import plugins.Library.search.InvalidSearchException;
+import plugins.Library.search.Search;
+import plugins.Library.util.exec.ChainedProgress;
+import plugins.Library.util.exec.CompositeProgress;
+import plugins.Library.util.exec.Progress;
+import plugins.Library.util.exec.ProgressParts;
+import plugins.Library.util.exec.TaskAbortException;
+
+import freenet.keys.FreenetURI;
+import freenet.pluginmanager.PluginRespirator;
+import freenet.support.HTMLNode;
+import freenet.support.Logger;
+import freenet.support.MultiValueTable;
+import freenet.support.api.HTTPRequest;
+
+import java.io.UnsupportedEncodingException;
+import java.net.MalformedURLException;
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+
+/**
+ * Generates the main search page
+ *
+ * @author MikeB
+ */
+class MainPage {
+	/** map of search hashes to pages on them */
+	private static final HashMap<Integer, MainPage> searchPages = new HashMap<Integer, MainPage>();
+
+	private synchronized static void addpage(int hashCode, MainPage page) {
+		searchPages.put(hashCode, page);
+	}
+
+	private synchronized static void cleanUpPages(){
+		for (Iterator<Integer> it = searchPages.keySet().iterator(); it.hasNext();) {
+			if (!Search.hasSearch(it.next()))
+				it.remove();
+		}
+	}
+
+	private synchronized static MainPage getPage(int intParam) {
+		return searchPages.get(intParam);
+	}
+
+
+
+	private final Library library;
+	private final PluginRespirator pr;
+
+	private Search search = null;
+	private ArrayList<Exception> exceptions = new ArrayList<Exception>();
+	private boolean showold = false;
+	private boolean js = false;
+	private String query = "";
+	/** All of the indexes used in the request together, separated by spaces */
+	private String indexstring = Library.DEFAULT_INDEX_SITE;
+	/** Array of all the bookmark indexes used in this request */
+	private ArrayList<String> selectedBMIndexes = new ArrayList<String>();
+	/** Any other indexes which are not bookmarks */
+	private ArrayList<String> selectedOtherIndexes = new ArrayList<String>();
+	private boolean groupusk = false;
+	private StringBuilder messages = new StringBuilder();
+
+	private String addindexname = "";
+	private String addindexuri = "";
+
+	static volatile boolean logMINOR;
+	static volatile boolean logDEBUG;
+
+	static {
+		Logger.registerClass(MainPage.class);
+	}
+
+
+	MainPage(Exception e, Library library, PluginRespirator pr) {
+		exceptions.add(e);
+		this.library = library;
+		this.pr = pr;
+	}
+
+
+	/**
+	 * FIXME the ajaxy stuff isn't working: it doesn't seem to renew the timeout in the js, and it stopped working when the response type of the xml was changed to text/xml
+	 * @param library
+	 * @param pr
+	 */
+	MainPage(Library library, PluginRespirator pr) {
+		this.library = library;
+		this.pr = pr;
+	}
+
+	/**
+	 * Process a get request; the only parameter allowed for a get request is
+	 * the request id (for an ongoing request).
+	 */
+	public static MainPage processGetRequest(HTTPRequest request){
+		if (request.isParameterSet("request") && searchPages.containsKey(request.getIntParam("request"))){
+			return getPage(request.getIntParam("request"));
+		}
+		return null;
+	}
+
+	/** post commands */
+	private static enum Commands {
+		/** performs a search */
+		find,
+		/** adds a new index to the bookmarks in Library */
+		addbookmark,
+		/** puts an index in the add bookmark box to be named; requires an integer parameter between 0 and "extraindexcount" to specify which index is being
added */ + addindex, + /** deletes a bookmark from the Library, requires an integer parameter between 0 and the number of bookmarks */ + removebookmark + } + + /** + * Process a post request + * @param userAccess + * + * @see plugins.XMLSpider.WebPage#processPostRequest(freenet.support.api.HTTPRequest, + * freenet.support.HTMLNode) + */ + public static MainPage processPostRequest(HTTPRequest request, HTMLNode contentNode, boolean hasFormPassword, boolean userAccess, Library library, PluginRespirator pr ) { + cleanUpPages(); + MainPage page = new MainPage(library, pr); + + page.js = request.isPartSet("js"); + page.showold = request.isPartSet("showold"); + page.groupusk = request.isPartSet("groupusk"); + String[] etcIndexes = request.getPartAsStringFailsafe("indexuris", 256).trim().split("[ ;]"); + page.query = request.getPartAsStringFailsafe("search", 256); + + if(userAccess) { + page.addindexname = request.getPartAsStringFailsafe("addindexname", 32); + page.addindexuri = request.getPartAsStringFailsafe("addindexuri", 256); + } + + // Get bookmarked index list + page.indexstring = ""; + for (String bm : library.bookmarkKeys()){ + String bmid = (Library.BOOKMARK_PREFIX + bm).trim(); + //Logger.normal(this, "checking for ~" + bm + " - "+request.isPartSet("~"+bm)); + if(request.isPartSet("~"+bm)){ + page.indexstring += bmid + " "; + page.selectedBMIndexes.add(bmid); + } + } + // Get other index list + //Logger.normal(page, "extra indexes : "+request.getIntPart("extraindexcount", 0)); + for (int i = 0; i < request.getIntPart("extraindexcount", 0); i++) { + if (request.isPartSet("index"+i)){ + String otherindexuri = request.getPartAsStringFailsafe("index"+i, 256); + //Logger.normal(page, "Added index : "+otherindexuri); + page.indexstring += otherindexuri + " "; + page.selectedOtherIndexes.add(otherindexuri); + }//else + //Logger.normal(page, "other index #"+i+" unchecked"); + } + for (String string : etcIndexes) { + if(string.length()>0){ + page.indexstring += string + " "; + page.selectedOtherIndexes.add(string); + } + } + page.indexstring = page.indexstring.trim(); + + if("".equals(page.indexstring)) + page.indexstring = Library.DEFAULT_INDEX_SITE; + + if(page.selectedBMIndexes.size() == 0 && page.selectedOtherIndexes.size() == 0) + page.selectedBMIndexes.add(Library.DEFAULT_INDEX_SITE); + + + + + + // get search query + if (request.isPartSet(Commands.find.toString())){ + if(hasFormPassword) { + // Start or continue a search + try { + if(logMINOR) + Logger.minor(MainPage.class, "starting search for "+page.query+" on "+page.indexstring); + page.search = Search.startSearch(page.query, page.indexstring); + if(page.search == null) + page.messages.append("Stopwords too prominent in search term, try removing words like 'the', 'and' and 'that' and any words less than 3 characters"); + else{ + page.search.setMakeResultNode(page.groupusk, page.showold, true); // for the moment js will always be on for results, js detecting isnt being used + + // at this point pages is in a state ready to be saved + addpage(page.search.hashCode(), page); + } + } catch (InvalidSearchException ex) { + page.messages.append("Problem with search : "+ex.getLocalizedMessage()+"\n"); + } catch (TaskAbortException ex) { + page.exceptions.add(ex); // TODO handle these exceptions separately + } catch (RuntimeException ex){ + page.exceptions.add(ex); + } + } + }else if (request.isPartSet(Commands.addbookmark.toString())) { + try { + // adding an index + // TODO make sure name is valid + if (userAccess && hasFormPassword && 
page.addindexname.length() > 0 && URLEncoder.encode(page.addindexname, "UTF-8").equals(page.addindexname)) {
+					// Use URLEncoder to have a simple constraint on names
+					library.addBookmark(page.addindexname, page.addindexuri);
+					if (page.selectedOtherIndexes.contains(page.addindexuri)) {
+						page.selectedOtherIndexes.remove(page.addindexuri);
+						page.selectedBMIndexes.add(Library.BOOKMARK_PREFIX + page.addindexname);
+					}
+					page.addindexname = "";
+					page.addindexuri = "";
+				}
+			} catch (UnsupportedEncodingException ex) {
+				Logger.error(page, "Encoding error", ex);
+			}
+		}else{	// check if one of the other indexes is being added as a bookmark; this doesn't actually add it but moves it into the add box
+			for (int i = 0; i < request.getIntPart("extraindexcount", 0); i++) {	// TODO make sure name is valid
+				if (request.isPartSet(Commands.addindex.toString()+i))
+					page.addindexuri = request.getPartAsStringFailsafe("index"+i, 256);
+			}
+			if(userAccess && hasFormPassword)
+				// check if one of the bookmarks is being removed
+				for (String bm : library.bookmarkKeys()) {
+					if (request.isPartSet(Commands.removebookmark+bm)){
+						library.removeBookmark(bm);
+						break;
+					}
+				}
+		}
+
+		return page;
+	}
+
+
+
+	/**
+	 * Write the search page out
+	 *
+	 *
+	 * @see plugins.XMLSpider.WebPage#writeContent(freenet.support.api.HTTPRequest,
+	 *      freenet.support.HTMLNode)
+	 */
+	public void writeContent(HTMLNode contentNode, MultiValueTable headers) {
+		HTMLNode errorDiv = contentNode.addChild("div", "id", "errors");
+
+		for (Exception exception : exceptions) {
+			addError(errorDiv, exception, messages);
+		}
+
+		try{
+			//Logger.normal(this, "Writing page for "+ query + " " + search + " " + indexuri);
+			contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", "/library/static/script.js"}).addChild("%", " ");
+
+			// Generate the url to refresh to, to update the progress
+			String refreshURL = null;
+			if ( search != null)
+				refreshURL = path()+"?request="+search.hashCode();
+
+
+
+
+			if(search == null)
+				contentNode.addChild("#",L10nString.getString("page-warning"));
+
+			contentNode.addChild(searchBox());
+
+
+
+
+			// If showing a search
+			if(search != null){
+				// show progress
+				contentNode.addChild(progressBox());
+				// If search is complete show results
+				if (search.isDone()) {
+					if(search.hasGeneratedResultNode()){
+						contentNode.addChild(search.getHTMLNode());
+						//Logger.normal(this, "Got pre generated result node.");
+					}else
+						try {
+							//Logger.normal(this, "Blocking to generate resultnode.");
+							ResultNodeGenerator nodegenerator = new ResultNodeGenerator(search.getResult(), groupusk, showold, true);	// js is switched on always currently, as js detection is off
+							nodegenerator.run();
+							contentNode.addChild(nodegenerator.getPageEntryNode());
+						} catch (TaskAbortException ex) {
+							exceptions.add(ex);
+						} catch (RuntimeException ex) {
+							exceptions.add(ex);
+						}
+				} else {
+					contentNode.addChild("div", "id", "results").addChild("#");
+				}
+			}
+
+
+
+
+			// Don't refresh if there is no search, if it is finished, if there is an error to show, or if js is enabled
+			if (search != null && !"".equals(search.getQuery()) && !search.isDone() && exceptions.size() <= 0) {
+				// refresh will GET so use a request id
+				if (!js) {
+					headers.put("Refresh", "2;url=" + refreshURL);
+					//contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", path() + "static/" + (js ? "script.js" : "detect.js") + "?request="+search.hashCode()+(showold?"&showold=on":"")}).addChild("%", " ");
"script.js" : "detect.js") + "?request="+search.hashCode()+(showold?"&showold=on":"")}).addChild("%", " "); + //contentNode.addChild("script", new String[]{"type", "src"}, new String[]{"text/javascript", path() + "static/" + (js ? "script.js" : "detect.js") + "?request="+search.hashCode()+(showold?"&showold=on":"")}).addChild("%", " "); + } + } + }catch(TaskAbortException e) { + if(search != null) + search.remove(); + exceptions.add(e); + search = null; + addError(errorDiv, e, messages); + } + + // Show any errors + errorDiv.addChild("p", messages.toString()); + } + + + /** + * Create search form + * + * // @param search already started + * // @param indexuri + * // @param js whether js has been detected + * // @param showold + * + * @return an {@link HTMLNode} representing the search form + */ + private HTMLNode searchBox(){ + HTMLNode searchDiv = new HTMLNode("div", new String[]{"id", "style"}, new String[]{"searchbar", "text-align: center;"}); + if(pr!=null){ + HTMLNode searchForm = pr.addFormChild(searchDiv, path(), "searchform"); + HTMLNode searchBox = searchForm.addChild("div", "style", "display: inline-table; text-align: left; margin: 20px 20px 20px 0px;"); + searchBox.addChild("#", "Search query:"); + searchBox.addChild("br"); + searchBox.addChild("input", new String[]{"name", "size", "type", "value", "title"}, new String[]{"search", "40", "text", query, "Enter a search query. You can use standard search syntax such as 'and', 'or', 'not' and \"\" double quotes around phrases"}); + searchBox.addChild("input", new String[]{"name", "type", "value", "tabindex"}, new String[]{"find", "submit", "Find!", "1"}); + if(js) + searchBox.addChild("input", new String[]{"type","name"}, new String[]{"hidden","js"}); + // Shows the list of bookmarked indexes TODO show descriptions on mouseover ?? + HTMLNode indexeslist = searchBox.addChild("ul", "class", "index-bookmark-list", "Select indexes"); + for (String bm : library.bookmarkKeys()){ + //Logger.normal(this, "Checking for bm="+Library.BOOKMARK_PREFIX+bm+" in \""+indexuri + " = " + selectedBMIndexes.contains(Library.BOOKMARK_PREFIX+bm)+" "+indexuri.contains(Library.BOOKMARK_PREFIX+bm)); + HTMLNode bmItem = indexeslist.addChild("li"); + bmItem.addChild("input", new String[]{"name", "type", "value", "title", (selectedBMIndexes.contains(Library.BOOKMARK_PREFIX+bm) ? 
"checked" : "size" )}, new String[]{"~"+bm, "checkbox", Library.BOOKMARK_PREFIX+bm, "Index uri : "+library.getBookmark(bm), "1" } , bm); + bmItem.addChild("input", new String[]{"name", "type", "value", "title", "class"}, new String[]{Commands.removebookmark+bm, "submit", "X", "Delete this bookmark", "index-bookmark-delete" }); + } + int i=0; + for (String uri : selectedOtherIndexes) { + HTMLNode removeItem = indexeslist.addChild("li"); + String showuri; + try{ + showuri = (new FreenetURI(uri)).toShortString(); + }catch(MalformedURLException e){ + showuri = uri; + } + removeItem.addChild("input", new String[]{"type", "name", "value", "checked"}, new String[]{"checkbox", "index"+i, uri, "checked", } , showuri); + removeItem.addChild("input", new String[]{"name", "type", "value"}, new String[]{Commands.addindex.toString()+i, "submit", "Add=>" }); + i++; + } + indexeslist.addChild("input", new String[]{"name", "type", "value"}, new String[]{"extraindexcount", "hidden", ""+selectedOtherIndexes.size()}); + indexeslist.addChild("li") + .addChild("input", new String[]{"name", "type", "value", "class", "title"}, new String[]{"indexuris", "text", "", "index", "URI or path of other index(s) to search on"}); + + + HTMLNode optionsBox = searchForm.addChild("div", "style", "margin: 20px 0px 20px 20px; display: inline-table; text-align: left;", "Options"); + HTMLNode optionsList = optionsBox.addChild("ul", "class", "options-list"); + optionsList.addChild("li") + .addChild("input", new String[]{"name", "type", groupusk?"checked":"size", "title"}, new String[]{"groupusk", "checkbox", "1", "If set, the results are returned grouped by site and edition, this makes the results quicker to scan through but will disrupt ordering on relevance, if applicable to the indexs you are using."}, "Group sites and editions"); + optionsList.addChild("li") + .addChild("input", new String[]{"name", "type", showold?"checked":"size", "title"}, new String[]{"showold", "checkbox", "1", "If set, older editions are shown in the results greyed out, otherwise only the most recent are shown."}, "Show older editions"); + + HTMLNode newIndexInput = optionsBox.addChild("div", new String[]{"class", "style"}, new String[]{"index", "display: inline-table;"}, "Add an index:"); + newIndexInput.addChild("br"); + newIndexInput.addChild("div", "style", "display: inline-block; width: 50px;", "Name:"); + newIndexInput.addChild("input", new String[]{"name", "type", "class", "title", "value"}, new String[]{"addindexname", "text", "index", "Name of the bookmark, this will appear in the list to the left", addindexname}); + newIndexInput.addChild("br"); + newIndexInput.addChild("div", "style", "display: inline-block; width: 50px;", "URI:"); + newIndexInput.addChild("input", new String[]{"name", "type", "class", "title", "value"}, new String[]{"addindexuri", "text", "index", "URI or path of index to add to bookmarks, including the main index filename at the end of a Freenet uri will help Library not to block in order to discover the index type.", addindexuri}); + newIndexInput.addChild("br"); + newIndexInput.addChild("input", new String[]{"name", "type", "value"}, new String[]{"addbookmark", "submit", "Add Bookmark"}); + }else + searchDiv.addChild("#", "No PluginRespirater, so Form cannot be displayed"); + return searchDiv; + } + + + /** + * Draw progress box with bars + * + * // @param search + * // @param indexuri + * // @param request request to get progress from + * + * @return an {@link HTMLNode} representing a progress box + */ + private HTMLNode 
+	private HTMLNode progressBox() throws TaskAbortException{
+		HTMLNode progressDiv = new HTMLNode("div", "id", "progress");
+		// Search description
+		HTMLNode progressTable = progressDiv.addChild("table", "width", "100%");
+		HTMLNode searchingforCell = progressTable.addChild("tr")
+			.addChild("td");
+		searchingforCell.addChild("#", L10nString.getString("Searching-for"));
+		searchingforCell.addChild("span", "class", "librarian-searching-for-target")
+			.addChild("b", query);
+		searchingforCell.addChild("#", L10nString.getString("in-index"));
+		searchingforCell.addChild("i", indexstring);
+
+
+		// Search status
+		HTMLNode statusRow = progressTable.addChild("tr");
+		statusRow.addChild("td")
+			.addChild("div", "id", "librarian-search-status")
+			.addChild("table", new String[]{"id", "class"}, new String[]{"progress-base", "progress-table"})
+			.addChild("tbody")
+			.addChild(progressBar(search, true));
+		return progressDiv;
+	}
+
+	/**
+	 * Put an error on the page, under node; also draws a big grey box around
+	 * the error, unless it is an InvalidSearchException, in which case it just shows the description
+	 */
+	public static void addError(HTMLNode node, Throwable error, StringBuilder messages){
+		if(messages != null &&
+				(error instanceof InvalidSearchException || error instanceof TaskAbortException) && !logMINOR){	// Print description
+			messages.append(error.getMessage()+"\n");
+			// TODO if there is a cause there should be some way to view this if needed for debugging
+		}else{	// Else print stack trace
+			HTMLNode error1 = node.addChild("div", "style", "padding:10px;border:5px solid gray;margin:10px", error.toString());
+			for (StackTraceElement ste : error.getStackTrace()){
+				error1.addChild("br");
+				error1.addChild("#", " -- "+ste.toString());
+			}
+			if(error.getCause()!=null)
+				addError(error1, error.getCause(), messages);
+		}
+	}
+
+
+	/**
+	 * Draw progress bars and describe progress; CompositeProgresses are drawn as a table with each row containing a subProgress
+	 * @param progress The progress to represent
+	 * @param canFail whether a subtask may fail without aborting the whole progress display
+	 * @return an {@link HTMLNode} representing a progress bar
+	 */
+	public static HTMLNode progressBar(Progress progress, boolean canFail) throws TaskAbortException {
+		synchronized (progress){
+			if( progress instanceof CompositeProgress && ((CompositeProgress) progress).getSubProgress()!=null && ((CompositeProgress) progress).getSubProgress().iterator().hasNext()){
+				// Put together progress bars for all the subProgress
+				HTMLNode block = new HTMLNode("#");
+				block.addChild("tr").addChild("td", "colspan", "6", progress.getSubject() + " : "+progress.getStatus());
+				TaskAbortException firstError = null;
+				boolean anySuccess = false;
+				if(canFail && progress instanceof Search) {
+					if(!(((Search)progress).innerCanFailAndStillComplete()))
+						canFail = false;
+				} else canFail = false;
+				if(((CompositeProgress) progress).getSubProgress() != null)
+				for (Progress progress1 : ((CompositeProgress) progress).getSubProgress()) {
+					try {
+						block.addChild(progressBar(progress1, canFail));
+						anySuccess = true;
+					} catch (TaskAbortException e) {
+						if(!canFail) throw e;
+						if(firstError == null) firstError = e;
+						block.addChild("tr").addChild("td", "colspan", "6", progress1.getSubject() + " : "+L10nString.getString("failed") + " : "+e.getMessage());
+					}
+				}
+				if(firstError != null && !anySuccess)
+					throw firstError;
+				return block;
+			} else {
+				// Draw progress bar for single or chained progress
+				ProgressParts parts;
+				if(progress instanceof ChainedProgress && ((ChainedProgress)progress).getCurrentProgress()!=null)
+					parts =
((ChainedProgress)progress).getCurrentProgress().getParts(); + else + parts = progress.getParts(); + + HTMLNode bar = new HTMLNode("tr"); + bar.addChild("td"); + // search term + bar.addChild("td", progress.getSubject()); + // search stage + bar.addChild("td", progress.getStatus()); + // show fetch progress if fetching something + if(progress.isDone() || progress.getParts().known==0){ + bar.addChild("td", ""); bar.addChild("td"); + }else{ + float fractiondone = parts.getKnownFractionDone(); + int percentage = (int)(((float)100)*fractiondone); // TODO cater for all data and invalid (negative) values + boolean fetchFinalized = parts.finalizedTotal(); + + bar.addChild("td", new String[]{"class", "style"}, new String[]{"progress-bar-outline", "padding: 0px 3px;"}) + .addChild("div", new String[]{"class", "style"}, new String[]{fetchFinalized?"progress-bar-inner-final":"progress-bar-inner-nonfinal", "z-index : -1; width:"+percentage+"%;"}); + bar.addChild("td", fetchFinalized?percentage+"%":"Operation length unknown"); + } + return bar; + } + } + } + + + + public static String path() { + return "/library/"; + } + + public String getQuery() { + return query; + } +} diff --git a/src/main/java/plugins/Library/ui/MainPageToadlet.java b/src/main/java/plugins/Library/ui/MainPageToadlet.java new file mode 100644 index 00000000..2f9e383b --- /dev/null +++ b/src/main/java/plugins/Library/ui/MainPageToadlet.java @@ -0,0 +1,139 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.ui; + +import plugins.Library.Library; + +import freenet.client.HighLevelSimpleClient; +import freenet.clients.http.PageNode; +import freenet.clients.http.RedirectException; +import freenet.clients.http.Toadlet; +import freenet.clients.http.ToadletContext; +import freenet.clients.http.ToadletContextClosedException; +import freenet.node.NodeClientCore; +import freenet.pluginmanager.PluginRespirator; +import freenet.support.HTMLNode; +import freenet.support.Logger; +import freenet.support.MultiValueTable; +import freenet.support.api.HTTPRequest; + +import java.io.IOException; +import java.net.URI; +import java.util.Collection; + +/** + * Encapsulates the MainPage in a Toadlet + * @author MikeB + */ +public class MainPageToadlet extends Toadlet { + private NodeClientCore core; + private final Library library; + private final PluginRespirator pr; + + public MainPageToadlet(HighLevelSimpleClient client, Library library, NodeClientCore core, PluginRespirator pr) { + super(client); + this.core = core; + this.library = library; + this.pr = pr; + } + + public boolean allowPOSTWithoutPassword() { + return true; + } + + public void handleMethodGET(URI uri, final HTTPRequest request, final ToadletContext ctx) + throws ToadletContextClosedException, IOException, RedirectException { + ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(Library.class.getClassLoader()); + try { + // process the request + String title = L10nString.getString("title") + " (" + Library.plugName + ")"; + MainPage page = MainPage.processGetRequest(request); + if(page == null) + page = new MainPage(library, pr); + else { + String query = page.getQuery(); + if(query != null && query.length() > 0) + title = query + " - " + L10nString.getString("title") + " (" + Library.plugName + ")"; + } + PageNode p = 
ctx.getPageMaker().getPageNode(title, ctx); + // Style + p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); + HTMLNode pageNode = p.outer; + HTMLNode contentNode = p.content; + + MultiValueTable headers = new MultiValueTable(); + page.writeContent(contentNode, headers); + // write reply + writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); + } catch(RuntimeException e) { // this way isnt working particularly well, i think the ctx only gives out one page maker + Logger.error(this, "Runtime Exception writing main page", e); + PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); + // Style + p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); + HTMLNode pageNode = p.outer; + HTMLNode contentNode = p.content; + MainPage errorpage = new MainPage(e, library, pr); + MultiValueTable headers = new MultiValueTable(); + errorpage.writeContent(contentNode, headers); + writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); + } finally { + Thread.currentThread().setContextClassLoader(origClassLoader); + } + } + + public void handleMethodPOST(URI uri, HTTPRequest request, final ToadletContext ctx) throws ToadletContextClosedException, IOException, RedirectException { + ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader(); + Thread.currentThread().setContextClassLoader(Library.class.getClassLoader()); + + boolean hasFormPassword = ctx.hasFormPassword(request); + + try { + // Get the nodes of the page + PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); + // Style + p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); + HTMLNode pageNode = p.outer; + HTMLNode contentNode = p.content; + + MainPage page = MainPage.processPostRequest(request, contentNode, hasFormPassword, ctx.isAllowedFullAccess(), library, pr); + if(page==null) + page = new MainPage(library,pr); + // Process the request + MultiValueTable headers = new MultiValueTable(); + // write reply + page.writeContent(contentNode, headers); + // write reply + writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); + } catch(RuntimeException e) { + Logger.error(this, "Runtime Exception writing main page", e); + PageNode p = ctx.getPageMaker().getPageNode(Library.plugName, ctx); + // Style + p.headNode.addChild("link", new String[]{"rel", "href", "type"} , new String[]{"stylesheet", path() + "static/style.css", "text/css"}); + HTMLNode pageNode = p.outer; + HTMLNode contentNode = p.content; + // makes a mainpage for showing errors + MainPage errorpage = new MainPage(e, library, pr); + MultiValueTable headers = new MultiValueTable(); + errorpage.writeContent(contentNode, headers); + writeHTMLReply(ctx, 200, "OK", headers, pageNode.generate()); + } finally { + Thread.currentThread().setContextClassLoader(origClassLoader); + } + } + + + + @Override public String path() { + return MainPage.path(); + } + + public String name() { + return "WelcomeToadlet.searchFreenet"; + } + + public String menu() { + return "FProxyToadlet.categoryBrowsing"; + } +} diff --git a/src/main/java/plugins/Library/ui/RelevanceComparator.java b/src/main/java/plugins/Library/ui/RelevanceComparator.java new file mode 100644 index 00000000..179eb91c --- /dev/null +++ b/src/main/java/plugins/Library/ui/RelevanceComparator.java @@ -0,0 +1,43 @@ +/* This code is 
part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+
+package plugins.Library.ui;
+
+import plugins.Library.index.TermEntry;
+import plugins.Library.util.IdentityComparator;
+
+/**
+ * Compares the relevance of two TermEntrys; extends IdentityComparator so
+ * that two unique entries will not return a comparison of 0
+ *
+ * @author MikeB
+ */
+public class RelevanceComparator extends IdentityComparator<TermEntry> {
+
+
+	public static final RelevanceComparator comparator = new RelevanceComparator();
+
+	@Override
+	public int compare(TermEntry o1, TermEntry o2) {
+		float rel1 = o1.rel;
+		float rel2 = o2.rel;
+
+		if(rel1 == rel2)
+			return super.compare(o1, o2);
+		else
+			return (rel1 < rel2) ? +1 : -1;	// higher relevance sorts first
+	}
+
+	@Override
+	public boolean equals(Object o){
+		// null check needed to satisfy the equals() contract
+		return o != null && getClass() == o.getClass();
+	}
+
+	@Override
+	public int hashCode() {
+		int hash = 7;
+		return hash;
+	}
+
+
+}
diff --git a/src/main/java/plugins/Library/ui/ResultNodeGenerator.java b/src/main/java/plugins/Library/ui/ResultNodeGenerator.java
new file mode 100644
index 00000000..ac0bc942
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/ResultNodeGenerator.java
@@ -0,0 +1,262 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.ui;
+
+import plugins.Library.index.TermEntry;
+import plugins.Library.index.TermIndexEntry;
+import plugins.Library.index.TermPageEntry;
+import plugins.Library.index.TermTermEntry;
+
+import freenet.keys.FreenetURI;
+import freenet.support.HTMLNode;
+import freenet.support.Logger;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+/**
+ * Class for parsing and formatting search results; once isDone() returns true,
+ * the nodes are ready to use
+ *
+ * @author MikeB
+ */
+public class ResultNodeGenerator implements Runnable {
+	private TreeMap<String, TermPageGroupEntry> groupmap;
+	private TreeMap<TermPageEntry, Boolean> pageset;
+	private TreeSet<TermTermEntry> relatedTerms;
+	private TreeSet<TermIndexEntry> relatedIndexes;
+	private Set<TermEntry> result;
+	private boolean groupusk;
+	private boolean showold;
+	private boolean js;
+	private boolean done;
+	private HTMLNode pageEntryNode;
+	private RuntimeException exception;
+
+
+	/**
+	 * Create the generator
+	 * @param result Set of TermEntrys to format
+	 * @param groupusk whether the sites and editions should be grouped
+	 * @param showold whether to show older editions
+	 * @param js whether javascript can be assumed (currently always set, as
+	 * js detection is switched off)
+	 */
+	public ResultNodeGenerator(Set<TermEntry> result, boolean groupusk, boolean showold, boolean js){
+		this.result = result;
+		this.groupusk = groupusk;
+		this.showold = showold;
+		this.js = js;
+	}
+
+
+	public synchronized void run(){
+		if(done)
+			throw new IllegalStateException("ResultNodeGenerator can only be run once.");
+
+		try{
+			parseResult();
+			generatePageEntryNode();
+		}catch(RuntimeException e){
+			exception = e;	// Exceptions thrown here are stored in case this is being run in a thread; they are re-thrown in isDone() or getPageEntryNode()
+			throw e;
+		}
+		done = true;
+		result = null;
+	}
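run() stores any RuntimeException so the generator can be driven from a worker thread and the failure still surfaces to the caller. A usage sketch under stated assumptions: the polling interval is illustrative, the sketch lives in the same package as ResultNodeGenerator, and note that done and exception are not volatile, so real cross-thread use would want extra synchronisation.

```java
import java.util.Collections;

import freenet.support.HTMLNode;
import plugins.Library.index.TermEntry;

/** Sketch: drive ResultNodeGenerator from a worker thread and poll it. */
final class GeneratorUsageSketch {
	static HTMLNode render() throws InterruptedException {
		ResultNodeGenerator gen = new ResultNodeGenerator(
				Collections.<TermEntry>emptySet(), false, false, true);
		new Thread(gen).start();
		while (true) {
			try {
				if (gen.isDone())	// re-throws any RuntimeException from run()
					return gen.getPageEntryNode();
			} catch (RuntimeException e) {
				return new HTMLNode("#", "generation failed: " + e.getMessage());
			}
			Thread.sleep(100);	// illustrative polling interval
		}
	}
}
```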
+	/**
+	 * Return the generated HTMLNode of PageEntrys; only call this after checking isDone()
+	 * @throws RuntimeException if a RuntimeException was caught while generating the node
+	 */
+	public HTMLNode getPageEntryNode(){
+		if(exception != null)
+			throw new RuntimeException("RuntimeException thrown in ResultNodeGenerator thread", exception);
+		return pageEntryNode;
+	}
+
+	/**
+	 * Whether this ResultNodeGenerator has finished formatting, so the get methods can be called
+	 * @throws RuntimeException if a RuntimeException was caught while generating the node
+	 */
+	public boolean isDone(){
+		if(exception != null)
+			throw new RuntimeException("RuntimeException thrown in ResultNodeGenerator thread", exception);
+		return done;
+	}
+
+	/**
+	 * Parse result into generator
+	 */
+	private void parseResult(){
+		groupmap = new TreeMap<String, TermPageGroupEntry>();
+		if(!groupusk)
+			pageset = new TreeMap<TermPageEntry, Boolean>(RelevanceComparator.comparator);
+		relatedTerms = new TreeSet<TermTermEntry>(RelevanceComparator.comparator);
+		relatedIndexes = new TreeSet<TermIndexEntry>(RelevanceComparator.comparator);
+
+		Iterator<TermEntry> it = result.iterator();
+		while(it.hasNext()){
+			TermEntry o = it.next();
+			if(o instanceof TermPageEntry){
+				TermPageEntry pageEntry = (TermPageEntry)o;
+				// Put pages into a group hierarchy : USK key/docnames --> USK editions --> Pages
+				String sitebase;
+				long uskEdition = Long.MIN_VALUE;
+				// Get the key and name
+				FreenetURI uri;
+				uri = pageEntry.page;
+				// convert SSKs to USKs
+				if(uri.isSSKForUSK()){
+					uri = uri.uskForSSK();
+					// Get the USK edition
+					uskEdition = uri.getEdition();
+				}
+				// Get the site base name: key + documentname - uskversion
+				sitebase = uri.setMetaString(null).setSuggestedEdition(0).toString().replaceFirst("/0", "");
+				Logger.minor(this, sitebase);
+
+				// Add site
+				if(!groupmap.containsKey(sitebase))
+					groupmap.put(sitebase, new TermPageGroupEntry(sitebase));
+				TermPageGroupEntry siteGroup = groupmap.get(sitebase);
+				// Add page
+				siteGroup.addPage(uskEdition, pageEntry);
+
+			}else if(o instanceof TermTermEntry){
+				relatedTerms.add((TermTermEntry)o);
+			}else if(o instanceof TermIndexEntry){
+				relatedIndexes.add((TermIndexEntry)o);
+			}else
+				Logger.error(this, "Unknown TermEntry type : "+o.getClass().getName());
+		}
+
+
+		if(!groupusk){	// Move entries from the groupmap to the pageset, marking whether they are the newest
+			for (Iterator<TermPageGroupEntry> it2 = groupmap.values().iterator(); it2.hasNext();) {
+				TermPageGroupEntry groupentry = it2.next();
+				SortedMap<Long, SortedSet<TermPageEntry>> editions = groupentry.getEditions();	// The editions of each site
+				SortedSet<TermPageEntry> newest = editions.get(editions.lastKey());	// get the newest edition
+				for (Iterator<SortedSet<TermPageEntry>> editionIterator = groupentry.getEditions().values().iterator(); editionIterator.hasNext();) {
+					SortedSet<TermPageEntry> edition = editionIterator.next();	// Iterate through all the editions
+					if(!showold && edition != newest)	// If not showing old, skip all but the newest
+						continue;
+					for (TermPageEntry termPageEntry : edition) {
+						pageset.put(termPageEntry, edition==newest);	// Add pages, marking whether they are from the newest edition
+					}
+				}
+			}
+			groupmap = null;
+		}
+	}
+
+	private HTMLNode generateIndexEntryNode(){
+		return new HTMLNode("#", "TermIndexEntry code not done yet");
+	}
+
+	private HTMLNode generateTermEntryNode(){
+		return new HTMLNode("#", "TermTermEntry code not done yet");
+	}
+
+	/**
+	 * Generate node of page results from this generator
+	 */
+	private void generatePageEntryNode(){
+		pageEntryNode = new HTMLNode("div", "id", "results");
+
+		int results = 0;
+		// Loop to separate results into SSK groups
+
+		if(groupmap != null){	// Produce grouped list of pages
+			SortedSet<TermPageGroupEntry> groupSet = new TreeSet<TermPageGroupEntry>(RelevanceComparator.comparator);
+			groupSet.addAll(groupmap.values());
+
+			// Loop over keys
+			Iterator<TermPageGroupEntry> it2 = groupSet.iterator();
+			while (it2.hasNext()) {
+				TermPageGroupEntry group = it2.next();
+				String keybase = group.subj;
+				SortedMap<Long, SortedSet<TermPageEntry>> siteMap = group.getEditions();
+				HTMLNode siteNode = pageEntryNode.addChild("div", "style", "padding-bottom: 6px;");
+				// Create a block for old versions of this SSK
+				HTMLNode siteBlockOldOuter = siteNode.addChild("div", new String[]{"id", "style"}, new String[]{"result-hiddenblock-"+keybase, (!showold?"display:none":"")});
+				// put title on block if it has more than one version in it
+				if(siteMap.size()>1)
+					siteBlockOldOuter.addChild("a", new String[]{"onClick", "name"}, new String[]{"toggleResult('"+keybase+"')", keybase}).addChild("h3", "class", "result-grouptitle", keybase.replaceAll("\\b.*/(.*)", "$1"));
+				// inner block for old versions to be hidden
+				HTMLNode oldEditionContainer = siteBlockOldOuter.addChild("div", new String[]{"class", "style"}, new String[]{"result-hideblock", "border-left: thick black;"});
+				// Loop over all editions in this site
+				Iterator<Long> it3 = siteMap.keySet().iterator();
+				while(it3.hasNext()){
+					long version = it3.next();
+					boolean newestVersion = !it3.hasNext();
+					if(newestVersion)	// put older versions in block, newest outside block
+						oldEditionContainer = siteNode;
+					HTMLNode versionCell;
+					HTMLNode versionNode;
+					if(siteMap.get(version).size()>1||siteMap.size()>1){
+						// table for this version
+						versionNode = oldEditionContainer.addChild("table", new String[]{"class"}, new String[]{"librarian-result"});
+						HTMLNode grouptitle = versionNode.addChild("tr").addChild("td", new String[]{"padding", "colspan"}, new String[]{"0", "3"});
+						grouptitle.addChild("h4", "class", (newestVersion?"result-editiontitle-new":"result-editiontitle-old"), keybase.replaceAll("\\b.*/(.*)", "$1")+(version>=0 ? "-"+version:""));
+						// Put link to show hidden older versions block if necessary
+						if(newestVersion && !showold && js && siteMap.size()>1)
+							grouptitle.addChild("a", new String[]{"href", "onClick"}, new String[]{"#"+keybase, "toggleResult('"+keybase+"')"}, " ["+(siteMap.size()-1)+" older matching versions]");
+						HTMLNode versionrow = versionNode.addChild("tr");
+						versionrow.addChild("td", "width", "8px");
+						// draw black line down the side of the version
+						versionrow.addChild("td", new String[]{"class"}, new String[]{"sskeditionbracket"});
+
+						versionCell=versionrow.addChild("td", "style", "padding-left:15px");
+					}else
+						versionCell = oldEditionContainer;
+					// loop over each result in this version
+					Iterator<TermPageEntry> it4 = siteMap.get(version).iterator();
+					while(it4.hasNext()){
+						versionCell.addChild(termPageEntryNode(it4.next(), newestVersion));
+						results++;
+					}
+				}
+			}
+		}else{	// Just produce sorted list of results
+			for (Iterator<Entry<TermPageEntry, Boolean>> it = pageset.entrySet().iterator(); it.hasNext();) {
+				Entry<TermPageEntry, Boolean> entry = it.next();
+				TermPageEntry termPageEntry = entry.getKey();
+				boolean newestVersion = entry.getValue();
+				pageEntryNode.addChild("div").addChild(termPageEntryNode(termPageEntry, newestVersion));
+				results++;
+			}
+		}
+		pageEntryNode.addChild("p").addChild("span", "class", "librarian-summary-found", "Found "+results+" results");
+	}
+
+	/**
+	 * Returns an {@link HTMLNode} representation of a {@link TermPageEntry} for display in a browser
+	 * @param entry
+	 * @param newestVersion if set, the result is shown in full brightness; if unset the result is greyed out
+	 */
+	private HTMLNode termPageEntryNode(TermPageEntry entry, boolean newestVersion) {
+		FreenetURI uri = entry.page;
+		String showtitle = entry.title;
+		String showurl = uri.toShortString();
+		if (showtitle == null ||
showtitle.trim().length() == 0) { + showtitle = showurl; + } + String realurl = "/" + uri.toString(); + HTMLNode pageNode = new HTMLNode("div", new String[]{"class", "style"}, new String[]{"result-entry", ""}); + pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realurl, (newestVersion ? "result-title-new" : "result-title-old"), (entry.rel>0)?"Relevance : "+(entry.rel*100)+"%":"Relevance unknown"}, showtitle); + // create usk url + if (uri.isSSKForUSK()) { + String realuskurl = "/" + uri.uskForSSK().toString(); + pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realuskurl, (newestVersion ? "result-uskbutton-new" : "result-uskbutton-old"), realuskurl}, "[ USK ]"); + } + pageNode.addChild("br"); + pageNode.addChild("a", new String[]{"href", "class", "title"}, new String[]{realurl, (newestVersion ? "result-url-new" : "result-url-old"), uri.toString()}, showurl); + + return pageNode; + } +} diff --git a/src/main/java/plugins/Library/ui/StaticToadlet.java b/src/main/java/plugins/Library/ui/StaticToadlet.java new file mode 100644 index 00000000..0259d15e --- /dev/null +++ b/src/main/java/plugins/Library/ui/StaticToadlet.java @@ -0,0 +1,78 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.ui; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.net.URL; +import java.util.Date; + +import freenet.client.DefaultMIMETypes; +import freenet.client.HighLevelSimpleClient; +import freenet.clients.http.RedirectException; +import freenet.clients.http.Toadlet; +import freenet.clients.http.ToadletContext; +import freenet.clients.http.ToadletContextClosedException; +import freenet.support.api.Bucket; +import freenet.support.api.HTTPRequest; + +import plugins.Library.Library; + + +/** + * Encapsulates a WebPage in a Toadlet + * @author MikeB + */ +public class StaticToadlet extends Toadlet { + + public StaticToadlet(HighLevelSimpleClient client) { + super(client); + } + + /** + * Get the path to this page + */ + @Override + public String path() { + return ROOT_URL; + } + + public static final String ROOT_URL = "/library/static/"; + public static final String ROOT_PATH = "staticfiles/"; + + public void handleMethodGET(URI uri, final HTTPRequest httprequest, final ToadletContext ctx) + throws ToadletContextClosedException, IOException, RedirectException { + String path = uri.getPath(); + if (!path.startsWith(ROOT_URL)) + return; // doh! 
+		path = path.substring(ROOT_URL.length());
+
+		ClassLoader origClassLoader = Thread.currentThread().getContextClassLoader();
+		Thread.currentThread().setContextClassLoader(Library.class.getClassLoader());
+		try {
+			InputStream strm = getClass().getResourceAsStream(ROOT_PATH+path);
+			if (strm == null) {
+				this.sendErrorPage(ctx, 404, "Not found", "Could not find " + httprequest.getPath().substring(path().length()) + " in " + path());
+				return;
+			}
+			Bucket data = ctx.getBucketFactory().makeBucket(strm.available());
+			OutputStream os = data.getOutputStream();
+			byte[] cbuf = new byte[4096];
+			while(true) {
+				int r = strm.read(cbuf);
+				if(r == -1) break;
+				os.write(cbuf, 0, r);
+			}
+			strm.close();
+			os.close();
+
+			ctx.sendReplyHeaders(200, "OK", null, DefaultMIMETypes.guessMIMEType(path, false), data.size());
+			ctx.writeData(data);
+		} finally {
+			Thread.currentThread().setContextClassLoader(origClassLoader);
+		}
+	}
+}
diff --git a/src/main/java/plugins/Library/ui/TermPageGroupEntry.java b/src/main/java/plugins/Library/ui/TermPageGroupEntry.java
new file mode 100644
index 00000000..fbc87ba7
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/TermPageGroupEntry.java
@@ -0,0 +1,50 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.ui;
+
+import java.util.Collections;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import plugins.Library.index.TermEntry;
+import plugins.Library.index.TermPageEntry;
+
+/**
+ * TODO make this fit the TermEntry contract
+ * @author MikeB
+ */
+public class TermPageGroupEntry extends TermEntry {
+	private final SortedMap<Long, SortedSet<TermPageEntry>> editions = new TreeMap<Long, SortedSet<TermPageEntry>>();
+
+	TermPageGroupEntry(String sitebase) {
+		super(sitebase, 0);
+	}
+
+	@Override
+	public TermEntry.EntryType entryType() {
+		throw new UnsupportedOperationException("Not supported yet.");
+	}
+
+	@Override
+	public boolean equalsTarget(TermEntry entry) {
+		throw new UnsupportedOperationException("Not supported yet.");
+	}
+
+	public void addPage(long uskEdition, TermPageEntry pageEntry) {
+		// Add Edition
+		if(!editions.containsKey(uskEdition))
+			editions.put(uskEdition, new TreeSet<TermPageEntry>(RelevanceComparator.comparator));
+		editions.get(uskEdition).add(pageEntry);
+
+		// TODO HIGH rework this... TermEntry is supposed to be immutable
+		//if(rel < pageEntry.rel)	// TODO enter better algorithm for calculating relevance here
+		//	rel = pageEntry.rel;	// relevance should be on a per-edition basis; probably shouldn't use TermEntry at all
+	}
+
+	SortedMap<Long, SortedSet<TermPageEntry>> getEditions() {
+		return Collections.unmodifiableSortedMap(editions);
+	}
+}
diff --git a/src/main/java/plugins/Library/ui/TestInterface.java b/src/main/java/plugins/Library/ui/TestInterface.java
new file mode 100644
index 00000000..86e19e79
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/TestInterface.java
@@ -0,0 +1,64 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.ui;
+
+import plugins.Library.util.exec.Execution;
+import plugins.Library.search.Search;
+import plugins.Library.*;
+
+import freenet.support.Logger;
+
+import java.util.HashMap;
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+
+public class TestInterface{
+	static HashMap<String, Execution> requests = new HashMap<String, Execution>();
+	static int requestcount =0;
+
+	public static void main(String[] args){
+		try{
+			Logger.setupStdoutLogging(Logger.MINOR, "Starting logging");
+			BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
+			String command;
+			String param;
+			String line;
+			String index = "../../Freenet/myindex8";
+			Library library = Library.init(null);
+			Search.setup(library, null);
+
+			do{
+				line = br.readLine();
+				command = line.substring(0, 1);
+				param = line.substring(1);
+
+				if("f".equals(command)){
+					requests.put(""+requestcount, library.getIndex(index).getTermEntries(param));
+					System.out.println("Started request "+requestcount);
+					requestcount++;
+				}
+				if("s".equals(command)){
+					Search search = Search.startSearch(param, index);
+					if (search == null)
+						System.out.println("Stopwords too prominent in query");
+					else{
+						requests.put(""+requestcount, search);
+						System.out.println("Started request "+requestcount);
+						requestcount++;
+					}
+				}
+				if("p".equals(command))
+					System.out.println(requests.get(param).toString());
+				if("r".equals(command))
+					System.out.println(requests.get(param).getResult());
+				if("i".equals(command))
+					System.out.println(library.getIndex(index));
+//				if("w".equals(command))
+//					WebUI.resultNodeGrouped(requests.get(param), true, true);
+			}while(!"x".equals(command));
+		}catch(Exception e){
+			e.printStackTrace();
+		}
+	}
+}
diff --git a/src/main/java/plugins/Library/ui/WebInterface.java b/src/main/java/plugins/Library/ui/WebInterface.java
new file mode 100644
index 00000000..9c1c8927
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/WebInterface.java
@@ -0,0 +1,67 @@
+/**
+ * Web request handlers
+ *
+ * @author j16sdiz (1024D/75494252)
+ */
+package plugins.Library.ui;
+
+import freenet.client.HighLevelSimpleClient;
+import freenet.clients.http.PageMaker;
+import freenet.clients.http.ToadletContainer;
+import freenet.node.NodeClientCore;
+import freenet.pluginmanager.PluginRespirator;
+import plugins.Library.Library;
+
+
+public class WebInterface {
+	private PageMaker pageMaker;
+	private final ToadletContainer toadletContainer;
+	private final HighLevelSimpleClient client;
+	private final NodeClientCore core;
+	private Library library;
+	private final PluginRespirator pr;
+	private MainPageToadlet pluginsToadlet;
+	private MainPageToadlet mainToadlet;
+	private StaticToadlet staticToadlet;
+
+	/**
+	 * // @param spider
+	 * // @param client
+	 */
+	public WebInterface(Library library, PluginRespirator pr) {
+		this.library = library;
+
+		pageMaker = pr.getPageMaker();
+		this.toadletContainer = pr.getToadletContainer();
+		this.client = pr.getHLSimpleClient();
+		this.core = pr.getNode().clientCore;
+		this.pr = pr;
+	}
+
+	/**
+	 * Load the Library interface into the FProxy interface
+	 */
+	public void load() {
+		//pageMaker.addNavigationCategory("/library/", "Library", "Library", new Main());
+
+		mainToadlet = new MainPageToadlet(client, library, core, pr);
+		toadletContainer.register(mainToadlet, mainToadlet.menu(), mainToadlet.path(), true, mainToadlet.name(), mainToadlet.name(), true, null );
+
+		// I've just realised that the form filter allows access to /plugins/... so /library/ won't be allowed; this is a temporary Toadlet until there is a whitelist for the form filter and /library is on it. TODO put /library on the formfilter whitelist
+		pluginsToadlet = new MainPageToadlet(client, library, core, pr);
+		toadletContainer.register(pluginsToadlet, null, "/plugins/plugin.Library.FreesiteSearch", true, null, null, true, null );
+		staticToadlet = new StaticToadlet(client);
+		toadletContainer.register(staticToadlet, null, "/library/static/", true, false);
+
+	}
+
+	/**
+	 * Unload the Library interface from the FProxy interface
+	 */
+	public void unload() {
+		toadletContainer.unregister(mainToadlet);
+		pageMaker.removeNavigationLink(mainToadlet.menu(), mainToadlet.name());
+		toadletContainer.unregister(pluginsToadlet);
+		toadletContainer.unregister(staticToadlet);
+	}
+}
diff --git a/src/main/java/plugins/Library/ui/package-info.java b/src/main/java/plugins/Library/ui/package-info.java
new file mode 100644
index 00000000..010f8c8c
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/package-info.java
@@ -0,0 +1,14 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+
+
+/**
+ * Generates the web interface for the Library
+ *
+ * TODO get rid of code duplication in here; break up methods into smaller
+ * ones, and cope with all types of TermEntry
+ *
+ * @author MikeB
+ */
+package plugins.Library.ui;
+
diff --git a/src/main/java/plugins/Library/ui/staticfiles/detect.js b/src/main/java/plugins/Library/ui/staticfiles/detect.js
new file mode 100644
index 00000000..130b91d3
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/staticfiles/detect.js
@@ -0,0 +1 @@
+window.location='/library/?js&request=0';
diff --git a/src/main/java/plugins/Library/ui/staticfiles/script.js b/src/main/java/plugins/Library/ui/staticfiles/script.js
new file mode 100644
index 00000000..f0ea4b3b
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/staticfiles/script.js
@@ -0,0 +1,32 @@
+var url = '/library/xml/?request=0&';	// this bit will have to be put into the page being generated dynamically if used again
+var xmlhttp;
+
+function getProgress(){
+	xmlhttp = new XMLHttpRequest();
+	xmlhttp.onreadystatechange=xmlhttpstatechanged;
+	xmlhttp.open('GET', url, true);
+	xmlhttp.send(null);
+}
+
+function xmlhttpstatechanged(){
+	if(xmlhttp.readyState==4){
+		var resp = xmlhttp.responseXML;
+		var progresscontainer = document.getElementById('librarian-search-status');
+		progresscontainer.replaceChild(resp.getElementsByTagName('progress')[0].cloneNode(true), progresscontainer.getElementsByTagName('table')[0]);
+		if(resp.getElementsByTagName('progress')[0].attributes.getNamedItem('RequestState').value=='FINISHED')
+			document.getElementById('results').appendChild(resp.getElementsByTagName('result')[0].cloneNode(true));
+		else if(resp.getElementsByTagName('progress')[0].attributes.getNamedItem('RequestState').value=='ERROR')
+			document.getElementById('errors').appendChild(resp.getElementsByTagName('error')[0].cloneNode(true));
+		else
+			var t = setTimeout('getProgress()', 1000);
+	}
+}
+getProgress();
+
+function toggleResult(key){
+	var togglebox = document.getElementById('result-hiddenblock-'+key);
+	if(togglebox.style.display == 'block')
+		togglebox.style.display = 'none';
+	else
+		togglebox.style.display = 'block';
+}
diff --git a/src/main/java/plugins/Library/ui/staticfiles/style.css b/src/main/java/plugins/Library/ui/staticfiles/style.css
new file mode 100644
index 00000000..45c836f2
--- /dev/null
+++ b/src/main/java/plugins/Library/ui/staticfiles/style.css
@@ -0,0 +1,48 @@
+
+#results { padding-left: 8px; }
+.result-group { padding : 0 0 6px; }
+.result-grouptitle { font-size: large; font-weight: bold; margin-bottom: 0px; margin-top: 6px; }
+.result-editiontitle-new { display:inline; padding-top: 5px; font-size: medium; color: black; }
+.result-editiontitle-old { display:inline; padding-top: 5px; font-size: medium; color: darkGrey; }
+.result-entry { margin-bottom: 10px; }
+.result-sitename {color:black; font-weight:bold; }
+.result-table { border-spacing : 5px; }
+a.result-title-new:link { color: Blue }
+a.result-title-new:visited { color: Purple; }
+a.result-title-old:link { color: LightBlue; }
+a.result-title-old:visited { color: Orchid; }
+a.result-url-new {color:Green; font-size:small; padding-left:15px; }
+a.result-url-old {color:LightGreen; font-size:small; padding-left:15px; }
+a.result-uskbutton-new {color: #480000; font-variant: small-caps; font-size: small; padding-left: 20px; }
+a.result-uskbutton-old {color: #996666; font-variant: small-caps; font-size: small; padding-left: 20px; }
+.progress-table {border-spacing:10px 0px; }
+table.progress-table td { padding: 5px 15px; }
+td.progress-bar-outline { padding: 0px 3px; width:300px; border:1px solid grey; height : 20px; border-spacing: 0px; }
+div.progress-bar-inner-final { background-color: red; height:20px; z-index:-1; border-spacing: 0px; }
+div.progress-bar-inner-nonfinal { background-color: pink; height:20px; z-index:-1; border-spacing: 0px; }
+input.index { font-size: 0.63em; width: 170px; height:1.1em; }
+input.index-bookmark-delete {
+	width: 12px;
+	height: 11px;
+	padding-top: 0px;
+	padding-bottom: 0px;
+	padding-left: 0px;
+	padding-right: 0px;
+	border-top-width: 0px;
+	border-bottom-width: 0px;
+	border-left-width: 0px;
+	border-right-width: 0px;
+	background-color: DeepSkyBlue;
+	color: red;
+	font-size: xx-small;
+}
+input.index-other-add { }
+li.index { }
+td.sskeditionbracket { background: black; width: 2px; padding: 0px; }
+ul.index-bookmark-list { list-style: none; }
+ul.options-list { list-style-type: none; margin-top: 0; }
+div#content th, td { border: none; }
+div.authorization-box { background: darksalmon; border: 1px solid darkred; padding: 8px; }
+div.authorization-box h1 { font-size: 16pt; font-weight: bold; margin-top: 0px; }
+div.authorization-box a:link { color: blue; }
+
diff --git a/src/main/java/plugins/Library/util/BTreeMap.java b/src/main/java/plugins/Library/util/BTreeMap.java
new file mode 100644
index 00000000..11b21aeb
--- /dev/null
+++ b/src/main/java/plugins/Library/util/BTreeMap.java
@@ -0,0 +1,1959 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL.
*/ +package plugins.Library.util; + +import plugins.Library.util.CompositeIterable; +import plugins.Library.util.func.Tuples.X2; +import plugins.Library.util.func.Tuples.X3; + +import java.util.Comparator; +import java.util.Iterator; +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.Map; +import java.util.AbstractSet; +import java.util.AbstractMap; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.TreeMap; +import java.util.HashMap; +import java.util.Stack; +import java.util.NoSuchElementException; +import java.util.ConcurrentModificationException; + +/** +** General purpose B-tree implementation. '''This class is not a general-use +** {@link SortedMap}'''; for that use {@link TreeMap}. +** +** From [http://en.wikipedia.org/wiki/B-tree wikipedia]: A B-tree is a tree +** data structure that keeps data sorted and allows searches, insertions, and +** deletions in logarithmic amortized time. Unlike self-balancing binary search +** trees, it is optimized for systems that read and write large blocks of data. +** It is most commonly used in databases and filesystems. +** +** In B-trees, internal (non-leaf) nodes can have a variable number of child +** nodes within some pre-defined range. When data is inserted or removed from a +** node, its number of child nodes changes. In order to maintain the +** pre-defined range, internal nodes may be joined or split. Because a range of +** child nodes is permitted, B-trees do not need re-balancing as frequently as +** other self-balancing search trees, but may waste some space, since nodes are +** not entirely full. +** +** A B-tree is kept balanced by requiring that all leaf nodes are at the same +** depth. This depth will increase slowly as elements are added to the tree, +** but an increase in the overall depth is infrequent, and results in all leaf +** nodes being one more node further away from the root. +** +** A B-tree of order m (the maximum number of children for each node) is a tree +** which satisfies the following properties: +** +** * Every node has between m/2-1 and m-1 entries, except for the root node, +** which has between 1 and m-1 entries. +** * All leaves are the same distance from the root. +** * Every non-leaf node with k entries has k+1 subnodes arranged in sorted +** order between the entries (for details, see {@link Node}). +** +** B-trees have substantial advantages over alternative implementations when +** node access times far exceed access times within nodes. This usually occurs +** when most nodes are in secondary storage such as hard drives. By maximizing +** the number of child nodes within each internal node, the height of the tree +** decreases, balancing occurs less often, and efficiency increases. Usually +** this value is set such that each node takes up a full disk block or an +** analogous size in secondary storage. +** +** If {@link #NODE_MIN} is {@linkplain #BTreeMap(int) set to} 2, the resulting +** tree is structurally and algorithmically equivalent to a 2-3-4 tree, which +** is itself equivalent to a red-black tree, which is the implementation of +** {@link TreeMap} in the standard Java Class Library. This class has extra +** overheads due to the B-tree generalisation of the structures and algorithms +** involved, and is therefore only meant to be used to represent a B-tree +** stored in secondary storage, as the above paragraph describes. 
+**
+** NOTE: at the moment there is a slight bug in this implementation that causes
+** '''indeterminate behaviour''' when used with a comparator that admits {@code
+** null} keys. This is due to {@code null} having special semantics, meaning
+** "end of map", for the {@link Node#lkey} and {@link Node#rkey} fields. This
+** is NOT an urgent priority to fix, but might be done in the future.
+**
+** Note: this implementation, like {@link TreeMap}, is not thread-safe.
+**
+** @author infinity0
+** @see TreeMap
+** @see Comparator
+** @see Comparable
+*/
+public class BTreeMap<K, V> extends AbstractMap<K, V>
+implements Map<K, V>, SortedMap<K, V>/*, NavigableMap, Cloneable, Serializable*/ {
+
+ /*
+ ** list (most urgent first):
+ **
+ ** TODO HIGH clean up the use of Node._size
+ ** TODO HIGH make Node's fields private/immutable
+ ** TODO LOW use Node.{merge|split} instead of the long code in BTreeMap.{merge|split}
+ ** FIXME LOW {@link ConcurrentModificationException} for the entrySet iterator
+ **
+ ***************************************************************************
+ ** IMPORTANT NOTE FOR MAINTAINERS:
+ ***************************************************************************
+ **
+ ** When navigating the tree, either Node.isLeaf() or Node.nodeSize() MUST
+ ** be called at least once **for each node visited**, before accessing or
+ ** modifying its contents. This is to give SkeletonBTreeMap the chance to
+ ** throw a DataNotLoadedException in case the node has not been loaded.
+ **
+ ** Additionally, each node has a Node._size field which is a cache for
+ ** Node.totalSize(), which calculates the number of entries in it and all
+ ** its children. To invalidate the cache, set Node._size to -1. This must
+ ** be done whenever an operation has >0 chance of changing the total size.
+ **
+ ** FIXME LOW atm there is a slight bug in this implementation that causes
+ ** indeterminate behaviour when used with a comparator that admits null
+ ** keys. This is due to null having special semantics ("end of map"), for
+ ** the lkey and rkey fields. This is NOT an urgent priority to fix, but
+ ** might be done in the future.
+ **
+ ** NULLNOTICE marks the parts of the code which are affected by this logic,
+ ** as well as all occurrences of /\.[lr]key == null/.
+ */
+
+ /**
+ ** Minimum number of children of each node.
+ */
+ final public int NODE_MIN;
+
+ /**
+ ** Maximum number of children of each node. Equal to {@code NODE_MIN * 2}.
+ */
+ final public int NODE_MAX;
+
+ /**
+ ** Minimum number of entries in each node. Equal to {@code NODE_MIN - 1}.
+ */
+ final public int ENT_MIN;
+
+ /**
+ ** Maximum number of entries in each node. Equal to {@code NODE_MAX - 1}.
+ */
+ final public int ENT_MAX;
+
+ /**
+ ** Comparator for this {@link SortedMap}.
+ */
+ final protected Comparator<? super K> comparator;
+
+ /**
+ ** Root node of the tree. The only node that can have less than ENT_MIN
+ ** entries.
+ */
+ protected Node root = newNode(null, null, true);
+
+ /**
+ ** Number of entries currently in the map.
+ */
+ protected int size = 0;
+
+ /**
+ ** Creates a new empty map, sorted according to the given comparator, and
+ ** with each non-root node having the given minimum number of subnodes.
+ **
+ ** @param cmp The comparator for the tree, or {@code null} to use the keys'
+ ** {@link Comparable natural} ordering.
+ ** @param node_min Minimum number of subnodes in each node
+ */
+ public BTreeMap(Comparator<? super K> cmp, int node_min) {
+ if (node_min < 2) {
+ throw new IllegalArgumentException("The minimum number of subnodes must be set to at least 2");
+ }
+ comparator = cmp;
+ NODE_MIN = node_min;
+ NODE_MAX = node_min<<1;
+ ENT_MIN = NODE_MIN - 1;
+ ENT_MAX = NODE_MAX - 1;
+ }
+
+ /**
+ ** Creates a new empty map, sorted according to the keys' {@link Comparable
+ ** natural} ordering, and with each non-root node having the given minimum
+ ** number of subnodes.
+ **
+ ** @param node_min Minimum number of subnodes in each node
+ **/
+ public BTreeMap(int node_min) {
+ this(null, node_min);
+ }
+
+ /**
+ ** Returns the subset of the given {@link SortedSet}, exclusively between
+ ** the given keys. If either key is {@code null}, this will be treated to
+ ** mean the relevant edge of the set.
+ **
+ ** OPT NORM
+ */
+ public static <K> SortedSet<K> subSet(SortedSet<K> set, K lk, K rk) {
+ // NULLNOTICE
+ // JDK6
+ if (lk == null) {
+ return (rk == null)? set: set.headSet(rk);
+ } else {
+ SortedSet<K> ss = (rk == null)? set.tailSet(lk): set.subSet(lk, rk);
+ if (ss.isEmpty() || !compare0(ss.first(), lk, set.comparator())) { return ss; }
+ K k2 = Sorted.higher(ss, lk);
+ return (k2 == null)? ss.headSet(lk): ss.tailSet(k2);
+ // headSet() will create an empty set with the right comparator and of the right class
+ }
+ }
+
+ /**
+ ** DOCUMENT.
+ */
+ public static class PairIterable<E> implements Iterable<X2<E, E>> {
+
+ final protected E lobj;
+ final protected E robj;
+ final protected Iterable<E> ib;
+
+ public PairIterable(E lo, Iterable<E> objs, E ro) {
+ lobj = lo;
+ ib = objs;
+ robj = ro;
+ }
+
+ public Iterator<X2<E, E>> iterator() {
+ return new Iterator<X2<E, E>>() {
+
+ final Iterator<E> it = ib.iterator();
+ E prev = lobj;
+
+ public boolean hasNext() {
+ return (it.hasNext() || prev != robj);
+ }
+
+ public X2<E, E> next() {
+ E rk = (!it.hasNext() && prev != robj)? robj: it.next();
+ return new X2<E, E>(prev, prev = rk);
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+ }
+
+ /**
+ ** A B-tree node. It has the following structure:
+ **
+ ** * Every entry in a non-leaf node has two adjacent subnodes, holding
+ ** entries which are all strictly greater and smaller than the entry.
+ ** * Conversely, every subnode has two adjacent entries, which are strictly
+ ** greater and smaller than every entry in the subnode. (Note: edge
+ ** subnodes' "adjacent" keys are not part of the node's entries, but
+ ** inherited from some ancestor parent.)
+ **
+ ** ^ Node ^
+ ** | |
+ ** | V1 V2 V3 V4 V5 V6 V7 V8 |
+ ** | | | | | | | | | |
+ ** lkey K1 K2 K3 K4 K5 K6 K7 K8 rkey
+ ** \ / \ / \ / \ / \ / \ / \ / \ / \ /
+ ** Node Node Node Node Node Node Node Node Node
+ **
+ ** | - {@link #entries} mappings
+ ** \ - {@link #rnodes} mappings
+ ** / - {@link #lnodes} mappings
+ **
+ ** Note: before anyone gets any ideas about making this class static
+ ** (believe me, I tried), note that {@link TreeMap} '''requires''' the
+ ** comparator to be passed at construction. The neatest way to do this is
+ ** to pass either the tree or the comparator to the node's constructor.
+ **
+ ** This is fine in the context of {@link BTreeMap}, but serialisation makes
+ ** it extremely awkward. Passing the tree requires us to extend {@link
+ ** plugins.Library.io.serial.Translator} to take a context parameter, and
+ ** passing the comparator requires us to add checks to ensure that the
+ ** comparators are equal when the node is re-attached to the tree.
+ ** + ** And even if this is implemented, nodes still need to know their parent + ** tree (to set {@link plugins.Library.io.serial.Serialiser}s etc) and so you + ** would need to code a secondary initialisation scheme to be called after + ** object construction, and after execution returns from {@link + ** plugins.Library.io.serial.Translator#rev(Object)}. All in all, this is more + ** trouble than it's worth. + ** + ** So I've gone with the non-static class, which means a new {@code + ** Translator} needs to be constructed for each new B-tree. This is only + ** two fields above an {@link Object} (if you re-use the same objects for + ** the {@link SkeletonBTreeMap.NodeTranslator#ktr} and {@code + ** SkeletonBTreeMap.NodeTranslator#mtr} fields) and so should be negligible + ** compared to the memory size of the rest of the B-tree. + ** + ** @author infinity0 + */ + protected class Node implements Comparable { + + /** + ** Whether this node is a leaf. + */ + final protected boolean isLeaf; + + /** + ** Map of entries in this node. + */ + final /*private*/ protected SortedMap entries; + + /** + ** Map of entries to their immediate smaller nodes. The greatest node + ** is mapped to by {@link #rkey}. + */ + final /*private*/ protected Map lnodes; + + /** + ** Map of entries to their immediate greater nodes. The smallest node + ** is mapped to by {@link #lkey}. + */ + final /*private*/ protected Map rnodes; + + /** + ** Greatest key smaller than all keys in this node and subnodes. This + ** is {@code null} when the node is at the minimum-key end of the tree. + */ + /*final*/ protected K lkey; + + /** + ** Smallest key greater than all keys in this node and subnodes. This + ** is {@code null} when the node is at the maximum-key end of the tree. + */ + /*final*/ protected K rkey; + + /** + ** Cache for the total number of entries in this node and all subnodes. + ** A negative value means the cache was invalidated. + */ + transient int _size = -1; + + /** + ** Creates a new node for the BTree, with a custom map to store the + ** entries. + ** + ** It is '''assumed''' that the input map is empty; it is up to the + ** caller to ensure that this holds, or if not, then at least put the + ** node into a consistent state before releasing it for use. + ** + ** @param lk The {@code Node#lkey} for the node. + ** @param rk The {@code Node#rkey} for the node. + ** @param lf Whether to create a leaf node + ** @param map A {@link SortedMap} to use to store the entries + */ + protected Node(K lk, K rk, boolean lf, SortedMap map) { + lkey = lk; + rkey = rk; + isLeaf = lf; + entries = map; + // we don't use sentinel Nil elements because that wastes memory due to + // having to maintain dummy rnodes and lnodes maps. + lnodes = (lf)? null: new HashMap(NODE_MAX); + rnodes = (lf)? null: new HashMap(NODE_MAX); + } + + /** + ** Creates a new node for the BTree + ** + ** @param lk The {@code Node#lkey} for the node. + ** @param rk The {@code Node#rkey} for the node. + ** @param lf Whether to create a leaf node + */ + protected Node(K lk, K rk, boolean lf) { + this(lk, rk, lf, new TreeMap(comparator)); + } + + /** + ** Add the given entries and subnodes to this node. The subnodes are + ** added with {@link #addChildNode(BTreeMap.Node)}. + ** + ** It is '''assumed''' that the entries are located exactly exclusively + ** between the nodes (ie. like in an actual node); it is up to the + ** caller to ensure that this holds. 
+ ** + ** @param ent The entries to add + ** @param nodes The subnodes to add + */ + protected void addAll(SortedMap ent, Iterable nodes) { + assert(ent.comparator() == comparator()); + assert(ent.isEmpty() || compareL(lkey, ent.firstKey()) < 0); + assert(ent.isEmpty() || compareR(ent.lastKey(), rkey) < 0); + entries.putAll(ent); + if (!isLeaf) { + for (Node ch: nodes) { + addChildNode(ch); + } + } + } + + /** + ** @return Number of subnodes + */ + public int childCount() { + return (rnodes == null)? 0: rnodes.size(); + } + + /** + ** @return Number of local entries + */ + public int nodeSize() { + return entries.size(); + } + + /** + ** @return Whether this is a leaf node + */ + public boolean isLeaf() { + return isLeaf; + } + + /** + ** @return Whether the node is a ghost. This cannot be {@code true} for + ** normal maps, but is used by {@link SkeletonBTreeMap}. + */ + public boolean isGhost() { + return entries == null; + } + + /** + ** @return Total number of entries in this node and all subnodes + */ + protected int totalSize() { + if (_size < 0) { + int s = nodeSize(); // makes any future synchronization easier + if (!isLeaf()) { + for (Node n: lnodes.values()) { + s += n.totalSize(); + } + } + _size = s; + } + return _size; + } + + /** + ** Returns the greatest subnode smaller than the given node. + ** + ** It is '''assumed''' that the input is an actual subnode of this + ** node; it is up to the caller to ensure that this holds. + ** + ** @param node The node to lookup its neighbour for + ** @return The neighbour node, or null if the input node is at the edge + ** @throws NullPointerException if this is a leaf node + */ + public Node nodeL(Node node) { + assert(lnodes.get(node.rkey) == node + && rnodes.get(node.lkey) == node); + + return (compare0(lkey, node.lkey))? null: lnodes.get(node.lkey); + } + + /** + ** Returns the smallest subnode greater than the given node. + ** + ** It is '''assumed''' that the input is an actual subnode of this + ** node; it is up to the caller to ensure that this holds. + ** + ** @param node The node to lookup its neighbour for + ** @return The neighbour node, or null if the input node is at the edge + ** @throws NullPointerException if this is a leaf node + */ + public Node nodeR(Node node) { + assert(lnodes.get(node.rkey) == node + && rnodes.get(node.lkey) == node); + + return (compare0(node.rkey, rkey))? null: rnodes.get(node.rkey); + } + + /** + ** Returns the subnode a that given key belongs in. The key must not be + ** local to the node. + ** + ** It is '''assumed''' that the input key is in the correct range; it + ** is up to the caller to ensure that this holds. + ** + ** @param key The key to select the node for + ** @return The subnode for the key, or null if the key is local to the + ** node + ** @throws NullPointerException if this is a leaf node; or if the key + ** is {@code null} and the entries map uses natural order, or + ** its comparator does not tolerate {@code null} keys + */ + public Node selectNode(K key) { + assert(compareL(lkey, key) < 0 && compareR(key, rkey) < 0); + + SortedMap tailmap = entries.tailMap(key); + if (tailmap.isEmpty()) { + return lnodes.get(rkey); + } else { + K next = tailmap.firstKey(); + return (compare(key, next) == 0)? null: lnodes.get(next); + } + } + + /** + ** Attaches a child via its {@code lkey}, {@code rkey} fields. + ** + ** It is '''assumed''' that both keys are in the local entries map, and + ** that there is no node between these keys. It is up to the caller to + ** ensure that this holds. 
+ **
+ ** @param child The child to attach
+ */
+ protected void addChildNode(Node child) {
+ assert(compare0(rkey, child.rkey) || entries.containsKey(child.rkey));
+ assert(compare0(lkey, child.lkey) || entries.containsKey(child.lkey));
+ assert(lnodes.get(child.rkey) == null);
+ assert(rnodes.get(child.lkey) == null);
+ lnodes.put(child.rkey, child);
+ rnodes.put(child.lkey, child);
+ }
+
+ private transient Iterable<X3<K, Node, K>> _iterNodesK;
+ /**
+ ** A view of all subnodes with their lkey and rkey, as an {@link
+ ** Iterable}. Iteration occurs in '''sorted''' order.
+ **
+ ** TODO LOW this method does not check that this node actually has subnodes
+ ** (ie. non-leaf). This may or may not change in the future.
+ */
+ public Iterable<X3<K, Node, K>> iterNodesK() {
+ if (_iterNodesK == null) {
+ _iterNodesK = new CompositeIterable<X2<K, K>, X3<K, Node, K>>(iterKeyPairs(), true) {
+ @Override protected X3<K, Node, K> nextFor(X2<K, K> kp) {
+ return new X3<K, Node, K>(kp._0, rnodes.get(kp._0), kp._1);
+ }
+ };
+ }
+ return _iterNodesK;
+ }
+
+ private transient Iterable<Node> _iterNodes;
+ /**
+ ** A view of all subnodes as an {@link Iterable}. Iteration occurs in
+ ** '''no particular order'''.
+ **
+ ** TODO LOW this method does not check that this node actually has subnodes
+ ** (ie. non-leaf). This may or may not change in the future.
+ */
+ public Iterable<Node> iterNodes() {
+ if (_iterNodes == null) {
+ _iterNodes = new CompositeIterable<Node, Node>(rnodes.values(), true) {
+ @Override protected Node nextFor(Node n) { return n; }
+ };
+ }
+ return _iterNodes;
+ }
+
+ private transient Iterable<K> _iterKeys;
+ /**
+ ** A view of all local keys as an {@link Iterable}. Iteration occurs
+ ** in '''sorted''' order.
+ */
+ public Iterable<K> iterKeys() {
+ if (_iterKeys == null) {
+ _iterKeys = new CompositeIterable<K, K>(entries.keySet(), true) {
+ @Override protected K nextFor(K k) { return k; }
+ };
+ }
+ return _iterKeys;
+ }
+
+ private transient Iterable<K> _iterAllKeys;
+ /**
+ ** A view of all keys (including {@code lkey}, {@code rkey}) as an
+ ** {@link Iterable}. Iteration occurs in '''sorted''' order.
+ */
+ public Iterable<K> iterAllKeys() {
+ if (_iterAllKeys == null) {
+ _iterAllKeys = new Iterable<K>() {
+ public Iterator<K> iterator() {
+ return new Iterator<K>() {
+
+ final Iterator<K> it = entries.keySet().iterator();
+ byte stage = 0;
+
+ public boolean hasNext() {
+ return (stage < 2);
+ }
+
+ public K next() {
+ switch (stage) {
+ case 0:
+ stage = 1;
+ return lkey;
+ case 1:
+ if (it.hasNext()) { return it.next(); }
+ else { stage = 2; return rkey; }
+ case 2:
+ return it.next(); // throws NoSuchElementException
+ }
+ throw new AssertionError();
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+ };
+ }
+ return _iterAllKeys;
+ }
+
+ private transient Iterable<X2<K, K>> _iterKeyPairs;
+ /**
+ ** A view of adjacent key pairs (including {@code lkey}, {@code rkey})
+ ** as an {@link Iterable}. Iteration occurs in '''sorted''' order.
+ */
+ public Iterable<X2<K, K>> iterKeyPairs() {
+ if (_iterKeyPairs == null) {
+ // TODO NORM replace this with PairIterable when we make lkey/rkey final
+ _iterKeyPairs = new Iterable<X2<K, K>>() {
+ public Iterator<X2<K, K>> iterator() {
+ return new Iterator<X2<K, K>>() {
+
+ final Iterator<K> it = entries.keySet().iterator();
+ K lastkey = lkey;
+
+ public boolean hasNext() {
+ return (it.hasNext() || lastkey != rkey);
+ }
+
+ public X2<K, K> next() {
+ K rk = (!it.hasNext() && lastkey != rkey)?
rkey: it.next(); + return new X2(lastkey, lastkey = rk); + } + + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + }; + } + return _iterKeyPairs; + } + + /** + ** A view of child nodes exclusively between {@code lk}, {@code rk}, as + ** an {@link Iterable}. Iteration occurs in '''sorted''' order. + ** + ** It is '''assumed''' that the input keys are all local to the node + ** (or its lkey/rkey), and in the correct order; it is up to the caller + ** to ensure that this holds. + ** + ** Returns {@code null} if this is a leaf. + ** + ** @param lk The key on the L-side, exclusive + ** @param rk The key on the R-side, exclusive + */ + protected Iterable iterNodes(K lk, K rk) { + assert(lk == null || rk == null || compare(lk, rk) < 0); + if (isLeaf) { return null; } + // TODO LOW make a proper iterator.. + List nodes = new java.util.ArrayList(); + SortedSet skey = Sorted.keySet(subEntries(lk, rk)); + if (skey.isEmpty()) { return nodes; } + for (K key: skey) { + nodes.add(lnodes.get(key)); + } + nodes.add(rnodes.get(skey.last())); + return nodes; + } + + /** + ** A view of local entries exclusively between {@code lk}, {@code rk}, + ** as a {@link SortedMap}. + ** + ** It is '''assumed''' that the input keys are all local to the node + ** (or its lkey/rkey), and in the correct order; it is up to the caller + ** to ensure that this holds. + ** + ** @param lk The key on the L-side, exclusive + ** @param rk The key on the R-side, exclusive + */ + protected SortedMap subEntries(K lk, K rk) { + assert(lk == null || rk == null || compare(lk, rk) < 0); + if (compare0(lk, lkey)) { + return (compare0(rk, rkey))? entries: entries.headMap(rk); + } else { + SortedSet tset = Sorted.keySet(entries).tailSet(lk); + assert(!tset.isEmpty()); + if (tset.size() == 1) { + // create an empty set with the right comparator and of the right class + return entries.subMap(lk, lk); + } + K k2 = Sorted.higher(tset, lk); + return (compare0(rk, rkey))? entries.tailMap(k2): entries.subMap(k2, rk); + } + } + + /** + ** Merge child nodes between the given keys. The merged node is added + ** to this node, and the old children are discarded. + ** + ** It is '''assumed''' that the input keys are all local to the node, + ** and in the correct order; it is up to the caller to ensure that this + ** holds. + ** + ** @param lk The {@code lkey} of the new node + ** @param keys The keys of this node at which to merge + ** @param rk The {@code rkey} of the new node + */ + protected void merge(K lk, Iterable keys, K rk) { + Iterator it = iterNodes(lk, rk).iterator(); + Node child = it.next(); + assert(child == rnodes.get(lk)); + + for (K k: keys) { + child.entries.put(k, entries.remove(k)); + } + + while (it.hasNext()) { + Node next = it.next(); + child.addAll(next.entries, (next.isLeaf)? null: next.iterNodes()); + } + + child.rkey = rk; + } + + /** + ** Split a child node at the given keys. The split children are added + ** to this node, and the old child is discarded. + ** + ** It is '''assumed''' that the input keys are all local to the node, + ** and in the correct order; it is up to the caller to ensure that this + ** holds. 
+ ** + ** @param lk The {@code lkey} of the child node + ** @param keys The keys of the child node at which to split + ** @param rk The {@code rkey} of the child node + */ + protected void split(K lk, Iterable keys, K rk) { + Node child = rnodes.get(lk); + assert(compare0(child.rkey, rk)); + + for (K k: keys) { + swapKey(k, child, this); + } + + // to satisfy conditions for addChildNode() + lnodes.remove(rk); + rnodes.remove(lk); + + for (X2 kp: new PairIterable(lk, keys, rk)) { + Node newnode = newNode(kp._0, kp._1, child.isLeaf); + newnode.addAll(child.subEntries(kp._0, kp._1), child.iterNodes(kp._0, kp._1)); + addChildNode(newnode); + } + } + + /** + ** Defines a total ordering on the nodes. This is useful for eg. deciding + ** which nodes to visit first in a breadth-first search algorithm that uses + ** a priority queue ({@link java.util.concurrent.PriorityBlockingQueue}) + ** instead of a FIFO ({@link java.util.concurrent.LinkedBlockingQueue}). + ** + ** A node is "compare-equal" to another iff: + ** + ** * Both their corresponding endpoints are compare-equal. + ** + ** A node is "smaller" than another iff: + ** + ** * Its right endpoint is strictly smaller than the other's, or + ** * Its right endpoint is compare-equal to the other's, and its left + ** endpoint is strictly greater than the other's. (Note that this case + ** would never be reached in a breadth-first search.) + ** + ** Or, equivalently, and perhaps more naturally (but less obvious why it + ** is a total order): + ** + ** * Both its endpoints are strictly smaller than the other's corresponding + ** endpoints, or + ** * Both of its endpoints are contained within the other's endpoints, + ** and one of them strictly so. + ** + ** @param n The other node to compare with + */ + /*@Override**/ public int compareTo(Node n) { + int b = compareR(rkey, n.rkey); + return (b == 0)? compareL(n.lkey, lkey): b; + // the more "natural" version + // return (a == 0)? ((b == 0)? 0: (b < 0)? -1: 1) + // (a < 0)? ((b == 0)? 1: (b < 0)? -1: 1): + // ((b == 0)? -1: (b < 0)? -1: 1); + } + + public String toTreeString(String istr) { + String nistr = istr + "\t"; + StringBuilder s = new StringBuilder(); + s.append(istr).append('(').append(lkey).append(')').append('\n'); + if (isLeaf) { + for (Map.Entry en: entries.entrySet()) { + s.append(istr).append(en.getKey()).append(" : ").append(en.getValue()).append('\n'); + } + } else { + s.append(rnodes.get(lkey).toTreeString(nistr)); + for (Map.Entry en: entries.entrySet()) { + s.append(istr).append(en.getKey()).append(" : ").append(en.getValue()).append('\n'); + s.append(rnodes.get(en.getKey()).toTreeString(nistr)); + } + } + s.append(istr).append('(').append(rkey).append(')').append('\n'); + return s.toString(); + } + + public String getName() { + return "BTreeMap node " + getRange(); + } + + public String getRange() { + return (lkey == null? "*": lkey) + "-" + (rkey == null? "*": rkey); + } + + } + + public String toTreeString() { + return root.toTreeString(""); + } + + /** + ** Compares two keys using the comparator for this tree, or the keys' + ** {@link Comparable natural} ordering if no comparator was given. + ** + ** @throws ClassCastException if the keys cannot be compared by the given + ** comparator, or if they cannot be compared naturally (ie. they + ** don't implement {@link Comparable}) + ** @throws NullPointerException if either of the keys are {@code null}. + */ + final public static int compare(K key1, K key2, Comparator comparator) { + return (comparator != null)? 
comparator.compare(key1, key2): ((Comparable)key1).compareTo(key2); + } + + protected int compare(K key1, K key2) { + return compare(key1, key2, comparator); + } + + /** + ** Tests for compare-equality. Two values are compare-equal if they are + ** both {@code null} or they {@link #compare(Object, Object)} to {@code 0}. + ** We use this rather than {@link #equals(Object)} to maintain sorting + ** order consistency in case the comparator is inconsistent with equals. + ** + ** @throws ClassCastException if the keys cannot be compared by the given + ** comparator, or if they cannot be compared naturally (ie. they + ** don't implement {@link Comparable}) + */ + final public static boolean compare0(K key1, K key2, Comparator comparator) { + return key1 == null? key2 == null: key2 == null? false: + ((comparator != null)? comparator.compare(key1, key2): ((Comparable)key1).compareTo(key2)) == 0; + } + + protected boolean compare0(K key1, K key2) { + return compare0(key1, key2, comparator); + } + + /** + ** Comparison between {@link Node#lkey} values, where {@code null} means + ** "a value smaller than any other". Use this when one of the operands is + ** the {@code lkey} of some node. + ** + ** @throws ClassCastException if the keys cannot be compared by the given + ** comparator, or if they cannot be compared naturally (ie. they + ** don't implement {@link Comparable}) + */ + protected int compareL(K lkey1, K lkey2) { + return (lkey1 == null)? ((lkey2 == null)? 0: -1): + ((lkey2 == null)? 1: compare(lkey1, lkey2, comparator)); + } + + /** + ** Comparison between {@link Node#rkey} values, where {@code null} means + ** "a value greater than any other". Use this when one of the operands is + ** the {@code rkey} of some node. + ** + ** @throws ClassCastException if the keys cannot be compared by the given + ** comparator, or if they cannot be compared naturally (ie. they + ** don't implement {@link Comparable}) + */ + protected int compareR(K rkey1, K rkey2) { + return (rkey2 == null)? ((rkey1 == null)? 0: -1): + ((rkey1 == null)? 1: compare(rkey1, rkey2, comparator)); + } + + /** + ** Simulates an assertion. + ** + ** @param b The test condition + ** @throws IllegalStateException if the test condition is false + */ + final static void verify(boolean b) { + if (!b) { throw new IllegalStateException("Verification failed"); } + } + + /** + ** Package-private debugging method. This one checks node integrity: + ** + ** * lkey < firstkey, lastkey < rkey + ** + ** For non-leaf nodes: + ** + ** * for each pair of adjacent keys (including two null keys either side of + ** the list of keys), it is possible to traverse between the keys, and + ** the single node between them, using the lnodes, rnodes maps and + ** the lkey, rkey pointers. + ** * lnodes' and rnodes' size is 1 greater than that of entries. Ie. there + ** is nothing else in the node beyond what we visited in the previous + ** step. + ** + ** It also checks the BTree node constraints: + ** + ** * The node has at most ENT_MAX entries. + ** * The node has at least ENT_MIN entries, except the root. + ** * The root has at least 1 entry. 
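+ **
+ ** For example (a hypothetical check, with NODE_MIN = 2 so ENT_MAX = 3): a
+ ** non-leaf node holding entries {B, D} passes only if rnodes maps each of
+ ** {lkey, B, D} and lnodes maps each of {B, D, rkey} to the same three
+ ** subnodes, giving both maps nodeSize() + 1 == 3 elements.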
+ ** + ** @throws IllegalStateException if the constraints are not satisfied + */ + final void verifyNodeIntegrity(Node node) { + if (node.entries.isEmpty()) { + // don't test node == root since it might be a newly-created detached node + verify(node.lkey == null && node.rkey == null); + verify(node.isLeaf() && node.rnodes == null && node.lnodes == null); + return; + } + + verify(compareL(node.lkey, node.entries.firstKey()) < 0); + verify(compareR(node.entries.lastKey(), node.rkey) < 0); + + int s = node.nodeSize(); + if (!node.isLeaf()) { + Iterator it = node.entries.keySet().iterator(); + verify(it.hasNext()); + + K prev = node.lkey; + K curr = it.next(); + while (curr != node.rkey || prev != node.rkey) { + Node node1 = node.rnodes.get(prev); + verify(node1 != null); + s += node1.totalSize(); + verify(compare0(curr, node1.rkey)); + Node node2 = node.lnodes.get(curr); + verify(node1 == node2 && compare0(node2.lkey, prev)); + prev = curr; + curr = it.hasNext()? it.next(): node.rkey; + } + + verify(node.nodeSize() + 1 == node.rnodes.size()); + verify(node.nodeSize() + 1 == node.lnodes.size()); + } + /* DEBUG if (node._size > 0 && node._size != s) { + System.out.println(node._size + " vs " + s); + System.out.println(node.toTreeString("\t")); + }*/ + verify(node._size < 0 || node._size == s); + + verify(node.nodeSize() <= ENT_MAX); + // don't test node == root since it might be a newly-created detached node + verify(node.lkey == null && node.rkey == null && node.nodeSize() >= 1 + || node.nodeSize() >= ENT_MIN); + } + + /** + ** Package-private debugging method. This one checks BTree constraints for + ** the subtree rooted at a given node: + ** + ** * All nodes follow the node constraints, listed above. + ** * All leaves appear in the same level + ** + ** @return depth of all leaves + ** @throws IllegalStateException if the constraints are not satisfied + */ + final int verifyTreeIntegrity(Node node) { + verifyNodeIntegrity(node); + if (node.isLeaf()) { + return 0; + } else { + // breath-first search would take up too much memory for a broad BTree + int depth = -1; + for (Node n: node.lnodes.values()) { + int d = verifyTreeIntegrity(n); + if (depth < 0) { depth = d; } + verify(d == depth); + } + return depth + 1; + } + } + + /** + ** Package-private debugging method. This one checks BTree constraints for + ** the entire tree. + ** + ** @throws IllegalStateException if the constraints are not satisfied + */ + final void verifyTreeIntegrity() { + verifyTreeIntegrity(root); + } + + /** + ** Creates a new node. + ** + ** @param lk The {@code Node#lkey} for the node. + ** @param rk The {@code Node#rkey} for the node. + ** @param lf Whether the node is a leaf. + */ + protected Node newNode(K lk, K rk, boolean lf) { + return new Node(lk, rk, lf); + } + + /** + ** Move the given key from the given source node to the given target node. + ** This method should be used whenever entries need to be moved, so that + ** eg. we can proceed even when the values have not been loaded (such as + ** in a {@link SkeletonBTreeMap}). + ** + ** FIXME HIGH do this for everything in the class. ATM only Node.split() + ** uses it. + */ + protected void swapKey(K key, Node src, Node dst) { + V val = src.entries.remove(key); + dst.entries.put(key, val); + } + + /** + ** Split a maximal child node into two minimal nodes, using the median key + ** as the separator between these new nodes in the parent. If the parent is + ** {@code null}, creates a new {@link Node} and points {@link #root} to it. 
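+ **
+ ** A worked sketch (hypothetical keys, NODE_MIN = 2 so ENT_MAX = 3):
+ ** splitting a full leaf (A)[B C D](E) under a parent holding {A, E} moves
+ ** the median C up into the parent, leaving halfnodes (A)[B](C) and
+ ** (C)[D](E); the parent then holds {A, C, E} with C mapped to the two
+ ** halfnodes in its lnodes/rnodes.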
+ **
+ ** It is '''assumed''' that the child is an actual subnode of the parent,
+ ** and that it is full. It is up to the caller to ensure that this holds.
+ **
+ ** The exact implementation of this may change from time to time, so the
+ ** calling code should regard the input subnode as effectively destroyed,
+ ** and re-get the desired result subnode from calling the appropriate
+ ** get methods on the parent node.
+ **
+ ** @param parent The node to (re)attach the split subnodes to.
+ ** @param child The subnode to split
+ */
+ private K split(Node parent, Node child) {
+ assert(child.nodeSize() == ENT_MAX);
+ assert(parent == null? (child.lkey == null && child.rkey == null):
+ (parent.nodeSize() < ENT_MAX
+ && !parent.isLeaf()
+ && parent.rnodes.get(child.lkey) == child
+ && parent.lnodes.get(child.rkey) == child));
+
+ if (parent == null) {
+ parent = root = newNode(null, null, false);
+ parent.addChildNode(child);
+ }
+ parent._size = child._size = -1;
+
+ Node lnode = newNode(null, null, child.isLeaf());
+ K mkey;
+
+ if (child.isLeaf()) {
+ // this is just the same as the code in the else block, but with leaf
+ // references to rnodes and lnodes removed (since they are null)
+ Iterator<Map.Entry<K, V>> it = child.entries.entrySet().iterator();
+ for (int i=0; i<ENT_MIN; ++i) {
+ Map.Entry<K, V> entry = it.next(); it.remove();
+ K key = entry.getKey();
+
+ lnode.entries.put(key, entry.getValue());
+ }
+ Map.Entry<K, V> median = it.next(); it.remove();
+ mkey = median.getKey();
+
+ lnode.lkey = child.lkey;
+ lnode.rkey = child.lkey = mkey;
+
+ parent.rnodes.put(lnode.lkey, lnode);
+ parent.lnodes.put(child.rkey, child);
+ parent.entries.put(mkey, median.getValue());
+ parent.rnodes.put(mkey, child);
+ parent.lnodes.put(mkey, lnode);
+
+ } else {
+ lnode.rnodes.put(child.lkey, child.rnodes.remove(child.lkey));
+ Iterator<Map.Entry<K, V>> it = child.entries.entrySet().iterator();
+ for (int i=0; i<ENT_MIN; ++i) {
+ Map.Entry<K, V> entry = it.next(); it.remove();
+ K key = entry.getKey();
+
+ lnode.entries.put(key, entry.getValue());
+ lnode.lnodes.put(key, child.lnodes.remove(key));
+ lnode.rnodes.put(key, child.rnodes.remove(key));
+ }
+ Map.Entry<K, V> median = it.next(); it.remove();
+ mkey = median.getKey();
+ lnode.lnodes.put(mkey, child.lnodes.remove(mkey));
+
+ lnode.lkey = child.lkey;
+ lnode.rkey = child.lkey = mkey;
+
+ parent.rnodes.put(lnode.lkey, lnode);
+ parent.lnodes.put(child.rkey, child);
+ parent.entries.put(mkey, median.getValue());
+ parent.rnodes.put(mkey, child);
+ parent.lnodes.put(mkey, lnode);
+ }
+
+ assert(parent.rnodes.get(mkey) == child);
+ assert(parent.lnodes.get(mkey) == lnode);
+ return mkey;
+ }
+
+ /**
+ ** Merge two minimal child nodes into a maximal node, using the key that
+ ** separates them in the parent as the key that joins the halfnodes in the
+ ** merged node. If the parent is the {@link #root} and the merge makes it
+ ** empty, point {@code root} to the new merged node.
+ **
+ ** It is '''assumed''' that both children are actual subnodes of the parent,
+ ** that they are adjacent in the parent, and that they are minimally full.
+ ** It is up to the caller to ensure that this holds.
+ **
+ ** The exact implementation of this may change from time to time, so the
+ ** calling code should regard both input subnodes as effectively destroyed,
+ ** and re-get the desired result subnode from calling the appropriate
+ ** get methods on the parent node.
+ **
+ ** @param parent The node to (re)attach the merge node to.
+ ** @param lnode The smaller subnode to merge + ** @param rnode The greater subnode to merge + */ + private K merge(Node parent, Node lnode, Node rnode) { + assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges + assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); + assert(lnode.nodeSize() == ENT_MIN); + assert(rnode.nodeSize() == ENT_MIN); + assert(parent == root && parent.nodeSize() > 0 + || parent.nodeSize() > ENT_MIN); + assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode + && parent.lnodes.get(rnode.lkey) == lnode); + parent._size = lnode._size = rnode._size = -1; + + K mkey = rnode.lkey; // same as lnode.rkey; + + if (rnode.isLeaf()) { + // this is just the same as the code in the else block, but with leaf + // references to rnodes and lnodes removed (since they are null) + rnode.entries.putAll(lnode.entries); + + rnode.entries.put(mkey, parent.entries.remove(mkey)); + rnode.lkey = lnode.lkey; + + parent.rnodes.remove(mkey); + parent.lnodes.remove(mkey); + parent.rnodes.put(lnode.lkey, rnode); + + } else { + rnode.entries.putAll(lnode.entries); + rnode.lnodes.putAll(lnode.lnodes); + rnode.rnodes.putAll(lnode.rnodes); + + rnode.entries.put(mkey, parent.entries.remove(mkey)); + rnode.lnodes.put(mkey, lnode.lnodes.get(mkey)); + rnode.rnodes.put(lnode.lkey, lnode.rnodes.get(lnode.lkey)); + rnode.lkey = lnode.lkey; + + parent.rnodes.remove(mkey); + parent.lnodes.remove(mkey); + parent.rnodes.put(lnode.lkey, rnode); + + } + + if (parent == root && parent.entries.isEmpty()) { + assert(parent.lkey == null && parent.rkey == null + && rnode.lkey == null && rnode.rkey == null); + root = rnode; + } + + assert(parent.rnodes.get(rnode.lkey) == rnode); + assert(parent.lnodes.get(rnode.rkey) == rnode); + return mkey; + } + + /** + ** Performs a rotate operation towards the smaller node. A rotate operation + ** is where: + ** + ** * an entry M from the parent node is moved to a subnode + ** * an entry S from an adjacent subnode is moved to fill gap left by M + ** * a subnode N associated with S is cut from M and attached to S + ** + ** with the operands chosen appropriately to maintain tree constraints. + ** + ** Here, M is the key that separates {@code lnode} and {@code rnode}, S is + ** the smallest key of {@code rnode}, and N its smallest subnode. + ** + ** It is '''assumed''' that both childs are actual subnodes of the parent, + ** that they are adjacent in the parent, and that the child losing an entry + ** has more than {@link #ENT_MIN} entries. It is up to the caller to ensure + ** that this holds. + ** + ** @param parent The parent node of the children. 
+ ** @param lnode The smaller subnode, which accepts an entry from the parent + ** @param rnode The greater subnode, which loses an entry to the parent + */ + private K rotateL(Node parent, Node lnode, Node rnode) { + assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges + assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); + assert(rnode.nodeSize() >= lnode.nodeSize()); + assert(rnode.nodeSize() > ENT_MIN); + assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode + && parent.lnodes.get(rnode.lkey) == lnode); + parent._size = lnode._size = rnode._size = -1; + + K mkey = rnode.lkey; + K skey = rnode.entries.firstKey(); + + lnode.entries.put(mkey, parent.entries.remove(mkey)); + parent.entries.put(skey, rnode.entries.remove(skey)); + parent.rnodes.put(skey, parent.rnodes.remove(mkey)); + parent.lnodes.put(skey, parent.lnodes.remove(mkey)); + + lnode.rkey = rnode.lkey = skey; + + if (!lnode.isLeaf()) { + lnode.rnodes.put(mkey, rnode.rnodes.remove(mkey)); + lnode.lnodes.put(skey, rnode.lnodes.remove(skey)); + } + + assert(parent.rnodes.get(skey) == rnode); + assert(parent.lnodes.get(skey) == lnode); + return mkey; + } + + /** + ** Performs a rotate operation towards the greater node. A rotate operation + ** is where: + ** + ** * an entry M from the parent node is moved to a subnode + ** * an entry S from an adjacent subnode is moved to fill gap left by M + ** * a subnode N associated with S is cut from M and attached to S + ** + ** with the operands chosen appropriately to maintain tree constraints. + ** + ** Here, M is the key that separates {@code lnode} and {@code rnode}, S is + ** the greatest key of {@code rnode}, and N its greatest subnode. + ** + ** It is '''assumed''' that both childs are actual subnodes of the parent, + ** that they are adjacent in the parent, and that the child losing an entry + ** has more than {@link #ENT_MIN} entries. It is up to the caller to ensure + ** that this holds. + ** + ** @param parent The parent node of the children. + ** @param lnode The smaller subnode, which loses an entry to the parent + ** @param rnode The greater subnode, which accepts an entry from the parent + */ + private K rotateR(Node parent, Node lnode, Node rnode) { + assert(compare(lnode.rkey, rnode.lkey) == 0); // not compare0 since can't be at edges + assert(lnode.isLeaf() && rnode.isLeaf() || !lnode.isLeaf() && !rnode.isLeaf()); + assert(lnode.nodeSize() >= rnode.nodeSize()); + assert(lnode.nodeSize() > ENT_MIN); + assert(!parent.isLeaf() && parent.rnodes.get(lnode.rkey) == rnode + && parent.lnodes.get(rnode.lkey) == lnode); + parent._size = lnode._size = rnode._size = -1; + + K mkey = lnode.rkey; + K skey = lnode.entries.lastKey(); + + rnode.entries.put(mkey, parent.entries.remove(mkey)); + parent.entries.put(skey, lnode.entries.remove(skey)); + parent.lnodes.put(skey, parent.lnodes.remove(mkey)); + parent.rnodes.put(skey, parent.rnodes.remove(mkey)); + + lnode.rkey = rnode.lkey = skey; + + if (!rnode.isLeaf()) { + rnode.lnodes.put(mkey, lnode.lnodes.remove(mkey)); + rnode.rnodes.put(skey, lnode.rnodes.remove(skey)); + } + + assert(parent.rnodes.get(skey) == rnode); + assert(parent.lnodes.get(skey) == lnode); + return mkey; + } + + /** + ** Returns the number of entries contained in the root node. + */ + public int sizeRoot() { + return root.nodeSize(); + } + + public int nodeMin() { + return NODE_MIN; + } + + public int entMax() { + return ENT_MAX; + } + + /** + ** Gives a quick estimate of the height of the tree. 
This method will tend
+ ** to over-estimate rather than underestimate.
+ **
+ ** The exact formula is {@code 1 + floor(log(size)/log(NODE_MIN))}. I
+ ** couldn't be bothered trying to prove that this is a firm upper bound;
+ ** feel free.
+ */
+ public int heightEstimate() {
+ // round(log(0)/x) is -infinity
+ return 1 + (size == 0? 0: (int)Math.floor(Math.log(size) / Math.log(NODE_MIN)));
+ }
+
+ /**
+ ** Returns the minimum number of nodes (at a single level) needed for the
+ ** given number of entries.
+ */
+ public int minNodesFor(int entries) {
+ // find the smallest k such that k * ENT_MAX + (k-1) >= n
+ // ie. k * NODE_MAX > n :: k = floor( n / NODE_MAX ) + 1
+ return entries / NODE_MAX + 1;
+ }
+
+ /**
+ ** Returns the minimum number of separator keys (at a single level) needed
+ ** for the given number of entries.
+ */
+ public int minKeysFor(int entries) {
+ // 1 less than minNodesFor()
+ return entries / NODE_MAX;
+ }
+
+ /**
+ ** Returns the entry at a particular (zero-based) index.
+ */
+ public Map.Entry<K, V> getEntry(int index) {
+ if (index < 0 || index >= size) {
+ throw new IndexOutOfBoundsException("Index outside of range [0," + size + ")");
+ }
+
+ int current = 0;
+ Node node = root;
+
+ for (;;) {
+ if (node.isLeaf()) {
+ for (Map.Entry<K, V> en: node.entries.entrySet()) {
+ if (current == index) { return en; }
+ ++current;
+ }
+ throw new IllegalStateException("BTreeMap getEntry method is buggy, please report.");
+ }
+
+ // OPT LOW better way to do this than linear iteration through the entries.
+ // this performs badly when large nodes are accessed repeatedly.
+ //
+ // one way would be to insert the last sum-key pair we calculate at each node, into
+ // a cache of SoftReference<SortedMap<Integer, K>> for that node. then, next time we visit
+ // the same node, we can use this cache to generate a submap - ie.
+ // node.entries.tailMap(cache.get(cache.tailMap(index).firstKey())) // firstEntry().getValue() is Java 6 only :(
+ // - and iterate only over this submap (don't forget to set "current" to the right value)
+ //
+ Node nextnode = node.rnodes.get(node.lkey);
+ int next = current + nextnode.totalSize();
+ if (index < next) { node = nextnode; continue; }
+ current = next;
+
+ for (Map.Entry<K, V> en: node.entries.entrySet()) {
+ if (current == index) { return en; }
+ ++current;
+
+ nextnode = node.rnodes.get(en.getKey());
+ next = current + nextnode.totalSize();
+ if (index < next) { node = nextnode; break; }
+ current = next;
+ }
+
+ assert(current <= index);
+ }
+ }
+
+ /*========================================================================
+ public interface Map
+ ========================================================================*/
+
+ @Override public int size() {
+ return size;
+ }
+
+ @Override public boolean isEmpty() {
+ return size == 0;
+ }
+
+ @Override public void clear() {
+ root = newNode(null, null, true);
+ size = 0;
+ }
+
+ /**
+ ** {@inheritDoc}
+ **
+ ** This implementation just descends the tree, returning {@code true} if it
+ ** finds the given key.
+ ** + ** @throws ClassCastException key cannot be compared with the keys + ** currently in the map + ** @throws NullPointerException key is {@code null} and this map uses + ** natural order, or its comparator does not tolerate {@code null} + ** keys + */ + @Override public boolean containsKey(Object k) { + K key = (K) k; + Node node = root; + + for (;;) { + if (node.isLeaf()) { + return node.entries.containsKey(key); + } + + Node nextnode = node.selectNode(key); + if (nextnode == null) { + return true; + } + + node = nextnode; + } + } + + /* provided by AbstractMap + @Override public boolean containsValue(Object key) { + throw new UnsupportedOperationException("not implemented"); + }*/ + + /** + ** {@inheritDoc} + ** + ** This implementation just descends the tree, returning the value for the + ** given key if it can be found. + ** + ** @throws ClassCastException key cannot be compared with the keys + ** currently in the map + ** @throws NullPointerException key is {@code null} and this map uses + ** natural order, or its comparator does not tolerate {@code null} + ** keys + */ + @Override public V get(Object k) { + K key = (K) k; + Node node = root; + + for (;;) { + if (node.isLeaf()) { + return node.entries.get(key); + } + + Node nextnode = node.selectNode(key); + if (nextnode == null) { + return node.entries.get(key); + } + + node = nextnode; + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation uses the BTree single-pass insertion algorithm. We + ** descend the tree, restructuring it as needed so that we can insert an + ** entry into a leaf. + ** + ** Start at the root node. At each stage of the algorithm, we restructure + ** the tree so that the next node we reach has less than {@link #ENT_MAX} + ** entries, and the insertion can occur without breaking constraints. + ** + ** (*) If the number of entries is {@link #ENT_MAX}, then perform {@link + ** #split(Node, Node) split} on the node. Both the newly created nodes now + ** have less than {@link #ENT_MAX} entries; pick the appropriate halfnode + ** to for the rest of the operation, depending on the original input key. + ** + ** If the node is a leaf, insert the value and stop. Otherwise, if the key + ** is not already in the node, select the appropriate subnode and repeat + ** from (*). + ** + ** Otherwise, replace the value and stop. 
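+ **
+ ** For example (hypothetical keys, NODE_MIN = 2 so ENT_MAX = 3): inserting E
+ ** when the root is a full leaf [A C D] first splits the root around the
+ ** median C, giving a root [C] over leaves [A] and [D]; E then descends to
+ ** the right-hand leaf, which becomes [D E].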
+ ** + ** @throws ClassCastException key cannot be compared with the keys + ** currently in the map + ** @throws NullPointerException key is {@code null} and this map uses + ** natural order, or its comparator does not tolerate {@code null} + ** keys + */ + @Override public V put(K key, V value) { + if (key == null) { throw new UnsupportedOperationException("Sorry, this BTreeMap implementation can't handle null keys, even if the comparator supports it."); } + Node node = root, parent = null; + + for (;;) { + node._size = -1; // pre-emptively invalidate node size cache + + if (node.nodeSize() == ENT_MAX) { + K median = split(parent, node); + if (parent == null) { parent = root; } + + node = parent.selectNode(key); + if (node == null) { return parent.entries.put(key, value); } + } + assert(node.nodeSize() < ENT_MAX); + + if (node.isLeaf()) { + int sz = node.nodeSize(); + V v = node.entries.put(key, value); + if (node.nodeSize() != sz) { ++size; } // update tree size counter + return v; + } + + Node nextnode = node.selectNode(key); + if (nextnode == null) { // key is already in the node + return node.entries.put(key, value); + } + + parent = node; + node = nextnode; + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation uses the BTree single-pass deletion algorithm. We + ** descend the tree, restructuring it as needed so that we can delete the + ** entry from a leaf. + ** + ** Start at the root node. At each stage of the algorithm, we restructure + ** the tree so that the next node we reach has more than {@link #ENT_MIN} + ** entries, and the deletion can occur without breaking constraints. + ** + ** (*) If the node is not the root, and if the number of entries is equal + ** to {@link #ENT_MIN}, select its two siblings (L and R). Perform one of + ** the following operations (ties being broken arbitrarily): + ** + ** * {@link #merge(Node, Node, Node) merge} with X, if X also has {@link + ** #ENT_MIN} entries + ** * {@link #rotateL(Node, Node, Node) rotateL} with R, if the R subnode + ** has more entries than L (or equal) + ** * {@link #rotateR(Node, Node, Node) rotateR} with L, if the L subnode + ** has more entries than R (or equal) + ** + ** The selected node now has more than {@link #ENT_MIN} entries. + ** + ** If the node is a leaf, remove the value and stop. Otherwise, if the key + ** is not already in the node, select the appropriate subnode and repeat + ** from (*). + ** + ** Otherwise, select the two subnodes (L and R) that this key separates. + ** Perform one of the following operations (ties being broken arbitrarily): + ** + ** * {@link #merge(Node, Node, Node) merge}, if both subnodes have {@link + ** #ENT_MIN} entries. + ** * {@link #rotateL(Node, Node, Node) rotateL}, if the R subnode has more + ** entries than L (or equal) + ** * {@link #rotateR(Node, Node, Node) rotateR}, if the L subnode has more + ** entries than R (or equal) + ** + ** The node that the key ended up in now has more than {@link #ENT_MIN} + ** entries (and will be selected for the next stage). 
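+ **
+ ** For example (hypothetical keys, NODE_MIN = 2 so ENT_MIN = 1): removing A
+ ** when the root [C] sits over leaves [A] and [E], both at ENT_MIN, first
+ ** merges the leaves back into [A C E], which becomes the new root; A is
+ ** then removed from that leaf, leaving [C E].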
+ ** + ** @throws ClassCastException key cannot be compared with the keys + ** currently in the map + ** @throws NullPointerException key is {@code null} and this map uses + ** natural order, or its comparator does not tolerate {@code null} + ** keys + */ + @Override public V remove(Object k) { + K key = (K) k; + Node node = root, parent = null; + + for (;;) { + node._size = -1; // pre-emptively invalidate node size cache + + if (node != root && node.nodeSize() == ENT_MIN) { + Node lnode = parent.nodeL(node), rnode = parent.nodeR(node); + int L = (lnode == null)? -1: lnode.nodeSize(); + int R = (rnode == null)? -1: rnode.nodeSize(); + + K kk = // in java, ?: must be used in a statement :| + // lnode doesn't exist + (L < 0)? ((R == ENT_MIN)? merge(parent, node, rnode): + rotateL(parent, node, rnode)): + // rnode doesn't exist + (R < 0)? ((L == ENT_MIN)? merge(parent, lnode, node): + rotateR(parent, lnode, node)): + // pick the node with more entries + (R > L)? rotateL(parent, node, rnode): + (L > R)? rotateR(parent, lnode, node): + // otherwise pick one at "random" + (size&1) == 1? (R == ENT_MIN? merge(parent, node, rnode): + rotateL(parent, node, rnode)): + (L == ENT_MIN? merge(parent, lnode, node): + rotateR(parent, lnode, node)); + node = parent.selectNode(key); + assert(node != null); + } + assert(node == root || node.nodeSize() >= ENT_MIN); + + if (node.isLeaf()) { // leaf node + int sz = node.nodeSize(); + V v = node.entries.remove(key); + if (node.nodeSize() != sz) { --size; } // update tree size counter + return v; + } + + Node nextnode = node.selectNode(key); + if (nextnode == null) { // key is already in the node + Node lnode = node.lnodes.get(key), rnode = node.rnodes.get(key); + int L = lnode.nodeSize(), R = rnode.nodeSize(); + + K kk = + // both lnode and rnode must exist, so + // pick the one with more entries + (R > L)? rotateL(node, lnode, rnode): + (L > R)? rotateR(node, lnode, rnode): + // otherwise pick one at "random" + (size&1) == 1? (R == ENT_MIN? merge(node, lnode, rnode): + rotateL(node, lnode, rnode)): + (L == ENT_MIN? merge(node, lnode, rnode): + rotateR(node, lnode, rnode)); + nextnode = node.selectNode(key); + assert(nextnode != null); + } + + parent = node; + node = nextnode; + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation iterates over the given map's {@code entrySet}, + ** adding each mapping in turn, except for when {@code this} map is empty, + ** and the input map is a non-empty {@link SortedMap}. In this case, it + ** uses the BTree bulk-loading algorithm: + ** + ** * distribute all the entries of the map across the least number of nodes + ** possible, excluding the entries that will act as separators between + ** these nodes + ** * repeat for the separator entries, and use them to join the nodes from + ** the previous level appropriately to form a node at this level + ** * repeat until there are no more entries on a level, at which point use + ** the (single) node from the previous level as the root + ** + ** (The optimisation is also used if the input map is {@code this}.) 
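+ **
+ ** A worked example (hypothetical sizes, NODE_MIN = 2 so NODE_MAX = 4):
+ ** bulk-loading 10 sorted entries uses k = 10/4 + 1 = 3 leaves, so
+ ** 10 - (3-1) = 8 entries are spread 3/3/2 over the leaves and the 2
+ ** separators move up a level, where k = 2/4 + 1 = 1 node absorbs both and
+ ** becomes the root.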
+ **
+ ** @param t mappings to be stored in this map
+ */
+ @Override public void putAll(Map<? extends K, ? extends V> t) {
+ // t == this to support restructure()
+ if (t.isEmpty()) {
+ return;
+ } else if (t == this || isEmpty() && t instanceof SortedMap) {
+ SortedMap<K, V> map = (SortedMap<K, V>)t, nextmap;
+ Map<K, Node> lnodes = null, nextlnodes;
+
+ if (!(comparator == null? map.comparator() == null: comparator.equals(map.comparator()))) {
+ super.putAll(map);
+ return;
+ }
+
+ while (map.size() > 0) {
+ int k = minNodesFor(map.size());
+
+ nextlnodes = new HashMap<K, Node>(k<<1);
+ nextmap = new TreeMap<K, V>(comparator);
+
+ Iterator<Map.Entry<K, V>> it = map.entrySet().iterator();
+ K prevkey = null; // NULLNOTICE
+
+ // allocate all entries into these k nodes, except for k-1 parents
+ for (Integer n: Integers.allocateEvenly(map.size()-k+1, k)) {
+ // put n entries into a new leaf
+ Map.Entry<K, V> en = makeNode(it, n, prevkey, lnodes, nextlnodes);
+ if (en != null) { nextmap.put(prevkey = en.getKey(), en.getValue()); }
+ }
+
+ lnodes = nextlnodes;
+ map = nextmap;
+ }
+
+ assert(lnodes.size() == 1);
+ root = lnodes.get(null);
+ size = t.size();
+
+ } else {
+ super.putAll(t);
+ }
+ }
+
+ /**
+ ** Helper method for the bulk-loading algorithm.
+ **
+ ** @param it Iterator over the entries at the current level
+ ** @param n Number of entries to add to the map
+ ** @param prevkey Last key encountered before calling this method
+ ** @param lnodes Complete map of keys to their lnodes at this level
+ ** @param nextlnodes In-construction lnodes map for the next level
+ */
+ private Map.Entry<K, V> makeNode(Iterator<Map.Entry<K, V>> it, int n, K prevkey, Map<K, Node> lnodes, Map<K, Node> nextlnodes) {
+ Node node;
+ if (lnodes == null) { // create leaf nodes
+ node = newNode(prevkey, null, true);
+ for (int i=0; i<n; ++i) {
+ Map.Entry<K, V> en = it.next();
+ K key = en.getKey();
+ node.entries.put(key, en.getValue());
+ prevkey = key;
+ }
+ } else {
+ node = newNode(prevkey, null, false);
+ for (int i=0; i<n; ++i) {
+ Map.Entry<K, V> en = it.next();
+ K key = en.getKey();
+ node.entries.put(key, en.getValue());
+ Node subnode = lnodes.get(key);
+ node.rnodes.put(prevkey, subnode);
+ node.lnodes.put(key, subnode);
+ prevkey = key;
+ }
+ }
+
+ Map.Entry<K, V> next;
+ K key;
+ if (it.hasNext()) {
+ next = it.next();
+ key = next.getKey();
+ } else {
+ next = null;
+ key = null;
+ }
+
+ if (lnodes != null) {
+ Node subnode = lnodes.get(key);
+ node.rnodes.put(prevkey, subnode);
+ node.lnodes.put(key, subnode);
+ }
+ node.rkey = key;
+ nextlnodes.put(key, node);
+
+ return next;
+ }
+
+ /**
+ ** Restructures the tree, distributing the entries evenly between the leaf
+ ** nodes. This method merely calls {@link #putAll(Map)} with {@code this}.
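+ **
+ ** A hedged usage sketch (hypothetical contents; the bulk-load path in
+ ** {@link #putAll(Map)} is taken because the argument is {@code this}):
+ **
+ **   // after many removals have left the leaves sparse:
+ **   tree.restructure();   // re-packs the entries evenly into minimal leaves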
+ */
+ public void restructure() {
+ putAll(this);
+ }
+
+ private Set<Map.Entry<K, V>> entrySet = null;
+ @Override public Set<Map.Entry<K, V>> entrySet() {
+ if (entrySet == null) {
+ entrySet = new AbstractSet<Map.Entry<K, V>>() {
+
+ @Override public int size() { return BTreeMap.this.size(); }
+
+ @Override public Iterator<Map.Entry<K, V>> iterator() {
+ // FIXME LOW - this does NOT yet throw ConcurrentModificationException
+ // use a modCount counter
+ return new Iterator<Map.Entry<K, V>>() {
+
+ Stack<Node> nodestack = new Stack<Node>();
+ Stack<Iterator<Map.Entry<K, V>>> itstack = new Stack<Iterator<Map.Entry<K, V>>>();
+
+ Node cnode = BTreeMap.this.root;
+ Iterator<Map.Entry<K, V>> centit = cnode.entries.entrySet().iterator();
+
+ K lastkey = null;
+ boolean removeok = false;
+
+ // DEBUG ONLY, remove when unneeded
+ /*public String toString() {
+ StringBuilder s = new StringBuilder();
+ for (Node n: nodestack) {
+ s.append(n.getRange()).append(", ");
+ }
+ return "nodestack: [" + s + "]; cnode: " + cnode.getRange() + "; lastkey: " + lastkey;
+ }*/
+
+ /*@Override**/ public boolean hasNext() {
+ // TODO LOW ideally iterate in the reverse order
+ for (Iterator<Map.Entry<K, V>> it: itstack) {
+ if (it.hasNext()) { return true; }
+ }
+ if (centit.hasNext()) { return true; }
+ if (!cnode.isLeaf()) { return true; }
+ return false;
+ }
+
+ /*@Override**/ public Map.Entry<K, V> next() {
+ if (cnode.isLeaf()) {
+ while (!centit.hasNext()) {
+ if (nodestack.empty()) {
+ assert(itstack.empty());
+ throw new NoSuchElementException();
+ }
+ cnode = nodestack.pop();
+ centit = itstack.pop();
+ }
+ } else {
+ while (!cnode.isLeaf()) {
+ // NULLNOTICE lastkey initialised to null, so this will get the right node
+ // even at the smaller edge of the map
+ Node testnode = cnode.rnodes.get(lastkey);
+ testnode.isLeaf(); // trigger DataNotLoadedException if this is a GhostNode
+ // node OK, proceed
+ nodestack.push(cnode);
+ itstack.push(centit);
+ cnode = testnode;
+ centit = cnode.entries.entrySet().iterator();
+ }
+ }
+ Map.Entry<K, V> next = centit.next(); lastkey = next.getKey();
+ removeok = true;
+ return next;
+ }
+
+ /*@Override**/ public void remove() {
+ if (!removeok) {
+ throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed.");
+ }
+ // OPT LOW this could probably be a *lot* more efficient...
+ BTreeMap.this.remove(lastkey);
+
+ // we need to find our position in the tree again, since there may have
+ // been structural modifications to it.
+ // OPT LOW have a "structual modifications counter" that is incremented by + // split, merge, rotateL, rotateR, and do the below only if it changes + nodestack.clear(); + itstack.clear(); + cnode = BTreeMap.this.root; + centit = cnode.entries.entrySet().iterator(); + + K stopkey = null; + while(!cnode.isLeaf()) { + stopkey = findstopkey(stopkey); + nodestack.push(cnode); + itstack.push(centit); + // NULLNOTICE stopkey initialised to null, so this will get the right node + // even at the smaller edge of the map + cnode = cnode.rnodes.get(stopkey); + centit = cnode.entries.entrySet().iterator(); + } + + lastkey = findstopkey(stopkey); + removeok = false; + } + + private K findstopkey(K stopkey) { + SortedMap headmap = cnode.entries.headMap(lastkey); + if (!headmap.isEmpty()) { + stopkey = headmap.lastKey(); + while (compare(centit.next().getKey(), stopkey) < 0); + } + return stopkey; + } + + }; + } + + @Override public void clear() { + BTreeMap.this.clear(); + } + + @Override public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) { return false; } + Map.Entry e = (Map.Entry)o; + Object value = BTreeMap.this.get(e.getKey()); + return value != null && value.equals(e.getValue()); + } + + @Override public boolean remove(Object o) { + if (contains(o)) { + Map.Entry e = (Map.Entry)o; + BTreeMap.this.remove(e.getKey()); + return true; + } + return false; + } + + }; + } + return entrySet; + } + + /* provided by AbstractMap + ** OPT LOW - the default clear() method is inefficient + @Override public Set keySet() { + throw new UnsupportedOperationException("not implemented"); + }*/ + + /* provided by AbstractMap + ** OPT LOW - the default clear() method is inefficient + @Override public Collection values() { + throw new UnsupportedOperationException("not implemented"); + }*/ + + /*======================================================================== + public interface SortedMap + ========================================================================*/ + + /*@Override**/ public Comparator comparator() { + return comparator; + } + + /*@Override**/ public K firstKey() { + Node node = root; + for (;;) { + K fkey = node.entries.firstKey(); + if (node.isLeaf()) { return fkey; } + node = node.lnodes.get(fkey); + } + } + + /*@Override**/ public K lastKey() { + Node node = root; + for (;;) { + K lkey = node.entries.lastKey(); + if (node.isLeaf()) { return lkey; } + node = node.rnodes.get(lkey); + } + } + + /** + ** {@inheritDoc} + ** + ** Not yet implemented - throws {@link UnsupportedOperationException} + */ + /*@Override**/ public SortedMap headMap(K rkey) { + throw new UnsupportedOperationException("not implemented"); + } + + /** + ** {@inheritDoc} + ** + ** Not yet implemented - throws {@link UnsupportedOperationException} + */ + /*@Override**/ public SortedMap tailMap(K lkey) { + throw new UnsupportedOperationException("not implemented"); + } + + /** + ** {@inheritDoc} + ** + ** Not yet implemented - throws {@link UnsupportedOperationException} + */ + /*@Override**/ public SortedMap subMap(K lkey, K rkey) { + throw new UnsupportedOperationException("not implemented"); + } + +} diff --git a/src/main/java/plugins/Library/util/BTreeSet.java b/src/main/java/plugins/Library/util/BTreeSet.java new file mode 100644 index 00000000..75e24054 --- /dev/null +++ b/src/main/java/plugins/Library/util/BTreeSet.java @@ -0,0 +1,76 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version) {. 
See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import java.util.Comparator; +import java.util.Set; +import java.util.SortedSet; + +/** +** A B-tree set implementation backed by a {@link BTreeMap}. DOCUMENT +** +** @author infinity0 +*/ +public class BTreeSet extends SortedMapSet> +implements Set, SortedSet/*, NavigableSet, Cloneable, Serializable*/ { + + /** + ** Creates a new empty set, sorted according to the given comparator, and + ** with each non-root node having the given minimum number of subnodes. + ** + ** @param cmp The comparator for the tree, or {@code null} to use the keys' + ** {@link Comparable natural} ordering. + ** @param node_min Minimum number of subnodes in each node + */ + public BTreeSet(Comparator cmp, int node_min) { + super(new BTreeMap(cmp, node_min)); + } + + /** + ** Creates a new empty set, sorted according to the keys' {@link Comparable + ** natural} ordering, and with each non-root node having the given minimum + ** number of subnodes. + ** + ** @param node_min Minimum number of subnodes in each node + **/ + public BTreeSet(int node_min) { + super(new BTreeMap(node_min)); + } + + /** + ** Protected constructor for use by the {@link SkeletonBTreeSet} + ** constructors. + */ + protected BTreeSet(BTreeMap m) { + super(m); + } + + /** + ** Returns the number of entries contained in the root node. + */ + public int sizeRoot() { + return bkmap.sizeRoot(); + } + + public int nodeMin() { + return bkmap.nodeMin(); + } + + public int entMax() { + return bkmap.entMax(); + } + + public int heightEstimate() { + return bkmap.heightEstimate(); + } + + /** + ** Returns the element at a particular (zero-based) index. + */ + public E get(int i) { + return bkmap.getEntry(i).getKey(); + } + + +} diff --git a/src/main/java/plugins/Library/util/BytePrefixKey.java b/src/main/java/plugins/Library/util/BytePrefixKey.java new file mode 100644 index 00000000..ae0efad0 --- /dev/null +++ b/src/main/java/plugins/Library/util/BytePrefixKey.java @@ -0,0 +1,147 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import plugins.Library.util.PrefixTree.PrefixKey; +import plugins.Library.util.PrefixTree.AbstractPrefixKey; + +import java.util.Arrays; + +/** +** A PrefixKey backed by an array of bytes. +** +** @author infinity0 +*/ +abstract public class BytePrefixKey> extends AbstractPrefixKey implements PrefixKey { + + /** + ** Returns the string representation of a byte array. + */ + public static String bytesToHex(byte[] hash) { + StringBuilder buf = new StringBuilder(); + for (int i = 0; i < hash.length; i++) { + int h = (hash[i] >> 4) & 0x0F; + int l = hash[i] & 0x0F; + buf.append((char)((0<=h && h<=9)? '0'+h: 'a'+h-10)); + buf.append((char)((0<=l && l<=9)? '0'+l: 'a'+l-10)); + } + return buf.toString(); + } + + /** + ** Returns the byte array representation of a hex string. 
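+    **
+    ** For example (round trip; inferred from the two contracts above): {@code
+    ** hexToBytes("0a1f")} yields the bytes {@code 0x0a, 0x1f}, and {@code
+    ** bytesToHex} maps them back to {@code "0a1f"} (lower-case, two digits
+    ** per byte).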
+ */ + public static byte[] hexToBytes(String hex) { + byte[] bs = new byte[hex.length()>>1]; + for (int i=0, j=0; i clone(); + + /*@Override**/ public int symbols() { + return 256; + } + + /*@Override**/ public int size() { + return hash.length; + } + + /*@Override**/ public int get(int i) { + return hash[i] & 0xFF; + } + + /*@Override**/ public void set(int i, int v) { + hash[i] = (byte)v; + } + + /*@Override**/ public void clear(int i) { + hash[i] = 0; + } + + /*======================================================================== + public class Object + ========================================================================*/ + + @Override public boolean equals(Object o) { + if (o == this) { return true; } + if (o instanceof BytePrefixKey) { + return Arrays.equals(hash, ((BytePrefixKey)o).hash); + } + return false; + } + + @Override public int hashCode() { + return Arrays.hashCode(hash); + } + +} diff --git a/src/main/java/plugins/Library/util/CompositeIterable.java b/src/main/java/plugins/Library/util/CompositeIterable.java new file mode 100644 index 00000000..1fa8cd20 --- /dev/null +++ b/src/main/java/plugins/Library/util/CompositeIterable.java @@ -0,0 +1,68 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import java.util.Iterator; + +/** +** An {@link Iterable} backed by another {@link Iterable}. +** +** @param Type of source +** @param Type of target +** @author infinity0 +*/ +abstract public class CompositeIterable implements Iterable { + + /** + ** The backing {@link Iterable}. + */ + final protected Iterable ib; + + /** + ** Whether the iterator supports {@link Iterator#remove()}. + */ + final public boolean immutable; + + /** + ** Create a new mutable iterable backed by the given iterable. Note that + ** mutability will only have an effect if the backing iterator is also + ** mutable. + */ + public CompositeIterable(Iterable i) { + ib = i; + immutable = false; + } + + /** + ** Create a new iterable backed by the given iterable, with the given + ** mutability setting. Note that mutability will only have an effect if the + ** backing iterator is also mutable. + */ + public CompositeIterable(Iterable i, boolean immute) { + ib = i; + immutable = immute; + } + + /*@Override**/ public Iterator iterator() { + return immutable? + new Iterator() { + final Iterator it = ib.iterator(); + /*@Override**/ public boolean hasNext() { return it.hasNext(); } + /*@Override**/ public T next() { return CompositeIterable.this.nextFor(it.next()); } + /*@Override**/ public void remove() { throw new UnsupportedOperationException("Immutable iterator"); } + }: + new Iterator() { + final Iterator it = ib.iterator(); + /*@Override**/ public boolean hasNext() { return it.hasNext(); } + /*@Override**/ public T next() { return CompositeIterable.this.nextFor(it.next()); } + /*@Override**/ public void remove() { it.remove(); } + }; + } + + /** + ** Returns an object of the target type given an object of the source type. + */ + abstract protected T nextFor(S elem); + +} diff --git a/src/main/java/plugins/Library/util/DataNotLoadedException.java b/src/main/java/plugins/Library/util/DataNotLoadedException.java new file mode 100644 index 00000000..1212c5ea --- /dev/null +++ b/src/main/java/plugins/Library/util/DataNotLoadedException.java @@ -0,0 +1,64 @@ +/* This code is part of Freenet. 
It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+/**
+** Thrown when data hasn't been loaded yet, eg. when a {@link Skeleton} hasn't
+** been fully loaded into memory. Exceptions should be constructed such that
+** a method call to {@link Skeleton#inflate(Object)} on {@link #getParent()} with
+** {@link #getKey()} as its argument will load the data and prevent an
+** exception from being thrown the next time round the data is accessed.
+**
+** Ideally, this would be {@code DataNotLoadedException<K>}, where {@code K} is
+** the type parameter for {@link Skeleton}, but Java does not currently allow
+** parametrised classes to extend {@link Throwable}.
+**
+** @author infinity0
+** @see Skeleton
+*/
+public class DataNotLoadedException extends RuntimeException {
+
+    /**
+    ** The parent container of the not-yet-loaded data.
+    */
+    final Skeleton parent;
+
+    /**
+    ** The key, if any, that the not-yet-loaded data was associated with.
+    */
+    final Object key;
+
+    /**
+    ** The metadata for the not-yet-loaded data.
+    */
+    final Object meta;
+
+    public DataNotLoadedException(String s, Throwable t, Skeleton p, Object k, Object v) {
+        super(s, t);
+        parent = p;
+        key = k;
+        meta = v;
+    }
+
+    public DataNotLoadedException(Throwable t, Skeleton p, Object k, Object v) {
+        this(null, t, p, k, v);
+    }
+
+    public DataNotLoadedException(String s, Skeleton p, Object k, Object v) {
+        this(s, null, p, k, v);
+    }
+
+    public DataNotLoadedException(String s, Skeleton p, Object k) {
+        this(s, null, p, k, null);
+    }
+
+    public DataNotLoadedException(String s, Skeleton p) {
+        this(s, null, p, null, null);
+    }
+
+    public Skeleton getParent() { return parent; }
+    public Object getKey() { return key; }
+    public Object getValue() { return meta; }
+
+}
diff --git a/src/main/java/plugins/Library/util/IdentityComparator.java b/src/main/java/plugins/Library/util/IdentityComparator.java
new file mode 100644
index 00000000..4320731b
--- /dev/null
+++ b/src/main/java/plugins/Library/util/IdentityComparator.java
@@ -0,0 +1,84 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+import java.util.Comparator;
+import java.util.SortedSet;
+import java.util.IdentityHashMap;
+import java.util.HashMap;
+
+/**
+** A comparator that is consistent with the identity operator on objects. The
+** ordering is arbitrary, but consistent with the general contract for
+** compare methods. This is useful when you want to have a sorted collection
+** like {@link SortedSet}, but want to be able to have two objects (that would
+** otherwise compare equal) present in the collection simultaneously.
+**
+** This implementation will remain consistent even if it encounters two objects
+** with the *same* identity hash code, by assigning a unique 64-bit id to each
+** distinct object, as it encounters such objects.
+**
+** Since identity hashcodes are generated arbitrarily by the JVM, the ordering
+** imposed by this comparator will change between different runs of the JVM.
+** Hence, the exact ordering must not be treated as an intrinsic or immutable
+** property of the underlying collection.
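+**
+** For example (illustrative only; raw types, as elsewhere in this file):
+**
+**   SortedSet set = new TreeSet(IdentityComparator.comparator);
+**   set.add(new String("x"));
+**   set.add(new String("x"));   // both elements are retained: size() == 2
+**
+** where a set ordered by {@link Comparable natural} ordering would have
+** collapsed the two equal strings into one.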
+**
+** The intended use of this class is for another class to subclass it, or to
+** have a {@link Comparable#compareTo(Object)} call the method of the singleton
+** provided in the {@link #comparator} field.
+**
+** @author infinity0
+** @see System#identityHashCode(Object)
+** @see Comparator#compare(Object, Object)
+** @see Comparable#compareTo(Object)
+*/
+abstract public class IdentityComparator implements Comparator {
+
+    /**
+    ** A singleton comparator for use by {@link Comparable#compareTo(Object)}.
+    */
+    final public static IdentityComparator comparator = new IdentityComparator() {};
+
+    /**
+    ** Keeps track of objects with the same identity hashcode, and the unique
+    ** IDs assigned to them by the comparator. This map is *ONLY* used if the
+    ** comparator encounters two distinct objects with the same identity
+    ** hashcodes.
+    */
+    final private static IdentityHashMap objectid = new IdentityHashMap(4);
+
+    /**
+    ** Counts the number of objects that have a given identity hashcode. This
+    ** map is *ONLY* used if the comparator encounters two distinct objects
+    ** with the same identity hashcodes.
+    */
+    final private static HashMap idcounter = new HashMap(4);
+
+    /**
+    ** Compare two objects by identity.
+    */
+    public int compare(T o1, T o2) {
+        if (o1 == o2) { return 0; }
+        int h1 = System.identityHashCode(o1);
+        int h2 = System.identityHashCode(o2);
+        if (h1 != h2) {
+            return (h1 > h2)? 1: -1;
+        } else {
+            synchronized (IdentityComparator.class) {
+                Long counter = idcounter.get(h1);
+                if (counter == null) { counter = 0L; }
+
+                Long l1 = objectid.get(o1);
+                Long l2 = objectid.get(o2);
+                if (l1 == null) { l1 = counter++; objectid.put(o1, l1); }
+                if (l2 == null) { l2 = counter++; objectid.put(o2, l2); }
+
+                idcounter.put(h1, counter);
+                assert((long)l1 != (long)l2);
+                return (l1 > l2)? 1: -1;
+            }
+        }
+    }
+
+}
diff --git a/src/main/java/plugins/Library/util/Integers.java b/src/main/java/plugins/Library/util/Integers.java
new file mode 100644
index 00000000..c81d7d5b
--- /dev/null
+++ b/src/main/java/plugins/Library/util/Integers.java
@@ -0,0 +1,82 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+** Various integer methods.
+**
+** @author infinity0
+*/
+final public class Integers {
+
+    private Integers() { }
+
+    // TODO LOW "allocateEvenlyRandom" ie. distribute evenly but with ties
+    // being broken randomly, in the style of dithering
+
+    /**
+    ** Allocate {@code total} resources evenly over {@code num} recipients.
+    **
+    ** @return An {@link Iterable} of {@link Integer}, that "contains" {@code
+    **         num} elements, each of which is either {@code k+1} or {@code k},
+    **         where {@code k = total/num} (integer division). The {@code k+1}
+    **         will themselves also be dispersed evenly throughout the iteration.
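+    **
+    ** For example (worked by hand from the code below): {@code
+    ** allocateEvenly(10, 4)} iterates as {@code 2, 3, 2, 3}, and {@code
+    ** allocateEvenly(7, 3)} as {@code 2, 3, 2}; each sequence sums to the
+    ** total, with the larger shares spread out rather than bunched together
+    ** at one end.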
+ */ + public static Iterable allocateEvenly(final int total, final int num) { + if (total < 0) { + throw new IllegalArgumentException("Total must be non-negative."); + } + if (num <= 0) { + throw new IllegalArgumentException("Count must be positive."); + } + + return new Iterable() { + + final int k = total / num; + final int r = total % num; + final double step = (double)num / r; + + public Iterator iterator() { + return new Iterator() { + double d = step / 2; + int i = 0; + + /*@Override**/ public boolean hasNext() { + return i < num; + } + + /*@Override**/ public Integer next() { + if (i >= num) { + throw new NoSuchElementException(); + } else if ((int)d == i++) { + d += step; + return k+1; + } else { + return k; + } + } + + /*@Override**/ public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } + + @Override public String toString() { + char[] xs = new char[num]; + int j=0; + for (Integer ii: this) { + xs[j++] = (ii == k)? ' ': '+'; + } + return total + "/" + num + ": " + k + "+[" + new String(xs) + "]"; + } + + }; + } + +} diff --git a/src/main/java/plugins/Library/util/Maps.java b/src/main/java/plugins/Library/util/Maps.java new file mode 100644 index 00000000..34678982 --- /dev/null +++ b/src/main/java/plugins/Library/util/Maps.java @@ -0,0 +1,132 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import static plugins.Library.util.Maps.$; + +import java.util.Map.Entry; // WORKAROUND javadoc bug #4464323 +import java.util.Map; + +/** +** Methods for maps. +** +** @author infinity0 +*/ +final public class Maps { + + /** + ** A simple {@link Entry} with no special properties. + */ + public static class BaseEntry implements Map.Entry { + + final public K key; + protected V val; + + public BaseEntry(K k, V v) { key = k; val = v; } + + public K getKey() { return key; } + public V getValue() { return val; } + public V setValue(V n) { V o = val; val = n; return o; } + + @Override public boolean equals(Object o) { + if (o == this) { return true; } + if (!(o instanceof Map.Entry)) { return false; } + Map.Entry en = (Map.Entry)o; + return (key == null? en.getKey() == null: key.equals(en.getKey())) && (val == null? en.getValue() == null : val.equals(en.getValue())); + } + + @Override public int hashCode() { + return (key == null? 0: key.hashCode()) ^ (val == null? 0: val.hashCode()); + } + + } + + /** + ** A {@link Entry} whose value cannot be modified. + */ + public static class ImmutableEntry extends BaseEntry { + + public ImmutableEntry(K k, V v) { super(k, v); } + + /** + ** @throws UnsupportedOperationException always + */ + @Override public V setValue(V n) { + throw new UnsupportedOperationException("ImmutableEntry: cannot modify value after creation"); + } + + } + + /** + ** A {@link Entry} whose {@link Object#equals(Object)} and {@link + ** Object#hashCode()} are defined purely in terms of the key, which is + ** immutable in the entry. + ** + ** Note: technically this breaks the contract of {@link Entry}. Use only + ** if you know what you're doing. + */ + public static class KeyEntry extends BaseEntry { + + public KeyEntry(K k, V v) { super(k, v); } + + /** + ** Whether the object is also a {@link KeyEntry} and has the same + ** {@code #key} as this entry. 
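+        **
+        ** For example, {@code $K("a", 1).equals($K("a", 2))} is {@code true},
+        ** so a {@link java.util.Set} of these entries deduplicates on key
+        ** alone; this is exactly the contract-breaking behaviour the class
+        ** doc above warns about.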
+        */
+        @Override public boolean equals(Object o) {
+            if (!(o instanceof KeyEntry)) { return false; }
+            KeyEntry en = (KeyEntry)o;
+            return key == null? en.key == null: key.equals(en.key);
+        }
+
+        @Override public int hashCode() {
+            return key == null ? 0 : 1 + key.hashCode();
+        }
+
+    }
+
+    private Maps() { }
+
+    /**
+    ** Returns a new {@link BaseEntry} with the given key and value.
+    */
+    public static Map.Entry $(final K k, final V v) {
+        return new BaseEntry(k, v);
+    }
+
+    /**
+    ** Returns a new {@link ImmutableEntry} with the given key and value.
+    */
+    public static Map.Entry $$(final K k, final V v) {
+        return new ImmutableEntry(k, v);
+    }
+
+    /**
+    ** Returns a new {@link KeyEntry} with the given key and value.
+    */
+    public static Map.Entry $K(K k, V v) {
+        return new KeyEntry(k, v);
+    }
+
+    public static Map of(Class mapcl, Map.Entry... items) {
+        try {
+            Map map = mapcl.newInstance();
+            for (Map.Entry en: items) {
+                map.put(en.getKey(), en.getValue());
+            }
+            return map;
+        } catch (InstantiationException e) {
+            throw new IllegalArgumentException("Could not instantiate map class", e);
+        } catch (IllegalAccessException e) {
+            throw new IllegalArgumentException("Could not access map class", e);
+        }
+    }
+
+    /*final private static Map testmap = of(SkeletonTreeMap.class,
+        $("test1", "test1"),
+        $("test2", "test2"),
+        $("test3", "test3")
+    );*/
+
+}
diff --git a/src/main/java/plugins/Library/util/PrefixTree.java b/src/main/java/plugins/Library/util/PrefixTree.java
new file mode 100644
index 00000000..252023c9
--- /dev/null
+++ b/src/main/java/plugins/Library/util/PrefixTree.java
@@ -0,0 +1,615 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+import java.util.Set;
+import java.util.Iterator;
+
+/**
+** Trie-like map or map-like structure (eg. multimap) with each node mapping
+** keys that match a given prefix. Each node in the tree will admit a certain
+** maximum number of mappings before it starts creating subtrees.
+**
+** @deprecated
+** @author infinity0
+*/
+abstract public class PrefixTree {
+
+    /**
+    ** The prefix that all keys in this tree must match.
+    */
+    final protected K prefix;
+
+    /**
+    ** The length of the prefix for this tree.
+    */
+    final protected int preflen;
+
+    /**
+    ** Array holding the child trees. There is one cell in the array for each
+    ** symbol in the alphabet of the key. For all i: {@link #child}[i] == null,
+    ** or {@link #child}[i].{@link #lastIndex()} == i.
+    */
+    final protected PrefixTree[] child;
+
+    /**
+    ** The size of the {@link #child} array.
+    */
+    final public transient int subtreesMax;
+
+    /**
+    ** Maximum size of ({@link #sizeLocal()} + {@link #child}) before we start
+    ** to create subtrees.
+    */
+    final public int capacityLocal;
+
+    /**
+    ** Number of subtrees. At all times, this should equal the number of
+    ** non-null members of {@link #child}.
+    */
+    protected int subtrees = 0;
+
+    /**
+    ** Number of elements contained in the tree. At all times, it should equal
+    ** the sum of the size of the subtrees, plus the {@link #sizeLocal()}.
+    */
+    protected int size = 0;
+
+    /**
+    ** Counts the number of mappings in each prefix group, meaning the set of
+    ** all mappings whose keys give the same key.get(preflen). At all times,
+    ** the sum of the array elements should equal the size field. Additionally,
+    ** for all i: ({@link #child}[i] != null) implies ({@link #sizePrefix}[i]
+    ** == {@link #child}[i].size()); and {@link #sizeLocal()} == sum{ {@link
+    ** #sizePrefix}[j] : {@link #child}[j] == null }
+    */
+    protected int sizePrefix[];
+
+    /**
+    ** Cache for {@link #smallestChild()}. At all times, this should either be
+    ** -1, or be the index of the subtree with the smallest size. If -1, then
+    ** either there are no subtrees (check {@link #subtrees} == 0) or the cache
+    ** has been invalidated.
+    */
+    protected transient int smch_ = -1;
+
+    protected PrefixTree(K p, int len, int maxsz, PrefixTree[] chd) {
+        if (chd != null) {
+            // if the child array is null, we assume the caller knows what they're
+            // doing... this is needed for SkeletonPrefixTreeMap.DummyChild
+            if (chd.length != p.symbols()) {
+                throw new IllegalArgumentException("The child array must be able to exactly hold all its potential children, of which there are " + p.symbols());
+            }
+            for (PrefixTree c: chd) {
+                if (c != null) { throw new IllegalArgumentException("Child array to attach must be empty."); }
+            }
+        }
+        if (maxsz < p.symbols()) {
+            throw new IllegalArgumentException("This tree must be able to hold all its potential children, of which there are " + p.symbols());
+        }
+        if (len > p.size()) {
+            throw new IllegalArgumentException("The key is shorter than the length specified.");
+        }
+        if (len < 0) {
+            throw new IllegalArgumentException("Length cannot be negative.");
+        }
+
+        prefix = p;
+        prefix.clearFrom(len);
+        preflen = len;
+
+        child = chd;
+        sizePrefix = (child == null)? null: new int[child.length];
+
+        subtreesMax = p.symbols();
+        capacityLocal = maxsz;
+    }
+
+    /**
+    ** Return the space left in the local map.
+    */
+    public int sizeLeft() {
+        return capacityLocal - (sizeLocal() + subtrees);
+    }
+
+    /**
+    ** Returns the prefix in string form.
+    */
+    public String prefixString() {
+        // OPTIMISE maybe have prefixString save into a cache? or too bothersome...
+        String ps = prefix.toString();
+        return ps.substring(0, ps.length() * preflen / prefix.size());
+    }
+
+    /**
+    ** Returns the value at the last significant index of the prefix
+    */
+    public int lastIndex() {
+        return prefix.get(preflen-1);
+    }
+
+    /**
+    ** Returns the index of the smallest child.
+    */
+    protected int smallestChild() {
+        if (smch_ < 0 && subtrees > 0) {
+            for (int i=0; i<subtreesMax; ++i) {
+                if (child[i] != null && (smch_ < 0 || sizePrefix[i] < sizePrefix[smch_])) { smch_ = i; }
+            }
+        }
+        return smch_;
+    }
+
+    /**
+    ** Move the largest prefix group in the local map out into its own subtree.
+    **
+    ** @return The index of the subtree created.
+    */
+    protected int bindSubTree() {
+        assert(size > 0);
+        assert(subtrees < subtreesMax);
+
+        int mcount = -1;
+        int msym = 0;
+        for (int i=0; i<subtreesMax; ++i) {
+            if (child[i] == null && sizePrefix[i] > mcount) {
+                mcount = sizePrefix[i];
+                msym = i;
+            }
+        }
+
+        Object[] mkeys = new Object[mcount];
+
+        // TODO make this use Sorted methods after this class implements Sorted
+        int i = 0;
+        for (K ikey: keySetLocal()) {
+            int isym = ikey.get(preflen);
+            if (isym < msym) {
+                continue;
+            } else if (isym == msym) {
+                mkeys[i] = ikey;
+                ++i;
+            } else if (isym > msym) {
+                break;
+            }
+        }
+        //assert(i == mkeys.length);
+        assert(i <= mkeys.length); // Multimaps should be <=, Maps should be ==
+        assert(child[msym] == null);
+
+        child[msym] = makeSubTree(msym);
+        ++subtrees;
+
+        for (i=0; i<mkeys.length; ++i) {
+            transferLocalToSubtree(msym, (K)mkeys[i]);
+        }
+
+        if (smch_ > -1 && sizePrefix[msym] < sizePrefix[smch_]) {
+            smch_ = msym;
+        }
+
+        return msym;
+    }
+
+    /**
+    ** Put all entries of a subtree into this node, if there is enough space.
+    ** (If there is enough space, then this implies that the subtree itself has
+    ** no child trees, due to the size constraints in the constructor.)
+    **
+    ** @param ch Subtree to free
+    ** @return Whether there was enough space, and the operation completed.
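+    **
+    ** (Note on the {@code sizeLeft()+1} test below: freeing the subtree also
+    ** vacates the slot it occupies in {@link #child}, so the local map can
+    ** absorb one more entry than {@link #sizeLeft()} reports before the merge.)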
+ */ + private boolean freeSubTree(int ch) { + if (sizePrefix[ch] <= sizeLeft()+1) { + // cache invalidated due to child removal + if (ch == smch_) { smch_ = -1; } + + transferSubtreeToLocal(child[ch]); + child[ch] = null; + --subtrees; + return true; + } + return false; + } + + /** + ** After a put operation, move some entries into new subtrees, with big + ** subtrees taking priority, until there is enough space to fit all the + ** remaining entries. That is, ensure that there exists sz such that: + ** - {@link #sizePrefix}[{@link #smallestChild()}] == sz + ** - sz > {@link #sizeLeft()} + ** - and that for all i: + ** - ({@link #child}[i] == null) implies {@link #sizePrefix}[i] <= sz + ** - ({@link #child}[i] != null) implies {@link #sizePrefix}[i] >= sz + ** + ** @param i Subgroup of element that was inserted + */ + protected void reshuffleAfterPut(int i) { + ++sizePrefix[i]; ++size; + + if (child[i] != null) { + // cache possibly invalidated due to put operation on child + if (i == smch_) { smch_ = -1; } + + } else { + if (sizeLeft() < 0) { + bindSubTree(); + // clearly, a maximum of one subtree can be freed here + freeSubTree(smallestChild()); // smallestChild() exists due to bindSubTree + + } else { + int sm = smallestChild(); + if (sm > -1 && sizePrefix[sm] < sizePrefix[i]) { + // Let sz be the size of the smallest child, which is (*) greater or + // equal to the size of the largest non-child subgroup. Then: + // - before the put, the maximum space left in the data structure was sz-1 + // since we have sz > sizeLeft() + // - before the put, the maximum size of subgroup i was sz, due to (*) + // - so the maximum space left after binding this subgroup is 2sz-1 + // (any elements added due to the put are irrelevant to this) + // - now, the minimum size of two subtrees is 2sz > 2sz-1 + // - so a maximum of one subtree can be freed after binding the subgroup + int j = bindSubTree(); + assert(i == j); + freeSubTree(sm); + } + } + } + } + + /** + ** Reshuffle after a put operation of many elements, keeping the same + ** constraints. The elements must belong to the same subgroup. + ** + ** @param i Subgroup of elements + ** @param n Number of elements + */ + protected void reshuffleAfterPut(int i, int n) { + // logic of reshuffleAfterPut is independent of how many elements are + // actually added during the put operation + sizePrefix[i] += (n-1); size += (n-1); + reshuffleAfterPut(i); + } + + /** + ** After a remove operation, merge some subtrees into this node, with small + ** subtrees taking priority, until there is no more space to fit any more + ** subtrees. That is, ensure that there exists sz such that: + ** - {@link #sizePrefix}[{@link #smallestChild()}] == sz + ** - sz > {@link #sizeLeft()} + ** - and that for all i: + ** - ({@link #child}[i] == null) implies {@link #sizePrefix}[i] <= sz + ** - ({@link #child}[i] != null) implies {@link #sizePrefix}[i] >= sz + ** + ** @param i Subgroup of element that was removed + */ + protected void reshuffleAfterRemove(int i) { + --sizePrefix[i]; --size; + + if (child[i] != null) { + if (smch_ < 0 || i == smch_) { + freeSubTree(smallestChild()); // smallestChild() exists since child[i] != null + + } else if (sizePrefix[i] < sizePrefix[smch_]) { + // we potentially have a new (real) smallestChild, but wait and see... 
+                if (!freeSubTree(i)) {
+                    // if the tree wasn't freed then we have a new smallestChild
+                    smch_ = i;
+                }
+                // else, if the tree was freed, then freeSubTree() would not have reset
+                // smch_, since it still pointed to the old value (which was
+                // incorrect before the method call, but now correct, so nothing needs to
+                // be done).
+            }
+            // else, the smallestChild hasn't changed, so no more trees can be freed.
+
+        } else {
+            int t = smallestChild();
+            if (t > -1) { freeSubTree(t); }
+        }
+    }
+
+    /**
+    ** Reshuffle after a remove operation of many elements, keeping the same
+    ** constraints. The elements must belong to the same subgroup.
+    **
+    ** @param i Subgroup of elements
+    ** @param n Number of elements
+    */
+    protected void reshuffleAfterRemove(int i, int n) {
+        // Let sz be the size of the smallest child, which is (*) greater or equal
+        // to the size of the largest non-child subgroup. Then, in the case where
+        // child[i] == null:
+        // - the maximum space left in the data structure is sz-1, since we have
+        //   sz > sizeLeft()
+        // - the maximum space freed by a remove operation is sz, due to (*)
+        // - so the maximum space left after a remove operation is 2sz-1
+        // - now, the minimum size of two subtrees is 2sz > 2sz-1
+        // - so a maximum of one subtree can be freed after a remove operation
+        // and in the case where child[i] != null, the same logic applies as for
+        // one-element removal
+        sizePrefix[i] -= (n-1); size -= (n-1);
+        reshuffleAfterRemove(i);
+    }
+
+    /**
+    ** Make a subtree with the appropriate prefix. For efficiency, this method
+    ** assumes that {@link #child}[i] does not already have a tree attached; it
+    ** is up to the calling code to ensure that this holds.
+    **
+    ** @param msym The last symbol in the prefix for the subtree.
+    ** @return The subtree.
+    */
+    abstract protected PrefixTree makeSubTree(int msym);
+
+    /**
+    ** Transfer the value for a key to the appropriate subtree. For efficiency,
+    ** this method assumes that {@link #child}[i] already exists and that its
+    ** prefix matches the key; it is up to the calling code to ensure that this
+    ** holds.
+    */
+    abstract protected void transferLocalToSubtree(int i, K key);
+
+    /**
+    ** Transfer all mappings of a subtree to the local map. For efficiency, the
+    ** subtree is assumed to be an actual direct subtree of this map, ie. the
+    ** same object as {@link #child}[i] for some i; it is up to the calling
+    ** code to ensure that this holds.
+    */
+    abstract protected void transferSubtreeToLocal(PrefixTree ch);
+
+    /**
+    ** Select the object which handles keys with i as the next prefix element.
+    ** Should return {@link #child}[i] if {@link #child}[i] != null, and a
+    ** pointer to the local map otherwise.
+    */
+    abstract protected Object selectNode(int i);
+
+    /**
+    ** Returns the local map.
+    */
+    abstract protected Object getLocalMap();
+
+    /**
+    ** Clears the local map.
+    */
+    abstract protected void clearLocal();
+
+    /**
+    ** Returns the set of keys of the local map.
+    */
+    abstract protected Set keySetLocal();
+
+    /**
+    ** Returns the size of the local map.
+    */
+    abstract public int sizeLocal();
+
+    /**
+    ** TODO Returns the set of keys of the whole node.
+    */
+    abstract protected Set keySet();
+
+
+    /*========================================================================
+    public interface Map, Multimap
+    ========================================================================*/
+
+    public void clear() {
+        size = 0;
+        clearLocal();
+        smch_ = -1;
+        for (int i=0; i<subtreesMax; ++i) { child[i] = null; sizePrefix[i] = 0; }
+        subtrees = 0;
+    }
+
+    @Override public int hashCode() {
+        int sum = getLocalMap().hashCode();
+        for (PrefixTree ch: child) {
+            if (ch == null) { continue; }
+            sum += ch.hashCode();
+        }
+        return sum;
+    }
+
+
+    /**
+    ** Defines an interface that provides prefix-related operations, such as
+    ** returning the next component of a key.
+    **
+    ** @author infinity0
+    */
+    public interface PrefixKey> extends Cloneable, Comparable {
+
+        public PrefixKey clone();
+
+        /**
+        ** Returns the number of possible symbols at each cell of the key. This
+        ** should return the same value for any instance.
+        */
+        public int symbols();
+
+        /**
+        ** Returns the size of the key. This should return the same value for
+        ** any instance.
+        */
+        public int size();
+
+        /**
+        ** Gets one cell of the key.
+        */
+        public int get(int i);
+
+        /**
+        ** Sets one cell of the key.
+        */
+        public void set(int i, int v);
+
+        /**
+        ** Clears one cell of the key.
+        */
+        public void clear(int i);
+
+        /**
+        ** Returns a new key with a new value set for one of the cells.
+        */
+        public K spawn(int i, int v);
+
+        /**
+        ** Clears all cells from a given index.
+        */
+        public void clearFrom(int len);
+
+        /**
+        ** Whether two keys have matching prefixes.
+        **
+        ** @param p The key to match against
+        ** @param len Length of prefix to match
+        */
+        public boolean match(K p, int len);
+
+    }
+
+    /**
+    ** Provides implementations of various higher-level functionalities of
+    ** PrefixKey in terms of lower-level ones.
+    **
+    ** @author infinity0
+    */
+    abstract public static class AbstractPrefixKey> implements PrefixKey {
+
+        @Override abstract public AbstractPrefixKey clone();
+
+        public K spawn(int i, int v) {
+            K p = (K)clone();
+            p.set(i, v);
+            return p;
+        }
+
+        public void clearFrom(int len) {
+            for (int i=len; i<size(); ++i) { clear(i); }
+        }
+
+        public int compareTo(K k) {
+            for (int i=0; i<size(); ++i) {
+                int x = get(i); int y = k.get(i);
+                if (x != y) { return (x > y) ? 1: -1; }
+            }
+            return 0;
+        }
+
+    }
+
+    /**
+    ** TODO Provides an iterator over the PrefixTree. incomplete
+    */
+    abstract public class PrefixTreeKeyIterator implements Iterator {
+
+        final Iterator iterLocal;
+        Iterator iterChild;
+
+        protected int index = 0;
+        protected K nextLocal = null;
+
+        protected PrefixTreeKeyIterator() {
+            iterLocal = keySetLocal().iterator();
+            while (index < subtreesMax && child[index] == null) { ++index; }
+            if (index < subtreesMax) { iterChild = child[index].keySet().iterator(); }
+            if (iterLocal.hasNext()) { nextLocal = iterLocal.next(); }
+        }
+
+        public boolean hasNext() {
+            return nextLocal != null || iterChild != null;
+        }
+
+        public K next() {
+            while (index < subtreesMax || nextLocal != null) {
+                if (nextLocal != null && nextLocal.get(preflen) < index) {
+                    K k = nextLocal;
+                    nextLocal = (iterLocal.hasNext())? iterLocal.next(): null;
+                    return k;
+
+                } else if (iterChild.hasNext()) {
+                    return iterChild.next();
+
+                } else {
+                    do { ++index; } while (index < subtreesMax && child[index] == null);
+                    iterChild = (index < subtreesMax)?
child[index].keySet().iterator(): null; + continue; + + } + } + throw new java.util.NoSuchElementException(); + } + + public void remove() { + throw new UnsupportedOperationException("Not implemented."); + } + + } + +} diff --git a/src/main/java/plugins/Library/util/Skeleton.java b/src/main/java/plugins/Library/util/Skeleton.java new file mode 100644 index 00000000..652e808a --- /dev/null +++ b/src/main/java/plugins/Library/util/Skeleton.java @@ -0,0 +1,73 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import plugins.Library.io.serial.Serialiser; +import plugins.Library.util.exec.TaskAbortException; + +/** +** Defines an interface for an extension of a data structure which is only +** partially loaded into memory. Operations on the missing data should throw +** {@link DataNotLoadedException}. +** +** @author infinity0 +** @see Serialiser +** @see DataNotLoadedException +*/ +public interface Skeleton> { + + /** + ** Whether the skeleton is fully loaded and has no data missing. + */ + public boolean isLive(); + + /** + ** Whether the skeleton is bare and has no data loaded at all. + */ + public boolean isBare(); + + /** + ** Get the serialiser for this skeleton. + */ + public S getSerialiser(); + + /** + ** Get the meta data associated with this skeleton. + */ + public Object getMeta(); + + /** + ** Set the meta data associated with this skeleton. + */ + public void setMeta(Object m); + + /** + ** Inflate the entire skeleton so that after the method call, {@link + ** #isLive()} returns true. + */ + public void inflate() throws TaskAbortException; + + /** + ** Deflate the entire skeleton so that after the method call, {@link + ** #isBare()} returns true. + */ + public void deflate() throws TaskAbortException; + + /** + ** Partially inflate the skeleton based on some parameter object. This + ** method may or may not also inflate other parts of the skeleton. + ** + ** @param param The parameter for the partial inflate. + */ + public void inflate(K param) throws TaskAbortException; + + /** + ** Partially deflate the skeleton based on some parameter object. This + ** method may or may not also deflate other parts of the skeleton. + ** + ** @param param The parameter for the partial deflate. + */ + public void deflate(K param) throws TaskAbortException; + +} diff --git a/src/main/java/plugins/Library/util/SkeletonBTreeMap.java b/src/main/java/plugins/Library/util/SkeletonBTreeMap.java new file mode 100644 index 00000000..84628770 --- /dev/null +++ b/src/main/java/plugins/Library/util/SkeletonBTreeMap.java @@ -0,0 +1,1774 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.io.serial.IterableSerialiser; +import plugins.Library.io.serial.ScheduledSerialiser; +import plugins.Library.io.serial.MapSerialiser; +import plugins.Library.io.serial.Translator; +import plugins.Library.io.DataFormatException; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.exec.TaskCompleteException; +import plugins.Library.util.func.Tuples.X2; +import plugins.Library.util.func.Tuples.X3; + +import java.util.AbstractSet; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Collection; +import java.util.Map; +import java.util.List; +import java.util.LinkedHashMap; +import java.util.ArrayList; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.Stack; + +// TODO NORM tidy this +import java.util.Queue; +import java.util.PriorityQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import plugins.Library.util.exec.Progress; +import plugins.Library.util.exec.ProgressParts; +import plugins.Library.util.exec.BaseCompositeProgress; +import plugins.Library.io.serial.Serialiser; +import plugins.Library.io.serial.ProgressTracker; +import plugins.Library.util.exec.TaskCompleteException; +import plugins.Library.util.BTreeMap.Node; +import plugins.Library.util.concurrent.Scheduler; + +import java.util.Collections; +import java.util.SortedSet; +import java.util.SortedMap; +import java.util.TreeSet; +import java.util.TreeMap; +import java.util.HashMap; + +import freenet.support.Logger; +import plugins.Library.util.Sorted; +import plugins.Library.util.concurrent.BoundedPriorityBlockingQueue; +import plugins.Library.util.concurrent.ExceptionConvertor; +import plugins.Library.util.concurrent.Notifier; +import plugins.Library.util.concurrent.ObjectProcessor; +import plugins.Library.util.concurrent.Executors; +import plugins.Library.util.event.TrackingSweeper; +import plugins.Library.util.event.CountingSweeper; +import plugins.Library.util.func.Closure; +import plugins.Library.util.func.SafeClosure; +import static plugins.Library.util.Maps.$K; + +/** +** {@link Skeleton} of a {@link BTreeMap}. DOCUMENT +** +** TODO HIGH get rid of uses of rnode.get(K). All other uses of rnode, as well +** as lnode and entries, have already been removed. This will allow us to +** re-implement the Node class. +** +** To the maintainer: this class is very very unstructured and a lot of the +** functionality should be split off elsewhere. Feel free to move stuff around. +** +** @author infinity0 +*/ +public class SkeletonBTreeMap extends BTreeMap implements SkeletonMap { + + /* + ** Whether entries are "internal to" or "contained within" nodes, ie. + ** are the entries for a node completely stored (including values) with + ** that node in the serialised representation, or do they refer to other + ** serialised data that is external to the node? + ** + ** This determines whether a {@link TreeMap} or a {@link SkeletonTreeMap} + ** is used to back the entries in a node. + ** + ** Eg. a {@code BTreeMap>} would have this + ** {@code true} for the map, and {@code false} for the map backing the set. 
+ */ + //final protected boolean internal_entries; + /* + ** OPT HIGH disable for now, since I can't think of a good way to + ** implement this tidily. + ** + ** three options: + ** + ** 0 have SkeletonNode use TreeMap when int_ent is true rather than + ** SkeletonTreeMap but this will either break the Skeleton contract of + ** deflate(), which expects isBare() to be true afterwards, or it will + ** break the contract of isBare(), if we modifiy that method to return + ** true for TreeMaps instead. + ** + ** - pros: uses an existing class, efficient + ** - cons: breaks contracts (no foreseeable way to avoid), complicated + ** to implement + ** + ** 1 have another class that extends SkeletonTreeMap which has one single + ** boolean value isDeflated, alias *flate(K) to *flate(), and all those + ** functions do is set that boolean. then override get() etc to throw + ** DNLEx depending on the value of that boolean; and have SkeletonNode + ** use this class when int_ent is true. + ** + ** - pros: simple, efficient OPT HIGH + ** - cons: requires YetAnotherClass + ** + ** 2 don't have the internal_entries, and just use a dummy serialiser that + ** copies task.data to task.meta for push tasks, and vice versa for pull + ** tasks. + ** + ** - pros: simple to implement + ** - cons: a hack, inefficient + ** + ** for now using option 2, will probably implement option 1 at some point.. + */ + + /** + ** Serialiser for the node objects. + */ + protected IterableSerialiser nsrl; + + /** + ** Serialiser for the value objects. + */ + protected MapSerialiser vsrl; + + public void setSerialiser(IterableSerialiser n, MapSerialiser v) { + if ((nsrl != null || vsrl != null) && !isLive()) { + throw new IllegalStateException("Cannot change the serialiser when the structure is not live."); + } + nsrl = n; + vsrl = v; + ((SkeletonNode)root).setSerialiser(); + } + + static volatile boolean logMINOR; + static volatile boolean logDEBUG; + + static { + Logger.registerClass(SkeletonBTreeMap.class); + } + + final public Comparator> CMP_PULL = new Comparator>() { + /*@Override**/ public int compare(PullTask t1, PullTask t2) { + return ((GhostNode)t1.meta).compareTo((GhostNode)t2.meta); + } + }; + + final public Comparator> CMP_PUSH = new Comparator>() { + /*@Override**/ public int compare(PushTask t1, PushTask t2) { + return t1.data.compareTo(t2.data); + } + }; + + final public Comparator> CMP_ENTRY = new Comparator>() { + /*@Override**/ public int compare(Map.Entry t1, Map.Entry t2) { + return SkeletonBTreeMap.this.compare(t1.getKey(), t2.getKey()); + } + }; + + public class SkeletonNode extends Node implements Skeleton> { + + protected int ghosts = 0; + + protected SkeletonNode(K lk, K rk, boolean lf, SkeletonTreeMap map) { + super(lk, rk, lf, map); + setSerialiser(); + } + + protected SkeletonNode(K lk, K rk, boolean lf) { + this(lk, rk, lf, new SkeletonTreeMap(comparator)); + } + + protected SkeletonNode(K lk, K rk, boolean lf, SkeletonTreeMap map, Collection gh) { + this(lk, rk, lf, map); + _size = map.size(); + if (!lf) { + if (map.size()+1 != gh.size()) { + throw new IllegalArgumentException("SkeletonNode: in constructing " + getName() + ", got size mismatch: map:" + map.size() + "; gh:" + gh.size()); + } + Iterator it = gh.iterator(); + for (X2 kp: iterKeyPairs()) { + GhostNode ghost = it.next(); + ghost.lkey = kp._0; + ghost.rkey = kp._1; + ghost.parent = this; + _size += ghost._size; + addChildNode(ghost); + } + ghosts = gh.size(); + } + } + + /** + ** Set the value-serialiser for this node and all subnodes to 
match + ** the one assigned for the entire tree. + */ + public void setSerialiser() { + ((SkeletonTreeMap)entries).setSerialiser(vsrl); + if (!isLeaf()) { + for (Node n: iterNodes()) { + if (!n.isGhost()) { + ((SkeletonNode)n).setSerialiser(); + } + } + } + } + + /** + ** Create a {@link GhostNode} object that represents this node. + */ + public GhostNode makeGhost(Object meta) { + GhostNode ghost = new GhostNode(lkey, rkey, totalSize()); + ghost.setMeta(meta); + return ghost; + } + + /*@Override**/ public Object getMeta() { return null; } + + /*@Override**/ public void setMeta(Object m) { } + + /*@Override**/ public IterableSerialiser getSerialiser() { return nsrl; } + + /*@Override**/ public boolean isLive() { + if (ghosts > 0 || !((SkeletonTreeMap)entries).isLive()) { return false; } + if (!isLeaf()) { + for (Node n: iterNodes()) { + SkeletonNode skel = (SkeletonNode)n; + if (!skel.isLive()) { return false; } + } + } + return true; + } + + /*@Override**/ public boolean isBare() { + if (!isLeaf()) { + if (ghosts < childCount()) { + return false; + } + } + return ((SkeletonTreeMap)entries).isBare(); + } + + /** + ** {@inheritDoc} + */ + @Override protected void addChildNode(Node child) { + super.addChildNode(child); + if (child.isGhost()) { ++ghosts; } + } + + /** + ** DOCUMENT. + */ + protected void setChildNode(Node child) { + assert(lnodes.containsKey(child.rkey)); + assert(rnodes.containsKey(child.lkey)); + assert(lnodes.get(child.rkey) == rnodes.get(child.lkey)); + lnodes.put(child.rkey, child); + rnodes.put(child.lkey, child); + } + + /** + ** Attaches a child {@link GhostNode}. + ** + ** It is '''assumed''' that there is already a {@link SkeletonNode} in + ** its place; the ghost will replace it. It is up to the caller to + ** ensure that this holds. + ** + ** @param ghost The GhostNode to attach + */ + protected void attachGhost(GhostNode ghost) { + assert(!rnodes.get(ghost.lkey).isGhost()); + ghost.parent = this; + setChildNode(ghost); + ++ghosts; + } + + /** + ** Attaches a child {@link SkeletonNode}. + ** + ** It is '''assumed''' that there is already a {@link GhostNode} in its + ** place; the skeleton will replace it. It is up to the caller to + ** ensure that this holds. + ** + ** @param skel The SkeletonNode to attach + */ + protected void attachSkeleton(SkeletonNode skel) { + assert(rnodes.get(skel.lkey).isGhost()); + setChildNode(skel); + --ghosts; + } + + /*@Override**/ public void deflate() throws TaskAbortException { + if (!isLeaf()) { + List> tasks = new ArrayList>(childCount() - ghosts); + for (Node node: iterNodes()) { + if (node.isGhost()) { continue; } + if (!((SkeletonNode)node).isBare()) { + ((SkeletonNode)node).deflate(); + } + tasks.add(new PushTask((SkeletonNode)node)); + } + + nsrl.push(tasks); + for (PushTask task: tasks) { + try { + attachGhost((GhostNode)task.meta); + } catch (RuntimeException e) { + throw new TaskAbortException("Could not deflate BTreeMap Node " + getRange(), e); + } + } + } + ((SkeletonTreeMap)entries).deflate(); + assert(isBare()); + } + + // OPT make this parallel + /*@Override**/ public void inflate() throws TaskAbortException { + ((SkeletonTreeMap)entries).inflate(); + if (!isLeaf()) { + for (Node node: iterNodes()) { + inflate(node.lkey, true); + } + } + assert(isLive()); + } + + /*@Override**/ public void inflate(K key) throws TaskAbortException { + inflate(key, false); + } + + /** + ** Deflates the node to the immediate right of the given key. + ** + ** Expects metadata to be of type {@link GhostNode}. 
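+        **
+        ** A typical round trip (illustrative; assumes the serialisers were
+        ** wired up via {@code setSerialiser()} and that the child is already
+        ** bare):
+        **
+        **   node.deflate(k);          // child right of k becomes a GhostNode
+        **   node.inflate(k, false);   // pulled back in as a SkeletonNode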
+ ** + ** @param key The key + */ + /*@Override**/ public void deflate(K key) throws TaskAbortException { + if (isLeaf()) { return; } + Node node = rnodes.get(key); + if (node.isGhost()) { return; } + + if (!((SkeletonNode)node).isBare()) { + throw new IllegalStateException("Cannot deflate non-bare BTreeMap node"); + } + + PushTask task = new PushTask((SkeletonNode)node); + try { + nsrl.push(task); + attachGhost((GhostNode)task.meta); + + // TODO LOW maybe just ignore all non-error abortions + } catch (TaskCompleteException e) { + assert(node.isGhost()); + } catch (RuntimeException e) { + throw new TaskAbortException("Could not deflate BTreeMap Node " + node.getRange(), e); + } + } + + /** + ** Inflates the node to the immediate right of the given key. + ** + ** Passes metadata of type {@link GhostNode}. + ** + ** @param key The key + ** @param auto Whether to recursively inflate the node's subnodes. + */ + public void inflate(K key, boolean auto) throws TaskAbortException { + if (isLeaf()) { return; } + Node node = rnodes.get(key); + if (!node.isGhost()) { return; } + + PullTask task = new PullTask(node); + try { + nsrl.pull(task); + postPullTask(task, this); + if (auto) { task.data.inflate(); } + + } catch (TaskCompleteException e) { + assert(!node.isGhost()); + } catch (DataFormatException e) { + throw new TaskAbortException("Could not inflate BTreeMap Node " + node.getRange(), e); + } catch (RuntimeException e) { + throw new TaskAbortException("Could not inflate BTreeMap Node " + node.getRange(), e); + } + } + + } + + public class GhostNode extends Node { + + /** + ** Points to the parent {@link SkeletonNode}. + ** + ** Maintaining this field's value is a bitch, so I've tried to remove uses + ** of this from the code. Currently, it is only used for the parent field + ** a {@link DataFormatException}, which lets us retrieve the serialiser, + ** progress, etc etc etc. TODO NORM somehow find another way of doing that + ** so we can get rid of it completely. 
+ */ + protected SkeletonNode parent; + + protected Object meta; + + protected GhostNode(K lk, K rk, SkeletonNode p, int s) { + super(lk, rk, false, null); + parent = p; + _size = s; + } + + protected GhostNode(K lk, K rk, int s) { + this(lk, rk, null, s); + } + + public Object getMeta() { + return meta; + } + + public void setMeta(Object m) { + meta = m; + } + + @Override public int nodeSize() { + throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); + } + + @Override public int childCount() { + throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); + } + + @Override public boolean isLeaf() { + throw new DataNotLoadedException("BTreeMap Node not loaded: " + getRange(), parent, lkey, this); + } + + @Override public Node nodeL(Node n) { + // this method-call should never be reached in the B-tree algorithm + throw new AssertionError("GhostNode: called nodeL()"); + } + + @Override public Node nodeR(Node n) { + // this method-call should never be reached in the B-tree algorithm + throw new AssertionError("GhostNode: called nodeR()"); + } + + @Override public Node selectNode(K key) { + // this method-call should never be reached in the B-tree algorithm + throw new AssertionError("GhostNode: called selectNode()"); + } + + } + + public SkeletonBTreeMap(Comparator cmp, int node_min) { + super(cmp, node_min); + } + + public SkeletonBTreeMap(int node_min) { + super(node_min); + } + + /** + ** Post-processes a {@link PullTask} and returns the {@link SkeletonNode} + ** pulled. + ** + ** The tree will be in a consistent state after the operation, if it was + ** in a consistent state before it. + */ + protected SkeletonNode postPullTask(PullTask task, SkeletonNode parent) throws DataFormatException { + SkeletonNode node = task.data; + GhostNode ghost = (GhostNode)task.meta; + if (!compare0(ghost.lkey, node.lkey) || !compare0(ghost.rkey, node.rkey)) { + throw new DataFormatException("BTreeMap Node lkey/rkey does not match", null, node); + } + + parent.attachSkeleton(node); + return node; + } + + /** + ** Post-processes a {@link PushTask} and returns the {@link GhostNode} + ** pushed. + ** + ** The tree will be in a consistent state after the operation, if it was + ** in a consistent state before it. 
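+    **
+    ** (The two post-processors are deliberately symmetric: {@code postPullTask}
+    ** swaps a {@link GhostNode} for the {@link SkeletonNode} it stood for,
+    ** after checking that the lkey/rkey ranges agree, while this method swaps
+    ** the freshly-deflated node for the {@link GhostNode} the serialiser left
+    ** in {@code task.meta}.)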
+    */
+    protected GhostNode postPushTask(PushTask task, SkeletonNode parent) {
+        GhostNode ghost = (GhostNode)task.meta;
+        parent.attachGhost(ghost);
+        return ghost;
+    }
+
+    @Override protected Node newNode(K lk, K rk, boolean lf) {
+        return new SkeletonNode(lk, rk, lf);
+    }
+
+    @Override protected void swapKey(K key, Node src, Node dst) {
+        SkeletonTreeMap.swapKey(key, (SkeletonTreeMap)src.entries, (SkeletonTreeMap)dst.entries);
+    }
+
+    /*========================================================================
+    public interface SkeletonMap
+    ========================================================================*/
+
+    /*@Override**/ public Object getMeta() { return null; }
+
+    /*@Override**/ public void setMeta(Object m) { }
+
+    /*@Override**/ public MapSerialiser getSerialiser() { return vsrl; }
+
+    /*@Override**/ public boolean isLive() {
+        return ((SkeletonNode)root).isLive();
+    }
+
+    /*@Override**/ public boolean isBare() {
+        return ((SkeletonNode)root).isBare();
+    }
+
+    /*@Override**/ public void deflate() throws TaskAbortException {
+        ((SkeletonNode)root).deflate();
+    }
+
+    // TODO NORM tidy this; this should probably go in a serialiser
+    // and then we will access the Progress of a submap with a task whose
+    // metadata is (lkey, rkey), or something..(PROGRESS)
+    BaseCompositeProgress pr_inf = new BaseCompositeProgress();
+    public BaseCompositeProgress getProgressInflate() { return pr_inf; } // REMOVE ME
+    /**
+    ** Parallel bulk-inflate. At the moment, this will inflate all the values
+    ** of each node too.
+    **
+    ** Not yet thread safe, but ideally it should be. See source for details.
+    */
+    /*@Override**/ public void inflate() throws TaskAbortException {
+
+        // TODO NORM adapt the algorithm to track partial loads of submaps (SUBMAP)
+        // TODO NORM if we do that, we'll also need to make it thread-safe. (THREAD)
+        // TODO NORM and do the PROGRESS stuff whilst we're at it
+
+        if (!(nsrl instanceof ScheduledSerialiser)) {
+            // TODO LOW could just use the code below - since the Scheduler would be
+            // unavailable, the tasks could be executed in the current thread, and the
+            // priority queue's comparator would turn it into depth-first search
+            // automatically.
+            ((SkeletonNode)root).inflate();
+            return;
+        }
+
+        final Queue nodequeue = new PriorityQueue();
+
+        Map, ProgressTracker> ids = null;
+        ProgressTracker ntracker = null;
+
+        if (nsrl instanceof Serialiser.Trackable) {
+            ids = new LinkedHashMap, ProgressTracker>();
+            ntracker = ((Serialiser.Trackable)nsrl).getTracker();
+            // PROGRESS make a ProgressTracker track this instead of "pr_inf".
+            pr_inf.setSubProgress(ProgressTracker.makePullProgressIterable(ids));
+            pr_inf.setSubject("Pulling all entries in B-tree");
+        }
+
+        final ObjectProcessor, SkeletonNode, TaskAbortException> proc_pull
+            = ((ScheduledSerialiser)nsrl).pullSchedule(
+                new PriorityBlockingQueue>(0x10, CMP_PULL),
+                new LinkedBlockingQueue, TaskAbortException>>(0x10),
+                new HashMap, SkeletonNode>()
+            );
+        //System.out.println("Using scheduler");
+        //int DEBUG_pushed = 0, DEBUG_popped = 0;
+
+        try {
+            nodequeue.add((SkeletonNode)root);
+
+            // FIXME HIGH make a copy of the deflated root so that we can restore it if the
+            // operation fails
+
+            do {
+                //System.out.println("pushed: " + DEBUG_pushed + "; popped: " + DEBUG_popped);
+
+                // handle the inflated tasks and attach them to the tree.
+                // THREAD progress tracker should prevent this from being run twice for the
+                // same node, but what if we didn't use a progress tracker? hmm...
+ while (proc_pull.hasCompleted()) { + X3, SkeletonNode, TaskAbortException> res = proc_pull.accept(); + PullTask task = res._0; + SkeletonNode parent = res._1; + TaskAbortException ex = res._2; + if (ex != null) { + assert(!(ex instanceof plugins.Library.util.exec.TaskInProgressException)); // by contract of ScheduledSerialiser + if (!(ex instanceof TaskCompleteException)) { + // TODO LOW maybe dump it somewhere else and throw it at the end... + throw ex; + } + // retrieve the inflated SkeletonNode and add it to the queue... + GhostNode ghost = (GhostNode)task.meta; + // THREAD race condition here... if another thread has inflated the task + // but not yet attached the inflated node to the tree, the assertion fails. + // could check to see if the Progress for the Task still exists, but the + // performance of this depends on the GC freeing weak referents quickly... + assert(!parent.rnodes.get(ghost.lkey).isGhost()); + nodequeue.add((SkeletonNode)parent.rnodes.get(ghost.lkey)); + } else { + SkeletonNode node = postPullTask(task, parent); + nodequeue.add(node); + } + //++DEBUG_popped; + } + + // go through the nodequeue and add any child ghost nodes to the tasks queue + while (!nodequeue.isEmpty()) { + SkeletonNode node = nodequeue.remove(); + // TODO HIGH this needs to be asynchronous + ((SkeletonTreeMap)node.entries).inflate(); // SUBMAP here + + if (node.isLeaf()) { continue; } + for (Node next: node.iterNodes()) { // SUBMAP here + if (!next.isGhost()) { + SkeletonNode skel = (SkeletonNode)next; + if (!skel.isLive()) { nodequeue.add(skel); } + continue; + } + PullTask task = new PullTask((GhostNode)next); + if (ids != null) { ids.put(task, ntracker); } + ObjectProcessor.submitSafe(proc_pull, task, node); + //++DEBUG_pushed; + } + } + + Thread.sleep(0x40); // FIXME HIGH + // we really should put this higher, but BIndexTest tries to inflate a + // of trees in serial and with a 1 second-wait between each, this would + // take ages + } while (proc_pull.hasPending()); + + pr_inf.setEstimate(ProgressParts.TOTAL_FINALIZED); + + } catch (DataFormatException e) { + throw new TaskAbortException("Bad data format", e); + } catch (InterruptedException e) { + throw new TaskAbortException("interrupted", e); + } finally { + proc_pull.close(); + //System.out.println("pushed: " + DEBUG_pushed + "; popped: " + DEBUG_popped); + //assert(DEBUG_pushed == DEBUG_popped); + } + } + + /*@Override**/ public void deflate(K key) throws TaskAbortException { + throw new UnsupportedOperationException("not implemented"); + } + + /*@Override**/ public void inflate(K key) throws TaskAbortException { + inflate(key, false); + } + + /*@Override**/ public void inflate(K key, boolean deflateRest) throws TaskAbortException { + // TODO NORM tidy up + // OPT LOW could write a more efficient version by keeping track of + // the already-inflated nodes so get() doesn't keep traversing down the + // tree - would only improve performance from O(log(n)^2) to O(log(n)) so + // not that big a priority + for (;;) { + try { + if(deflateRest) + getDeflateRest(key); + else + get(key); + break; + } catch (DataNotLoadedException e) { + e.getParent().inflate(e.getKey()); + } + } + } + + /** + ** {@inheritDoc} + ** + ** This implementation just descends the tree, returning the value for the + ** given key if it can be found. 
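+    **
+    ** (The retry idiom in {@code inflate(key, ...)} above is the intended
+    ** calling pattern for any read of a partially-loaded tree; as a sketch:
+    **
+    **   for (;;) {
+    **       try { return map.get(key); }
+    **       catch (DataNotLoadedException e) { e.getParent().inflate(e.getKey()); }
+    **   }
+    **
+    ** with each pass pulling in one more missing node on the path to the key.)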
+ * @throws TaskAbortException
+ **
+ ** @throws ClassCastException key cannot be compared with the keys
+ ** currently in the map
+ ** @throws NullPointerException key is {@code null} and this map uses
+ ** natural order, or its comparator does not tolerate {@code null}
+ ** keys
+ */
+ public V getDeflateRest(Object k) throws TaskAbortException {
+ K key = (K) k;
+ Node node = root;
+
+ for (;;) {
+ if (node.isLeaf()) {
+ return node.entries.get(key);
+ }
+
+ Node nextnode = node.selectNode(key);
+ if (nextnode == null) {
+ for(Node sub : node.iterNodes()) {
+ if(sub != nextnode && sub instanceof SkeletonBTreeMap.SkeletonNode) {
+ ((SkeletonNode)sub).deflate();
+ ((SkeletonNode)node).deflate(sub.lkey);
+ }
+ }
+ return node.entries.get(key);
+ }
+
+ node = nextnode;
+ }
+ }
+
+ /**
+ ** @param putmap Entries to insert into this map
+ ** @param remkey Keys to remove from this map
+ ** @see #update(SortedSet, SortedSet, SortedMap, Closure)
+ */
+ public void update(SortedMap putmap, SortedSet remkey) throws TaskAbortException {
+ update(null, remkey, putmap, null, new TaskAbortExceptionConvertor());
+ }
+
+ /**
+ ** @param putkey Keys to insert into this map
+ ** @param remkey Keys to remove from this map
+ ** @param value_handler Closure to retrieve the value for each putkey
+ ** @see #update(SortedSet, SortedSet, SortedMap, Closure)
+ */
+ public void update(SortedSet putkey, SortedSet remkey, Closure, X> value_handler, ExceptionConvertor conv) throws TaskAbortException {
+ update(putkey, remkey, null, value_handler, conv);
+ }
+
+ /**
+ ** The executor backing {@link #VALUE_EXECUTOR}.
+ */
+ private static Executor value_exec = null;
+
+ /**
+ * Separate executor for value handlers. Value handlers can themselves call
+ * update() and end up polling, so we need to keep them separate from the
+ * threads that do the actual work! */
+ final public static Executor VALUE_EXECUTOR = new Executor() {
+ /*@Override**/ public void execute(Runnable r) {
+ synchronized (Executors.class) {
+ if (value_exec == null) {
+ value_exec = new ThreadPoolExecutor(
+ 0, 0x40, 60, TimeUnit.SECONDS,
+ new LinkedBlockingQueue(),
+ new ThreadPoolExecutor.CallerRunsPolicy()
+ );
+ }
+ }
+ value_exec.execute(r);
+ }
+ };
+
+ /**
+ ** The executor backing {@link #DEFLATE_EXECUTOR}.
+ */
+ private static Executor deflate_exec = null;
+
+ /**
+ * Separate executor for deflating. We don't want update()s to prevent actual
+ * pushes. */
+ final public static Executor DEFLATE_EXECUTOR = new Executor() {
+ /*@Override**/ public void execute(Runnable r) {
+ synchronized (Executors.class) {
+ if (deflate_exec == null) {
+ deflate_exec = new ThreadPoolExecutor(
+ 0, 0x40, 60, TimeUnit.SECONDS,
+ new LinkedBlockingQueue(),
+ new ThreadPoolExecutor.CallerRunsPolicy()
+ );
+ }
+ }
+ deflate_exec.execute(r);
+ }
+ };
+
+ /**
+ ** Asynchronously updates a remote B-tree. This uses two-pass merge/split
+ ** algorithms (as opposed to the one-pass algorithms of the standard {@link
+ ** BTreeMap}) since it assumes a copy-on-write backing data store, where
+ ** the advantages (concurrency, etc) of one-pass algorithms disappear, and
+ ** the disadvantages (unneeded merges/splits) remain.
+ **
+ ** Unlike the (ideal design goal of the) inflate/deflate methods, this is
+ ** designed to support only one concurrent update. It is up to the
+ ** caller to ensure that this holds. TODO NORM enforce this somehow
+ **
+ ** Currently, this method assumes that root.isBare() holds. TODO NORM enforce
+ ** this..
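+ **
+ ** For illustration (hypothetical {@code tree}, synchronous values): a
+ ** merge of two entries with no removals would look like
+ **
+ **   SortedMap<String, String> puts = new TreeMap<String, String>();
+ **   puts.put("k1", "v1");
+ **   puts.put("k2", "v2");
+ **   tree.update(puts, new TreeSet<String>()); // remkey must be empty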
+ ** + ** This is a wrapper method to deal with the occasional key rejection due to + ** trying to add too many keys to a single node. It also checks parameters. + ** + ** Note: {@code remkey} is not implemented yet. + ** + ** @throws UnsupportedOperationException if {@code remkey} is not empty + */ + protected void update( + SortedSet putkey, SortedSet remkey, + final SortedMap putmap, Closure, X> value_handler, + ExceptionConvertor conv + ) throws TaskAbortException { + + // Check parameters. + + if (value_handler == null) { + // synchronous value callback - null, remkey, putmap, null + assert(putkey == null); + putkey = Sorted.keySet(putmap); + } else { + // asynchronous value callback - putkey, remkey, null, closure + assert(putmap == null); + } + + if (remkey != null && !remkey.isEmpty()) { + throw new UnsupportedOperationException("SkeletonBTreeMap: update() currently only supports merge operations"); + } + + // Handle keys rejected due to node too small for the number of keys we are adding to it. + + while(true) { + + final SortedSet rejected; + if(value_handler == null) + rejected = null; + else + rejected = new TreeSet(); + + update(putkey, putmap, value_handler, conv, rejected); + + if(rejected == null || rejected.isEmpty()) return; + System.err.println("Rejected keys: "+rejected.size()+" - re-running merge with rejected keys."); + putkey = rejected; + } + } + + protected void update( + SortedSet putkey, + final SortedMap putmap, Closure, X> value_handler, + ExceptionConvertor conv, final SortedSet rejected + ) throws TaskAbortException { + + // Avoid polling. + final Notifier notifier = new Notifier(); + + /* + ** The code below might seem confusing at first, because the action of + ** the algorithm on a single node is split up into several asynchronous + ** parts, which are not visually adjacent. Here is a more contiguous + ** description of what happens to a single node between being inflated + ** and then eventually deflated. + ** + ** Life cycle of a node: + ** + ** - node gets popped from proc_pull + ** - enter InflateChildNodes + ** - subnodes get pushed into proc_pull + ** - (recurse for each subnode) + ** - subnode gets popped from proc_pull + ** - etc + ** - split-subnodes get pushed into proc_push + ** - wait for all: split-subnodes get popped from proc_push + ** - enter SplitNode + ** - for each item in the original node's DeflateNode + ** - release the item and acquire it on + ** - the parent's DeflateNode if the item is a separator + ** - a new DeflateNode if the item is now in a split-node + ** - for each split-node: + ** - wait for all: values get popped from proc_val; SplitNode to close() + ** - enter DeflateNode + ** - split-node gets pushed into proc_push (except for root) + ** + */ + + // TODO LOW - currently, the algorithm will automatically completely + // inflate each node as it is pulled in from the network (ie. inflate + // all of its values). this is not necessary, since not all of the + // values may be updated. it would be possible to make it work more + // efficiently, which would save some network traffic, but this would + // be quite fiddly. also, the current way allows the Packer to be more + // aggressive in packing the values into splitfiles. + + // FIXME NORM - currently, there is a potential deadlock issue here, + // because proc_{pull,push,val} all use the same ThreadPoolExecutor to + // execute tasks. if the pool fills up with manager tasks {pull,push} + // then worker tasks {val} cannot execute, resulting in deadlock. 
at + // present, this is avoided by having ObjectProcessor.maxconc less than + // the poolsize, but future developers may change this unknowingly. + // + // a proper solution would be to have manager and worker tasks use + // different thread pools, ie. different ThreadPoolExecutors. care + // needs to be taken when doing this because the call can be recursive; + // ie. if the values are also SkeletonBTreeMaps, then the workers might + // start child "manager" tasks by calling value.update() + // + // arguably a better solution would be to not use threads, and use + // the async interface (i should have done this when i first coded it). + // see doc/todo.txt for details + + final ObjectProcessor, SafeClosure, TaskAbortException> proc_pull + = ((ScheduledSerialiser)nsrl).pullSchedule( + new PriorityBlockingQueue>(0x10, CMP_PULL), + new LinkedBlockingQueue, TaskAbortException>>(8), + new HashMap, SafeClosure>() + ); + proc_pull.setNotifier(notifier); + + final ObjectProcessor, CountingSweeper, TaskAbortException> proc_push + = ((ScheduledSerialiser)nsrl).pushSchedule( + new PriorityBlockingQueue>(0x10, CMP_PUSH), + new LinkedBlockingQueue, TaskAbortException>>(0x10), + new HashMap, CountingSweeper>() + ); + proc_push.setNotifier(notifier); + + /** + ** Deposit for a value-retrieval operation + */ + class DeflateNode extends TrackingSweeper> implements Runnable, SafeClosure> { + + /** The node to be pushed, when run() is called. */ + final SkeletonNode node; + /** A closure for the parent node. This is called when all of its children (of + ** which this.node is one) have been pushed. */ + final CountingSweeper parNClo; + private boolean deflated; + + protected DeflateNode(SkeletonNode n, CountingSweeper pnc) { + super(true, true, new TreeSet(comparator), null); + parNClo = pnc; + node = n; + } + + /** + ** Push this node. This is run when this sweeper is cleared, which + ** happens after all the node's local values have been obtained, + ** '''and''' the node has passed through SplitNode (which closes + ** this sweeper). + */ + public void run() { + try { + deflate(); + } catch (TaskAbortException e) { + throw new RuntimeException(e); // FIXME HIGH + } + assert(node.isBare()); + if (parNClo == null) { + // do not deflate the root + assert(node == root); + return; + } + ObjectProcessor.submitSafe(proc_push, new PushTask(node), parNClo); + } + + /** + ** Update the key's value in the node. Runs whenever an entry is + ** popped from proc_val. + */ + public void invoke(Map.Entry en) { + assert(node.entries.containsKey(en.getKey())); + node.entries.put(en.getKey(), en.getValue()); + if(logMINOR) Logger.minor(this, "New value for key "+en.getKey()+" : "+en.getValue()+" in "+node+" parent = "+parNClo); + } + + public void deflate() throws TaskAbortException { + synchronized(this) { + if(deflated) return; + deflated = true; + } + ((SkeletonTreeMap)node.entries).deflate(); + } + + } + + // must be located after DeflateNode's class definition + // The proc_val output queue must comfortably cover everything generated by a single invocation of InflateChildNodes, or else we will deadlock. + // Note that this is all deflated sub-trees, so they are relatively small - some of the other queues + // handle much larger structures in their counters. + final ObjectProcessor, DeflateNode, X> proc_val = (value_handler == null)? 
null + : new ObjectProcessor, DeflateNode, X>( + new BoundedPriorityBlockingQueue>(0x10, CMP_ENTRY), + new LinkedBlockingQueue, X>>(ENT_MAX*3), + new HashMap, DeflateNode>(), + value_handler, VALUE_EXECUTOR, conv, notifier // These can block so pool them separately. + ).autostart(); + + final Comparator CMP_DEFLATE = new Comparator() { + + public int compare(DeflateNode arg0, DeflateNode arg1) { + return arg0.node.compareTo(arg1.node); + } + + }; + + final ObjectProcessor proc_deflate = + new ObjectProcessor( + new BoundedPriorityBlockingQueue(0x10, CMP_DEFLATE), + new LinkedBlockingQueue>(), + new HashMap(), + new Closure() { + + public void invoke(DeflateNode param) throws TaskAbortException { + param.deflate(); + } + + }, DEFLATE_EXECUTOR, new TaskAbortExceptionConvertor(), notifier).autostart(); + + // Limit it to 2 at once to minimise memory usage. + // The actual inserts will occur in parallel anyway. + proc_deflate.setMaxConc(2); + + // Dummy constant for SplitNode + final SortedMap EMPTY_SORTEDMAP = new TreeMap(comparator); + + /** + ** Deposit for a PushTask + */ + class SplitNode extends CountingSweeper implements Runnable { + + /** The node to be split, when run() is called. */ + final SkeletonNode node; + /** The node's parent, where new nodes/keys are attached during the split process. */ + /*final*/ SkeletonNode parent; + /** The value-closure for this node. This keeps track of all unhandled values + * in this node. If this node is split, and the values have not been handled, + * they are passed onto the value-closures of the relevant split nodes (or + * that of the parent node, if the key turns out to be a separator key). */ + final DeflateNode nodeVClo; + /** The closure for the parent node, also a CountingSweeper. We keep a + * reference to this so we can add the new split nodes to it. */ + /*final*/ SplitNode parNClo; + /** The value-closure for the parent node. The comment for nodeVClo explains + * why we need a reference to this. */ + /*final*/ DeflateNode parVClo; + + protected SplitNode(SkeletonNode n, SkeletonNode p, DeflateNode vc, SplitNode pnc, DeflateNode pvc) { + super(true, false); + node = n; + parent = p; + nodeVClo = vc; + parNClo = pnc; + parVClo = pvc; + } + + /** + ** Closes the node's DeflateNode, and (if appropriate) splits the + ** node and updates the deposits for each key moved. This is run + ** after all its children have been deflated. + */ + public void run() { + assert(node.ghosts == node.childCount()); + + // All subnodes have been deflated, so nothing else can possibly add keys + // to this node. + nodeVClo.close(); + + int sk = minKeysFor(node.nodeSize()); + // No need to split + if (sk == 0) { + if (nodeVClo.isCleared()) { + try { + proc_deflate.submit(nodeVClo, nodeVClo.node); + } catch (InterruptedException e) { + // Impossible ? 
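+ // (As in DeflateNode.run() above, the checked exception is
+ // wrapped unchecked so it can escape the sweeper callback.)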
+ throw new RuntimeException(e); + } + } + return; + } + + if (parent == null) { + assert(parNClo == null && parVClo == null); + // create a new parent, parNClo, parVClo + // similar stuff as for InflateChildNodes but no merging + parent = new SkeletonNode(null, null, false); + parent.addAll(EMPTY_SORTEDMAP, Collections.singleton(node)); + parVClo = new DeflateNode(parent, null); + parNClo = new SplitNode(parent, null, parVClo, null, null); + parNClo.acquire(node); + parNClo.close(); + root = parent; + } + + Collection keys = Sorted.select(Sorted.keySet(node.entries), sk); + parent.split(node.lkey, keys, node.rkey); + Iterable nodes = parent.iterNodes(node.lkey, node.rkey); + + // WORKAROUND unnecessary cast here - bug in SunJDK6; works fine on OpenJDK6 + SortedSet held = (SortedSet)nodeVClo.view(); + // if synchronous, all values should have already been handled + assert(proc_val != null || held.size() == 0); + + // reassign appropriate keys to parent sweeper + for (K key: keys) { + if (!held.contains(key)) { continue; } + reassignKeyToSweeper(key, parVClo); + } + + //System.out.println("parent:"+parent.getRange()+"\nseps:"+keys+"\nheld:"+held); + parNClo.open(); + + // for each split-node, create a sweeper that will run when all its (k,v) + // pairs have been popped from value_complete + for (Node nn: nodes) { + SkeletonNode n = (SkeletonNode)nn; + DeflateNode vClo = new DeflateNode(n, parNClo); + + // reassign appropriate keys to the split-node's sweeper + SortedSet subheld = subSet(held, n.lkey, n.rkey); + //try { + assert(subheld.isEmpty() || compareL(n.lkey, subheld.first()) < 0 && compareR(subheld.last(), n.rkey) < 0); + //} catch (AssertionError e) { + // System.out.println(n.lkey + " " + subheld.first() + " " + subheld.last() + " " + n.rkey); + // throw e; + //} + for (K key: subheld) { + reassignKeyToSweeper(key, vClo); + } + vClo.close(); + // if no keys were added + if (vClo.isCleared()) { + try { + proc_deflate.submit(vClo, vClo.node); + } catch (InterruptedException e) { + // Impossible ? + throw new RuntimeException(e); + } + } + + parNClo.acquire(n); + } + + // original (unsplit) node had a ticket on the parNClo sweeper, release it + parNClo.release(node); + + parNClo.close(); + assert(!parNClo.isCleared()); // we always have at least one node to deflate + } + + /** + ** When we move a key to another node (eg. to the parent, or to a new node + ** resulting from the split), we must deassign it from the original node's + ** sweeper and reassign it to the sweeper for the new node. + ** + ** NOTE: if the overall co-ordinator algorithm is ever made concurrent, + ** this section MUST be made atomic + ** + ** @param key The key + ** @param clo The sweeper to reassign the key to + */ + private void reassignKeyToSweeper(K key, DeflateNode clo) { + clo.acquire(key); + //assert(((UpdateValue)value_closures.get(key)).node == node); + proc_val.update($K(key, (V)null), clo); + // FIXME what if it has already run??? + if(logMINOR) Logger.minor(this, "Reassigning key "+key+" to "+clo+" on "+this+" parent="+parent+" parent split node "+parNClo+" parent deflate node "+parVClo); + // nodeVClo.release(key); + // this is unnecessary since nodeVClo() will only be used if we did not + // split its node (and never called this method) + } + + } + + /** + ** Deposit for a PullTask + */ + class InflateChildNodes implements SafeClosure { + + /** The parent node, passed to closures which need it. */ + final SkeletonNode parent; + /** The keys to put into this node. 
*/
+ final SortedSet putkey;
+ /** The closure for the parent node, passed to closures which need it. */
+ final SplitNode parNClo;
+ /** The value-closure for the parent node, passed to closures which need it. */
+ final DeflateNode parVClo;
+
+ protected InflateChildNodes(SkeletonNode p, SortedSet ki, SplitNode pnc, DeflateNode pvc) {
+ parent = p;
+ putkey = ki;
+ parNClo = pnc;
+ parVClo = pvc;
+ }
+
+ protected InflateChildNodes(SortedSet ki) {
+ this(null, ki, null, null);
+ }
+
+ /**
+ ** Merge the relevant parts of the map into the node, and inflate its
+ ** children. Runs whenever a node is popped from proc_pull.
+ */
+ public void invoke(SkeletonNode node) {
+ assert(compareL(node.lkey, putkey.first()) < 0);
+ assert(compareR(putkey.last(), node.rkey) < 0);
+
+ // FIXME HIGH make this asynchronous
+ try { ((SkeletonTreeMap)node.entries).inflate(); } catch (TaskAbortException e) { throw new RuntimeException(e); }
+
+ // closure to be called when all local values have been obtained
+ DeflateNode vClo = new DeflateNode(node, parNClo);
+
+ // closure to be called when all subnodes have been handled
+ SplitNode nClo = new SplitNode(node, parent, vClo, parNClo, parVClo);
+
+ // invalidate every totalSize cache directly after we inflate it
+ node._size = -1;
+
+ // OPT LOW if putkey is empty then skip
+
+ // each key in putkey is either added to the local entries, or delegated to
+ // the relevant child node.
+ if (node.isLeaf()) {
+ // add all keys into the node, since there are no children.
+
+ if (proc_val == null) {
+ // OPT: could use a splice-merge here. for TreeMap, there is not an
+ // easy way of doing this, nor will it likely make much of a difference.
+ // however, if we re-implement SkeletonNode, this might become relevant.
+ for (K key: putkey) {
+ if(putmap.get(key) == null) throw new NullPointerException();
+ node.entries.put(key, putmap.get(key));
+ }
+ } else {
+ if(putkey.size() < proc_val.outputCapacity()) {
+ for (K key: putkey) { handleLocalPut(node, key, vClo); }
+ } else {
+ // Add as many as we can, then put the rest into rejected.
+ TreeSet accepted = new TreeSet(Sorted.select(putkey, proc_val.outputCapacity()/2));
+ List> notAccepted = Sorted.split(putkey, accepted, new TreeSet()); // FIXME NullSet ???
+ System.err.println("Too many keys to add to a single node: We can add "+proc_val.outputCapacity()+" but are being asked to add "+putkey.size()+" - adding "+accepted.size()+" and rejecting the rest.");
+ for (K key: accepted) { handleLocalPut(node, key, vClo); }
+ synchronized(rejected) {
+ for(SortedSet set : notAccepted) {
+ rejected.addAll(set);
+ }
+ }
+ }
+ }
+
+ } else {
+ // only add keys that already exist locally in the node. other keys
+ // are delegated to the relevant child node.
+
+ SortedSet fkey = new TreeSet(comparator);
+ Iterable> range = Sorted.split(putkey, Sorted.keySet(node.entries), fkey);
+
+ if (proc_val == null) {
+ for (K key: fkey) {
+ if(putmap.get(key) == null) throw new NullPointerException();
+ node.entries.put(key, putmap.get(key));
+ }
+ } else {
+ for (K key: fkey) { handleLocalPut(node, key, vClo); }
+ }
+
+ for (SortedSet rng: range) {
+ GhostNode n;
+ try {
+ n = (GhostNode)node.selectNode(rng.first());
+ } catch (ClassCastException e) {
+ // This has been seen in practice. I have no idea what it means.
+ // FIXME HIGH !!!
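+ // Apparently selectNode() can hand back a node that is already
+ // inflated (a SkeletonNode rather than a GhostNode); there is
+ // nothing to pull for it, so this key range is skipped for now
+ // (hence the FIXME).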
+ Logger.error(this, "Node is already loaded?!?!?!: "+node.selectNode(rng.first())); + continue; + } + PullTask task = new PullTask(n); + + // possibly re-design CountingSweeper to not care about types, or have acquire() instead + nClo.acquire((SkeletonNode)null); // dirty hack. FIXME LOW + // WORKAROUND unnecessary cast here - bug in SunJDK6; works fine on OpenJDK6 + ObjectProcessor.submitSafe(proc_pull, task, (SafeClosure)new InflateChildNodes(node, rng, nClo, vClo)); + } + } + + nClo.close(); + if (nClo.isCleared()) { nClo.run(); } // eg. if no child nodes need to be modified + } + + /** + ** Handle a planned local put to the node. + ** + ** Keys added locally have their values set to null. These will be updated + ** with the correct values via UpdateValue, when the value-getter completes + ** its operation on the key. We add the keys now, **before** the value is + ** obtained, so that SplitNode can work out how to split the node as early + ** as possible. + ** + ** @param n The node to put the key into + ** @param key The key + ** @param vClo The sweeper that tracks if the key's value has been obtained + */ + private void handleLocalPut(SkeletonNode n, K key, DeflateNode vClo) { + V oldval = n.entries.put(key, null); + vClo.acquire(key); + if(logMINOR) Logger.minor(this, "handleLocalPut for key "+key+" old value "+oldval+" for deflate node "+vClo+" - passing to proc_val"); + ObjectProcessor.submitSafe(proc_val, $K(key, oldval), vClo); + } + + private void handleLocalRemove(SkeletonNode n, K key, TrackingSweeper> vClo) { + throw new UnsupportedOperationException("not implemented"); + } + + } + + proc_pull.setName("pull"); + proc_push.setName("push"); + if (proc_val != null) { proc_val.setName("val"); } + proc_deflate.setName("deflate"); + + try { + + (new InflateChildNodes(putkey)).invoke((SkeletonNode)root); + + // FIXME HIGH make a copy of the deflated root so that we can restore it if the + // operation fails + int olds = size; + + boolean progress = true; + int count = 0; + int ccount = 0; + + do { + + //System.out.println(System.identityHashCode(this) + " " + proc_pull + " " + proc_push + " " + ((proc_val == null)? "": proc_val)); + + // Only sleep if we run out of jobs. + if((!progress) && (count++ > 10)) { + count = 0; +// if(ccount++ > 10) { + System.out.println(/*System.identityHashCode(this) + " " + */proc_val + " " + proc_pull + " " + proc_push+ " "+proc_deflate); +// ccount = 0; +// } + notifier.waitUpdate(1000); + } + progress = false; + + boolean loop = false; + while (proc_push.hasCompleted()) { + X3, CountingSweeper, TaskAbortException> res = proc_push.accept(); + PushTask task = res._0; + CountingSweeper sw = res._1; + TaskAbortException ex = res._2; + if (ex != null) { + // FIXME HIGH + throw new UnsupportedOperationException("SkeletonBTreeMap.update(): PushTask aborted; handler not implemented yet", ex); + } + + postPushTask(task, ((SplitNode)sw).node); + sw.release(task.data); + if (sw.isCleared()) { ((Runnable)sw).run(); } + loop = true; + progress = true; + } + if(loop) continue; + + //System.out.println(System.identityHashCode(this) + " " + proc_push + " " + ((proc_val == null)? 
"": proc_val+ " ") + proc_pull); + + if(proc_deflate.hasCompleted()) { + X3 res = proc_deflate.accept(); + DeflateNode sw = res._0; + TaskAbortException ex = res._2; + if(ex != null) + // FIXME HIGH + throw ex; + sw.run(); + progress = true; + continue; + } + if(loop) continue; + + if (proc_val != null && proc_val.hasCompleted()) { + X3, DeflateNode, X> res = proc_val.accept(); + Map.Entry en = res._0; + DeflateNode sw = res._1; + X ex = res._2; + if (ex != null) { + // FIXME HIGH + throw new UnsupportedOperationException("SkeletonBTreeMap.update(): value-retrieval aborted; handler not implemented yet", ex); + } + + sw.invoke(en); + sw.release(en.getKey()); + if (sw.isCleared()) { + proc_deflate.submit(sw, sw.node); + } + progress = true; + continue; + } + + if (proc_pull.hasCompleted()) { + X3, SafeClosure, TaskAbortException> res = proc_pull.accept(); + PullTask task = res._0; + SafeClosure clo = res._1; + TaskAbortException ex = res._2; + if (ex != null) { + // FIXME HIGH + throw new UnsupportedOperationException("SkeletonBTreeMap.update(): PullTask aborted; handler not implemented yet", ex); + } + + SkeletonNode node = postPullTask(task, ((InflateChildNodes)clo).parent); + clo.invoke(node); + progress = true; + continue; + } + + } while (proc_pull.hasPending() || proc_push.hasPending() || (proc_val != null && proc_val.hasPending()) || proc_deflate.hasPending()); + + size = root.totalSize(); + + } catch (RuntimeException e) { + throw new TaskAbortException("Task failed", e); + } catch (DataFormatException e) { + throw new TaskAbortException("Bad data format", e); + } catch (InterruptedException e) { + throw new TaskAbortException("interrupted", e); + } finally { + proc_pull.close(); + proc_push.close(); + if (proc_val != null) { proc_val.close(); } + proc_deflate.close(); + } + + } + + + /** + ** Creates a translator for the nodes of the B-tree. This method is + ** necessary because {@link NodeTranslator} is a non-static class. + ** + ** For an in-depth discussion on why that class is not static, see the + ** class description for {@link BTreeMap.Node}. + ** + ** TODO LOW maybe store these in a WeakHashSet or something... will need to + ** code equals() and hashCode() for that + ** + ** @param ktr Translator for the keys + ** @param mtr Translator for each node's local entries map + */ + public NodeTranslator makeNodeTranslator(Translator ktr, Translator, R> mtr) { + return new NodeTranslator(ktr, mtr); + } + + /************************************************************************ + ** DOCUMENT. + ** + ** For an in-depth discussion on why this class is not static, see the + ** class description for {@link BTreeMap.Node}. + ** + ** @param Target type of key-translator + ** @param Target type of map-translater + ** @author infinity0 + */ + public class NodeTranslator implements Translator> { + + /** + ** An optional translator for the keys. + */ + final Translator ktr; + + /** + ** An optional translator for each node's local entries map. + */ + final Translator, R> mtr; + + public NodeTranslator(Translator k, Translator, R> m) { + ktr = k; + mtr = m; + } + + /*@Override**/ public Map app(SkeletonNode node) { + if (!node.isBare()) { + throw new IllegalStateException("Cannot translate non-bare node " + node.getRange() + "; size:" + node.nodeSize() + "; ghosts:" + node.ghosts + "; root:" + (root == node)); + } + Map map = new LinkedHashMap(); + map.put("lkey", (ktr == null)? node.lkey: ktr.app(node.lkey)); + map.put("rkey", (ktr == null)? 
node.rkey: ktr.app(node.rkey)); + map.put("entries", (mtr == null)? node.entries: mtr.app((SkeletonTreeMap)node.entries)); + + if (!node.isLeaf()) { + Map subnodes = new LinkedHashMap(); + for (X3 next: node.iterNodesK()) { + GhostNode gh = (GhostNode)(next._1); + subnodes.put(gh.getMeta(), gh.totalSize()); + } + map.put("subnodes", subnodes); + } + return map; + } + + /*@Override**/ public SkeletonNode rev(Map map) throws DataFormatException { + try { + boolean notleaf = map.containsKey("subnodes"); + List gh = null; + if (notleaf) { + Map subnodes = (Map)map.get("subnodes"); + gh = new ArrayList(subnodes.size()); + for (Map.Entry en: subnodes.entrySet()) { + GhostNode ghost = new GhostNode(null, null, null, en.getValue()); + ghost.setMeta(en.getKey()); + gh.add(ghost); + } + } + SkeletonNode node = new SkeletonNode( + (ktr == null)? (K)map.get("lkey"): ktr.rev((Q)map.get("lkey")), + (ktr == null)? (K)map.get("rkey"): ktr.rev((Q)map.get("rkey")), + !notleaf, + (mtr == null)? (SkeletonTreeMap)map.get("entries") + : mtr.rev((R)map.get("entries")), + gh + ); + if (!node.isBare()) { + throw new IllegalStateException("map-translator did not produce a bare SkeletonTreeMap: " + mtr); + } + + verifyNodeIntegrity(node); + return node; + } catch (ClassCastException e) { + throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); + } catch (IllegalArgumentException e) { + throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); + } catch (IllegalStateException e) { + throw new DataFormatException("Could not build SkeletonNode from data", e, map, null, null); + } + } + + } + + + /************************************************************************ + ** {@link Translator} with access to the members of {@link BTreeMap}. + ** DOCUMENT. + ** + ** @author infinity0 + */ + public static class TreeTranslator implements Translator, Map> { + + final Translator ktr; + final Translator, ?> mtr; + + public TreeTranslator(Translator k, Translator, ?> m) { + ktr = k; + mtr = m; + } + + /*@Override**/ public Map app(SkeletonBTreeMap tree) { + if (tree.comparator() != null) { + throw new UnsupportedOperationException("Sorry, this translator does not (yet) support comparators"); + } + Map map = new LinkedHashMap(); + map.put("node_min", tree.NODE_MIN); + map.put("size", tree.size); + Map rmap = tree.makeNodeTranslator(ktr, mtr).app((SkeletonBTreeMap.SkeletonNode)tree.root); + map.put("entries", rmap.get("entries")); + if (!tree.root.isLeaf()) { + map.put("subnodes", rmap.get("subnodes")); + } + return map; + } + + /*@Override**/ public SkeletonBTreeMap rev(Map map) throws DataFormatException { + try { + SkeletonBTreeMap tree = new SkeletonBTreeMap((Integer)map.get("node_min")); + tree.size = (Integer)map.get("size"); + // map.put("lkey", null); // NULLNOTICE: get() gives null which matches + // map.put("rkey", null); // NULLNOTICE: get() gives null which matches + tree.root = tree.makeNodeTranslator(ktr, mtr).rev(map); + if (tree.size != tree.root.totalSize()) { + throw new DataFormatException("Mismatched sizes - tree: " + tree.size + "; root: " + tree.root.totalSize(), null, null); + } + return tree; + } catch (ClassCastException e) { + throw new DataFormatException("Could not build SkeletonBTreeMap from data", e, map, null, null); + } + } + + } + + public String toString() { + // Default toString for an AbstractCollection dumps everything underneath it. + // We don't want that here, especially as they may not be loaded. 
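+ // e.g. "plugins.Library.util.SkeletonBTreeMap@123456789:size=42" (illustrative)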
+ return getClass().getName() + "@" + System.identityHashCode(this)+":size="+size; + } + + /** + * FIXME implement entrySet() along similar lines - auto-activation and auto-deactivation. + * keySet() achieves a slightly different purpose. It never activates values. + * + * NOTE: This is not called keySet() because some callers may not want us to auto-deflate. + */ + private Set keySet = null; + public Set keySetAutoDeflate() { + if (keySet == null) { + keySet = new AbstractSet() { + + @Override public int size() { return SkeletonBTreeMap.this.size(); } + + @Override public Iterator iterator() { + // FIXME LOW - this does NOT yet throw ConcurrentModificationException + // use a modCount counter + return new Iterator() { + + Stack nodestack = new Stack(); + Stack>> itstack = new Stack>>(); + + SkeletonNode cnode = (SkeletonNode)(SkeletonBTreeMap.this.root); + Iterator> centit = cnode.entries.entrySet().iterator(); + + K lastkey = null; + boolean removeok = false; + + + // DEBUG ONLY, remove when unneeded + /*public String toString() { + StringBuilder s = new StringBuilder(); + for (Node n: nodestack) { + s.append(n.getRange()).append(", "); + } + return "nodestack: [" + s + "]; cnode: " + cnode.getRange() + "; lastkey: " + lastkey; + }*/ + + /*@Override**/ public boolean hasNext() { + // TODO LOW ideally iterate in the reverse order + for (Iterator> it: itstack) { + if (it.hasNext()) { return true; } + } + if (centit.hasNext()) { return true; } + if (!cnode.isLeaf()) { return true; } + try { + deflate(); + } catch (TaskAbortException e) { + throw new RuntimeException(e); + } + return false; + } + + /*@Override**/ public K next() { + if (cnode.isLeaf()) { + while (!centit.hasNext()) { + if (nodestack.empty()) { + assert(itstack.empty()); + throw new NoSuchElementException(); + } + SkeletonNode parent = nodestack.pop(); + try { + cnode.deflate(); + parent.deflate(cnode.lkey); + } catch (TaskAbortException e) { + throw new RuntimeException(e); + } + cnode = parent; + centit = itstack.pop(); + } + } else { + while (!cnode.isLeaf()) { + // NULLNOTICE lastkey initialised to null, so this will get the right node + // even at the smaller edge of the map + Node testnode = cnode.rnodes.get(lastkey); + if(testnode.isGhost()) { + try { + cnode.inflate(testnode.lkey, false); + } catch (TaskAbortException e) { + throw new RuntimeException(e); + } + testnode = cnode.rnodes.get(lastkey); + assert(testnode instanceof SkeletonBTreeMap.SkeletonNode); + } + assert(!testnode.isGhost()); + SkeletonNode n = (SkeletonNode) testnode; + // node OK, proceed + nodestack.push(cnode); + itstack.push(centit); + cnode = n; + centit = cnode.entries.entrySet().iterator(); + } + } + if(!centit.hasNext()) { + try { + deflate(); + } catch (TaskAbortException e) { + throw new RuntimeException(e); + } + // We will throw but we may as well clean up in case anyone decides to be clever. + } + K next = centit.next().getKey(); lastkey = next; + removeok = true; + return next; + } + + /*@Override**/ public void remove() { + throw new UnsupportedOperationException(); + // FIXME +// if (!removeok) { +// throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed."); +// } +// // OPT LOW this could probably be a *lot* more efficient... +// BTreeMap.this.remove(lastkey); +// +// // we need to find our position in the tree again, since there may have +// // been structural modifications to it. 
+// // OPT LOW have a "structual modifications counter" that is incremented by +// // split, merge, rotateL, rotateR, and do the below only if it changes +// nodestack.clear(); +// itstack.clear(); +// cnode = BTreeMap.this.root; +// centit = cnode.entries.entrySet().iterator(); +// +// K stopkey = null; +// while(!cnode.isLeaf()) { +// stopkey = findstopkey(stopkey); +// nodestack.push(cnode); +// itstack.push(centit); +// // NULLNOTICE stopkey initialised to null, so this will get the right node +// // even at the smaller edge of the map +// cnode = cnode.rnodes.get(stopkey); +// centit = cnode.entries.entrySet().iterator(); +// } +// +// lastkey = findstopkey(stopkey); +// removeok = false; + } + + private K findstopkey(K stopkey) { + SortedMap headmap = cnode.entries.headMap(lastkey); + if (!headmap.isEmpty()) { + stopkey = headmap.lastKey(); + while (compare(centit.next().getKey(), stopkey) < 0); + } + return stopkey; + } + + }; + } + + @Override public void clear() { + SkeletonBTreeMap.this.clear(); + } + + @Override public boolean contains(Object o) { + Object value = SkeletonBTreeMap.this.get(o); + return value != null; + } + + @Override public boolean remove(Object o) { + if (contains(o)) { + SkeletonBTreeMap.this.remove(o); + return true; + } + return false; + } + + }; + } + return keySet; + } + + + +} diff --git a/src/main/java/plugins/Library/util/SkeletonBTreeSet.java b/src/main/java/plugins/Library/util/SkeletonBTreeSet.java new file mode 100644 index 00000000..33647eec --- /dev/null +++ b/src/main/java/plugins/Library/util/SkeletonBTreeSet.java @@ -0,0 +1,177 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import plugins.Library.io.DataFormatException; +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.io.serial.IterableSerialiser; +import plugins.Library.io.serial.MapSerialiser; +import plugins.Library.io.serial.Translator; +import plugins.Library.util.exec.TaskAbortException; + +import java.util.Comparator; +import java.util.Collection; +import java.util.Map; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.ArrayList; + +/** +** {@link Skeleton} of a {@link BTreeSet}. DOCUMENT +** +** TODO LOW make this implement Skeleton, ?> ? - ie. +** deflate(int, int) to get the relevant subset? +** +** @author infinity0 +*/ +public class SkeletonBTreeSet extends BTreeSet /*implements Skeleton>*/ { + + // TODO NORM when we implement internal_entries in SkeletonBTreeMap, then + // we can have it automatically set to TRUE here. + + public void setSerialiser(IterableSerialiser.SkeletonNode> n, MapSerialiser v) { + ((SkeletonBTreeMap)bkmap).setSerialiser(n, v); + } + + public SkeletonBTreeSet(Comparator cmp, int node_min) { + super(new SkeletonBTreeMap(cmp, node_min)); + } + + public SkeletonBTreeSet(int node_min) { + super(new SkeletonBTreeMap(node_min)); + } + + protected SkeletonBTreeSet(SkeletonBTreeMap m) { + super(m); + } + + /*======================================================================== + public interface Skeleton TODO NORM actually implement this... 
+ ========================================================================*/ + + public boolean isBare() { + return ((SkeletonBTreeMap)bkmap).isBare(); + } + + public boolean isLive() { + return ((SkeletonBTreeMap)bkmap).isLive(); + } + + public void deflate() throws TaskAbortException { + ((SkeletonBTreeMap)bkmap).deflate(); + } + + public void inflate() throws TaskAbortException { + ((SkeletonBTreeMap)bkmap).inflate(); + } + + // TODO NORM tidy this - see SkeletonBTreeMap.inflate() for details + public plugins.Library.util.exec.BaseCompositeProgress getProgressInflate() { + return ((SkeletonBTreeMap)bkmap).pr_inf; + } + + // TODO NORM tidy this, etc. note that we don't need async update() because + // we don't have a value that can be updated (it's always just the key) + public void update(SortedSet putset, SortedSet remset) throws TaskAbortException { + ((SkeletonBTreeMap)bkmap).update(null, remset, new SortedSetMap>(putset), null, new TaskAbortExceptionConvertor()); + } + + /** + ** Creates a translator for the nodes of the B-tree. This method just calls + ** the {@link SkeletonBTreeMap#makeNodeTranslator(Translator, Translator) + ** corresponding method} for the map backing this set. + ** + ** @param ktr Translator for the keys + ** @param mtr Translator for each node's local entries map + */ + public SkeletonBTreeMap.NodeTranslator makeNodeTranslator(Translator ktr, Translator, R> mtr) { + return ((SkeletonBTreeMap)bkmap).makeNodeTranslator(ktr, mtr); + } + + + /************************************************************************ + ** {@link Translator} between a {@link Collection} and a {@link Map} of + ** keys to themselves. Used by {@link TreeSetTranslator}. + ** + ** OPT HIGH make this return a *view* instead of a copy + ** + ** @author infinity0 + */ + abstract public static class MapCollectionTranslator, C extends Collection> + implements Translator { + + public static , C extends Collection> C app(M src, C dst) { + for (E k: src.keySet()) { dst.add(k); } + return dst; + } + + public static , C extends Collection> M rev(C src, M dst) { + for (E e: src) { dst.putGhost(e, e); } + return dst; + } + + } + + + /************************************************************************ + ** Translator for the {@link java.util.TreeMap} backing a {@link + ** java.util.TreeSet}. This will turn a map into a list and vice versa, + ** so that the serialised representation doesn't store duplicates. + ** + ** maybe this belongs in a SkeletonTreeSet? if that's ever implemented.. + ** + ** @author infinity0 + */ + public static class TreeSetTranslator + extends MapCollectionTranslator, Collection> { + + public Collection app(SkeletonTreeMap src) { + Collection dst = new ArrayList(src.size()); + app(src, dst); + return dst; + } + + public SkeletonTreeMap rev(Collection src) { + // TODO LOW make it use a comparator that can be passed to the constructor + SkeletonTreeMap dst = new SkeletonTreeMap(); + rev(src, dst); + assert(dst.isBare()); + return dst; + } + + } + + + /************************************************************************ + ** {@link Translator} with access to the members of {@link BTreeSet}. + ** + ** This implementation just serialises the map backing the set. + ** + ** @author infinity0 + */ + public static class TreeTranslator implements Translator, Map> { + + final SkeletonBTreeMap.TreeTranslator trans; + + public TreeTranslator(Translator k, Translator, ? 
extends Collection> m) { + trans = new SkeletonBTreeMap.TreeTranslator(k, m); + } + + /*@Override**/ public Map app(SkeletonBTreeSet tree) { + return trans.app((SkeletonBTreeMap)tree.bkmap); + } + + /*@Override**/ public SkeletonBTreeSet rev(Map tree) throws DataFormatException { + return new SkeletonBTreeSet(trans.rev(tree)); + } + + } + + public String toString() { + // Default toString for an AbstractCollection dumps everything underneath it. + // We don't want that here, especially as they may not be loaded. + return getClass().getName() + "@" + System.identityHashCode(this); + } + +} diff --git a/src/main/java/plugins/Library/util/SkeletonMap.java b/src/main/java/plugins/Library/util/SkeletonMap.java new file mode 100644 index 00000000..0e4bc7a1 --- /dev/null +++ b/src/main/java/plugins/Library/util/SkeletonMap.java @@ -0,0 +1,58 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import plugins.Library.io.serial.MapSerialiser; +import plugins.Library.util.exec.TaskAbortException; + +import java.util.Map; + +/** +** A {@link Skeleton} of a {@link Map}. +** +** @author infinity0 +*/ +public interface SkeletonMap extends Map, Skeleton> { + + /** + ** {@inheritDoc} + ** + ** In other words, for all keys k: {@link Map#get(Object) get(k)} must + ** return the appropriate value. + */ + /*@Override**/ public boolean isLive(); + + /** + ** {@inheritDoc} + ** + ** In other words, for all keys k: {@link Map#get(Object) get(k)} must + ** throw {@link DataNotLoadedException}. + */ + /*@Override**/ public boolean isBare(); + + /*@Override**/ public MapSerialiser getSerialiser(); + + /** + ** {@inheritDoc} + ** + ** For a {@code SkeletonMap}, this inflates the value for a key so that + ** after the method call, {@link Map#get(Object) get(key)} will not throw + ** {@link DataNotLoadedException}. + ** + ** @param key The key for whose value to inflate. + */ + /*@Override**/ public void inflate(K key) throws TaskAbortException; + + /** + ** {@inheritDoc} + ** + ** For a {@code SkeletonMap}, this deflates the value for a key so that + ** after the method call, {@link Map#get(Object) get(k)} will throw a + ** {@link DataNotLoadedException}. + ** + ** @param key The key for whose value to deflate. + */ + /*@Override**/ public void deflate(K key) throws TaskAbortException; + +} diff --git a/src/main/java/plugins/Library/util/SkeletonTreeMap.java b/src/main/java/plugins/Library/util/SkeletonTreeMap.java new file mode 100644 index 00000000..866fb2bc --- /dev/null +++ b/src/main/java/plugins/Library/util/SkeletonTreeMap.java @@ -0,0 +1,949 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import plugins.Library.io.serial.Serialiser.*; +import plugins.Library.io.serial.Translator; +import plugins.Library.io.serial.MapSerialiser; +import plugins.Library.io.DataFormatException; +import plugins.Library.util.exec.TaskAbortException; +import plugins.Library.util.exec.TaskCompleteException; + +import java.util.Iterator; +import java.util.Comparator; +import java.util.Collection; +import java.util.Set; +import java.util.Map; +import java.util.AbstractCollection; +import java.util.AbstractSet; +import java.util.AbstractMap; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.HashMap; + +import freenet.support.Logger; + +/** +** A {@link SkeletonMap} of a {@link TreeMap}. DOCUMENT +** +** Note: this implementation, like {@link TreeMap}, is not thread-safe. +** +** TODO NORM "issub" should not be necessary, but need to check bounds on +** submaps before trying to remove an item. +** +** @author infinity0 +*/ +public class SkeletonTreeMap extends TreeMap +implements Map, SortedMap, SkeletonMap, Cloneable { + + // We don't actually use any of the methods or fields of TreeMap directly, + // but we need to extend it to pretend that this "is" a TreeMap. + + /** + ** The backing map. This map is only ever modified structurally - ie. the + ** {@link SkeletonValue} for a given key is never overwritten; only its + ** contents are. This ensures correct behaviour for the {@link + ** UnwrappingIterator} class. + */ + final protected TreeMap> skmap; + + /** + ** The meta data for this skeleton. + */ + protected Object mapmeta; + + /** + ** Keeps track of the number of ghost values in the map. + */ + protected transient int ghosts; + + public SkeletonTreeMap() { + skmap = new TreeMap>(); + } + + public SkeletonTreeMap(Comparator c) { + skmap = new TreeMap>(c); + } + + public SkeletonTreeMap(Map m) { + skmap = new TreeMap>(); + putAll(m); + } + + public SkeletonTreeMap(SortedMap m) { + skmap = new TreeMap>(m.comparator()); + putAll(m); + } + + public SkeletonTreeMap(SkeletonTreeMap m) { + skmap = new TreeMap>(m.comparator()); + for (Map.Entry> en: m.skmap.entrySet()) { + skmap.put(en.getKey(), en.getValue().clone()); + } + ghosts = m.ghosts; + } + + /** + ** Set the metadata for the {@link SkeletonValue} for a given key. + */ + public Object putGhost(K key, Object o) { + if(key == null) throw new IllegalArgumentException("No null keys"); + if (o == null) { + throw new IllegalArgumentException("Cannot put a null dummy into the map. 
Use put(K, V) to mark an object as loaded."); + } + SkeletonValue sk = skmap.get(key); + if (sk == null) { + skmap.put(key, sk = new SkeletonValue(null, o, false)); + ++ghosts; // One new ghost + return null; + } else { + if (sk.isLoaded()) { ++ghosts; } + return sk.setGhost(o); + } + } + + protected MapSerialiser serialiser; + public void setSerialiser(MapSerialiser s) { + if (serialiser != null && !isLive()) { + throw new IllegalStateException("Cannot change the serialiser when the structure is not live."); + } + serialiser = s; + } + + public static void swapKey(K key, SkeletonTreeMap src, SkeletonTreeMap dst) { + if (dst.containsKey(key)) { throw new IllegalArgumentException("SkeletonTreeMap.swapKey: key " + key + " already exists in target map"); } + if (!src.containsKey(key)) { throw new IllegalArgumentException("SkeletonTreeMap.swapKey: key " + key + " does not exist in source map"); } + SkeletonValue sk = src.skmap.remove(key); + if (!sk.isLoaded()) { --src.ghosts; ++dst.ghosts; } + dst.skmap.put(key, sk); + } + + /*======================================================================== + public interface SkeletonMap + ========================================================================*/ + + /*@Override**/ public boolean isLive() { + return ghosts == 0; + } + + /*@Override**/ public boolean isBare() { + return ghosts == skmap.size(); + } + + /*@Override**/ public MapSerialiser getSerialiser() { + return serialiser; + } + + /*@Override**/ public Object getMeta() { + return mapmeta; + } + + /*@Override**/ public void setMeta(Object m) { + mapmeta = m; + } + + /*@Override**/ public void inflate() throws TaskAbortException { + if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } + if (isLive()) { return; } + + Map> tasks = new HashMap>(size()<<1); + // load only the tasks that need pulling + for (Map.Entry> en: skmap.entrySet()) { + SkeletonValue skel = en.getValue(); + if (skel.isLoaded()) { continue; } + tasks.put(en.getKey(), new PullTask(skel.meta())); + } + serialiser.pull(tasks, mapmeta); + + for (Map.Entry> en: tasks.entrySet()) { + assert(skmap.get(en.getKey()) != null); + // TODO NORM atm old metadata is retained, could update? + put(en.getKey(), en.getValue().data); + } + } + + /*@Override**/ public void deflate() throws TaskAbortException { + if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } + if (isBare()) { return; } + + Map> tasks = new HashMap>(size()<<1); + // load all the tasks, most MapSerialisers need this info + for (Map.Entry> en: skmap.entrySet()) { + SkeletonValue skel = en.getValue(); + if(skel.data() == null) { + if(skel.isLoaded()) + // FIXME is a null loaded value legal? Should it be? Disallowing nulls is not an unreasonable policy IMHO... + // Currently nulls are used in SkeletonBTreeMap.update() as placeholders, but we do not want them to be serialised... + throw new TaskAbortException("Skeleton value in "+this+" : isLoaded() = true, data = null, meta = "+skel.meta()+" for "+en.getKey(), new NullPointerException()); + else if(skel.meta() == null) + throw new TaskAbortException("Skeleton value in "+this+" : isLoaded() = false, data = null, meta = null for "+en.getKey(), new NullPointerException()); + else { + // MapSerialiser understands PushTask's where data == null and metadata != null + // So just let it through. + // It will be fine after push() has completed. 
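+ // (We fall through deliberately: the task is enqueued below
+ // with data == null and its existing metadata.)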
+ } + } + tasks.put(en.getKey(), new PushTask(skel.data(), skel.meta())); + if (skel.data() instanceof SkeletonBTreeSet) { + SkeletonBTreeSet ss = (SkeletonBTreeSet)skel.data(); + } + } + serialiser.push(tasks, mapmeta); + + for (Map.Entry> en: tasks.entrySet()) { + assert(skmap.get(en.getKey()) != null); + putGhost(en.getKey(), en.getValue().meta); + } + } + + /*@Override**/ public void inflate(K key) throws TaskAbortException { + if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } + + SkeletonValue skel = skmap.get(key); + if (skel == null) { + throw new IllegalArgumentException("Key " + key + " does not belong to the map"); + } else if (skel.isLoaded()) { + return; + } + + Map> tasks = new HashMap>(); + tasks.put(key, new PullTask(skel.meta())); + + try { + serialiser.pull(tasks, mapmeta); + + put(key, tasks.remove(key).data); + // TODO NORM atm old metadata is retained, could update? + if (tasks.isEmpty()) { return; } + + for (Map.Entry> en: tasks.entrySet()) { + // other keys may also have been inflated, so add them, but only if the + // generated metadata match. + PullTask t = en.getValue(); + if(t.data == null) + throw new DataFormatException("Inflate got null from PullTask for "+key+" on "+this, null, null, tasks, en.getKey()); + SkeletonValue sk = skmap.get(en.getKey()); + if (sk == null) { throw new DataFormatException("SkeletonTreeMap got unexpected extra data from the serialiser.", null, sk, tasks, en.getKey()); } + if (sk.meta().equals(t.meta)) { + if (!sk.isLoaded()) { --ghosts; } + sk.set(t.data); + } + // if they don't match, then the data was only partially inflated + // (note: at the time of coding, only SplitPacker does this, and + // that is deprecated) so ignore for now. (needs a new data + // structure to allow partial inflates of values...) + } + + } catch (TaskCompleteException e) { + assert(skmap.get(key).isLoaded()); + } catch (DataFormatException e) { + throw new TaskAbortException("Could not complete inflate operation", e); + } + } + + /*@Override**/ public void deflate(K key) throws TaskAbortException { + if (serialiser == null) { throw new IllegalStateException("No serialiser set for this structure."); } + + SkeletonValue skel = skmap.get(key); + if (skel == null || !skel.isLoaded()) { return; } + + Map> tasks = new HashMap>(size()<<1); + // add the data we want to deflate, plus all already-deflated data + // OPTMISE think of a way to let the serialiser signal what + // information it needs? will be quite painful to implement that... + tasks.put(key, new PushTask(skel.data(), skel.meta())); + for (Map.Entry> en: skmap.entrySet()) { + skel = en.getValue(); + if (skel.isLoaded()) { continue; } + assert(skel.data() == null); + tasks.put(en.getKey(), new PushTask(null, skel.meta())); + } + serialiser.push(tasks, mapmeta); + + for (Map.Entry> en: tasks.entrySet()) { + assert(skmap.get(en.getKey()) != null); + // serialiser may have pulled and re-pushed some of the + // eg. Packer does this. so set all the metadata for all the tasks + // (unchanged tasks should have been discarded anyway) + putGhost(en.getKey(), en.getValue().meta); + } + } + + /************************************************************************ + ** {@link Translator} with access to the members of {@link TreeMap}. + ** + ** This implementation provides static methods to translate between this + ** class and a map from ({@link String} forms of the key) to ({@link + ** Task#meta metadata} of the values). 
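+ **
+ ** For illustration (the map must be bare; with a {@code null} key
+ ** translator, keys are stringified via toString() and rev() casts them
+ ** straight back, so this round-trips only for {@link String} keys):
+ **
+ **   Map<String, Object> intm = TreeMapTranslator.app(
+ **       map, new HashMap<String, Object>(), null);
+ **   SkeletonTreeMap<String, V> back = TreeMapTranslator.rev(
+ **       intm, new SkeletonTreeMap<String, V>(), null);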
+ **
+ ** TODO LOW atm this can't handle custom comparators
+ **
+ ** @author infinity0
+ */
+ abstract public static class TreeMapTranslator
+ implements Translator, Map> {
+
+ /**
+ ** Forward translation. If the translator given is {@code null},
+ ** it will use {@link Object#toString()}.
+ **
+ ** @param map The data structure to translate
+ ** @param intm A map to populate with the translated mappings
+ ** @param ktr An optional translator between key and {@link String}.
+ */
+ public static Map app(SkeletonTreeMap map, Map intm, Translator ktr) {
+ if (!map.isBare()) {
+ throw new IllegalArgumentException("Data structure is not bare. Try calling deflate() first.");
+ }
+ if (map.comparator() != null) {
+ throw new UnsupportedOperationException("Sorry, this translator does not (yet) support comparators");
+ }
+ // FIXME LOW maybe get rid of intm and just always use HashMap
+ if (ktr != null) {
+ for (Map.Entry> en: map.skmap.entrySet()) {
+ intm.put(ktr.app(en.getKey()), en.getValue().meta());
+ }
+ } else {
+ for (Map.Entry> en: map.skmap.entrySet()) {
+ intm.put(en.getKey().toString(), en.getValue().meta());
+ }
+ }
+ return intm;
+ }
+
+ /**
+ ** Backward translation. If no translator is given, a direct cast will
+ ** be attempted.
+ **
+ ** @param intm The map of translated mappings to extract
+ ** @param map The data structure to populate with metadata
+ ** @param ktr A translator between key and {@link String}
+ */
+ public static SkeletonTreeMap rev(Map intm, SkeletonTreeMap map, Translator ktr) throws DataFormatException {
+ if (ktr == null) {
+ try {
+ for (Map.Entry en: intm.entrySet()) {
+ map.putGhost((K)en.getKey(), en.getValue());
+ }
+ } catch (ClassCastException e) {
+ throw new DataFormatException("TreeMapTranslator: reverse translation failed. Try supplying a non-null key-translator.", e, intm, null, null);
+ }
+ } else {
+ for (Map.Entry en: intm.entrySet()) {
+ map.putGhost(ktr.rev(en.getKey()), en.getValue());
+ }
+ }
+ return map;
+ }
+
+ }
+
+ /*========================================================================
+ public interface Map
+ ========================================================================*/
+
+ @Override public int size() { return skmap.size(); }
+
+ @Override public boolean isEmpty() { return skmap.isEmpty(); }
+
+ @Override public void clear() {
+ skmap.clear();
+ ghosts = 0;
+ }
+
+ @Override public Comparator comparator() { return skmap.comparator(); }
+
+ @Override public boolean containsKey(Object key) { return skmap.containsKey(key); }
+
+ @Override public boolean containsValue(Object value) {
+ if (!isLive()) {
+ // TODO LOW maybe make this work even when map is partially loaded
+ throw new DataNotLoadedException("TreeMap not fully loaded.", this);
+ } else {
+ for (SkeletonValue v: skmap.values()) {
+ if (value.equals(v.data())) { return true; }
+ }
+ return false;
+ }
+ }
+
+ @Override public V get(Object key) {
+ SkeletonValue sk = skmap.get(key);
+ if (sk == null) { return null; }
+ if (!sk.isLoaded()) { throw new DataNotLoadedException("Data not loaded for key " + key + ": " + sk.meta(), this, key, sk.meta()); }
+ return sk.data();
+ }
+
+ /**
+ ** {@inheritDoc}
+ **
+ ** NOTE: if the value for the key hasn't been loaded yet, then this method
+ ** will return **null** instead of returning the actual previous value
+ ** (that hasn't been loaded yet).
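+ **
+ ** For example: after {@code putGhost(k, meta)} alone, a subsequent
+ ** {@code put(k, v)} returns {@code null}, not the value the ghost
+ ** stands for.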
+ ** + ** TODO LOW could code a setStrictChecksMode() or something to have this + ** method throw {@link DataNotLoadedException} in such circumstances, at + ** the user's discretion. This applies for CombinedEntry.setValue() too. + */ + @Override public V put(K key, V value) { + if(key == null) throw new NullPointerException(); + SkeletonValue sk = skmap.get(key); + if (sk == null) { + skmap.put(key, sk = new SkeletonValue(value, null, true)); + // ghosts is unchanged. + return null; + } else { + if (!sk.isLoaded()) { --ghosts; } + return sk.set(value); + } + } + + @Override public void putAll(Map map) { + SortedMap> putmap; + if (map instanceof SkeletonTreeMap) { + putmap = ((SkeletonTreeMap)map).skmap; + } else if (map instanceof SkeletonTreeMap.UnwrappingSortedSubMap) { + putmap = ((UnwrappingSortedSubMap)map).bkmap; + } else { + for (Map.Entry en: map.entrySet()) { + put((K)en.getKey(), (V)en.getValue()); + } + return; + } + int g = 0; + for (Map.Entry> en: putmap.entrySet()) { + SkeletonValue sk = en.getValue(); + SkeletonValue old = skmap.put(en.getKey(), sk); + if (old == null) { + if (!sk.isLoaded()) { ++g; } + } else { + if (old.isLoaded()) { + if (!sk.isLoaded()) { ++g; } + } else { + if (sk.isLoaded()) { --g; } + } + } + } + ghosts += g; + } + + /** + ** {@inheritDoc} + ** + ** NOTE: if the value for the key hasn't been loaded yet, then this method + ** will return **null** instead of returning the actual previous value + ** (that hasn't been loaded yet). + ** + ** TODO LOW could code a setStrictChecksMode() or something to have this + ** method throw {@link DataNotLoadedException} in such circumstances, at + ** the user's discretion. + */ + @Override public V remove(Object key) { + SkeletonValue sk = skmap.remove(key); + if (sk == null) { return null; } + if (!sk.isLoaded()) { --ghosts; } + return sk.data(); + } + + private transient Set> entries; + @Override public Set> entrySet() { + if (entries == null) { + entries = new UnwrappingEntrySet(skmap, false); + } + return entries; + } + + private transient Set keys; + @Override public Set keySet() { + if (keys == null) { + // OPT NORM make this implement a SortedSet + keys = new AbstractSet() { + + @Override public int size() { return skmap.size(); } + + @Override public Iterator iterator() { + return new UnwrappingIterator(skmap.entrySet().iterator(), UnwrappingIterator.KEY, false); + } + + @Override public void clear() { SkeletonTreeMap.this.clear(); } + + @Override public boolean contains(Object o) { + return SkeletonTreeMap.this.containsKey(o); + } + + @Override public boolean remove(Object o) { + boolean c = contains(o); + SkeletonTreeMap.this.remove(o); + return c; + } + + }; + } + return keys; + } + + private transient Collection values; + @Override public Collection values() { + if (values == null) { + values = new AbstractCollection() { + + @Override public int size() { return SkeletonTreeMap.this.size(); } + + @Override public Iterator iterator() { + return new UnwrappingIterator(skmap.entrySet().iterator(), UnwrappingIterator.VALUE, false); + } + + @Override public void clear() { SkeletonTreeMap.this.clear(); } + + }; + } + return values; + } + + @Override public K firstKey() { + return skmap.firstKey(); + } + + @Override public K lastKey() { + return skmap.lastKey(); + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method is limited in its + ** functionality, and will throw {@link UnsupportedOperationException} for + ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. 
+ */ + @Override public SortedMap subMap(K fr, K to) { + return new UnwrappingSortedSubMap(skmap.subMap(fr, to)); + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method is limited in its + ** functionality, and will throw {@link UnsupportedOperationException} for + ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. + */ + @Override public SortedMap headMap(K to) { + return new UnwrappingSortedSubMap(skmap.headMap(to)); + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method is limited in its + ** functionality, and will throw {@link UnsupportedOperationException} for + ** most of its methods. For details, see {@link UnwrappingSortedSubMap}. + */ + @Override public SortedMap tailMap(K fr) { + return new UnwrappingSortedSubMap(skmap.tailMap(fr)); + } + + + /*======================================================================== + public class Object + ========================================================================*/ + + @Override public boolean equals(Object o) { + if (o instanceof SkeletonTreeMap) { + return skmap.equals(((SkeletonTreeMap)o).skmap); + } + return super.equals(o); + } + + /* provided by AbstractMap + @Override public int hashCode() { + throw new UnsupportedOperationException("not implemented"); + }*/ + + @Override public Object clone() { + return new SkeletonTreeMap(this); + } + + // public String toString() { return super.toString(); } + + + /************************************************************************ + ** {@link Iterator} that will throw {@link DataNotLoadedException} when it + ** encounters such data. Precise behaviour: + ** + ** * The key iterator will never throw an exception + ** * The entry iterator will never throw an exception, but the {@link + ** UnwrappingEntry#getValue()} method of the entry will throw one if + ** data is not loaded at the time of the method call. This allows you to + ** load ''and retrieve'' the data during the middle of an iteration, by + ** polling the {@code getValue()} method of the entry. + ** * The value iterator will throw an exception if the value is not loaded, + ** and keep throwing this exception for subsequent method calls, pausing + ** the iteration until the value is loaded. + ** + ** The {@link #remove()} method will set the {@link #ghosts} field + ** correctly as long as (and only if) access to (the {@link SkeletonValue} + ** corresponding to the item being removed) is either synchronized, or + ** contained within one thread. + ** + ** @author infinity0 + */ + protected class UnwrappingIterator implements Iterator { + + final protected Iterator>> iter; + + /** + ** Last entry returned by the backing iterator. Used by {@link + ** #remove()} to update the {@link #ghosts} counter correctly. + */ + protected Map.Entry> last; + + final protected static int KEY = 0; + final protected static int VALUE = 1; + final protected static int ENTRY = 2; + + /** + ** Whether this is a view of a submap {@link SkeletonTreeMap}. Currently, + ** this determines whether attempts to modify the view will fail, since + ** it's a bitch to keep track of the ghost counter. + */ + final protected boolean issub; + protected boolean gotvalue; + + /** + ** Type of iterator. + ** + ** ;{@link #KEY} : Key iterator + ** ;{@link #VALUE} : Value iterator + ** ;{@link #ENTRY} : Entry iterator + */ + final protected int type; + + /** + ** Construct an iterator backed by the given iterator over the entries + ** of the backing map. 
+ ** + ** @param it The backing iterator + ** @param t The {@link #type} of iterator + */ + protected UnwrappingIterator(Iterator>> it, int t, boolean sub) { + assert(t == KEY || t == VALUE || t == ENTRY); + type = t; + iter = it; + issub = sub; + } + + public boolean hasNext() { + return iter.hasNext(); + } + + public T next() { + switch (type) { + case KEY: + last = iter.next(); + return (T)last.getKey(); + case VALUE: + if (last == null || gotvalue) { last = iter.next(); } + SkeletonValue skel = last.getValue(); + if (!(gotvalue = skel.isLoaded())) { + throw new DataNotLoadedException("SkeletonTreeMap: Data not loaded for key " + last.getKey() + ": " + skel.meta(), SkeletonTreeMap.this, last.getKey(), skel.meta()); + } + return (T)skel.data(); + case ENTRY: + last = iter.next(); + return (T)new UnwrappingEntry(last); + } + throw new AssertionError(); + } + + public void remove() { + if (issub) { throw new UnsupportedOperationException("not implemented"); } + if (last == null) { throw new IllegalStateException("Iteration has not yet begun, or the element has already been removed."); } + if (!last.getValue().isLoaded()) { --ghosts; } + iter.remove(); + last = null; + } + + + /************************************************************************ + ** {@link java.util.Map.Entry} backed by one whose value is wrapped inside a {@link + ** SkeletonValue}. This class will unwrap the value when it is required, + ** throwing {@link DataNotLoadedException} as necessary. + ** + ** @author infinity0 + */ + protected class UnwrappingEntry implements Map.Entry { + + final K key; + final SkeletonValue skel; + + protected UnwrappingEntry(Map.Entry> en) { + key = en.getKey(); + skel = en.getValue(); + } + + protected void verifyLoaded() { + if (!skel.isLoaded()) { + throw new DataNotLoadedException("SkeletonTreeMap: Data not loaded for key " + key + ": " + skel.meta(), SkeletonTreeMap.this, key, skel.meta()); + } + } + + public K getKey() { + return key; + } + + public V getValue() { + verifyLoaded(); + return skel.data(); + } + + /** + ** See also note for {@link SkeletonTreeMap#put(Object, Object)}. + */ + public V setValue(V value) { + if (!skel.isLoaded()) { --ghosts; } + return skel.set(value); + } + + @Override public int hashCode() { + verifyLoaded(); + return (key==null? 0: key.hashCode()) ^ + (skel.data()==null? 0: skel.data().hashCode()); + } + + @Override public boolean equals(Object o) { + if (!(o instanceof Map.Entry)) { return false; } + verifyLoaded(); + Map.Entry en = (Map.Entry)o; + return (key==null? en.getKey()==null: key.equals(en.getKey())) && + (skel.data()==null? en.getValue()==null: skel.data().equals(en.getValue())); + } + + } + + } + + + /************************************************************************ + ** Submap of a {@link SkeletonTreeMap}. Currently, this only implements the + ** {@link #firstKey()}, {@link #lastKey()} and {@link #isEmpty()} methods. + ** This is because it's a bitch to implement an entire {@link SortedMap}, + ** and I only needed those three methods for {@link SkeletonBTreeMap} to + ** work. Feel free to expand it. 
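
[Example] The pausing behaviour of the value iterator means a caller can catch DataNotLoadedException, load the missing value, and then resume the same iterator without losing its position. A usage sketch; loadValueFor() is a hypothetical helper standing in for whatever deserialises the ghost's metadata, and is not part of the patch:

    // Hypothetical driver for the pausing value iterator.
    Iterator<V> it = skelmap.values().iterator();
    while (it.hasNext()) {
        V value;
        try {
            value = it.next();
        } catch (DataNotLoadedException e) {
            loadValueFor(e); // hypothetical: load the data the exception describes
            continue;        // the iterator did not advance; retry the same element
        }
        process(value);      // hypothetical consumer
    }
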
+ ** + ** @author infinity0 + */ + protected class UnwrappingSortedSubMap extends AbstractMap implements SortedMap { + + final SortedMap> bkmap; + + protected UnwrappingSortedSubMap(SortedMap> sub) { + bkmap = sub; + } + + @Override public int size() { + return bkmap.size(); + } + + @Override public boolean isEmpty() { + return bkmap.isEmpty(); + } + + /*@Override**/ public Comparator comparator() { + return bkmap.comparator(); + } + + /*@Override**/ public K firstKey() { + return bkmap.firstKey(); + } + + /*@Override**/ public K lastKey() { + return bkmap.lastKey(); + } + + private transient Set> entries; + @Override public Set> entrySet() { + if (entries == null) { + entries = new UnwrappingEntrySet(bkmap, true); + } + return entries; + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method has the {@link + ** UnwrappingSortedSubMap same limitations} as this class. + */ + /*@Override**/ public SortedMap subMap(K fr, K to) { + return new UnwrappingSortedSubMap(bkmap.subMap(fr, to)); + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method has the {@link + ** UnwrappingSortedSubMap same limitations} as this class. + */ + /*@Override**/ public SortedMap headMap(K to) { + return new UnwrappingSortedSubMap(bkmap.headMap(to)); + } + + /** + ** {@inheritDoc} + ** + ** NOTE: the sorted map returned by this method has the {@link + ** UnwrappingSortedSubMap same limitations} as this class. + */ + /*@Override**/ public SortedMap tailMap(K fr) { + return new UnwrappingSortedSubMap(bkmap.tailMap(fr)); + } + + } + + + /************************************************************************ + ** DOCUMENT + ** + ** @author infinity0 + */ + protected class UnwrappingEntrySet extends AbstractSet> { + + final protected SortedMap> bkmap; + /** + ** Whether this is a view of a submap {@link SkeletonTreeMap}. Currently, + ** this determines whether attempts to modify the view will fail, since + ** it's a bitch to keep track of the ghost counter. 
+ */ + final protected boolean issub; + + protected UnwrappingEntrySet(SortedMap> bk, boolean sub) { + bkmap = bk; + issub = sub; + } + + @Override public int size() { return bkmap.size(); } + + @Override public Iterator> iterator() { + return new UnwrappingIterator>(bkmap.entrySet().iterator(), UnwrappingIterator.ENTRY, issub); + } + + @Override public void clear() { + if (issub) { throw new UnsupportedOperationException("not implemented"); } + SkeletonTreeMap.this.clear(); + } + + @Override public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) { return false; } + Map.Entry en = (Map.Entry)o; + Object key = en.getKey(); Object val = en.getValue(); + // return SkeletonTreeMap.this.get(e.getKey()).equals(e.getValue()); + SkeletonValue sk = bkmap.get(key); + if (sk == null) { return false; } + if (!sk.isLoaded()) { throw new DataNotLoadedException("Data not loaded for key " + key + ": " + sk.meta(), SkeletonTreeMap.this, key, sk.meta()); } + return sk.data() == null && val == null || sk.data().equals(val); + } + + @Override public boolean remove(Object o) { + if (issub) { throw new UnsupportedOperationException("not implemented"); } + // SkeletonTreeMap.remove() called, which automatically updates _ghosts + boolean c = contains(o); + if (c) { SkeletonTreeMap.this.remove(((Map.Entry)o).getKey()); } + return c; + } + + } + + public String toString() { + if(this.isLive()) + return getClass().getName() + "@" + System.identityHashCode(this) +":" + super.toString(); + else + return getClass().getName() + "@" + System.identityHashCode(this); + } + + + +} + +/************************************************************************ +** A class that wraps the associated value (which may not be loaded) of a +** key in a {@link SkeletonTreeMap}, along with metadata and its loaded +** status. +** +** meta and data are private with accessors to ensure proper encapsulation +** i.e. the only way data can be != null is if isLoaded(), and the only way +** meta can be != null is if !isLoaded(). Note that with the methods and the +** class final, there should be no performance penalty. +** +** @author infinity0 +*/ +final class SkeletonValue implements Cloneable { + + private Object meta; + private V data; + + private boolean isLoaded; + + public SkeletonValue(V d, Object o, boolean loaded) { + if(loaded) { + assert(o == null); + // Null data is allowed as we use it in SkeletonBTreeMap.update() as a placeholder. + isLoaded = true; + data = d; + } else { + assert(d == null); + assert(o != null); // null metadata is invalid, right? + isLoaded = false; + meta = o; + } + } + + public final V data() { + return data; + } + + public final Object meta() { + return meta; + } + + public final boolean isLoaded() { + return isLoaded; + } + + /** + ** Set the data and mark the value as loaded. + */ + public V set(V v) { + V old = data; + data = v; + // meta = null; TODO LOW decide what to do with this. + isLoaded = true; + return old; + } + + /** + ** Set the metadata and mark the value as not loaded. 
+ */ + public Object setGhost(Object m) { + if(m == null) throw new NullPointerException(); + Object old = meta; + meta = m; + data = null; + isLoaded = false; + return old; + } + + @Override public String toString() { + return "(" + data + ", " + meta + ", " + isLoaded + ")"; + } + + @Override public SkeletonValue clone() { + try { + return (SkeletonValue)super.clone(); + } catch (CloneNotSupportedException e) { + throw new AssertionError(e); + } + } + +} + + diff --git a/src/main/java/plugins/Library/util/Sorted.java b/src/main/java/plugins/Library/util/Sorted.java new file mode 100644 index 00000000..0b390d51 --- /dev/null +++ b/src/main/java/plugins/Library/util/Sorted.java @@ -0,0 +1,268 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.SortedMap; +import java.util.SortedSet; +import java.util.AbstractSet; +import java.util.ArrayList; + +/** +** Methods on sorted collections +** +** Most of the stuff here is completely fucking pointless and only because we +** need to support Java 5. Otherwise, we can just use standard Navigable* +** methods. :| +** +** @author infinity0 +*/ +final public class Sorted { + + public enum Inclusivity { NONE, LEFT, RIGHT, BOTH } + + private Sorted() { } + + private static class SortedKeySet extends AbstractSet implements SortedSet { + + final SortedMap map; + + public SortedKeySet(SortedMap m) { + map = m; + } + + @Override public int size() { return map.size(); } + + @Override public Iterator iterator() { + return map.keySet().iterator(); + } + + @Override public void clear() { map.clear(); } + + @Override public boolean contains(Object o) { + return map.containsKey(o); + } + + @Override public boolean remove(Object o) { + boolean c = contains(o); + map.remove(o); + return c; + } + + /*@Override**/ public Comparator comparator() { return map.comparator(); } + /*@Override**/ public K first() { return map.firstKey(); } + /*@Override**/ public K last() { return map.lastKey(); } + /*@Override**/ public SortedSet headSet(K r) { return new SortedKeySet(map.headMap(r)); } + /*@Override**/ public SortedSet tailSet(K l) { return new SortedKeySet(map.tailMap(l)); } + /*@Override**/ public SortedSet subSet(K l, K r) { return new SortedKeySet(map.subMap(l, r)); } + + } + + /** + ** Returns the second element in an iterable, or {@code null} if there is + ** no such element. The iterable is assumed to have at least one element. + */ + public static E second(Iterable ib) { + Iterator it = ib.iterator(); + it.next(); + return (it.hasNext())? it.next(): null; + } + + /** + ** Returns the least element (in the given set) >= the given element, or + ** {@code null} if there is no such element. + ** + ** This is identical to Java 6's {@code NavigableSet.ceiling(E)}, but works + ** for {@link SortedSet}. + */ + public static E ceiling(SortedSet set, E el) { + SortedSet tail = set.tailSet(el); + return (tail.isEmpty())? null: tail.first(); + } + + /** + ** Returns the greatest element (in the given set) <= the given element, or + ** {@code null} if there is no such element. + ** + ** This is identical to Java 6's {@code NavigableSet.floor(E)}, but works + ** for {@link SortedSet}. 
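
[Example] These Java 5 emulations of the NavigableSet lookups are easy to sanity-check. A small test of ceiling() above and of floor()/higher()/lower() just below (hypothetical test code, not part of the patch):

    import java.util.Arrays;
    import java.util.SortedSet;
    import java.util.TreeSet;

    SortedSet<Integer> s = new TreeSet<Integer>(Arrays.asList(10, 20, 30));
    assert Sorted.ceiling(s, 20) == 20;   // exact hit: least element >= 20
    assert Sorted.ceiling(s, 21) == 30;   // least element >= 21
    assert Sorted.ceiling(s, 31) == null; // no such element
    assert Sorted.floor(s, 19) == 10;     // greatest element <= 19
    assert Sorted.higher(s, 20) == 30;    // least element > 20
    assert Sorted.lower(s, 20) == 10;     // greatest element < 20
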
+ */ + public static E floor(SortedSet set, E el) { + SortedSet head; + return (set.contains(el))? el: ((head = set.headSet(el)).isEmpty())? null: head.last(); + } + + /** + ** Returns the least element (in the given set) > the given element, or + ** {@code null} if there is no such element. + ** + ** This is identical to Java 6's {@code NavigableSet.higher(E)}, but works + ** for {@link SortedSet}. + */ + public static E higher(SortedSet set, E el) { + SortedSet tail = set.tailSet(el); + return (set.contains(el))? second(tail): (tail.isEmpty())? null: tail.first(); + } + + /** + ** Returns the greatest element (in the given set) < the given element, or + ** {@code null} if there is no such element. + ** + ** This is identical to Java 6's {@code NavigableSet.lower(E)}, but works + ** for {@link SortedSet}. + */ + public static E lower(SortedSet set, E el) { + SortedSet head = set.headSet(el); + return (head.isEmpty())? null: head.last(); + } + + /** + ** Returns a {@link SortedSet} view of the given {@link SortedMap}'s keys. + ** You would think that the designers of the latter would have overridden + ** the {@link SortedMap#keySet()} method to do this, but noooo... + ** + ** JDK6 remove this pointless piece of shit. + */ + public static SortedSet keySet(SortedMap map) { + Set ks = map.keySet(); + return (ks instanceof SortedSet)? (SortedSet)ks: new SortedKeySet(map); + } + + /** + ** Splits the given sorted set at the given separators. + ** + ** FIXME LOW currently this method assumes that the comparator of the set + ** is consistent with equals(). we should use the SortedSet's comparator() + ** instead, or "natural" ordering. + ** + ** JDK6 use a NavigableSet instead of a SortedSet. + ** + ** @param subj Subject of the split + ** @param sep Separators to split at + ** @param foundsep An empty set which will be filled with the separators + ** that were also contained in the subject set. + ** @return A list of subsets; each subset contains all entries between two + ** adjacent separators, or an edge separator and the corresponding + ** edge of the set. The list is in sorted order. + ** @throws NullPointerException if any of the inputs are {@code null} + */ + public static List> split(SortedSet subj, SortedSet sep, SortedSet foundsep) { + if (!foundsep.isEmpty()) { + throw new IllegalArgumentException("split(): Must provide an empty set to add found separators to"); + } + + if (subj.isEmpty()) { + return Collections.emptyList(); + } else if (sep.isEmpty()) { + return Collections.singletonList(subj); + } + + List> res = new ArrayList>(sep.size()+2); + E csub = subj.first(), csep, nsub, nsep; + + do { + csep = floor(sep, csub); // JDK6 sep.floor(csub); + assert(csub != null); + if (csub.equals(csep)) { + assert(subj.contains(csep)); + foundsep.add(csep); + csub = second(subj.tailSet(csep)); // JDK6 subj.higher(csep); + continue; + } + + nsep = (csep == null)? sep.first(): second(sep.tailSet(csep)); // JDK6 sep.higher(csep); + assert(nsep == null || ((Comparable)csub).compareTo(nsep) < 0); + + nsub = (nsep == null)? subj.last(): floor(subj, nsep); // JDK6 subj.floor(nsep); + assert(nsub != null); + if (nsub.equals(nsep)) { + foundsep.add(nsep); + nsub = lower(subj, nsep); // JDK6 subj.lower(nsep); + } + + assert(csub != null && ((Comparable)csub).compareTo(nsub) <= 0); + assert(csep != null || nsep != null); // we already took care of sep.size() == 0 + res.add( + (csep == null)? subj.headSet(nsep): + (nsep == null)? 
subj.tailSet(csub):
+				subj.subSet(csub, nsep)
+			);
+			if (nsep == null) { break; }
+
+			csub = second(subj.tailSet(nsub)); // JDK6 subj.higher(nsub);
+		} while (csub != null);
+
+		return res;
+	}
+
+	/**
+	** Select {@code num} separator elements from the subject sorted set. The
+	** elements are distributed evenly amongst the set.
+	**
+	** @param subj The set to select elements from
+	** @param num Number of elements to select
+	** @param inc Which sides of the set to select. For example, if this is
+	**        {@code BOTH}, then the first and last elements will be selected.
+	** @return The selected elements
+	*/
+	public static <E> List<E> select(SortedSet<E> subj, int num, Inclusivity inc) {
+		if (num >= 2) {
+			// test most common first
+		} else if (num == 1) {
+			switch (inc) {
+			case LEFT: return Collections.singletonList(subj.first());
+			case RIGHT: return Collections.singletonList(subj.last());
+			case BOTH: throw new IllegalArgumentException("select(): can't have num=1 and inc=BOTH");
+			// case NONE falls through to main section
+			}
+		} else if (num == 0) {
+			if (inc == Inclusivity.NONE) {
+				return Collections.emptyList();
+			} else {
+				throw new IllegalArgumentException("select(): can't have num=0 and inc!=NONE");
+			}
+		} else { // num < 0
+			throw new IllegalArgumentException("select(): cannot select a negative number of items");
+		}
+
+		if (subj.size() < num) {
+			throw new IllegalArgumentException("select(): cannot select " + num + " elements from a set of size " + subj.size());
+		} else if (subj.size() == num) {
+			return new ArrayList<E>(subj);
+		}
+
+		List<E> sel = new ArrayList<E>(num);
+		int n = num;
+		Iterator<E> it = subj.iterator();
+
+		switch (inc) {
+		case NONE: ++n; break;
+		case BOTH: --n; // falls through to LEFT
+		case LEFT: sel.add(it.next()); break;
+		}
+
+		for (Integer s: Integers.allocateEvenly(subj.size() - num, n)) {
+			for (int i=0; i<s; ++i) { it.next(); }
+			if (it.hasNext()) { sel.add(it.next()); }
+		}
+
+		return sel;
+	}
+
+	public static <E> List<E> select(SortedSet<E> subj, int n) {
+		return select(subj, n, Inclusivity.NONE);
+	}
+
+}
diff --git a/src/main/java/plugins/Library/util/SortedArraySet.java b/src/main/java/plugins/Library/util/SortedArraySet.java
new file mode 100644
index 00000000..bf9564e6
--- /dev/null
+++ b/src/main/java/plugins/Library/util/SortedArraySet.java
@@ -0,0 +1,219 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+import freenet.support.Fields; // JDK6: use this instead of Arrays.binarySearch
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.SortedMap;
+import java.util.AbstractSet;
+
+/**
+** An immutable {@link SortedSet} backed by a sorted array.
+**
+** DOCUMENT
+**
+** OPT LOW make a SortedArrayMap out of this, and make the BTreeMap node
+** use that (or a SortedArrayTripleMap) instead of TreeMap
+**
+** @author infinity0
+*/
+public class SortedArraySet<E> extends AbstractSet<E>
+implements Set<E>, SortedSet<E>/*, NavigableSet<E>, Cloneable, Serializable*/ {
+
+	/**
+	** The backing array.
+	*/
+	final protected E[] bkarr;
+
+	/**
+	** The comparator used to sort the array.
+	*/
+	final protected Comparator<? super E> comparator;
+
+	/**
+	** Left index, inclusive.
+	*/
+	final int li;
+
+	/**
+	** Right index, exclusive.
+	*/
+	final int ri;
+
+
+	public SortedArraySet(E[] arr) {
+		this(arr, null, false);
+	}
+
+	/**
+	** Construct a new set from the given array, sorted according to the given
+	** comparator. The new set will be '''backed''' by the array, so it must
+	** not be changed elsewhere in the program. Ideally, all references to it
+	** should be discarded, and all access to the array should occur through
+	** this wrapper class.
+	**
+	** @param arr The backing array
+	** @param cmp The comparator used for sorting. A {@code null} value means
+	**        {@linkplain Comparable natural ordering} is used.
+	** @param sorted Whether the array is already sorted. If this is {@code
+	**        true}, the input array will not be sorted again.
+	** @throws IllegalArgumentException if the array contains duplicate
+	**         elements, or if {@code sorted} is {@code true} but the array is
+	**         not actually sorted.
+	*/
+	public SortedArraySet(E[] arr, Comparator<? super E> cmp, boolean sorted) {
+		this(arr, 0, arr.length, cmp);
+		if (!sorted) {
+			Arrays.sort(bkarr, noDuplicateComparator(cmp));
+		} else {
+			Comparator<E> dcmp = noDuplicateComparator(cmp);
+			for (int i=0, j=1; j<arr.length; ++i, ++j) {
+				if (dcmp.compare(arr[i], arr[j]) > 0) {
+					throw new IllegalArgumentException("Array is not sorted");
+				}
+			}
+		}
+	}
+
+	protected SortedArraySet(E[] arr, int l, int r, Comparator<? super E> cmp) {
+		assert(0 <= l && l <= r && r <= arr.length);
+		bkarr = arr;
+		li = l;
+		ri = r;
+		comparator = cmp;
+	}
+
+	public static <E> Comparator<E> noDuplicateComparator(final Comparator<? super E> cmp) {
+		if (cmp == null) {
+			return new Comparator<E>() {
+				public int compare(E e1, E e2) {
+					int d = ((Comparable)e1).compareTo(e2);
+					if (d == 0) { throw new IllegalArgumentException("A set cannot have two duplicate elements"); }
+					return d;
+				}
+			};
+		} else {
+			return new Comparator<E>() {
+				public int compare(E e1, E e2) {
+					int d = cmp.compare(e1, e2);
+					if (d == 0) { throw new IllegalArgumentException("A set cannot have two duplicate elements"); }
+					return d;
+				}
+			};
+		}
+	}
+
+	/*========================================================================
+	  public interface Set
+	 ========================================================================*/
+
+	@Override public int size() {
+		return ri - li;
+	}
+
+	@Override public boolean isEmpty() {
+		return ri == li;
+	}
+
+	@Override public boolean contains(Object o) {
+		// >= 0: binarySearch returns the index of a match, which may be zero
+		return Arrays.binarySearch(bkarr, li, ri, (E)o, comparator) >= 0;
+	}
+
+	@Override public Iterator<E> iterator() {
+		return new Iterator<E>() {
+			int i = li;
+
+			public boolean hasNext() {
+				return i < ri;
+			}
+
+			public E next() {
+				return bkarr[i++];
+			}
+
+			public void remove() {
+				throw new UnsupportedOperationException();
+			}
+		};
+	}
+
+	/* provided by AbstractSet
+	@Override public Object[] toArray() { }
+	*/
+
+	/* provided by AbstractSet
+	@Override public <T> T[] toArray(T[] a) { }
+	*/
+
+	@Override public boolean add(E o) {
+		throw new UnsupportedOperationException("This set is immutable");
+	}
+
+	@Override public boolean remove(Object o) {
+		throw new UnsupportedOperationException("This set is immutable");
+	}
+
+	/* provided by AbstractSet
+	@Override public boolean containsAll(Collection<?> c) { }
+	@Override public boolean addAll(Collection<? extends E> c) { }
+	@Override public boolean retainAll(Collection<?> c) { }
+	@Override public boolean removeAll(Collection<?> c) { }
+	*/
+
+	@Override public void clear() {
+		if (!isEmpty()) {
+			throw new UnsupportedOperationException("This set is immutable");
+		}
+	}
+
+	/* provided by AbstractSet
+	@Override public boolean equals(Object o) { }
+	@Override public int hashCode() { }
+	*/
+
+
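
[Example] Every subset view created by the headSet()/tailSet()/subSet() methods below shares the one backing array and differs only in its half-open window [li, ri). A sketch of the arithmetic (hypothetical values, not part of the patch):

    SortedArraySet<String> s = new SortedArraySet<String>(
            new String[]{ "a", "b", "c", "d", "e" });
    // whole set: window [0, 5), so size() == ri - li == 5
    assert s.headSet("d").size() == 3;          // window [0, 3) = {a, b, c}
    assert s.tailSet("b").first().equals("b");  // window [1, 5) = {b, c, d, e}
    assert s.subSet("b", "d").size() == 2;      // tailSet then headSet: [1, 3)

Since no copying is involved, the views are O(1) to create, but they keep the whole backing array reachable.
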
+	/*========================================================================
+	  public interface SortedSet
+	 ========================================================================*/
+
+	/*@Override**/ public Comparator<? super E> comparator() {
+		return comparator;
+	}
+
+	/*@Override**/ public E first() {
+		return bkarr[li];
+	}
+
+	/*@Override**/ public E last() {
+		return bkarr[ri-1];
+	}
+
+	/*@Override**/ public SortedSet<E> headSet(E to) {
+		int d = Arrays.binarySearch(bkarr, li, ri, to, comparator);
+		if (d < 0) { d = ~d; } // a miss is encoded as -(insertion point) - 1; ~d recovers it
+		if (d < li || ri < d) {
+			throw new IllegalArgumentException("Argument not in this subset's range");
+		}
+		return new SortedArraySet<E>(bkarr, li, d, comparator);
+	}
+
+	/*@Override**/ public SortedSet<E> tailSet(E fr) {
+		int d = Arrays.binarySearch(bkarr, li, ri, fr, comparator);
+		if (d < 0) { d = ~d; } // a miss is encoded as -(insertion point) - 1; ~d recovers it
+		if (d < li || ri < d) {
+			throw new IllegalArgumentException("Argument not in this subset's range");
+		}
+		return new SortedArraySet<E>(bkarr, d, ri, comparator);
+	}
+
+	/*@Override**/ public SortedSet<E> subSet(E fr, E to) {
+		return tailSet(fr).headSet(to);
+	}
+
+}
diff --git a/src/main/java/plugins/Library/util/SortedMapSet.java b/src/main/java/plugins/Library/util/SortedMapSet.java
new file mode 100644
index 00000000..82fff6fb
--- /dev/null
+++ b/src/main/java/plugins/Library/util/SortedMapSet.java
@@ -0,0 +1,164 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.SortedMap;
+import java.util.AbstractSet;
+
+/**
+** A {@link SortedSet} backed by a {@link SortedMap}, with one additional bonus
+** method which I felt was lacking from the {@link Set} interface, namely
+** {@link #get(Object)}.
+**
+** This implementation assumes that for the backing map, looking up items by
+** key is more efficient than by value. It is up to the programmer to ensure
+** that this holds; most maps are designed to have this property though.
+**
+** TODO LOW this could be made to extend a new {@code MapSet} class.
+**
+** @author infinity0
+*/
+public class SortedMapSet<E, M extends SortedMap<E, E>> extends AbstractSet<E>
+implements Set<E>, SortedSet<E>/*, NavigableSet<E>, Cloneable, Serializable*/ {
+
+	/**
+	** {@link SortedMap} backing this {@link SortedSet}.
+	*/
+	final protected M bkmap;
+
+	/**
+	** Construct a set backed by the given {@link SortedMap}.
+	**
+	** It is '''assumed''' that every key already in this map is mapped to
+	** itself. It is up to the caller to ensure that this holds.
+	*/
+	protected SortedMapSet(M m) {
+		bkmap = m;
+	}
+
+	/**
+	** Verifies the backing map for the condition described in the constructor.
+	**
+	** Note: this test is disabled, since it is impossible to perform for
+	** partially-loaded data structures; the code is kept here to remind
+	** future programmers of this.
+	**
+	** @throws UnsupportedOperationException
+	*/
+	final static boolean verifyBackingMapIntegrity(SortedMap<?, ?> m) {
+		/*for (Map.Entry<?, ?> en: m.entrySet()) {
+			assert(en.getKey() == en.getValue());
+		}*/
+		throw new UnsupportedOperationException("impossible to implement");
+	}
+
+	/**
+	** Returns the object reference-identical to the one added into the set.
+	**
+	** @param o An object "equal" (ie. compares 0) to the one in the set.
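
[Example] Because the backing map stores each element as a key mapped to itself, get() can return the canonical instance for any argument that merely compares equal, which makes this class usable as an intern pool. A sketch; makeSet() is a hypothetical factory, since the constructor is protected:

    SortedMapSet<String, TreeMap<String, String>> set = makeSet(); // hypothetical
    String stored = new String("key");
    set.add(stored);                  // stored becomes both key and value
    String probe = new String("key"); // equal to stored, but a distinct instance
    assert set.contains(probe);
    assert set.get(probe) == stored;  // get() hands back the canonical instance
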
+ ** @return The object contained in the set. + ** @throws ClassCastException object cannot be compared with the objects + ** currently in the map + ** @throws NullPointerException o is {@code null} and this map uses + ** natural order, or its comparator does not tolerate {@code null} + ** keys + */ + public E get(Object o) { + return bkmap.get(o); + } + + /*======================================================================== + public interface Set + ========================================================================*/ + + @Override public int size() { + return bkmap.size(); + } + + @Override public boolean isEmpty() { + return bkmap.isEmpty(); + } + + @Override public boolean contains(Object o) { + return bkmap.containsKey(o); + } + + @Override public Iterator iterator() { + return bkmap.keySet().iterator(); + } + + /* provided by AbstractSet + @Override public Object[] toArray() { } + */ + + /* provided by AbstractSet + @Override public T[] toArray(T[] a) { } + */ + + @Override public boolean add(E o) { + if (o == null) { + // BTreeMap doesn't support null keys at the time of coding, but this may change + return !bkmap.containsKey(null)? bkmap.put(null, null) == null: false; + } + return bkmap.put(o, o) == null; + } + + @Override public boolean remove(Object o) { + if (o == null) { + // BTreeMap doesn't support null keys at the time of coding, but this may change + return bkmap.containsKey(null)? bkmap.remove(null) == null: false; + } + return bkmap.remove(o) == o; + } + + /* provided by AbstractSet + @Override public boolean containsAll(Collection c) { } + @Override public boolean addAll(Collection c) { } + @Override public boolean retainAll(Collection c) { } + @Override public boolean removeAll(Collection c) { } + */ + + @Override public void clear() { + bkmap.clear(); + } + + /* provided by AbstractSet + @Override public boolean equals(Object o) { } + @Override public int hashCode() { } + */ + + /*======================================================================== + public interface SortedSet + ========================================================================*/ + + /*@Override**/ public Comparator comparator() { + return bkmap.comparator(); + } + + /*@Override**/ public E first() { + return bkmap.firstKey(); + } + + /*@Override**/ public E last() { + return bkmap.lastKey(); + } + + /*@Override**/ public SortedSet headSet(E to) { + return new SortedMapSet>(bkmap.headMap(to)); + } + + /*@Override**/ public SortedSet tailSet(E fr) { + return new SortedMapSet>(bkmap.tailMap(fr)); + } + + /*@Override**/ public SortedSet subSet(E fr, E to) { + return new SortedMapSet>(bkmap.subMap(fr, to)); + } + +} diff --git a/src/main/java/plugins/Library/util/SortedSetMap.java b/src/main/java/plugins/Library/util/SortedSetMap.java new file mode 100644 index 00000000..ff14de5e --- /dev/null +++ b/src/main/java/plugins/Library/util/SortedSetMap.java @@ -0,0 +1,160 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version) {. See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import java.util.Comparator; +import java.util.Iterator; +import java.util.Collection; +import java.util.Set; +import java.util.Map; +import java.util.SortedSet; +import java.util.SortedMap; +import java.util.AbstractSet; +import java.util.AbstractMap; + +/** +** A {@link SortedMap} view of a {@link SortedSet}, with each key mapped to +** itself. 
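
[Example] SortedMapSet and this SortedSetMap are dual adapters: one presents a SortedMap as a SortedSet, the other presents a SortedSet as a SortedMap of self-mappings. A round-trip sketch (hypothetical usage, not part of the patch):

    import java.util.TreeSet;

    TreeSet<String> set = new TreeSet<String>();
    set.add("a");
    SortedSetMap<String, TreeSet<String>> map =
            new SortedSetMap<String, TreeSet<String>>(set);
    assert map.get("a").equals("a"); // every key maps to itself
    map.put("b", "b");               // accepted: key and value are the same object
    assert set.contains("b");        // writes go straight through to the set
    // map.put("c", "d") would throw IllegalArgumentException, as put() below shows
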
The {@link #put(Object, Object)} method only accepts both arguments +** if they are the same object. +** +** Note: this class assumes the set's comparator is consistent with {@link +** Object#equals(Object)}. FIXME LOW +** +** @author infinity0 +*/ +public class SortedSetMap> extends AbstractMap +implements Map, SortedMap/*, NavigableMap, Cloneable, Serializable*/ { + + /** + ** {@link SortedSet} backing this {@link SortedMap}. + */ + final protected S bkset; + + /** + ** Construct a map-view of the given set. + */ + public SortedSetMap(S s) { + bkset = s; + } + + /*======================================================================== + public interface Map + ========================================================================*/ + + @Override public int size() { + return bkset.size(); + } + + @Override public boolean isEmpty() { + return bkset.isEmpty(); + } + + @Override public boolean containsKey(Object o) { + return bkset.contains(o); + } + + @Override public boolean containsValue(Object o) { + return bkset.contains(o); + } + + @Override public void clear() { + bkset.clear(); + } + + @Override public E put(E k, E v) { + if (k != v) { + throw new IllegalArgumentException("SortedMapSet: cannot accept a non-self mapping"); + } + return bkset.add(k)? null: v; + } + + @Override public E get(Object o) { + return bkset.contains(o)? (E)o: null; + } + + @Override public E remove(Object o) { + return bkset.remove(o)? (E)o: null; + } + + private transient Set> entries; + @Override public Set> entrySet() { + if (entries == null) { + entries = new AbstractSet>() { + + @Override public int size() { return bkset.size(); } + + @Override public Iterator> iterator() { + return new Iterator>() { + final Iterator it = bkset.iterator(); + /*@Override**/ public boolean hasNext() { return it.hasNext(); } + /*@Override**/ public Map.Entry next() { + E e = it.next(); + return Maps.$$(e, e); + } + /*@Override**/ public void remove() { it.remove(); } + }; + } + + @Override public void clear() { + bkset.clear(); + } + + @Override public boolean contains(Object o) { + if (!(o instanceof Map.Entry)) { return false; } + Map.Entry en = (Map.Entry)o; + if (en.getKey() != en.getValue()) { return false; } + return bkset.contains(en.getKey()); + } + + @Override public boolean remove(Object o) { + boolean c = contains(o); + if (c) { bkset.remove(((Map.Entry)o).getKey()); } + return c; + } + + }; + } + return entries; + } + + @Override public SortedSet keySet() { + // FIXME LOW Map.keySet() should be remove-only + return bkset; + } + + @Override public Collection values() { + // FIXME LOW Map.values() should be remove-only + return bkset; + } + + /*======================================================================== + public interface SortedMap + ========================================================================*/ + + /*@Override**/ public Comparator comparator() { + return bkset.comparator(); + } + + /*@Override**/ public E firstKey() { + return bkset.first(); + } + + /*@Override**/ public E lastKey() { + return bkset.last(); + } + + /*@Override**/ public SortedMap headMap(E to) { + return new SortedSetMap>(bkset.headSet(to)); + } + + /*@Override**/ public SortedMap tailMap(E fr) { + return new SortedSetMap>(bkset.tailSet(fr)); + } + + /*@Override**/ public SortedMap subMap(E fr, E to) { + return new SortedSetMap>(bkset.subSet(fr, to)); + } + + +} diff --git a/src/main/java/plugins/Library/util/TaskAbortExceptionConvertor.java b/src/main/java/plugins/Library/util/TaskAbortExceptionConvertor.java new file 
mode 100644 index 00000000..5cefdd02 --- /dev/null +++ b/src/main/java/plugins/Library/util/TaskAbortExceptionConvertor.java @@ -0,0 +1,13 @@ +package plugins.Library.util; + +import plugins.Library.util.concurrent.ExceptionConvertor; +import plugins.Library.util.exec.TaskAbortException; + +public class TaskAbortExceptionConvertor implements + ExceptionConvertor { + + public TaskAbortException convert(RuntimeException e) { + return new TaskAbortException(e.getMessage(), e); + } + +} diff --git a/src/main/java/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java b/src/main/java/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java new file mode 100644 index 00000000..d9e4c802 --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/BoundedPriorityBlockingQueue.java @@ -0,0 +1,196 @@ +package plugins.Library.util.concurrent; + +import java.util.Collection; +import java.util.Comparator; +import java.util.Iterator; +import java.util.concurrent.PriorityBlockingQueue; +import java.util.concurrent.TimeUnit; + +public class BoundedPriorityBlockingQueue extends PriorityBlockingQueue { + + int maxSize; + + /* Locking strategy: + * We do not have access to the parent's lock object. + * We need to be able to wait for the queue to become non-full, meaning we need to be able to signal it. + * We need to be able to do so reliably i.e. we need a lock surrounding it. + * We could do this with a Lock and a Condition, but the easiest solution is just to lock on an Object. + * Because callers might expect us to use an internal lock, we'll use a separate object. + */ + protected final Object fullLock = new Object(); + + public BoundedPriorityBlockingQueue(int capacity) { + super(capacity); + this.maxSize = capacity; + } + + public BoundedPriorityBlockingQueue(int capacity, Comparator comp) { + super(capacity, comp); + this.maxSize = capacity; + } + + // add() calls offer() + // put() calls offer() + + @Override + public void put(E o) { + synchronized(fullLock) { + if(super.size() < maxSize) { + super.offer(o); + return; + } + // We have to wait. + while(true) { + try { + fullLock.wait(); + } catch (InterruptedException e) { + // Ignore. + // PriorityBlockingQueue.offer doesn't throw it so we can't throw it. + } + if(super.size() < maxSize) { + super.offer(o); + return; + } + } + } + } + + @Override + public boolean offer(E o, long timeout, TimeUnit unit) { + synchronized(fullLock) { + if(super.size() < maxSize) { + super.offer(o); + return true; + } + long waitTime = unit.toMillis(timeout); + if(waitTime <= 0) waitTime = 0; + long now = System.currentTimeMillis(); + // We have to wait. + long end = now + waitTime; + while(now < end) { + try { + fullLock.wait((int)(Math.min(Integer.MAX_VALUE, end - now))); + } catch (InterruptedException e) { + // Ignore. + // PriorityBlockingQueue.offer doesn't throw it so we can't throw it. 
+ } + if(super.size() < maxSize) { + super.offer(o); + return true; + } + now = System.currentTimeMillis(); + } + return false; + } + } + + @Override + public boolean offer(E o) { + synchronized(fullLock) { + if(super.size() < maxSize) { + super.offer(o); + return true; + } else return false; + } + } + + public E poll() { + E o = super.poll(); + if(o == null) return null; + synchronized(fullLock) { + fullLock.notifyAll(); + } + return o; + } + + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + E o = super.poll(timeout, unit); + if(o == null) return null; + synchronized(fullLock) { + fullLock.notifyAll(); + } + return o; + } + + public E take() throws InterruptedException { + E o = super.take(); + synchronized(fullLock) { + fullLock.notifyAll(); + } + return o; + } + + // peek() is safe. + // size() is safe. + + public int remainingCapacity() { + synchronized(fullLock) { + return maxSize - size(); + } + } + + public boolean remove(Object o) { + if(super.remove(o)) { + synchronized(fullLock) { + fullLock.notifyAll(); + } + return true; + } else return false; + } + + // contains() is safe + // toArray() is safe + // toString() is safe + + public int drainTo(Collection c) { + int moved = super.drainTo(c); + if(moved > 0) { + synchronized(fullLock) { + fullLock.notifyAll(); + } + } + return moved; + } + + public int drainTo(Collection c, int maxElements) { + int moved = super.drainTo(c, maxElements); + if(moved > 0) { + synchronized(fullLock) { + fullLock.notifyAll(); + } + } + return moved; + } + + public void clear() { + super.clear(); + synchronized(fullLock) { + fullLock.notifyAll(); + } + } + + public Iterator iterator() { + final Iterator underlying = super.iterator(); + return new Iterator() { + + public boolean hasNext() { + return underlying.hasNext(); + } + + public E next() { + return underlying.next(); + } + + public void remove() { + underlying.remove(); + synchronized(fullLock) { + fullLock.notifyAll(); + } + } + + }; + } + + // writeObject() is safe + +} diff --git a/src/main/java/plugins/Library/util/concurrent/ExceptionConvertor.java b/src/main/java/plugins/Library/util/concurrent/ExceptionConvertor.java new file mode 100644 index 00000000..62454895 --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/ExceptionConvertor.java @@ -0,0 +1,7 @@ +package plugins.Library.util.concurrent; + +public interface ExceptionConvertor { + + public X convert(RuntimeException e); + +} diff --git a/src/main/java/plugins/Library/util/concurrent/Executors.java b/src/main/java/plugins/Library/util/concurrent/Executors.java new file mode 100644 index 00000000..b8d9513f --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/Executors.java @@ -0,0 +1,73 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util.concurrent; + +import plugins.Library.util.func.SafeClosure; + +import java.util.concurrent.Executor; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; // WORKAROUND javadoc bug #4464323 +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + +/** +** Class providing various {@link Executor}s. 
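
[Example] The queue above splits its locking: the PriorityBlockingQueue parent still guards the element store, while the dedicated fullLock object is only used to let producers wait for space. Every consuming operation (poll(), take(), remove(), drainTo(), clear(), even Iterator.remove()) calls fullLock.notifyAll() so that a blocked put() or offer() can re-check capacity. The same shape in isolation (a hypothetical stand-alone sketch, not the patch's class):

    // Hypothetical bounded hand-off built on a private lock object.
    final Object lock = new Object();
    // producer:
    synchronized (lock) {
        while (currentSize() >= capacity) { // hypothetical accessors
            try { lock.wait(); } catch (InterruptedException e) { /* re-check */ }
        }
        enqueue(item);                      // hypothetical
    }
    // consumer, after removing an element:
    synchronized (lock) { lock.notifyAll(); }

Using a private lock rather than this keeps callers from accidentally synchronizing on the queue and interfering with the signalling protocol.
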
+** +** @author infinity0 +*/ +public class Executors { + + /** + ** A {@link SafeClosure} for shutting down an {@link ExecutorService}. (If + ** the input is not an {@link ExecutorService}, does nothing.) + */ + final public static SafeClosure CLEANUP_EXECSERV = new SafeClosure() { + /*@Override**/ public void invoke(Executor exec) { + if (!(exec instanceof ExecutorService)) { return; } + ExecutorService x = (ExecutorService)exec; + x.shutdown(); + try { + while (!x.awaitTermination(1, TimeUnit.SECONDS)); + } catch (InterruptedException e) { } + } + }; + + /** + ** A JVM-wide executor that objects/classes can reference when they don't + ** want to create a new executor themselves. This is a wrapper around a + ** real executor, which can be set ({@link #setDefaultExecutor(Executor)}) + ** without calling methods on every object/class that uses the wrapper. + ** If no backing executor has been set by the time the first call to {@link + ** Executor#execute(Runnable)} is made, a {@link ThreadPoolExecutor} is + ** created, with a thread-cache size of 64, timeout of 1s, and rejection + ** policy of "{@link CallerRunsPolicy caller runs}". + */ + final public static Executor DEFAULT_EXECUTOR = new Executor() { + /*@Override**/ public void execute(Runnable r) { + synchronized (Executors.class) { + if (default_exec == null) { + default_exec = new ThreadPoolExecutor( + 1, 0x40, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), + new ThreadPoolExecutor.CallerRunsPolicy() + ); + } + } + default_exec.execute(r); + } + }; + + public static synchronized void setDefaultExecutor(Executor e) { + default_exec = e; + } + + /** + ** The executor backing {@link #DEFAULT_EXECUTOR}. + */ + private static Executor default_exec = null; + + private Executors() { } + +} diff --git a/src/main/java/plugins/Library/util/concurrent/Notifier.java b/src/main/java/plugins/Library/util/concurrent/Notifier.java new file mode 100644 index 00000000..c1a6fd7f --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/Notifier.java @@ -0,0 +1,31 @@ +package plugins.Library.util.concurrent; + +public class Notifier { + + private boolean notified; + + public synchronized void notifyUpdate() { + notified = true; + notifyAll(); + } + + public synchronized void waitUpdate(int maxWait) { + long start = -1; + long now = -1; + while(!notified) { + if(start == -1) { + start = now = System.currentTimeMillis(); + } else { + now = System.currentTimeMillis(); + } + if(start + maxWait <= now) return; + try { + wait((start + maxWait - now)); + } catch (InterruptedException e) { + // Ignore + } + } + notified = false; + } + +} diff --git a/src/main/java/plugins/Library/util/concurrent/ObjectProcessor.java b/src/main/java/plugins/Library/util/concurrent/ObjectProcessor.java new file mode 100644 index 00000000..b0049a34 --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/ObjectProcessor.java @@ -0,0 +1,400 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util.concurrent; + +import static plugins.Library.util.func.Tuples.X2; +import static plugins.Library.util.func.Tuples.X3; + +import java.lang.ref.WeakReference; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executor; +import java.util.concurrent.RejectedExecutionException; + +import plugins.Library.util.func.Closure; +import plugins.Library.util.func.SafeClosure; +import freenet.support.Logger; + +/** +** A class that wraps around an {@link Executor}, for processing any given type +** of object, not just {@link Runnable}. Each object must be accompanied by a +** secondary "deposit" object, which is returned with the object when it has +** been processed. Any exceptions thrown are also returned. +** +** @param Type of object to be processed +** @param Type of object to be used as a deposit +** @param Type of exception thrown by {@link #clo} +** @author infinity0 +*/ +public class ObjectProcessor implements Scheduler { + + final protected BlockingQueue in; + final protected BlockingQueue> out; + final protected Map dep; + final protected Closure clo; + final protected Executor exec; + final protected ExceptionConvertor convertor; + // Allows a group of ObjectProcessor's to all notify the same object so that the caller doesn't have to poll. + protected Notifier notifier; + + protected volatile boolean open = true; + protected int dispatched = 0; + protected int completed = 0; + protected int started = 0; + + private static volatile boolean logMINOR; + private static volatile boolean logDEBUG; + + static { + Logger.registerClass(ObjectProcessor.class); + } + + // Most ObjectProcessor's are likely to be autostart()'ed, and this way we can + // still use ConcurrentMap for pending while having garbage collection. + private final WeakReference myRef = new WeakReference(this); + + // TODO NORM make a more intelligent way of adjusting this + final public static int default_maxconc = 0x28; + int maxconc = default_maxconc; + + final protected SafeClosure> postProcess = new SafeClosure>() { + /*@Override**/ public void invoke(X2 res) { + try { + out.put(res); + if(notifier != null) notifier.notifyUpdate(); + synchronized(ObjectProcessor.this) { ++completed; } + } catch (InterruptedException e) { + throw new UnsupportedOperationException(); + } + } + }; + + public int outputCapacity() { + return out.remainingCapacity(); + } + + public int outputSize() { + return out.size(); + } + + final private static ConcurrentMap, Boolean> pending = new ConcurrentHashMap, Boolean>(); + // This must only be modified in a static synchronized block + private static Thread auto = null; + + /** + ** Constructs a new processor. The processor itself will be thread-safe + ** as long as the queues and deposit map are not exposed to other threads, + ** and the closure's invoke method is also thread-safe. + ** + ** If the {@code closure} parameter is {@code null}, it is expected that + ** {@link #createJobFor(Object)} will be overridden appropriately. + ** + ** @param input Queue for input items + ** @param output Queue for output/error items + ** @param deposit Map for item deposits + ** @param closure Closure to call on each item + ** @param executor Executor to run each closure call + ** @param autostart Whether to start an {@link #auto()} autohandler + ** @param notifier A Notifier to tell whenever something has happened - e.g. 
results are ready. + */ + public ObjectProcessor( + BlockingQueue input, BlockingQueue> output, Map deposit, + Closure closure, Executor executor, ExceptionConvertor conv, Notifier n + ) { + in = input; + out = output; + dep = deposit; + clo = closure; + exec = executor; + convertor = conv; + notifier = n; + } + + public ObjectProcessor( + BlockingQueue input, BlockingQueue> output, Map deposit, + Closure closure, Executor executor, ExceptionConvertor conv + ) { + this(input, output, deposit, closure, executor, conv, null); + } + + // FIXME should notifier be volatile? If we set it before starting, maybe that's not necessary? + public void setNotifier(Notifier n) { + this.notifier = n; + } + + public synchronized void setMaxConc(int x) { + maxconc = x; + } + + /** + ** Safely submits the given item and deposit to the given processer. Only + ** use this when the input queue's {@link BlockingQueue#put(Object)} method + ** does not throw {@link InterruptedException}, such as that of {@link + ** java.util.concurrent.PriorityBlockingQueue}. + */ + public static void submitSafe(ObjectProcessor proc, T item, E deposit) { + try { + proc.submit(item, deposit); + } catch (InterruptedException e) { + throw new IllegalArgumentException("ObjectProcessor: abuse of submitSafe(). Blame the programmer, who did not know what they were doing", e); + } + } + + /** + ** Submits an item for processing, with the given deposit. + ** + ** @throws IllegalStateException if the processor has already been {@link + ** #close() closed} + ** @throws IllegalArgumentException if the item is already being held + */ + public void submit(T item, E deposit) throws InterruptedException { + synchronized(this) { + if (!open) { throw new IllegalStateException("ObjectProcessor: not open"); } + if (dep.containsKey(item)) { + throw new IllegalArgumentException("ObjectProcessor: object " + item + " already submitted"); + } + + dep.put(item, deposit); + } + // in.put() can block. Don't hold the outer lock during pushing. + // Note that this can result in more stuff being in dep than is in in. This is okay, assuming that + // we don't have an infinite number of calling threads. + in.put(item); + } + + /** + ** Updates the deposit for a given item. + ** + ** @throws IllegalStateException if the processor has already been {@link + ** #close() closed} + ** @throws IllegalArgumentException if the item is not currently being held + */ + public synchronized void update(T item, E deposit) { + if (!open) { throw new IllegalStateException("ObjectProcessor: not open"); } + if (!dep.containsKey(item)) { + throw new IllegalArgumentException("ObjectProcessor: object " + item + " not yet submitted"); + } + + dep.put(item, deposit); + } + + /** + ** Retrieved a processed item, along with its deposit and any exception + ** that caused processing to abort. + */ + public X3 accept() throws InterruptedException { + X2 item = out.take(); + // DO NOT hold the lock while blocking on out. + synchronized(this) { + return X3(item._0, dep.remove(item._0), item._1); + } + } + + /** + ** Whether there are any unprocessed items (including completed tasks not + ** yet retrieved by the submitter). + */ + public synchronized boolean hasPending() { + return !dep.isEmpty(); + } + + /** + ** Whether there are any completed items that have not yet been retrieved. + */ + public synchronized boolean hasCompleted() { + return !out.isEmpty(); + } + + /** + ** Number of unprocessed tasks. 
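
[Example] A full round trip, then: the caller submits items paired with deposits, the closure runs on an executor thread, and accept() later hands back the item, its deposit, and any exception (converted from a RuntimeException where necessary). A wiring sketch with hypothetical type arguments; the X3 field names follow the X2/X3 tuple usage visible elsewhere in this patch:

    // Hypothetical ObjectProcessor<String, Integer, TaskAbortException> wiring.
    ObjectProcessor<String, Integer, TaskAbortException> proc =
        new ObjectProcessor<String, Integer, TaskAbortException>(
            new PriorityBlockingQueue<String>(),
            new LinkedBlockingQueue<X2<String, TaskAbortException>>(),
            new HashMap<String, Integer>(),
            closure,                  // assumed Closure<String, TaskAbortException>
            Executors.DEFAULT_EXECUTOR,
            new TaskAbortExceptionConvertor()
        ).autostart();

    proc.submit("item", 42);          // the deposit travels with the item
    X3<String, Integer, TaskAbortException> res = proc.accept(); // blocks for a result
    if (res._2 != null) { /* the task aborted; res._1 is the deposit, 42 */ }
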
+ */ + public synchronized int size() { + return dep.size(); + } + + /** + ** Retrieves an item by calling {@link BlockingQueue#take()} on the input + ** queue. If this succeeds, a job is {@linkplain #createJobFor(Object) + ** created} for it, and sent to {@link #exec} to be executed. + ** + ** This method is provided for completeness, in case anyone needs it; + ** {@link #auto()} should be adequate for most purposes. + ** + ** @throws InterruptedExeception if interrupted whilst waiting + */ + public synchronized void dispatchTake() throws InterruptedException { + throw new UnsupportedOperationException("not implemented"); + /* + * TODO NORM first needs a way of obeying maxconc + T item = in.take(); + exec.execute(createJobFor(item)); + ++dispatched; + */ + } + + /** + ** Retrieves an item by calling {@link BlockingQueue#poll()} on the input + ** queue. If this succeeds, a job is {@linkplain #createJobFor(Object) + ** created} for it, and sent to {@link #exec} to be executed. + ** + ** This method is provided for completeness, in case anyone needs it; + ** {@link #auto()} should be adequate for most purposes. + ** + ** @return Whether a task was retrieved and executed + */ + public boolean dispatchPoll() { + synchronized(this) { + if (dispatched - completed >= maxconc) { return false; } + } + // DO NOT hold the lock while blocking on in. + T item = in.poll(); + synchronized(this) { + if (item == null) { return false; } + exec.execute(createJobFor(item)); + ++dispatched; + return true; + } + } + + /** + ** Creates a {@link Runnable} to process the item and push it onto the + ** output queue, along with any exception that aborted the process. + ** + ** The default implementation invokes {@link #clo} on the item, and then + ** adds the appropriate data onto the output queue. + */ + protected Runnable createJobFor(final T item) { + if (clo == null) { + throw new IllegalStateException("ObjectProcessor: no closure given, but createJobFor() was not overidden"); + } + return new Runnable() { + /*@Override**/ public void run() { + X ex = null; + synchronized(ObjectProcessor.this) { ++started; } + RuntimeException ee = null; + try { clo.invoke(item); } + // FIXME NORM this could throw RuntimeException + catch (RuntimeException e) { + Logger.error(this, "Caught "+e, e); + System.err.println("In ObjProc-"+name+" : "+e); + e.printStackTrace(); + ex = convertor.convert(e); + } + catch (Exception e) { ex = (X)e; } + postProcess.invoke(X2(item, ex)); + } + }; + } + + /** + ** Start a new thread to run the {@link #pending} processors, if one is not + ** already running. + */ + private static synchronized void ensureAutoHandler() { + if (auto != null) { return; } + auto = new Thread() { + @Override public void run() { + final int timeout = 4; + int t = timeout; + while (!pending.isEmpty() && (t=timeout) == timeout || t-- > 0) { + for (Iterator> it = pending.keySet().iterator(); it.hasNext();) { + WeakReference ref = it.next(); + ObjectProcessor proc = ref.get(); + if(proc == null) { + it.remove(); + continue; + } + try { + boolean o = proc.open; + while (proc.dispatchPoll()); + if (!o) { it.remove(); } + } catch (RejectedExecutionException e) { + // FIXME NORM + // neither Executors.DEFAULT_EXECUTOR nor Freenet's in-built executors + // throw this, so this is not a high priority + Logger.error(this, "REJECTED EXECUTION", e); + } + } + try { + // FIXME why do we sleep here at all? + // FIXME for small jobs, such as merging to disk in SpiderIndexUploader, this can + // dominate the execution time! 
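
[Example] dispatchPoll() above enforces the concurrency cap with two monotonic counters rather than by tracking threads: dispatched - completed is the number of jobs currently in flight, and polling simply stops once that window reaches maxconc. The same pattern in isolation (hypothetical sketch):

    // Hypothetical in-flight window: two counters, one lock.
    class Window {
        private int dispatched, completed;
        private final int maxconc = 0x28; // same default as the patch

        synchronized boolean tryAcquire() {
            if (dispatched - completed >= maxconc) { return false; } // window full
            ++dispatched;
            return true;
        }

        synchronized void release() { ++completed; } // when a job finishes
    }

In the patch the release side lives in the postProcess closure, which bumps completed only after the result has been queued, so the window also throttles when consumers are slow to drain the output queue.
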
+ // FIXME the executors are perfectly capable of limiting the number of threads running... + Thread.sleep(100); + } catch (InterruptedException e) { + // TODO LOW log this somewhere + } + // System.out.println("pending " + pending.size()); + + if (t > 0) { continue; } + synchronized (ObjectProcessor.class) { + // if auto() was called just before we entered this synchronized block, + // then its ensureAutoHandler() would have done nothing. so we want to keep + // this thread running to take care of the new addition. + if (!pending.isEmpty()) { continue; } + // otherwise we can safely discard this thread, since ensureAutoHandler() + // cannot be called as long as we are in this block. + auto = null; + return; + } + } + throw new AssertionError("ObjectProcessor: bad exit in autohandler. this is a bug; please report."); + } + }; + auto.start(); + } + + /** + ** Add this processor to the collection of {@link #pending} processes, and + ** makes sure there is a thread to handle them. + ** + ** @return Whether the processor was not already being handled. + */ + public boolean auto() { + Boolean r = ObjectProcessor.pending.put(myRef, Boolean.TRUE); + ObjectProcessor.ensureAutoHandler(); + return r == null; + } + + /** + ** Call {@link #auto()} and return {@code this}. + */ + public ObjectProcessor autostart() { + auto(); + return this; + } + + /** + ** Stop accepting new submissions or deposit updates. Held items can still + ** be processed and retrieved, and if an {@linkplain #auto() auto-handler} + ** is running, it will run until all such items have been processed. + */ + /*@Override**/ public void close() { + open = false; + } + + // public class Object + + /** + ** {@inheritDoc} + ** + ** This implementation just calls {@link #close()}. + */ + @Override public void finalize() { + close(); + } + + + protected String name; + public void setName(String n) { + name = n; + } + @Override public String toString() { + return "ObjProc-" + name + ":{" + size() + "|" + dispatched + "|" + started + "|" + completed + "}"; + } + +} diff --git a/src/main/java/plugins/Library/util/concurrent/Scheduler.java b/src/main/java/plugins/Library/util/concurrent/Scheduler.java new file mode 100644 index 00000000..e1759445 --- /dev/null +++ b/src/main/java/plugins/Library/util/concurrent/Scheduler.java @@ -0,0 +1,21 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util.concurrent; + +/** +** An interface for a general class that accepts objects to be acted on. +** +** TODO NORM maybe just get rid of this since we now have ObjectProcessor, or +** import its methods here? +** +** @author infinity0 +*/ +public interface Scheduler extends java.io.Closeable { + + /** + ** Stop accepting objects (but continue any started actions). + */ + public void close(); + +} diff --git a/src/main/java/plugins/Library/util/event/AbstractSweeper.java b/src/main/java/plugins/Library/util/event/AbstractSweeper.java new file mode 100644 index 00000000..3eaa8d03 --- /dev/null +++ b/src/main/java/plugins/Library/util/event/AbstractSweeper.java @@ -0,0 +1,118 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
diff --git a/src/main/java/plugins/Library/util/concurrent/Scheduler.java b/src/main/java/plugins/Library/util/concurrent/Scheduler.java
new file mode 100644
index 00000000..e1759445
--- /dev/null
+++ b/src/main/java/plugins/Library/util/concurrent/Scheduler.java
@@ -0,0 +1,21 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.concurrent;
+
+/**
+** An interface for a general class that accepts objects to be acted on.
+**
+** TODO NORM maybe just get rid of this since we now have ObjectProcessor, or
+** import its methods here?
+**
+** @author infinity0
+*/
+public interface Scheduler extends java.io.Closeable {
+
+	/**
+	** Stop accepting objects (but continue any started actions).
+	*/
+	public void close();
+
+}
diff --git a/src/main/java/plugins/Library/util/event/AbstractSweeper.java b/src/main/java/plugins/Library/util/event/AbstractSweeper.java
new file mode 100644
index 00000000..3eaa8d03
--- /dev/null
+++ b/src/main/java/plugins/Library/util/event/AbstractSweeper.java
@@ -0,0 +1,118 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.event;
+
+/**
+** A partial implementation of {@link Sweeper}, defining some high-level
+** methods in terms of lower ones.
+**
+** This implementation is '''not''' thread-safe.
+**
+** @author infinity0
+*/
+abstract public class AbstractSweeper<T> implements Sweeper<T> {
+
+	protected State state;
+	final public boolean once;
+
+	/**
+	** Construct a new sweeper.
+	**
+	** @param autostart Whether to construct the sweeper already open
+	** @param onceonly Whether the sweeper can only be opened once
+	*/
+	protected AbstractSweeper(boolean autostart, boolean onceonly) {
+		state = (autostart)? State.OPEN: State.NEW;
+		once = onceonly;
+	}
+
+	/**
+	** Add an object to the underlying collection. Implementations need not
+	** check for a valid state; this is done by {@link #acquire(Object)}.
+	**
+	** @return Whether the object was added
+	*/
+	abstract protected boolean add(T object);
+
+	/**
+	** Remove an object from the underlying collection. Implementations need not
+	** check for a valid state; this is done by {@link #release(Object)}.
+	**
+	** @return Whether the object was removed
+	*/
+	abstract protected boolean remove(T object);
+
+	/**
+	** Helper method for implementations that also implement {@link Iterable}.
+	**
+	** @param it The iterator to remove an element from
+	*/
+	protected void releaseFrom(java.util.Iterator<T> it) {
+		if (state == State.NEW) { throw new IllegalStateException("Sweeper: not yet opened"); }
+		if (state == State.CLEARED) { throw new IllegalStateException("Sweeper: already cleared"); }
+
+		it.remove();
+		if (state == State.CLOSED && !it.hasNext()) { state = State.CLEARED; }
+	}
+
+	/*========================================================================
+	  public interface Sweeper
+	  ========================================================================*/
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation also throws {@link IllegalStateException} if {@code
+	** once} is {@code true} and the sweeper has already been opened once.
+	*/
+	/*@Override**/ public void open() {
+		if (state != State.NEW && (once || state != State.CLOSED)) { throw new IllegalStateException("Sweeper: already opened or cleared"); }
+		state = State.OPEN;
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public boolean acquire(T object) {
+		if (state != State.OPEN) { throw new IllegalStateException("Sweeper: not open"); }
+
+		return add(object);
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public boolean release(T object) {
+		if (state == State.NEW) { throw new IllegalStateException("Sweeper: not yet opened"); }
+		if (state == State.CLEARED) { throw new IllegalStateException("Sweeper: already cleared"); }
+
+		boolean b = remove(object);
+		if (state == State.CLOSED && size() == 0) { state = State.CLEARED; }
+		return b;
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public void close() {
+		if (state != State.OPEN) { throw new IllegalStateException("Sweeper: not open"); }
+		state = State.CLOSED;
+		if (size() == 0) { state = State.CLEARED; }
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public State getState() {
+		return state;
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public boolean isCleared() {
+		return state == State.CLEARED;
+	}
+
+}
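The state machine above in one illustrative walk-through (editorial sketch; it uses the CountingSweeper subclass that follows, and all names are from this package):

    Sweeper<Object> sw = new CountingSweeper<Object>(false, true);  // state NEW
    Object token = new Object();
    sw.open();              // state OPEN
    sw.acquire(token);      // one object tracked
    sw.close();             // state CLOSED; the object is still held
    sw.release(token);      // count reaches 0 while CLOSED, so state becomes CLEARED
    assert sw.isCleared();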
diff --git a/src/main/java/plugins/Library/util/event/CountingSweeper.java b/src/main/java/plugins/Library/util/event/CountingSweeper.java
new file mode 100644
index 00000000..f883209d
--- /dev/null
+++ b/src/main/java/plugins/Library/util/event/CountingSweeper.java
@@ -0,0 +1,56 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.event;
+
+/**
+** A {@link Sweeper} which only counts the number of objects added. It does
+** not care about which objects these are; it assumes the user handles this.
+**
+** @author infinity0
+*/
+public class CountingSweeper<T> extends AbstractSweeper<T> {
+
+	protected int count = 0;
+
+	/**
+	** Construct a new sweeper.
+	**
+	** @param autostart Whether to construct the sweeper already open
+	** @param onceonly Whether the sweeper can only be opened once
+	*/
+	public CountingSweeper(boolean autostart, boolean onceonly) {
+		super(autostart, onceonly);
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation will always return true, since it does not keep
+	** track of the actual objects in the collection.
+	*/
+	@Override protected boolean add(T object) {
+		++count;
+		return true;
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation will always return true, since it does not keep
+	** track of the actual objects in the collection.
+	*/
+	@Override protected boolean remove(T object) {
+		if (count == 0) { throw new IllegalStateException("CountingSweeper: no objects to release: " + object); }
+		--count;
+		return true;
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public int size() {
+		return count;
+	}
+
+}
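A hedged sketch of the "delay an action until everything is released" pattern this class is meant for (editorial; the Runnables are placeholders for real work):

    CountingSweeper<Runnable> outstanding = new CountingSweeper<Runnable>(true, true);
    Runnable taskA = null, taskB = null;   // placeholders
    outstanding.acquire(taskA);
    outstanding.acquire(taskB);
    outstanding.close();                   // no more work will be registered
    // as each task finishes:
    outstanding.release(taskA);
    outstanding.release(taskB);            // count reaches 0 while CLOSED
    if (outstanding.isCleared()) { /* run the delayed action here */ }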
diff --git a/src/main/java/plugins/Library/util/event/Sweeper.java b/src/main/java/plugins/Library/util/event/Sweeper.java
new file mode 100644
index 00000000..ea8de10c
--- /dev/null
+++ b/src/main/java/plugins/Library/util/event/Sweeper.java
@@ -0,0 +1,70 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.event;
+
+/**
+** A class which holds a group of objects, eg. for the purposes of delaying an
+** action until all objects have been released.
+**
+** @author infinity0
+*/
+public interface Sweeper<T> extends java.io.Closeable {
+
+	public enum State { NEW, OPEN, CLOSED, CLEARED }
+
+	/**
+	** Start accepting objects. If the trigger condition is reached whilst the
+	** sweeper is accepting objects, execution should be delayed until {@link
+	** #close()} is called.
+	**
+	** @throws IllegalStateException if the current state is not {@link
+	**         State#NEW NEW} or {@link State#CLOSED CLOSED}
+	*/
+	public void open();
+
+	/**
+	** Acquire the given object for tracking.
+	**
+	** @return Whether the object was acquired
+	** @throws IllegalStateException if the current state is not {@link
+	**         State#OPEN OPEN}
+	*/
+	public boolean acquire(T object);
+
+	/**
+	** Release the given object from being tracked.
+	**
+	** When the sweeper is {@link State#CLOSED CLOSED} and there are no
+	** more objects to track, it becomes {@link State#CLEARED CLEARED}.
+	**
+	** @return Whether the object was released
+	** @throws IllegalStateException if the current state is not {@link
+	**         State#OPEN OPEN} or {@link State#CLOSED CLOSED}.
+	*/
+	public boolean release(T object);
+
+	/**
+	** Stop accepting objects.
+	**
+	** @throws IllegalStateException if the current state is not {@link
+	**         State#OPEN OPEN}.
+	*/
+	public void close();
+
+	/**
+	** Returns the current state.
+	*/
+	public State getState();
+
+	/**
+	** Returns whether the sweeper is {@link State#CLEARED CLEARED}.
+	*/
+	public boolean isCleared();
+
+	/**
+	** Returns the number of objects held by the sweeper.
+	*/
+	public int size();
+
+}
diff --git a/src/main/java/plugins/Library/util/event/TrackingSweeper.java b/src/main/java/plugins/Library/util/event/TrackingSweeper.java
new file mode 100644
index 00000000..a1e00849
--- /dev/null
+++ b/src/main/java/plugins/Library/util/event/TrackingSweeper.java
@@ -0,0 +1,112 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.event;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+
+/**
+** A {@link Sweeper} which uses a given {@link Collection} to keep track of the
+** objects added. It also allows iteration through these objects, and supports
+** calls to {@link Iterator#remove()}.
+**
+** @author infinity0
+*/
+public class TrackingSweeper<T, C extends Collection<T>> extends AbstractSweeper<T> implements Iterable<T> {
+
+	final protected C objs;
+	final protected C objs_i;
+
+	/**
+	** Construct a new sweeper.
+	**
+	** If {@code coll_immute} is {@code null}, then {@code coll} will be used
+	** in its place. This has a slight performance advantage over a wrapping
+	** "immutable" view; but only use it when trusted code is going to access
+	** the sweeper.
+	**
+	** @param autostart Whether to construct the sweeper already open
+	** @param onceonly Whether the sweeper can only be opened once
+	** @param coll A {@link Collection} to hold the items in
+	** @param coll_immute An immutable view of the same collection
+	*/
+	public TrackingSweeper(boolean autostart, boolean onceonly, C coll, C coll_immute) {
+		super(autostart, onceonly);
+		if (!coll.isEmpty()) {
+			throw new IllegalArgumentException("TrackingSweeper: cannot use a non-empty collection");
+		}
+		objs = coll;
+		objs_i = (coll_immute == null)? coll: coll_immute;
+	}
+
+	/**
+	** Construct a new sweeper, automatically inferring an immutable view using
+	** {@link #inferImmutable(Collection)}.
+	**
+	** @param autostart Whether to construct the sweeper already open
+	** @param coll A {@link Collection} to hold the items in
+	*/
+	public TrackingSweeper(boolean autostart, boolean onceonly, C coll) {
+		this(autostart, onceonly, coll, inferImmutable(coll));
+	}
+
+	/**
+	** Attempts to infer an immutable view of the given collection, using the
+	** available static creators given in {@link Collections}.
+	**
+	** Note that this method will always return an immutable {@link Collection}
+	** if the input is not a {@link SortedSet}, {@link Set}, or {@link List};
+	** this may or may not be what you want.
+	*/
+	public static <T, C extends Collection<T>> C inferImmutable(C coll) {
+		if (coll instanceof SortedSet) {
+			return (C)Collections.unmodifiableSortedSet((SortedSet<T>)coll);
+		} else if (coll instanceof Set) {
+			return (C)Collections.unmodifiableSet((Set<T>)coll);
+		} else if (coll instanceof List) {
+			return (C)Collections.unmodifiableList((List<T>)coll);
+		} else {
+			return (C)Collections.unmodifiableCollection(coll);
+		}
+	}
+
+	public C view() {
+		return objs_i;
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	@Override protected boolean add(T object) {
+		return objs.add(object);
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	@Override protected boolean remove(T object) {
+		return objs.remove(object);
+	}
+
+	/**
+	** {@inheritDoc}
+	*/
+	/*@Override**/ public int size() {
+		return objs.size();
+	}
+
+	/*@Override**/ public Iterator<T> iterator() {
+		return new Iterator<T>() {
+			final Iterator<T> it = objs.iterator();
+			/*@Override**/ public boolean hasNext() { return it.hasNext(); }
+			/*@Override**/ public T next() { return it.next(); }
+			/*@Override**/ public void remove() { releaseFrom(it); }
+		};
+	}
+
+}
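An illustrative use of the iteration support (editorial sketch; TreeSet is chosen arbitrarily so that inferImmutable() takes its SortedSet branch, and all collection types are from java.util):

    TrackingSweeper<String, SortedSet<String>> sw =
        new TrackingSweeper<String, SortedSet<String>>(true, false, new TreeSet<String>());
    sw.acquire("a");
    sw.acquire("b");
    for (Iterator<String> it = sw.iterator(); it.hasNext();) {
        it.next();
        it.remove();       // forwards to releaseFrom(), keeping the sweeper state correct
    }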
diff --git a/src/main/java/plugins/Library/util/exec/AbstractExecution.java b/src/main/java/plugins/Library/util/exec/AbstractExecution.java
new file mode 100644
index 00000000..7b8a4c0f
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/AbstractExecution.java
@@ -0,0 +1,176 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+import java.util.Date;
+import java.util.Set;
+import java.util.HashSet;
+
+/**
+** A partial implementation of {@link Execution}, defining some higher-level
+** functionality in terms of lower-level ones.
+**
+** To implement {@link Execution} fully from this class, the programmer needs to
+** implement the following methods:
+**
+** * {@link Progress#getParts()}
+**
+** The programmer must call {@link #setResult(Object)} and {@link
+** #setError(TaskAbortException)} to set the error and result fields correctly.
+** If these methods are overridden, they '''must''' call the overridden ones
+** using {@code super}.
+**
+** The programmer might also wish to override the following:
+**
+** * {@link #join()}
+**
+** @author MikeB
+** @author infinity0
+*/
+public abstract class AbstractExecution<V> implements Execution<V> {
+
+	protected Date start;
+	protected Date stop = null;
+
+	/**
+	** Keeps track of the acceptors.
+	*/
+	final protected Set<ExecutionAcceptor<V>> accept = new HashSet<ExecutionAcceptor<V>>();
+
+	/**
+	** Subject of the execution.
+	*/
+	final protected String subject;
+
+	/**
+	** Holds the error that caused the operation to abort, if any. If not
+	** {@code null}, then thrown by {@link #getResult()}.
+	*/
+	private TaskAbortException error;
+
+	/**
+	** Holds the result of the operation once it completes. If {@link #error}
+	** is {@code null}, then returned by {@link #getResult()}.
+	*/
+	private V result;
+
+	/**
+	** Create an {@code Execution} with the given subject.
+	**
+	** @param subj The subject
+	*/
+	public AbstractExecution(String subj) {
+		this.subject = subj;
+		setStartDate();
+	}
+
+	protected synchronized void setStartDate() {
+		if (start != null) { throw new IllegalStateException("Execution is already running"); }
+		start = new Date();
+		for (ExecutionAcceptor<V> acc: accept) { offerStarted(acc); }
+	}
+
+	protected void offerStarted(ExecutionAcceptor<V> acc) {
+		try {
+			acc.acceptStarted(this);
+		} catch (RuntimeException e) {
+			// FIXME NORM log this somewhere
+		}
+	}
+
+	protected synchronized void setResult(V res) {
+		if (stop != null) { throw new IllegalStateException("Execution has already finished."); }
+		result = res;
+		stop = new Date();
+		notifyAll();
+		for (ExecutionAcceptor<V> acc: accept) { offerDone(acc); }
+	}
+
+	protected void offerDone(ExecutionAcceptor<V> acc) {
+		try {
+			acc.acceptDone(this, result);
+		} catch (RuntimeException e) {
+			// FIXME NORM log this somewhere
+		}
+	}
+
+	protected synchronized void setError(TaskAbortException err) {
+		if (stop != null) { throw new IllegalStateException("Execution has already finished."); }
+		error = err;
+		stop = new Date();
+		notifyAll();
+		for (ExecutionAcceptor<V> acc: accept) { offerAborted(acc); }
+	}
+
+	protected void offerAborted(ExecutionAcceptor<V> acc) {
+		try {
+			acc.acceptAborted(this, error);
+		} catch (RuntimeException e) {
+			// FIXME NORM log this somewhere
+		}
+	}
+
+	/*========================================================================
+	  public interface Progress
+	  ========================================================================*/
+
+	/*@Override**/ public String getSubject() {
+		return subject;
+	}
+
+	abstract public String getStatus();
+
+	abstract public ProgressParts getParts() throws TaskAbortException;
+
+	/*@Override**/ public synchronized boolean isDone() throws TaskAbortException {
+		if (error != null) { throw error; }
+		return result != null;
+	}
+
+	/*@Override**/ public synchronized boolean isStarted() {
+		return start != null;
+	}
+
+	/*@Override**/ public synchronized void join() throws InterruptedException, TaskAbortException {
+		while (stop == null) { wait(); }
+		if (error != null) { throw error; }
+	}
+
+	/*========================================================================
+	  public interface Execution
+	  ========================================================================*/
+
+	/*@Override**/ public Date getStartDate() {
+		return start;
+	}
+
+	/*@Override**/ public long getTimeElapsed() {
+		return System.currentTimeMillis() - start.getTime();
+	}
+
+	/*@Override**/ public long getTimeTaken() {
+		if (stop == null) { return -1; }
+		return stop.getTime() - start.getTime();
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation returns {@link #result} if {@link #error} is {@code
+	** null}, otherwise it throws it.
+	*/
+	/*@Override**/ public V getResult() throws TaskAbortException {
+		if (error != null) { throw error; }
+		return result;
+	}
+
+	/*@Override**/ public synchronized void addAcceptor(ExecutionAcceptor<V> acc) {
+		accept.add(acc);
+		// trigger the event if the task is already done/aborted
+		if (start != null) { offerStarted(acc); }
+		if (result != null) { offerDone(acc); }
+		if (error != null) { offerAborted(acc); }
+	}
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/BaseCompositeProgress.java b/src/main/java/plugins/Library/util/exec/BaseCompositeProgress.java
new file mode 100644
index 00000000..050f3f02
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/BaseCompositeProgress.java
@@ -0,0 +1,150 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** A progress that accumulates its data from the given group of progresses.
+**
+** TODO NORM perhaps synchronize
+**
+** DOCUMENT
+**
+** @author infinity0
+*/
+public class BaseCompositeProgress implements CompositeProgress {
+
+	protected String subject = "??";
+
+	protected Iterable<? extends Progress> subprogress = java.util.Collections.<Progress>emptyList();
+
+	protected int esttype = ProgressParts.ESTIMATE_UNKNOWN;
+
+	public BaseCompositeProgress() { }
+
+	/**
+	** Sets the iterable to accumulate progress information from. There are
+	** several static helper methods you can use to create progress iterables
+	** that are backed by a collection of tracker ids; see the rest of the
+	** documentation for this class.
+	**
+	** @param subs The subprogresses to accumulate information from
+	*/
+	public void setSubProgress(Iterable<? extends Progress> subs) {
+		if (subs == null) {
+			throw new IllegalArgumentException("Can't set a null progress iterable");
+		}
+		subprogress = subs;
+	}
+
+	public void setSubject(String s) {
+		subject = s;
+	}
+
+	/**
+	** @see ProgressParts#totalest
+	*/
+	public void setEstimate(int est) {
+		// TODO NORM size check?
+		esttype = est;
+	}
+
+	/*========================================================================
+	  public interface Progress
+	  ========================================================================*/
+
+	/*@Override**/ public String getSubject() {
+		return subject;
+	}
+
+	/*@Override**/ public String getStatus() {
+		try {
+			return getParts().toString();
+		} catch (TaskAbortException e) {
+			assert(e.isError()); // in-progress / complete tasks should have been rm'd from the underlying iterable
+			return "Aborted: " + e.getMessage();
+		}
+	}
+
+	/*@Override**/ public ProgressParts getParts() throws TaskAbortException {
+		return ProgressParts.getParts(subprogress, esttype);
+	}
+
+	/*@Override**/ public Iterable<? extends Progress> getSubProgress() {
+		// TODO NORM make this check that subprogress is immutable, and if not return a wrapper
+		return subprogress;
+	}
+
+	/*@Override**/ public boolean isStarted() {
+		for (Progress p: subprogress) {
+			if (p != null && p.isStarted()) { return true; }
+		}
+		return false;
+	}
+
+	/*@Override**/ public boolean isDone() throws TaskAbortException {
+		for (Progress p: subprogress) {
+			if (p == null || !p.isDone()) { return false; }
+		}
+		return true;
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This method just checks to see if at least one subprogress is not yet
+	** done.
+	*/
+	/*@Override**/ public boolean isPartiallyDone() {
+		for (Progress p: subprogress) {
+			try {
+				if (p != null && !p.isDone()) { return true; }
+			} catch (TaskAbortException e) {
+				continue;
+			}
+		}
+		return false;
+	}
+
+	/*@Override**/ public void join() throws InterruptedException, TaskAbortException {
+		int s = 1;
+		for (;;) {
+			boolean foundnull = false;
+			for (Progress p: subprogress) {
+				if (p == null) { foundnull = true; continue; }
+				p.join();
+			}
+			if (!foundnull) { break; }
+			// yes, this is a really shit way of doing this. but the best alternative I
+			// could come up with was the ugly commented-out stuff in ProgressTracker.
+
+			// anyhow, Progress objects are usually created very very soon after their
+			// tracking IDs are added to the collection backing the Iterable,
+			// so this shouldn't happen that often.
+
+			Thread.sleep(s*1000);
+			// extend the sleep time with reducing probability, or reset it to 1
+			if (s == 1 || Math.random() < 1/Math.log(s)/4) { ++s; } else { s = 1; }
+		}
+	}
+
+	/*
+	// moved here from ParallelSerialiser. might be useful.
+	protected <P extends Progress> void joinAll(Iterable<P> plist) throws InterruptedException, TaskAbortException {
+		Iterator<P> it = plist.iterator();
+		while (it.hasNext()) {
+			P p = it.next();
+			try {
+				p.join();
+			} catch (TaskAbortException e) {
+				if (e.isError()) {
+					throw e;
+				} else {
+					// TODO LOW perhaps have a handleNonErrorAbort() that can be overridden
+					it.remove();
+				}
+			}
+		}
+	}
+	*/
+
+}
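A small illustration of the accumulation (editorial sketch; SimpleProgress is defined later in this patch, and the printed string follows ProgressParts' toString() format):

    SimpleProgress p1 = new SimpleProgress(), p2 = new SimpleProgress();
    p1.addPartKnown(4, true);
    p2.addPartKnown(2, true);
    BaseCompositeProgress both = new BaseCompositeProgress();
    both.setSubject("two subtasks");
    both.setSubProgress(java.util.Arrays.asList(p1, p2));
    System.out.println(both.getStatus());   // "0/2/2/??" -- counts progresses, not their parts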
diff --git a/src/main/java/plugins/Library/util/exec/ChainedProgress.java b/src/main/java/plugins/Library/util/exec/ChainedProgress.java
new file mode 100644
index 00000000..2529064d
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/ChainedProgress.java
@@ -0,0 +1,42 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Interface representing a {@link Progress} made up of a sequence of
+** progresses, each occurring one after the other, and whose completion usually
+** (this is not enforced) depends on all of the subprogresses completing
+** successfully.
+**
+** @author infinity0
+*/
+public interface ChainedProgress extends Progress {
+
+	/**
+	** {@inheritDoc}
+	**
+	** Implementations should return {@link ProgressParts#getParts(Iterable,
+	** int)}, with the first argument being the collection of already
+	** completed progresses plus the current one.
+	*/
+	/*@Override**/ public ProgressParts getParts() throws TaskAbortException;
+
+	/**
+	** Gets the latest subprogress. The earlier ones should all be complete.
+	** Note: '''this can return {@code null}''' which means the next stage
+	** hasn't started yet.
+	*/
+	public Progress getCurrentProgress();
+
+	/*
+
+	display would look something like
+
+	{as for Progress.this}
+	+----------------------------------------------------+
+	| {as for getCurrentProgress()}                       |
+	+----------------------------------------------------+
+
+	*/
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/CompositeProgress.java b/src/main/java/plugins/Library/util/exec/CompositeProgress.java
new file mode 100644
index 00000000..37353c05
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/CompositeProgress.java
@@ -0,0 +1,54 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Interface representing a {@link Progress} made up of several subprogresses,
+** which are generally (though not necessarily) independent.
+**
+** @author infinity0
+*/
+public interface CompositeProgress extends Progress {
+
+	/**
+	** {@inheritDoc}
+	**
+	** Implementations should return {@link ProgressParts#getSubParts(Iterable,
+	** boolean)}, with the first argument being the same underlying collection
+	** as {@link #getSubProgress()}.
+	**
+	** TODO HIGH: BaseCompositeProgress actually uses {@link
+	** ProgressParts#getParts(Iterable, int)}... should we change the above
+	** spec?
+	*/
+	/*@Override**/ public ProgressParts getParts() throws TaskAbortException;
+
+	/**
+	** Gets all the Progress objects backing this progress. The iterator
+	** should not support {@link java.util.Iterator#remove()}.
+	*/
+	public Iterable<? extends Progress> getSubProgress();
+
+	/**
+	** Whether the task has partially completed. (Implementations at their
+	** discretion might choose to take this to mean "enough tasks to be
+	** acceptable", rather than "any one task".)
+	*/
+	public boolean isPartiallyDone();
+
+	/*
+	display would look something like:
+
+	{as for (Progress)this}
+	[ details ]
+	+--------------------------------------------------+
+	| for (Progress p: getSubProgress())                |
+	|     {as for p}                                    |
+	+--------------------------------------------------+
+
+	*/
+
+
+}
+
diff --git a/src/main/java/plugins/Library/util/exec/Execution.java b/src/main/java/plugins/Library/util/exec/Execution.java
new file mode 100644
index 00000000..aee8b54f
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/Execution.java
@@ -0,0 +1,76 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+import java.util.Date;
+//import java.util.concurrent.Future;
+
+/**
+** Interface for an execution being handled concurrently to the threads which
+** need to know its status and results.
+**
+** TODO LOW maybe have {@code Execution<K, V>} and {@code K getSubject()}
+**
+** @param <V> Type of result of the execution
+** @author MikeB
+** @author infinity0
+*/
+public interface Execution<V> extends Progress/*, Future<V>*/ {
+
+	/**
+	** Returns the Date object representing the time at which this request was
+	** started.
+	*/
+	public Date getStartDate();
+
+	/**
+	** Get the number of milliseconds elapsed since the request was started.
+	*/
+	public long getTimeElapsed();
+
+	/**
+	** Get the number of milliseconds it took for the execution to complete or
+	** abort, or {@code -1} if it has not yet done so.
+	*/
+	public long getTimeTaken();
+
+	/**
+	** Gets the result of this operation, or throws the error that caused it to
+	** abort.
+	*/
+	public V getResult() throws TaskAbortException;
+
+	/**
+	** Attach an {@link ExecutionAcceptor} to this execution.
+	**
+	** Implementations should trigger the appropriate methods on all acceptors
+	** as the corresponding events are triggered, and on newly-added acceptors
+	** if events have already happened, '''in the same order in which they
+	** happened'''. (This is slightly different from a listener, where past
+	** events are missed by newly-added listeners.)
+	**
+	** Generally, adding an acceptor twice should not give different results,
+	** but this is up to the implementation. Unchecked exceptions (ie. {@link
+	** RuntimeException}s) thrown by any acceptor should be caught (and ideally
+	** handled).
+	*/
+	public void addAcceptor(ExecutionAcceptor<V> acc);
+
+	/*========================================================================
+	  public interface Future
+	  ========================================================================*/
+
+	// TODO LOW could retrofit this if it's ever needed...
+
+	//*@Override**/ public boolean cancel(boolean mayInterruptIfRunning);
+
+	//*@Override**/ public V get();
+
+	//*@Override**/ public V get(long timeout, TimeUnit unit);
+
+	//*@Override**/ public boolean isCancelled();
+
+	//*@Override**/ public boolean isDone();
+
+}
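A sketch of attaching an acceptor (editorial; ExecutionAcceptor is the next file in this patch, and the anonymous subclass below is invented for illustration). Note that because events are replayed to newly-added acceptors, acceptStarted fires immediately here:

    Execution<String> ex = new AbstractExecution<String>("demo") {
        public String getStatus() { return "running"; }
        public ProgressParts getParts() { return ProgressParts.normalise(0, 1); }
    };
    ex.addAcceptor(new ExecutionAcceptor<String>() {
        public void acceptStarted(Execution<String> e) { System.out.println("started " + e.getSubject()); }
        public void acceptDone(Execution<String> e, String result) { System.out.println("done: " + result); }
        public void acceptAborted(Execution<String> e, TaskAbortException t) { System.out.println("aborted: " + t); }
    });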
diff --git a/src/main/java/plugins/Library/util/exec/ExecutionAcceptor.java b/src/main/java/plugins/Library/util/exec/ExecutionAcceptor.java
new file mode 100644
index 00000000..1bb7b3e0
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/ExecutionAcceptor.java
@@ -0,0 +1,20 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Accepts state-changes on an execution.
+**
+** @param <V> The specific type of execution, if any.
+** @author infinity0
+*/
+public interface ExecutionAcceptor<V> {
+
+	public void acceptStarted(Execution<V> opn);
+
+	public void acceptDone(Execution<V> opn, V result);
+
+	public void acceptAborted(Execution<V> opn, TaskAbortException abort);
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/Progress.java b/src/main/java/plugins/Library/util/exec/Progress.java
new file mode 100644
index 00000000..95110c10
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/Progress.java
@@ -0,0 +1,54 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** An abstraction of the progress of a task.
+**
+** @author infinity0
+*/
+public interface Progress {
+
+	/**
+	** Returns the subject / name of the task, if any.
+	*/
+	public String getSubject();
+
+	/**
+	** Returns the current completion status.
+	*/
+	public String getStatus();
+
+	/**
+	** Get the details of this progress as a {@link ProgressParts} object, or
+	** throw an exception if the task has aborted.
+	*/
+	public ProgressParts getParts() throws TaskAbortException;
+
+	/**
+	** Whether the progress has started.
+	*/
+	public boolean isStarted();
+
+	/**
+	** Whether the progress has completed successfully, or throw an exception
+	** if it has aborted.
+	*/
+	public boolean isDone() throws TaskAbortException;
+
+	/**
+	** Wait for the progress to finish.
+	*/
+	public void join() throws InterruptedException, TaskAbortException;
+
+	/*
+
+	display would look something like:
+
+	getName(): getStatus()
+	|   done|   started|   known|   estimate|
+
+	*/
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/ProgressParts.java b/src/main/java/plugins/Library/util/exec/ProgressParts.java
new file mode 100644
index 00000000..f1438d02
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/ProgressParts.java
@@ -0,0 +1,303 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+import java.util.Formatter;
+
+/**
+** Immutable class representing the parts of a progress.
+**
+** @author infinity0
+*/
+public class ProgressParts {
+
+	/**
+	** The value for {@link #totalest} representing an unknown estimate.
+	*/
+	final public static int ESTIMATE_UNKNOWN = -2;
+
+	/**
+	** The value for {@link #totalest} representing a finalized estimate.
+	*/
+	final public static int TOTAL_FINALIZED = -1;
+
+	/**
+	** Parts done. This is never greater than parts started.
+	*/
+	final public int done;
+
+	/**
+	** Parts started. This is never greater than parts known.
+	*/
+	final public int started;
+
+	/**
+	** Parts known. If there is an estimate, this is never greater than it.
+	*/
+	final public int known;
+
+	/**
+	** Total estimated parts. Special values:
+	**
+	** ;{@code -1} : The final total is known and is equal to {@link #known}.
+	** ;{@code -2} : No estimate is available.
+	**
+	** (All other negative values are invalid.)
+	*/
+	final public int totalest;
+
+	/**
+	** Constructs a new {@code ProgressParts} from the given arguments, fixing
+	** any values which are greater than their next neighbour (for example,
+	** 5/4/3/2 -> 2/2/2/2, and 3/7/4/? -> 3/4/4/?).
+	**
+	** @param d Parts {@linkplain #done}
+	** @param s Parts {@linkplain #started}
+	** @param k Parts {@linkplain #known}
+	** @param t {@linkplain #totalest Estimated} total parts
+	*/
+	public static ProgressParts normalise(int d, int s, int k, int t) {
+		if (t >= 0 && k > t) { k = t; }
+		if (s > k) { s = k; }
+		if (d > s) { d = s; }
+		return new ProgressParts(d, s, k, t);
+	}
+
+	/**
+	** Constructs a new {@code ProgressParts} from the given arguments, fixing
+	** any values which are greater than their next neighbour (for example,
+	** 5/4/3/2 -> 2/2/2/2, and 3/7/4/? -> 3/4/4/?).
+	**
+	** @param d Parts {@linkplain #done}, same as parts {@linkplain #started}
+	** @param k Parts {@linkplain #known}
+	*/
+	public static ProgressParts normalise(int d, int k) {
+		return normalise(d, d, k, ESTIMATE_UNKNOWN);
+	}
+
+	/**
+	** Constructs a new {@code ProgressParts} from the given arguments.
+	**
+	** @param d Parts {@linkplain #done}
+	** @param s Parts {@linkplain #started}
+	** @param k Parts {@linkplain #known}
+	** @param t {@linkplain #totalest Estimated} total parts
+	** @throws IllegalArgumentException if the constraints for the fields are
+	**         not met.
+	*/
+	public ProgressParts(int d, int s, int k, int t) {
+		if (0 > d || d > s || s > k || (t >= 0 && k > t) || (t < 0 && t != ESTIMATE_UNKNOWN && t != TOTAL_FINALIZED)) {
+			throw new IllegalArgumentException("ProgressParts (" + d + "/" + s + "/" + k + "/" + t + ") must obey the contract 0 <= done <= started <= known <= totalest or totalest == ESTIMATE_UNKNOWN or TOTAL_FINALIZED");
+		}
+		done = d;
+		started = s;
+		known = k;
+		totalest = t;
+	}
+
+	/**
+	** Constructs a new {@code ProgressParts} from the given arguments.
+	**
+	** @param d Parts {@linkplain #done}
+	** @param s Parts {@linkplain #started}
+	** @param k Parts {@linkplain #known}
+	** @throws IllegalArgumentException if the constraints for the fields are
+	**         not met.
+	*/
+	public ProgressParts(int d, int s, int k) {
+		this(d, s, k, ESTIMATE_UNKNOWN);
+	}
+
+	/**
+	** Constructs a new {@code ProgressParts} from the given arguments.
+	**
+	** @param d Parts {@linkplain #done}, same as parts {@linkplain #started}
+	** @param k Parts {@linkplain #known}
+	** @throws IllegalArgumentException if the constraints for the fields are
+	**         not met.
+	*/
+	public ProgressParts(int d, int k) {
+		this(d, d, k, ESTIMATE_UNKNOWN);
+	}
+
+	/**
+	** Whether the total is finalized. ({@code totalest == TOTAL_FINALIZED})
+	*/
+	final public boolean finalizedTotal() {
+		return totalest == TOTAL_FINALIZED;
+	}
+
+	/**
+	** Whether an estimate exists. ({@code totalest != ESTIMATE_UNKNOWN})
+	*/
+	final public boolean hasEstimate() {
+		return totalest != ESTIMATE_UNKNOWN;
+	}
+
+	/**
+	** Whether the task is done, ie. whether {@code done == started == known}
+	** and the total is {@linkplain #totalest finalized}.
+	*/
+	final public boolean isDone() {
+		return done == started && started == known && totalest == TOTAL_FINALIZED;
+	}
+
+	/**
+	** Returns the parts done as a fraction of the parts known.
+	*/
+	final public float getKnownFractionDone() {
+		return known == 0? 0.0f: (float)done / known;
+	}
+
+	/**
+	** Returns the parts started as a fraction of the parts known.
+	*/
+	final public float getKnownFractionStarted() {
+		return known == 0? 0.0f: (float)started / known;
+	}
+
+	/**
+	** Returns the parts done as a fraction of the estimated total parts.
+	** Note: this will return negative (ie. invalid) if there is no estimate.
+	*/
+	final public float getEstimatedFractionDone() {
+		return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? getKnownFractionDone(): (float)done / totalest;
+	}
+
+	/**
+	** Returns the parts started as a fraction of the estimated total parts.
+	** Note: this will return negative (ie. invalid) if there is no estimate.
+	*/
+	final public float getEstimatedFractionStarted() {
+		return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? getKnownFractionStarted(): (float)started / totalest;
+	}
+
+	/**
+	** Returns the parts known as a fraction of the estimated total parts.
+	** Note: this will return negative (ie. invalid) if there is no estimate.
+	*/
+	final public float getEstimatedFractionKnown() {
+		return totalest == 0? 0.0f: totalest == TOTAL_FINALIZED? 1.0f: (float)known / totalest;
+	}
+
+	/**
+	** Returns a summary of the progress in the form {@code d/s/k/t??}.
+	**
+	** * If {@code done == started}, only one will be included.
+	** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate
+	**   will be omitted.
+	** * If there is no estimate, only the {@code ??} will be included.
+	*/
+	@Override final public String toString() {
+		String s = done + "/";
+		if (done != started) { s += started + "/"; }
+		s += known;
+		if (!finalizedTotal()) {
+			s += hasEstimate()? "/" + totalest + "??" : "/??";
+		}
+		return s;
+	}
+
+	/**
+	** Returns a summary of the progress in the form {@code known_fraction_done
+	** (estimated_fraction_done)}.
+	**
+	** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate
+	**   will be omitted.
+	** * If there is no estimate, only the {@code ??} will be included.
+	*/
+	final public String toFractionString() {
+		Formatter f = new Formatter();
+		if (finalizedTotal()) {
+			f.format("%.4f", getKnownFractionDone());
+		} else if (!hasEstimate()) {
+			f.format("%.4f (??)", getKnownFractionDone());
+		} else {
+			f.format("%.4f (%.4f??)", getKnownFractionDone(), getEstimatedFractionDone());
+		}
+		return f.toString();
+	}
+
+	/**
+	** Returns a summary of the progress in the form {@code known_percent_done
+	** (estimated_percent_done)}.
+	**
+	** * If the total is {@linkplain #finalizedTotal() finalized}, the estimate
+	**   will be omitted.
+	** * If there is no estimate, only the {@code ??} will be included.
+	*/
+	final public String toPercentageString() {
+		Formatter f = new Formatter();
+		if (finalizedTotal()) {
+			f.format("%.2f", getKnownFractionDone()*100);
+		} else if (!hasEstimate()) {
+			f.format("%.2f (??)", getKnownFractionDone()*100);
+		} else {
+			f.format("%.2f (%.2f??)", getKnownFractionDone()*100, getEstimatedFractionDone()*100);
+		}
+		return f.toString();
+	}
+
+
+	/**
+	** Constructs a new {@code ProgressParts} based on the total number of
+	** parts (done, started, known) in the given iterable. Total estimated
+	** parts is calculated based on the value of {@code try_to_be_smart}:
+	**
+	** ;{@code true} : try to extrapolate an estimated total parts based on the
+	**   number of subprogresses having estimates, the sum of these estimates,
+	**   and the total number of subprogresses.
+	**
+	** ;{@code false} : an estimate will only be supplied if all subprogresses
+	**   have estimates.
+	*/
+	public static ProgressParts getSubParts(Iterable<? extends Progress> subprogress, boolean try_to_be_smart) throws TaskAbortException {
+		int d = 0, s = 0, k = 0, t = 0;
+		int num = 0, unknown = 0;
+		boolean totalfinalized = true;
+		for (Progress p: subprogress) {
+			++num;
+			if (p == null) { ++unknown; continue; }
+			ProgressParts parts = p.getParts();
+			d += parts.done;
+			s += parts.started;
+			k += parts.known;
+			switch (parts.totalest) {
+			case ESTIMATE_UNKNOWN:
+				++unknown;
+				totalfinalized = false;
+				break;
+			case TOTAL_FINALIZED:
+				t += parts.known;
+				break;
+			default:
+				totalfinalized = false;
+				t += parts.totalest;
+			}
+		}
+		if (num == unknown) {
+			t = ESTIMATE_UNKNOWN;
+		} else if (unknown > 0) {
+			t = (try_to_be_smart)? (int)Math.round(t * num / (float)(num-unknown)): ESTIMATE_UNKNOWN;
+		}
+		return new ProgressParts(d, s, k, (totalfinalized)? TOTAL_FINALIZED: t);
+	}
+
+	/**
+	** Constructs a new {@code ProgressParts} based on the number of progresses
+	** (done, started, known) in the given iterable, with the given total
+	** estimate.
+	*/
+	public static ProgressParts getParts(Iterable<? extends Progress> subprogress, int estimate) throws TaskAbortException {
+		int d = 0, s = 0, k = 0;
+		for (Progress p: subprogress) {
+			++k;
+			if (p == null) { continue; }
+			++s;
+			if (p.isDone()) { ++d; }
+		}
+		return new ProgressParts(d, s, k, estimate);
+	}
+
+}
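The clamping and formatting rules above, made concrete (editorial examples; both results follow directly from the code):

    ProgressParts a = ProgressParts.normalise(5, 4, 3, 2);   // clamped down to 2/2/2 with estimate 2
    System.out.println(a);                                   // prints "2/2/2??"
    ProgressParts b = new ProgressParts(1, 2, 4);            // no estimate supplied
    System.out.println(b);                                   // prints "1/2/4/??"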
diff --git a/src/main/java/plugins/Library/util/exec/SimpleProgress.java b/src/main/java/plugins/Library/util/exec/SimpleProgress.java
new file mode 100644
index 00000000..de1f7426
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/SimpleProgress.java
@@ -0,0 +1,239 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Basic progress implementation. The number of parts known is implicitly equal
+** to the number of parts started.
+**
+** '''USE OF THIS CLASS IS PRONE TO DEADLOCK - MAKE SURE YOU MATCH UP YOUR
+** CALLS TO {@link #addPartDone()} AND {@link #addPartKnown(int, boolean)}.'''
+** I will deal with this at some point, and make this class easier to use...
+** possibly have a setDone() method like AtomicProgress has...
+**
+** NOTE: this implementation considers the task '''done''' when a call to
+** {@link #addPartDone()} triggers the condition {@code pdone == known} when
+** {@link #finalizedTotal()} is true. '''As soon as''' this occurs, all threads
+** blocked on {@link #join()} will unblock and assume the task has finished.
+** To prevent this from happening unexpectedly, it is recommended to call
+** {@link #enteredSerialiser()} and {@link #exitingSerialiser()} appropriately
+** '''for each serialiser visited'''. Failure to follow this advice may result
+** in deadlock or task data being retrieved before the task is properly
+** complete. For more details, see the source code.
+**
+** TODO NORM perhaps synchronize
+**
+** @author infinity0
+*/
+public class SimpleProgress implements Progress {
+
+	/*
+	** More details about the note in the class description:
+	**
+	** If you pass the progress object onto a child serialiser, but do extra
+	** processing of the task data after the child returns, it is **vital**
+	** that you call enteredSerialiser() and exitingSerialiser(). Otherwise,
+	** if any child serialiser triggers the aforementioned condition, it will
+	** cause all threads blocked on join() to unblock and think the task has
+	** been completed, before the extra processing has occurred. ie:
+	**
+	** * Parent: [progress is at 8/8, not final]
+	** * Parent: subsrl.pullLive(task, progress)
+	** * Child: addPartKnown(8, true); [progress is at 8/16, final]
+	** * Child: addPartDone() * 8; [progress is at 16/16, final]
+	** * **all threads blocked on join() will unblock**
+	** * Parent: yay, child has done its stuff, now I need to do some extra
+	**   stuff to actually complete the task
+	** * other threads: "wtf? data's changing randomly!?"
+	**
+	** (We don't wait for the child serialiser to finish to finalise the total,
+	** because we want the total to be finalised as soon as possible. Generally, we
+	** let the leaf serialiser finalise the total.)
+	*/
+
+	protected String subject = "??";
+
+	protected String status = null;
+
+	/**
+	** Number of parts done.
+	*/
+	protected volatile int pdone;
+
+	/**
+	** Number of parts known. For {@link SimpleProgress}, this is implicitly
+	** also the number of parts started.
+	*/
+	protected volatile int known;
+
+	/**
+	** Estimated total number of parts.
+	**
+	** @see ProgressParts#totalest
+	*/
+	protected volatile int estimate = ProgressParts.ESTIMATE_UNKNOWN;
+
+	private boolean inprogress = true; // this variable is only used in synchronized blocks so doesn't need to be volatile
+
+	private volatile TaskAbortException abort = null;
+
+	public SimpleProgress() { }
+
+	public boolean finalizedTotal() {
+		return estimate == ProgressParts.TOTAL_FINALIZED;
+	}
+
+	/**
+	** Add a done part. If {@code pdone == known} is triggered while {@link
+	** #finalizedTotal()} is true, then the task is deemed to be completed and
+	** all threads blocked on {@link #join()} will be released.
+	*/
+	public synchronized void addPartDone() {
+		if (pdone == known) {
+			throw new IllegalStateException("Can't increase parts done above parts known");
+		}
+		pdone++;
+		if (finalizedTotal() && pdone == known) {
+			inprogress = false;
+			notifyAll();
+		}
+	}
+
+	/**
+	** Add some known parts, and either finalise the total or invalidate the
+	** previous estimate.
+	*/
+	public synchronized void addPartKnown(int parts, boolean finalise) {
+		if (parts < 0) {
+			throw new IllegalArgumentException("Can't decrease the number of parts known.");
+		} else if (finalise) {
+			if (finalizedTotal() && parts > 0) {
+				throw new IllegalArgumentException("Total already finalised");
+			}
+			// update estimate first to maintain ProgressParts contract
+			// in case another thread reads this concurrently
+			estimate = known + parts;
+			known = estimate;
+			estimate = ProgressParts.TOTAL_FINALIZED;
+		} else {
+			if (finalizedTotal()) {
+				throw new IllegalArgumentException("Cannot un-finalise a final total!");
+			}
+			estimate = ProgressParts.ESTIMATE_UNKNOWN;
+			known += parts;
+		}
+	}
+
+	/**
+	** Set an estimate. This should be used carefully due to the automatic
+	** thread-unblocking behaviour of {@link #addPartDone()}. Specifically, you
+	** should '''never''' give {@link ProgressParts#TOTAL_FINALIZED} as the
+	** argument to this method.
+	**
+	** @see ProgressParts#totalest
+	*/
+	public synchronized void setEstimate(int e) {
+		if (e < known) {
+			throw new IllegalArgumentException("Can't give a lower estimate than what is already known.");
+		}
+		estimate = e;
+	}
+
+	/**
+	** This method should be called first thing after entering a serialiser.
+	** Otherwise, deadlock may result, or thread-safety may be broken.
+	*/
+	public void enteredSerialiser() {
+		addPartKnown(1, false);
+	}
+
+	/**
+	** This method should be called last thing before leaving a serialiser.
+	** Otherwise, deadlock may result, or thread-safety may be broken.
+	**
+	** (Note: normal exits; not exceptions. Ie. call this in a {@code try}
+	** block, not a {@code finally} block.)
+	*/
+	public void exitingSerialiser() {
+		addPartDone();
+	}
+
+	public synchronized void abort(TaskAbortException e) throws TaskAbortException {
+		abort = e;
+		inprogress = false;
+		notifyAll();
+		throw e;
+	}
+
+	public void setSubject(String s) {
+		if (s == null) {
+			throw new IllegalArgumentException("Can't set a null progress subject");
+		}
+		subject = s;
+	}
+
+	public void setStatus(String s) {
+		status = s;
+	}
+
+	public void setStatusDefault() {
+		status = null;
+	}
+
+	/*========================================================================
+	  public interface Progress
+	  ========================================================================*/
+
+	/*@Override**/ public String getSubject() {
+		return subject;
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation returns (in order of priority): the message for
+	** {@link #abort}, {@link #status}, or a string constructed from {@link
+	** #pdone}, {@link #known} and {@link #estimate}.
+	*/
+	/*@Override**/ public String getStatus() {
+		if (abort != null) { return abort.getMessage(); }
+		if (status != null) { return status; }
+		try {
+			return getParts().toString();
+		} catch (TaskAbortException e) {
+			// abort could have been set between lines 1 and 4
+			return abort.getMessage();
+		}
+	}
+
+	/*@Override**/ public ProgressParts getParts() throws TaskAbortException {
+		// updates are made such that the ProgressParts contract isn't broken even
+		// in mid-update so we don't need to synchronize here.
+		if (abort != null) { throw new TaskAbortException("Task failed", abort); }
+		return new ProgressParts(pdone, known, known, estimate);
+	}
+
+	/**
+	** {@inheritDoc}
+	**
+	** This implementation returns {@code true}; it's assumed that the task has
+	** already started by the time you construct one of these objects.
+	*/
+	/*@Override**/ public boolean isStarted() {
+		return true;
+	}
+
+	/*@Override**/ public boolean isDone() throws TaskAbortException {
+		// updates are made such that the ProgressParts contract isn't broken even
+		// in mid-update so we don't need to synchronize here.
+		if (abort != null) { throw new TaskAbortException("Task failed", abort); }
+		return finalizedTotal() && pdone == known;
+	}
+
+	/*@Override**/ public synchronized void join() throws InterruptedException, TaskAbortException {
+		while (inprogress) { wait(); }
+		if (abort != null) { throw new TaskAbortException("Task failed", abort); }
+	}
+
+}
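The calling convention from the class description, in order (editorial sketch; the comments track the known/done counts):

    SimpleProgress p = new SimpleProgress();
    p.enteredSerialiser();      // known 0 -> 1, total left non-final
    p.addPartKnown(3, true);    // known 1 -> 4, total finalised by the leaf serialiser
    p.addPartDone();            // 1/4
    p.addPartDone();            // 2/4
    p.addPartDone();            // 3/4
    p.exitingSerialiser();      // final addPartDone(): 4/4, threads blocked in join() now return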
diff --git a/src/main/java/plugins/Library/util/exec/TaskAbortException.java b/src/main/java/plugins/Library/util/exec/TaskAbortException.java
new file mode 100644
index 00000000..4100fd8b
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/TaskAbortException.java
@@ -0,0 +1,85 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Thrown when a task aborts. DOCUMENT
+**
+** @author infinity0
+*/
+public class TaskAbortException extends Exception {
+
+	/**
+	** Whether the abortion was due to an error condition. Defaults to {@code
+	** true}. A {@code false} value generally means that the task was itself
+	** aborted, but that the intended effects of the task can be treated as
+	** having been accomplished, eg. {@link TaskCompleteException}.
+	**
+	** Multiple-task serialisers should handle abortions as follows: non-error
+	** abortions result in the task being removed from the taskgroup; error
+	** abortions are rethrown as-is.
+	*/
+	final protected boolean error;
+
+	/**
+	** Whether the failure is temporary, and the client should try again at
+	** a later time. Eg. this should be {@code true} for abortions caused by
+	** {@link java.io.IOException}, and {@code false} for abortions caused by
+	** {@link plugins.Library.io.DataFormatException}. Defaults to {@code
+	** false}.
+	*/
+	final protected boolean retry;
+
+	/**
+	** Constructs a new exception with the specified parameters.
+	**
+	** @param s The detail message
+	** @param t The cause
+	** @param e Whether the current abortion is an {@link #error}.
+	** @param r Whether a {@link #retry} is likely to succeed.
+	*/
+	public TaskAbortException(String s, Throwable t, boolean e, boolean r) {
+		super(s, t);
+		error = e;
+		retry = r;
+	}
+
+	/**
+	** Constructs a new exception with the specified parameters, marked as
+	** error.
+	**
+	** @param s The detail message
+	** @param t The cause
+	** @param r Whether a {@link #retry} is likely to succeed.
+	*/
+	public TaskAbortException(String s, Throwable t, boolean r) {
+		this(s, t, true, r);
+	}
+
+	/**
+	** Constructs a new exception with the specified parameters, marked as
+	** error and non-retry.
+	**
+	** @param s The detail message
+	** @param t The cause
+	*/
+	public TaskAbortException(String s, Throwable t) {
+		this(s+" : "+t.getMessage(), t, true, false);
+	}
+
+	/**
+	** @see #error
+	*/
+	public boolean isError() {
+		return error;
+	}
+
+	/**
+	** @see #retry
+	*/
+	public boolean shouldRetry() {
+		return retry;
+	}
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/TaskCompleteException.java b/src/main/java/plugins/Library/util/exec/TaskCompleteException.java
new file mode 100644
index 00000000..b81de820
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/TaskCompleteException.java
@@ -0,0 +1,20 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Thrown when a task aborts due to the task already having been done, eg. by
+** another thread.
+**
+** @author infinity0
+*/
+public class TaskCompleteException extends TaskAbortException {
+
+	// EXPAND
+
+	public TaskCompleteException(String s) {
+		super(s, null, false, false);
+	}
+
+}
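How a caller is expected to honour the two flags above (editorial sketch; doTask() is a hypothetical method that may throw TaskAbortException):

    try {
        doTask();
    } catch (TaskAbortException e) {
        if (!e.isError()) {
            // non-error abort, eg. TaskCompleteException: treat the work as accomplished
        } else if (e.shouldRetry()) {
            // transient failure, eg. an I/O problem: requeue for a later attempt
        } else {
            throw e;   // permanent failure: propagate
        }
    }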
diff --git a/src/main/java/plugins/Library/util/exec/TaskInProgressException.java b/src/main/java/plugins/Library/util/exec/TaskInProgressException.java
new file mode 100644
index 00000000..5ec800b1
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/TaskInProgressException.java
@@ -0,0 +1,37 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.exec;
+
+/**
+** Thrown when a task is already in progress elsewhere.
+**
+** @author infinity0
+*/
+public class TaskInProgressException extends TaskAbortException {
+
+	final Progress prog;
+
+	public TaskInProgressException(String s, Progress p) {
+		super(s, null, false, false);
+		prog = p;
+	}
+
+	public TaskInProgressException(Progress p) {
+		super(null, null, false, false);
+		prog = p;
+	}
+
+	public Progress getProgress() {
+		return prog;
+	}
+
+	/**
+	** DOCUMENT
+	*/
+	public TaskCompleteException join() throws InterruptedException, TaskAbortException {
+		prog.join();
+		return new TaskCompleteException("Completed task: " + prog.getSubject());
+	}
+
+}
diff --git a/src/main/java/plugins/Library/util/exec/package-info.java b/src/main/java/plugins/Library/util/exec/package-info.java
new file mode 100644
index 00000000..24a5a855
--- /dev/null
+++ b/src/main/java/plugins/Library/util/exec/package-info.java
@@ -0,0 +1,9 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+/**
+** Contains classes to represent and monitor execution of tasks.
+**
+** @author infinity0
+*/
+package plugins.Library.util.exec;
diff --git a/src/main/java/plugins/Library/util/func/Closure.java b/src/main/java/plugins/Library/util/func/Closure.java
new file mode 100644
index 00000000..d32ca796
--- /dev/null
+++ b/src/main/java/plugins/Library/util/func/Closure.java
@@ -0,0 +1,40 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.func;
+
+/**
+** An object representing a partially-evaluated subroutine. This is useful in
+** many situations, for example when we have a general subroutine that takes
+** several parameters, and we want to create a derivative subroutine for which
+** a subset of these parameters is fixed (this is sometimes referred to as the
+** "environment"). The derivative is sometimes called a "curried function".
+**
+** This idea can be applied to implement continuations in an algorithm that
+** blocks waiting for some of its parameters to arrive.
+**
+** This interface defines a checked exception to be thrown from the {@link
+** #invoke(Object)} method; if you call it on a general {@code Closure} then
+** you will also need to catch {@link Exception}. If you don't need to do this,
+** use {@link SafeClosure} instead.
+**
+** (Unfortunately, using a type parameter to indicate the type of exception
+** thrown is unacceptable; the signature {@code throws T} disallows throwing
+** multiple types of exception, whereas {@code throws Exception} allows it.)
+**
+** @param <P> Type of the input parameter. If you need more than one input
+**        parameter, consider using the classes in {@link Tuples}.
+** @param <E> Type of the thrown exception(s). If your implementation throws
+**        more than one type of exception, it's recommended to give the closest
+**        superclass of all the exceptions, though {@link Exception} is always
+**        safe. If your implementation throws no checked exceptions, consider
+**        using {@link SafeClosure} instead of this class.
+** @author infinity0
+** @see <a href="http://en.wikipedia.org/wiki/Closure_(computer_science)">Closure
+**      (computer science)</a>
+*/
+public interface Closure<P, E extends Exception> {
+
+	public void invoke(P param) throws E;
+
+}
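A small implementation showing the checked-exception parameter in use (editorial sketch; java.io.IOException is an arbitrary choice of E):

    Closure<String, java.io.IOException> write = new Closure<String, java.io.IOException>() {
        public void invoke(String line) throws java.io.IOException {
            // the fixed "environment" would be captured by this anonymous class
            System.out.println(line);
        }
    };
    try { write.invoke("hello"); } catch (java.io.IOException e) { /* handle */ }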
diff --git a/src/main/java/plugins/Library/util/func/SafeClosure.java b/src/main/java/plugins/Library/util/func/SafeClosure.java
new file mode 100644
index 00000000..1235781c
--- /dev/null
+++ b/src/main/java/plugins/Library/util/func/SafeClosure.java
@@ -0,0 +1,17 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.func;
+
+/**
+** A {@link Closure} that cannot throw a checked exception.
+**
+** @param <P> Type of the input parameter. If you want a closure with more than
+**        one input parameter, consider using the classes in {@link Tuples}.
+** @author infinity0
+*/
+public interface SafeClosure<P> extends Closure<P, RuntimeException> {
+
+	public void invoke(P param);
+
+}
diff --git a/src/main/java/plugins/Library/util/func/Tuples.java b/src/main/java/plugins/Library/util/func/Tuples.java
new file mode 100644
index 00000000..ec080174
--- /dev/null
+++ b/src/main/java/plugins/Library/util/func/Tuples.java
@@ -0,0 +1,160 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.util.func;
+
+/**
+** Tuple classes that retain type-safety through generics. These classes can
+** be used to create throwaway anonymous types that can be referred back to,
+** unlike anonymous classes. They are also useful for when you want to have a
+** {@link Closure} with more than 1 input parameter.
+**
+** Example usage:
+**
+**   import static plugins.Library.util.func.Tuples.*;
+**   import plugins.Library.util.func.Closure;
+**
+**   ...
+**
+**   Closure<X2<Integer, String>, RuntimeException> my_closure = new
+**   Closure<X2<Integer, String>, RuntimeException>() {
+**
+**       public void invoke(X2<Integer, String> param) {
+**
+**           ...
+**
+**       }
+**
+**   };
+**
+**   my_closure.invoke(X2(3, "test"));
+**
+** Note that (for n > 0) each n-tuple class extends the (n-1)-tuple class,
+** recursively, so that you can give any (k+e)-tuple to a {@code Closure} that
+** takes a k-tuple, assuming the types also match.
+**
+** TODO NORM override equals() and hashCode() for all of these
+**
+** @author infinity0
+*/
+final public class Tuples {
+
+	private Tuples() {}
+
+	/**
+	** An immutable 0-tuple.
+	*/
+	public static class X0 {
+		@Override public String toString() { return "()"; }
+		@Override public int hashCode() { return 0; }
+		@Override public boolean equals(Object o) { return o instanceof X0 && !(o instanceof X1); }
+	}
+
+	/**
+	** An immutable 1-tuple.
+	*/
+	public static class X1<T0> extends X0 {
+		final public T0 _0;
+		public X1(T0 v0) { super(); _0 = v0; }
+		@Override public String toString() { return "(" + _0 + ")"; }
+		@Override public int hashCode() { return _0.hashCode() + 1; }
+		@Override public boolean equals(Object o) {
+			if (o == this) { return true; }
+			if (!(o instanceof X1) || o instanceof X2) { return false; }
+			@SuppressWarnings("unchecked") X1 x = (X1)o;
+			return (_0 == null? x._0 == null: _0.equals(x._0));
+		}
+	}
+
+	/**
+	** An immutable 2-tuple.
+	*/
+	public static class X2<T0, T1> extends X1<T0> {
+		final public T1 _1;
+		public X2(T0 v0, T1 v1) { super(v0); _1 = v1; }
+		@Override public String toString() { return "(" + _0 + ", " + _1 + ")"; }
+		@Override public int hashCode() { return _0.hashCode() + _1.hashCode() + 2; }
+		@Override public boolean equals(Object o) {
+			if (o == this) { return true; }
+			if (!(o instanceof X2) || o instanceof X3) { return false; }
+			@SuppressWarnings("unchecked") X2 x = (X2)o;
+			return (_0 == null? x._0 == null: _0.equals(x._0))
+			    && (_1 == null? x._1 == null: _1.equals(x._1));
+		}
+	}
diff --git a/src/main/java/plugins/Library/util/func/package-info.java b/src/main/java/plugins/Library/util/func/package-info.java
new file mode 100644
index 00000000..a801b916
--- /dev/null
+++ b/src/main/java/plugins/Library/util/func/package-info.java
@@ -0,0 +1,12 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+/**
+** Contains classes that implement various functional programming concepts in
+** Java.
+**
+** @author infinity0
+** @see <a href="http://en.wikipedia.org/wiki/Functional_programming">Functional
+** programming</a>
+*/
+package plugins.Library.util.func;
diff --git a/src/main/java/plugins/Library/util/package-info.java b/src/main/java/plugins/Library/util/package-info.java
new file mode 100644
index 00000000..fcc63aa7
--- /dev/null
+++ b/src/main/java/plugins/Library/util/package-info.java
@@ -0,0 +1,10 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+/**
+** Contains various general-purpose utility classes. These do not depend on
+** any other part of the plugin code, and may be useful to outside projects.
+**
+** @author infinity0
+*/
+package plugins.Library.util;
diff --git a/src/test/java/plugins/Library/Tester.java b/src/test/java/plugins/Library/Tester.java
new file mode 100644
index 00000000..bb16a4a8
--- /dev/null
+++ b/src/test/java/plugins/Library/Tester.java
@@ -0,0 +1,381 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library;
+
+import plugins.Library.client.*;
+import plugins.Library.util.exec.*;
+import plugins.Library.util.func.Closure;
+import plugins.Library.index.*;
+import plugins.Library.io.*;
+import plugins.Library.io.serial.*;
+import plugins.Library.io.serial.Serialiser.*;
+import plugins.Library.util.*;
+import plugins.Library.*;
+
+import freenet.keys.FreenetURI;
+import freenet.node.RequestStarter;
+
+import java.util.*;
+import java.io.*;
+import java.net.*;
+
+
+/**
+** Various on-freenet tests.
+**
+** @author infinity0
+*/
+public class Tester {
+
+
+    public static String runTest(Library lib, String test) {
+        if (test.equals("push_index")) {
+            return testPushIndex();
+        } else if (test.equals("push_progress")) {
+            return testPushProgress();
+        } else if (test.equals("autodetect")) {
+            return testAutoDetect(lib);
+        } else if (test.equals("push_merge")) {
+            return testPushAndMergeIndex();
+        }
+        return "Add &plugins.Library.Tester=<test name> to the URL to run a test.\n"
+             + "Tests: push_index, push_progress, autodetect, push_merge";
+    }
+
+    final public static String PAGE_START = "<html>\n<body>\n";
+    final public static String PAGE_END = "</body>\n</html>";
+
+
+    public static String testAutoDetect(Library library) {
+        List<String> indexes = Arrays.asList(
+            "CHK@MIh5-viJQrPkde5gmRZzqjBrqOuh~Wbjg02uuXJUzgM,rKDavdwyVF9Z0sf5BMRZsXj7yiWPFUuewoe0CPesvXE,AAIC--8",
+            "SSK@5hH~39FtjA7A9~VXWtBKI~prUDTuJZURudDG0xFn3KA,GDgRGt5f6xqbmo-WraQtU54x4H~871Sho9Hz6hC-0RA,AQACAAE/Search-20",
+            "SSK@US6gHsNApDvyShI~sBHGEOplJ3pwZUDhLqTAas6rO4c,3jeU5OwV0-K4B6HRBznDYGvpu2PRUuwL0V110rn-~8g,AQACAAE/freenet-index-2",
+            "SSK@jzLqXo2AnlWw2CHWAkOB~LmbvowLXn9UhAyz7n3FuAs,PG6wEqQeWw2KNQ6ZbNJti1mn9iXSdIwdRkEK7IyfrlE,AQACAAE/testing-0"
+        );
+
+        StringBuilder s = new StringBuilder();
+        for (String index: indexes) {
+            try {
+                Class<?> t = library.getIndexType(new FreenetURI(index));
+                s.append("<p>").append(t.getName()).append(": ").append(index).append("</p>");
+            } catch (Exception e) {
+                appendError(s, e);
+            }
+        }
+        return s.toString();
+    }
+
+    volatile static Thread push_progress_thread;
+    volatile static SimpleProgress push_progress_progress = new SimpleProgress();
+    volatile static Throwable push_progress_error;
+    volatile static Date push_progress_start;
+    volatile static FreenetURI push_progress_endURI;
+    volatile static FreenetURI push_progress_insertURI;
+    public static String testPushProgress() {
+        if (push_progress_thread == null) {
+            push_progress_thread = new Thread() {
+                YamlReaderWriter yamlrw = new YamlReaderWriter();
+                FreenetArchiver<Map<String, Object>> arx = Library.makeArchiver(yamlrw, "text/yaml", 0x10000, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
+
+                @Override public void run() {
+                    push_progress_start = new Date();
+                    Map<String, Object> testmap = new TreeMap<String, Object>();
+                    for(int i=0; i<0x10000; ++i) {
+                        testmap.put(""+i, i);
+                    }
+                    try {
+                        push_progress_insertURI = new FreenetURI("USK@EArbDzzgEeTOgOn-I2BND0-tgH6ql-XqRYSvPxZhFHo,PG6wEqQeWw2KNQ6ZbNJti1mn9iXSdIwdRkEK7IyfrlE,AQECAAE/testing/0");
+                        PushTask<Map<String, Object>> task = new PushTask<Map<String, Object>>(testmap, push_progress_insertURI);
+                        arx.pushLive(task, push_progress_progress);
+                        push_progress_endURI = (FreenetURI)task.meta;
+                    } catch (TaskAbortException e) {
+                        push_progress_error = e;
+                    } catch (MalformedURLException e) {
+                        push_progress_error = e;
+                    }
+                }
+            };
+            push_progress_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+                public void uncaughtException(Thread t, Throwable e) {
+                    push_progress_error = e;
+                }
+            });
+            push_progress_thread.start();
+        }
+
+        StringBuilder s = new StringBuilder();
+        s.append(PAGE_START);
+        appendTimeElapsed(s, push_progress_start);
+        if (push_progress_insertURI != null) { s.append("<p>Insert: ").append(push_progress_insertURI.toString()).append("</p>\n"); }
+        s.append("<p>").append(push_progress_progress.getStatus()).append("</p>\n");
+        appendResultURI(s, push_progress_endURI);
+        try {
+            ProgressParts parts = push_progress_progress.getParts();
+            s.append("<p>").append(parts.done);
+            s.append(" ").append(parts.known);
+            s.append(" ").append(parts.finalizedTotal()).append("</p>\n");
+        } catch (TaskAbortException e) {
+            appendError(s, push_progress_error);
+        }
+        s.append(PAGE_END);
+        return s.toString();
+    }
+
+
+    volatile static Thread push_index_thread;
+    volatile static String push_index_status = "";
+    volatile static FreenetURI push_index_endURI;
+    volatile static Date push_index_start;
+    volatile static Throwable push_index_error;
+    volatile static Set<String> push_index_words = new TreeSet<String>(Arrays.asList(
+        "Lorem", "ipsum", "dolor", "sit", "amet", "consectetur", "adipisicing",
+        "elit", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore",
+        "et", "dolore", "magna", "aliqua", "Ut", "enim", "ad", "minim",
+        "veniam", "quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi",
+        "ut", "aliquip", "ex", "ea", "commodo", "consequat", "Duis", "aute",
+        "irure", "dolor", "in", "reprehenderit", "in", "voluptate", "velit",
+        "esse", "cillum", "dolore", "eu", "fugiat", "nulla", "pariatur",
+        "Excepteur", "sint", "occaecat", "cupidatat", "non", "proident", "sunt",
+        "in", "culpa", "qui", "officia", "deserunt", "mollit", "anim", "id", "est",
+        "laborum"
+    ));
+    public static String testPushIndex() {
+        if (push_index_thread == null) {
+            push_index_start = new Date();
+            push_index_thread = new Thread() {
+                ProtoIndexSerialiser srl = ProtoIndexSerialiser.forIndex(push_index_endURI, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
+                ProtoIndex idx;
+                Random rand = new Random();
+
+                @Override public void run() {
+                    try {
+                        idx = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0);
+                    } catch (java.net.MalformedURLException e) {
+                        throw new AssertionError(e);
+                    }
+                    ProtoIndexComponentSerialiser.get().setSerialiserFor(idx);
+
+                    for (String key: push_index_words) {
+                        SkeletonBTreeSet<TermEntry> entries = new SkeletonBTreeSet<TermEntry>(ProtoIndex.BTREE_NODE_MIN);
+                        ProtoIndexComponentSerialiser.get().setSerialiserFor(entries);
+                        int n = rand.nextInt(0x200) + 0x200;
+                        for (int j=0; j<n; ++j) {
+                            entries.add(Generators.rndEntry(key));
+                        }
+                        idx.ttab.put(key, entries);
+                    }
+
+                    try {
+                        for (Map.Entry<String, SkeletonBTreeSet<TermEntry>> en: idx.ttab.entrySet()) {
+                            push_index_status = "Deflating entry " + en.getKey() + " (" + en.getValue().size() + " entries)";
+                            en.getValue().deflate();
+                        }
+                        push_index_status = "Deflating the term table";
+                        idx.ttab.deflate();
+                        PushTask<ProtoIndex> task1 = new PushTask<ProtoIndex>(idx);
+                        push_index_status = "Deflating the index";
+                        srl.push(task1);
+                        push_index_status = "Done!";
+                        push_index_endURI = (FreenetURI)task1.meta;
+                    } catch (TaskAbortException e) {
+                        push_index_error = e;
+                    }
+                }
+            };
+            push_index_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+                public void uncaughtException(Thread t, Throwable e) {
+                    push_index_error = e;
+                }
+            });
+            push_index_thread.start();
+        }
+
+        StringBuilder s = new StringBuilder();
+        s.append(PAGE_START);
+        s.append("<p>Pushing index with terms: ").append(push_index_words.toString()).append("</p>\n");
+        appendTimeElapsed(s, push_index_start);
+        s.append("<p>").append(push_index_status).append("</p>");
+        appendError(s, push_index_error);
+        appendResultURI(s, push_index_endURI);
+        s.append(PAGE_END);
+        return s.toString();
+    }
+
+    volatile static int push_phase;
+    static final List<FreenetURI> generatedURIs = new ArrayList<FreenetURI>();
+
+    public static String testPushAndMergeIndex() {
+
+        // Split it up.
+        int length = push_index_words.size();
+        final int divideInto = 5;
+        final TreeSet<String>[] phaseWords = new TreeSet[5];
+        for(int i=0;i<divideInto;i++)
+            phaseWords[i] = new TreeSet<String>();
+        int x = 0;
+        for(String s : push_index_words) {
+            phaseWords[x++].add(s.toLowerCase());
+            if(x == divideInto) x = 0;
+        }
+
+        if (push_index_thread == null) {
+            push_index_start = new Date();
+
+            final File cacheDir = new File("tester-cache-temp");
+            cacheDir.mkdir();
+            FreenetArchiver.setCacheDir(cacheDir);
+
+            push_index_thread = new Thread() {
+                ProtoIndexSerialiser srl = ProtoIndexSerialiser.forIndex(push_index_endURI, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
+                ProtoIndex idx;
+                Random rand = new Random();
+
+                @Override public void run() {
+
+                    try {
+                        idx = new ProtoIndex(new FreenetURI("CHK@"), "test", null, null, 0);
+                    } catch (java.net.MalformedURLException e) {
+                        throw new AssertionError(e);
+                    }
+                    ProtoIndexComponentSerialiser.get().setSerialiserFor(idx);
+
+                    try {
+                        for (Map.Entry<String, SkeletonBTreeSet<TermEntry>> en: idx.ttab.entrySet()) {
+                            push_index_status = "Deflating entry " + en.getKey() + " (" + en.getValue().size() + " entries)";
+                            en.getValue().deflate();
+                        }
+                        push_index_status = "Deflating the term table";
+                        idx.ttab.deflate();
+                        PushTask<ProtoIndex> task1 = new PushTask<ProtoIndex>(idx);
+                        push_index_status = "Deflating the index";
+                        srl.push(task1);
+                        push_index_status = "Done!";
+                        push_index_endURI = (FreenetURI)task1.meta;
+                        synchronized(generatedURIs) {
+                            generatedURIs.add(push_index_endURI);
+                        }
+                    } catch (TaskAbortException e) {
+                        push_index_error = e;
+                        return;
+                    }
+
+                    for(push_phase = 0; push_phase < divideInto; push_phase++) {
+
+                        // Merge new data in.
+
+                        long mergeStartTime = System.currentTimeMillis();
+                        // generate new set to merge
+                        final SortedSet<String> randAdd = phaseWords[push_phase];
+                        final Map<String, SortedSet<TermEntry>> newtrees = new HashMap<String, SortedSet<TermEntry>>();
+                        int entriesadded = 0;
+                        for (String k: randAdd) {
+                            SortedSet<TermEntry> set = new TreeSet<TermEntry>();
+                            entriesadded += fillEntrySet(k, set);
+                            newtrees.put(k, set);
+                        }
+
+                        // async merge
+                        Closure<Map.Entry<String, SkeletonBTreeSet<TermEntry>>, TaskAbortException> clo = new
+                        Closure<Map.Entry<String, SkeletonBTreeSet<TermEntry>>, TaskAbortException>() {
+                            /*@Override**/ public void invoke(Map.Entry<String, SkeletonBTreeSet<TermEntry>> entry) throws TaskAbortException {
+                                String key = entry.getKey();
+                                SkeletonBTreeSet<TermEntry> tree = entry.getValue();
+                                //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)"));
+                                if (tree == null) {
+                                    entry.setValue(tree = makeEntryTree());
+                                }
+                                assert(tree.isBare());
+                                tree.update(newtrees.get(key), null);
+                                assert(tree.isBare());
+                                //System.out.println("handled " + key);
+                            }
+                        };
+                        try {
+                            assert(idx.ttab.isBare());
+                            idx.ttab.update(randAdd, null, clo, new TaskAbortExceptionConvertor());
+                            assert(idx.ttab.isBare());
+                            PushTask<ProtoIndex> task4 = new PushTask<ProtoIndex>(idx);
+                            srl.push(task4);
+
+                            FreenetArchiver arch = (FreenetArchiver) srl.getChildSerialiser();
+                            arch.waitForAsyncInserts();
+
+                            long mergeEndTime = System.currentTimeMillis();
+
+                            System.out.print(entriesadded + " entries merged in " + (mergeEndTime-mergeStartTime) + " ms, root at " + task4.meta + ", ");
+                            push_index_status = "Done!";
+                            push_index_endURI = (FreenetURI)task4.meta;
+                            synchronized(generatedURIs) {
+                                generatedURIs.add(push_index_endURI);
+                            }
+                        } catch (TaskAbortException e) {
+                            push_index_error = e;
+                            return;
+                        }
+
+                    }
+
+                }
+            };
+            push_index_thread.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+                public void uncaughtException(Thread t, Throwable e) {
+                    push_index_error = e;
+                }
+            });
+            push_index_thread.start();
+        }
+
+        StringBuilder s = new StringBuilder();
+        s.append(PAGE_START);
+        s.append("<p>Pushing index with terms:</p><ul>\n");
+        for(int i=0;i<divideInto;i++)
+            s.append("<li>").append(phaseWords[i].toString()).append("</li>\n");
+        s.append("</ul>\n");
+        appendTimeElapsed(s, push_index_start);
+        s.append("<p>").append(push_index_status).append("</p>");
+        appendError(s, push_index_error);
+        appendResultURI(s, push_index_endURI);
+        synchronized(generatedURIs) {
+            s.append("<p>Phase: "+push_phase+" URIs generated: "+generatedURIs.size()+"</p><ul>");
+            for(FreenetURI uri : generatedURIs)
+                s.append("<li>"+uri.toASCIIString()+"</li>");
+            s.append("</ul>");
+        }
+        s.append(PAGE_END);
+        return s.toString();
+    }
+
+    protected static int fillEntrySet(String key, SortedSet<TermEntry> tree) {
+        int n = Generators.rand.nextInt(0x20) + 0x20;
+        for (int j=0; j<n; ++j) {
+            tree.add(Generators.rndEntry(key));
+        }
+        return n;
+    }
+
+    protected static SkeletonBTreeSet<TermEntry> makeEntryTree() {
+        SkeletonBTreeSet<TermEntry> tree = new SkeletonBTreeSet<TermEntry>(ProtoIndex.BTREE_NODE_MIN);
+        ProtoIndexComponentSerialiser.get().setSerialiserFor(tree);
+        return tree;
+    }
+
+    public static void appendError(StringBuilder s, Throwable th) {
+        if (th == null) { return; }
+        ByteArrayOutputStream bs = new ByteArrayOutputStream(0x1000);
+        th.printStackTrace(new PrintWriter(bs, true));
+        s.append("<pre>").append(bs.toString()).append("</pre>\n");
+    }
+
+    public static void appendTimeElapsed(StringBuilder s, Date start) {
+        if (start == null) { return; }
+        s.append("<p>").append(System.currentTimeMillis() - start.getTime()).append("ms elapsed</p>\n");
+    }
+
+    public static void appendResultURI(StringBuilder s, FreenetURI u) {
+        if (u != null) { s.append("<p>Result: ").append(u.toString()).append("</p>\n"); }
+    }
+
+
+}
diff --git a/src/test/java/plugins/Library/index/BIndexTest.java b/src/test/java/plugins/Library/index/BIndexTest.java
new file mode 100644
index 00000000..e3fadcf6
--- /dev/null
+++ b/src/test/java/plugins/Library/index/BIndexTest.java
@@ -0,0 +1,381 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.index;
+
+import junit.framework.TestCase;
+import static plugins.Library.util.Generators.rand;
+
+import plugins.Library.util.*;
+import plugins.Library.util.func.*;
+import plugins.Library.util.exec.*;
+import plugins.Library.io.serial.*;
+import plugins.Library.io.serial.Serialiser.*;
+import plugins.Library.index.*;
+
+import freenet.keys.FreenetURI;
+
+import java.util.*;
+import java.io.*;
+
+/**
+** TODO most of this code needs to go into WriteableIndex at some point.
+**
+** @author infinity0
+*/
+public class BIndexTest extends TestCase {
+
+    // minimum number of entries in a b-tree node.
+    final public static int node_size = 0x04;
+
+    // base number of keys in an index. the final size (after async-update) will be around double this.
+    final public static int index_size = 0x80;
+
+    // base number of entries for a key. the actual size (per key) varies between 1x-2x this.
+    final public static int entry_size = 0x20;
+
+    final public boolean extensive = Boolean.getBoolean("extensiveTesting");
+
+    final public static int it_full = 4;
+    final public static int it_partial = 2;
+    final public static boolean fuller = false;
+
+    // TODO HIGH code a test that stores a huge on-disk index with repeated calls to update()
+    // this could be done in the same test as the progress test
+    final public static boolean disabled_progress = true;
+
+    static {
+        ProtoIndex.BTREE_NODE_MIN = node_size;
+        System.out.println("ProtoIndex B-tree node_min set to " + ProtoIndex.BTREE_NODE_MIN);
+    }
+
+    long time = 0;
+
+    public long timeDiff() {
+        long oldtime = time;
+        time = System.currentTimeMillis();
+        return time - oldtime;
+    }
+
+    public BIndexTest() {
+        f = new File("BindexTest");
+        f.mkdir();
+        srl = ProtoIndexSerialiser.forIndex(f);
+        csrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, srl.getChildSerialiser());
+    }
+
+    private final File f;
+    private final ProtoIndexSerialiser srl;
+    private final ProtoIndexComponentSerialiser csrl;
+    private ProtoIndex idx;
+
+    Set<String> randomWords = new HashSet<String>(Arrays.asList(
+        "Lorem", "ipsum", "dolor", "sit", "amet,", "consectetur", "adipisicing",
+        "elit,", "sed", "do", "eiusmod", "tempor", "incididunt", "ut", "labore",
+        "et", "dolore", "magna", "aliqua.", "Ut", "enim", "ad", "minim",
+        "veniam,", "quis", "nostrud", "exercitation", "ullamco", "laboris", "nisi",
+        "ut", "aliquip", "ex", "ea", "commodo", "consequat.", "Duis", "aute",
+        "irure", "dolor", "in", "reprehenderit", "in", "voluptate", "velit",
+        "esse", "cillum", "dolore", "eu", "fugiat", "nulla", "pariatur.",
+        "Excepteur", "sint", "occaecat", "cupidatat", "non", "proident,", "sunt",
+        "in", "culpa", "qui", "officia", "deserunt", "mollit", "anim", "id", "est",
+        "laborum."
+    ));
+
+    protected void newTestSkeleton() {
+        try {
+            idx = new ProtoIndex(new FreenetURI("CHK@yeah"), "test", null, null, 0);
+        } catch (java.net.MalformedURLException e) {
+            assertTrue(false);
+        }
+        csrl.setSerialiserFor(idx);
+        timeDiff();
+    }
+
+    protected SkeletonBTreeSet<TermEntry> makeEntryTree() {
+        SkeletonBTreeSet<TermEntry> tree = new SkeletonBTreeSet<TermEntry>(ProtoIndex.BTREE_NODE_MIN);
+        csrl.setSerialiserFor(tree);
+        return tree;
+    }
+
+    protected int fillEntrySet(String key, SortedSet<TermEntry> tree) {
+        int n = rand.nextInt(entry_size) + entry_size;
+        for (int j=0; j<n; ++j) {
+            tree.add(Generators.rndEntry(key));
+        }
+        return n;
+    }
+
+    protected int fillRootTree(SkeletonBTreeMap<String, SkeletonBTreeSet<TermEntry>> tree) {
+        int total = 0;
+        for (int i=0; i<index_size; ++i) {
+            String key = Generators.rndKey();
+            SkeletonBTreeSet<TermEntry> entries = makeEntryTree();
+            total += fillEntrySet(key, entries);
+            tree.put(key, entries);
+        }
+        return total;
+    }
+
+    protected SortedSet<String> randomMixset(Set<String> set) {
+        SortedSet<String> sub = new TreeSet<String>();
+        for (String s: set) {
+            sub.add((rand.nextInt(2) == 0)? s: Generators.rndKey());
+        }
+        return sub;
+    }
+
+    public void fullInflateDeflateUpdate() throws TaskAbortException {
+        newTestSkeleton();
+        int totalentries = fillRootTree(idx.ttab);
+        System.out.print(totalentries + " entries generated in " + timeDiff() + " ms, ");
+
+        // make a copy of the tree
+        Map<String, SortedSet<TermEntry>> origtrees = new TreeMap<String, SortedSet<TermEntry>>();
+        for (Map.Entry<String, SkeletonBTreeSet<TermEntry>> en: idx.ttab.entrySet()) {
+            origtrees.put(en.getKey(), new TreeSet<TermEntry>(en.getValue()));
+        }
+        assertTrue(origtrees.equals(idx.ttab));
+
+        // full deflate
+        for (SkeletonBTreeSet<TermEntry> entries: idx.ttab.values()) {
+            entries.deflate();
+            assertTrue(entries.isBare());
+        }
+        idx.ttab.deflate();
+        assertTrue(idx.ttab.isBare());
+        assertFalse(idx.ttab.isLive());
+        PushTask<ProtoIndex> task1 = new PushTask<ProtoIndex>(idx);
+        srl.push(task1);
+        System.out.println("deflated in " + timeDiff() + " ms, root at " + task1.meta + ".");
+
+        if (fuller) {
+            // full inflate
+            PullTask<ProtoIndex> task2 = new PullTask<ProtoIndex>(task1.meta);
+            srl.pull(task2);
+            idx = task2.data;
+            idx.ttab.inflate();
+            assertTrue(idx.ttab.isLive());
+            assertFalse(idx.ttab.isBare());
+            System.out.print("inflated in " + timeDiff() + " ms, ");
+
+            // full deflate (1)
+            for (SkeletonBTreeSet<TermEntry> entries: idx.ttab.values()) {
+                // inflating the root tree does not automatically inflate the trees for
+                // each entry, so these should already be bare
+                assertTrue(entries.isBare());
+            }
+            idx.ttab.deflate();
+            assertTrue(idx.ttab.isBare());
+            assertFalse(idx.ttab.isLive());
+            PushTask<ProtoIndex> task3 = new PushTask<ProtoIndex>(idx);
+            srl.push(task3);
+            System.out.println("re-deflated in " + timeDiff() + " ms, root at " + task3.meta + ".");
+        }
+
+        // generate new set to merge
+        final SortedSet<String> randAdd = randomMixset(origtrees.keySet());
+        final Map<String, SortedSet<TermEntry>> newtrees = new HashMap<String, SortedSet<TermEntry>>();
+        int entriesadded = 0;
+        for (String k: randAdd) {
+            SortedSet<TermEntry> set = new TreeSet<TermEntry>();
+            entriesadded += fillEntrySet(k, set);
+            newtrees.put(k, set);
+        }
+
+        // async merge
+        Closure<Map.Entry<String, SkeletonBTreeSet<TermEntry>>, TaskAbortException> clo = new
+        Closure<Map.Entry<String, SkeletonBTreeSet<TermEntry>>, TaskAbortException>() {
+            /*@Override**/ public void invoke(Map.Entry<String, SkeletonBTreeSet<TermEntry>> entry) throws TaskAbortException {
+                String key = entry.getKey();
+                SkeletonBTreeSet<TermEntry> tree = entry.getValue();
+                //System.out.println("handling " + key + ((tree == null)? " (new)":" (old)"));
+                if (tree == null) {
+                    entry.setValue(tree = makeEntryTree());
+                }
+                assertTrue(tree.isBare());
+                tree.update(newtrees.get(key), null);
+                assertTrue(tree.isBare());
+                //System.out.println("handled " + key);
+            }
+        };
+        assertTrue(idx.ttab.isBare());
+        idx.ttab.update(randAdd, null, clo, new TaskAbortExceptionConvertor());
+        assertTrue(idx.ttab.isBare());
+        PushTask<ProtoIndex> task4 = new PushTask<ProtoIndex>(idx);
+        srl.push(task4);
+        System.out.print(entriesadded + " entries merged in " + timeDiff() + " ms, root at " + task4.meta + ", ");
+
+        // Iterate it.
+        Iterator<String> keys = idx.ttab.keySetAutoDeflate().iterator();
+        long count = 0;
+        String prev = null;
+        while(keys.hasNext()) {
+            count++;
+            String x = keys.next();
+            assertFalse("Inconsistent iteration: Previous was "+prev+" this is "+x, prev != null && x.compareTo(prev) <= 0);
+            prev = x;
+        }
+        assertTrue(count == idx.ttab.size());
+        System.out.println("Iterated keys, total is "+count+" size is "+idx.ttab.size());
+        assertTrue(idx.ttab.isBare());
+
+        // full inflate (2)
+        PullTask<ProtoIndex> task5 = new PullTask<ProtoIndex>(task4.meta);
+        srl.pull(task5);
+        idx = task5.data;
+        idx.ttab.inflate();
+        assertTrue(idx.ttab.isLive());
+        assertFalse(idx.ttab.isBare());
+        System.out.println("re-inflated in " + timeDiff() + " ms.");
+
+        // merge added trees into backup, and test against the stored version
+        for (Map.Entry<String, SortedSet<TermEntry>> en: newtrees.entrySet()) {
+            String key = en.getKey();
+            SortedSet<TermEntry> tree = origtrees.get(key);
+            if (tree == null) {
+                origtrees.put(key, tree = new TreeSet<TermEntry>());
+            }
+            tree.addAll(en.getValue());
+        }
+        System.out.println("validating merge. this will take a few minutes, mostly due to Thread.sleep(). just be patient :-)");
+        for (SkeletonBTreeSet<TermEntry> entries: idx.ttab.values()) {
+            // FIXME HIGH make some way of doing this in parallel, maybe
+            entries.inflate();
+            assertTrue(entries.isLive());
+        }
+        assertTrue(origtrees.equals(idx.ttab));
+        System.out.println("merge validated in " + timeDiff() + " ms.");
+
+        System.out.println("");
+
+    }
+
+    public void testBasicMulti() throws TaskAbortException {
+        if (!extensive) { return; }
+        for (int i=0; i<it_full; ++i) {
+            fullInflateDeflateUpdate();
+        }
+    }
+
+    public void testPartialInflate() throws TaskAbortException {
+        newTestSkeleton();
+        int totalentries = 0;
+        for (String key: randomWords) {
+            SkeletonBTreeSet<TermEntry> entries = new SkeletonBTreeSet<TermEntry>(ProtoIndex.BTREE_NODE_MIN);
+            csrl.setSerialiserFor(entries);
+
+            int n = rand.nextInt(0xF0) + 0x10;
+            totalentries += n;
+
+            for (int j=0; j<n; ++j) {
+                entries.add(Generators.rndEntry(key));
+            }
+            idx.ttab.put(key, entries);
+        }
+        System.out.print(totalentries + " entries generated in " + timeDiff() + " ms, ");
+
+        for (SkeletonBTreeSet<TermEntry> entries: idx.ttab.values()) {
+            entries.deflate();
+            assertTrue(entries.isBare());
+        }
+        idx.ttab.deflate();
+        assertTrue(idx.ttab.isBare());
+        assertFalse(idx.ttab.isLive());
+        PushTask<ProtoIndex> task = new PushTask<ProtoIndex>(idx);
+        srl.push(task);
+
+        System.out.print("deflated in " + timeDiff() + " ms, root at " + task.meta + ", ");
+
+        PullTask<ProtoIndex> tasq = new PullTask<ProtoIndex>(task.meta);
+        srl.pull(tasq);
+
+        for (String s: randomWords) {
+            //assertFalse(test.isLive()); // might be live if inflate(key) inflates some other keys too
+            idx.ttab.inflate(s);
+            idx.ttab.get(s);
+            assertFalse(idx.ttab.isBare());
+        }
+        assertTrue(idx.ttab.isLive());
+        assertFalse(idx.ttab.isBare());
+        System.out.println("inflated all terms separately in " + timeDiff() + " ms");
+
+    }
+
+    public void testPartialInflateMulti() throws TaskAbortException {
+        if (!extensive) { return; }
+        for (int i=0; i<it_partial; ++i) {
+            testPartialInflate();
+        }
+    }
+
+    public void testProgress() throws TaskAbortException {
+        if (disabled_progress) { return; }
+        newTestSkeleton();
+        int totalentries = 0;
+        String sterm = null;
+        for (String key: randomWords) {
+            SkeletonBTreeSet<TermEntry> entries = new SkeletonBTreeSet<TermEntry>(ProtoIndex.BTREE_NODE_MIN);
+            csrl.setSerialiserFor(entries);
+
+            int n = rand.nextInt(0x200) + 0x200;
+            totalentries += n;
+            if (sterm == null) { sterm = key; } // term to request back below
+
+            for (int j=0; j<n; ++j) {
+                entries.add(Generators.rndEntry(key));
+            }
+            idx.ttab.put(key, entries);
+        }
+
+        for (SkeletonBTreeSet<TermEntry> entries: idx.ttab.values()) {
+            entries.deflate();
+            assertTrue(entries.isBare());
+        }
+        idx.ttab.deflate();
+        assertTrue(idx.ttab.isBare());
+        assertFalse(idx.ttab.isLive());
+        PushTask<ProtoIndex> task = new PushTask<ProtoIndex>(idx);
+        srl.push(task);
+
+        System.out.print("deflated in " + timeDiff() + " ms, root at " + task.meta + ", ");
+        plugins.Library.io.serial.FileArchiver.setTestMode();
+
+        System.out.println("Requesting entries for term " + sterm);
+        Execution<Set<TermEntry>> rq1 = idx.getTermEntries(sterm);
+        Execution<Set<TermEntry>> rq2 = idx.getTermEntries(sterm);
+        Execution<Set<TermEntry>> rq3 = idx.getTermEntries(sterm);
+
+        assertTrue(rq1 == rq2);
+        assertTrue(rq2 == rq3);
+        assertTrue(rq3 == rq1);
+
+        Set<TermEntry> entries;
+        while ((entries = rq1.getResult()) == null) {
+            System.out.println(rq1.getStatus());
+            try { Thread.sleep(1000); } catch (InterruptedException x) { }
+        }
+
+        int count=0;
+        for (TermEntry en: entries) {
+            ++count;
+        }
+        assertTrue(count == entries.size());
+        System.out.println(count + " entries successfully got in " + rq1.getTimeElapsed() + "ms");
+
+    }
+
+}
diff --git a/src/test/java/plugins/Library/index/TermEntryTest.java b/src/test/java/plugins/Library/index/TermEntryTest.java
new file mode 100644
index 00000000..fec1256f
--- /dev/null
+++ b/src/test/java/plugins/Library/index/TermEntryTest.java
@@ -0,0 +1,121 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.index;
+
+import junit.framework.TestCase;
+
+import plugins.Library.io.serial.Serialiser.*;
+import plugins.Library.io.serial.FileArchiver;
+import plugins.Library.util.exec.TaskAbortException;
+import plugins.Library.io.serial.Packer;
+
+import plugins.Library.io.YamlReaderWriter;
+
+import freenet.keys.FreenetURI;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.UUID;
+import java.net.MalformedURLException;
+import java.io.*;
+
+/**
+** @author infinity0
+*/
+public class TermEntryTest extends TestCase {
+
+    final static TermTermEntry w = new TermTermEntry("test", 0.8f, "lol");
+    final static TermIndexEntry x;
+    final static TermPageEntry z;
+    final static TermPageEntry v;
+    static {
+        try {
+            x = new TermIndexEntry("test", 0.8f, new FreenetURI("CHK@MIh5-viJQrPkde5gmRZzqjBrqOuh~Wbjg02uuXJUzgM,rKDavdwyVF9Z0sf5BMRZsXj7yiWPFUuewoe0CPesvXE,AAIC--8"));
+            z = new TermPageEntry("lol", 0.8f, new FreenetURI("CHK@9eDo5QWLQcgSuDh1meTm96R4oE7zpoMBuV15jLiZTps,3HJaHbdW~-MtC6YsSkKn6I0DTG9Z1gKDGgtENhHx82I,AAIC--8"), null);
+            v = new TermPageEntry("lol", 0.8f, new FreenetURI("CHK@9eDo5QWLQcgSuDh1meTm96R4oE7zpoMBuV15jLiZTps,3HJaHbdW~-MtC6YsSkKn6I0DTG9Z1gKDGgtENhHx82I,AAIC--8"), "title", null);
+        } catch (MalformedURLException e) {
+            throw new AssertionError();
+        }
+    }
+    final static TermTermEntry y = new TermTermEntry("test", 0.8f, "lol2");
+
+    public void testBasic() throws TaskAbortException {
+        File f = new File("TermEntryTest");
+        f.mkdir();
+        FileArchiver<Map<String, Object>> ym = new FileArchiver<Map<String, Object>>(new YamlReaderWriter(), "test", null, ".yml", f);
+
+        Map<String, Object> map = new HashMap<String, Object>();
+
+        List<TermEntry> l = new ArrayList<TermEntry>();
+        l.add(w);
+        l.add(w);
+        l.add(x);
+        l.add(y);
+        l.add(z);
+        map.put("test", l);
+        try {
+            map.put("test2", new Packer.BinInfo(new FreenetURI("http://127.0.0.1:8888/CHK@WtWIvOZXLVZkmDrY5929RxOZ-woRpRoMgE8rdZaQ0VU,rxH~D9VvOOuA7bCnVuzq~eux77i9RR3lsdwVHUgXoOY,AAIC--8/Library.jar"), 123));
+        } catch (java.net.MalformedURLException e) {
+            assert(false);
+        }
+
+        ym.push(new PushTask<Map<String, Object>>(map));
+        PullTask<Map<String, Object>> pt = new PullTask<Map<String, Object>>("");
+        try{
+            ym.pull(pt);
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+
+        assertTrue(pt.data instanceof Map);
+        Map<String, Object> m = pt.data;
+        assertTrue(m.get("test") instanceof List);
+        List ll = (List)m.get("test");
+        assertTrue(ll.get(0) instanceof TermTermEntry);
+        assertTrue(ll.get(1) == ll.get(0));
+        // NOTE these tests fail in snakeYAML 1.2 and below, fixed in hg
+        assertTrue(ll.get(2) instanceof TermIndexEntry);
+        assertTrue(ll.get(3) instanceof TermTermEntry);
+
+        assertTrue(m.get("test2") instanceof Packer.BinInfo);
+        Packer.BinInfo inf = (Packer.BinInfo)m.get("test2");
+        assertTrue(inf.getID() instanceof FreenetURI);
+    }
+
+    public void testBinaryReadWrite() throws IOException, TaskAbortException {
+        TermEntryReaderWriter rw = TermEntryReaderWriter.getInstance();
+        ByteArrayOutputStream bo = new ByteArrayOutputStream();
+        DataOutputStream oo = new DataOutputStream(bo);
+        rw.writeObject(v, oo);
+        rw.writeObject(w, oo);
+        rw.writeObject(x, oo);
+        rw.writeObject(y, oo);
+        rw.writeObject(z, oo);
+        oo.close();
+        ByteArrayInputStream bi = new ByteArrayInputStream(bo.toByteArray());
+        DataInputStream oi = new DataInputStream(bi);
+        TermEntry v1 = rw.readObject(oi);
+        TermEntry w1 = rw.readObject(oi);
+        TermEntry x1 = rw.readObject(oi);
+        TermEntry y1 = rw.readObject(oi);
+        TermEntry z1 = rw.readObject(oi);
+        oi.close();
+        assertEqualButNotIdentical(v, v1);
+        assertEqualButNotIdentical(w, w1);
+        assertEqualButNotIdentical(x, x1); // this will fail before fred@a6e73dbbaa7840bd20d5e3fb95cd2c678a106e85
+        assertEqualButNotIdentical(y, y1);
+        assertEqualButNotIdentical(z, z1);
+    }
+
+    public static void assertEqualButNotIdentical(Object a, Object b) {
+        assertTrue(a != b);
+        assertTrue(a.equals(b));
+        assertTrue(a.hashCode() == b.hashCode());
+    }
+
+}
diff --git a/src/test/java/plugins/Library/io/serial/PackerTest.java b/src/test/java/plugins/Library/io/serial/PackerTest.java
new file mode 100644
index 00000000..7687f763
--- /dev/null
+++ b/src/test/java/plugins/Library/io/serial/PackerTest.java
@@ -0,0 +1,84 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io.serial;
+
+import junit.framework.TestCase;
+
+import plugins.Library.util.Generators;
+import plugins.Library.util.SkeletonTreeMap;
+import plugins.Library.util.exec.TaskAbortException;
+import plugins.Library.io.serial.Packer.Bin;
+import plugins.Library.io.serial.Serialiser.*;
+
+import java.util.Map;
+import java.util.List;
+import java.util.Iterator;
+import java.util.HashSet;
+import java.util.HashMap;
+
+/**
+** PRIORITY actually write some tests for this...
+**
+** @author infinity0
+*/
+public class PackerTest extends TestCase {
+
+    final static public int NODE_MAX = 64;
+
+    final public static Packer<String, HashSet<String>> srl = new Packer<String, HashSet<String>>(
+        new IterableSerialiser<Map<String, HashSet<String>>>() {
+
+            public void pull(Iterable<PullTask<Map<String, HashSet<String>>>> t) {}
+            public void push(Iterable<PushTask<Map<String, HashSet<String>>>> t) {
+                for (PushTask<Map<String, HashSet<String>>> task: t) {
+                    System.out.print("[");
+                    for (Map.Entry<String, HashSet<String>> en: task.data.entrySet()) {
+                        System.out.print(en.getKey() + ": " + en.getValue().size() + ", ");
+                    }
+                    System.out.println("]");
+                }
+            }
+
+            public void pull(PullTask<Map<String, HashSet<String>>> t) {}
+            public void push(PushTask<Map<String, HashSet<String>>> t) {}
+
+        },
+        new Packer.Scale<HashSet<String>>() {
+            @Override public int weigh(HashSet<String> elem) {
+                return elem.size();
+            }
+        },
+        NODE_MAX
+    );
+
+    protected Map<String, PushTask<HashSet<String>>> generateTasks(int[] sizes) {
+        String meta = "dummy metadata";
+        Map<String, PushTask<HashSet<String>>> tasks = new HashMap<String, PushTask<HashSet<String>>>();
+        for (int size: sizes) {
+            HashSet<String> hs = new HashSet<String>(size>>1);
+            for (int i=0; i<size; ++i) {
+                hs.add(Generators.rndStr());
+            }
+            tasks.put(Generators.rndStr(), new PushTask<HashSet<String>>(hs, meta));
+        }
+        return tasks;
+    }
+
+    public void testBasic() throws TaskAbortException {
+
+//        for (int i=0; i<16; ++i) {
+        // do this several times since random UUID changes the order of the task map
+
+        srl.push(generateTasks(new int[]{1,2,3,4,5}), null);
+        srl.push(generateTasks(new int[]{1,2,3,4,5,6,7,8,9,10,11,12}), null);
+        srl.push(generateTasks(new int[]{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}), null);
+
+//        }
+
+    }
+
+    // TODO write some more tests for this...
+
+
+}
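The Scale above gives Packer a weight for each element, and NODE_MAX caps the total weight of one bin. A rough standalone illustration of that capacity rule, using first-fit for brevity; this is not Packer's actual algorithm, and the BinSketch class is hypothetical:

    import java.util.ArrayList;
    import java.util.List;

    class BinSketch {
        static final int NODE_MAX = 64;

        // Place each weight into the first bin that still has room,
        // opening a new bin when none does; no bin exceeds NODE_MAX.
        static List<List<Integer>> pack(int[] weights) {
            List<List<Integer>> bins = new ArrayList<List<Integer>>();
            List<Integer> totals = new ArrayList<Integer>();
            for (int w : weights) {
                int i = 0;
                while (i < bins.size() && totals.get(i) + w > NODE_MAX) { ++i; }
                if (i == bins.size()) { bins.add(new ArrayList<Integer>()); totals.add(0); }
                bins.get(i).add(w);
                totals.set(i, totals.get(i) + w);
            }
            return bins;
        }
    }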
diff --git a/src/test/java/plugins/Library/io/serial/YamlMapTest.java b/src/test/java/plugins/Library/io/serial/YamlMapTest.java
new file mode 100644
index 00000000..414e8a21
--- /dev/null
+++ b/src/test/java/plugins/Library/io/serial/YamlMapTest.java
@@ -0,0 +1,123 @@
+/* This code is part of Freenet. It is distributed under the GNU General
+ * Public License, version 2 (or at your option any later version). See
+ * http://www.gnu.org/ for further details of the GPL. */
+package plugins.Library.io.serial;
+
+import junit.framework.TestCase;
+
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.error.YAMLException;
+import org.yaml.snakeyaml.Loader;
+import org.yaml.snakeyaml.Dumper;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.representer.Representer;
+import org.yaml.snakeyaml.representer.Represent;
+import org.yaml.snakeyaml.nodes.Node;
+import org.yaml.snakeyaml.nodes.ScalarNode;
+import org.yaml.snakeyaml.nodes.MappingNode;
+import org.yaml.snakeyaml.constructor.Constructor;
+import org.yaml.snakeyaml.constructor.Construct;
+import org.yaml.snakeyaml.constructor.ConstructorException;
+
+import java.io.*;
+import java.util.*;
+
+/**
+** @author infinity0
+*/
+public class YamlMapTest extends TestCase {
+
+    public void testYamlMap() throws IOException {
+        Map<String, Object> data = new TreeMap<String, Object>();
+        data.put("key1", new Bean());
+        data.put("key2", new Bean());
+        data.put("key3", new Custom("test"));
+        data.put("key4", new Wrapper("test", new Custom("test")));
+
+        Yaml yaml = new Yaml(new Loader(new ExtendedConstructor()),
+                             new Dumper(new ExtendedRepresenter(), new DumperOptions()));
+        File file = new File("beantest.yml");
+
+        FileOutputStream os = new FileOutputStream(file);
+        yaml.dump(data, new OutputStreamWriter(os));
+        os.close();
+
+        FileInputStream is = new FileInputStream(file);
+        Object o = yaml.load(new InputStreamReader(is));
+        is.close();
+
+        assertTrue(o instanceof Map);
+        Map m = (Map)o;
+        assertTrue(m.get("key1") instanceof Bean);
+        assertTrue(m.get("key2") instanceof Bean); // NOTE these tests fail in snakeYAML 1.2 and below, fixed in 1.3
+        assertTrue(m.get("key3") instanceof Custom);
+        assertTrue(m.get("key4") instanceof Wrapper);
+    }
+
+    public static class Bean {
+        private String a;
+        public Bean() { a = ""; }
+        public String getA() { return a; }
+        public void setA(String s) { a = s; }
+    }
+
+    public static class Wrapper {
+        private String a;
+        private Custom b;
+        public Wrapper(String s, Custom bb) { a = s; b = bb; }
+        public Wrapper() { }
+        public String getA() { return a; }
+        public void setA(String s) { a = s; }
+        public Custom getB() { return b; }
+        public void setB(Custom bb) { b = bb; }
+    }
+
+    public static class Custom {
+        final private String str;
+        public Custom(String s) {
+            str = s;
+        }
+        // the presence of this constructor causes 1.4-snapshot to fail
+        // fixed in 1.4-rc1
+        public Custom(Integer i) {
+            str = "";
+        }
+        // the absence of this constructor causes 1.3 to fail
+        // fixed in 1.4-rc1
+        public Custom(Custom c) {
+            str = c.str;
+        }
+        public String toString() { return str; }
+    }
+
+
+    public static class ExtendedRepresenter extends Representer {
+        public ExtendedRepresenter() {
+            this.representers.put(Custom.class, new RepresentCustom());
+        }
+
+        private class RepresentCustom implements Represent {
+            public Node representData(Object data) {
+                return representScalar("!Custom", ((Custom) data).toString());
+            }
+        }
+    }
+
+
+    public static class ExtendedConstructor extends Constructor {
+        public ExtendedConstructor() {
+            this.yamlConstructors.put("!Custom", new ConstructCustom());
+        }
+
+        private class ConstructCustom implements Construct {
+            public Object construct(Node node) {
+                String str = (String) constructScalar((ScalarNode)node);
+                return new Custom(str);
+            }
+            public void construct2ndStep(Node node, Object object) { }
+        }
+    }
+
+}
diff --git a/src/test/java/plugins/Library/util/BTreeMapTest.java b/src/test/java/plugins/Library/util/BTreeMapTest.java
new file
mode 100644 index 00000000..c555a59a --- /dev/null +++ b/src/test/java/plugins/Library/util/BTreeMapTest.java @@ -0,0 +1,97 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import junit.framework.TestCase; + +import java.util.*; + +/** +** @author infinity0 +*/ +public class BTreeMapTest extends SortedMapTestSkeleton { + + @Override public SortedMap makeTestMap() { + return new BTreeMap(0x40); + } + + final public static int sz0 = 0x400; + final public static int sz1 = sz0<<2; //sz0<<6; + + public void testBasic() { + + BTreeMap testmap = new BTreeMap(0x40); + Map backmap = new HashMap(); + try { + for (int i=0; i it = backmap.keySet().iterator(); + for (int i=0; i backmap = new TreeMap(); + for (int i=0; i testmap = new BTreeMap(2); + testmap.putAll(backmap); + testmap.verifyTreeIntegrity(); + //if (n<10) { System.out.println(testmap.toTreeString()); } + } + + } + + public void testNumericIndexes() { + + BTreeMap testmap = new BTreeMap(0x40); + int i=0; + + for (i=0; i ts = (new BTreeMap(0x40)).subSet(new TreeSet( + Arrays.asList("91665799", "93fc4806", "94ff78b2", "952225c8", "9678e897", "96bb4208", + "989c8d3f", "9a1f0877", "9faea63f", "9fec4192", "a19e7d4e", "a61a2c10", + "a6c681ec", "a72d4e4e", "a845d2cb")), "96bb4208", "9fec4192"); + assertTrue(ts.first().equals("989c8d3f")); + assertTrue(ts.last().equals("9faea63f")); + } + +} diff --git a/src/test/java/plugins/Library/util/BytePrefixKeyTest.java b/src/test/java/plugins/Library/util/BytePrefixKeyTest.java new file mode 100644 index 00000000..b951fb7b --- /dev/null +++ b/src/test/java/plugins/Library/util/BytePrefixKeyTest.java @@ -0,0 +1,25 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import junit.framework.TestCase; +import static plugins.Library.util.Generators.rand; + +import java.util.Random; +import java.util.Arrays; + +/** +** @author infinity0 +*/ +public class BytePrefixKeyTest extends TestCase { + + public void testBasic() { + for (int i=0; i<256; ++i) { + byte[] bs = new byte[32]; + rand.nextBytes(bs); + assertTrue(Arrays.equals(bs, BytePrefixKey.hexToBytes(BytePrefixKey.bytesToHex(bs)))); + } + } + +} diff --git a/src/test/java/plugins/Library/util/Generators.java b/src/test/java/plugins/Library/util/Generators.java new file mode 100644 index 00000000..70c97faf --- /dev/null +++ b/src/test/java/plugins/Library/util/Generators.java @@ -0,0 +1,43 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import junit.framework.TestCase; + +import plugins.Library.index.*; + +import freenet.keys.FreenetURI; + +import java.util.UUID; +import java.util.Random; +import java.net.MalformedURLException; + +/** +** @author infinity0 +*/ +final public class Generators { + + final public static Random rand; + + static { + long seed = System.currentTimeMillis(); + rand = new Random(seed); + System.err.println("Generators.rand initialised with seed " + seed); + } + + private Generators() {} + + public static String rndStr() { + return UUID.randomUUID().toString(); + } + + public static String rndKey() { + return rndStr().substring(0,8); + } + + public static TermPageEntry rndEntry(String key) { + return new TermPageEntry(key, (float)Math.random(), FreenetURI.generateRandomCHK(rand), null); + } + +} diff --git a/src/test/java/plugins/Library/util/IntegersTest.java b/src/test/java/plugins/Library/util/IntegersTest.java new file mode 100644 index 00000000..5b035517 --- /dev/null +++ b/src/test/java/plugins/Library/util/IntegersTest.java @@ -0,0 +1,71 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import junit.framework.TestCase; +import static plugins.Library.util.Generators.rand; + +import java.util.Random; +import java.util.Arrays; + +/** +** @author infinity0 +*/ +public class IntegersTest extends TestCase { + + private int[] totals = { + 10, + 6, + 10 + }; + private int[] nums = { + 5, + 4, + 6 + }; + private int[][] shares = { + {2,2,2,2,2}, + {1,2,1,2}, + {2,1,2,2,1,2} + }; + + public void testAllocateEvenlyPredefined() { + for (int i=0; i it = Integers.allocateEvenly(total, num); + //System.out.println(it); + + int j=0; + for (Integer ii: it) { + share[j++] = ii; + } + + int k = total / num; + int r = total % num; + Arrays.sort(share); + assertTrue(share[0] == k); + assertTrue(share[num-r-1] == k); + if (r > 0) { + assertTrue(share[num-r] == k+1); + assertTrue(share[num-1] == k+1); + } + } + } + +} diff --git a/src/test/java/plugins/Library/util/SkeletonTreeMapTest.java b/src/test/java/plugins/Library/util/SkeletonTreeMapTest.java new file mode 100644 index 00000000..5573a2e1 --- /dev/null +++ b/src/test/java/plugins/Library/util/SkeletonTreeMapTest.java @@ -0,0 +1,126 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import junit.framework.TestCase; + +import java.util.Map; +import java.util.SortedMap; + +/** +** @author infinity0 +*/ +public class SkeletonTreeMapTest extends SortedMapTestSkeleton { + + SkeletonTreeMap skelmap; + + protected void setUp() { + skelmap = new SkeletonTreeMap(); + for (int i=0; i<1024; ++i) { + skelmap.putGhost(Generators.rndKey(), Boolean.FALSE); + } + } + + protected void tearDown() { + } + + public void fillSkelMap() { + for (String s: skelmap.keySet()) { + skelmap.put(s, 123); + } + } + + @Override public SortedMap makeTestMap() { + return new SkeletonTreeMap(); + } + + public void testBasic() { + skelmap = new SkeletonTreeMap(); + skelmap.putGhost("l0l", Boolean.FALSE); + skelmap.putGhost("l1l", Boolean.FALSE); + skelmap.putGhost("l2l", Boolean.FALSE); + + assertTrue(skelmap.firstKey().equals("l0l")); + assertTrue(skelmap.lastKey().equals("l2l")); + + try { + assertTrue(skelmap.get("zzz") == null); + assertTrue(skelmap.get("123") == null); + skelmap.get("l0l"); + } catch (DataNotLoadedException e) { + assertTrue(e.getParent() == skelmap); + assertTrue(e.getKey().equals("l0l")); + } + + skelmap.put("l0l", new Integer(123)); + assertTrue(skelmap.get("l0l") == 123); + + assertTrue(skelmap.size() == 3); + assertTrue(skelmap.remove("l0l") == 123); + assertTrue(skelmap.size() == 2); + + try { + skelmap.get("l1l"); + } catch (DataNotLoadedException e) { + assertTrue(e.getParent() == skelmap); + assertTrue(e.getKey().equals("l1l")); + } + + } + + public void testIncompleteEntrySet() { + try { + for (Map.Entry en: skelmap.entrySet()) { + assertTrue(skelmap.entrySet().contains(en)); + } + } catch (DataNotLoadedException e) { + assertTrue(skelmap.firstKey() == e.getKey()); + } + + skelmap.put(skelmap.firstKey(), 123); + try { + for (Map.Entry en: skelmap.entrySet()) { + assertTrue(skelmap.entrySet().contains(en)); + } + } catch (DataNotLoadedException e) { + assertTrue(skelmap.firstKey() != e.getKey()); + } + + fillSkelMap(); + for (Map.Entry en: skelmap.entrySet()) { + assertTrue(skelmap.entrySet().contains(en)); + } + } + + public void testIncompleteValues() { + int i = 0; + + try { + for (Integer en: skelmap.values()) { + assertTrue(skelmap.values().contains(en)); + ++i; + } + } catch (DataNotLoadedException e) { + assertTrue(e.getValue() != null); + assertTrue(i == 0); + } + + skelmap.put(skelmap.firstKey(), 123); + i = 0; + try { + for (Integer en: skelmap.values()) { + assertTrue(skelmap.values().contains(en)); + ++i; + } + } catch (DataNotLoadedException e) { + assertTrue(i == 1); + } + + fillSkelMap(); + for (Integer en: skelmap.values()) { + assertTrue(skelmap.values().contains(en)); + } + } + +} diff --git a/src/test/java/plugins/Library/util/SortedArraySetTest.java b/src/test/java/plugins/Library/util/SortedArraySetTest.java new file mode 100644 index 00000000..3c1a2271 --- /dev/null +++ b/src/test/java/plugins/Library/util/SortedArraySetTest.java @@ -0,0 +1,57 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import junit.framework.TestCase; + +import java.util.SortedSet; + +/** +** @author infinity0 +*/ +public class SortedArraySetTest extends TestCase { + + SortedArraySet marr = new SortedArraySet(new Integer[]{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}); + + public void testSubsets() { + assertTrue(marr.first() == 0); + assertTrue(marr.last() == 15); + assertTrue(marr.size() == 16); + + SortedSet head = marr.headSet(8); + SortedSet tail = marr.tailSet(8); + SortedSet sub = marr.subSet(4, 12); + + assertTrue(head.first() == 0); + assertTrue(head.last() == 7); + assertTrue(head.size() == 8); + + assertTrue(tail.first() == 8); + assertTrue(tail.last() == 15); + assertTrue(tail.size() == 8); + + assertTrue(sub.first() == 4); + assertTrue(sub.last() == 11); + assertTrue(sub.size() == 8); + } + + public void testNoDuplicates() { + try { + SortedArraySet arr = new SortedArraySet(new Integer[]{0,1,1,2,3,4,5,6,7,8,9,10,11,12,13,14}); + fail("failed to detect an array with duplicates in"); + } catch (RuntimeException e) { + assertTrue(e instanceof IllegalArgumentException); + } + } + + public void testSortedDetection() { + try { + SortedArraySet arr = new SortedArraySet(new Integer[]{0,1,3,2,4,5,6,7,8,9,10,11,12,13,14,15}, null, true); + fail("failed to detect an unsorted array"); + } catch (RuntimeException e) { + assertTrue(e instanceof IllegalArgumentException); + } + } + +} diff --git a/src/test/java/plugins/Library/util/SortedMapTestSkeleton.java b/src/test/java/plugins/Library/util/SortedMapTestSkeleton.java new file mode 100644 index 00000000..daf687f7 --- /dev/null +++ b/src/test/java/plugins/Library/util/SortedMapTestSkeleton.java @@ -0,0 +1,141 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. 
*/ +package plugins.Library.util; + +import junit.framework.TestCase; + +import java.util.Map; +import java.util.SortedMap; +import java.util.Iterator; + +/** +** TODO maybe make some tailMap, headMap, subMap tests +** +** @author infinity0 +*/ +abstract public class SortedMapTestSkeleton extends TestCase { + + protected SortedMap testmap; + + abstract protected SortedMap makeTestMap(); + + public void fillTestMap() { + testmap = makeTestMap(); + for (int i=0; i<0x1000; ++i) { + testmap.put(Generators.rndStr(), Generators.rand.nextInt()); + } + } + + public void testEntrySet() { + // test that entrySet is properly backed by the map + fillTestMap(); + assertTrue(testmap.entrySet() == testmap.entrySet()); + assertTrue(testmap.size() == testmap.entrySet().size()); + testmap.put(testmap.firstKey(), 123); + Map.Entry entry = null; + for (Map.Entry en: testmap.entrySet()) { entry = en; break; } + assertTrue(testmap.entrySet().contains(entry)); + entry.setValue(124); + assertTrue(testmap.get(testmap.firstKey()) == 124); + assertTrue(testmap.entrySet().contains(entry)); + + int s = testmap.size(), i = 0; + for (Map.Entry en: testmap.entrySet()) { ++i; } + assertTrue(s == i); + i = 0; + + Map.Entry e = null; + while (testmap.size() > 0) { + // get first entry + for (Map.Entry en: testmap.entrySet()) { e = en; break; } + assertTrue(e.getKey() == testmap.firstKey()); + testmap.entrySet().remove(e); + ++i; + } + assertTrue(s == i); + } + + public void testKeySet() { + // test that keySet is properly backed by the map + fillTestMap(); + assertTrue(testmap.keySet() == testmap.keySet()); + assertTrue(testmap.size() == testmap.keySet().size()); + int s = testmap.size(), i = 0; + for (String en: testmap.keySet()) { ++i; } + assertTrue(s == i); + i = 0; + String e = null; + while (testmap.size() > 0) { + // get first entry + for (String en: testmap.keySet()) { e = en; break; } + assertTrue(e.equals(testmap.firstKey())); + testmap.keySet().remove(e); + ++i; + } + assertTrue(s == i); + } + + public void testValues() { + // test that values is properly backed by the map + fillTestMap(); + assertTrue(testmap.values() == testmap.values()); + assertTrue(testmap.size() == testmap.values().size()); + int s = testmap.size(), i = 0; + for (Integer en: testmap.values()) { ++i; } + assertTrue(s == i); + i = 0; + Integer e = null; + while (testmap.size() > 0) { + // get first entry + for (Integer en: testmap.values()) { e = en; break; } + assertTrue(e.equals(testmap.get(testmap.firstKey()))); + testmap.values().remove(e); + ++i; + } + assertTrue(s == i); + } + + public void testEntrySetIterator() { + // test that entrySet.iterator is properly backed by the map + fillTestMap(); + Iterator> it = testmap.entrySet().iterator(); + int s=testmap.size(), i=0; + while (it.hasNext()) { + assertTrue(it.next().getKey() == testmap.firstKey()); + it.remove(); + ++i; + } + assertTrue(i == s); + assertTrue(testmap.size() == 0); + } + + public void testKeySetIterator() { + // test that keySet.iterator is properly backed by the map + fillTestMap(); + Iterator it = testmap.keySet().iterator(); + int s=testmap.size(), i=0; + while (it.hasNext()) { + assertTrue(it.next().equals(testmap.firstKey())); + it.remove(); + ++i; + } + assertTrue(i == s); + assertTrue(testmap.size() == 0); + } + + public void testValuesIterator() { + // test that values.iterator is properly backed by the map + fillTestMap(); + Iterator it = testmap.values().iterator(); + int s=testmap.size(), i=0; + while (it.hasNext()) { + 
assertTrue(it.next().equals(testmap.get(testmap.firstKey()))); + it.remove(); + ++i; + } + assertTrue(i == s); + assertTrue(testmap.size() == 0); + } + +} diff --git a/src/test/java/plugins/Library/util/SortedTest.java b/src/test/java/plugins/Library/util/SortedTest.java new file mode 100644 index 00000000..2dffc627 --- /dev/null +++ b/src/test/java/plugins/Library/util/SortedTest.java @@ -0,0 +1,190 @@ +/* This code is part of Freenet. It is distributed under the GNU General + * Public License, version 2 (or at your option any later version). See + * http://www.gnu.org/ for further details of the GPL. */ +package plugins.Library.util; + +import junit.framework.TestCase; +import static plugins.Library.util.Generators.rand; + +import plugins.Library.util.Sorted.Inclusivity; + +import java.util.Random; +import java.util.Iterator; +import java.util.Arrays; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +/** +** @author infinity0 +*/ +public class SortedTest extends TestCase { + + final public static int rruns = 0x80; + final public static int rsizelow = 0x40; // must be > 8 + + private Integer[] split_sep = { 8, 16, 24, 32 }; + private Integer[][] split_subj = { + {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}, + {4,5,6,7,8,9,10,11,12}, + {19,24,25}, + {17,19,21}, + {16,24}, + {4,12,20,28,36}, + {12,20,28}, + {4,8,12,16,20,24,28,32,36} + }; + private Integer[][][] split_res = { + {{1,7},{9,15}}, + {{4,7},{9,12}}, + {{19,19},{25,25}}, + {{17,21}}, + {}, + {{4,4},{12,12},{20,20},{28,28},{36,36}}, + {{12,12},{20,20},{28,28}}, + {{4,4},{12,12},{20,20},{28,28},{36,36}}, + }; + private Integer[][] split_fsep = { + {8}, + {8}, + {24}, + {}, + {16,24}, + {}, + {}, + {8,16,24,32} + }; + + public void testSplitPredefined() { + SortedSet sep = new TreeSet(Arrays.asList(split_sep)); + + for (int i=0; i fsep = new TreeSet(); + List> res = Sorted.split(new TreeSet(Arrays.asList(split_subj[i])), sep, fsep); + + Iterator> rit = res.iterator(); + for (int j=0; j range = rit.next(); + assertTrue(range.first().equals(split_res[i][j][0])); + assertTrue(range.last().equals(split_res[i][j][1])); + } + assertFalse(rit.hasNext()); + + assertTrue(Arrays.deepEquals(split_fsep[i], fsep.toArray())); + Iterator fit = fsep.iterator(); + for (int j=0; j res = Sorted.select(new TreeSet(Arrays.asList(select_subj[i])), 2, incs[j]); + assertTrue(Arrays.deepEquals(select_res[i][j], res.toArray())); + } + } + } + + public void verifySplit(SortedSet subj, SortedSet sep) { + // FIXME LOW this method assumes comparator is consistent with equals() and + // there are no null entries + SortedSet fsep = new TreeSet(); + List> subs = null; + try { + subs = Sorted.split(subj, sep, fsep); + } catch (Error e) { + System.out.println(subj); + System.out.println(sep); + throw e; + } + + try { + Iterator> lit = subs.iterator(); + Iterator pit = fsep.iterator(); + Iterator subit = null; + String cursep = pit.hasNext()? pit.next(): null; + for (Iterator it = subj.iterator(); it.hasNext(); ) { + String key = it.next(); + if (subit != null && subit.hasNext()) { + assertTrue(key.equals(subit.next())); + continue; + } + if (key.equals(cursep)) { + cursep = pit.hasNext()? 
pit.next(): null; + continue; + } + assertTrue(lit.hasNext()); + subit = lit.next().iterator(); + assertTrue(subit.hasNext()); + assertTrue(key.equals(subit.next())); + } + assertFalse(lit.hasNext()); + assertFalse(pit.hasNext()); + } catch (Error e) { + System.out.println(subj); + System.out.println(sep); + System.out.println(fsep); + System.out.println(subs); + throw e; + } + } + + protected void randomSelectSplit(int n, int k) { + SortedSet subj = new TreeSet(); + for (int i=0; i sep = new TreeSet(); + List candsep = Sorted.select(subj, k); + assertTrue(candsep.size() == k); + for (String key: candsep) { + sep.add((rand.nextInt(2) == 0)? key: Generators.rndKey()); + } + assertTrue(sep.size() == k); + + verifySplit(subj, sep); + } + + public void testRandomSelectSplit() { + int f = 0x10; + for (int n=0; n void testGenericSweeper(S sw1, S sw2) { + testAfterNew(sw1); + testCloseClear(sw1); + testAfterClear(sw1); + + testAfterNew(sw2); + testReleaseClear(sw2); + testAfterClear(sw2); + } + + public void testIterableSweeper(S sw1, S sw2) { + testAfterNew(sw1); + testCloseClearIterable(sw1); + testAfterClear(sw1); + + testAfterNew(sw2); + testReleaseClearIterable(sw2); + testAfterClear(sw2); + } + + @SuppressWarnings("unchecked") + public void testAfterNew(final Sweeper sw) { + assertTrue(sw.getState() == Sweeper.State.NEW); + testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when not open"); + testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[1]); } }, "acquiring when not open"); + testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[2]); } }, "acquiring when not open"); + testIllegalStateException(new Runnable() { public void run() { sw.release(testobj[0]); } }, "releasing when not open"); + testIllegalStateException(new Runnable() { public void run() { sw.close(); } }, "closing when not open"); + assertTrue(sw.size() == 0); + assertTrue(sw.getState() == Sweeper.State.NEW); + sw.open(); + testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening twice"); + assertTrue(sw.getState() == Sweeper.State.OPEN); + } + + @SuppressWarnings("unchecked") + public void testCloseClear(final Sweeper sw) { + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.acquire(testobj[0]); + sw.acquire(testobj[1]); + sw.acquire(testobj[2]); + sw.release(testobj[0]); + sw.release(testobj[1]); + sw.release(testobj[2]); + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.acquire(testobj[0]); + sw.acquire(testobj[1]); + sw.acquire(testobj[2]); + sw.release(testobj[0]); + sw.release(testobj[1]); + sw.release(testobj[2]); + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.close(); + assertTrue(sw.getState() == Sweeper.State.CLEARED); + } + + @SuppressWarnings("unchecked") + public void testCloseClearIterable(final S sw) { + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.acquire(testobj[0]); + sw.acquire(testobj[1]); + sw.acquire(testobj[2]); + for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.acquire(testobj[0]); + sw.acquire(testobj[1]); + sw.acquire(testobj[2]); + for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); } + assertTrue(sw.getState() == Sweeper.State.OPEN); + sw.close(); + assertTrue(sw.getState() == Sweeper.State.CLEARED); + } + + @SuppressWarnings("unchecked") + public void testReleaseClear(final Sweeper sw) { + assertTrue(sw.getState() == 
+	public <S extends Sweeper> void testGenericSweeper(S sw1, S sw2) {
+		testAfterNew(sw1);
+		testCloseClear(sw1);
+		testAfterClear(sw1);
+
+		testAfterNew(sw2);
+		testReleaseClear(sw2);
+		testAfterClear(sw2);
+	}
+
+	public <S extends Sweeper & Iterable> void testIterableSweeper(S sw1, S sw2) {
+		testAfterNew(sw1);
+		testCloseClearIterable(sw1);
+		testAfterClear(sw1);
+
+		testAfterNew(sw2);
+		testReleaseClearIterable(sw2);
+		testAfterClear(sw2);
+	}
+
+	@SuppressWarnings("unchecked")
+	public void testAfterNew(final Sweeper sw) {
+		assertTrue(sw.getState() == Sweeper.State.NEW);
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when not open");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[1]); } }, "acquiring when not open");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[2]); } }, "acquiring when not open");
+		testIllegalStateException(new Runnable() { public void run() { sw.release(testobj[0]); } }, "releasing when not open");
+		testIllegalStateException(new Runnable() { public void run() { sw.close(); } }, "closing when not open");
+		assertTrue(sw.size() == 0);
+		assertTrue(sw.getState() == Sweeper.State.NEW);
+		sw.open();
+		testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening twice");
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+	}
+
+	@SuppressWarnings("unchecked")
+	public void testCloseClear(final Sweeper sw) {
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		sw.release(testobj[0]);
+		sw.release(testobj[1]);
+		sw.release(testobj[2]);
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		sw.release(testobj[0]);
+		sw.release(testobj[1]);
+		sw.release(testobj[2]);
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.close();
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+	}
+
+	@SuppressWarnings("unchecked")
+	public <S extends Sweeper & Iterable> void testCloseClearIterable(final S sw) {
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); }
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); }
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.close();
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+	}
+
+	@SuppressWarnings("unchecked")
+	public void testReleaseClear(final Sweeper sw) {
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		sw.release(testobj[0]);
+		sw.release(testobj[1]);
+		sw.release(testobj[2]);
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.close();
+		assertTrue(sw.getState() == Sweeper.State.CLOSED);
+		testAfterClose(sw);
+		sw.release(testobj[0]);
+		sw.release(testobj[1]);
+		sw.release(testobj[2]);
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+	}
+
+	@SuppressWarnings("unchecked")
+	public <S extends Sweeper & Iterable> void testReleaseClearIterable(final S sw) {
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); }
+		assertTrue(sw.getState() == Sweeper.State.OPEN);
+		sw.acquire(testobj[0]);
+		sw.acquire(testobj[1]);
+		sw.acquire(testobj[2]);
+		sw.close();
+		assertTrue(sw.getState() == Sweeper.State.CLOSED);
+		testAfterClose(sw);
+		for (Iterator it = sw.iterator(); it.hasNext();) { it.next(); it.remove(); }
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+	}
+
+	@SuppressWarnings("unchecked")
+	public void testAfterClose(final Sweeper sw) {
+		assertTrue(sw.getState() == Sweeper.State.CLOSED);
+		int s = sw.size();
+		assertTrue(s > 0);
+		testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening when closed");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when closed");
+		assertTrue(sw.getState() == Sweeper.State.CLOSED);
+		assertTrue(sw.size() == s);
+	}
+
+	@SuppressWarnings("unchecked")
+	public void testAfterClear(final Sweeper sw) {
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+		testIllegalStateException(new Runnable() { public void run() { sw.open(); } }, "opening when cleared");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[0]); } }, "acquiring when cleared");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[1]); } }, "acquiring when cleared");
+		testIllegalStateException(new Runnable() { public void run() { sw.acquire(testobj[2]); } }, "acquiring when cleared");
+		testIllegalStateException(new Runnable() { public void run() { sw.release(testobj[0]); } }, "releasing when cleared");
+		testIllegalStateException(new Runnable() { public void run() { sw.close(); } }, "closing when cleared");
+		assertTrue(sw.size() == 0);
+		assertTrue(sw.getState() == Sweeper.State.CLEARED);
+	}
+
+	public void testIllegalStateException(Runnable r, String s) {
+		try {
+			r.run();
+			fail("Failed to throw correct exception when: " + s);
+		} catch (IllegalStateException e) {
+			/* correct */
+		} catch (RuntimeException e) {
+			fail("Failed to throw correct exception when: " + s);
+		}
+	}
+
+}

From 8077fa6f63aedc4c0a5fa109a8619fe8b8d4f8ac Mon Sep 17 00:00:00 2001
From: DC*
Date: Fri, 14 Aug 2020 23:31:41 -0300
Subject: [PATCH 04/10] Update git ignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 2e5012c9..6198c6be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,4 @@ lib
 tmp
 run-test
 /.gradle
+.idea

From a8a3b0c5a05b7d44af12a8279ad015f6781b9c10 Mon Sep 17 00:00:00 2001
From: DC*
Date: Fri, 14 Aug 2020 23:47:27 -0300
Subject: [PATCH 05/10] Use temp files and path rather than duplicate
 directory structure
---
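Note: these tests previously created fixed-name artefacts ("BindexTest",
"TermEntryTest", "beantest.yml") in the current working directory and left
them behind after a run; per-run temporary paths keep runs isolated. A
minimal sketch of the pattern (the prefix is illustrative):

    // one disposable directory per test run; throws IOException on failure
    java.io.File dir = java.nio.file.Files.createTempDirectory("LibraryTest").toFile();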
 src/test/java/plugins/Library/index/BIndexTest.java    | 10 +++++-----
 src/test/java/plugins/Library/index/TermEntryTest.java |  8 +++++---
 .../java/plugins/Library/io/serial/YamlMapTest.java    |  2 +-
 3 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/src/test/java/plugins/Library/index/BIndexTest.java b/src/test/java/plugins/Library/index/BIndexTest.java
index e3fadcf6..27af5bdd 100644
--- a/src/test/java/plugins/Library/index/BIndexTest.java
+++ b/src/test/java/plugins/Library/index/BIndexTest.java
@@ -57,11 +57,11 @@ public long timeDiff() {
 		return time - oldtime;
 	}
 
-	public BIndexTest() {
-		f = new File("BindexTest");
-		f.mkdir();
-		srl = ProtoIndexSerialiser.forIndex(f);
-		csrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, srl.getChildSerialiser());
+	public BIndexTest() throws IOException {
+		f = File.createTempFile("tmp", "BindexTest");
+		if (!(f.delete() && f.mkdir())) throw new IOException("could not create temp dir " + f);
+		srl = ProtoIndexSerialiser.forIndex(f);
+		csrl = ProtoIndexComponentSerialiser.get(ProtoIndexComponentSerialiser.FMT_FILE_LOCAL, srl.getChildSerialiser());
 	}
 
 	private final File f;
diff --git a/src/test/java/plugins/Library/index/TermEntryTest.java b/src/test/java/plugins/Library/index/TermEntryTest.java
index fec1256f..4e6c9516 100644
--- a/src/test/java/plugins/Library/index/TermEntryTest.java
+++ b/src/test/java/plugins/Library/index/TermEntryTest.java
@@ -14,6 +14,8 @@
 
 import freenet.keys.FreenetURI;
 
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.List;
 import java.util.ArrayList;
@@ -44,9 +46,9 @@ public class TermEntryTest extends TestCase {
 	}
 	final static TermTermEntry y = new TermTermEntry("test", 0.8f, "lol2");
 
-	public void testBasic() throws TaskAbortException {
-		File f = new File("TermEntryTest");
-		f.mkdir();
+	public void testBasic() throws TaskAbortException, IOException {
+		Path p = Files.createTempDirectory("TermEntryTest");
+		File f = p.toFile();
 		FileArchiver<Map<String, Object>> ym = new FileArchiver<Map<String, Object>>(new YamlReaderWriter(), "test", null, ".yml", f);
 
 		Map<String, Object> map = new HashMap<String, Object>();
diff --git a/src/test/java/plugins/Library/io/serial/YamlMapTest.java b/src/test/java/plugins/Library/io/serial/YamlMapTest.java
index 414e8a21..b001409a 100644
--- a/src/test/java/plugins/Library/io/serial/YamlMapTest.java
+++ b/src/test/java/plugins/Library/io/serial/YamlMapTest.java
@@ -37,7 +37,7 @@ public void testYamlMap() throws IOException {
 		Yaml yaml = new Yaml(new Loader(new ExtendedConstructor()),
 			new Dumper(new ExtendedRepresenter(), new DumperOptions()));
 
-		File file = new File("beantest.yml");
+		File file = File.createTempFile("tmp", "beantest.yml");
 		FileOutputStream os = new FileOutputStream(file);
 		yaml.dump(data, new OutputStreamWriter(os));

From cec221f86751e33dcd4d6b8fd0a5f323c97ce529 Mon Sep 17 00:00:00 2001
From: DC*
Date: Fri, 14 Aug 2020 23:53:35 -0300
Subject: [PATCH 06/10] Add version build

---
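Note: stamping Built-Date with the mtime of Version.java rather than the
wall clock makes the manifest a function of the sources alone, so
rebuilding an unchanged tree should reproduce the same jar byte for byte;
the SHA-256 printed for each produced archive at the end of the build is
what makes that cheap to check.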
 build.gradle | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

diff --git a/build.gradle b/build.gradle
index 709d745e..0ba880bb 100644
--- a/build.gradle
+++ b/build.gradle
@@ -52,6 +52,16 @@ application {
     mainClassName = 'plugins.Library.ui.TestInterface'
 }
 
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.attribute.BasicFileAttributes;
+
+Date getMTime(String file) {
+    return new Date(Files.readAttributes(Paths.get(file), BasicFileAttributes.class).lastModifiedTime().toMillis());
+}
+
 jar {
     manifest {
         attributes 'Plugin-Main-Class': 'plugins.Library.Main',
@@ -59,7 +69,7 @@ jar {
                'Required-Node-Version': '1239',
                'Implementation-Version': version,
                'Built-By': System.getProperty('user.name'),
-               'Built-Date': new Date(),
+               'Built-Date': getMTime("src/main/java/plugins/Library/Version.java"),
                'Built-JDK': System.getProperty('java.version')
     }
     from('src') {
@@ -77,6 +87,30 @@ jar {
     archiveName = 'freenet-Library.jar'
 }
 
+def jars = []
+gradle.addListener(new TaskExecutionListener() {
+    void afterExecute(Task task, TaskState state) {
+        if(task in AbstractArchiveTask) {
+            jars << task.outputs.files.singleFile
+        }
+    }
+
+    void beforeExecute(Task task) { }
+})
+gradle.addBuildListener(new BuildAdapter() {
+    void buildFinished(BuildResult result) {
+        if(jars) {
+            def hash = {
+                File file -> def sha256 = java.security.MessageDigest.getInstance('SHA-256')
+                file.eachByte(1024 * 4) { buffer, len -> sha256.update(buffer, 0, len) }
+                println "SHA-256 of ${file.name}: ${sha256.digest().encodeHex().toString()}"
+            }
+
+            jars.each { hash(it) }
+        }
+    }
+})
+
 publishing {
     publications {
         mavenJava(MavenPublication) {

From 4bf1a9635c3d56e0745b934ef4b85a25adcefd03 Mon Sep 17 00:00:00 2001
From: DC*
Date: Tue, 1 Sep 2020 00:48:32 -0300
Subject: [PATCH 07/10] Update gradle configuration

---
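Note: with the witness plugin, each 'group:name:sha256' entry pins a
resolved dependency to the lowercase-hex SHA-256 of its artifact; the
build fails if a fetched jar does not match its pin. When a dependency is
bumped the digest has to be recomputed from the new jar, e.g. (a sketch;
the path is illustrative, not part of this patch):

    byte[] bytes = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get("fred.jar"));
    byte[] digest = java.security.MessageDigest.getInstance("SHA-256").digest(bytes);
    String pin = String.format("%064x", new java.math.BigInteger(1, digest));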
 build.gradle                             | 17 +++++++++++------
 gradle/wrapper/gradle-wrapper.properties |  1 +
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/build.gradle b/build.gradle
index 0ba880bb..e810aeef 100644
--- a/build.gradle
+++ b/build.gradle
@@ -17,7 +17,6 @@ compileJava {
 version = "37"
 
 repositories {
-    mavenLocal()
     maven { url "http://4thline.org/m2" }
     maven { url 'https://mvn.freenetproject.org' }
     jcenter()
@@ -29,10 +28,20 @@ configurations {
 
 dependencies {
     compile group: 'org.freenetproject', name: 'fred', version: 'build+'
-    compile group: 'org.yaml', name: 'snakeyaml', version: '1.5'
+    extraLibs group: 'org.yaml', name: 'snakeyaml', version: '1.5'
     testImplementation 'junit:junit:4.13'
 }
 
+dependencyVerification {
+    verify = [
+        'org.freenetproject:fred:a181e584a055211ebbe69657a89f8e1452a5eb72af441839208223dc6515cbce',
+        'org.bouncycastle:bcprov-jdk15on:1c31e44e331d25e46d293b3e8ee2d07028a67db011e74cb2443285aed1d59c85',
+        'net.java.dev.jna:jna-platform:f1d00c167d8921c6e23c626ef9f1c3ae0be473c95c68ffa012bc7ae55a87e2d6',
+        'net.java.dev.jna:jna:0c8eb7acf67261656d79005191debaba3b6bf5dd60a43735a245429381dbecff',
+        'org.freenetproject:freenet-ext:32f2b3d6beedf54137ea2f9a3ebef67666d769f0966b08cd17fd7db59ba4d79f',
+    ]
+}
+
 def gitrev
 task buildInfo {
     try {
@@ -72,10 +81,6 @@ jar {
                'Built-Date': getMTime("src/main/java/plugins/Library/Version.java"),
                'Built-JDK': System.getProperty('java.version')
     }
-    from('src') {
-        include '**/*.txt'
-        include '**/*.properties'
-    }
     from (configurations.extraLibs.collect { it.isDirectory() ? it : zipTree(it) }) {
         exclude "META-INF/*.SF"
         exclude "META-INF/*.DSA"
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 290541c7..4230076d 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,6 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
 distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.3-bin.zip
+distributionSha256Sum=8626cbf206b4e201ade7b87779090690447054bc93f052954c78480fa6ed186e
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists

From a9d261c82de389e95744b751ad098ecf9729fc9c Mon Sep 17 00:00:00 2001
From: DC*
Date: Tue, 1 Sep 2020 00:50:47 -0300
Subject: [PATCH 08/10] Remove extraLib command

---
 build.gradle | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/build.gradle b/build.gradle
index e810aeef..eb9824b3 100644
--- a/build.gradle
+++ b/build.gradle
@@ -28,13 +28,14 @@ configurations {
 
 dependencies {
     compile group: 'org.freenetproject', name: 'fred', version: 'build+'
-    extraLibs group: 'org.yaml', name: 'snakeyaml', version: '1.5'
+    compile group: 'org.yaml', name: 'snakeyaml', version: '1.5'
     testImplementation 'junit:junit:4.13'
 }
 
 dependencyVerification {
     verify = [
         'org.freenetproject:fred:a181e584a055211ebbe69657a89f8e1452a5eb72af441839208223dc6515cbce',
+        'org.yaml:snakeyaml:9cf5e385cea2aaa58eb943a6a63c3a674d9ba00d88eca480548294d93c7b85c0',
         'org.bouncycastle:bcprov-jdk15on:1c31e44e331d25e46d293b3e8ee2d07028a67db011e74cb2443285aed1d59c85',
         'net.java.dev.jna:jna-platform:f1d00c167d8921c6e23c626ef9f1c3ae0be473c95c68ffa012bc7ae55a87e2d6',
         'net.java.dev.jna:jna:0c8eb7acf67261656d79005191debaba3b6bf5dd60a43735a245429381dbecff',

From 81cb7b04f398215b0b85aa52f05731c9ed692a5b Mon Sep 17 00:00:00 2001
From: DC*
Date: Tue, 1 Sep 2020 01:00:34 -0300
Subject: [PATCH 09/10] Move resources to the corresponding folder

---
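Note: files under src/main/resources are packaged at the root of the jar,
so the moved static files become classpath resources and ROOT_PATH gains a
leading slash to resolve from the classpath root. A sketch of the kind of
lookup this enables (hypothetical call; the toadlet's loading code is not
part of this hunk):

    // ROOT_PATH = "/staticfiles/"
    java.io.InputStream is = StaticToadlet.class.getResourceAsStream(ROOT_PATH + "style.css");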
 src/main/java/plugins/Library/ui/StaticToadlet.java          | 2 +-
 .../plugins/Library/ui => resources}/staticfiles/detect.js   | 0
 .../plugins/Library/ui => resources}/staticfiles/script.js   | 0
 .../plugins/Library/ui => resources}/staticfiles/style.css   | 0
 4 files changed, 1 insertion(+), 1 deletion(-)
 rename src/main/{java/plugins/Library/ui => resources}/staticfiles/detect.js (100%)
 rename src/main/{java/plugins/Library/ui => resources}/staticfiles/script.js (100%)
 rename src/main/{java/plugins/Library/ui => resources}/staticfiles/style.css (100%)

diff --git a/src/main/java/plugins/Library/ui/StaticToadlet.java b/src/main/java/plugins/Library/ui/StaticToadlet.java
index 0259d15e..3bd6054c 100644
--- a/src/main/java/plugins/Library/ui/StaticToadlet.java
+++ b/src/main/java/plugins/Library/ui/StaticToadlet.java
@@ -41,7 +41,7 @@ public String path() {
 	}
 
 	public static final String ROOT_URL = "/library/static/";
-	public static final String ROOT_PATH = "staticfiles/";
+	public static final String ROOT_PATH = "/staticfiles/";
 
 	public void handleMethodGET(URI uri, final HTTPRequest httprequest, final ToadletContext ctx)
 		throws ToadletContextClosedException, IOException, RedirectException {
diff --git a/src/main/java/plugins/Library/ui/staticfiles/detect.js b/src/main/resources/staticfiles/detect.js
similarity index 100%
rename from src/main/java/plugins/Library/ui/staticfiles/detect.js
rename to src/main/resources/staticfiles/detect.js
diff --git a/src/main/java/plugins/Library/ui/staticfiles/script.js b/src/main/resources/staticfiles/script.js
similarity index 100%
rename from src/main/java/plugins/Library/ui/staticfiles/script.js
rename to src/main/resources/staticfiles/script.js
diff --git a/src/main/java/plugins/Library/ui/staticfiles/style.css b/src/main/resources/staticfiles/style.css
similarity index 100%
rename from src/main/java/plugins/Library/ui/staticfiles/style.css
rename to src/main/resources/staticfiles/style.css

From ecd34d72ca0f74f7fb97fd8fe1b9150187735af3 Mon Sep 17 00:00:00 2001
From: "Arne Babenhauserheide (freenet releases)"
Date: Mon, 18 Oct 2021 19:23:54 +0000
Subject: [PATCH 10/10] update checksum for 1491, checked with diff -r on
 unzipped; only difference: version and META-INF/MANIFEST.MF

---
 build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build.gradle b/build.gradle
index eb9824b3..422bd74e 100644
--- a/build.gradle
+++ b/build.gradle
@@ -34,7 +34,7 @@ dependencies {
 
 dependencyVerification {
     verify = [
-        'org.freenetproject:fred:a181e584a055211ebbe69657a89f8e1452a5eb72af441839208223dc6515cbce',
+        'org.freenetproject:fred:a6021924f7e6572c19d5c290755c16bdbb7dfcb1a948a46dd8425c9eaa3d5079',
         'org.yaml:snakeyaml:9cf5e385cea2aaa58eb943a6a63c3a674d9ba00d88eca480548294d93c7b85c0',
         'org.bouncycastle:bcprov-jdk15on:1c31e44e331d25e46d293b3e8ee2d07028a67db011e74cb2443285aed1d59c85',
         'net.java.dev.jna:jna-platform:f1d00c167d8921c6e23c626ef9f1c3ae0be473c95c68ffa012bc7ae55a87e2d6',